File-copy from v4.4.100

This is the result of 'cp' from a linux-stable tree with the 'v4.4.100'
tag checked out (commit 26d6298789e695c9f627ce49a7bbd2286405798a) on
git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git

Please refer to that tree for all history prior to this point.

Change-Id: I8a9ee2aea93cd29c52c847d0ce33091a73ae6afe
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
new file mode 100644
index 0000000..a1c862b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -0,0 +1,142 @@
+#
+# Marvell device configuration
+#
+
+config NET_VENDOR_MARVELL
+	bool "Marvell devices"
+	default y
+	depends on PCI || CPU_PXA168 || MV64X60 || PPC32 || PLAT_ORION || INET
+	---help---
+	  If you have a network (Ethernet) card belonging to this class, say Y.
+
+	  Note that the answer to this question doesn't directly affect the
+	  kernel: saying N will just cause the configurator to skip all
+	  the questions about Marvell devices. If you say Y, you will be
+	  asked for your specific card in the following questions.
+
+if NET_VENDOR_MARVELL
+
+config MV643XX_ETH
+	tristate "Marvell Discovery (643XX) and Orion ethernet support"
+	depends on (MV64X60 || PPC32 || PLAT_ORION) && INET
+	select PHYLIB
+	select MVMDIO
+	---help---
+	  This driver supports the gigabit ethernet MACs in the
+	  Marvell Discovery PPC/MIPS chipset family (MV643XX) and
+	  in the Marvell Orion ARM SoC family.
+
+	  Some boards that use the Discovery chipset are the Momenco
+	  Ocelot C, the Momenco Jaguar ATX, and the Pegasos II.
+
+config MVMDIO
+	tristate "Marvell MDIO interface support"
+	depends on HAS_IOMEM
+	select PHYLIB
+	---help---
+	  This driver supports the MDIO interface found in the network
+	  interface units of the Marvell EBU SoCs (Kirkwood, Orion5x,
+	  Dove, Armada 370 and Armada XP).
+
+	  This driver is used by the MV643XX_ETH and MVNETA drivers.
+
+config MVNETA
+	tristate "Marvell Armada 370/38x/XP network interface support"
+	depends on PLAT_ORION
+	select MVMDIO
+	select FIXED_PHY
+	---help---
+	  This driver supports the network interface units in the
+	  Marvell ARMADA XP, ARMADA 370 and ARMADA 38x SoC family.
+
+	  Note that this driver is distinct from the mv643xx_eth
+	  driver, which should be used for the older Marvell SoCs
+	  (Dove, Orion, Discovery, Kirkwood).
+
+config MVPP2
+	tristate "Marvell Armada 375 network interface support"
+	depends on MACH_ARMADA_375
+	select MVMDIO
+	---help---
+	  This driver supports the network interface units in the
+	  Marvell ARMADA 375 SoC.
+
+config PXA168_ETH
+	tristate "Marvell pxa168 ethernet support"
+	depends on HAS_IOMEM && HAS_DMA
+	depends on CPU_PXA168 || ARCH_BERLIN || COMPILE_TEST
+	select PHYLIB
+	---help---
+	  This driver supports the pxa168 Ethernet ports.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called pxa168_eth.
+
+config SKGE
+	tristate "Marvell Yukon Gigabit Ethernet support"
+	depends on PCI
+	select CRC32
+	---help---
+	  This driver supports the Marvell Yukon or SysKonnect SK-98xx/SK-95xx
+	  and related Gigabit Ethernet adapters. It is a new, smaller driver
+	  with better performance and more complete ethtool support.
+
+	  It does not support the link failover and network management
+	  features that the "portable" vendor-supplied sk98lin driver does.
+
+	  This driver supports adapters based on the original Yukon chipset:
+	  Marvell 88E8001, Belkin F5D5005, CNet GigaCard, DLink DGE-530T,
+	  Linksys EG1032/EG1064, 3Com 3C940/3C940B, SysKonnect SK-9871/9872.
+
+	  It does not support the newer Yukon2 chipset: a separate driver,
+	  sky2, is provided for these adapters.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called skge.  This is recommended.
+
+config SKGE_DEBUG
+	bool "Debugging interface"
+	depends on SKGE && DEBUG_FS
+	---help---
+	  This option adds the ability to dump driver state for debugging.
+	  The file /sys/kernel/debug/skge/ethX displays the state of the internal
+	  transmit and receive rings.
+
+	  If unsure, say N.
+
+config SKGE_GENESIS
+	bool "Support for older SysKonnect Genesis boards"
+	depends on SKGE
+	---help---
+	  This enables support for the older and uncommon SysKonnect Genesis
+	  chips, which support MII via an external transceiver, instead of
+	  an internal one. Disabling this option will save some memory
+	  by making the code smaller. If unsure, say Y.
+
+config SKY2
+	tristate "Marvell Yukon 2 support"
+	depends on PCI
+	select CRC32
+	---help---
+	  This driver supports Gigabit Ethernet adapters based on the
+	  Marvell Yukon 2 chipset:
+	  Marvell 88E8021/88E8022/88E8035/88E8036/88E8038/88E8050/88E8052/
+	  88E8053/88E8055/88E8061/88E8062, SysKonnect SK-9E21D/SK-9S21
+
+	  There is a companion driver for the older Marvell Yukon and
+	  SysKonnect Genesis based adapters: skge.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called sky2.  This is recommended.
+
+config SKY2_DEBUG
+	bool "Debugging interface"
+	depends on SKY2 && DEBUG_FS
+	---help---
+	  This option adds the ability to dump driver state for debugging.
+	  The file /sys/kernel/debug/sky2/ethX displays the state of the internal
+	  transmit and receive rings.
+
+	  If unsure, say N.
+
+endif # NET_VENDOR_MARVELL
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
new file mode 100644
index 0000000..f6425bd
--- /dev/null
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for the Marvell device drivers.
+#
+
+obj-$(CONFIG_MVMDIO) += mvmdio.o
+obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
+obj-$(CONFIG_MVNETA) += mvneta.o
+obj-$(CONFIG_MVPP2) += mvpp2.o
+obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
+obj-$(CONFIG_SKGE) += skge.o
+obj-$(CONFIG_SKY2) += sky2.o
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
new file mode 100644
index 0000000..4182290
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -0,0 +1,3287 @@
+/*
+ * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
+ * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
+ *
+ * Based on the 64360 driver from:
+ * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
+ *		      Rabeeh Khoury <rabeeh@marvell.com>
+ *
+ * Copyright (C) 2003 PMC-Sierra, Inc.,
+ *	written by Manish Lachwani
+ *
+ * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
+ *
+ * Copyright (C) 2004-2006 MontaVista Software, Inc.
+ *			   Dale Farnsworth <dale@farnsworth.org>
+ *
+ * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
+ *				     <sjhill@realitydiluted.com>
+ *
+ * Copyright (C) 2007-2008 Marvell Semiconductor
+ *			   Lennert Buytenhek <buytenh@marvell.com>
+ *
+ * Copyright (C) 2013 Michael Stapelberg <michael@stapelberg.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <net/tso.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/phy.h>
+#include <linux/mv643xx_eth.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+
+static char mv643xx_eth_driver_name[] = "mv643xx_eth";
+static char mv643xx_eth_driver_version[] = "1.4";
+
+
+/*
+ * Registers shared between all ports.
+ */
+#define PHY_ADDR			0x0000
+#define WINDOW_BASE(w)			(0x0200 + ((w) << 3))
+#define WINDOW_SIZE(w)			(0x0204 + ((w) << 3))
+#define WINDOW_REMAP_HIGH(w)		(0x0280 + ((w) << 2))
+#define WINDOW_BAR_ENABLE		0x0290
+#define WINDOW_PROTECT(w)		(0x0294 + ((w) << 4))
+
+/*
+ * Main per-port registers.  These live at offset 0x0400 for
+ * port #0, 0x0800 for port #1, and 0x0c00 for port #2.
+ */
+#define PORT_CONFIG			0x0000
+#define  UNICAST_PROMISCUOUS_MODE	0x00000001
+#define PORT_CONFIG_EXT			0x0004
+#define MAC_ADDR_LOW			0x0014
+#define MAC_ADDR_HIGH			0x0018
+#define SDMA_CONFIG			0x001c
+#define  TX_BURST_SIZE_16_64BIT		0x01000000
+#define  TX_BURST_SIZE_4_64BIT		0x00800000
+#define  BLM_TX_NO_SWAP			0x00000020
+#define  BLM_RX_NO_SWAP			0x00000010
+#define  RX_BURST_SIZE_16_64BIT		0x00000008
+#define  RX_BURST_SIZE_4_64BIT		0x00000004
+#define PORT_SERIAL_CONTROL		0x003c
+#define  SET_MII_SPEED_TO_100		0x01000000
+#define  SET_GMII_SPEED_TO_1000		0x00800000
+#define  SET_FULL_DUPLEX_MODE		0x00200000
+#define  MAX_RX_PACKET_9700BYTE		0x000a0000
+#define  DISABLE_AUTO_NEG_SPEED_GMII	0x00002000
+#define  DO_NOT_FORCE_LINK_FAIL		0x00000400
+#define  SERIAL_PORT_CONTROL_RESERVED	0x00000200
+#define  DISABLE_AUTO_NEG_FOR_FLOW_CTRL	0x00000008
+#define  DISABLE_AUTO_NEG_FOR_DUPLEX	0x00000004
+#define  FORCE_LINK_PASS		0x00000002
+#define  SERIAL_PORT_ENABLE		0x00000001
+#define PORT_STATUS			0x0044
+#define  TX_FIFO_EMPTY			0x00000400
+#define  TX_IN_PROGRESS			0x00000080
+#define  PORT_SPEED_MASK		0x00000030
+#define  PORT_SPEED_1000		0x00000010
+#define  PORT_SPEED_100			0x00000020
+#define  PORT_SPEED_10			0x00000000
+#define  FLOW_CONTROL_ENABLED		0x00000008
+#define  FULL_DUPLEX			0x00000004
+#define  LINK_UP			0x00000002
+#define TXQ_COMMAND			0x0048
+#define TXQ_FIX_PRIO_CONF		0x004c
+#define PORT_SERIAL_CONTROL1		0x004c
+#define  CLK125_BYPASS_EN		0x00000010
+#define TX_BW_RATE			0x0050
+#define TX_BW_MTU			0x0058
+#define TX_BW_BURST			0x005c
+#define INT_CAUSE			0x0060
+#define  INT_TX_END			0x07f80000
+#define  INT_TX_END_0			0x00080000
+#define  INT_RX				0x000003fc
+#define  INT_RX_0			0x00000004
+#define  INT_EXT			0x00000002
+#define INT_CAUSE_EXT			0x0064
+#define  INT_EXT_LINK_PHY		0x00110000
+#define  INT_EXT_TX			0x000000ff
+#define INT_MASK			0x0068
+#define INT_MASK_EXT			0x006c
+#define TX_FIFO_URGENT_THRESHOLD	0x0074
+#define RX_DISCARD_FRAME_CNT		0x0084
+#define RX_OVERRUN_FRAME_CNT		0x0088
+#define TXQ_FIX_PRIO_CONF_MOVED		0x00dc
+#define TX_BW_RATE_MOVED		0x00e0
+#define TX_BW_MTU_MOVED			0x00e8
+#define TX_BW_BURST_MOVED		0x00ec
+#define RXQ_CURRENT_DESC_PTR(q)		(0x020c + ((q) << 4))
+#define RXQ_COMMAND			0x0280
+#define TXQ_CURRENT_DESC_PTR(q)		(0x02c0 + ((q) << 2))
+#define TXQ_BW_TOKENS(q)		(0x0300 + ((q) << 4))
+#define TXQ_BW_CONF(q)			(0x0304 + ((q) << 4))
+#define TXQ_BW_WRR_CONF(q)		(0x0308 + ((q) << 4))
+
+/*
+ * Misc per-port registers.
+ */
+#define MIB_COUNTERS(p)			(0x1000 + ((p) << 7))
+#define SPECIAL_MCAST_TABLE(p)		(0x1400 + ((p) << 10))
+#define OTHER_MCAST_TABLE(p)		(0x1500 + ((p) << 10))
+#define UNICAST_TABLE(p)		(0x1600 + ((p) << 10))
+
+
+/*
+ * SDMA configuration register default value.
+ */
+#if defined(__BIG_ENDIAN)
+#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
+		(RX_BURST_SIZE_4_64BIT	|	\
+		 TX_BURST_SIZE_4_64BIT)
+#elif defined(__LITTLE_ENDIAN)
+#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
+		(RX_BURST_SIZE_4_64BIT	|	\
+		 BLM_RX_NO_SWAP		|	\
+		 BLM_TX_NO_SWAP		|	\
+		 TX_BURST_SIZE_4_64BIT)
+#else
+#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
+#endif
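+/*
+ * Byte swapping in the DMA engine is left enabled on big-endian hosts and
+ * disabled (via the BLM_*_NO_SWAP bits) on little-endian ones; the
+ * descriptor layouts below are likewise endian-dependent to match.
+ */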
+
+
+/*
+ * Misc definitions.
+ */
+#define DEFAULT_RX_QUEUE_SIZE	128
+#define DEFAULT_TX_QUEUE_SIZE	512
+#define SKB_DMA_REALIGN		((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
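+/* SKB_DMA_REALIGN is the extra headroom reserved in rxq_refill() so that,
+ * for a page-aligned allocation, the buffer handed to the DMA engine ends
+ * up cache-line aligned (netdev_alloc_skb() already reserves NET_SKB_PAD).
+ */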
+
+#define TSO_HEADER_SIZE		128
+
+/* Max number of allowed TCP segments for software TSO */
+#define MV643XX_MAX_TSO_SEGS 100
+#define MV643XX_MAX_SKB_DESCS (MV643XX_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
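+/* The factor of two above covers the worst case of one header descriptor
+ * plus at least one data descriptor per TSO segment; the skb's page
+ * fragments can each need a descriptor of their own on top of that.
+ */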
+
+#define IS_TSO_HEADER(txq, addr) \
+	((addr >= txq->tso_hdrs_dma) && \
+	 (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
+
+#define DESC_DMA_MAP_SINGLE 0
+#define DESC_DMA_MAP_PAGE 1
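+/* The mapping type of each TX descriptor is recorded in
+ * txq->tx_desc_mapping so that txq_reclaim() can pair it with the
+ * matching dma_unmap_single() or dma_unmap_page() call.
+ */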
+
+/*
+ * RX/TX descriptors.
+ */
+#if defined(__BIG_ENDIAN)
+struct rx_desc {
+	u16 byte_cnt;		/* Descriptor buffer byte count		*/
+	u16 buf_size;		/* Buffer size				*/
+	u32 cmd_sts;		/* Descriptor command status		*/
+	u32 next_desc_ptr;	/* Next descriptor pointer		*/
+	u32 buf_ptr;		/* Descriptor buffer pointer		*/
+};
+
+struct tx_desc {
+	u16 byte_cnt;		/* buffer byte count			*/
+	u16 l4i_chk;		/* CPU provided TCP checksum		*/
+	u32 cmd_sts;		/* Command/status field			*/
+	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
+	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
+};
+#elif defined(__LITTLE_ENDIAN)
+struct rx_desc {
+	u32 cmd_sts;		/* Descriptor command status		*/
+	u16 buf_size;		/* Buffer size				*/
+	u16 byte_cnt;		/* Descriptor buffer byte count		*/
+	u32 buf_ptr;		/* Descriptor buffer pointer		*/
+	u32 next_desc_ptr;	/* Next descriptor pointer		*/
+};
+
+struct tx_desc {
+	u32 cmd_sts;		/* Command/status field			*/
+	u16 l4i_chk;		/* CPU provided TCP checksum		*/
+	u16 byte_cnt;		/* buffer byte count			*/
+	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
+	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
+};
+#else
+#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
+#endif
+
+/* RX & TX descriptor command */
+#define BUFFER_OWNED_BY_DMA		0x80000000
+
+/* RX & TX descriptor status */
+#define ERROR_SUMMARY			0x00000001
+
+/* RX descriptor status */
+#define LAYER_4_CHECKSUM_OK		0x40000000
+#define RX_ENABLE_INTERRUPT		0x20000000
+#define RX_FIRST_DESC			0x08000000
+#define RX_LAST_DESC			0x04000000
+#define RX_IP_HDR_OK			0x02000000
+#define RX_PKT_IS_IPV4			0x01000000
+#define RX_PKT_IS_ETHERNETV2		0x00800000
+#define RX_PKT_LAYER4_TYPE_MASK		0x00600000
+#define RX_PKT_LAYER4_TYPE_TCP_IPV4	0x00000000
+#define RX_PKT_IS_VLAN_TAGGED		0x00080000
+
+/* TX descriptor command */
+#define TX_ENABLE_INTERRUPT		0x00800000
+#define GEN_CRC				0x00400000
+#define TX_FIRST_DESC			0x00200000
+#define TX_LAST_DESC			0x00100000
+#define ZERO_PADDING			0x00080000
+#define GEN_IP_V4_CHECKSUM		0x00040000
+#define GEN_TCP_UDP_CHECKSUM		0x00020000
+#define UDP_FRAME			0x00010000
+#define MAC_HDR_EXTRA_4_BYTES		0x00008000
+#define GEN_TCP_UDP_CHK_FULL		0x00000400
+#define MAC_HDR_EXTRA_8_BYTES		0x00000200
+
+#define TX_IHL_SHIFT			11
+
+
+/* global *******************************************************************/
+struct mv643xx_eth_shared_private {
+	/*
+	 * Ethernet controller base address.
+	 */
+	void __iomem *base;
+
+	/*
+	 * Per-port MBUS window access register value.
+	 */
+	u32 win_protect;
+
+	/*
+	 * Hardware-specific parameters.
+	 */
+	int extended_rx_coal_limit;
+	int tx_bw_control;
+	int tx_csum_limit;
+	struct clk *clk;
+};
+
+#define TX_BW_CONTROL_ABSENT		0
+#define TX_BW_CONTROL_OLD_LAYOUT	1
+#define TX_BW_CONTROL_NEW_LAYOUT	2
+
+static int mv643xx_eth_open(struct net_device *dev);
+static int mv643xx_eth_stop(struct net_device *dev);
+
+
+/* per-port *****************************************************************/
+struct mib_counters {
+	u64 good_octets_received;
+	u32 bad_octets_received;
+	u32 internal_mac_transmit_err;
+	u32 good_frames_received;
+	u32 bad_frames_received;
+	u32 broadcast_frames_received;
+	u32 multicast_frames_received;
+	u32 frames_64_octets;
+	u32 frames_65_to_127_octets;
+	u32 frames_128_to_255_octets;
+	u32 frames_256_to_511_octets;
+	u32 frames_512_to_1023_octets;
+	u32 frames_1024_to_max_octets;
+	u64 good_octets_sent;
+	u32 good_frames_sent;
+	u32 excessive_collision;
+	u32 multicast_frames_sent;
+	u32 broadcast_frames_sent;
+	u32 unrec_mac_control_received;
+	u32 fc_sent;
+	u32 good_fc_received;
+	u32 bad_fc_received;
+	u32 undersize_received;
+	u32 fragments_received;
+	u32 oversize_received;
+	u32 jabber_received;
+	u32 mac_receive_error;
+	u32 bad_crc_event;
+	u32 collision;
+	u32 late_collision;
+	/* Non MIB hardware counters */
+	u32 rx_discard;
+	u32 rx_overrun;
+};
+
+struct rx_queue {
+	int index;
+
+	int rx_ring_size;
+
+	int rx_desc_count;
+	int rx_curr_desc;
+	int rx_used_desc;
+
+	struct rx_desc *rx_desc_area;
+	dma_addr_t rx_desc_dma;
+	int rx_desc_area_size;
+	struct sk_buff **rx_skb;
+};
+
+struct tx_queue {
+	int index;
+
+	int tx_ring_size;
+
+	int tx_desc_count;
+	int tx_curr_desc;
+	int tx_used_desc;
+
+	int tx_stop_threshold;
+	int tx_wake_threshold;
+
+	char *tso_hdrs;
+	dma_addr_t tso_hdrs_dma;
+
+	struct tx_desc *tx_desc_area;
+	char *tx_desc_mapping; /* array to track the type of the dma mapping */
+	dma_addr_t tx_desc_dma;
+	int tx_desc_area_size;
+
+	struct sk_buff_head tx_skb;
+
+	unsigned long tx_packets;
+	unsigned long tx_bytes;
+	unsigned long tx_dropped;
+};
+
+struct mv643xx_eth_private {
+	struct mv643xx_eth_shared_private *shared;
+	void __iomem *base;
+	int port_num;
+
+	struct net_device *dev;
+
+	struct phy_device *phy;
+
+	struct timer_list mib_counters_timer;
+	spinlock_t mib_counters_lock;
+	struct mib_counters mib_counters;
+
+	struct work_struct tx_timeout_task;
+
+	struct napi_struct napi;
+	u32 int_mask;
+	u8 oom;
+	u8 work_link;
+	u8 work_tx;
+	u8 work_tx_end;
+	u8 work_rx;
+	u8 work_rx_refill;
+
+	int skb_size;
+
+	/*
+	 * RX state.
+	 */
+	int rx_ring_size;
+	unsigned long rx_desc_sram_addr;
+	int rx_desc_sram_size;
+	int rxq_count;
+	struct timer_list rx_oom;
+	struct rx_queue rxq[8];
+
+	/*
+	 * TX state.
+	 */
+	int tx_ring_size;
+	unsigned long tx_desc_sram_addr;
+	int tx_desc_sram_size;
+	int txq_count;
+	struct tx_queue txq[8];
+
+	/*
+	 * Hardware-specific parameters.
+	 */
+	struct clk *clk;
+	unsigned int t_clk;
+};
+
+
+/* port register accessors **************************************************/
+static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
+{
+	return readl(mp->shared->base + offset);
+}
+
+static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset)
+{
+	return readl(mp->base + offset);
+}
+
+static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
+{
+	writel(data, mp->shared->base + offset);
+}
+
+static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data)
+{
+	writel(data, mp->base + offset);
+}
+
+
+/* rxq/txq helper functions *************************************************/
+static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
+{
+	return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);
+}
+
+static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
+{
+	return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);
+}
+
+static void rxq_enable(struct rx_queue *rxq)
+{
+	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
+	wrlp(mp, RXQ_COMMAND, 1 << rxq->index);
+}
+
+static void rxq_disable(struct rx_queue *rxq)
+{
+	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
+	u8 mask = 1 << rxq->index;
+
+	wrlp(mp, RXQ_COMMAND, mask << 8);
+	while (rdlp(mp, RXQ_COMMAND) & mask)
+		udelay(10);
+}
+
+static void txq_reset_hw_ptr(struct tx_queue *txq)
+{
+	struct mv643xx_eth_private *mp = txq_to_mp(txq);
+	u32 addr;
+
+	addr = (u32)txq->tx_desc_dma;
+	addr += txq->tx_curr_desc * sizeof(struct tx_desc);
+	wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr);
+}
+
+static void txq_enable(struct tx_queue *txq)
+{
+	struct mv643xx_eth_private *mp = txq_to_mp(txq);
+	wrlp(mp, TXQ_COMMAND, 1 << txq->index);
+}
+
+static void txq_disable(struct tx_queue *txq)
+{
+	struct mv643xx_eth_private *mp = txq_to_mp(txq);
+	u8 mask = 1 << txq->index;
+
+	wrlp(mp, TXQ_COMMAND, mask << 8);
+	while (rdlp(mp, TXQ_COMMAND) & mask)
+		udelay(10);
+}
+
+static void txq_maybe_wake(struct tx_queue *txq)
+{
+	struct mv643xx_eth_private *mp = txq_to_mp(txq);
+	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
+
+	if (netif_tx_queue_stopped(nq)) {
+		__netif_tx_lock(nq, smp_processor_id());
+		if (txq->tx_desc_count <= txq->tx_wake_threshold)
+			netif_tx_wake_queue(nq);
+		__netif_tx_unlock(nq);
+	}
+}
+
+static int rxq_process(struct rx_queue *rxq, int budget)
+{
+	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
+	struct net_device_stats *stats = &mp->dev->stats;
+	int rx;
+
+	rx = 0;
+	while (rx < budget && rxq->rx_desc_count) {
+		struct rx_desc *rx_desc;
+		unsigned int cmd_sts;
+		struct sk_buff *skb;
+		u16 byte_cnt;
+
+		rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];
+
+		cmd_sts = rx_desc->cmd_sts;
+		if (cmd_sts & BUFFER_OWNED_BY_DMA)
+			break;
+		rmb();
+
+		skb = rxq->rx_skb[rxq->rx_curr_desc];
+		rxq->rx_skb[rxq->rx_curr_desc] = NULL;
+
+		rxq->rx_curr_desc++;
+		if (rxq->rx_curr_desc == rxq->rx_ring_size)
+			rxq->rx_curr_desc = 0;
+
+		dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr,
+				 rx_desc->buf_size, DMA_FROM_DEVICE);
+		rxq->rx_desc_count--;
+		rx++;
+
+		mp->work_rx_refill |= 1 << rxq->index;
+
+		byte_cnt = rx_desc->byte_cnt;
+
+		/*
+		 * Update statistics.
+		 *
+		 * Note that the descriptor byte count includes 2 dummy
+		 * bytes automatically inserted by the hardware at the
+		 * start of the packet (which we don't count), and a 4
+		 * byte CRC at the end of the packet (which we do count).
+		 */
+		stats->rx_packets++;
+		stats->rx_bytes += byte_cnt - 2;
+
+		/*
+		 * In case we received a packet without first / last bits
+		 * on, or the error summary bit is set, the packet needs
+		 * to be dropped.
+		 */
+		if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC | ERROR_SUMMARY))
+			!= (RX_FIRST_DESC | RX_LAST_DESC))
+			goto err;
+
+		/*
+		 * The -4 is for the CRC in the trailer of the
+		 * received packet
+		 */
+		skb_put(skb, byte_cnt - 2 - 4);
+
+		if (cmd_sts & LAYER_4_CHECKSUM_OK)
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		skb->protocol = eth_type_trans(skb, mp->dev);
+
+		napi_gro_receive(&mp->napi, skb);
+
+		continue;
+
+err:
+		stats->rx_dropped++;
+
+		if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
+			(RX_FIRST_DESC | RX_LAST_DESC)) {
+			if (net_ratelimit())
+				netdev_err(mp->dev,
+					   "received packet spanning multiple descriptors\n");
+		}
+
+		if (cmd_sts & ERROR_SUMMARY)
+			stats->rx_errors++;
+
+		dev_kfree_skb(skb);
+	}
+
+	if (rx < budget)
+		mp->work_rx &= ~(1 << rxq->index);
+
+	return rx;
+}
+
+static int rxq_refill(struct rx_queue *rxq, int budget)
+{
+	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
+	int refilled;
+
+	refilled = 0;
+	while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
+		struct sk_buff *skb;
+		int rx;
+		struct rx_desc *rx_desc;
+		int size;
+
+		skb = netdev_alloc_skb(mp->dev, mp->skb_size);
+
+		if (skb == NULL) {
+			mp->oom = 1;
+			goto oom;
+		}
+
+		if (SKB_DMA_REALIGN)
+			skb_reserve(skb, SKB_DMA_REALIGN);
+
+		refilled++;
+		rxq->rx_desc_count++;
+
+		rx = rxq->rx_used_desc++;
+		if (rxq->rx_used_desc == rxq->rx_ring_size)
+			rxq->rx_used_desc = 0;
+
+		rx_desc = rxq->rx_desc_area + rx;
+
+		size = skb_end_pointer(skb) - skb->data;
+		rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent,
+						  skb->data, size,
+						  DMA_FROM_DEVICE);
+		rx_desc->buf_size = size;
+		rxq->rx_skb[rx] = skb;
+		wmb();
+		rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
+		wmb();
+
+		/*
+		 * The hardware automatically prepends 2 bytes of
+		 * dummy data to each received packet, so that the
+		 * IP header ends up 16-byte aligned.
+		 */
+		skb_reserve(skb, 2);
+	}
+
+	if (refilled < budget)
+		mp->work_rx_refill &= ~(1 << rxq->index);
+
+oom:
+	return refilled;
+}
+
+
+/* tx ***********************************************************************/
+static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
+{
+	int frag;
+
+	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
+		const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
+
+		if (skb_frag_size(fragp) <= 8 && fragp->page_offset & 7)
+			return 1;
+	}
+
+	return 0;
+}
+
+static inline __be16 sum16_as_be(__sum16 sum)
+{
+	return (__force __be16)sum;
+}
+
+static int skb_tx_csum(struct mv643xx_eth_private *mp, struct sk_buff *skb,
+		       u16 *l4i_chk, u32 *command, int length)
+{
+	int ret;
+	u32 cmd = 0;
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		int hdr_len;
+		int tag_bytes;
+
+		BUG_ON(skb->protocol != htons(ETH_P_IP) &&
+		       skb->protocol != htons(ETH_P_8021Q));
+
+		hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
+		tag_bytes = hdr_len - ETH_HLEN;
+
+		if (length - hdr_len > mp->shared->tx_csum_limit ||
+		    unlikely(tag_bytes & ~12)) {
+			ret = skb_checksum_help(skb);
+			if (!ret)
+				goto no_csum;
+			return ret;
+		}
+
+		if (tag_bytes & 4)
+			cmd |= MAC_HDR_EXTRA_4_BYTES;
+		if (tag_bytes & 8)
+			cmd |= MAC_HDR_EXTRA_8_BYTES;
+
+		cmd |= GEN_TCP_UDP_CHECKSUM | GEN_TCP_UDP_CHK_FULL |
+		       GEN_IP_V4_CHECKSUM |
+		       ip_hdr(skb)->ihl << TX_IHL_SHIFT;
+
+		/* TODO: Revisit this. With the usage of GEN_TCP_UDP_CHK_FULL
+		 * it seems we don't need to pass the initial checksum. */
+		switch (ip_hdr(skb)->protocol) {
+		case IPPROTO_UDP:
+			cmd |= UDP_FRAME;
+			*l4i_chk = 0;
+			break;
+		case IPPROTO_TCP:
+			*l4i_chk = 0;
+			break;
+		default:
+			WARN(1, "protocol not supported");
+		}
+	} else {
+no_csum:
+		/* Errata BTS #50, IHL must be 5 if no HW checksum */
+		cmd |= 5 << TX_IHL_SHIFT;
+	}
+	*command = cmd;
+	return 0;
+}
+
+static inline int
+txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
+		 struct sk_buff *skb, char *data, int length,
+		 bool last_tcp, bool is_last)
+{
+	int tx_index;
+	u32 cmd_sts;
+	struct tx_desc *desc;
+
+	tx_index = txq->tx_curr_desc++;
+	if (txq->tx_curr_desc == txq->tx_ring_size)
+		txq->tx_curr_desc = 0;
+	desc = &txq->tx_desc_area[tx_index];
+	txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
+
+	desc->l4i_chk = 0;
+	desc->byte_cnt = length;
+
+	if (length <= 8 && (uintptr_t)data & 0x7) {
+		/* Copy unaligned small data fragment to TSO header data area */
+		memcpy(txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE,
+		       data, length);
+		desc->buf_ptr = txq->tso_hdrs_dma
+			+ txq->tx_curr_desc * TSO_HEADER_SIZE;
+	} else {
+		/* Alignment is okay, map buffer and hand off to hardware */
+		txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
+		desc->buf_ptr = dma_map_single(dev->dev.parent, data,
+			length, DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(dev->dev.parent,
+					       desc->buf_ptr))) {
+			WARN(1, "dma_map_single failed!\n");
+			return -ENOMEM;
+		}
+	}
+
+	cmd_sts = BUFFER_OWNED_BY_DMA;
+	if (last_tcp) {
+		/* last descriptor in the TCP packet */
+		cmd_sts |= ZERO_PADDING | TX_LAST_DESC;
+		/* last descriptor in SKB */
+		if (is_last)
+			cmd_sts |= TX_ENABLE_INTERRUPT;
+	}
+	desc->cmd_sts = cmd_sts;
+	return 0;
+}
+
+static inline void
+txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length,
+		u32 *first_cmd_sts, bool first_desc)
+{
+	struct mv643xx_eth_private *mp = txq_to_mp(txq);
+	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+	int tx_index;
+	struct tx_desc *desc;
+	int ret;
+	u32 cmd_csum = 0;
+	u16 l4i_chk = 0;
+	u32 cmd_sts;
+
+	tx_index = txq->tx_curr_desc;
+	desc = &txq->tx_desc_area[tx_index];
+
+	ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_csum, length);
+	if (ret)
+		WARN(1, "failed to prepare checksum!");
+
+	/* Should we set this? Can't use the value from skb_tx_csum()
+	 * as it's not the correct initial L4 checksum to use. */
+	desc->l4i_chk = 0;
+
+	desc->byte_cnt = hdr_len;
+	desc->buf_ptr = txq->tso_hdrs_dma +
+			txq->tx_curr_desc * TSO_HEADER_SIZE;
+	cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA | TX_FIRST_DESC | GEN_CRC;
+
+	/* Defer updating the first command descriptor until all
+	 * following descriptors have been written.
+	 */
+	if (first_desc)
+		*first_cmd_sts = cmd_sts;
+	else
+		desc->cmd_sts = cmd_sts;
+
+	txq->tx_curr_desc++;
+	if (txq->tx_curr_desc == txq->tx_ring_size)
+		txq->tx_curr_desc = 0;
+}
+
+static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
+			  struct net_device *dev)
+{
+	struct mv643xx_eth_private *mp = txq_to_mp(txq);
+	int total_len, data_left, ret;
+	int desc_count = 0;
+	struct tso_t tso;
+	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+	struct tx_desc *first_tx_desc;
+	u32 first_cmd_sts = 0;
+
+	/* Count needed descriptors */
+	if ((txq->tx_desc_count + tso_count_descs(skb)) >= txq->tx_ring_size) {
+		netdev_dbg(dev, "not enough descriptors for TSO!\n");
+		return -EBUSY;
+	}
+
+	first_tx_desc = &txq->tx_desc_area[txq->tx_curr_desc];
+
+	/* Initialize the TSO handler, and prepare the first payload */
+	tso_start(skb, &tso);
+
+	total_len = skb->len - hdr_len;
+	while (total_len > 0) {
+		bool first_desc = (desc_count == 0);
+		char *hdr;
+
+		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+		total_len -= data_left;
+		desc_count++;
+
+		/* prepare packet headers: MAC + IP + TCP */
+		hdr = txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE;
+		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
+		txq_put_hdr_tso(skb, txq, data_left, &first_cmd_sts,
+				first_desc);
+
+		while (data_left > 0) {
+			int size;
+			desc_count++;
+
+			size = min_t(int, tso.size, data_left);
+			ret = txq_put_data_tso(dev, txq, skb, tso.data, size,
+					       size == data_left,
+					       total_len == 0);
+			if (ret)
+				goto err_release;
+			data_left -= size;
+			tso_build_data(skb, &tso, size);
+		}
+	}
+
+	__skb_queue_tail(&txq->tx_skb, skb);
+	skb_tx_timestamp(skb);
+
+	/* ensure all other descriptors are written before first cmd_sts */
+	wmb();
+	first_tx_desc->cmd_sts = first_cmd_sts;
+
+	/* clear TX_END status */
+	mp->work_tx_end &= ~(1 << txq->index);
+
+	/* ensure all descriptors are written before poking hardware */
+	wmb();
+	txq_enable(txq);
+	txq->tx_desc_count += desc_count;
+	return 0;
+err_release:
+	/* TODO: Release all used data descriptors; header descriptors must not
+	 * be DMA-unmapped.
+	 */
+	return ret;
+}
+
+static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
+{
+	struct mv643xx_eth_private *mp = txq_to_mp(txq);
+	int nr_frags = skb_shinfo(skb)->nr_frags;
+	int frag;
+
+	for (frag = 0; frag < nr_frags; frag++) {
+		skb_frag_t *this_frag;
+		int tx_index;
+		struct tx_desc *desc;
+
+		this_frag = &skb_shinfo(skb)->frags[frag];
+		tx_index = txq->tx_curr_desc++;
+		if (txq->tx_curr_desc == txq->tx_ring_size)
+			txq->tx_curr_desc = 0;
+		desc = &txq->tx_desc_area[tx_index];
+		txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_PAGE;
+
+		/*
+		 * The last fragment will generate an interrupt
+		 * which will free the skb on TX completion.
+		 */
+		if (frag == nr_frags - 1) {
+			desc->cmd_sts = BUFFER_OWNED_BY_DMA |
+					ZERO_PADDING | TX_LAST_DESC |
+					TX_ENABLE_INTERRUPT;
+		} else {
+			desc->cmd_sts = BUFFER_OWNED_BY_DMA;
+		}
+
+		desc->l4i_chk = 0;
+		desc->byte_cnt = skb_frag_size(this_frag);
+		desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent,
+						 this_frag, 0, desc->byte_cnt,
+						 DMA_TO_DEVICE);
+	}
+}
+
+static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb,
+			  struct net_device *dev)
+{
+	struct mv643xx_eth_private *mp = txq_to_mp(txq);
+	int nr_frags = skb_shinfo(skb)->nr_frags;
+	int tx_index;
+	struct tx_desc *desc;
+	u32 cmd_sts;
+	u16 l4i_chk;
+	int length, ret;
+
+	cmd_sts = 0;
+	l4i_chk = 0;
+
+	if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
+		if (net_ratelimit())
+			netdev_err(dev, "tx queue full?!\n");
+		return -EBUSY;
+	}
+
+	ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_sts, skb->len);
+	if (ret)
+		return ret;
+	cmd_sts |= TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
+
+	tx_index = txq->tx_curr_desc++;
+	if (txq->tx_curr_desc == txq->tx_ring_size)
+		txq->tx_curr_desc = 0;
+	desc = &txq->tx_desc_area[tx_index];
+	txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
+
+	if (nr_frags) {
+		txq_submit_frag_skb(txq, skb);
+		length = skb_headlen(skb);
+	} else {
+		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
+		length = skb->len;
+	}
+
+	desc->l4i_chk = l4i_chk;
+	desc->byte_cnt = length;
+	desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data,
+				       length, DMA_TO_DEVICE);
+
+	__skb_queue_tail(&txq->tx_skb, skb);
+
+	skb_tx_timestamp(skb);
+
+	/* ensure all other descriptors are written before first cmd_sts */
+	wmb();
+	desc->cmd_sts = cmd_sts;
+
+	/* clear TX_END status */
+	mp->work_tx_end &= ~(1 << txq->index);
+
+	/* ensure all descriptors are written before poking hardware */
+	wmb();
+	txq_enable(txq);
+
+	txq->tx_desc_count += nr_frags + 1;
+
+	return 0;
+}
+
+static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct mv643xx_eth_private *mp = netdev_priv(dev);
+	int length, queue, ret;
+	struct tx_queue *txq;
+	struct netdev_queue *nq;
+
+	queue = skb_get_queue_mapping(skb);
+	txq = mp->txq + queue;
+	nq = netdev_get_tx_queue(dev, queue);
+
+	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
+		netdev_printk(KERN_DEBUG, dev,
+			      "failed to linearize skb with tiny unaligned fragment\n");
+		return NETDEV_TX_BUSY;
+	}
+
+	length = skb->len;
+
+	if (skb_is_gso(skb))
+		ret = txq_submit_tso(txq, skb, dev);
+	else
+		ret = txq_submit_skb(txq, skb, dev);
+	if (!ret) {
+		txq->tx_bytes += length;
+		txq->tx_packets++;
+
+		if (txq->tx_desc_count >= txq->tx_stop_threshold)
+			netif_tx_stop_queue(nq);
+	} else {
+		txq->tx_dropped++;
+		dev_kfree_skb_any(skb);
+	}
+
+	return NETDEV_TX_OK;
+}
+
+
+/* tx napi ******************************************************************/
+static void txq_kick(struct tx_queue *txq)
+{
+	struct mv643xx_eth_private *mp = txq_to_mp(txq);
+	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
+	u32 hw_desc_ptr;
+	u32 expected_ptr;
+
+	__netif_tx_lock(nq, smp_processor_id());
+
+	if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index))
+		goto out;
+
+	hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index));
+	expected_ptr = (u32)txq->tx_desc_dma +
+				txq->tx_curr_desc * sizeof(struct tx_desc);
+
+	if (hw_desc_ptr != expected_ptr)
+		txq_enable(txq);
+
+out:
+	__netif_tx_unlock(nq);
+
+	mp->work_tx_end &= ~(1 << txq->index);
+}
+
+static int txq_reclaim(struct tx_queue *txq, int budget, int force)
+{
+	struct mv643xx_eth_private *mp = txq_to_mp(txq);
+	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
+	int reclaimed;
+
+	__netif_tx_lock_bh(nq);
+
+	reclaimed = 0;
+	while (reclaimed < budget && txq->tx_desc_count > 0) {
+		int tx_index;
+		struct tx_desc *desc;
+		u32 cmd_sts;
+		char desc_dma_map;
+
+		tx_index = txq->tx_used_desc;
+		desc = &txq->tx_desc_area[tx_index];
+		desc_dma_map = txq->tx_desc_mapping[tx_index];
+
+		cmd_sts = desc->cmd_sts;
+
+		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
+			if (!force)
+				break;
+			desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
+		}
+
+		txq->tx_used_desc = tx_index + 1;
+		if (txq->tx_used_desc == txq->tx_ring_size)
+			txq->tx_used_desc = 0;
+
+		reclaimed++;
+		txq->tx_desc_count--;
+
+		if (!IS_TSO_HEADER(txq, desc->buf_ptr)) {
+
+			if (desc_dma_map == DESC_DMA_MAP_PAGE)
+				dma_unmap_page(mp->dev->dev.parent,
+					       desc->buf_ptr,
+					       desc->byte_cnt,
+					       DMA_TO_DEVICE);
+			else
+				dma_unmap_single(mp->dev->dev.parent,
+						 desc->buf_ptr,
+						 desc->byte_cnt,
+						 DMA_TO_DEVICE);
+		}
+
+		if (cmd_sts & TX_ENABLE_INTERRUPT) {
+			struct sk_buff *skb = __skb_dequeue(&txq->tx_skb);
+
+			if (!WARN_ON(!skb))
+				dev_kfree_skb(skb);
+		}
+
+		if (cmd_sts & ERROR_SUMMARY) {
+			netdev_info(mp->dev, "tx error\n");
+			mp->dev->stats.tx_errors++;
+		}
+
+	}
+
+	__netif_tx_unlock_bh(nq);
+
+	if (reclaimed < budget)
+		mp->work_tx &= ~(1 << txq->index);
+
+	return reclaimed;
+}
+
+
+/* tx rate control **********************************************************/
+/*
+ * Set total maximum TX rate (shared by all TX queues for this port)
+ * to 'rate' bits per second, with a maximum burst of 'burst' bytes.
+ */
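+/*
+ * Example with hypothetical numbers: for t_clk = 200MHz and rate = 1Gb/s,
+ * token_rate = ((1000000000 / 1000) * 64) / (200000000 / 1000) = 320,
+ * comfortably below the 1023 hardware limit.
+ */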
+static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
+{
+	int token_rate;
+	int mtu;
+	int bucket_size;
+
+	token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
+	if (token_rate > 1023)
+		token_rate = 1023;
+
+	mtu = (mp->dev->mtu + 255) >> 8;
+	if (mtu > 63)
+		mtu = 63;
+
+	bucket_size = (burst + 255) >> 8;
+	if (bucket_size > 65535)
+		bucket_size = 65535;
+
+	switch (mp->shared->tx_bw_control) {
+	case TX_BW_CONTROL_OLD_LAYOUT:
+		wrlp(mp, TX_BW_RATE, token_rate);
+		wrlp(mp, TX_BW_MTU, mtu);
+		wrlp(mp, TX_BW_BURST, bucket_size);
+		break;
+	case TX_BW_CONTROL_NEW_LAYOUT:
+		wrlp(mp, TX_BW_RATE_MOVED, token_rate);
+		wrlp(mp, TX_BW_MTU_MOVED, mtu);
+		wrlp(mp, TX_BW_BURST_MOVED, bucket_size);
+		break;
+	}
+}
+
+static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
+{
+	struct mv643xx_eth_private *mp = txq_to_mp(txq);
+	int token_rate;
+	int bucket_size;
+
+	token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
+	if (token_rate > 1023)
+		token_rate = 1023;
+
+	bucket_size = (burst + 255) >> 8;
+	if (bucket_size > 65535)
+		bucket_size = 65535;
+
+	wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14);
+	wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate);
+}
+
+static void txq_set_fixed_prio_mode(struct tx_queue *txq)
+{
+	struct mv643xx_eth_private *mp = txq_to_mp(txq);
+	int off;
+	u32 val;
+
+	/*
+	 * Turn on fixed priority mode.
+	 */
+	off = 0;
+	switch (mp->shared->tx_bw_control) {
+	case TX_BW_CONTROL_OLD_LAYOUT:
+		off = TXQ_FIX_PRIO_CONF;
+		break;
+	case TX_BW_CONTROL_NEW_LAYOUT:
+		off = TXQ_FIX_PRIO_CONF_MOVED;
+		break;
+	}
+
+	if (off) {
+		val = rdlp(mp, off);
+		val |= 1 << txq->index;
+		wrlp(mp, off, val);
+	}
+}
+
+
+/* mii management interface *************************************************/
+static void mv643xx_eth_adjust_link(struct net_device *dev)
+{
+	struct mv643xx_eth_private *mp = netdev_priv(dev);
+	u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
+	u32 autoneg_disable = FORCE_LINK_PASS |
+			      DISABLE_AUTO_NEG_SPEED_GMII |
+			      DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
+			      DISABLE_AUTO_NEG_FOR_DUPLEX;
+
+	if (mp->phy->autoneg == AUTONEG_ENABLE) {
+		/* enable auto negotiation */
+		pscr &= ~autoneg_disable;
+		goto out_write;
+	}
+
+	pscr |= autoneg_disable;
+
+	if (mp->phy->speed == SPEED_1000) {
+		/* force gigabit, half duplex not supported */
+		pscr |= SET_GMII_SPEED_TO_1000;
+		pscr |= SET_FULL_DUPLEX_MODE;
+		goto out_write;
+	}
+
+	pscr &= ~SET_GMII_SPEED_TO_1000;
+
+	if (mp->phy->speed == SPEED_100)
+		pscr |= SET_MII_SPEED_TO_100;
+	else
+		pscr &= ~SET_MII_SPEED_TO_100;
+
+	if (mp->phy->duplex == DUPLEX_FULL)
+		pscr |= SET_FULL_DUPLEX_MODE;
+	else
+		pscr &= ~SET_FULL_DUPLEX_MODE;
+
+out_write:
+	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
+}
+
+/* statistics ***************************************************************/
+static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
+{
+	struct mv643xx_eth_private *mp = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
+	unsigned long tx_packets = 0;
+	unsigned long tx_bytes = 0;
+	unsigned long tx_dropped = 0;
+	int i;
+
+	for (i = 0; i < mp->txq_count; i++) {
+		struct tx_queue *txq = mp->txq + i;
+
+		tx_packets += txq->tx_packets;
+		tx_bytes += txq->tx_bytes;
+		tx_dropped += txq->tx_dropped;
+	}
+
+	stats->tx_packets = tx_packets;
+	stats->tx_bytes = tx_bytes;
+	stats->tx_dropped = tx_dropped;
+
+	return stats;
+}
+
+static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
+{
+	return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
+}
+
+static void mib_counters_clear(struct mv643xx_eth_private *mp)
+{
+	int i;
+
+	for (i = 0; i < 0x80; i += 4)
+		mib_read(mp, i);
+
+	/* Clear non MIB hw counters also */
+	rdlp(mp, RX_DISCARD_FRAME_CNT);
+	rdlp(mp, RX_OVERRUN_FRAME_CNT);
+}
+
+static void mib_counters_update(struct mv643xx_eth_private *mp)
+{
+	struct mib_counters *p = &mp->mib_counters;
+
+	spin_lock_bh(&mp->mib_counters_lock);
+	p->good_octets_received += mib_read(mp, 0x00);
+	p->bad_octets_received += mib_read(mp, 0x08);
+	p->internal_mac_transmit_err += mib_read(mp, 0x0c);
+	p->good_frames_received += mib_read(mp, 0x10);
+	p->bad_frames_received += mib_read(mp, 0x14);
+	p->broadcast_frames_received += mib_read(mp, 0x18);
+	p->multicast_frames_received += mib_read(mp, 0x1c);
+	p->frames_64_octets += mib_read(mp, 0x20);
+	p->frames_65_to_127_octets += mib_read(mp, 0x24);
+	p->frames_128_to_255_octets += mib_read(mp, 0x28);
+	p->frames_256_to_511_octets += mib_read(mp, 0x2c);
+	p->frames_512_to_1023_octets += mib_read(mp, 0x30);
+	p->frames_1024_to_max_octets += mib_read(mp, 0x34);
+	p->good_octets_sent += mib_read(mp, 0x38);
+	p->good_frames_sent += mib_read(mp, 0x40);
+	p->excessive_collision += mib_read(mp, 0x44);
+	p->multicast_frames_sent += mib_read(mp, 0x48);
+	p->broadcast_frames_sent += mib_read(mp, 0x4c);
+	p->unrec_mac_control_received += mib_read(mp, 0x50);
+	p->fc_sent += mib_read(mp, 0x54);
+	p->good_fc_received += mib_read(mp, 0x58);
+	p->bad_fc_received += mib_read(mp, 0x5c);
+	p->undersize_received += mib_read(mp, 0x60);
+	p->fragments_received += mib_read(mp, 0x64);
+	p->oversize_received += mib_read(mp, 0x68);
+	p->jabber_received += mib_read(mp, 0x6c);
+	p->mac_receive_error += mib_read(mp, 0x70);
+	p->bad_crc_event += mib_read(mp, 0x74);
+	p->collision += mib_read(mp, 0x78);
+	p->late_collision += mib_read(mp, 0x7c);
+	/* Non MIB hardware counters */
+	p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT);
+	p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT);
+	spin_unlock_bh(&mp->mib_counters_lock);
+}
+
+static void mib_counters_timer_wrapper(unsigned long _mp)
+{
+	struct mv643xx_eth_private *mp = (void *)_mp;
+	mib_counters_update(mp);
+	mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
+}
+
+
+/* interrupt coalescing *****************************************************/
+/*
+ * Hardware coalescing parameters are set in units of 64 t_clk
+ * cycles.  I.e.:
+ *
+ *	coal_delay_in_usec = 64000000 * register_value / t_clk_rate
+ *
+ *	register_value = coal_delay_in_usec * t_clk_rate / 64000000
+ *
+ * In the ->set*() methods, we round the computed register value
+ * to the nearest integer.
+ */
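+/*
+ * Example with hypothetical numbers: for t_clk = 200MHz, a requested delay
+ * of 1000 usec gives register_value = 1000 * 200000000 / 64000000 = 3125,
+ * and reading back gives 64000000 * 3125 / 200000000 = 1000 usec again.
+ */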
+static unsigned int get_rx_coal(struct mv643xx_eth_private *mp)
+{
+	u32 val = rdlp(mp, SDMA_CONFIG);
+	u64 temp;
+
+	if (mp->shared->extended_rx_coal_limit)
+		temp = ((val & 0x02000000) >> 10) | ((val & 0x003fff80) >> 7);
+	else
+		temp = (val & 0x003fff00) >> 8;
+
+	temp *= 64000000;
+	do_div(temp, mp->t_clk);
+
+	return (unsigned int)temp;
+}
+
+static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
+{
+	u64 temp;
+	u32 val;
+
+	temp = (u64)usec * mp->t_clk;
+	temp += 31999999;
+	do_div(temp, 64000000);
+
+	val = rdlp(mp, SDMA_CONFIG);
+	if (mp->shared->extended_rx_coal_limit) {
+		if (temp > 0xffff)
+			temp = 0xffff;
+		val &= ~0x023fff80;
+		val |= (temp & 0x8000) << 10;
+		val |= (temp & 0x7fff) << 7;
+	} else {
+		if (temp > 0x3fff)
+			temp = 0x3fff;
+		val &= ~0x003fff00;
+		val |= (temp & 0x3fff) << 8;
+	}
+	wrlp(mp, SDMA_CONFIG, val);
+}
+
+static unsigned int get_tx_coal(struct mv643xx_eth_private *mp)
+{
+	u64 temp;
+
+	temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4;
+	temp *= 64000000;
+	do_div(temp, mp->t_clk);
+
+	return (unsigned int)temp;
+}
+
+static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
+{
+	u64 temp;
+
+	temp = (u64)usec * mp->t_clk;
+	temp += 31999999;
+	do_div(temp, 64000000);
+
+	if (temp > 0x3fff)
+		temp = 0x3fff;
+
+	wrlp(mp, TX_FIFO_URGENT_THRESHOLD, temp << 4);
+}
+
+
+/* ethtool ******************************************************************/
+struct mv643xx_eth_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int sizeof_stat;
+	int netdev_off;
+	int mp_off;
+};
+
+#define SSTAT(m)						\
+	{ #m, FIELD_SIZEOF(struct net_device_stats, m),		\
+	  offsetof(struct net_device, stats.m), -1 }
+
+#define MIBSTAT(m)						\
+	{ #m, FIELD_SIZEOF(struct mib_counters, m),		\
+	  -1, offsetof(struct mv643xx_eth_private, mib_counters.m) }
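+/* Each stat lives either in struct net_device (netdev_off >= 0) or in the
+ * driver's mib_counters (mp_off >= 0); the unused offset is -1, and
+ * mv643xx_eth_get_ethtool_stats() picks whichever is valid.
+ */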
+
+static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
+	SSTAT(rx_packets),
+	SSTAT(tx_packets),
+	SSTAT(rx_bytes),
+	SSTAT(tx_bytes),
+	SSTAT(rx_errors),
+	SSTAT(tx_errors),
+	SSTAT(rx_dropped),
+	SSTAT(tx_dropped),
+	MIBSTAT(good_octets_received),
+	MIBSTAT(bad_octets_received),
+	MIBSTAT(internal_mac_transmit_err),
+	MIBSTAT(good_frames_received),
+	MIBSTAT(bad_frames_received),
+	MIBSTAT(broadcast_frames_received),
+	MIBSTAT(multicast_frames_received),
+	MIBSTAT(frames_64_octets),
+	MIBSTAT(frames_65_to_127_octets),
+	MIBSTAT(frames_128_to_255_octets),
+	MIBSTAT(frames_256_to_511_octets),
+	MIBSTAT(frames_512_to_1023_octets),
+	MIBSTAT(frames_1024_to_max_octets),
+	MIBSTAT(good_octets_sent),
+	MIBSTAT(good_frames_sent),
+	MIBSTAT(excessive_collision),
+	MIBSTAT(multicast_frames_sent),
+	MIBSTAT(broadcast_frames_sent),
+	MIBSTAT(unrec_mac_control_received),
+	MIBSTAT(fc_sent),
+	MIBSTAT(good_fc_received),
+	MIBSTAT(bad_fc_received),
+	MIBSTAT(undersize_received),
+	MIBSTAT(fragments_received),
+	MIBSTAT(oversize_received),
+	MIBSTAT(jabber_received),
+	MIBSTAT(mac_receive_error),
+	MIBSTAT(bad_crc_event),
+	MIBSTAT(collision),
+	MIBSTAT(late_collision),
+	MIBSTAT(rx_discard),
+	MIBSTAT(rx_overrun),
+};
+
+static int
+mv643xx_eth_get_settings_phy(struct mv643xx_eth_private *mp,
+			     struct ethtool_cmd *cmd)
+{
+	int err;
+
+	err = phy_read_status(mp->phy);
+	if (err == 0)
+		err = phy_ethtool_gset(mp->phy, cmd);
+
+	/*
+	 * The MAC does not support 1000baseT_Half.
+	 */
+	cmd->supported &= ~SUPPORTED_1000baseT_Half;
+	cmd->advertising &= ~ADVERTISED_1000baseT_Half;
+
+	return err;
+}
+
+static int
+mv643xx_eth_get_settings_phyless(struct mv643xx_eth_private *mp,
+				 struct ethtool_cmd *cmd)
+{
+	u32 port_status;
+
+	port_status = rdlp(mp, PORT_STATUS);
+
+	cmd->supported = SUPPORTED_MII;
+	cmd->advertising = ADVERTISED_MII;
+	switch (port_status & PORT_SPEED_MASK) {
+	case PORT_SPEED_10:
+		ethtool_cmd_speed_set(cmd, SPEED_10);
+		break;
+	case PORT_SPEED_100:
+		ethtool_cmd_speed_set(cmd, SPEED_100);
+		break;
+	case PORT_SPEED_1000:
+		ethtool_cmd_speed_set(cmd, SPEED_1000);
+		break;
+	default:
+		cmd->speed = -1;
+		break;
+	}
+	cmd->duplex = (port_status & FULL_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
+	cmd->port = PORT_MII;
+	cmd->phy_address = 0;
+	cmd->transceiver = XCVR_INTERNAL;
+	cmd->autoneg = AUTONEG_DISABLE;
+	cmd->maxtxpkt = 1;
+	cmd->maxrxpkt = 1;
+
+	return 0;
+}
+
+static void
+mv643xx_eth_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct mv643xx_eth_private *mp = netdev_priv(dev);
+	wol->supported = 0;
+	wol->wolopts = 0;
+	if (mp->phy)
+		phy_ethtool_get_wol(mp->phy, wol);
+}
+
+static int
+mv643xx_eth_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct mv643xx_eth_private *mp = netdev_priv(dev);
+	int err;
+
+	if (mp->phy == NULL)
+		return -EOPNOTSUPP;
+
+	err = phy_ethtool_set_wol(mp->phy, wol);
+	/* Given that mv643xx_eth works without the marvell-specific PHY driver,
+	 * this debugging hint is useful to have.
+	 */
+	if (err == -EOPNOTSUPP)
+		netdev_info(dev, "The PHY does not support set_wol, was CONFIG_MARVELL_PHY enabled?\n");
+	return err;
+}
+
+static int
+mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct mv643xx_eth_private *mp = netdev_priv(dev);
+
+	if (mp->phy != NULL)
+		return mv643xx_eth_get_settings_phy(mp, cmd);
+	else
+		return mv643xx_eth_get_settings_phyless(mp, cmd);
+}
+
+static int
+mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct mv643xx_eth_private *mp = netdev_priv(dev);
+	int ret;
+
+	if (mp->phy == NULL)
+		return -EINVAL;
+
+	/*
+	 * The MAC does not support 1000baseT_Half.
+	 */
+	cmd->advertising &= ~ADVERTISED_1000baseT_Half;
+
+	ret = phy_ethtool_sset(mp->phy, cmd);
+	if (!ret)
+		mv643xx_eth_adjust_link(dev);
+	return ret;
+}
+
+static void mv643xx_eth_get_drvinfo(struct net_device *dev,
+				    struct ethtool_drvinfo *drvinfo)
+{
+	strlcpy(drvinfo->driver, mv643xx_eth_driver_name,
+		sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, mv643xx_eth_driver_version,
+		sizeof(drvinfo->version));
+	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+	strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
+}
+
+static int mv643xx_eth_nway_reset(struct net_device *dev)
+{
+	struct mv643xx_eth_private *mp = netdev_priv(dev);
+
+	if (mp->phy == NULL)
+		return -EINVAL;
+
+	return genphy_restart_aneg(mp->phy);
+}
+
+static int
+mv643xx_eth_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
+{
+	struct mv643xx_eth_private *mp = netdev_priv(dev);
+
+	ec->rx_coalesce_usecs = get_rx_coal(mp);
+	ec->tx_coalesce_usecs = get_tx_coal(mp);
+
+	return 0;
+}
+
+static int
+mv643xx_eth_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
+{
+	struct mv643xx_eth_private *mp = netdev_priv(dev);
+
+	set_rx_coal(mp, ec->rx_coalesce_usecs);
+	set_tx_coal(mp, ec->tx_coalesce_usecs);
+
+	return 0;
+}
+
+static void
+mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
+{
+	struct mv643xx_eth_private *mp = netdev_priv(dev);
+
+	er->rx_max_pending = 4096;
+	er->tx_max_pending = 4096;
+
+	er->rx_pending = mp->rx_ring_size;
+	er->tx_pending = mp->tx_ring_size;
+}
+
+static int
+mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
+{
+	struct mv643xx_eth_private *mp = netdev_priv(dev);
+
+	if (er->rx_mini_pending || er->rx_jumbo_pending)
+		return -EINVAL;
+
+	mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096;
+	mp->tx_ring_size = clamp_t(unsigned int, er->tx_pending,
+				   MV643XX_MAX_SKB_DESCS * 2, 4096);
+	if (mp->tx_ring_size != er->tx_pending)
+		netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
+			    mp->tx_ring_size, er->tx_pending);
+
+	if (netif_running(dev)) {
+		mv643xx_eth_stop(dev);
+		if (mv643xx_eth_open(dev)) {
+			netdev_err(dev,
+				   "fatal error on re-opening device after ring param change\n");
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+
+
+static int
+mv643xx_eth_set_features(struct net_device *dev, netdev_features_t features)
+{
+	struct mv643xx_eth_private *mp = netdev_priv(dev);
+	bool rx_csum = features & NETIF_F_RXCSUM;
+
+	wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000);
+
+	return 0;
+}
+
+static void mv643xx_eth_get_strings(struct net_device *dev,
+				    uint32_t stringset, uint8_t *data)
+{
+	int i;
+
+	if (stringset == ETH_SS_STATS) {
+		for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
+			memcpy(data + i * ETH_GSTRING_LEN,
+				mv643xx_eth_stats[i].stat_string,
+				ETH_GSTRING_LEN);
+		}
+	}
+}
+
+static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
+					  struct ethtool_stats *stats,
+					  uint64_t *data)
+{
+	struct mv643xx_eth_private *mp = netdev_priv(dev);
+	int i;
+
+	mv643xx_eth_get_stats(dev);
+	mib_counters_update(mp);
+
+	for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
+		const struct mv643xx_eth_stats *stat;
+		void *p;
+
+		stat = mv643xx_eth_stats + i;
+
+		if (stat->netdev_off >= 0)
+			p = ((void *)mp->dev) + stat->netdev_off;
+		else
+			p = ((void *)mp) + stat->mp_off;
+
+		data[i] = (stat->sizeof_stat == 8) ?
+				*(uint64_t *)p : *(uint32_t *)p;
+	}
+}
+
+static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
+{
+	if (sset == ETH_SS_STATS)
+		return ARRAY_SIZE(mv643xx_eth_stats);
+
+	return -EOPNOTSUPP;
+}
+
+static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
+	.get_settings		= mv643xx_eth_get_settings,
+	.set_settings		= mv643xx_eth_set_settings,
+	.get_drvinfo		= mv643xx_eth_get_drvinfo,
+	.nway_reset		= mv643xx_eth_nway_reset,
+	.get_link		= ethtool_op_get_link,
+	.get_coalesce		= mv643xx_eth_get_coalesce,
+	.set_coalesce		= mv643xx_eth_set_coalesce,
+	.get_ringparam		= mv643xx_eth_get_ringparam,
+	.set_ringparam		= mv643xx_eth_set_ringparam,
+	.get_strings		= mv643xx_eth_get_strings,
+	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
+	.get_sset_count		= mv643xx_eth_get_sset_count,
+	.get_ts_info		= ethtool_op_get_ts_info,
+	.get_wol                = mv643xx_eth_get_wol,
+	.set_wol                = mv643xx_eth_set_wol,
+};
+
+
+/* address handling *********************************************************/
+static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
+{
+	unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH);
+	unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW);
+
+	addr[0] = (mac_h >> 24) & 0xff;
+	addr[1] = (mac_h >> 16) & 0xff;
+	addr[2] = (mac_h >> 8) & 0xff;
+	addr[3] = mac_h & 0xff;
+	addr[4] = (mac_l >> 8) & 0xff;
+	addr[5] = mac_l & 0xff;
+}
+
+static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
+{
+	wrlp(mp, MAC_ADDR_HIGH,
+		(addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]);
+	wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]);
+}
+
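+/*
+ * Compute a 16-bit mask of which values of the low address nibble to
+ * accept.  A return value of 0 makes the caller fall back to unicast
+ * promiscuous mode; this happens unless every address on the list matches
+ * dev_addr in all but its low four bits.
+ */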
+static u32 uc_addr_filter_mask(struct net_device *dev)
+{
+	struct netdev_hw_addr *ha;
+	u32 nibbles;
+
+	if (dev->flags & IFF_PROMISC)
+		return 0;
+
+	nibbles = 1 << (dev->dev_addr[5] & 0x0f);
+	netdev_for_each_uc_addr(ha, dev) {
+		if (memcmp(dev->dev_addr, ha->addr, 5))
+			return 0;
+		if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0)
+			return 0;
+
+		nibbles |= 1 << (ha->addr[5] & 0x0f);
+	}
+
+	return nibbles;
+}
+
+static void mv643xx_eth_program_unicast_filter(struct net_device *dev)
+{
+	struct mv643xx_eth_private *mp = netdev_priv(dev);
+	u32 port_config;
+	u32 nibbles;
+	int i;
+
+	uc_addr_set(mp, dev->dev_addr);
+
+	port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE;
+
+	nibbles = uc_addr_filter_mask(dev);
+	if (!nibbles) {
+		port_config |= UNICAST_PROMISCUOUS_MODE;
+		nibbles = 0xffff;
+	}
+
+	for (i = 0; i < 16; i += 4) {
+		int off = UNICAST_TABLE(mp->port_num) + i;
+		u32 v;
+
+		v = 0;
+		if (nibbles & 1)
+			v |= 0x00000001;
+		if (nibbles & 2)
+			v |= 0x00000100;
+		if (nibbles & 4)
+			v |= 0x00010000;
+		if (nibbles & 8)
+			v |= 0x01000000;
+		nibbles >>= 4;
+
+		wrl(mp, off, v);
+	}
+
+	wrlp(mp, PORT_CONFIG, port_config);
+}
+
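+/*
+ * Bit-serial CRC-8 with polynomial 0x107 (x^8 + x^2 + x + 1) over the six
+ * address bytes; the result indexes the 256-entry "other" multicast hash
+ * table.
+ */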
+static int addr_crc(unsigned char *addr)
+{
+	int crc = 0;
+	int i;
+
+	for (i = 0; i < 6; i++) {
+		int j;
+
+		crc = (crc ^ addr[i]) << 8;
+		for (j = 7; j >= 0; j--) {
+			if (crc & (0x100 << j))
+				crc ^= 0x107 << j;
+		}
+	}
+
+	return crc;
+}
+
+static void mv643xx_eth_program_multicast_filter(struct net_device *dev)
+{
+	struct mv643xx_eth_private *mp = netdev_priv(dev);
+	u32 *mc_spec;
+	u32 *mc_other;
+	struct netdev_hw_addr *ha;
+	int i;
+
+	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI))
+		goto promiscuous;
+
+	/* Allocate both mc_spec and mc_other tables */
+	mc_spec = kcalloc(128, sizeof(u32), GFP_ATOMIC);
+	if (!mc_spec)
+		goto promiscuous;
+	mc_other = &mc_spec[64];
+
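+	/* Each filter table holds 256 single-byte entries packed into 64
+	 * 32-bit words: entry N is bit 0 of byte (N & 3) of word (N >> 2).
+	 */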
+	netdev_for_each_mc_addr(ha, dev) {
+		u8 *a = ha->addr;
+		u32 *table;
+		u8 entry;
+
+		if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) {
+			table = mc_spec;
+			entry = a[5];
+		} else {
+			table = mc_other;
+			entry = addr_crc(a);
+		}
+
+		table[entry >> 2] |= 1 << (8 * (entry & 3));
+	}
+
+	for (i = 0; i < 64; i++) {
+		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
+		    mc_spec[i]);
+		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
+		    mc_other[i]);
+	}
+
+	kfree(mc_spec);
+	return;
+
+promiscuous:
+	for (i = 0; i < 64; i++) {
+		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
+		    0x01010101u);
+		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
+		    0x01010101u);
+	}
+}
+
+static void mv643xx_eth_set_rx_mode(struct net_device *dev)
+{
+	mv643xx_eth_program_unicast_filter(dev);
+	mv643xx_eth_program_multicast_filter(dev);
+}
+
+static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
+{
+	struct sockaddr *sa = addr;
+
+	if (!is_valid_ether_addr(sa->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
+
+	netif_addr_lock_bh(dev);
+	mv643xx_eth_program_unicast_filter(dev);
+	netif_addr_unlock_bh(dev);
+
+	return 0;
+}
+
+
+/* rx/tx queue initialisation ***********************************************/
+static int rxq_init(struct mv643xx_eth_private *mp, int index)
+{
+	struct rx_queue *rxq = mp->rxq + index;
+	struct rx_desc *rx_desc;
+	int size;
+	int i;
+
+	rxq->index = index;
+
+	rxq->rx_ring_size = mp->rx_ring_size;
+
+	rxq->rx_desc_count = 0;
+	rxq->rx_curr_desc = 0;
+	rxq->rx_used_desc = 0;
+
+	size = rxq->rx_ring_size * sizeof(struct rx_desc);
+
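+	/* Queue 0 may place its descriptor ring in on-chip SRAM when the
+	 * platform provides an area large enough; other queues always use
+	 * coherent DMA memory.
+	 */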
+	if (index == 0 && size <= mp->rx_desc_sram_size) {
+		rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
+						mp->rx_desc_sram_size);
+		rxq->rx_desc_dma = mp->rx_desc_sram_addr;
+	} else {
+		rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
+						       size, &rxq->rx_desc_dma,
+						       GFP_KERNEL);
+	}
+
+	if (rxq->rx_desc_area == NULL) {
+		netdev_err(mp->dev,
+			   "can't allocate rx ring (%d bytes)\n", size);
+		goto out;
+	}
+	memset(rxq->rx_desc_area, 0, size);
+
+	rxq->rx_desc_area_size = size;
+	rxq->rx_skb = kcalloc(rxq->rx_ring_size, sizeof(*rxq->rx_skb),
+				    GFP_KERNEL);
+	if (rxq->rx_skb == NULL)
+		goto out_free;
+
+	rx_desc = rxq->rx_desc_area;
+	for (i = 0; i < rxq->rx_ring_size; i++) {
+		int nexti;
+
+		nexti = i + 1;
+		if (nexti == rxq->rx_ring_size)
+			nexti = 0;
+
+		rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
+					nexti * sizeof(struct rx_desc);
+	}
+
+	return 0;
+
+
+out_free:
+	if (index == 0 && size <= mp->rx_desc_sram_size)
+		iounmap(rxq->rx_desc_area);
+	else
+		dma_free_coherent(mp->dev->dev.parent, size,
+				  rxq->rx_desc_area,
+				  rxq->rx_desc_dma);
+
+out:
+	return -ENOMEM;
+}
+
+static void rxq_deinit(struct rx_queue *rxq)
+{
+	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
+	int i;
+
+	rxq_disable(rxq);
+
+	for (i = 0; i < rxq->rx_ring_size; i++) {
+		if (rxq->rx_skb[i]) {
+			dev_kfree_skb(rxq->rx_skb[i]);
+			rxq->rx_desc_count--;
+		}
+	}
+
+	if (rxq->rx_desc_count) {
+		netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n",
+			   rxq->rx_desc_count);
+	}
+
+	if (rxq->index == 0 &&
+	    rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
+		iounmap(rxq->rx_desc_area);
+	else
+		dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size,
+				  rxq->rx_desc_area, rxq->rx_desc_dma);
+
+	kfree(rxq->rx_skb);
+}
+
+static int txq_init(struct mv643xx_eth_private *mp, int index)
+{
+	struct tx_queue *txq = mp->txq + index;
+	struct tx_desc *tx_desc;
+	int size;
+	int ret;
+	int i;
+
+	txq->index = index;
+
+	txq->tx_ring_size = mp->tx_ring_size;
+
+	/* A queue must always have room for at least one skb.
+	 * Therefore, stop the queue when the number of free entries
+	 * drops to the maximum number of descriptors per skb.
+	 */
+	txq->tx_stop_threshold = txq->tx_ring_size - MV643XX_MAX_SKB_DESCS;
+	txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
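+	/* Waking at half the stop threshold provides hysteresis, so the
+	 * queue is not stopped and woken again on every reclaimed skb.
+	 */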
+
+	txq->tx_desc_count = 0;
+	txq->tx_curr_desc = 0;
+	txq->tx_used_desc = 0;
+
+	size = txq->tx_ring_size * sizeof(struct tx_desc);
+
+	if (index == 0 && size <= mp->tx_desc_sram_size) {
+		txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
+						mp->tx_desc_sram_size);
+		txq->tx_desc_dma = mp->tx_desc_sram_addr;
+	} else {
+		txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
+						       size, &txq->tx_desc_dma,
+						       GFP_KERNEL);
+	}
+
+	if (txq->tx_desc_area == NULL) {
+		netdev_err(mp->dev,
+			   "can't allocate tx ring (%d bytes)\n", size);
+		return -ENOMEM;
+	}
+	memset(txq->tx_desc_area, 0, size);
+
+	txq->tx_desc_area_size = size;
+
+	tx_desc = txq->tx_desc_area;
+	for (i = 0; i < txq->tx_ring_size; i++) {
+		struct tx_desc *txd = tx_desc + i;
+		int nexti;
+
+		nexti = i + 1;
+		if (nexti == txq->tx_ring_size)
+			nexti = 0;
+
+		txd->cmd_sts = 0;
+		txd->next_desc_ptr = txq->tx_desc_dma +
+					nexti * sizeof(struct tx_desc);
+	}
+
+	txq->tx_desc_mapping = kcalloc(txq->tx_ring_size, sizeof(char),
+				       GFP_KERNEL);
+	if (!txq->tx_desc_mapping) {
+		ret = -ENOMEM;
+		goto err_free_desc_area;
+	}
+
+	/* Allocate DMA buffers for TSO MAC/IP/TCP headers */
+	txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent,
+					   txq->tx_ring_size * TSO_HEADER_SIZE,
+					   &txq->tso_hdrs_dma, GFP_KERNEL);
+	if (txq->tso_hdrs == NULL) {
+		ret = -ENOMEM;
+		goto err_free_desc_mapping;
+	}
+	skb_queue_head_init(&txq->tx_skb);
+
+	return 0;
+
+err_free_desc_mapping:
+	kfree(txq->tx_desc_mapping);
+err_free_desc_area:
+	if (index == 0 && size <= mp->tx_desc_sram_size)
+		iounmap(txq->tx_desc_area);
+	else
+		dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
+				  txq->tx_desc_area, txq->tx_desc_dma);
+	return ret;
+}
+
+static void txq_deinit(struct tx_queue *txq)
+{
+	struct mv643xx_eth_private *mp = txq_to_mp(txq);
+
+	txq_disable(txq);
+	txq_reclaim(txq, txq->tx_ring_size, 1);
+
+	BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);
+
+	if (txq->index == 0 &&
+	    txq->tx_desc_area_size <= mp->tx_desc_sram_size)
+		iounmap(txq->tx_desc_area);
+	else
+		dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
+				  txq->tx_desc_area, txq->tx_desc_dma);
+	kfree(txq->tx_desc_mapping);
+
+	if (txq->tso_hdrs)
+		dma_free_coherent(mp->dev->dev.parent,
+				  txq->tx_ring_size * TSO_HEADER_SIZE,
+				  txq->tso_hdrs, txq->tso_hdrs_dma);
+}
+
+
+/* netdev ops and related ***************************************************/
+static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp)
+{
+	u32 int_cause;
+	u32 int_cause_ext;
+
+	int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask;
+	if (int_cause == 0)
+		return 0;
+
+	int_cause_ext = 0;
+	if (int_cause & INT_EXT) {
+		int_cause &= ~INT_EXT;
+		int_cause_ext = rdlp(mp, INT_CAUSE_EXT);
+	}
+
+	if (int_cause) {
+		wrlp(mp, INT_CAUSE, ~int_cause);
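+		/* The TX-end bits occupy INT_CAUSE starting at bit 19,
+		 * one per queue; shift them down to a queue mask and
+		 * ignore queues whose enable bit in TXQ_COMMAND is set
+		 * again (they have since been re-kicked).
+		 */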
+		mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) &
+				~(rdlp(mp, TXQ_COMMAND) & 0xff);
+		mp->work_rx |= (int_cause & INT_RX) >> 2;
+	}
+
+	int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX;
+	if (int_cause_ext) {
+		wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext);
+		if (int_cause_ext & INT_EXT_LINK_PHY)
+			mp->work_link = 1;
+		mp->work_tx |= int_cause_ext & INT_EXT_TX;
+	}
+
+	return 1;
+}
+
+static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
+{
+	struct net_device *dev = (struct net_device *)dev_id;
+	struct mv643xx_eth_private *mp = netdev_priv(dev);
+
+	if (unlikely(!mv643xx_eth_collect_events(mp)))
+		return IRQ_NONE;
+
+	wrlp(mp, INT_MASK, 0);
+	napi_schedule(&mp->napi);
+
+	return IRQ_HANDLED;
+}
+
+static void handle_link_event(struct mv643xx_eth_private *mp)
+{
+	struct net_device *dev = mp->dev;
+	u32 port_status;
+	int speed;
+	int duplex;
+	int fc;
+
+	port_status = rdlp(mp, PORT_STATUS);
+	if (!(port_status & LINK_UP)) {
+		if (netif_carrier_ok(dev)) {
+			int i;
+
+			netdev_info(dev, "link down\n");
+
+			netif_carrier_off(dev);
+
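+			/* Forcibly reclaim all in-flight TX descriptors
+			 * and rewind the hardware pointers, so the rings
+			 * start clean when the link comes back.
+			 */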
+			for (i = 0; i < mp->txq_count; i++) {
+				struct tx_queue *txq = mp->txq + i;
+
+				txq_reclaim(txq, txq->tx_ring_size, 1);
+				txq_reset_hw_ptr(txq);
+			}
+		}
+		return;
+	}
+
+	switch (port_status & PORT_SPEED_MASK) {
+	case PORT_SPEED_10:
+		speed = 10;
+		break;
+	case PORT_SPEED_100:
+		speed = 100;
+		break;
+	case PORT_SPEED_1000:
+		speed = 1000;
+		break;
+	default:
+		speed = -1;
+		break;
+	}
+	duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
+	fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;
+
+	netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n",
+		    speed, duplex ? "full" : "half", fc ? "en" : "dis");
+
+	if (!netif_carrier_ok(dev))
+		netif_carrier_on(dev);
+}
+
+static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
+{
+	struct mv643xx_eth_private *mp;
+	int work_done;
+
+	mp = container_of(napi, struct mv643xx_eth_private, napi);
+
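+	/* If a previous RX refill ran out of memory, the OOM timer has
+	 * rescheduled us; clear the flag so the refill is retried below.
+	 */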
+	if (unlikely(mp->oom)) {
+		mp->oom = 0;
+		del_timer(&mp->rx_oom);
+	}
+
+	work_done = 0;
+	while (work_done < budget) {
+		u8 queue_mask;
+		int queue;
+		int work_tbd;
+
+		if (mp->work_link) {
+			mp->work_link = 0;
+			handle_link_event(mp);
+			work_done++;
+			continue;
+		}
+
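+		/* Merge all pending work sources into one bitmask, serve
+		 * the highest-numbered queue first, and cap each pass at
+		 * 16 units of work so a busy queue cannot monopolise the
+		 * NAPI budget.
+		 */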
+		queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx;
+		if (likely(!mp->oom))
+			queue_mask |= mp->work_rx_refill;
+
+		if (!queue_mask) {
+			if (mv643xx_eth_collect_events(mp))
+				continue;
+			break;
+		}
+
+		queue = fls(queue_mask) - 1;
+		queue_mask = 1 << queue;
+
+		work_tbd = budget - work_done;
+		if (work_tbd > 16)
+			work_tbd = 16;
+
+		if (mp->work_tx_end & queue_mask) {
+			txq_kick(mp->txq + queue);
+		} else if (mp->work_tx & queue_mask) {
+			work_done += txq_reclaim(mp->txq + queue, work_tbd, 0);
+			txq_maybe_wake(mp->txq + queue);
+		} else if (mp->work_rx & queue_mask) {
+			work_done += rxq_process(mp->rxq + queue, work_tbd);
+		} else if (!mp->oom && (mp->work_rx_refill & queue_mask)) {
+			work_done += rxq_refill(mp->rxq + queue, work_tbd);
+		} else {
+			BUG();
+		}
+	}
+
+	if (work_done < budget) {
+		if (mp->oom)
+			mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
+		napi_complete(napi);
+		wrlp(mp, INT_MASK, mp->int_mask);
+	}
+
+	return work_done;
+}
+
+static inline void oom_timer_wrapper(unsigned long data)
+{
+	struct mv643xx_eth_private *mp = (void *)data;
+
+	napi_schedule(&mp->napi);
+}
+
+static void port_start(struct mv643xx_eth_private *mp)
+{
+	u32 pscr;
+	int i;
+
+	/*
+	 * Perform PHY reset, if there is a PHY.
+	 */
+	if (mp->phy != NULL) {
+		struct ethtool_cmd cmd;
+
+		mv643xx_eth_get_settings(mp->dev, &cmd);
+		phy_init_hw(mp->phy);
+		mv643xx_eth_set_settings(mp->dev, &cmd);
+		phy_start(mp->phy);
+	}
+
+	/*
+	 * Configure basic link parameters.
+	 */
+	pscr = rdlp(mp, PORT_SERIAL_CONTROL);
+
+	pscr |= SERIAL_PORT_ENABLE;
+	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
+
+	pscr |= DO_NOT_FORCE_LINK_FAIL;
+	if (mp->phy == NULL)
+		pscr |= FORCE_LINK_PASS;
+	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
+
+	/*
+	 * Configure TX path and queues.
+	 */
+	tx_set_rate(mp, 1000000000, 16777216);
+	for (i = 0; i < mp->txq_count; i++) {
+		struct tx_queue *txq = mp->txq + i;
+
+		txq_reset_hw_ptr(txq);
+		txq_set_rate(txq, 1000000000, 16777216);
+		txq_set_fixed_prio_mode(txq);
+	}
+
+	/*
+	 * Direct all unmatched unicast frames, and all TCP, UDP, BPDU
+	 * and broadcast frames, to RX queue #0, and include the
+	 * pseudo-header when calculating receive checksums.
+	 */
+	mv643xx_eth_set_features(mp->dev, mp->dev->features);
+
+	/*
+	 * Treat BPDUs as normal multicasts, and disable partition mode.
+	 */
+	wrlp(mp, PORT_CONFIG_EXT, 0x00000000);
+
+	/*
+	 * Add configured unicast addresses to address filter table.
+	 */
+	mv643xx_eth_program_unicast_filter(mp->dev);
+
+	/*
+	 * Enable the receive queues.
+	 */
+	for (i = 0; i < mp->rxq_count; i++) {
+		struct rx_queue *rxq = mp->rxq + i;
+		u32 addr;
+
+		addr = (u32)rxq->rx_desc_dma;
+		addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
+		wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr);
+
+		rxq_enable(rxq);
+	}
+}
+
+static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp)
+{
+	int skb_size;
+
+	/*
+	 * Reserve 2+14 bytes for an ethernet header (the hardware
+	 * automatically prepends 2 bytes of dummy data to each
+	 * received packet), 16 bytes for up to four VLAN tags, and
+	 * 4 bytes for the trailing FCS -- 36 bytes total.
+	 */
+	skb_size = mp->dev->mtu + 36;
+
+	/*
+	 * Make sure that the skb size is a multiple of 8 bytes, as
+	 * the lower three bits of the receive descriptor's buffer
+	 * size field are ignored by the hardware.
+	 */
+	mp->skb_size = (skb_size + 7) & ~7;
+
+	/*
+	 * If NET_SKB_PAD is smaller than a cache line,
+	 * netdev_alloc_skb() will cause skb->data to be misaligned
+	 * to a cache line boundary.  If this is the case, include
+	 * some extra space to allow re-aligning the data area.
+	 */
+	mp->skb_size += SKB_DMA_REALIGN;
+}
+
+static int mv643xx_eth_open(struct net_device *dev)
+{
+	struct mv643xx_eth_private *mp = netdev_priv(dev);
+	int err;
+	int i;
+
+	wrlp(mp, INT_CAUSE, 0);
+	wrlp(mp, INT_CAUSE_EXT, 0);
+	rdlp(mp, INT_CAUSE_EXT);
+
+	err = request_irq(dev->irq, mv643xx_eth_irq,
+			  IRQF_SHARED, dev->name, dev);
+	if (err) {
+		netdev_err(dev, "can't assign irq\n");
+		return -EAGAIN;
+	}
+
+	mv643xx_eth_recalc_skb_size(mp);
+
+	napi_enable(&mp->napi);
+
+	mp->int_mask = INT_EXT;
+
+	for (i = 0; i < mp->rxq_count; i++) {
+		err = rxq_init(mp, i);
+		if (err) {
+			while (--i >= 0)
+				rxq_deinit(mp->rxq + i);
+			goto out;
+		}
+
+		rxq_refill(mp->rxq + i, INT_MAX);
+		mp->int_mask |= INT_RX_0 << i;
+	}
+
+	if (mp->oom) {
+		mp->rx_oom.expires = jiffies + (HZ / 10);
+		add_timer(&mp->rx_oom);
+	}
+
+	for (i = 0; i < mp->txq_count; i++) {
+		err = txq_init(mp, i);
+		if (err) {
+			while (--i >= 0)
+				txq_deinit(mp->txq + i);
+			goto out_free;
+		}
+		mp->int_mask |= INT_TX_END_0 << i;
+	}
+
+	add_timer(&mp->mib_counters_timer);
+	port_start(mp);
+
+	wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
+	wrlp(mp, INT_MASK, mp->int_mask);
+
+	return 0;
+
+
+out_free:
+	for (i = 0; i < mp->rxq_count; i++)
+		rxq_deinit(mp->rxq + i);
+out:
+	free_irq(dev->irq, dev);
+
+	return err;
+}
+
+static void port_reset(struct mv643xx_eth_private *mp)
+{
+	unsigned int data;
+	int i;
+
+	for (i = 0; i < mp->rxq_count; i++)
+		rxq_disable(mp->rxq + i);
+	for (i = 0; i < mp->txq_count; i++)
+		txq_disable(mp->txq + i);
+
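+	/* Wait until the TX engine is idle and its FIFO has drained
+	 * before clearing the port enable bit.
+	 */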
+	while (1) {
+		u32 ps = rdlp(mp, PORT_STATUS);
+
+		if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY)
+			break;
+		udelay(10);
+	}
+
+	/* Reset the Enable bit in the Configuration Register */
+	data = rdlp(mp, PORT_SERIAL_CONTROL);
+	data &= ~(SERIAL_PORT_ENABLE		|
+		  DO_NOT_FORCE_LINK_FAIL	|
+		  FORCE_LINK_PASS);
+	wrlp(mp, PORT_SERIAL_CONTROL, data);
+}
+
+static int mv643xx_eth_stop(struct net_device *dev)
+{
+	struct mv643xx_eth_private *mp = netdev_priv(dev);
+	int i;
+
+	wrlp(mp, INT_MASK_EXT, 0x00000000);
+	wrlp(mp, INT_MASK, 0x00000000);
+	rdlp(mp, INT_MASK);
+
+	napi_disable(&mp->napi);
+
+	del_timer_sync(&mp->rx_oom);
+
+	netif_carrier_off(dev);
+	if (mp->phy)
+		phy_stop(mp->phy);
+	free_irq(dev->irq, dev);
+
+	port_reset(mp);
+	mv643xx_eth_get_stats(dev);
+	mib_counters_update(mp);
+	del_timer_sync(&mp->mib_counters_timer);
+
+	for (i = 0; i < mp->rxq_count; i++)
+		rxq_deinit(mp->rxq + i);
+	for (i = 0; i < mp->txq_count; i++)
+		txq_deinit(mp->txq + i);
+
+	return 0;
+}
+
+static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct mv643xx_eth_private *mp = netdev_priv(dev);
+	int ret;
+
+	if (mp->phy == NULL)
+		return -ENOTSUPP;
+
+	ret = phy_mii_ioctl(mp->phy, ifr, cmd);
+	if (!ret)
+		mv643xx_eth_adjust_link(dev);
+	return ret;
+}
+
+static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct mv643xx_eth_private *mp = netdev_priv(dev);
+
+	if (new_mtu < 64 || new_mtu > 9500)
+		return -EINVAL;
+
+	dev->mtu = new_mtu;
+	mv643xx_eth_recalc_skb_size(mp);
+	tx_set_rate(mp, 1000000000, 16777216);
+
+	if (!netif_running(dev))
+		return 0;
+
+	/*
+	 * Stop and then re-open the interface, so that RX skbs of the
+	 * new MTU size are allocated.  Note that the re-open can fail
+	 * if memory runs out; the failure is logged below.
+	 */
+	mv643xx_eth_stop(dev);
+	if (mv643xx_eth_open(dev)) {
+		netdev_err(dev,
+			   "fatal error on re-opening device after MTU change\n");
+	}
+
+	return 0;
+}
+
+static void tx_timeout_task(struct work_struct *ugly)
+{
+	struct mv643xx_eth_private *mp;
+
+	mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
+	if (netif_running(mp->dev)) {
+		netif_tx_stop_all_queues(mp->dev);
+		port_reset(mp);
+		port_start(mp);
+		netif_tx_wake_all_queues(mp->dev);
+	}
+}
+
+static void mv643xx_eth_tx_timeout(struct net_device *dev)
+{
+	struct mv643xx_eth_private *mp = netdev_priv(dev);
+
+	netdev_info(dev, "tx timeout\n");
+
+	schedule_work(&mp->tx_timeout_task);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void mv643xx_eth_netpoll(struct net_device *dev)
+{
+	struct mv643xx_eth_private *mp = netdev_priv(dev);
+
+	wrlp(mp, INT_MASK, 0x00000000);
+	rdlp(mp, INT_MASK);
+
+	mv643xx_eth_irq(dev->irq, dev);
+
+	wrlp(mp, INT_MASK, mp->int_mask);
+}
+#endif
+
+
+/* platform glue ************************************************************/
+static void
+mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
+			      const struct mbus_dram_target_info *dram)
+{
+	void __iomem *base = msp->base;
+	u32 win_enable;
+	u32 win_protect;
+	int i;
+
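+	/* Disable all six windows first, then program one window per DRAM
+	 * chip select; a set bit in WINDOW_BAR_ENABLE disables the
+	 * corresponding window, hence the initial 0x3f.
+	 */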
+	for (i = 0; i < 6; i++) {
+		writel(0, base + WINDOW_BASE(i));
+		writel(0, base + WINDOW_SIZE(i));
+		if (i < 4)
+			writel(0, base + WINDOW_REMAP_HIGH(i));
+	}
+
+	win_enable = 0x3f;
+	win_protect = 0;
+
+	for (i = 0; i < dram->num_cs; i++) {
+		const struct mbus_dram_window *cs = dram->cs + i;
+
+		writel((cs->base & 0xffff0000) |
+			(cs->mbus_attr << 8) |
+			dram->mbus_dram_target_id, base + WINDOW_BASE(i));
+		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
+
+		win_enable &= ~(1 << i);
+		win_protect |= 3 << (2 * i);
+	}
+
+	writel(win_enable, base + WINDOW_BAR_ENABLE);
+	msp->win_protect = win_protect;
+}
+
+static void infer_hw_params(struct mv643xx_eth_shared_private *msp)
+{
+	/*
+	 * Check whether we have a 14-bit coal limit field in bits
+	 * [21:8], or a 16-bit coal limit in bits [25,21:7] of the
+	 * SDMA config register.
+	 */
+	writel(0x02000000, msp->base + 0x0400 + SDMA_CONFIG);
+	if (readl(msp->base + 0x0400 + SDMA_CONFIG) & 0x02000000)
+		msp->extended_rx_coal_limit = 1;
+	else
+		msp->extended_rx_coal_limit = 0;
+
+	/*
+	 * Check whether the MAC supports TX rate control, and if
+	 * yes, whether its associated registers are in the old or
+	 * the new place.
+	 */
+	writel(1, msp->base + 0x0400 + TX_BW_MTU_MOVED);
+	if (readl(msp->base + 0x0400 + TX_BW_MTU_MOVED) & 1) {
+		msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT;
+	} else {
+		writel(7, msp->base + 0x0400 + TX_BW_RATE);
+		if (readl(msp->base + 0x0400 + TX_BW_RATE) & 7)
+			msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT;
+		else
+			msp->tx_bw_control = TX_BW_CONTROL_ABSENT;
+	}
+}
+
+#if defined(CONFIG_OF)
+static const struct of_device_id mv643xx_eth_shared_ids[] = {
+	{ .compatible = "marvell,orion-eth", },
+	{ .compatible = "marvell,kirkwood-eth", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids);
+#endif
+
+#if defined(CONFIG_OF) && !defined(CONFIG_MV64X60)
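+/* Read an optional "marvell,<name>" u32 DT property into _v, leaving
+ * _v untouched when the property is absent.
+ */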
+#define mv643xx_eth_property(_np, _name, _v)				\
+	do {								\
+		u32 tmp;						\
+		if (!of_property_read_u32(_np, "marvell," _name, &tmp))	\
+			_v = tmp;					\
+	} while (0)
+
+static struct platform_device *port_platdev[3];
+
+static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
+					  struct device_node *pnp)
+{
+	struct platform_device *ppdev;
+	struct mv643xx_eth_platform_data ppd;
+	struct resource res;
+	const char *mac_addr;
+	int ret;
+	int dev_num = 0;
+
+	memset(&ppd, 0, sizeof(ppd));
+	ppd.shared = pdev;
+
+	memset(&res, 0, sizeof(res));
+	if (!of_irq_to_resource(pnp, 0, &res)) {
+		dev_err(&pdev->dev, "missing interrupt on %s\n", pnp->name);
+		return -EINVAL;
+	}
+
+	if (of_property_read_u32(pnp, "reg", &ppd.port_number)) {
+		dev_err(&pdev->dev, "missing reg property on %s\n", pnp->name);
+		return -EINVAL;
+	}
+
+	if (ppd.port_number >= 3) {
+		dev_err(&pdev->dev, "invalid reg property on %s\n", pnp->name);
+		return -EINVAL;
+	}
+
+	while (dev_num < 3 && port_platdev[dev_num])
+		dev_num++;
+
+	if (dev_num == 3) {
+		dev_err(&pdev->dev, "too many ports registered\n");
+		return -EINVAL;
+	}
+
+	mac_addr = of_get_mac_address(pnp);
+	if (mac_addr)
+		memcpy(ppd.mac_addr, mac_addr, ETH_ALEN);
+
+	mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
+	mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr);
+	mv643xx_eth_property(pnp, "tx-sram-size", ppd.tx_sram_size);
+	mv643xx_eth_property(pnp, "rx-queue-size", ppd.rx_queue_size);
+	mv643xx_eth_property(pnp, "rx-sram-addr", ppd.rx_sram_addr);
+	mv643xx_eth_property(pnp, "rx-sram-size", ppd.rx_sram_size);
+
+	ppd.phy_node = of_parse_phandle(pnp, "phy-handle", 0);
+	if (!ppd.phy_node) {
+		ppd.phy_addr = MV643XX_ETH_PHY_NONE;
+		of_property_read_u32(pnp, "speed", &ppd.speed);
+		of_property_read_u32(pnp, "duplex", &ppd.duplex);
+	}
+
+	ppdev = platform_device_alloc(MV643XX_ETH_NAME, dev_num);
+	if (!ppdev)
+		return -ENOMEM;
+	ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	ppdev->dev.of_node = pnp;
+
+	ret = platform_device_add_resources(ppdev, &res, 1);
+	if (ret)
+		goto port_err;
+
+	ret = platform_device_add_data(ppdev, &ppd, sizeof(ppd));
+	if (ret)
+		goto port_err;
+
+	ret = platform_device_add(ppdev);
+	if (ret)
+		goto port_err;
+
+	port_platdev[dev_num] = ppdev;
+
+	return 0;
+
+port_err:
+	platform_device_put(ppdev);
+	return ret;
+}
+
+static int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
+{
+	struct mv643xx_eth_shared_platform_data *pd;
+	struct device_node *pnp, *np = pdev->dev.of_node;
+	int ret;
+
+	/* bail out if not registered from DT */
+	if (!np)
+		return 0;
+
+	pd = devm_kzalloc(&pdev->dev, sizeof(*pd), GFP_KERNEL);
+	if (!pd)
+		return -ENOMEM;
+	pdev->dev.platform_data = pd;
+
+	mv643xx_eth_property(np, "tx-checksum-limit", pd->tx_csum_limit);
+
+	for_each_available_child_of_node(np, pnp) {
+		ret = mv643xx_eth_shared_of_add_port(pdev, pnp);
+		if (ret) {
+			of_node_put(pnp);
+			return ret;
+		}
+	}
+	return 0;
+}
+
+static void mv643xx_eth_shared_of_remove(void)
+{
+	int n;
+
+	for (n = 0; n < 3; n++) {
+		platform_device_del(port_platdev[n]);
+		port_platdev[n] = NULL;
+	}
+}
+#else
+static inline int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static inline void mv643xx_eth_shared_of_remove(void)
+{
+}
+#endif
+
+static int mv643xx_eth_shared_probe(struct platform_device *pdev)
+{
+	static int mv643xx_eth_version_printed;
+	struct mv643xx_eth_shared_platform_data *pd;
+	struct mv643xx_eth_shared_private *msp;
+	const struct mbus_dram_target_info *dram;
+	struct resource *res;
+	int ret;
+
+	if (!mv643xx_eth_version_printed++)
+		pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n",
+			  mv643xx_eth_driver_version);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL)
+		return -EINVAL;
+
+	msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
+	if (msp == NULL)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, msp);
+
+	msp->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (msp->base == NULL)
+		return -ENOMEM;
+
+	msp->clk = devm_clk_get(&pdev->dev, NULL);
+	if (!IS_ERR(msp->clk))
+		clk_prepare_enable(msp->clk);
+
+	/*
+	 * (Re-)program MBUS remapping windows if we are asked to.
+	 */
+	dram = mv_mbus_dram_info();
+	if (dram)
+		mv643xx_eth_conf_mbus_windows(msp, dram);
+
+	ret = mv643xx_eth_shared_of_probe(pdev);
+	if (ret)
+		return ret;
+	pd = dev_get_platdata(&pdev->dev);
+
+	msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
+					pd->tx_csum_limit : 9 * 1024;
+	infer_hw_params(msp);
+
+	return 0;
+}
+
+static int mv643xx_eth_shared_remove(struct platform_device *pdev)
+{
+	struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);
+
+	mv643xx_eth_shared_of_remove();
+	if (!IS_ERR(msp->clk))
+		clk_disable_unprepare(msp->clk);
+	return 0;
+}
+
+static struct platform_driver mv643xx_eth_shared_driver = {
+	.probe		= mv643xx_eth_shared_probe,
+	.remove		= mv643xx_eth_shared_remove,
+	.driver = {
+		.name	= MV643XX_ETH_SHARED_NAME,
+		.of_match_table = of_match_ptr(mv643xx_eth_shared_ids),
+	},
+};
+
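+/* Each port's 5-bit PHY address is packed into the shared PHY_ADDR
+ * register at bit offset 5 * port_num.
+ */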
+static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
+{
+	int addr_shift = 5 * mp->port_num;
+	u32 data;
+
+	data = rdl(mp, PHY_ADDR);
+	data &= ~(0x1f << addr_shift);
+	data |= (phy_addr & 0x1f) << addr_shift;
+	wrl(mp, PHY_ADDR, data);
+}
+
+static int phy_addr_get(struct mv643xx_eth_private *mp)
+{
+	unsigned int data;
+
+	data = rdl(mp, PHY_ADDR);
+
+	return (data >> (5 * mp->port_num)) & 0x1f;
+}
+
+static void set_params(struct mv643xx_eth_private *mp,
+		       struct mv643xx_eth_platform_data *pd)
+{
+	struct net_device *dev = mp->dev;
+	unsigned int tx_ring_size;
+
+	if (is_valid_ether_addr(pd->mac_addr))
+		memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
+	else
+		uc_addr_get(mp, dev->dev_addr);
+
+	mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
+	if (pd->rx_queue_size)
+		mp->rx_ring_size = pd->rx_queue_size;
+	mp->rx_desc_sram_addr = pd->rx_sram_addr;
+	mp->rx_desc_sram_size = pd->rx_sram_size;
+
+	mp->rxq_count = pd->rx_queue_count ? : 1;
+
+	tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
+	if (pd->tx_queue_size)
+		tx_ring_size = pd->tx_queue_size;
+
+	mp->tx_ring_size = clamp_t(unsigned int, tx_ring_size,
+				   MV643XX_MAX_SKB_DESCS * 2, 4096);
+	if (mp->tx_ring_size != tx_ring_size)
+		netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
+			    mp->tx_ring_size, tx_ring_size);
+
+	mp->tx_desc_sram_addr = pd->tx_sram_addr;
+	mp->tx_desc_sram_size = pd->tx_sram_size;
+
+	mp->txq_count = pd->tx_queue_count ? : 1;
+}
+
+static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
+				   int phy_addr)
+{
+	struct phy_device *phydev;
+	int start;
+	int num;
+	int i;
+	char phy_id[MII_BUS_ID_SIZE + 3];
+
+	if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) {
+		start = phy_addr_get(mp) & 0x1f;
+		num = 32;
+	} else {
+		start = phy_addr & 0x1f;
+		num = 1;
+	}
+
+	/* Attempt to connect to the PHY using orion-mdio */
+	phydev = ERR_PTR(-ENODEV);
+	for (i = 0; i < num; i++) {
+		int addr = (start + i) & 0x1f;
+
+		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
+				"orion-mdio-mii", addr);
+
+		phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link,
+				PHY_INTERFACE_MODE_GMII);
+		if (!IS_ERR(phydev)) {
+			phy_addr_set(mp, addr);
+			break;
+		}
+	}
+
+	return phydev;
+}
+
+static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
+{
+	struct phy_device *phy = mp->phy;
+
+	if (speed == 0) {
+		phy->autoneg = AUTONEG_ENABLE;
+		phy->speed = 0;
+		phy->duplex = 0;
+		phy->advertising = phy->supported | ADVERTISED_Autoneg;
+	} else {
+		phy->autoneg = AUTONEG_DISABLE;
+		phy->advertising = 0;
+		phy->speed = speed;
+		phy->duplex = duplex;
+	}
+	phy_start_aneg(phy);
+}
+
+static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
+{
+	u32 pscr;
+
+	pscr = rdlp(mp, PORT_SERIAL_CONTROL);
+	if (pscr & SERIAL_PORT_ENABLE) {
+		pscr &= ~SERIAL_PORT_ENABLE;
+		wrlp(mp, PORT_SERIAL_CONTROL, pscr);
+	}
+
+	pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED;
+	if (mp->phy == NULL) {
+		pscr |= DISABLE_AUTO_NEG_SPEED_GMII;
+		if (speed == SPEED_1000)
+			pscr |= SET_GMII_SPEED_TO_1000;
+		else if (speed == SPEED_100)
+			pscr |= SET_MII_SPEED_TO_100;
+
+		pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL;
+
+		pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX;
+		if (duplex == DUPLEX_FULL)
+			pscr |= SET_FULL_DUPLEX_MODE;
+	}
+
+	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
+}
+
+static const struct net_device_ops mv643xx_eth_netdev_ops = {
+	.ndo_open		= mv643xx_eth_open,
+	.ndo_stop		= mv643xx_eth_stop,
+	.ndo_start_xmit		= mv643xx_eth_xmit,
+	.ndo_set_rx_mode	= mv643xx_eth_set_rx_mode,
+	.ndo_set_mac_address	= mv643xx_eth_set_mac_address,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_do_ioctl		= mv643xx_eth_ioctl,
+	.ndo_change_mtu		= mv643xx_eth_change_mtu,
+	.ndo_set_features	= mv643xx_eth_set_features,
+	.ndo_tx_timeout		= mv643xx_eth_tx_timeout,
+	.ndo_get_stats		= mv643xx_eth_get_stats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= mv643xx_eth_netpoll,
+#endif
+};
+
+static int mv643xx_eth_probe(struct platform_device *pdev)
+{
+	struct mv643xx_eth_platform_data *pd;
+	struct mv643xx_eth_private *mp;
+	struct net_device *dev;
+	struct resource *res;
+	int err;
+
+	pd = dev_get_platdata(&pdev->dev);
+	if (pd == NULL) {
+		dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n");
+		return -ENODEV;
+	}
+
+	if (pd->shared == NULL) {
+		dev_err(&pdev->dev, "no mv643xx_eth_platform_data->shared\n");
+		return -ENODEV;
+	}
+
+	dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8);
+	if (!dev)
+		return -ENOMEM;
+
+	mp = netdev_priv(dev);
+	platform_set_drvdata(pdev, mp);
+
+	mp->shared = platform_get_drvdata(pd->shared);
+	mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10);
+	mp->port_num = pd->port_number;
+
+	mp->dev = dev;
+
+	/* Kirkwood resets some registers on gated clocks. Especially
+	 * CLK125_BYPASS_EN must be cleared but is not available on
+	 * all other SoCs/System Controllers using this driver.
+	 */
+	if (of_device_is_compatible(pdev->dev.of_node,
+				    "marvell,kirkwood-eth-port"))
+		wrlp(mp, PORT_SERIAL_CONTROL1,
+		     rdlp(mp, PORT_SERIAL_CONTROL1) & ~CLK125_BYPASS_EN);
+
+	/*
+	 * Start with a default rate, and if there is a clock, allow
+	 * it to override the default.
+	 */
+	mp->t_clk = 133000000;
+	mp->clk = devm_clk_get(&pdev->dev, NULL);
+	if (!IS_ERR(mp->clk)) {
+		clk_prepare_enable(mp->clk);
+		mp->t_clk = clk_get_rate(mp->clk);
+	} else if (!IS_ERR(mp->shared->clk)) {
+		mp->t_clk = clk_get_rate(mp->shared->clk);
+	}
+
+	set_params(mp, pd);
+	netif_set_real_num_tx_queues(dev, mp->txq_count);
+	netif_set_real_num_rx_queues(dev, mp->rxq_count);
+
+	err = 0;
+	if (pd->phy_node) {
+		mp->phy = of_phy_connect(mp->dev, pd->phy_node,
+					 mv643xx_eth_adjust_link, 0,
+					 PHY_INTERFACE_MODE_GMII);
+		if (!mp->phy)
+			err = -ENODEV;
+		else
+			phy_addr_set(mp, mp->phy->addr);
+	} else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) {
+		mp->phy = phy_scan(mp, pd->phy_addr);
+
+		if (IS_ERR(mp->phy))
+			err = PTR_ERR(mp->phy);
+		else
+			phy_init(mp, pd->speed, pd->duplex);
+	}
+	if (err == -ENODEV) {
+		err = -EPROBE_DEFER;
+		goto out;
+	}
+	if (err)
+		goto out;
+
+	dev->ethtool_ops = &mv643xx_eth_ethtool_ops;
+
+	init_pscr(mp, pd->speed, pd->duplex);
+
+
+	mib_counters_clear(mp);
+
+	setup_timer(&mp->mib_counters_timer, mib_counters_timer_wrapper,
+		    (unsigned long)mp);
+	mp->mib_counters_timer.expires = jiffies + 30 * HZ;
+
+	spin_lock_init(&mp->mib_counters_lock);
+
+	INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);
+
+	netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT);
+
+	setup_timer(&mp->rx_oom, oom_timer_wrapper, (unsigned long)mp);
+
+
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	BUG_ON(!res);
+	dev->irq = res->start;
+
+	dev->netdev_ops = &mv643xx_eth_netdev_ops;
+
+	dev->watchdog_timeo = 2 * HZ;
+	dev->base_addr = 0;
+
+	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
+	dev->vlan_features = dev->features;
+
+	dev->features |= NETIF_F_RXCSUM;
+	dev->hw_features = dev->features;
+
+	dev->priv_flags |= IFF_UNICAST_FLT;
+	dev->gso_max_segs = MV643XX_MAX_TSO_SEGS;
+
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	if (mp->shared->win_protect)
+		wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);
+
+	netif_carrier_off(dev);
+
+	wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE);
+
+	set_rx_coal(mp, 250);
+	set_tx_coal(mp, 0);
+
+	err = register_netdev(dev);
+	if (err)
+		goto out;
+
+	netdev_notice(dev, "port %d with MAC address %pM\n",
+		      mp->port_num, dev->dev_addr);
+
+	if (mp->tx_desc_sram_size > 0)
+		netdev_notice(dev, "configured with sram\n");
+
+	return 0;
+
+out:
+	if (!IS_ERR(mp->clk))
+		clk_disable_unprepare(mp->clk);
+	free_netdev(dev);
+
+	return err;
+}
+
+static int mv643xx_eth_remove(struct platform_device *pdev)
+{
+	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
+
+	unregister_netdev(mp->dev);
+	if (mp->phy != NULL)
+		phy_disconnect(mp->phy);
+	cancel_work_sync(&mp->tx_timeout_task);
+
+	if (!IS_ERR(mp->clk))
+		clk_disable_unprepare(mp->clk);
+
+	free_netdev(mp->dev);
+
+	return 0;
+}
+
+static void mv643xx_eth_shutdown(struct platform_device *pdev)
+{
+	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
+
+	/* Mask all interrupts on ethernet port */
+	wrlp(mp, INT_MASK, 0);
+	rdlp(mp, INT_MASK);
+
+	if (netif_running(mp->dev))
+		port_reset(mp);
+}
+
+static struct platform_driver mv643xx_eth_driver = {
+	.probe		= mv643xx_eth_probe,
+	.remove		= mv643xx_eth_remove,
+	.shutdown	= mv643xx_eth_shutdown,
+	.driver = {
+		.name	= MV643XX_ETH_NAME,
+	},
+};
+
+static int __init mv643xx_eth_init_module(void)
+{
+	int rc;
+
+	rc = platform_driver_register(&mv643xx_eth_shared_driver);
+	if (!rc) {
+		rc = platform_driver_register(&mv643xx_eth_driver);
+		if (rc)
+			platform_driver_unregister(&mv643xx_eth_shared_driver);
+	}
+
+	return rc;
+}
+module_init(mv643xx_eth_init_module);
+
+static void __exit mv643xx_eth_cleanup_module(void)
+{
+	platform_driver_unregister(&mv643xx_eth_driver);
+	platform_driver_unregister(&mv643xx_eth_shared_driver);
+}
+module_exit(mv643xx_eth_cleanup_module);
+
+MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, "
+	      "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek");
+MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);
+MODULE_ALIAS("platform:" MV643XX_ETH_NAME);
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
new file mode 100644
index 0000000..fc2fb25
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -0,0 +1,301 @@
+/*
+ * Driver for the MDIO interface of Marvell network interfaces.
+ *
+ * Since the MDIO interface of Marvell network interfaces is shared
 + * between all network interfaces, having a single driver makes it
 + * possible to handle concurrent accesses properly (you may have four
 + * Ethernet ports, but they in fact share the same SMI interface to
 + * access the MDIO bus). This driver is currently used by the mvneta
 + * and mv643xx_eth drivers.
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/phy.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/of_mdio.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+
+#define MVMDIO_SMI_DATA_SHIFT              0
+#define MVMDIO_SMI_PHY_ADDR_SHIFT          16
+#define MVMDIO_SMI_PHY_REG_SHIFT           21
+#define MVMDIO_SMI_READ_OPERATION          BIT(26)
+#define MVMDIO_SMI_WRITE_OPERATION         0
+#define MVMDIO_SMI_READ_VALID              BIT(27)
+#define MVMDIO_SMI_BUSY                    BIT(28)
+#define MVMDIO_ERR_INT_CAUSE		   0x007C
+#define  MVMDIO_ERR_INT_SMI_DONE	   0x00000010
+#define MVMDIO_ERR_INT_MASK		   0x0080
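+
+/* An SMI transaction is a single 32-bit write to the SMI register:
+ * PHY address at bit 16, register number at bit 21, opcode at bit 26,
+ * and (for writes) the data in the low 16 bits.  Completion is
+ * signalled by MVMDIO_SMI_BUSY clearing; a read result is only usable
+ * when MVMDIO_SMI_READ_VALID is also set.
+ */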
+
+/*
+ * SMI Timeout measurements:
+ * - Kirkwood 88F6281 (Globalscale Dreamplug): 45us to 95us (Interrupt)
+ * - Armada 370       (Globalscale Mirabox):   41us to 43us (Polled)
+ */
+#define MVMDIO_SMI_TIMEOUT		   1000 /* 1000us = 1ms */
+#define MVMDIO_SMI_POLL_INTERVAL_MIN	   45
+#define MVMDIO_SMI_POLL_INTERVAL_MAX	   55
+
+struct orion_mdio_dev {
+	struct mutex lock;
+	void __iomem *regs;
+	struct clk *clk;
+	/*
+	 * If we have access to the error interrupt pin (which is
+	 * somewhat misnamed as it not only reflects internal errors
+	 * but also reflects SMI completion), use that to wait for
+	 * SMI access completion instead of polling the SMI busy bit.
+	 */
+	int err_interrupt;
+	wait_queue_head_t smi_busy_wait;
+};
+
+static int orion_mdio_smi_is_done(struct orion_mdio_dev *dev)
+{
+	return !(readl(dev->regs) & MVMDIO_SMI_BUSY);
+}
+
+/* Wait for the SMI unit to be ready for another operation. */
+static int orion_mdio_wait_ready(struct mii_bus *bus)
+{
+	struct orion_mdio_dev *dev = bus->priv;
+	unsigned long timeout = usecs_to_jiffies(MVMDIO_SMI_TIMEOUT);
+	unsigned long end = jiffies + timeout;
+	int timedout = 0;
+
+	while (1) {
+		if (orion_mdio_smi_is_done(dev))
+			return 0;
+		else if (timedout)
+			break;
+
+		if (dev->err_interrupt <= 0) {
+			usleep_range(MVMDIO_SMI_POLL_INTERVAL_MIN,
+				     MVMDIO_SMI_POLL_INTERVAL_MAX);
+
+			if (time_is_before_jiffies(end))
+				++timedout;
+		} else {
+			/* wait_event_timeout does not guarantee a delay of
+			 * at least one whole jiffy, so the timeout must be
+			 * no less than two.
+			 */
+			if (timeout < 2)
+				timeout = 2;
+			wait_event_timeout(dev->smi_busy_wait,
+					   orion_mdio_smi_is_done(dev),
+					   timeout);
+
+			++timedout;
+		}
+	}
+
+	dev_err(bus->parent, "Timeout: SMI busy for too long\n");
+	return -ETIMEDOUT;
+}
+
+static int orion_mdio_read(struct mii_bus *bus, int mii_id,
+			   int regnum)
+{
+	struct orion_mdio_dev *dev = bus->priv;
+	u32 val;
+	int ret;
+
+	mutex_lock(&dev->lock);
+
+	ret = orion_mdio_wait_ready(bus);
+	if (ret < 0)
+		goto out;
+
+	writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) |
+		(regnum << MVMDIO_SMI_PHY_REG_SHIFT)  |
+		MVMDIO_SMI_READ_OPERATION),
+	       dev->regs);
+
+	ret = orion_mdio_wait_ready(bus);
+	if (ret < 0)
+		goto out;
+
+	val = readl(dev->regs);
+	if (!(val & MVMDIO_SMI_READ_VALID)) {
+		dev_err(bus->parent, "SMI bus read not valid\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	ret = val & 0xFFFF;
+out:
+	mutex_unlock(&dev->lock);
+	return ret;
+}
+
+static int orion_mdio_write(struct mii_bus *bus, int mii_id,
+			    int regnum, u16 value)
+{
+	struct orion_mdio_dev *dev = bus->priv;
+	int ret;
+
+	mutex_lock(&dev->lock);
+
+	ret = orion_mdio_wait_ready(bus);
+	if (ret < 0)
+		goto out;
+
+	writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) |
+		(regnum << MVMDIO_SMI_PHY_REG_SHIFT)  |
+		MVMDIO_SMI_WRITE_OPERATION            |
+		(value << MVMDIO_SMI_DATA_SHIFT)),
+	       dev->regs);
+
+out:
+	mutex_unlock(&dev->lock);
+	return ret;
+}
+
+static irqreturn_t orion_mdio_err_irq(int irq, void *dev_id)
+{
+	struct orion_mdio_dev *dev = dev_id;
+
+	if (readl(dev->regs + MVMDIO_ERR_INT_CAUSE) &
+			MVMDIO_ERR_INT_SMI_DONE) {
+		writel(~MVMDIO_ERR_INT_SMI_DONE,
+				dev->regs + MVMDIO_ERR_INT_CAUSE);
+		wake_up(&dev->smi_busy_wait);
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_NONE;
+}
+
+static int orion_mdio_probe(struct platform_device *pdev)
+{
+	struct resource *r;
+	struct mii_bus *bus;
+	struct orion_mdio_dev *dev;
+	int i, ret;
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!r) {
+		dev_err(&pdev->dev, "No SMI register address given\n");
+		return -ENODEV;
+	}
+
+	bus = devm_mdiobus_alloc_size(&pdev->dev,
+				      sizeof(struct orion_mdio_dev));
+	if (!bus)
+		return -ENOMEM;
+
+	bus->name = "orion_mdio_bus";
+	bus->read = orion_mdio_read;
+	bus->write = orion_mdio_write;
+	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii",
+		 dev_name(&pdev->dev));
+	bus->parent = &pdev->dev;
+
+	bus->irq = devm_kmalloc_array(&pdev->dev, PHY_MAX_ADDR, sizeof(int),
+				      GFP_KERNEL);
+	if (!bus->irq)
+		return -ENOMEM;
+
+	for (i = 0; i < PHY_MAX_ADDR; i++)
+		bus->irq[i] = PHY_POLL;
+
+	dev = bus->priv;
+	dev->regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
+	if (!dev->regs) {
+		dev_err(&pdev->dev, "Unable to remap SMI register\n");
+		ret = -ENODEV;
+		goto out_mdio;
+	}
+
+	init_waitqueue_head(&dev->smi_busy_wait);
+
+	dev->clk = devm_clk_get(&pdev->dev, NULL);
+	if (!IS_ERR(dev->clk))
+		clk_prepare_enable(dev->clk);
+
+	dev->err_interrupt = platform_get_irq(pdev, 0);
+	if (dev->err_interrupt > 0) {
+		ret = devm_request_irq(&pdev->dev, dev->err_interrupt,
+					orion_mdio_err_irq,
+					IRQF_SHARED, pdev->name, dev);
+		if (ret)
+			goto out_mdio;
+
+		writel(MVMDIO_ERR_INT_SMI_DONE,
+			dev->regs + MVMDIO_ERR_INT_MASK);
+
+	} else if (dev->err_interrupt == -EPROBE_DEFER) {
+		return -EPROBE_DEFER;
+	}
+
+	mutex_init(&dev->lock);
+
+	if (pdev->dev.of_node)
+		ret = of_mdiobus_register(bus, pdev->dev.of_node);
+	else
+		ret = mdiobus_register(bus);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret);
+		goto out_mdio;
+	}
+
+	platform_set_drvdata(pdev, bus);
+
+	return 0;
+
+out_mdio:
+	if (!IS_ERR(dev->clk))
+		clk_disable_unprepare(dev->clk);
+	return ret;
+}
+
+static int orion_mdio_remove(struct platform_device *pdev)
+{
+	struct mii_bus *bus = platform_get_drvdata(pdev);
+	struct orion_mdio_dev *dev = bus->priv;
+
+	writel(0, dev->regs + MVMDIO_ERR_INT_MASK);
+	mdiobus_unregister(bus);
+	if (!IS_ERR(dev->clk))
+		clk_disable_unprepare(dev->clk);
+
+	return 0;
+}
+
+static const struct of_device_id orion_mdio_match[] = {
+	{ .compatible = "marvell,orion-mdio" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, orion_mdio_match);
+
+static struct platform_driver orion_mdio_driver = {
+	.probe = orion_mdio_probe,
+	.remove = orion_mdio_remove,
+	.driver = {
+		.name = "orion-mdio",
+		.of_match_table = orion_mdio_match,
+	},
+};
+
+module_platform_driver(orion_mdio_driver);
+
+MODULE_DESCRIPTION("Marvell MDIO interface driver");
+MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:orion-mdio");
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
new file mode 100644
index 0000000..15056f0
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -0,0 +1,3487 @@
+/*
+ * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Rami Rosen <rosenr@marvell.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <linux/inetdevice.h>
+#include <linux/mbus.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <linux/io.h>
+#include <net/tso.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/phy.h>
+#include <linux/clk.h>
+#include <linux/cpu.h>
+
+/* Registers */
+#define MVNETA_RXQ_CONFIG_REG(q)                (0x1400 + ((q) << 2))
+#define      MVNETA_RXQ_HW_BUF_ALLOC            BIT(0)
+#define      MVNETA_RXQ_PKT_OFFSET_ALL_MASK     (0xf    << 8)
+#define      MVNETA_RXQ_PKT_OFFSET_MASK(offs)   ((offs) << 8)
+#define MVNETA_RXQ_THRESHOLD_REG(q)             (0x14c0 + ((q) << 2))
+#define      MVNETA_RXQ_NON_OCCUPIED(v)         ((v) << 16)
+#define MVNETA_RXQ_BASE_ADDR_REG(q)             (0x1480 + ((q) << 2))
+#define MVNETA_RXQ_SIZE_REG(q)                  (0x14a0 + ((q) << 2))
+#define      MVNETA_RXQ_BUF_SIZE_SHIFT          19
+#define      MVNETA_RXQ_BUF_SIZE_MASK           (0x1fff << 19)
+#define MVNETA_RXQ_STATUS_REG(q)                (0x14e0 + ((q) << 2))
+#define      MVNETA_RXQ_OCCUPIED_ALL_MASK       0x3fff
+#define MVNETA_RXQ_STATUS_UPDATE_REG(q)         (0x1500 + ((q) << 2))
+#define      MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT  16
+#define      MVNETA_RXQ_ADD_NON_OCCUPIED_MAX    255
+#define MVNETA_PORT_RX_RESET                    0x1cc0
+#define      MVNETA_PORT_RX_DMA_RESET           BIT(0)
+#define MVNETA_PHY_ADDR                         0x2000
+#define      MVNETA_PHY_ADDR_MASK               0x1f
+#define MVNETA_MBUS_RETRY                       0x2010
+#define MVNETA_UNIT_INTR_CAUSE                  0x2080
+#define MVNETA_UNIT_CONTROL                     0x20B0
+#define      MVNETA_PHY_POLLING_ENABLE          BIT(1)
+#define MVNETA_WIN_BASE(w)                      (0x2200 + ((w) << 3))
+#define MVNETA_WIN_SIZE(w)                      (0x2204 + ((w) << 3))
+#define MVNETA_WIN_REMAP(w)                     (0x2280 + ((w) << 2))
+#define MVNETA_BASE_ADDR_ENABLE                 0x2290
+#define MVNETA_ACCESS_PROTECT_ENABLE            0x2294
+#define MVNETA_PORT_CONFIG                      0x2400
+#define      MVNETA_UNI_PROMISC_MODE            BIT(0)
+#define      MVNETA_DEF_RXQ(q)                  ((q) << 1)
+#define      MVNETA_DEF_RXQ_ARP(q)              ((q) << 4)
+#define      MVNETA_TX_UNSET_ERR_SUM            BIT(12)
+#define      MVNETA_DEF_RXQ_TCP(q)              ((q) << 16)
+#define      MVNETA_DEF_RXQ_UDP(q)              ((q) << 19)
+#define      MVNETA_DEF_RXQ_BPDU(q)             ((q) << 22)
+#define      MVNETA_RX_CSUM_WITH_PSEUDO_HDR     BIT(25)
+#define      MVNETA_PORT_CONFIG_DEFL_VALUE(q)   (MVNETA_DEF_RXQ(q)       | \
+						 MVNETA_DEF_RXQ_ARP(q)	 | \
+						 MVNETA_DEF_RXQ_TCP(q)	 | \
+						 MVNETA_DEF_RXQ_UDP(q)	 | \
+						 MVNETA_DEF_RXQ_BPDU(q)	 | \
+						 MVNETA_TX_UNSET_ERR_SUM | \
+						 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
+#define MVNETA_PORT_CONFIG_EXTEND                0x2404
+#define MVNETA_MAC_ADDR_LOW                      0x2414
+#define MVNETA_MAC_ADDR_HIGH                     0x2418
+#define MVNETA_SDMA_CONFIG                       0x241c
+#define      MVNETA_SDMA_BRST_SIZE_16            4
+#define      MVNETA_RX_BRST_SZ_MASK(burst)       ((burst) << 1)
+#define      MVNETA_RX_NO_DATA_SWAP              BIT(4)
+#define      MVNETA_TX_NO_DATA_SWAP              BIT(5)
+#define      MVNETA_DESC_SWAP                    BIT(6)
+#define      MVNETA_TX_BRST_SZ_MASK(burst)       ((burst) << 22)
+#define MVNETA_PORT_STATUS                       0x2444
+#define      MVNETA_TX_IN_PRGRS                  BIT(1)
+#define      MVNETA_TX_FIFO_EMPTY                BIT(8)
+#define MVNETA_RX_MIN_FRAME_SIZE                 0x247c
+#define MVNETA_SERDES_CFG			 0x24A0
+#define      MVNETA_SGMII_SERDES_PROTO		 0x0cc7
+#define      MVNETA_QSGMII_SERDES_PROTO		 0x0667
+#define MVNETA_TYPE_PRIO                         0x24bc
+#define      MVNETA_FORCE_UNI                    BIT(21)
+#define MVNETA_TXQ_CMD_1                         0x24e4
+#define MVNETA_TXQ_CMD                           0x2448
+#define      MVNETA_TXQ_DISABLE_SHIFT            8
+#define      MVNETA_TXQ_ENABLE_MASK              0x000000ff
+#define MVNETA_RX_DISCARD_FRAME_COUNT		 0x2484
+#define MVNETA_OVERRUN_FRAME_COUNT		 0x2488
+#define MVNETA_GMAC_CLOCK_DIVIDER                0x24f4
+#define      MVNETA_GMAC_1MS_CLOCK_ENABLE        BIT(31)
+#define MVNETA_ACC_MODE                          0x2500
+#define MVNETA_CPU_MAP(cpu)                      (0x2540 + ((cpu) << 2))
+#define      MVNETA_CPU_RXQ_ACCESS_ALL_MASK      0x000000ff
+#define      MVNETA_CPU_TXQ_ACCESS_ALL_MASK      0x0000ff00
+#define MVNETA_RXQ_TIME_COAL_REG(q)              (0x2580 + ((q) << 2))
+
+/* Exception Interrupt Port/Queue Cause register */
+
+#define MVNETA_INTR_NEW_CAUSE                    0x25a0
+#define MVNETA_INTR_NEW_MASK                     0x25a4
+
+/* bits  0..7  = TXQ SENT, one bit per queue.
+ * bits  8..15 = RXQ OCCUP, one bit per queue.
+ * bits 16..23 = RXQ FREE, one bit per queue.
+ * bit  29 = OLD_REG_SUM, summary bit for the old cause register
+ * bit  30 = TX_ERR_SUM, one bit for 4 ports
+ * bit  31 = MISC_SUM,   one bit for 4 ports
+ */
+#define      MVNETA_TX_INTR_MASK(nr_txqs)        (((1 << nr_txqs) - 1) << 0)
+#define      MVNETA_TX_INTR_MASK_ALL             (0xff << 0)
+#define      MVNETA_RX_INTR_MASK(nr_rxqs)        (((1 << nr_rxqs) - 1) << 8)
+#define      MVNETA_RX_INTR_MASK_ALL             (0xff << 8)
+#define      MVNETA_MISCINTR_INTR_MASK           BIT(31)
+
+#define MVNETA_INTR_OLD_CAUSE                    0x25a8
+#define MVNETA_INTR_OLD_MASK                     0x25ac
+
+/* Data Path Port/Queue Cause Register */
+#define MVNETA_INTR_MISC_CAUSE                   0x25b0
+#define MVNETA_INTR_MISC_MASK                    0x25b4
+
+#define      MVNETA_CAUSE_PHY_STATUS_CHANGE      BIT(0)
+#define      MVNETA_CAUSE_LINK_CHANGE            BIT(1)
+#define      MVNETA_CAUSE_PTP                    BIT(4)
+
+#define      MVNETA_CAUSE_INTERNAL_ADDR_ERR      BIT(7)
+#define      MVNETA_CAUSE_RX_OVERRUN             BIT(8)
+#define      MVNETA_CAUSE_RX_CRC_ERROR           BIT(9)
+#define      MVNETA_CAUSE_RX_LARGE_PKT           BIT(10)
+#define      MVNETA_CAUSE_TX_UNDERUN             BIT(11)
+#define      MVNETA_CAUSE_PRBS_ERR               BIT(12)
+#define      MVNETA_CAUSE_PSC_SYNC_CHANGE        BIT(13)
+#define      MVNETA_CAUSE_SERDES_SYNC_ERR        BIT(14)
+
+#define      MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT    16
+#define      MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK   (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
+#define      MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))
+
+#define      MVNETA_CAUSE_TXQ_ERROR_SHIFT        24
+#define      MVNETA_CAUSE_TXQ_ERROR_ALL_MASK     (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
+#define      MVNETA_CAUSE_TXQ_ERROR_MASK(q)      (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))
+
+#define MVNETA_INTR_ENABLE                       0x25b8
+#define      MVNETA_TXQ_INTR_ENABLE_ALL_MASK     0x0000ff00
+#define      MVNETA_RXQ_INTR_ENABLE_ALL_MASK     0x000000ff
+
+#define MVNETA_RXQ_CMD                           0x2680
+#define      MVNETA_RXQ_DISABLE_SHIFT            8
+#define      MVNETA_RXQ_ENABLE_MASK              0x000000ff
+#define MVETH_TXQ_TOKEN_COUNT_REG(q)             (0x2700 + ((q) << 4))
+#define MVETH_TXQ_TOKEN_CFG_REG(q)               (0x2704 + ((q) << 4))
+#define MVNETA_GMAC_CTRL_0                       0x2c00
+#define      MVNETA_GMAC_MAX_RX_SIZE_SHIFT       2
+#define      MVNETA_GMAC_MAX_RX_SIZE_MASK        0x7ffc
+#define      MVNETA_GMAC0_PORT_ENABLE            BIT(0)
+#define MVNETA_GMAC_CTRL_2                       0x2c08
+#define      MVNETA_GMAC2_INBAND_AN_ENABLE       BIT(0)
+#define      MVNETA_GMAC2_PCS_ENABLE             BIT(3)
+#define      MVNETA_GMAC2_PORT_RGMII             BIT(4)
+#define      MVNETA_GMAC2_PORT_RESET             BIT(6)
+#define MVNETA_GMAC_STATUS                       0x2c10
+#define      MVNETA_GMAC_LINK_UP                 BIT(0)
+#define      MVNETA_GMAC_SPEED_1000              BIT(1)
+#define      MVNETA_GMAC_SPEED_100               BIT(2)
+#define      MVNETA_GMAC_FULL_DUPLEX             BIT(3)
+#define      MVNETA_GMAC_RX_FLOW_CTRL_ENABLE     BIT(4)
+#define      MVNETA_GMAC_TX_FLOW_CTRL_ENABLE     BIT(5)
+#define      MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE     BIT(6)
+#define      MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE     BIT(7)
+#define MVNETA_GMAC_AUTONEG_CONFIG               0x2c0c
+#define      MVNETA_GMAC_FORCE_LINK_DOWN         BIT(0)
+#define      MVNETA_GMAC_FORCE_LINK_PASS         BIT(1)
+#define      MVNETA_GMAC_INBAND_AN_ENABLE        BIT(2)
+#define      MVNETA_GMAC_CONFIG_MII_SPEED        BIT(5)
+#define      MVNETA_GMAC_CONFIG_GMII_SPEED       BIT(6)
+#define      MVNETA_GMAC_AN_SPEED_EN             BIT(7)
+#define      MVNETA_GMAC_AN_FLOW_CTRL_EN         BIT(11)
+#define      MVNETA_GMAC_CONFIG_FULL_DUPLEX      BIT(12)
+#define      MVNETA_GMAC_AN_DUPLEX_EN            BIT(13)
+#define MVNETA_MIB_COUNTERS_BASE                 0x3000
+#define      MVNETA_MIB_LATE_COLLISION           0x7c
+#define MVNETA_DA_FILT_SPEC_MCAST                0x3400
+#define MVNETA_DA_FILT_OTH_MCAST                 0x3500
+#define MVNETA_DA_FILT_UCAST_BASE                0x3600
+#define MVNETA_TXQ_BASE_ADDR_REG(q)              (0x3c00 + ((q) << 2))
+#define MVNETA_TXQ_SIZE_REG(q)                   (0x3c20 + ((q) << 2))
+#define      MVNETA_TXQ_SENT_THRESH_ALL_MASK     0x3fff0000
+#define      MVNETA_TXQ_SENT_THRESH_MASK(coal)   ((coal) << 16)
+#define MVNETA_TXQ_UPDATE_REG(q)                 (0x3c60 + ((q) << 2))
+#define      MVNETA_TXQ_DEC_SENT_SHIFT           16
+#define MVNETA_TXQ_STATUS_REG(q)                 (0x3c40 + ((q) << 2))
+#define      MVNETA_TXQ_SENT_DESC_SHIFT          16
+#define      MVNETA_TXQ_SENT_DESC_MASK           0x3fff0000
+#define MVNETA_PORT_TX_RESET                     0x3cf0
+#define      MVNETA_PORT_TX_DMA_RESET            BIT(0)
+#define MVNETA_TX_MTU                            0x3e0c
+#define MVNETA_TX_TOKEN_SIZE                     0x3e14
+#define      MVNETA_TX_TOKEN_SIZE_MAX            0xffffffff
+#define MVNETA_TXQ_TOKEN_SIZE_REG(q)             (0x3e40 + ((q) << 2))
+#define      MVNETA_TXQ_TOKEN_SIZE_MAX           0x7fffffff
+
+#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK	 0xff
+
+/* Descriptor ring Macros */
+#define MVNETA_QUEUE_NEXT_DESC(q, index)	\
+	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
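+/* The ring wraps to descriptor 0 after last_desc, so queues can be
+ * walked with a simple increment.
+ */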
+
+/* Various constants */
+
+/* Coalescing */
+#define MVNETA_TXDONE_COAL_PKTS		0	/* interrupt per packet */
+#define MVNETA_RX_COAL_PKTS		32
+#define MVNETA_RX_COAL_USEC		100
+
+/* The two-byte Marvell header.  It either contains a special value
+ * used by Marvell switches when a specific hardware mode is enabled
+ * (not supported by this driver), or is automatically filled with
+ * zeroes on the RX side.  Since these two bytes sit in front of the
+ * Ethernet header, they automatically align the IP header on a
+ * 4-byte boundary: the hardware skips them on its own.
+ */
+#define MVNETA_MH_SIZE			2
+
+#define MVNETA_VLAN_TAG_LEN             4
+
+#define MVNETA_CPU_D_CACHE_LINE_SIZE    32
+#define MVNETA_TX_CSUM_DEF_SIZE		1600
+#define MVNETA_TX_CSUM_MAX_SIZE		9800
+#define MVNETA_ACC_MODE_EXT		1
+
+/* Timeout constants */
+#define MVNETA_TX_DISABLE_TIMEOUT_MSEC	1000
+#define MVNETA_RX_DISABLE_TIMEOUT_MSEC	1000
+#define MVNETA_TX_FIFO_EMPTY_TIMEOUT	10000
+
+#define MVNETA_TX_MTU_MAX		0x3ffff
+
+/* TSO header size */
+#define TSO_HEADER_SIZE 128
+
+/* Max number of Rx descriptors */
+#define MVNETA_MAX_RXD 128
+
+/* Max number of Tx descriptors */
+#define MVNETA_MAX_TXD 532
+
+/* Max number of allowed TCP segments for software TSO */
+#define MVNETA_MAX_TSO_SEGS 100
+
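+/* Worst case per skb: every TSO segment may need a header descriptor
+ * plus a data descriptor, plus one descriptor per page fragment.
+ */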
+#define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
+
+/* descriptor aligned size */
+#define MVNETA_DESC_ALIGNED_SIZE	32
+
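+/* Worst-case RX frame for a given MTU: Marvell header, one VLAN tag,
+ * Ethernet header and FCS, rounded up to a cache-line multiple.
+ */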
+#define MVNETA_RX_PKT_SIZE(mtu) \
+	ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
+	      ETH_HLEN + ETH_FCS_LEN,			     \
+	      MVNETA_CPU_D_CACHE_LINE_SIZE)
+
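+/* True when a DMA address lies inside the queue's preallocated TSO
+ * header region, which is freed as a whole rather than unmapped per
+ * descriptor.
+ */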
+#define IS_TSO_HEADER(txq, addr) \
+	((addr >= txq->tso_hdrs_phys) && \
+	 (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
+
+#define MVNETA_RX_BUF_SIZE(pkt_size)   ((pkt_size) + NET_SKB_PAD)
+
+struct mvneta_statistic {
+	unsigned short offset;
+	unsigned short type;
+	const char name[ETH_GSTRING_LEN];
+};
+
+#define T_REG_32	32
+#define T_REG_64	64
+
+static const struct mvneta_statistic mvneta_statistics[] = {
+	{ 0x3000, T_REG_64, "good_octets_received", },
+	{ 0x3010, T_REG_32, "good_frames_received", },
+	{ 0x3008, T_REG_32, "bad_octets_received", },
+	{ 0x3014, T_REG_32, "bad_frames_received", },
+	{ 0x3018, T_REG_32, "broadcast_frames_received", },
+	{ 0x301c, T_REG_32, "multicast_frames_received", },
+	{ 0x3050, T_REG_32, "unrec_mac_control_received", },
+	{ 0x3058, T_REG_32, "good_fc_received", },
+	{ 0x305c, T_REG_32, "bad_fc_received", },
+	{ 0x3060, T_REG_32, "undersize_received", },
+	{ 0x3064, T_REG_32, "fragments_received", },
+	{ 0x3068, T_REG_32, "oversize_received", },
+	{ 0x306c, T_REG_32, "jabber_received", },
+	{ 0x3070, T_REG_32, "mac_receive_error", },
+	{ 0x3074, T_REG_32, "bad_crc_event", },
+	{ 0x3078, T_REG_32, "collision", },
+	{ 0x307c, T_REG_32, "late_collision", },
+	{ 0x2484, T_REG_32, "rx_discard", },
+	{ 0x2488, T_REG_32, "rx_overrun", },
+	{ 0x3020, T_REG_32, "frames_64_octets", },
+	{ 0x3024, T_REG_32, "frames_65_to_127_octets", },
+	{ 0x3028, T_REG_32, "frames_128_to_255_octets", },
+	{ 0x302c, T_REG_32, "frames_256_to_511_octets", },
+	{ 0x3030, T_REG_32, "frames_512_to_1023_octets", },
+	{ 0x3034, T_REG_32, "frames_1024_to_max_octets", },
+	{ 0x3038, T_REG_64, "good_octets_sent", },
+	{ 0x3040, T_REG_32, "good_frames_sent", },
+	{ 0x3044, T_REG_32, "excessive_collision", },
+	{ 0x3048, T_REG_32, "multicast_frames_sent", },
+	{ 0x304c, T_REG_32, "broadcast_frames_sent", },
+	{ 0x3054, T_REG_32, "fc_sent", },
+	{ 0x300c, T_REG_32, "internal_mac_transmit_err", },
+};
+
+struct mvneta_pcpu_stats {
+	struct	u64_stats_sync syncp;
+	u64	rx_packets;
+	u64	rx_bytes;
+	u64	tx_packets;
+	u64	tx_bytes;
+};
+
+struct mvneta_pcpu_port {
+	/* Pointer to the shared port */
+	struct mvneta_port	*pp;
+
+	/* Pointer to the CPU-local NAPI struct */
+	struct napi_struct	napi;
+
+	/* Cause of the previous interrupt */
+	u32			cause_rx_tx;
+};
+
+struct mvneta_port {
+	struct mvneta_pcpu_port __percpu	*ports;
+	struct mvneta_pcpu_stats __percpu	*stats;
+
+	int pkt_size;
+	unsigned int frag_size;
+	void __iomem *base;
+	struct mvneta_rx_queue *rxqs;
+	struct mvneta_tx_queue *txqs;
+	struct net_device *dev;
+	struct notifier_block cpu_notifier;
+
+	/* Core clock */
+	struct clk *clk;
+	u8 mcast_count[256];
+	u16 tx_ring_size;
+	u16 rx_ring_size;
+
+	struct mii_bus *mii_bus;
+	struct phy_device *phy_dev;
+	phy_interface_t phy_interface;
+	struct device_node *phy_node;
+	unsigned int link;
+	unsigned int duplex;
+	unsigned int speed;
+	unsigned int tx_csum_limit;
+	int use_inband_status:1;
+
+	u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
+};
+
+/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
+ * layout of the transmit and receive DMA descriptors; that layout
+ * is dictated by the hardware design.
+ */
+
+#define MVNETA_TX_L3_OFF_SHIFT	0
+#define MVNETA_TX_IP_HLEN_SHIFT	8
+#define MVNETA_TX_L4_UDP	BIT(16)
+#define MVNETA_TX_L3_IP6	BIT(17)
+#define MVNETA_TXD_IP_CSUM	BIT(18)
+#define MVNETA_TXD_Z_PAD	BIT(19)
+#define MVNETA_TXD_L_DESC	BIT(20)
+#define MVNETA_TXD_F_DESC	BIT(21)
+#define MVNETA_TXD_FLZ_DESC	(MVNETA_TXD_Z_PAD  | \
+				 MVNETA_TXD_L_DESC | \
+				 MVNETA_TXD_F_DESC)
+#define MVNETA_TX_L4_CSUM_FULL	BIT(30)
+#define MVNETA_TX_L4_CSUM_NOT	BIT(31)
+
+#define MVNETA_RXD_ERR_CRC		0x0
+#define MVNETA_RXD_ERR_SUMMARY		BIT(16)
+#define MVNETA_RXD_ERR_OVERRUN		BIT(17)
+#define MVNETA_RXD_ERR_LEN		BIT(18)
+#define MVNETA_RXD_ERR_RESOURCE		(BIT(17) | BIT(18))
+#define MVNETA_RXD_ERR_CODE_MASK	(BIT(17) | BIT(18))
+#define MVNETA_RXD_L3_IP4		BIT(25)
+#define MVNETA_RXD_FIRST_LAST_DESC	(BIT(26) | BIT(27))
+#define MVNETA_RXD_L4_CSUM_OK		BIT(30)
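+
+/* Note that MVNETA_RXD_ERR_CRC is the all-zero value of the two-bit
+ * error-code field (bits 17-18), so it is only meaningful for frames
+ * that already have MVNETA_RXD_ERR_SUMMARY set, as checked in
+ * mvneta_rx() before mvneta_rx_error() decodes the error.
+ */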
+
+#if defined(__LITTLE_ENDIAN)
+struct mvneta_tx_desc {
+	u32  command;		/* Options used by HW for packet transmitting.*/
+	u16  reserved1;		/* csum_l4 (for future use)		*/
+	u16  data_size;		/* Data size of transmitted packet in bytes */
+	u32  buf_phys_addr;	/* Physical addr of transmitted buffer	*/
+	u32  reserved2;		/* hw_cmd - (for future use, PMT)	*/
+	u32  reserved3[4];	/* Reserved - (for future use)		*/
+};
+
+struct mvneta_rx_desc {
+	u32  status;		/* Info about received packet		*/
+	u16  reserved1;		/* pnc_info - (for future use, PnC)	*/
+	u16  data_size;		/* Size of received packet in bytes	*/
+
+	u32  buf_phys_addr;	/* Physical address of the buffer	*/
+	u32  reserved2;		/* pnc_flow_id  (for future use, PnC)	*/
+
+	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */
+	u16  reserved3;		/* prefetch_cmd, for future use		*/
+	u16  reserved4;		/* csum_l4 - (for future use, PnC)	*/
+
+	u32  reserved5;		/* pnc_extra PnC (for future use, PnC)	*/
+	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF)	*/
+};
+#else
+struct mvneta_tx_desc {
+	u16  data_size;		/* Data size of transmitted packet in bytes */
+	u16  reserved1;		/* csum_l4 (for future use)		*/
+	u32  command;		/* Options used by HW for packet transmitting.*/
+	u32  reserved2;		/* hw_cmd - (for future use, PMT)	*/
+	u32  buf_phys_addr;	/* Physical addr of transmitted buffer	*/
+	u32  reserved3[4];	/* Reserved - (for future use)		*/
+};
+
+struct mvneta_rx_desc {
+	u16  data_size;		/* Size of received packet in bytes	*/
+	u16  reserved1;		/* pnc_info - (for future use, PnC)	*/
+	u32  status;		/* Info about received packet		*/
+
+	u32  reserved2;		/* pnc_flow_id  (for future use, PnC)	*/
+	u32  buf_phys_addr;	/* Physical address of the buffer	*/
+
+	u16  reserved4;		/* csum_l4 - (for future use, PnC)	*/
+	u16  reserved3;		/* prefetch_cmd, for future use		*/
+	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */
+
+	u32  reserved5;		/* pnc_extra PnC (for future use, PnC)	*/
+	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF)	*/
+};
+#endif
+
+struct mvneta_tx_queue {
+	/* Number of this TX queue, in the range 0-7 */
+	u8 id;
+
+	/* Number of TX DMA descriptors in the descriptor ring */
+	int size;
+
+	/* Number of currently used TX DMA descriptor in the
+	 * descriptor ring
+	 */
+	int count;
+	int tx_stop_threshold;
+	int tx_wake_threshold;
+
+	/* Array of transmitted skb */
+	struct sk_buff **tx_skb;
+
+	/* Index of last TX DMA descriptor that was inserted */
+	int txq_put_index;
+
+	/* Index of the TX DMA descriptor to be cleaned up */
+	int txq_get_index;
+
+	u32 done_pkts_coal;
+
+	/* Virtual address of the TX DMA descriptors array */
+	struct mvneta_tx_desc *descs;
+
+	/* DMA address of the TX DMA descriptors array */
+	dma_addr_t descs_phys;
+
+	/* Index of the last TX DMA descriptor */
+	int last_desc;
+
+	/* Index of the next TX DMA descriptor to process */
+	int next_desc_to_proc;
+
+	/* DMA buffers for TSO headers */
+	char *tso_hdrs;
+
+	/* DMA address of TSO headers */
+	dma_addr_t tso_hdrs_phys;
+};
+
+struct mvneta_rx_queue {
+	/* rx queue number, in the range 0-7 */
+	u8 id;
+
+	/* num of rx descriptors in the rx descriptor ring */
+	int size;
+
+	/* counter of times when mvneta_refill() failed */
+	int missed;
+
+	u32 pkts_coal;
+	u32 time_coal;
+
+	/* Virtual address of the RX DMA descriptors array */
+	struct mvneta_rx_desc *descs;
+
+	/* DMA address of the RX DMA descriptors array */
+	dma_addr_t descs_phys;
+
+	/* Index of the last RX DMA descriptor */
+	int last_desc;
+
+	/* Index of the next RX DMA descriptor to process */
+	int next_desc_to_proc;
+};
+
+/* The hardware supports eight (8) RX queues, but only the default one
+ * (rxq_def) is currently initialized and used to receive packets.
+ */
+static int rxq_number = 8;
+static int txq_number = 8;
+
+static int rxq_def;
+
+static int rx_copybreak __read_mostly = 256;
+
+#define MVNETA_DRIVER_NAME "mvneta"
+#define MVNETA_DRIVER_VERSION "1.0"
+
+/* Utility/helper methods */
+
+/* Write helper method */
+static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
+{
+	writel(data, pp->base + offset);
+}
+
+/* Read helper method */
+static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
+{
+	return readl(pp->base + offset);
+}
+
+/* Increment txq get counter */
+static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
+{
+	txq->txq_get_index++;
+	if (txq->txq_get_index == txq->size)
+		txq->txq_get_index = 0;
+}
+
+/* Increment txq put counter */
+static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
+{
+	txq->txq_put_index++;
+	if (txq->txq_put_index == txq->size)
+		txq->txq_put_index = 0;
+}
+
+/* Clear all MIB counters */
+static void mvneta_mib_counters_clear(struct mvneta_port *pp)
+{
+	int i;
+	u32 dummy;
+
+	/* Perform dummy reads from MIB counters */
+	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
+		dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
+	dummy = mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
+	dummy = mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
+}
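+
+/* The reads alone are sufficient here: the MIB counters appear to be
+ * clear-on-read, so a dummy read of each counter (plus the discard and
+ * overrun registers) resets it without any register writes.
+ */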
+
+/* Get System Network Statistics */
+struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
+					     struct rtnl_link_stats64 *stats)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+	unsigned int start;
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct mvneta_pcpu_stats *cpu_stats;
+		u64 rx_packets;
+		u64 rx_bytes;
+		u64 tx_packets;
+		u64 tx_bytes;
+
+		cpu_stats = per_cpu_ptr(pp->stats, cpu);
+		do {
+			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+			rx_packets = cpu_stats->rx_packets;
+			rx_bytes   = cpu_stats->rx_bytes;
+			tx_packets = cpu_stats->tx_packets;
+			tx_bytes   = cpu_stats->tx_bytes;
+		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
+
+		stats->rx_packets += rx_packets;
+		stats->rx_bytes   += rx_bytes;
+		stats->tx_packets += tx_packets;
+		stats->tx_bytes   += tx_bytes;
+	}
+
+	stats->rx_errors	= dev->stats.rx_errors;
+	stats->rx_dropped	= dev->stats.rx_dropped;
+
+	stats->tx_dropped	= dev->stats.tx_dropped;
+
+	return stats;
+}
+
+/* Rx descriptors helper methods */
+
+/* Checks whether the RX descriptor having this status is both the first
+ * and the last descriptor for the RX packet. Each RX packet is currently
+ * received through a single RX descriptor, so an RX descriptor without
+ * both its first and last bits set is an error
+ */
+static int mvneta_rxq_desc_is_first_last(u32 status)
+{
+	return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
+		MVNETA_RXD_FIRST_LAST_DESC;
+}
+
+/* Add number of descriptors ready to receive new packets */
+static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
+					  struct mvneta_rx_queue *rxq,
+					  int ndescs)
+{
+	/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
+	 * be added at once
+	 */
+	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
+		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
+			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
+			     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
+		ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
+	}
+
+	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
+		    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
+}
+
+/* Get number of RX descriptors occupied by received packets */
+static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
+					struct mvneta_rx_queue *rxq)
+{
+	u32 val;
+
+	val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
+	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
+}
+
+/* Update the number of processed (rx_done) and newly refilled
+ * (rx_filled) RX descriptors; called upon return from the rx path or
+ * from mvneta_rxq_drop_pkts().
+ */
+static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
+				       struct mvneta_rx_queue *rxq,
+				       int rx_done, int rx_filled)
+{
+	u32 val;
+
+	if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
+		val = rx_done |
+		  (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
+		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
+		return;
+	}
+
+	/* Only 255 descriptors can be added at once */
+	while ((rx_done > 0) || (rx_filled > 0)) {
+		if (rx_done <= 0xff) {
+			val = rx_done;
+			rx_done = 0;
+		} else {
+			val = 0xff;
+			rx_done -= 0xff;
+		}
+		if (rx_filled <= 0xff) {
+			val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
+			rx_filled = 0;
+		} else {
+			val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
+			rx_filled -= 0xff;
+		}
+		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
+	}
+}
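+
+/* Worked example (illustrative only): rx_done = rx_filled = 300 takes
+ * the loop path above and issues two writes: 255 processed / 255
+ * refilled first, then the remaining 45 / 45.
+ */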
+
+/* Get pointer to next RX descriptor to be processed by SW */
+static struct mvneta_rx_desc *
+mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
+{
+	int rx_desc = rxq->next_desc_to_proc;
+
+	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
+	prefetch(rxq->descs + rxq->next_desc_to_proc);
+	return rxq->descs + rx_desc;
+}
+
+/* Change maximum receive size of the port. */
+static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
+{
+	u32 val;
+
+	val =  mvreg_read(pp, MVNETA_GMAC_CTRL_0);
+	val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
+	val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
+		MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
+	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
+}
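+
+/* The division by two above suggests the size field is kept in units
+ * of 2 bytes. Illustrative example: max_rx_size = 1518 with the 2-byte
+ * Marvell header gives (1518 - 2) / 2 = 758 programmed into the field.
+ */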
+
+/* Set rx queue offset */
+static void mvneta_rxq_offset_set(struct mvneta_port *pp,
+				  struct mvneta_rx_queue *rxq,
+				  int offset)
+{
+	u32 val;
+
+	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
+	val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;
+
+	/* Offset is programmed in units of 8 bytes (hence the >> 3) */
+	val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
+	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
+}
+
+/* Tx descriptors helper methods */
+
+/* Update HW with number of TX descriptors to be sent */
+static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
+				     struct mvneta_tx_queue *txq,
+				     int pend_desc)
+{
+	u32 val;
+
+	/* Only 255 descriptors can be added at once; assume the caller
+	 * processes TX descriptors in quanta of less than 256
+	 */
+	val = pend_desc;
+	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
+}
+
+/* Get pointer to next TX descriptor to be processed (send) by HW */
+static struct mvneta_tx_desc *
+mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
+{
+	int tx_desc = txq->next_desc_to_proc;
+
+	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
+	return txq->descs + tx_desc;
+}
+
+/* Release the last allocated TX descriptor. Useful to handle DMA
+ * mapping failures in the TX path.
+ */
+static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
+{
+	if (txq->next_desc_to_proc == 0)
+		txq->next_desc_to_proc = txq->last_desc - 1;
+	else
+		txq->next_desc_to_proc--;
+}
+
+/* Set rxq buf size */
+static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
+				    struct mvneta_rx_queue *rxq,
+				    int buf_size)
+{
+	u32 val;
+
+	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));
+
+	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
+	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);
+
+	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
+}
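+
+/* The >> 3 above suggests the buffer size field is kept in units of
+ * 8 bytes; e.g. a 1600-byte buffer is programmed as 1600 >> 3 = 200.
+ */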
+
+/* Disable buffer management (BM) */
+static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
+				  struct mvneta_rx_queue *rxq)
+{
+	u32 val;
+
+	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
+	val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
+	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
+}
+
+/* Start the Ethernet port RX and TX activity */
+static void mvneta_port_up(struct mvneta_port *pp)
+{
+	int queue;
+	u32 q_map;
+
+	/* Enable all initialized TXs. */
+	q_map = 0;
+	for (queue = 0; queue < txq_number; queue++) {
+		struct mvneta_tx_queue *txq = &pp->txqs[queue];
+		if (txq->descs != NULL)
+			q_map |= (1 << queue);
+	}
+	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
+
+	/* Enable all initialized RXQs. */
+	mvreg_write(pp, MVNETA_RXQ_CMD, BIT(rxq_def));
+}
+
+/* Stop the Ethernet port activity */
+static void mvneta_port_down(struct mvneta_port *pp)
+{
+	u32 val;
+	int count;
+
+	/* Stop Rx port activity. Check port Rx activity. */
+	val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;
+
+	/* Issue stop command for active channels only */
+	if (val != 0)
+		mvreg_write(pp, MVNETA_RXQ_CMD,
+			    val << MVNETA_RXQ_DISABLE_SHIFT);
+
+	/* Wait for all Rx activity to terminate. */
+	count = 0;
+	do {
+		if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
+			netdev_warn(pp->dev,
+				    "TIMEOUT for RX stopped ! rx_queue_cmd: 0x08%x\n",
+				    val);
+			break;
+		}
+		mdelay(1);
+
+		val = mvreg_read(pp, MVNETA_RXQ_CMD);
+	} while (val & 0xff);
+
+	/* Stop Tx port activity. Check port Tx activity. Issue stop
+	 * command for active channels only
+	 */
+	val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;
+
+	if (val != 0)
+		mvreg_write(pp, MVNETA_TXQ_CMD,
+			    (val << MVNETA_TXQ_DISABLE_SHIFT));
+
+	/* Wait for all Tx activity to terminate. */
+	count = 0;
+	do {
+		if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
+			netdev_warn(pp->dev,
+				    "TIMEOUT for TX stopped status=0x%08x\n",
+				    val);
+			break;
+		}
+		mdelay(1);
+
+		/* Check TX Command reg that all Txqs are stopped */
+		val = mvreg_read(pp, MVNETA_TXQ_CMD);
+
+	} while (val & 0xff);
+
+	/* Double check to verify that TX FIFO is empty */
+	count = 0;
+	do {
+		if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
+			netdev_warn(pp->dev,
+				    "TX FIFO empty timeout status=0x08%x\n",
+				    val);
+			break;
+		}
+		mdelay(1);
+
+		val = mvreg_read(pp, MVNETA_PORT_STATUS);
+	} while (!(val & MVNETA_TX_FIFO_EMPTY) &&
+		 (val & MVNETA_TX_IN_PRGRS));
+
+	udelay(200);
+}
+
+/* Enable the port by setting the port enable bit of the MAC control register */
+static void mvneta_port_enable(struct mvneta_port *pp)
+{
+	u32 val;
+
+	/* Enable port */
+	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
+	val |= MVNETA_GMAC0_PORT_ENABLE;
+	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
+}
+
+/* Disable the port and wait for about 200 usec before returning */
+static void mvneta_port_disable(struct mvneta_port *pp)
+{
+	u32 val;
+
+	/* Reset the Enable bit in the Serial Control Register */
+	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
+	val &= ~MVNETA_GMAC0_PORT_ENABLE;
+	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
+
+	udelay(200);
+}
+
+/* Multicast table methods */
+
+/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
+static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
+{
+	int offset;
+	u32 val;
+
+	if (queue == -1) {
+		val = 0;
+	} else {
+		val = 0x1 | (queue << 1);
+		val |= (val << 24) | (val << 16) | (val << 8);
+	}
+
+	for (offset = 0; offset <= 0xc; offset += 4)
+		mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
+}
+
+/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
+static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
+{
+	int offset;
+	u32 val;
+
+	if (queue == -1) {
+		val = 0;
+	} else {
+		val = 0x1 | (queue << 1);
+		val |= (val << 24) | (val << 16) | (val << 8);
+	}
+
+	for (offset = 0; offset <= 0xfc; offset += 4)
+		mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
+
+}
+
+/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
+static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
+{
+	int offset;
+	u32 val;
+
+	if (queue == -1) {
+		memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
+		val = 0;
+	} else {
+		memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
+		val = 0x1 | (queue << 1);
+		val |= (val << 24) | (val << 16) | (val << 8);
+	}
+
+	for (offset = 0; offset <= 0xfc; offset += 4)
+		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
+}
+
+/* This method sets defaults for the NETA port:
+ *	Clears interrupt Cause and Mask registers.
+ *	Clears all MAC tables.
+ *	Sets defaults for all registers.
+ *	Resets RX and TX descriptor rings.
+ *	Resets PHY.
+ * This method can be called after mvneta_port_down() to return the port
+ *	settings to defaults.
+ */
+static void mvneta_defaults_set(struct mvneta_port *pp)
+{
+	int cpu;
+	int queue;
+	u32 val;
+
+	/* Clear all Cause registers */
+	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
+	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
+	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
+
+	/* Mask all interrupts */
+	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
+	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
+	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
+	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);
+
+	/* Enable MBUS Retry bit16 */
+	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);
+
+	/* Set CPU queue access map - all CPUs have access to all RX
+	 * queues and to all TX queues
+	 */
+	for_each_present_cpu(cpu)
+		mvreg_write(pp, MVNETA_CPU_MAP(cpu),
+			    (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
+			     MVNETA_CPU_TXQ_ACCESS_ALL_MASK));
+
+	/* Reset RX and TX DMAs */
+	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
+	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
+
+	/* Disable Legacy WRR, Disable EJP, Release from reset */
+	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
+	for (queue = 0; queue < txq_number; queue++) {
+		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
+		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
+	}
+
+	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
+	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
+
+	/* Set Port Acceleration Mode */
+	val = MVNETA_ACC_MODE_EXT;
+	mvreg_write(pp, MVNETA_ACC_MODE, val);
+
+	/* Update val of portCfg register accordingly with all RxQueue types */
+	val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def);
+	mvreg_write(pp, MVNETA_PORT_CONFIG, val);
+
+	val = 0;
+	mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
+	mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);
+
+	/* Build PORT_SDMA_CONFIG_REG */
+	val = 0;
+
+	/* Default burst size */
+	val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
+	val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
+	val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;
+
+#if defined(__BIG_ENDIAN)
+	val |= MVNETA_DESC_SWAP;
+#endif
+
+	/* Assign port SDMA configuration */
+	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
+
+	/* Disable PHY polling in hardware, since we're using the
+	 * kernel phylib to do this.
+	 */
+	val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
+	val &= ~MVNETA_PHY_POLLING_ENABLE;
+	mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
+
+	if (pp->use_inband_status) {
+		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+		val &= ~(MVNETA_GMAC_FORCE_LINK_PASS |
+			 MVNETA_GMAC_FORCE_LINK_DOWN |
+			 MVNETA_GMAC_AN_FLOW_CTRL_EN);
+		val |= MVNETA_GMAC_INBAND_AN_ENABLE |
+		       MVNETA_GMAC_AN_SPEED_EN |
+		       MVNETA_GMAC_AN_DUPLEX_EN;
+		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+		val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
+		val |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
+		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);
+	} else {
+		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+		val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE |
+		       MVNETA_GMAC_AN_SPEED_EN |
+		       MVNETA_GMAC_AN_DUPLEX_EN);
+		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+	}
+
+	mvneta_set_ucast_table(pp, -1);
+	mvneta_set_special_mcast_table(pp, -1);
+	mvneta_set_other_mcast_table(pp, -1);
+
+	/* Set port interrupt enable register - default enable all */
+	mvreg_write(pp, MVNETA_INTR_ENABLE,
+		    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
+		     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
+
+	mvneta_mib_counters_clear(pp);
+}
+
+/* Set max sizes for tx queues */
+static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
+{
+	u32 val, size, mtu;
+	int queue;
+
+	mtu = max_tx_size * 8;
+	if (mtu > MVNETA_TX_MTU_MAX)
+		mtu = MVNETA_TX_MTU_MAX;
+
+	/* Set MTU */
+	val = mvreg_read(pp, MVNETA_TX_MTU);
+	val &= ~MVNETA_TX_MTU_MAX;
+	val |= mtu;
+	mvreg_write(pp, MVNETA_TX_MTU, val);
+
+	/* The TX token size and all TXQs' token sizes must be larger than the MTU */
+	val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);
+
+	size = val & MVNETA_TX_TOKEN_SIZE_MAX;
+	if (size < mtu) {
+		size = mtu;
+		val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
+		val |= size;
+		mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
+	}
+	for (queue = 0; queue < txq_number; queue++) {
+		val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));
+
+		size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
+		if (size < mtu) {
+			size = mtu;
+			val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
+			val |= size;
+			mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
+		}
+	}
+}
+
+/* Set unicast address */
+static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
+				  int queue)
+{
+	unsigned int unicast_reg;
+	unsigned int tbl_offset;
+	unsigned int reg_offset;
+
+	/* Locate the Unicast table entry */
+	last_nibble = (0xf & last_nibble);
+
+	/* offset from unicast tbl base */
+	tbl_offset = (last_nibble / 4) * 4;
+
+	/* offset within the above reg  */
+	reg_offset = last_nibble % 4;
+
+	unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));
+
+	if (queue == -1) {
+		/* Clear accepts frame bit at specified unicast DA tbl entry */
+		unicast_reg &= ~(0xff << (8 * reg_offset));
+	} else {
+		unicast_reg &= ~(0xff << (8 * reg_offset));
+		unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
+	}
+
+	mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
+}
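+
+/* Worked example (illustrative only): last_nibble = 0xb gives
+ * tbl_offset = (11 / 4) * 4 = 8 and reg_offset = 3, i.e. the entry is
+ * byte 3 of the register at MVNETA_DA_FILT_UCAST_BASE + 8, and
+ * 0x01 | (queue << 1) is written there (0x01 for queue 0).
+ */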
+
+/* Set mac address */
+static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
+				int queue)
+{
+	unsigned int mac_h;
+	unsigned int mac_l;
+
+	if (queue != -1) {
+		mac_l = (addr[4] << 8) | (addr[5]);
+		mac_h = (addr[0] << 24) | (addr[1] << 16) |
+			(addr[2] << 8) | (addr[3] << 0);
+
+		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
+		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
+	}
+
+	/* Accept frames of this address */
+	mvneta_set_ucast_addr(pp, addr[5], queue);
+}
+
+/* Set the number of packets that will be received before an RX
+ * interrupt is generated by the HW.
+ */
+static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
+				    struct mvneta_rx_queue *rxq, u32 value)
+{
+	mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
+		    value | MVNETA_RXQ_NON_OCCUPIED(0));
+	rxq->pkts_coal = value;
+}
+
+/* Set the time delay in usec before an RX interrupt is generated by
+ * the HW.
+ */
+static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
+				    struct mvneta_rx_queue *rxq, u32 value)
+{
+	u32 val;
+	unsigned long clk_rate;
+
+	clk_rate = clk_get_rate(pp->clk);
+	val = (clk_rate / 1000000) * value;
+
+	mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
+	rxq->time_coal = value;
+}
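+
+/* The programmed value is in core-clock ticks: assuming, purely for
+ * illustration, a 250 MHz core clock, a 100 usec delay is written as
+ * (250000000 / 1000000) * 100 = 25000 ticks.
+ */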
+
+/* Set threshold for TX_DONE pkts coalescing */
+static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
+					 struct mvneta_tx_queue *txq, u32 value)
+{
+	u32 val;
+
+	val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));
+
+	val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
+	val |= MVNETA_TXQ_SENT_THRESH_MASK(value);
+
+	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);
+
+	txq->done_pkts_coal = value;
+}
+
+/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
+static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
+				u32 phys_addr, u32 cookie)
+{
+	rx_desc->buf_cookie = cookie;
+	rx_desc->buf_phys_addr = phys_addr;
+}
+
+/* Decrement sent descriptors counter */
+static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
+				     struct mvneta_tx_queue *txq,
+				     int sent_desc)
+{
+	u32 val;
+
+	/* Only 255 TX descriptors can be updated at once */
+	while (sent_desc > 0xff) {
+		val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
+		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
+		sent_desc = sent_desc - 0xff;
+	}
+
+	val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
+	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
+}
+
+/* Get number of TX descriptors already sent by HW */
+static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
+					struct mvneta_tx_queue *txq)
+{
+	u32 val;
+	int sent_desc;
+
+	val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
+	sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
+		MVNETA_TXQ_SENT_DESC_SHIFT;
+
+	return sent_desc;
+}
+
+/* Get number of sent descriptors and decrement the counter.
+ * The number of sent descriptors is returned.
+ */
+static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
+				     struct mvneta_tx_queue *txq)
+{
+	int sent_desc;
+
+	/* Get number of sent descriptors */
+	sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
+
+	/* Decrement sent descriptors counter */
+	if (sent_desc)
+		mvneta_txq_sent_desc_dec(pp, txq, sent_desc);
+
+	return sent_desc;
+}
+
+/* Set TXQ descriptors fields relevant for CSUM calculation */
+static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
+				int ip_hdr_len, int l4_proto)
+{
+	u32 command;
+
+	/* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
+	 * G_L4_chk, L4_type; required only for checksum
+	 * calculation
+	 */
+	command =  l3_offs    << MVNETA_TX_L3_OFF_SHIFT;
+	command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
+
+	if (l3_proto == htons(ETH_P_IP))
+		command |= MVNETA_TXD_IP_CSUM;
+	else
+		command |= MVNETA_TX_L3_IP6;
+
+	if (l4_proto == IPPROTO_TCP)
+		command |=  MVNETA_TX_L4_CSUM_FULL;
+	else if (l4_proto == IPPROTO_UDP)
+		command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
+	else
+		command |= MVNETA_TX_L4_CSUM_NOT;
+
+	return command;
+}
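+
+/* Worked example (illustrative only): for an untagged IPv4/TCP frame,
+ * mvneta_skb_tx_csum() below calls this with l3_offs = 14 and
+ * ip_hdr_len = 5 (ihl of a 20-byte header), so the command word is
+ * 14 | (5 << 8) | MVNETA_TXD_IP_CSUM | MVNETA_TX_L4_CSUM_FULL
+ * = 0x4004050e.
+ */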
+
+/* Display more error info */
+static void mvneta_rx_error(struct mvneta_port *pp,
+			    struct mvneta_rx_desc *rx_desc)
+{
+	u32 status = rx_desc->status;
+
+	if (!mvneta_rxq_desc_is_first_last(status)) {
+		netdev_err(pp->dev,
+			   "bad rx status %08x (buffer oversize), size=%d\n",
+			   status, rx_desc->data_size);
+		return;
+	}
+
+	switch (status & MVNETA_RXD_ERR_CODE_MASK) {
+	case MVNETA_RXD_ERR_CRC:
+		netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
+			   status, rx_desc->data_size);
+		break;
+	case MVNETA_RXD_ERR_OVERRUN:
+		netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
+			   status, rx_desc->data_size);
+		break;
+	case MVNETA_RXD_ERR_LEN:
+		netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
+			   status, rx_desc->data_size);
+		break;
+	case MVNETA_RXD_ERR_RESOURCE:
+		netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
+			   status, rx_desc->data_size);
+		break;
+	}
+}
+
+/* Handle RX checksum offload based on the descriptor's status */
+static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
+			   struct sk_buff *skb)
+{
+	if ((status & MVNETA_RXD_L3_IP4) &&
+	    (status & MVNETA_RXD_L4_CSUM_OK)) {
+		skb->csum = 0;
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		return;
+	}
+
+	skb->ip_summed = CHECKSUM_NONE;
+}
+
+/* Return a TX queue pointer according to <cause>, as returned from the
+ * tx_done reg, by finding the most significant set bit. <cause> must
+ * not be zero; the return value is always a valid queue, matching the
+ * highest-numbered queue with a bit set in <cause>.
+ */
+static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
+						     u32 cause)
+{
+	int queue = fls(cause) - 1;
+
+	return &pp->txqs[queue];
+}
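+
+/* Worked example (illustrative only): cause = 0x0c (TXQs 2 and 3
+ * pending) gives fls(0x0c) - 1 = 3, so TXQ 3 is handled first;
+ * mvneta_tx_done_gbe() then clears bit 3 and the next call picks TXQ 2.
+ */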
+
+/* Free tx queue skbuffs */
+static void mvneta_txq_bufs_free(struct mvneta_port *pp,
+				 struct mvneta_tx_queue *txq, int num)
+{
+	int i;
+
+	for (i = 0; i < num; i++) {
+		struct mvneta_tx_desc *tx_desc = txq->descs +
+			txq->txq_get_index;
+		struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];
+
+		mvneta_txq_inc_get(txq);
+
+		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
+			dma_unmap_single(pp->dev->dev.parent,
+					 tx_desc->buf_phys_addr,
+					 tx_desc->data_size, DMA_TO_DEVICE);
+		if (!skb)
+			continue;
+		dev_kfree_skb_any(skb);
+	}
+}
+
+/* Handle end of transmission */
+static void mvneta_txq_done(struct mvneta_port *pp,
+			   struct mvneta_tx_queue *txq)
+{
+	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
+	int tx_done;
+
+	tx_done = mvneta_txq_sent_desc_proc(pp, txq);
+	if (!tx_done)
+		return;
+
+	mvneta_txq_bufs_free(pp, txq, tx_done);
+
+	txq->count -= tx_done;
+
+	if (netif_tx_queue_stopped(nq)) {
+		if (txq->count <= txq->tx_wake_threshold)
+			netif_tx_wake_queue(nq);
+	}
+}
+
+static void *mvneta_frag_alloc(const struct mvneta_port *pp)
+{
+	if (likely(pp->frag_size <= PAGE_SIZE))
+		return netdev_alloc_frag(pp->frag_size);
+	else
+		return kmalloc(pp->frag_size, GFP_ATOMIC);
+}
+
+static void mvneta_frag_free(const struct mvneta_port *pp, void *data)
+{
+	if (likely(pp->frag_size <= PAGE_SIZE))
+		skb_free_frag(data);
+	else
+		kfree(data);
+}
+
+/* Refill processing */
+static int mvneta_rx_refill(struct mvneta_port *pp,
+			    struct mvneta_rx_desc *rx_desc)
+{
+	dma_addr_t phys_addr;
+	void *data;
+
+	data = mvneta_frag_alloc(pp);
+	if (!data)
+		return -ENOMEM;
+
+	phys_addr = dma_map_single(pp->dev->dev.parent, data,
+				   MVNETA_RX_BUF_SIZE(pp->pkt_size),
+				   DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
+		mvneta_frag_free(pp, data);
+		return -ENOMEM;
+	}
+
+	mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)data);
+	return 0;
+}
+
+/* Handle tx checksum */
+static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
+{
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		int ip_hdr_len = 0;
+		__be16 l3_proto = vlan_get_protocol(skb);
+		u8 l4_proto;
+
+		if (l3_proto == htons(ETH_P_IP)) {
+			struct iphdr *ip4h = ip_hdr(skb);
+
+			/* Calculate IPv4 checksum and L4 checksum */
+			ip_hdr_len = ip4h->ihl;
+			l4_proto = ip4h->protocol;
+		} else if (l3_proto == htons(ETH_P_IPV6)) {
+			struct ipv6hdr *ip6h = ipv6_hdr(skb);
+
+			/* Read l4_protocol from one of IPv6 extra headers */
+			if (skb_network_header_len(skb) > 0)
+				ip_hdr_len = (skb_network_header_len(skb) >> 2);
+			l4_proto = ip6h->nexthdr;
+		} else
+			return MVNETA_TX_L4_CSUM_NOT;
+
+		return mvneta_txq_desc_csum(skb_network_offset(skb),
+					    l3_proto, ip_hdr_len, l4_proto);
+	}
+
+	return MVNETA_TX_L4_CSUM_NOT;
+}
+
+/* Drop packets received by the RXQ and free buffers */
+static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
+				 struct mvneta_rx_queue *rxq)
+{
+	int rx_done, i;
+
+	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
+	for (i = 0; i < rxq->size; i++) {
+		struct mvneta_rx_desc *rx_desc = rxq->descs + i;
+		void *data = (void *)rx_desc->buf_cookie;
+
+		dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
+				 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
+		mvneta_frag_free(pp, data);
+	}
+
+	if (rx_done)
+		mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
+}
+
+/* Main rx processing */
+static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
+		     struct mvneta_rx_queue *rxq)
+{
+	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
+	struct net_device *dev = pp->dev;
+	int rx_done;
+	u32 rcvd_pkts = 0;
+	u32 rcvd_bytes = 0;
+
+	/* Get number of received packets */
+	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
+
+	if (rx_todo > rx_done)
+		rx_todo = rx_done;
+
+	rx_done = 0;
+
+	/* Fairness NAPI loop */
+	while (rx_done < rx_todo) {
+		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
+		struct sk_buff *skb;
+		unsigned char *data;
+		dma_addr_t phys_addr;
+		u32 rx_status;
+		int rx_bytes, err;
+
+		rx_done++;
+		rx_status = rx_desc->status;
+		rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
+		data = (unsigned char *)rx_desc->buf_cookie;
+		phys_addr = rx_desc->buf_phys_addr;
+
+		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
+		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
+		err_drop_frame:
+			dev->stats.rx_errors++;
+			mvneta_rx_error(pp, rx_desc);
+			/* leave the descriptor untouched */
+			continue;
+		}
+
+		if (rx_bytes <= rx_copybreak) {
+			/* better copy a small frame and not unmap the DMA region */
+			skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
+			if (unlikely(!skb))
+				goto err_drop_frame;
+
+			dma_sync_single_range_for_cpu(dev->dev.parent,
+			                              rx_desc->buf_phys_addr,
+			                              MVNETA_MH_SIZE + NET_SKB_PAD,
+			                              rx_bytes,
+			                              DMA_FROM_DEVICE);
+			memcpy(skb_put(skb, rx_bytes),
+			       data + MVNETA_MH_SIZE + NET_SKB_PAD,
+			       rx_bytes);
+
+			skb->protocol = eth_type_trans(skb, dev);
+			mvneta_rx_csum(pp, rx_status, skb);
+			napi_gro_receive(&port->napi, skb);
+
+			rcvd_pkts++;
+			rcvd_bytes += rx_bytes;
+
+			/* leave the descriptor and buffer untouched */
+			continue;
+		}
+
+		/* Refill processing */
+		err = mvneta_rx_refill(pp, rx_desc);
+		if (err) {
+			netdev_err(dev, "Linux processing - Can't refill\n");
+			rxq->missed++;
+			goto err_drop_frame;
+		}
+
+		skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
+
+		/* After the refill, the old buffer has to be unmapped
+		 * regardless of whether the skb was built successfully.
+		 */
+		dma_unmap_single(dev->dev.parent, phys_addr,
+				 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
+
+		if (!skb)
+			goto err_drop_frame;
+
+		rcvd_pkts++;
+		rcvd_bytes += rx_bytes;
+
+		/* Linux processing */
+		skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
+		skb_put(skb, rx_bytes);
+
+		skb->protocol = eth_type_trans(skb, dev);
+
+		mvneta_rx_csum(pp, rx_status, skb);
+
+		napi_gro_receive(&port->napi, skb);
+	}
+
+	if (rcvd_pkts) {
+		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+
+		u64_stats_update_begin(&stats->syncp);
+		stats->rx_packets += rcvd_pkts;
+		stats->rx_bytes   += rcvd_bytes;
+		u64_stats_update_end(&stats->syncp);
+	}
+
+	/* Update rxq management counters */
+	mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
+
+	return rx_done;
+}
+
+static inline void
+mvneta_tso_put_hdr(struct sk_buff *skb,
+		   struct mvneta_port *pp, struct mvneta_tx_queue *txq)
+{
+	struct mvneta_tx_desc *tx_desc;
+	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+	txq->tx_skb[txq->txq_put_index] = NULL;
+	tx_desc = mvneta_txq_next_desc_get(txq);
+	tx_desc->data_size = hdr_len;
+	tx_desc->command = mvneta_skb_tx_csum(pp, skb);
+	tx_desc->command |= MVNETA_TXD_F_DESC;
+	tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
+				 txq->txq_put_index * TSO_HEADER_SIZE;
+	mvneta_txq_inc_put(txq);
+}
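+
+/* The header for put-slot i always lives at
+ * tso_hdrs_phys + i * TSO_HEADER_SIZE, which is exactly the range that
+ * IS_TSO_HEADER() matches, so mvneta_txq_bufs_free() knows to skip
+ * dma_unmap_single() for these coherent buffers.
+ */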
+
+static inline int
+mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
+		    struct sk_buff *skb, char *data, int size,
+		    bool last_tcp, bool is_last)
+{
+	struct mvneta_tx_desc *tx_desc;
+
+	tx_desc = mvneta_txq_next_desc_get(txq);
+	tx_desc->data_size = size;
+	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
+						size, DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev->dev.parent,
+		     tx_desc->buf_phys_addr))) {
+		mvneta_txq_desc_put(txq);
+		return -ENOMEM;
+	}
+
+	tx_desc->command = 0;
+	txq->tx_skb[txq->txq_put_index] = NULL;
+
+	if (last_tcp) {
+		/* last descriptor in the TCP packet */
+		tx_desc->command = MVNETA_TXD_L_DESC;
+
+		/* last descriptor in SKB */
+		if (is_last)
+			txq->tx_skb[txq->txq_put_index] = skb;
+	}
+	mvneta_txq_inc_put(txq);
+	return 0;
+}
+
+static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
+			 struct mvneta_tx_queue *txq)
+{
+	int total_len, data_left;
+	int desc_count = 0;
+	struct mvneta_port *pp = netdev_priv(dev);
+	struct tso_t tso;
+	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+	int i;
+
+	/* Count needed descriptors */
+	if ((txq->count + tso_count_descs(skb)) >= txq->size)
+		return 0;
+
+	if (skb_headlen(skb) < hdr_len) {
+		pr_info("%s: skb linear area too short for TSO headers\n",
+			__func__);
+		return 0;
+	}
+
+	/* Initialize the TSO handler, and prepare the first payload */
+	tso_start(skb, &tso);
+
+	total_len = skb->len - hdr_len;
+	while (total_len > 0) {
+		char *hdr;
+
+		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+		total_len -= data_left;
+		desc_count++;
+
+		/* prepare packet headers: MAC + IP + TCP */
+		hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
+		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
+
+		mvneta_tso_put_hdr(skb, pp, txq);
+
+		while (data_left > 0) {
+			int size;
+			desc_count++;
+
+			size = min_t(int, tso.size, data_left);
+
+			if (mvneta_tso_put_data(dev, txq, skb,
+						 tso.data, size,
+						 size == data_left,
+						 total_len == 0))
+				goto err_release;
+			data_left -= size;
+
+			tso_build_data(skb, &tso, size);
+		}
+	}
+
+	return desc_count;
+
+err_release:
+	/* Release all used data descriptors; header descriptors must not
+	 * be DMA-unmapped.
+	 */
+	for (i = desc_count - 1; i >= 0; i--) {
+		struct mvneta_tx_desc *tx_desc = txq->descs + i;
+		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
+			dma_unmap_single(pp->dev->dev.parent,
+					 tx_desc->buf_phys_addr,
+					 tx_desc->data_size,
+					 DMA_TO_DEVICE);
+		mvneta_txq_desc_put(txq);
+	}
+	return 0;
+}
+
+/* Handle tx fragmentation processing */
+static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
+				  struct mvneta_tx_queue *txq)
+{
+	struct mvneta_tx_desc *tx_desc;
+	int i, nr_frags = skb_shinfo(skb)->nr_frags;
+
+	for (i = 0; i < nr_frags; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		void *addr = page_address(frag->page.p) + frag->page_offset;
+
+		tx_desc = mvneta_txq_next_desc_get(txq);
+		tx_desc->data_size = frag->size;
+
+		tx_desc->buf_phys_addr =
+			dma_map_single(pp->dev->dev.parent, addr,
+				       tx_desc->data_size, DMA_TO_DEVICE);
+
+		if (dma_mapping_error(pp->dev->dev.parent,
+				      tx_desc->buf_phys_addr)) {
+			mvneta_txq_desc_put(txq);
+			goto error;
+		}
+
+		if (i == nr_frags - 1) {
+			/* Last descriptor */
+			tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
+			txq->tx_skb[txq->txq_put_index] = skb;
+		} else {
+			/* Descriptor in the middle: Not First, Not Last */
+			tx_desc->command = 0;
+			txq->tx_skb[txq->txq_put_index] = NULL;
+		}
+		mvneta_txq_inc_put(txq);
+	}
+
+	return 0;
+
+error:
+	/* Release all descriptors that were used to map fragments of
+	 * this packet, as well as the corresponding DMA mappings
+	 */
+	for (i = i - 1; i >= 0; i--) {
+		tx_desc = txq->descs + i;
+		dma_unmap_single(pp->dev->dev.parent,
+				 tx_desc->buf_phys_addr,
+				 tx_desc->data_size,
+				 DMA_TO_DEVICE);
+		mvneta_txq_desc_put(txq);
+	}
+
+	return -ENOMEM;
+}
+
+/* Main tx processing */
+static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+	u16 txq_id = skb_get_queue_mapping(skb);
+	struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
+	struct mvneta_tx_desc *tx_desc;
+	int len = skb->len;
+	int frags = 0;
+	u32 tx_cmd;
+
+	if (!netif_running(dev))
+		goto out;
+
+	if (skb_is_gso(skb)) {
+		frags = mvneta_tx_tso(skb, dev, txq);
+		goto out;
+	}
+
+	frags = skb_shinfo(skb)->nr_frags + 1;
+
+	/* Get a descriptor for the first part of the packet */
+	tx_desc = mvneta_txq_next_desc_get(txq);
+
+	tx_cmd = mvneta_skb_tx_csum(pp, skb);
+
+	tx_desc->data_size = skb_headlen(skb);
+
+	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
+						tx_desc->data_size,
+						DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev->dev.parent,
+				       tx_desc->buf_phys_addr))) {
+		mvneta_txq_desc_put(txq);
+		frags = 0;
+		goto out;
+	}
+
+	if (frags == 1) {
+		/* First and Last descriptor */
+		tx_cmd |= MVNETA_TXD_FLZ_DESC;
+		tx_desc->command = tx_cmd;
+		txq->tx_skb[txq->txq_put_index] = skb;
+		mvneta_txq_inc_put(txq);
+	} else {
+		/* First but not Last */
+		tx_cmd |= MVNETA_TXD_F_DESC;
+		txq->tx_skb[txq->txq_put_index] = NULL;
+		mvneta_txq_inc_put(txq);
+		tx_desc->command = tx_cmd;
+		/* Continue with other skb fragments */
+		if (mvneta_tx_frag_process(pp, skb, txq)) {
+			dma_unmap_single(dev->dev.parent,
+					 tx_desc->buf_phys_addr,
+					 tx_desc->data_size,
+					 DMA_TO_DEVICE);
+			mvneta_txq_desc_put(txq);
+			frags = 0;
+			goto out;
+		}
+	}
+
+out:
+	if (frags > 0) {
+		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
+
+		txq->count += frags;
+		mvneta_txq_pend_desc_add(pp, txq, frags);
+
+		if (txq->count >= txq->tx_stop_threshold)
+			netif_tx_stop_queue(nq);
+
+		u64_stats_update_begin(&stats->syncp);
+		stats->tx_packets++;
+		stats->tx_bytes  += len;
+		u64_stats_update_end(&stats->syncp);
+	} else {
+		dev->stats.tx_dropped++;
+		dev_kfree_skb_any(skb);
+	}
+
+	return NETDEV_TX_OK;
+}
+
+/* Free tx resources when resetting a port */
+static void mvneta_txq_done_force(struct mvneta_port *pp,
+				  struct mvneta_tx_queue *txq)
+{
+	int tx_done = txq->count;
+
+	mvneta_txq_bufs_free(pp, txq, tx_done);
+
+	/* reset txq */
+	txq->count = 0;
+	txq->txq_put_index = 0;
+	txq->txq_get_index = 0;
+}
+
+/* Handle tx done - called in softirq context. The <cause_tx_done> argument
+ * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
+ */
+static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
+{
+	struct mvneta_tx_queue *txq;
+	struct netdev_queue *nq;
+
+	while (cause_tx_done) {
+		txq = mvneta_tx_done_policy(pp, cause_tx_done);
+
+		nq = netdev_get_tx_queue(pp->dev, txq->id);
+		__netif_tx_lock(nq, smp_processor_id());
+
+		if (txq->count)
+			mvneta_txq_done(pp, txq);
+
+		__netif_tx_unlock(nq);
+		cause_tx_done &= ~((1 << txq->id));
+	}
+}
+
+/* Compute the crc8 of the specified address, using a unique algorithm
+ * defined by the hw spec, different from the generic crc8 algorithm.
+ */
+static int mvneta_addr_crc(unsigned char *addr)
+{
+	int crc = 0;
+	int i;
+
+	for (i = 0; i < ETH_ALEN; i++) {
+		int j;
+
+		crc = (crc ^ addr[i]) << 8;
+		for (j = 7; j >= 0; j--) {
+			if (crc & (0x100 << j))
+				crc ^= 0x107 << j;
+		}
+	}
+
+	return crc;
+}
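+
+/* This appears to be a straight MSB-first CRC-8 with polynomial 0x107
+ * (x^8 + x^2 + x + 1), zero initial value and no final XOR; the 8-bit
+ * result indexes the 256-entry Other Multicast table and mcast_count[].
+ */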
+
+/* This method controls the net device special MAC multicast support.
+ * The Special Multicast Table for MAC addresses supports MAC of the form
+ * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
+ * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
+ * Table entries in the DA-Filter table. This method sets the
+ * appropriate Special Multicast Table entry.
+ */
+static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
+					  unsigned char last_byte,
+					  int queue)
+{
+	unsigned int smc_table_reg;
+	unsigned int tbl_offset;
+	unsigned int reg_offset;
+
+	/* Register offset from SMC table base    */
+	tbl_offset = (last_byte / 4);
+	/* Entry offset within the above reg */
+	reg_offset = last_byte % 4;
+
+	smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
+					+ tbl_offset * 4));
+
+	if (queue == -1)
+		smc_table_reg &= ~(0xff << (8 * reg_offset));
+	else {
+		smc_table_reg &= ~(0xff << (8 * reg_offset));
+		smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
+	}
+
+	mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
+		    smc_table_reg);
+}
+
+/* This method controls the network device Other MAC multicast support.
+ * The Other Multicast Table is used for multicast of another type.
+ * A CRC-8 is used as an index to the Other Multicast Table entries
+ * in the DA-Filter table.
+ * The method gets the CRC-8 value from the calling routine and sets
+ * the appropriate Other Multicast Table entry according to the
+ * specified CRC-8.
+ */
+static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
+					unsigned char crc8,
+					int queue)
+{
+	unsigned int omc_table_reg;
+	unsigned int tbl_offset;
+	unsigned int reg_offset;
+
+	tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
+	reg_offset = crc8 % 4;	     /* Entry offset within the above reg   */
+
+	omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);
+
+	if (queue == -1) {
+		/* Clear accepts frame bit at specified Other DA table entry */
+		omc_table_reg &= ~(0xff << (8 * reg_offset));
+	} else {
+		omc_table_reg &= ~(0xff << (8 * reg_offset));
+		omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
+	}
+
+	mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
+}
+
+/* The network device supports multicast using two tables:
+ *    1) Special Multicast Table for MAC addresses of the form
+ *       0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
+ *       The MAC DA[7:0] bits are used as a pointer to the Special Multicast
+ *       Table entries in the DA-Filter table.
+ *    2) Other Multicast Table for multicast of another type. A CRC-8 value
+ *       is used as an index to the Other Multicast Table entries in the
+ *       DA-Filter table.
+ */
+static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
+				 int queue)
+{
+	unsigned char crc_result = 0;
+
+	if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
+		mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
+		return 0;
+	}
+
+	crc_result = mvneta_addr_crc(p_addr);
+	if (queue == -1) {
+		if (pp->mcast_count[crc_result] == 0) {
+			netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
+				    crc_result);
+			return -EINVAL;
+		}
+
+		pp->mcast_count[crc_result]--;
+		if (pp->mcast_count[crc_result] != 0) {
+			netdev_info(pp->dev,
+				    "After delete there are %d valid Mcast for crc8=0x%02x\n",
+				    pp->mcast_count[crc_result], crc_result);
+			return -EINVAL;
+		}
+	} else
+		pp->mcast_count[crc_result]++;
+
+	mvneta_set_other_mcast_addr(pp, crc_result, queue);
+
+	return 0;
+}
+
+/* Configure the filtering mode of the Ethernet port */
+static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
+					  int is_promisc)
+{
+	u32 port_cfg_reg, val;
+
+	port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);
+
+	val = mvreg_read(pp, MVNETA_TYPE_PRIO);
+
+	/* Set / Clear UPM bit in port configuration register */
+	if (is_promisc) {
+		/* Accept all Unicast addresses */
+		port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
+		val |= MVNETA_FORCE_UNI;
+		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
+		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
+	} else {
+		/* Reject all Unicast addresses */
+		port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
+		val &= ~MVNETA_FORCE_UNI;
+	}
+
+	mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
+	mvreg_write(pp, MVNETA_TYPE_PRIO, val);
+}
+
+/* register unicast and multicast addresses */
+static void mvneta_set_rx_mode(struct net_device *dev)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+	struct netdev_hw_addr *ha;
+
+	if (dev->flags & IFF_PROMISC) {
+		/* Accept all: Multicast + Unicast */
+		mvneta_rx_unicast_promisc_set(pp, 1);
+		mvneta_set_ucast_table(pp, rxq_def);
+		mvneta_set_special_mcast_table(pp, rxq_def);
+		mvneta_set_other_mcast_table(pp, rxq_def);
+	} else {
+		/* Accept single Unicast */
+		mvneta_rx_unicast_promisc_set(pp, 0);
+		mvneta_set_ucast_table(pp, -1);
+		mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
+
+		if (dev->flags & IFF_ALLMULTI) {
+			/* Accept all multicast */
+			mvneta_set_special_mcast_table(pp, rxq_def);
+			mvneta_set_other_mcast_table(pp, rxq_def);
+		} else {
+			/* Accept only initialized multicast */
+			mvneta_set_special_mcast_table(pp, -1);
+			mvneta_set_other_mcast_table(pp, -1);
+
+			if (!netdev_mc_empty(dev)) {
+				netdev_for_each_mc_addr(ha, dev) {
+					mvneta_mcast_addr_set(pp, ha->addr,
+							      rxq_def);
+				}
+			}
+		}
+	}
+}
+
+/* Interrupt handling - the callback for request_irq() */
+static irqreturn_t mvneta_isr(int irq, void *dev_id)
+{
+	struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;
+
+	disable_percpu_irq(port->pp->dev->irq);
+	napi_schedule(&port->napi);
+
+	return IRQ_HANDLED;
+}
+
+static int mvneta_fixed_link_update(struct mvneta_port *pp,
+				    struct phy_device *phy)
+{
+	struct fixed_phy_status status;
+	struct fixed_phy_status changed = {};
+	u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
+
+	status.link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
+	if (gmac_stat & MVNETA_GMAC_SPEED_1000)
+		status.speed = SPEED_1000;
+	else if (gmac_stat & MVNETA_GMAC_SPEED_100)
+		status.speed = SPEED_100;
+	else
+		status.speed = SPEED_10;
+	status.duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
+	changed.link = 1;
+	changed.speed = 1;
+	changed.duplex = 1;
+	fixed_phy_update_state(phy, &status, &changed);
+	return 0;
+}
+
+/* NAPI handler
+ * Bits 0 - 7 of the causeRxTx register indicate that packets were
+ * transmitted on the corresponding TXQ (Bit 0 is for TX queue 1).
+ * Bits 8 - 15 of the causeRxTx register indicate that packets were
+ * received on the corresponding RXQ (Bit 8 is for RX queue 0).
+ * Each CPU has its own causeRxTx register
+ */
+static int mvneta_poll(struct napi_struct *napi, int budget)
+{
+	int rx_done = 0;
+	u32 cause_rx_tx;
+	struct mvneta_port *pp = netdev_priv(napi->dev);
+	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
+
+	if (!netif_running(pp->dev)) {
+		napi_complete(&port->napi);
+		return rx_done;
+	}
+
+	/* Read cause register */
+	cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
+	if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) {
+		u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);
+
+		mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
+		if (pp->use_inband_status && (cause_misc &
+				(MVNETA_CAUSE_PHY_STATUS_CHANGE |
+				 MVNETA_CAUSE_LINK_CHANGE |
+				 MVNETA_CAUSE_PSC_SYNC_CHANGE))) {
+			mvneta_fixed_link_update(pp, pp->phy_dev);
+		}
+	}
+
+	/* Release Tx descriptors */
+	if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
+		mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
+		cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
+	}
+
+	/* For the case where the last mvneta_poll did not process all
+	 * RX packets
+	 */
+	cause_rx_tx |= port->cause_rx_tx;
+	rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);
+	budget -= rx_done;
+
+	if (budget > 0) {
+		cause_rx_tx = 0;
+		napi_complete(&port->napi);
+		enable_percpu_irq(pp->dev->irq, 0);
+	}
+
+	port->cause_rx_tx = cause_rx_tx;
+	return rx_done;
+}
+
+/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
+static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
+			   int num)
+{
+	int i;
+
+	for (i = 0; i < num; i++) {
+		memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
+		if (mvneta_rx_refill(pp, rxq->descs + i) != 0) {
+			netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs  filled\n",
+				__func__, rxq->id, i, num);
+			break;
+		}
+	}
+
+	/* Add this number of RX descriptors as non occupied (ready to
+	 * get packets)
+	 */
+	mvneta_rxq_non_occup_desc_add(pp, rxq, i);
+
+	return i;
+}
+
+/* Free all packets pending transmit from all TXQs and reset TX port */
+static void mvneta_tx_reset(struct mvneta_port *pp)
+{
+	int queue;
+
+	/* free the skb's in the tx ring */
+	for (queue = 0; queue < txq_number; queue++)
+		mvneta_txq_done_force(pp, &pp->txqs[queue]);
+
+	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
+	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
+}
+
+static void mvneta_rx_reset(struct mvneta_port *pp)
+{
+	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
+	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
+}
+
+/* Rx/Tx queue initialization/cleanup methods */
+
+/* Create a specified RX queue */
+static int mvneta_rxq_init(struct mvneta_port *pp,
+			   struct mvneta_rx_queue *rxq)
+{
+	rxq->size = pp->rx_ring_size;
+
+	/* Allocate memory for RX descriptors */
+	rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
+					rxq->size * MVNETA_DESC_ALIGNED_SIZE,
+					&rxq->descs_phys, GFP_KERNEL);
+	if (rxq->descs == NULL)
+		return -ENOMEM;
+
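+	/* Make sure the descriptor address is cache line size aligned */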
+	BUG_ON(rxq->descs !=
+	       PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
+
+	rxq->last_desc = rxq->size - 1;
+
+	/* Set Rx descriptors queue starting address */
+	mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
+	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
+
+	/* Set Offset */
+	mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD);
+
+	/* Set coalescing pkts and time */
+	mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
+	mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
+
+	/* Fill RXQ with buffers from RX pool */
+	mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size));
+	mvneta_rxq_bm_disable(pp, rxq);
+	mvneta_rxq_fill(pp, rxq, rxq->size);
+
+	return 0;
+}
+
+/* Cleanup Rx queue */
+static void mvneta_rxq_deinit(struct mvneta_port *pp,
+			      struct mvneta_rx_queue *rxq)
+{
+	mvneta_rxq_drop_pkts(pp, rxq);
+
+	if (rxq->descs)
+		dma_free_coherent(pp->dev->dev.parent,
+				  rxq->size * MVNETA_DESC_ALIGNED_SIZE,
+				  rxq->descs,
+				  rxq->descs_phys);
+
+	rxq->descs             = NULL;
+	rxq->last_desc         = 0;
+	rxq->next_desc_to_proc = 0;
+	rxq->descs_phys        = 0;
+}
+
+/* Create and initialize a tx queue */
+static int mvneta_txq_init(struct mvneta_port *pp,
+			   struct mvneta_tx_queue *txq)
+{
+	txq->size = pp->tx_ring_size;
+
+	/* A queue must always have room for at least one skb.
+	 * Therefore, stop the queue when the number of free entries
+	 * drops to the maximum number of descriptors per skb.
+	 */
+	txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
+	txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
+
+	/* Allocate memory for TX descriptors */
+	txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
+					txq->size * MVNETA_DESC_ALIGNED_SIZE,
+					&txq->descs_phys, GFP_KERNEL);
+	if (txq->descs == NULL)
+		return -ENOMEM;
+
+	/* Make sure descriptor address is cache line size aligned  */
+	BUG_ON(txq->descs !=
+	       PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
+
+	txq->last_desc = txq->size - 1;
+
+	/* Set maximum bandwidth for enabled TXQs */
+	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
+	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
+
+	/* Set Tx descriptors queue starting address */
+	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
+	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
+
+	txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL);
+	if (txq->tx_skb == NULL) {
+		dma_free_coherent(pp->dev->dev.parent,
+				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
+				  txq->descs, txq->descs_phys);
+		return -ENOMEM;
+	}
+
+	/* Allocate DMA buffers for TSO MAC/IP/TCP headers */
+	txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
+					   txq->size * TSO_HEADER_SIZE,
+					   &txq->tso_hdrs_phys, GFP_KERNEL);
+	if (txq->tso_hdrs == NULL) {
+		kfree(txq->tx_skb);
+		dma_free_coherent(pp->dev->dev.parent,
+				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
+				  txq->descs, txq->descs_phys);
+		return -ENOMEM;
+	}
+	mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
+
+	return 0;
+}
+
+/* Free a TX queue's allocated resources; also used when
+ * mvneta_txq_init() fails to allocate memory
+ */
+static void mvneta_txq_deinit(struct mvneta_port *pp,
+			      struct mvneta_tx_queue *txq)
+{
+	kfree(txq->tx_skb);
+
+	if (txq->tso_hdrs)
+		dma_free_coherent(pp->dev->dev.parent,
+				  txq->size * TSO_HEADER_SIZE,
+				  txq->tso_hdrs, txq->tso_hdrs_phys);
+	if (txq->descs)
+		dma_free_coherent(pp->dev->dev.parent,
+				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
+				  txq->descs, txq->descs_phys);
+
+	txq->descs             = NULL;
+	txq->last_desc         = 0;
+	txq->next_desc_to_proc = 0;
+	txq->descs_phys        = 0;
+
+	/* Set minimum bandwidth for disabled TXQs */
+	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
+	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
+
+	/* Set Tx descriptors queue starting address and size */
+	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
+	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
+}
+
+/* Cleanup all Tx queues */
+static void mvneta_cleanup_txqs(struct mvneta_port *pp)
+{
+	int queue;
+
+	for (queue = 0; queue < txq_number; queue++)
+		mvneta_txq_deinit(pp, &pp->txqs[queue]);
+}
+
+/* Cleanup all Rx queues */
+static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
+{
+	mvneta_rxq_deinit(pp, &pp->rxqs[rxq_def]);
+}
+
+/* Init all Rx queues */
+static int mvneta_setup_rxqs(struct mvneta_port *pp)
+{
+	int err = mvneta_rxq_init(pp, &pp->rxqs[rxq_def]);
+	if (err) {
+		netdev_err(pp->dev, "%s: can't create rxq=%d\n",
+			   __func__, rxq_def);
+		mvneta_cleanup_rxqs(pp);
+		return err;
+	}
+
+	return 0;
+}
+
+/* Init all tx queues */
+static int mvneta_setup_txqs(struct mvneta_port *pp)
+{
+	int queue;
+
+	for (queue = 0; queue < txq_number; queue++) {
+		int err = mvneta_txq_init(pp, &pp->txqs[queue]);
+		if (err) {
+			netdev_err(pp->dev, "%s: can't create txq=%d\n",
+				   __func__, queue);
+			mvneta_cleanup_txqs(pp);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+static void mvneta_start_dev(struct mvneta_port *pp)
+{
+	unsigned int cpu;
+
+	mvneta_max_rx_size_set(pp, pp->pkt_size);
+	mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
+
+	/* start the Rx/Tx activity */
+	mvneta_port_enable(pp);
+
+	/* Enable polling on the port */
+	for_each_online_cpu(cpu) {
+		struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
+
+		napi_enable(&port->napi);
+	}
+
+	/* Unmask interrupts */
+	mvreg_write(pp, MVNETA_INTR_NEW_MASK,
+		    MVNETA_RX_INTR_MASK(rxq_number) |
+		    MVNETA_TX_INTR_MASK(txq_number) |
+		    MVNETA_MISCINTR_INTR_MASK);
+	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
+		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
+		    MVNETA_CAUSE_LINK_CHANGE |
+		    MVNETA_CAUSE_PSC_SYNC_CHANGE);
+
+	phy_start(pp->phy_dev);
+	netif_tx_start_all_queues(pp->dev);
+}
+
+static void mvneta_stop_dev(struct mvneta_port *pp)
+{
+	unsigned int cpu;
+
+	phy_stop(pp->phy_dev);
+
+	for_each_online_cpu(cpu) {
+		struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
+
+		napi_disable(&port->napi);
+	}
+
+	netif_carrier_off(pp->dev);
+
+	mvneta_port_down(pp);
+	netif_tx_stop_all_queues(pp->dev);
+
+	/* Stop the port activity */
+	mvneta_port_disable(pp);
+
+	/* Clear all ethernet port interrupts */
+	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
+	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
+
+	/* Mask all ethernet port interrupts */
+	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
+	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
+	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
+
+	mvneta_tx_reset(pp);
+	mvneta_rx_reset(pp);
+}
+
+/* Return a valid (possibly rounded) MTU, or -EINVAL if it is too small */
+static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
+{
+	if (mtu < 68) {
+		netdev_err(dev, "cannot change mtu to less than 68\n");
+		return -EINVAL;
+	}
+
+	/* 9676 == 9700 - 20 and rounding to 8 */
+	if (mtu > 9676) {
+		netdev_info(dev, "Illegal MTU value %d, rounding to 9676\n", mtu);
+		mtu = 9676;
+	}
+
+	if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
+		netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
+			mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
+		mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
+	}
+
+	return mtu;
+}
+
+/* Change the device mtu */
+static int mvneta_change_mtu(struct net_device *dev, int mtu)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+	int ret;
+
+	mtu = mvneta_check_mtu_valid(dev, mtu);
+	if (mtu < 0)
+		return -EINVAL;
+
+	dev->mtu = mtu;
+
+	if (!netif_running(dev)) {
+		netdev_update_features(dev);
+		return 0;
+	}
+
+	/* The interface is running, so we have to force a
+	 * reallocation of the queues
+	 */
+	mvneta_stop_dev(pp);
+
+	mvneta_cleanup_txqs(pp);
+	mvneta_cleanup_rxqs(pp);
+
+	pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
+	pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
+	                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+	ret = mvneta_setup_rxqs(pp);
+	if (ret) {
+		netdev_err(dev, "unable to setup rxqs after MTU change\n");
+		return ret;
+	}
+
+	ret = mvneta_setup_txqs(pp);
+	if (ret) {
+		netdev_err(dev, "unable to setup txqs after MTU change\n");
+		return ret;
+	}
+
+	mvneta_start_dev(pp);
+	mvneta_port_up(pp);
+
+	netdev_update_features(dev);
+
+	return 0;
+}
+
+static netdev_features_t mvneta_fix_features(struct net_device *dev,
+					     netdev_features_t features)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+
+	if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
+		features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
+		netdev_info(dev,
+			    "Disable IP checksum for MTU greater than %dB\n",
+			    pp->tx_csum_limit);
+	}
+
+	return features;
+}
+
+/* Get mac address */
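+/* The four most significant bytes of the address live in
+ * MVNETA_MAC_ADDR_HIGH and the remaining two in the low 16 bits of
+ * MVNETA_MAC_ADDR_LOW; e.g. 00:11:22:33:44:55 reads back as
+ * high == 0x00112233 and (low & 0xffff) == 0x4455.
+ */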
+static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
+{
+	u32 mac_addr_l, mac_addr_h;
+
+	mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
+	mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
+	addr[0] = (mac_addr_h >> 24) & 0xFF;
+	addr[1] = (mac_addr_h >> 16) & 0xFF;
+	addr[2] = (mac_addr_h >> 8) & 0xFF;
+	addr[3] = mac_addr_h & 0xFF;
+	addr[4] = (mac_addr_l >> 8) & 0xFF;
+	addr[5] = mac_addr_l & 0xFF;
+}
+
+/* Handle setting mac address */
+static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+	struct sockaddr *sockaddr = addr;
+	int ret;
+
+	ret = eth_prepare_mac_addr_change(dev, addr);
+	if (ret < 0)
+		return ret;
+	/* Remove previous address table entry */
+	mvneta_mac_addr_set(pp, dev->dev_addr, -1);
+
+	/* Set new addr in hw */
+	mvneta_mac_addr_set(pp, sockaddr->sa_data, rxq_def);
+
+	eth_commit_mac_addr_change(dev, addr);
+	return 0;
+}
+
+static void mvneta_adjust_link(struct net_device *ndev)
+{
+	struct mvneta_port *pp = netdev_priv(ndev);
+	struct phy_device *phydev = pp->phy_dev;
+	int status_change = 0;
+
+	if (phydev->link) {
+		if ((pp->speed != phydev->speed) ||
+		    (pp->duplex != phydev->duplex)) {
+			u32 val;
+
+			val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+			val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
+				 MVNETA_GMAC_CONFIG_GMII_SPEED |
+				 MVNETA_GMAC_CONFIG_FULL_DUPLEX);
+
+			if (phydev->duplex)
+				val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
+
+			if (phydev->speed == SPEED_1000)
+				val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
+			else if (phydev->speed == SPEED_100)
+				val |= MVNETA_GMAC_CONFIG_MII_SPEED;
+
+			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+
+			pp->duplex = phydev->duplex;
+			pp->speed  = phydev->speed;
+		}
+	}
+
+	if (phydev->link != pp->link) {
+		if (!phydev->link) {
+			pp->duplex = -1;
+			pp->speed = 0;
+		}
+
+		pp->link = phydev->link;
+		status_change = 1;
+	}
+
+	if (status_change) {
+		if (phydev->link) {
+			if (!pp->use_inband_status) {
+				u32 val = mvreg_read(pp,
+						  MVNETA_GMAC_AUTONEG_CONFIG);
+				val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
+				val |= MVNETA_GMAC_FORCE_LINK_PASS;
+				mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
+					    val);
+			}
+			mvneta_port_up(pp);
+		} else {
+			if (!pp->use_inband_status) {
+				u32 val = mvreg_read(pp,
+						  MVNETA_GMAC_AUTONEG_CONFIG);
+				val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
+				val |= MVNETA_GMAC_FORCE_LINK_DOWN;
+				mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
+					    val);
+			}
+			mvneta_port_down(pp);
+		}
+		phy_print_status(phydev);
+	}
+}
+
+static int mvneta_mdio_probe(struct mvneta_port *pp)
+{
+	struct phy_device *phy_dev;
+
+	phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
+				 pp->phy_interface);
+	if (!phy_dev) {
+		netdev_err(pp->dev, "could not find the PHY\n");
+		return -ENODEV;
+	}
+
+	phy_dev->supported &= PHY_GBIT_FEATURES;
+	phy_dev->advertising = phy_dev->supported;
+
+	pp->phy_dev = phy_dev;
+	pp->link    = 0;
+	pp->duplex  = 0;
+	pp->speed   = 0;
+
+	return 0;
+}
+
+static void mvneta_mdio_remove(struct mvneta_port *pp)
+{
+	phy_disconnect(pp->phy_dev);
+	pp->phy_dev = NULL;
+}
+
+static void mvneta_percpu_enable(void *arg)
+{
+	struct mvneta_port *pp = arg;
+
+	enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
+}
+
+static void mvneta_percpu_disable(void *arg)
+{
+	struct mvneta_port *pp = arg;
+
+	disable_percpu_irq(pp->dev->irq);
+}
+
+static void mvneta_percpu_elect(struct mvneta_port *pp)
+{
+	int online_cpu_idx, cpu, i = 0;
+
+	online_cpu_idx = rxq_def % num_online_cpus();
+
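+	/* e.g. with the rxq_def module parameter left at its default of
+	 * 0, this always elects the first online CPU.
+	 */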
+	for_each_online_cpu(cpu) {
+		if (i == online_cpu_idx)
+			/* Enable per-CPU interrupt on the one CPU we
+			 * just elected
+			 */
+			smp_call_function_single(cpu, mvneta_percpu_enable,
+						pp, true);
+		else
+			/* Disable per-CPU interrupt on all the other CPUs */
+			smp_call_function_single(cpu, mvneta_percpu_disable,
+						pp, true);
+		i++;
+	}
+}
+
+static int mvneta_percpu_notifier(struct notifier_block *nfb,
+				  unsigned long action, void *hcpu)
+{
+	struct mvneta_port *pp = container_of(nfb, struct mvneta_port,
+					      cpu_notifier);
+	int cpu = (unsigned long)hcpu, other_cpu;
+	struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
+
+	switch (action) {
+	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
+		netif_tx_stop_all_queues(pp->dev);
+
+		/* We have to synchronise on the napi of each CPU
+		 * except the one just being woken up
+		 */
+		for_each_online_cpu(other_cpu) {
+			if (other_cpu != cpu) {
+				struct mvneta_pcpu_port *other_port =
+					per_cpu_ptr(pp->ports, other_cpu);
+
+				napi_synchronize(&other_port->napi);
+			}
+		}
+
+		/* Mask all ethernet port interrupts */
+		mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
+		mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
+		mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
+		napi_enable(&port->napi);
+
+		/* Enable per-CPU interrupt on the one CPU we care
+		 * about.
+		 */
+		mvneta_percpu_elect(pp);
+
+		/* Unmask all ethernet port interrupts */
+		mvreg_write(pp, MVNETA_INTR_NEW_MASK,
+			MVNETA_RX_INTR_MASK(rxq_number) |
+			MVNETA_TX_INTR_MASK(txq_number) |
+			MVNETA_MISCINTR_INTR_MASK);
+		mvreg_write(pp, MVNETA_INTR_MISC_MASK,
+			MVNETA_CAUSE_PHY_STATUS_CHANGE |
+			MVNETA_CAUSE_LINK_CHANGE |
+			MVNETA_CAUSE_PSC_SYNC_CHANGE);
+		netif_tx_start_all_queues(pp->dev);
+		break;
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+		netif_tx_stop_all_queues(pp->dev);
+		/* Mask all ethernet port interrupts */
+		mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
+		mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
+		mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
+
+		napi_synchronize(&port->napi);
+		napi_disable(&port->napi);
+		/* Disable per-CPU interrupts on the CPU that is
+		 * brought down.
+		 */
+		smp_call_function_single(cpu, mvneta_percpu_disable,
+					 pp, true);
+
+		break;
+	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
+		/* Check if a new CPU must be elected now that this one is down */
+		mvneta_percpu_elect(pp);
+		/* Unmask all ethernet port interrupts */
+		mvreg_write(pp, MVNETA_INTR_NEW_MASK,
+			MVNETA_RX_INTR_MASK(rxq_number) |
+			MVNETA_TX_INTR_MASK(txq_number) |
+			MVNETA_MISCINTR_INTR_MASK);
+		mvreg_write(pp, MVNETA_INTR_MISC_MASK,
+			MVNETA_CAUSE_PHY_STATUS_CHANGE |
+			MVNETA_CAUSE_LINK_CHANGE |
+			MVNETA_CAUSE_PSC_SYNC_CHANGE);
+		netif_tx_start_all_queues(pp->dev);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static int mvneta_open(struct net_device *dev)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+	int ret;
+
+	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
+	pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
+	                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+	ret = mvneta_setup_rxqs(pp);
+	if (ret)
+		return ret;
+
+	ret = mvneta_setup_txqs(pp);
+	if (ret)
+		goto err_cleanup_rxqs;
+
+	/* Connect to port interrupt line */
+	ret = request_percpu_irq(pp->dev->irq, mvneta_isr,
+				 MVNETA_DRIVER_NAME, pp->ports);
+	if (ret) {
+		netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
+		goto err_cleanup_txqs;
+	}
+
+	/* Even though the documentation says that request_percpu_irq
+	 * doesn't enable the interrupts automatically, it actually
+	 * does so on the local CPU.
+	 *
+	 * Make sure it's disabled.
+	 */
+	mvneta_percpu_disable(pp);
+
+	/* Elect a CPU to handle our RX queue interrupt */
+	mvneta_percpu_elect(pp);
+
+	/* Register a CPU notifier to handle the case where our CPU
+	 * might be taken offline.
+	 */
+	register_cpu_notifier(&pp->cpu_notifier);
+
+	/* By default, the link is down */
+	netif_carrier_off(pp->dev);
+
+	ret = mvneta_mdio_probe(pp);
+	if (ret < 0) {
+		netdev_err(dev, "cannot probe MDIO bus\n");
+		goto err_free_irq;
+	}
+
+	mvneta_start_dev(pp);
+
+	return 0;
+
+err_free_irq:
+	free_percpu_irq(pp->dev->irq, pp->ports);
+err_cleanup_txqs:
+	mvneta_cleanup_txqs(pp);
+err_cleanup_rxqs:
+	mvneta_cleanup_rxqs(pp);
+	return ret;
+}
+
+/* Stop the port, free port interrupt line */
+static int mvneta_stop(struct net_device *dev)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+
+	mvneta_stop_dev(pp);
+	mvneta_mdio_remove(pp);
+	unregister_cpu_notifier(&pp->cpu_notifier);
+	on_each_cpu(mvneta_percpu_disable, pp, true);
+	free_percpu_irq(dev->irq, pp->ports);
+	mvneta_cleanup_rxqs(pp);
+	mvneta_cleanup_txqs(pp);
+
+	return 0;
+}
+
+static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+
+	if (!pp->phy_dev)
+		return -ENOTSUPP;
+
+	return phy_mii_ioctl(pp->phy_dev, ifr, cmd);
+}
+
+/* Ethtool methods */
+
+/* Get settings (phy address, speed) for ethtools */
+int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+
+	if (!pp->phy_dev)
+		return -ENODEV;
+
+	return phy_ethtool_gset(pp->phy_dev, cmd);
+}
+
+/* Set settings (phy address, speed) for ethtools */
+int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+
+	if (!pp->phy_dev)
+		return -ENODEV;
+
+	return phy_ethtool_sset(pp->phy_dev, cmd);
+}
+
+/* Set interrupt coalescing for ethtools */
+static int mvneta_ethtool_set_coalesce(struct net_device *dev,
+				       struct ethtool_coalesce *c)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+	int queue;
+
+	for (queue = 0; queue < rxq_number; queue++) {
+		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
+		rxq->time_coal = c->rx_coalesce_usecs;
+		rxq->pkts_coal = c->rx_max_coalesced_frames;
+		mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
+		mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
+	}
+
+	for (queue = 0; queue < txq_number; queue++) {
+		struct mvneta_tx_queue *txq = &pp->txqs[queue];
+		txq->done_pkts_coal = c->tx_max_coalesced_frames;
+		mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
+	}
+
+	return 0;
+}
+
+/* get coalescing for ethtools */
+static int mvneta_ethtool_get_coalesce(struct net_device *dev,
+				       struct ethtool_coalesce *c)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+
+	c->rx_coalesce_usecs        = pp->rxqs[0].time_coal;
+	c->rx_max_coalesced_frames  = pp->rxqs[0].pkts_coal;
+
+	c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
+	return 0;
+}
+
+static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
+				    struct ethtool_drvinfo *drvinfo)
+{
+	strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
+		sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
+		sizeof(drvinfo->version));
+	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
+		sizeof(drvinfo->bus_info));
+}
+
+static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
+					 struct ethtool_ringparam *ring)
+{
+	struct mvneta_port *pp = netdev_priv(netdev);
+
+	ring->rx_max_pending = MVNETA_MAX_RXD;
+	ring->tx_max_pending = MVNETA_MAX_TXD;
+	ring->rx_pending = pp->rx_ring_size;
+	ring->tx_pending = pp->tx_ring_size;
+}
+
+static int mvneta_ethtool_set_ringparam(struct net_device *dev,
+					struct ethtool_ringparam *ring)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+
+	if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
+		return -EINVAL;
+	pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
+		ring->rx_pending : MVNETA_MAX_RXD;
+
+	pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
+				   MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
+	if (pp->tx_ring_size != ring->tx_pending)
+		netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
+			    pp->tx_ring_size, ring->tx_pending);
+
+	if (netif_running(dev)) {
+		mvneta_stop(dev);
+		if (mvneta_open(dev)) {
+			netdev_err(dev,
+				   "error on opening device after ring param change\n");
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+
+static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
+				       u8 *data)
+{
+	if (sset == ETH_SS_STATS) {
+		int i;
+
+		for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
+			memcpy(data + i * ETH_GSTRING_LEN,
+			       mvneta_statistics[i].name, ETH_GSTRING_LEN);
+	}
+}
+
+static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
+{
+	const struct mvneta_statistic *s;
+	void __iomem *base = pp->base;
+	u32 high, low;
+	u64 val;
+	int i;
+
+	for (i = 0, s = mvneta_statistics;
+	     s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
+	     s++, i++) {
+		val = 0;
+
+		switch (s->type) {
+		case T_REG_32:
+			val = readl_relaxed(base + s->offset);
+			break;
+		case T_REG_64:
+			/* Docs say to read low 32-bit then high */
+			low = readl_relaxed(base + s->offset);
+			high = readl_relaxed(base + s->offset + 4);
+			val = (u64)high << 32 | low;
+			break;
+		}
+
+		pp->ethtool_stats[i] += val;
+	}
+}
+
+static void mvneta_ethtool_get_stats(struct net_device *dev,
+				     struct ethtool_stats *stats, u64 *data)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+	int i;
+
+	mvneta_ethtool_update_stats(pp);
+
+	for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
+		*data++ = pp->ethtool_stats[i];
+}
+
+static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
+{
+	if (sset == ETH_SS_STATS)
+		return ARRAY_SIZE(mvneta_statistics);
+	return -EOPNOTSUPP;
+}
+
+static const struct net_device_ops mvneta_netdev_ops = {
+	.ndo_open            = mvneta_open,
+	.ndo_stop            = mvneta_stop,
+	.ndo_start_xmit      = mvneta_tx,
+	.ndo_set_rx_mode     = mvneta_set_rx_mode,
+	.ndo_set_mac_address = mvneta_set_mac_addr,
+	.ndo_change_mtu      = mvneta_change_mtu,
+	.ndo_fix_features    = mvneta_fix_features,
+	.ndo_get_stats64     = mvneta_get_stats64,
+	.ndo_do_ioctl        = mvneta_ioctl,
+};
+
+const struct ethtool_ops mvneta_eth_tool_ops = {
+	.get_link       = ethtool_op_get_link,
+	.get_settings   = mvneta_ethtool_get_settings,
+	.set_settings   = mvneta_ethtool_set_settings,
+	.set_coalesce   = mvneta_ethtool_set_coalesce,
+	.get_coalesce   = mvneta_ethtool_get_coalesce,
+	.get_drvinfo    = mvneta_ethtool_get_drvinfo,
+	.get_ringparam  = mvneta_ethtool_get_ringparam,
+	.set_ringparam	= mvneta_ethtool_set_ringparam,
+	.get_strings	= mvneta_ethtool_get_strings,
+	.get_ethtool_stats = mvneta_ethtool_get_stats,
+	.get_sset_count	= mvneta_ethtool_get_sset_count,
+};
+
+/* Initialize hw */
+static int mvneta_init(struct device *dev, struct mvneta_port *pp)
+{
+	int queue;
+
+	/* Disable port */
+	mvneta_port_disable(pp);
+
+	/* Set port default values */
+	mvneta_defaults_set(pp);
+
+	pp->txqs = devm_kcalloc(dev, txq_number, sizeof(struct mvneta_tx_queue),
+				GFP_KERNEL);
+	if (!pp->txqs)
+		return -ENOMEM;
+
+	/* Initialize TX descriptor rings */
+	for (queue = 0; queue < txq_number; queue++) {
+		struct mvneta_tx_queue *txq = &pp->txqs[queue];
+		txq->id = queue;
+		txq->size = pp->tx_ring_size;
+		txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
+	}
+
+	pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(struct mvneta_rx_queue),
+				GFP_KERNEL);
+	if (!pp->rxqs)
+		return -ENOMEM;
+
+	/* Create Rx descriptor rings */
+	for (queue = 0; queue < rxq_number; queue++) {
+		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
+		rxq->id = queue;
+		rxq->size = pp->rx_ring_size;
+		rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
+		rxq->time_coal = MVNETA_RX_COAL_USEC;
+	}
+
+	return 0;
+}
+
+/* platform glue : initialize decoding windows */
+static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
+				     const struct mbus_dram_target_info *dram)
+{
+	u32 win_enable;
+	u32 win_protect;
+	int i;
+
+	for (i = 0; i < 6; i++) {
+		mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
+		mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
+
+		if (i < 4)
+			mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
+	}
+
+	win_enable = 0x3f;
+	win_protect = 0;
+
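+	/* A bit set in MVNETA_BASE_ADDR_ENABLE leaves the corresponding
+	 * window disabled, so start from all six windows disabled (0x3f)
+	 * and clear one bit per DRAM chip-select window configured below.
+	 */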
+	for (i = 0; i < dram->num_cs; i++) {
+		const struct mbus_dram_window *cs = dram->cs + i;
+		mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) |
+			    (cs->mbus_attr << 8) | dram->mbus_dram_target_id);
+
+		mvreg_write(pp, MVNETA_WIN_SIZE(i),
+			    (cs->size - 1) & 0xffff0000);
+
+		win_enable &= ~(1 << i);
+		win_protect |= 3 << (2 * i);
+	}
+
+	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
+	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
+}
+
+/* Power up the port */
+static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
+{
+	u32 ctrl;
+
+	/* MAC Cause register should be cleared */
+	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
+
+	ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
+
+	/* Even though it might look weird, when we're configured in
+	 * SGMII or QSGMII mode, the RGMII bit needs to be set.
+	 */
+	switch (phy_mode) {
+	case PHY_INTERFACE_MODE_QSGMII:
+		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
+		ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
+		break;
+	case PHY_INTERFACE_MODE_SGMII:
+		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
+		ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
+		break;
+	case PHY_INTERFACE_MODE_RGMII:
+	case PHY_INTERFACE_MODE_RGMII_ID:
+		ctrl |= MVNETA_GMAC2_PORT_RGMII;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (pp->use_inband_status)
+		ctrl |= MVNETA_GMAC2_INBAND_AN_ENABLE;
+
+	/* Cancel Port Reset */
+	ctrl &= ~MVNETA_GMAC2_PORT_RESET;
+	mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);
+
+	while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
+		MVNETA_GMAC2_PORT_RESET) != 0)
+		continue;
+
+	return 0;
+}
+
+/* Device initialization routine */
+static int mvneta_probe(struct platform_device *pdev)
+{
+	const struct mbus_dram_target_info *dram_target_info;
+	struct resource *res;
+	struct device_node *dn = pdev->dev.of_node;
+	struct device_node *phy_node;
+	struct mvneta_port *pp;
+	struct net_device *dev;
+	const char *dt_mac_addr;
+	char hw_mac_addr[ETH_ALEN];
+	const char *mac_from;
+	const char *managed;
+	int tx_csum_limit;
+	int phy_mode;
+	int err;
+	int cpu;
+
+	dev = alloc_etherdev_mqs(sizeof(struct mvneta_port),
+				 txq_number, rxq_number);
+	if (!dev)
+		return -ENOMEM;
+
+	dev->irq = irq_of_parse_and_map(dn, 0);
+	if (dev->irq == 0) {
+		err = -EINVAL;
+		goto err_free_netdev;
+	}
+
+	phy_node = of_parse_phandle(dn, "phy", 0);
+	if (!phy_node) {
+		if (!of_phy_is_fixed_link(dn)) {
+			dev_err(&pdev->dev, "no PHY specified\n");
+			err = -ENODEV;
+			goto err_free_irq;
+		}
+
+		err = of_phy_register_fixed_link(dn);
+		if (err < 0) {
+			dev_err(&pdev->dev, "cannot register fixed PHY\n");
+			goto err_free_irq;
+		}
+
+		/* In the case of a fixed PHY, the DT node associated
+		 * to the PHY is the Ethernet MAC DT node.
+		 */
+		phy_node = of_node_get(dn);
+	}
+
+	phy_mode = of_get_phy_mode(dn);
+	if (phy_mode < 0) {
+		dev_err(&pdev->dev, "incorrect phy-mode\n");
+		err = -EINVAL;
+		goto err_put_phy_node;
+	}
+
+	dev->tx_queue_len = MVNETA_MAX_TXD;
+	dev->watchdog_timeo = 5 * HZ;
+	dev->netdev_ops = &mvneta_netdev_ops;
+
+	dev->ethtool_ops = &mvneta_eth_tool_ops;
+
+	pp = netdev_priv(dev);
+	pp->phy_node = phy_node;
+	pp->phy_interface = phy_mode;
+
+	err = of_property_read_string(dn, "managed", &managed);
+	pp->use_inband_status = (err == 0 &&
+				 strcmp(managed, "in-band-status") == 0);
+	pp->cpu_notifier.notifier_call = mvneta_percpu_notifier;
+
+	pp->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(pp->clk)) {
+		err = PTR_ERR(pp->clk);
+		goto err_put_phy_node;
+	}
+
+	clk_prepare_enable(pp->clk);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	pp->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(pp->base)) {
+		err = PTR_ERR(pp->base);
+		goto err_clk;
+	}
+
+	/* Alloc per-cpu port structure */
+	pp->ports = alloc_percpu(struct mvneta_pcpu_port);
+	if (!pp->ports) {
+		err = -ENOMEM;
+		goto err_clk;
+	}
+
+	/* Alloc per-cpu stats */
+	pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
+	if (!pp->stats) {
+		err = -ENOMEM;
+		goto err_free_ports;
+	}
+
+	dt_mac_addr = of_get_mac_address(dn);
+	if (dt_mac_addr) {
+		mac_from = "device tree";
+		memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
+	} else {
+		mvneta_get_mac_addr(pp, hw_mac_addr);
+		if (is_valid_ether_addr(hw_mac_addr)) {
+			mac_from = "hardware";
+			memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
+		} else {
+			mac_from = "random";
+			eth_hw_addr_random(dev);
+		}
+	}
+
+	if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) {
+		if (tx_csum_limit < 0 ||
+		    tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) {
+			tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
+			dev_info(&pdev->dev,
+				 "Wrong TX csum limit in DT, set to %dB\n",
+				 MVNETA_TX_CSUM_DEF_SIZE);
+		}
+	} else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) {
+		tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
+	} else {
+		tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE;
+	}
+
+	pp->tx_csum_limit = tx_csum_limit;
+
+	pp->tx_ring_size = MVNETA_MAX_TXD;
+	pp->rx_ring_size = MVNETA_MAX_RXD;
+
+	pp->dev = dev;
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	err = mvneta_init(&pdev->dev, pp);
+	if (err < 0)
+		goto err_free_stats;
+
+	err = mvneta_port_power_up(pp, phy_mode);
+	if (err < 0) {
+		dev_err(&pdev->dev, "can't power up port\n");
+		goto err_free_stats;
+	}
+
+	dram_target_info = mv_mbus_dram_info();
+	if (dram_target_info)
+		mvneta_conf_mbus_windows(pp, dram_target_info);
+
+	for_each_present_cpu(cpu) {
+		struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
+
+		netif_napi_add(dev, &port->napi, mvneta_poll, NAPI_POLL_WEIGHT);
+		port->pp = pp;
+	}
+
+	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
+	dev->hw_features |= dev->features;
+	dev->vlan_features |= dev->features;
+	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
+	dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
+
+	err = register_netdev(dev);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to register\n");
+		goto err_free_stats;
+	}
+
+	netdev_info(dev, "Using %s mac address %pM\n", mac_from,
+		    dev->dev_addr);
+
+	platform_set_drvdata(pdev, pp->dev);
+
+	if (pp->use_inband_status) {
+		struct phy_device *phy = of_phy_find_device(dn);
+
+		mvneta_fixed_link_update(pp, phy);
+
+		put_device(&phy->dev);
+	}
+
+	return 0;
+
+err_free_stats:
+	free_percpu(pp->stats);
+err_free_ports:
+	free_percpu(pp->ports);
+err_clk:
+	clk_disable_unprepare(pp->clk);
+err_put_phy_node:
+	of_node_put(phy_node);
+err_free_irq:
+	irq_dispose_mapping(dev->irq);
+err_free_netdev:
+	free_netdev(dev);
+	return err;
+}
+
+/* Device removal routine */
+static int mvneta_remove(struct platform_device *pdev)
+{
+	struct net_device  *dev = platform_get_drvdata(pdev);
+	struct mvneta_port *pp = netdev_priv(dev);
+
+	unregister_netdev(dev);
+	clk_disable_unprepare(pp->clk);
+	free_percpu(pp->ports);
+	free_percpu(pp->stats);
+	irq_dispose_mapping(dev->irq);
+	of_node_put(pp->phy_node);
+	free_netdev(dev);
+
+	return 0;
+}
+
+static const struct of_device_id mvneta_match[] = {
+	{ .compatible = "marvell,armada-370-neta" },
+	{ .compatible = "marvell,armada-xp-neta" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, mvneta_match);
+
+static struct platform_driver mvneta_driver = {
+	.probe = mvneta_probe,
+	.remove = mvneta_remove,
+	.driver = {
+		.name = MVNETA_DRIVER_NAME,
+		.of_match_table = mvneta_match,
+	},
+};
+
+module_platform_driver(mvneta_driver);
+
+MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
+MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
+MODULE_LICENSE("GPL");
+
+module_param(rxq_number, int, S_IRUGO);
+module_param(txq_number, int, S_IRUGO);
+
+module_param(rxq_def, int, S_IRUGO);
+module_param(rx_copybreak, int, S_IRUGO | S_IWUSR);
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
new file mode 100644
index 0000000..4f34e1b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -0,0 +1,6539 @@
+/*
+ * Driver for Marvell PPv2 network controller for Armada 375 SoC.
+ *
+ * Copyright (C) 2014 Marvell
+ *
+ * Marcin Wojtas <mw@semihalf.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <linux/inetdevice.h>
+#include <linux/mbus.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/cpumask.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/phy.h>
+#include <linux/clk.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <uapi/linux/ppp_defs.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+/* RX Fifo Registers */
+#define MVPP2_RX_DATA_FIFO_SIZE_REG(port)	(0x00 + 4 * (port))
+#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port)	(0x20 + 4 * (port))
+#define MVPP2_RX_MIN_PKT_SIZE_REG		0x60
+#define MVPP2_RX_FIFO_INIT_REG			0x64
+
+/* RX DMA Top Registers */
+#define MVPP2_RX_CTRL_REG(port)			(0x140 + 4 * (port))
+#define     MVPP2_RX_LOW_LATENCY_PKT_SIZE(s)	(((s) & 0xfff) << 16)
+#define     MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK	BIT(31)
+#define MVPP2_POOL_BUF_SIZE_REG(pool)		(0x180 + 4 * (pool))
+#define     MVPP2_POOL_BUF_SIZE_OFFSET		5
+#define MVPP2_RXQ_CONFIG_REG(rxq)		(0x800 + 4 * (rxq))
+#define     MVPP2_SNOOP_PKT_SIZE_MASK		0x1ff
+#define     MVPP2_SNOOP_BUF_HDR_MASK		BIT(9)
+#define     MVPP2_RXQ_POOL_SHORT_OFFS		20
+#define     MVPP2_RXQ_POOL_SHORT_MASK		0x700000
+#define     MVPP2_RXQ_POOL_LONG_OFFS		24
+#define     MVPP2_RXQ_POOL_LONG_MASK		0x7000000
+#define     MVPP2_RXQ_PACKET_OFFSET_OFFS	28
+#define     MVPP2_RXQ_PACKET_OFFSET_MASK	0x70000000
+#define     MVPP2_RXQ_DISABLE_MASK		BIT(31)
+
+/* Parser Registers */
+#define MVPP2_PRS_INIT_LOOKUP_REG		0x1000
+#define     MVPP2_PRS_PORT_LU_MAX		0xf
+#define     MVPP2_PRS_PORT_LU_MASK(port)	(0xff << ((port) * 4))
+#define     MVPP2_PRS_PORT_LU_VAL(port, val)	((val) << ((port) * 4))
+#define MVPP2_PRS_INIT_OFFS_REG(port)		(0x1004 + ((port) & 4))
+#define     MVPP2_PRS_INIT_OFF_MASK(port)	(0x3f << (((port) % 4) * 8))
+#define     MVPP2_PRS_INIT_OFF_VAL(port, val)	((val) << (((port) % 4) * 8))
+#define MVPP2_PRS_MAX_LOOP_REG(port)		(0x100c + ((port) & 4))
+#define     MVPP2_PRS_MAX_LOOP_MASK(port)	(0xff << (((port) % 4) * 8))
+#define     MVPP2_PRS_MAX_LOOP_VAL(port, val)	((val) << (((port) % 4) * 8))
+#define MVPP2_PRS_TCAM_IDX_REG			0x1100
+#define MVPP2_PRS_TCAM_DATA_REG(idx)		(0x1104 + (idx) * 4)
+#define     MVPP2_PRS_TCAM_INV_MASK		BIT(31)
+#define MVPP2_PRS_SRAM_IDX_REG			0x1200
+#define MVPP2_PRS_SRAM_DATA_REG(idx)		(0x1204 + (idx) * 4)
+#define MVPP2_PRS_TCAM_CTRL_REG			0x1230
+#define     MVPP2_PRS_TCAM_EN_MASK		BIT(0)
+
+/* Classifier Registers */
+#define MVPP2_CLS_MODE_REG			0x1800
+#define     MVPP2_CLS_MODE_ACTIVE_MASK		BIT(0)
+#define MVPP2_CLS_PORT_WAY_REG			0x1810
+#define     MVPP2_CLS_PORT_WAY_MASK(port)	(1 << (port))
+#define MVPP2_CLS_LKP_INDEX_REG			0x1814
+#define     MVPP2_CLS_LKP_INDEX_WAY_OFFS	6
+#define MVPP2_CLS_LKP_TBL_REG			0x1818
+#define     MVPP2_CLS_LKP_TBL_RXQ_MASK		0xff
+#define     MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK	BIT(25)
+#define MVPP2_CLS_FLOW_INDEX_REG		0x1820
+#define MVPP2_CLS_FLOW_TBL0_REG			0x1824
+#define MVPP2_CLS_FLOW_TBL1_REG			0x1828
+#define MVPP2_CLS_FLOW_TBL2_REG			0x182c
+#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port)	(0x1980 + ((port) * 4))
+#define     MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS	3
+#define     MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK	0x7
+#define MVPP2_CLS_SWFWD_P2HQ_REG(port)		(0x19b0 + ((port) * 4))
+#define MVPP2_CLS_SWFWD_PCTRL_REG		0x19d0
+#define     MVPP2_CLS_SWFWD_PCTRL_MASK(port)	(1 << (port))
+
+/* Descriptor Manager Top Registers */
+#define MVPP2_RXQ_NUM_REG			0x2040
+#define MVPP2_RXQ_DESC_ADDR_REG			0x2044
+#define MVPP2_RXQ_DESC_SIZE_REG			0x2048
+#define     MVPP2_RXQ_DESC_SIZE_MASK		0x3ff0
+#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq)	(0x3000 + 4 * (rxq))
+#define     MVPP2_RXQ_NUM_PROCESSED_OFFSET	0
+#define     MVPP2_RXQ_NUM_NEW_OFFSET		16
+#define MVPP2_RXQ_STATUS_REG(rxq)		(0x3400 + 4 * (rxq))
+#define     MVPP2_RXQ_OCCUPIED_MASK		0x3fff
+#define     MVPP2_RXQ_NON_OCCUPIED_OFFSET	16
+#define     MVPP2_RXQ_NON_OCCUPIED_MASK		0x3fff0000
+#define MVPP2_RXQ_THRESH_REG			0x204c
+#define     MVPP2_OCCUPIED_THRESH_OFFSET	0
+#define     MVPP2_OCCUPIED_THRESH_MASK		0x3fff
+#define MVPP2_RXQ_INDEX_REG			0x2050
+#define MVPP2_TXQ_NUM_REG			0x2080
+#define MVPP2_TXQ_DESC_ADDR_REG			0x2084
+#define MVPP2_TXQ_DESC_SIZE_REG			0x2088
+#define     MVPP2_TXQ_DESC_SIZE_MASK		0x3ff0
+#define MVPP2_AGGR_TXQ_UPDATE_REG		0x2090
+#define MVPP2_TXQ_THRESH_REG			0x2094
+#define     MVPP2_TRANSMITTED_THRESH_OFFSET	16
+#define     MVPP2_TRANSMITTED_THRESH_MASK	0x3fff0000
+#define MVPP2_TXQ_INDEX_REG			0x2098
+#define MVPP2_TXQ_PREF_BUF_REG			0x209c
+#define     MVPP2_PREF_BUF_PTR(desc)		((desc) & 0xfff)
+#define     MVPP2_PREF_BUF_SIZE_4		(BIT(12) | BIT(13))
+#define     MVPP2_PREF_BUF_SIZE_16		(BIT(12) | BIT(14))
+#define     MVPP2_PREF_BUF_THRESH(val)		((val) << 17)
+#define     MVPP2_TXQ_DRAIN_EN_MASK		BIT(31)
+#define MVPP2_TXQ_PENDING_REG			0x20a0
+#define     MVPP2_TXQ_PENDING_MASK		0x3fff
+#define MVPP2_TXQ_INT_STATUS_REG		0x20a4
+#define MVPP2_TXQ_SENT_REG(txq)			(0x3c00 + 4 * (txq))
+#define     MVPP2_TRANSMITTED_COUNT_OFFSET	16
+#define     MVPP2_TRANSMITTED_COUNT_MASK	0x3fff0000
+#define MVPP2_TXQ_RSVD_REQ_REG			0x20b0
+#define     MVPP2_TXQ_RSVD_REQ_Q_OFFSET		16
+#define MVPP2_TXQ_RSVD_RSLT_REG			0x20b4
+#define     MVPP2_TXQ_RSVD_RSLT_MASK		0x3fff
+#define MVPP2_TXQ_RSVD_CLR_REG			0x20b8
+#define     MVPP2_TXQ_RSVD_CLR_OFFSET		16
+#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu)	(0x2100 + 4 * (cpu))
+#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu)	(0x2140 + 4 * (cpu))
+#define     MVPP2_AGGR_TXQ_DESC_SIZE_MASK	0x3ff0
+#define MVPP2_AGGR_TXQ_STATUS_REG(cpu)		(0x2180 + 4 * (cpu))
+#define     MVPP2_AGGR_TXQ_PENDING_MASK		0x3fff
+#define MVPP2_AGGR_TXQ_INDEX_REG(cpu)		(0x21c0 + 4 * (cpu))
+
+/* MBUS bridge registers */
+#define MVPP2_WIN_BASE(w)			(0x4000 + ((w) << 2))
+#define MVPP2_WIN_SIZE(w)			(0x4020 + ((w) << 2))
+#define MVPP2_WIN_REMAP(w)			(0x4040 + ((w) << 2))
+#define MVPP2_BASE_ADDR_ENABLE			0x4060
+
+/* Interrupt Cause and Mask registers */
+#define MVPP2_ISR_RX_THRESHOLD_REG(rxq)		(0x5200 + 4 * (rxq))
+#define MVPP2_ISR_RXQ_GROUP_REG(rxq)		(0x5400 + 4 * (rxq))
+#define MVPP2_ISR_ENABLE_REG(port)		(0x5420 + 4 * (port))
+#define     MVPP2_ISR_ENABLE_INTERRUPT(mask)	((mask) & 0xffff)
+#define     MVPP2_ISR_DISABLE_INTERRUPT(mask)	(((mask) << 16) & 0xffff0000)
+#define MVPP2_ISR_RX_TX_CAUSE_REG(port)		(0x5480 + 4 * (port))
+#define     MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
+#define     MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK	0xff0000
+#define     MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK	BIT(24)
+#define     MVPP2_CAUSE_FCS_ERR_MASK		BIT(25)
+#define     MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK	BIT(26)
+#define     MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK	BIT(29)
+#define     MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK	BIT(30)
+#define     MVPP2_CAUSE_MISC_SUM_MASK		BIT(31)
+#define MVPP2_ISR_RX_TX_MASK_REG(port)		(0x54a0 + 4 * (port))
+#define MVPP2_ISR_PON_RX_TX_MASK_REG		0x54bc
+#define     MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
+#define     MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK	0x3fc00000
+#define     MVPP2_PON_CAUSE_MISC_SUM_MASK		BIT(31)
+#define MVPP2_ISR_MISC_CAUSE_REG		0x55b0
+
+/* Buffer Manager registers */
+#define MVPP2_BM_POOL_BASE_REG(pool)		(0x6000 + ((pool) * 4))
+#define     MVPP2_BM_POOL_BASE_ADDR_MASK	0xfffff80
+#define MVPP2_BM_POOL_SIZE_REG(pool)		(0x6040 + ((pool) * 4))
+#define     MVPP2_BM_POOL_SIZE_MASK		0xfff0
+#define MVPP2_BM_POOL_READ_PTR_REG(pool)	(0x6080 + ((pool) * 4))
+#define     MVPP2_BM_POOL_GET_READ_PTR_MASK	0xfff0
+#define MVPP2_BM_POOL_PTRS_NUM_REG(pool)	(0x60c0 + ((pool) * 4))
+#define     MVPP2_BM_POOL_PTRS_NUM_MASK		0xfff0
+#define MVPP2_BM_BPPI_READ_PTR_REG(pool)	(0x6100 + ((pool) * 4))
+#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool)	(0x6140 + ((pool) * 4))
+#define     MVPP2_BM_BPPI_PTR_NUM_MASK		0x7ff
+#define     MVPP2_BM_BPPI_PREFETCH_FULL_MASK	BIT(16)
+#define MVPP2_BM_POOL_CTRL_REG(pool)		(0x6200 + ((pool) * 4))
+#define     MVPP2_BM_START_MASK			BIT(0)
+#define     MVPP2_BM_STOP_MASK			BIT(1)
+#define     MVPP2_BM_STATE_MASK			BIT(4)
+#define     MVPP2_BM_LOW_THRESH_OFFS		8
+#define     MVPP2_BM_LOW_THRESH_MASK		0x7f00
+#define     MVPP2_BM_LOW_THRESH_VALUE(val)	((val) << \
+						MVPP2_BM_LOW_THRESH_OFFS)
+#define     MVPP2_BM_HIGH_THRESH_OFFS		16
+#define     MVPP2_BM_HIGH_THRESH_MASK		0x7f0000
+#define     MVPP2_BM_HIGH_THRESH_VALUE(val)	((val) << \
+						MVPP2_BM_HIGH_THRESH_OFFS)
+#define MVPP2_BM_INTR_CAUSE_REG(pool)		(0x6240 + ((pool) * 4))
+#define     MVPP2_BM_RELEASED_DELAY_MASK	BIT(0)
+#define     MVPP2_BM_ALLOC_FAILED_MASK		BIT(1)
+#define     MVPP2_BM_BPPE_EMPTY_MASK		BIT(2)
+#define     MVPP2_BM_BPPE_FULL_MASK		BIT(3)
+#define     MVPP2_BM_AVAILABLE_BP_LOW_MASK	BIT(4)
+#define MVPP2_BM_INTR_MASK_REG(pool)		(0x6280 + ((pool) * 4))
+#define MVPP2_BM_PHY_ALLOC_REG(pool)		(0x6400 + ((pool) * 4))
+#define     MVPP2_BM_PHY_ALLOC_GRNTD_MASK	BIT(0)
+#define MVPP2_BM_VIRT_ALLOC_REG			0x6440
+#define MVPP2_BM_PHY_RLS_REG(pool)		(0x6480 + ((pool) * 4))
+#define     MVPP2_BM_PHY_RLS_MC_BUFF_MASK	BIT(0)
+#define     MVPP2_BM_PHY_RLS_PRIO_EN_MASK	BIT(1)
+#define     MVPP2_BM_PHY_RLS_GRNTD_MASK		BIT(2)
+#define MVPP2_BM_VIRT_RLS_REG			0x64c0
+#define MVPP2_BM_MC_RLS_REG			0x64c4
+#define     MVPP2_BM_MC_ID_MASK			0xfff
+#define     MVPP2_BM_FORCE_RELEASE_MASK		BIT(12)
+
+/* TX Scheduler registers */
+#define MVPP2_TXP_SCHED_PORT_INDEX_REG		0x8000
+#define MVPP2_TXP_SCHED_Q_CMD_REG		0x8004
+#define     MVPP2_TXP_SCHED_ENQ_MASK		0xff
+#define     MVPP2_TXP_SCHED_DISQ_OFFSET		8
+#define MVPP2_TXP_SCHED_CMD_1_REG		0x8010
+#define MVPP2_TXP_SCHED_PERIOD_REG		0x8018
+#define MVPP2_TXP_SCHED_MTU_REG			0x801c
+#define     MVPP2_TXP_MTU_MAX			0x7FFFF
+#define MVPP2_TXP_SCHED_REFILL_REG		0x8020
+#define     MVPP2_TXP_REFILL_TOKENS_ALL_MASK	0x7ffff
+#define     MVPP2_TXP_REFILL_PERIOD_ALL_MASK	0x3ff00000
+#define     MVPP2_TXP_REFILL_PERIOD_MASK(v)	((v) << 20)
+#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG		0x8024
+#define     MVPP2_TXP_TOKEN_SIZE_MAX		0xffffffff
+#define MVPP2_TXQ_SCHED_REFILL_REG(q)		(0x8040 + ((q) << 2))
+#define     MVPP2_TXQ_REFILL_TOKENS_ALL_MASK	0x7ffff
+#define     MVPP2_TXQ_REFILL_PERIOD_ALL_MASK	0x3ff00000
+#define     MVPP2_TXQ_REFILL_PERIOD_MASK(v)	((v) << 20)
+#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q)	(0x8060 + ((q) << 2))
+#define     MVPP2_TXQ_TOKEN_SIZE_MAX		0x7fffffff
+#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q)	(0x8080 + ((q) << 2))
+#define     MVPP2_TXQ_TOKEN_CNTR_MAX		0xffffffff
+
+/* TX general registers */
+#define MVPP2_TX_SNOOP_REG			0x8800
+#define MVPP2_TX_PORT_FLUSH_REG			0x8810
+#define     MVPP2_TX_PORT_FLUSH_MASK(port)	(1 << (port))
+
+/* LMS registers */
+#define MVPP2_SRC_ADDR_MIDDLE			0x24
+#define MVPP2_SRC_ADDR_HIGH			0x28
+#define MVPP2_PHY_AN_CFG0_REG			0x34
+#define     MVPP2_PHY_AN_STOP_SMI0_MASK		BIT(7)
+#define MVPP2_MIB_COUNTERS_BASE(port)		(0x1000 + ((port) >> 1) * \
+						0x400 + (port) * 0x400)
+#define     MVPP2_MIB_LATE_COLLISION		0x7c
+#define MVPP2_ISR_SUM_MASK_REG			0x220c
+#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG	0x305c
+#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT		0x27
+
+/* Per-port registers */
+#define MVPP2_GMAC_CTRL_0_REG			0x0
+#define      MVPP2_GMAC_PORT_EN_MASK		BIT(0)
+#define      MVPP2_GMAC_MAX_RX_SIZE_OFFS	2
+#define      MVPP2_GMAC_MAX_RX_SIZE_MASK	0x7ffc
+#define      MVPP2_GMAC_MIB_CNTR_EN_MASK	BIT(15)
+#define MVPP2_GMAC_CTRL_1_REG			0x4
+#define      MVPP2_GMAC_PERIODIC_XON_EN_MASK	BIT(1)
+#define      MVPP2_GMAC_GMII_LB_EN_MASK		BIT(5)
+#define      MVPP2_GMAC_PCS_LB_EN_BIT		6
+#define      MVPP2_GMAC_PCS_LB_EN_MASK		BIT(6)
+#define      MVPP2_GMAC_SA_LOW_OFFS		7
+#define MVPP2_GMAC_CTRL_2_REG			0x8
+#define      MVPP2_GMAC_INBAND_AN_MASK		BIT(0)
+#define      MVPP2_GMAC_PCS_ENABLE_MASK		BIT(3)
+#define      MVPP2_GMAC_PORT_RGMII_MASK		BIT(4)
+#define      MVPP2_GMAC_PORT_RESET_MASK		BIT(6)
+#define MVPP2_GMAC_AUTONEG_CONFIG		0xc
+#define      MVPP2_GMAC_FORCE_LINK_DOWN		BIT(0)
+#define      MVPP2_GMAC_FORCE_LINK_PASS		BIT(1)
+#define      MVPP2_GMAC_CONFIG_MII_SPEED	BIT(5)
+#define      MVPP2_GMAC_CONFIG_GMII_SPEED	BIT(6)
+#define      MVPP2_GMAC_AN_SPEED_EN		BIT(7)
+#define      MVPP2_GMAC_FC_ADV_EN		BIT(9)
+#define      MVPP2_GMAC_CONFIG_FULL_DUPLEX	BIT(12)
+#define      MVPP2_GMAC_AN_DUPLEX_EN		BIT(13)
+#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG		0x1c
+#define      MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS	6
+#define      MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK	0x1fc0
+#define      MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v)	(((v) << 6) & \
+					MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
+
+#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff
+
+/* Descriptor ring Macros */
+#define MVPP2_QUEUE_NEXT_DESC(q, index) \
+	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
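+/* e.g. in a 256-entry ring, descriptor 255 is followed by descriptor 0 */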
+
+/* Various constants */
+
+/* Coalescing */
+#define MVPP2_TXDONE_COAL_PKTS_THRESH	15
+#define MVPP2_TXDONE_HRTIMER_PERIOD_NS	1000000UL
+#define MVPP2_RX_COAL_PKTS		32
+#define MVPP2_RX_COAL_USEC		100
+
+/* The two-byte Marvell header. It either contains a special value used
+ * by Marvell switches when a specific hardware mode is enabled (not
+ * supported by this driver), or is filled with zeroes automatically on
+ * the RX side. Because those two bytes sit in front of the Ethernet
+ * header, they automatically align the IP header on a 4-byte boundary:
+ * the hardware skips them on its own.
+ */
+#define MVPP2_MH_SIZE			2
+#define MVPP2_ETH_TYPE_LEN		2
+#define MVPP2_PPPOE_HDR_SIZE		8
+#define MVPP2_VLAN_TAG_LEN		4
+
+/* Lbtd 802.3 type */
+#define MVPP2_IP_LBDT_TYPE		0xfffa
+
+#define MVPP2_CPU_D_CACHE_LINE_SIZE	32
+#define MVPP2_TX_CSUM_MAX_SIZE		9800
+
+/* Timeout constants */
+#define MVPP2_TX_DISABLE_TIMEOUT_MSEC	1000
+#define MVPP2_TX_PENDING_TIMEOUT_MSEC	1000
+
+#define MVPP2_TX_MTU_MAX		0x7ffff
+
+/* Maximum number of T-CONTs of PON port */
+#define MVPP2_MAX_TCONT			16
+
+/* Maximum number of supported ports */
+#define MVPP2_MAX_PORTS			4
+
+/* Maximum number of TXQs used by single port */
+#define MVPP2_MAX_TXQ			8
+
+/* Maximum number of RXQs used by single port */
+#define MVPP2_MAX_RXQ			8
+
+/* Default number of RXQs in use */
+#define MVPP2_DEFAULT_RXQ		4
+
+/* Total number of RXQs available to all ports */
+#define MVPP2_RXQ_TOTAL_NUM		(MVPP2_MAX_PORTS * MVPP2_MAX_RXQ)
+
+/* Max number of Rx descriptors */
+#define MVPP2_MAX_RXD			128
+
+/* Max number of Tx descriptors */
+#define MVPP2_MAX_TXD			1024
+
+/* Amount of Tx descriptors that can be reserved at once by CPU */
+#define MVPP2_CPU_DESC_CHUNK		64
+
+/* Max number of Tx descriptors in each aggregated queue */
+#define MVPP2_AGGR_TXQ_SIZE		256
+
+/* Descriptor aligned size */
+#define MVPP2_DESC_ALIGNED_SIZE		32
+
+/* Descriptor alignment mask */
+#define MVPP2_TX_DESC_ALIGN		(MVPP2_DESC_ALIGNED_SIZE - 1)
+
+/* RX FIFO constants */
+#define MVPP2_RX_FIFO_PORT_DATA_SIZE	0x2000
+#define MVPP2_RX_FIFO_PORT_ATTR_SIZE	0x80
+#define MVPP2_RX_FIFO_PORT_MIN_PKT	0x80
+
+/* RX buffer constants */
+#define MVPP2_SKB_SHINFO_SIZE \
+	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+
+#define MVPP2_RX_PKT_SIZE(mtu) \
+	ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
+	      ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE)
+
+#define MVPP2_RX_BUF_SIZE(pkt_size)	((pkt_size) + NET_SKB_PAD)
+#define MVPP2_RX_TOTAL_SIZE(buf_size)	((buf_size) + MVPP2_SKB_SHINFO_SIZE)
+#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
+	((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
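+/* e.g. for the standard 1500 byte MTU: 1500 + 2 (MH) + 4 (VLAN tag) +
+ * 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) = 1524, aligned up to a 1536 byte
+ * packet size.
+ */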
+
+#define MVPP2_BIT_TO_BYTE(bit)		((bit) / 8)
+
+/* IPv6 max L3 address size */
+#define MVPP2_MAX_L3_ADDR_SIZE		16
+
+/* Port flags */
+#define MVPP2_F_LOOPBACK		BIT(0)
+
+/* Marvell tag types */
+enum mvpp2_tag_type {
+	MVPP2_TAG_TYPE_NONE = 0,
+	MVPP2_TAG_TYPE_MH   = 1,
+	MVPP2_TAG_TYPE_DSA  = 2,
+	MVPP2_TAG_TYPE_EDSA = 3,
+	MVPP2_TAG_TYPE_VLAN = 4,
+	MVPP2_TAG_TYPE_LAST = 5
+};
+
+/* Parser constants */
+#define MVPP2_PRS_TCAM_SRAM_SIZE	256
+#define MVPP2_PRS_TCAM_WORDS		6
+#define MVPP2_PRS_SRAM_WORDS		4
+#define MVPP2_PRS_FLOW_ID_SIZE		64
+#define MVPP2_PRS_FLOW_ID_MASK		0x3f
+#define MVPP2_PRS_TCAM_ENTRY_INVALID	1
+#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT	BIT(5)
+#define MVPP2_PRS_IPV4_HEAD		0x40
+#define MVPP2_PRS_IPV4_HEAD_MASK	0xf0
+#define MVPP2_PRS_IPV4_MC		0xe0
+#define MVPP2_PRS_IPV4_MC_MASK		0xf0
+#define MVPP2_PRS_IPV4_BC_MASK		0xff
+#define MVPP2_PRS_IPV4_IHL		0x5
+#define MVPP2_PRS_IPV4_IHL_MASK		0xf
+#define MVPP2_PRS_IPV6_MC		0xff
+#define MVPP2_PRS_IPV6_MC_MASK		0xff
+#define MVPP2_PRS_IPV6_HOP_MASK		0xff
+#define MVPP2_PRS_TCAM_PROTO_MASK	0xff
+#define MVPP2_PRS_TCAM_PROTO_MASK_L	0x3f
+#define MVPP2_PRS_DBL_VLANS_MAX		100
+
+/* Tcam structure:
+ * - lookup ID - 4 bits
+ * - port ID - 1 byte
+ * - additional information - 1 byte
+ * - header data - 8 bytes
+ * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
+ */
+#define MVPP2_PRS_AI_BITS			8
+#define MVPP2_PRS_PORT_MASK			0xff
+#define MVPP2_PRS_LU_MASK			0xf
+#define MVPP2_PRS_TCAM_DATA_BYTE(offs)		\
+				    (((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
+#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)	\
+					      (((offs) * 2) - ((offs) % 2)  + 2)
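+/* The TCAM thus interleaves header-data bytes with their enable (mask)
+ * bytes in pairs: data bytes 2n and 2n+1 are stored at entry bytes 4n
+ * and 4n+1, and their enable bytes at 4n+2 and 4n+3; e.g. data byte 2
+ * lives at byte 4 and its enable mask at byte 6.
+ */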
+#define MVPP2_PRS_TCAM_AI_BYTE			16
+#define MVPP2_PRS_TCAM_PORT_BYTE		17
+#define MVPP2_PRS_TCAM_LU_BYTE			20
+#define MVPP2_PRS_TCAM_EN_OFFS(offs)		((offs) + 2)
+#define MVPP2_PRS_TCAM_INV_WORD			5
+/* Tcam entries ID */
+#define MVPP2_PE_DROP_ALL		0
+#define MVPP2_PE_FIRST_FREE_TID		1
+#define MVPP2_PE_LAST_FREE_TID		(MVPP2_PRS_TCAM_SRAM_SIZE - 31)
+#define MVPP2_PE_IP6_EXT_PROTO_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 30)
+#define MVPP2_PE_MAC_MC_IP6		(MVPP2_PRS_TCAM_SRAM_SIZE - 29)
+#define MVPP2_PE_IP6_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 28)
+#define MVPP2_PE_IP4_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 27)
+#define MVPP2_PE_LAST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 26)
+#define MVPP2_PE_FIRST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 19)
+#define MVPP2_PE_EDSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 18)
+#define MVPP2_PE_EDSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 17)
+#define MVPP2_PE_DSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 16)
+#define MVPP2_PE_DSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 15)
+#define MVPP2_PE_ETYPE_EDSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 14)
+#define MVPP2_PE_ETYPE_EDSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 13)
+#define MVPP2_PE_ETYPE_DSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 12)
+#define MVPP2_PE_ETYPE_DSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 11)
+#define MVPP2_PE_MH_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 10)
+#define MVPP2_PE_DSA_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 9)
+#define MVPP2_PE_IP6_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 8)
+#define MVPP2_PE_IP4_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 7)
+#define MVPP2_PE_ETH_TYPE_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 6)
+#define MVPP2_PE_VLAN_DBL		(MVPP2_PRS_TCAM_SRAM_SIZE - 5)
+#define MVPP2_PE_VLAN_NONE		(MVPP2_PRS_TCAM_SRAM_SIZE - 4)
+#define MVPP2_PE_MAC_MC_ALL		(MVPP2_PRS_TCAM_SRAM_SIZE - 3)
+#define MVPP2_PE_MAC_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 2)
+#define MVPP2_PE_MAC_NON_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 1)
+
+/* Sram structure
+ * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0).
+ */
+#define MVPP2_PRS_SRAM_RI_OFFS			0
+#define MVPP2_PRS_SRAM_RI_WORD			0
+#define MVPP2_PRS_SRAM_RI_CTRL_OFFS		32
+#define MVPP2_PRS_SRAM_RI_CTRL_WORD		1
+#define MVPP2_PRS_SRAM_RI_CTRL_BITS		32
+#define MVPP2_PRS_SRAM_SHIFT_OFFS		64
+#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT		72
+#define MVPP2_PRS_SRAM_UDF_OFFS			73
+#define MVPP2_PRS_SRAM_UDF_BITS			8
+#define MVPP2_PRS_SRAM_UDF_MASK			0xff
+#define MVPP2_PRS_SRAM_UDF_SIGN_BIT		81
+#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS		82
+#define MVPP2_PRS_SRAM_UDF_TYPE_MASK		0x7
+#define MVPP2_PRS_SRAM_UDF_TYPE_L3		1
+#define MVPP2_PRS_SRAM_UDF_TYPE_L4		4
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS	85
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK	0x3
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD		1
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD	2
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD	3
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS		87
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS		2
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK		0x3
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD		0
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD	2
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD	3
+#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS		89
+#define MVPP2_PRS_SRAM_AI_OFFS			90
+#define MVPP2_PRS_SRAM_AI_CTRL_OFFS		98
+#define MVPP2_PRS_SRAM_AI_CTRL_BITS		8
+#define MVPP2_PRS_SRAM_AI_MASK			0xff
+#define MVPP2_PRS_SRAM_NEXT_LU_OFFS		106
+#define MVPP2_PRS_SRAM_NEXT_LU_MASK		0xf
+#define MVPP2_PRS_SRAM_LU_DONE_BIT		110
+#define MVPP2_PRS_SRAM_LU_GEN_BIT		111
+
+/* Sram result info bits assignment */
+#define MVPP2_PRS_RI_MAC_ME_MASK		0x1
+#define MVPP2_PRS_RI_DSA_MASK			0x2
+#define MVPP2_PRS_RI_VLAN_MASK			0xc
+#define MVPP2_PRS_RI_VLAN_NONE			~(BIT(2) | BIT(3))
+#define MVPP2_PRS_RI_VLAN_SINGLE		BIT(2)
+#define MVPP2_PRS_RI_VLAN_DOUBLE		BIT(3)
+#define MVPP2_PRS_RI_VLAN_TRIPLE		(BIT(2) | BIT(3))
+#define MVPP2_PRS_RI_CPU_CODE_MASK		0x70
+#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC		BIT(4)
+#define MVPP2_PRS_RI_L2_CAST_MASK		0x600
+#define MVPP2_PRS_RI_L2_UCAST			~(BIT(9) | BIT(10))
+#define MVPP2_PRS_RI_L2_MCAST			BIT(9)
+#define MVPP2_PRS_RI_L2_BCAST			BIT(10)
+#define MVPP2_PRS_RI_PPPOE_MASK			0x800
+#define MVPP2_PRS_RI_L3_PROTO_MASK		0x7000
+#define MVPP2_PRS_RI_L3_UN			~(BIT(12) | BIT(13) | BIT(14))
+#define MVPP2_PRS_RI_L3_IP4			BIT(12)
+#define MVPP2_PRS_RI_L3_IP4_OPT			BIT(13)
+#define MVPP2_PRS_RI_L3_IP4_OTHER		(BIT(12) | BIT(13))
+#define MVPP2_PRS_RI_L3_IP6			BIT(14)
+#define MVPP2_PRS_RI_L3_IP6_EXT			(BIT(12) | BIT(14))
+#define MVPP2_PRS_RI_L3_ARP			(BIT(13) | BIT(14))
+#define MVPP2_PRS_RI_L3_ADDR_MASK		0x18000
+#define MVPP2_PRS_RI_L3_UCAST			~(BIT(15) | BIT(16))
+#define MVPP2_PRS_RI_L3_MCAST			BIT(15)
+#define MVPP2_PRS_RI_L3_BCAST			(BIT(15) | BIT(16))
+#define MVPP2_PRS_RI_IP_FRAG_MASK		0x20000
+#define MVPP2_PRS_RI_UDF3_MASK			0x300000
+#define MVPP2_PRS_RI_UDF3_RX_SPECIAL		BIT(21)
+#define MVPP2_PRS_RI_L4_PROTO_MASK		0x1c00000
+#define MVPP2_PRS_RI_L4_TCP			BIT(22)
+#define MVPP2_PRS_RI_L4_UDP			BIT(23)
+#define MVPP2_PRS_RI_L4_OTHER			(BIT(22) | BIT(23))
+#define MVPP2_PRS_RI_UDF7_MASK			0x60000000
+#define MVPP2_PRS_RI_UDF7_IP6_LITE		BIT(29)
+#define MVPP2_PRS_RI_DROP_MASK			0x80000000
+
+/* Sram additional info bits assignment */
+#define MVPP2_PRS_IPV4_DIP_AI_BIT		BIT(0)
+#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT		BIT(0)
+#define MVPP2_PRS_IPV6_EXT_AI_BIT		BIT(1)
+#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT		BIT(2)
+#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT	BIT(3)
+#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT		BIT(4)
+#define MVPP2_PRS_SINGLE_VLAN_AI		0
+#define MVPP2_PRS_DBL_VLAN_AI_BIT		BIT(7)
+
+/* DSA/EDSA type */
+#define MVPP2_PRS_TAGGED		true
+#define MVPP2_PRS_UNTAGGED		false
+#define MVPP2_PRS_EDSA			true
+#define MVPP2_PRS_DSA			false
+
+/* MAC entries, shadow udf */
+enum mvpp2_prs_udf {
+	MVPP2_PRS_UDF_MAC_DEF,
+	MVPP2_PRS_UDF_MAC_RANGE,
+	MVPP2_PRS_UDF_L2_DEF,
+	MVPP2_PRS_UDF_L2_DEF_COPY,
+	MVPP2_PRS_UDF_L2_USER,
+};
+
+/* Lookup ID */
+enum mvpp2_prs_lookup {
+	MVPP2_PRS_LU_MH,
+	MVPP2_PRS_LU_MAC,
+	MVPP2_PRS_LU_DSA,
+	MVPP2_PRS_LU_VLAN,
+	MVPP2_PRS_LU_L2,
+	MVPP2_PRS_LU_PPPOE,
+	MVPP2_PRS_LU_IP4,
+	MVPP2_PRS_LU_IP6,
+	MVPP2_PRS_LU_FLOWS,
+	MVPP2_PRS_LU_LAST,
+};
+
+/* L3 cast enum */
+enum mvpp2_prs_l3_cast {
+	MVPP2_PRS_L3_UNI_CAST,
+	MVPP2_PRS_L3_MULTI_CAST,
+	MVPP2_PRS_L3_BROAD_CAST
+};
+
+/* Classifier constants */
+#define MVPP2_CLS_FLOWS_TBL_SIZE	512
+#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS	3
+#define MVPP2_CLS_LKP_TBL_SIZE		64
+
+/* BM constants */
+#define MVPP2_BM_POOLS_NUM		8
+#define MVPP2_BM_LONG_BUF_NUM		1024
+#define MVPP2_BM_SHORT_BUF_NUM		2048
+#define MVPP2_BM_POOL_SIZE_MAX		(16 * 1024 - MVPP2_BM_POOL_PTR_ALIGN / 4)
+#define MVPP2_BM_POOL_PTR_ALIGN		128
+#define MVPP2_BM_SWF_LONG_POOL(port)	((port > 2) ? 2 : port)
+#define MVPP2_BM_SWF_SHORT_POOL		3
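+/* Ports 0-2 each get a dedicated long pool; any higher port number
+ * shares long pool 2, and all ports use short pool 3.
+ */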
+
+/* BM cookie (32 bits) definition */
+#define MVPP2_BM_COOKIE_POOL_OFFS	8
+#define MVPP2_BM_COOKIE_CPU_OFFS	24
+
+/* BM short pool packet size
+ * These values ensure that for SWF the total number
+ * of bytes allocated for each buffer will be 512
+ */
+#define MVPP2_BM_SHORT_PKT_SIZE		MVPP2_RX_MAX_PKT_SIZE(512)
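+/* i.e. MVPP2_RX_TOTAL_SIZE(MVPP2_RX_BUF_SIZE(MVPP2_BM_SHORT_PKT_SIZE))
+ * works out to exactly 512 bytes.
+ */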
+
+enum mvpp2_bm_type {
+	MVPP2_BM_FREE,
+	MVPP2_BM_SWF_LONG,
+	MVPP2_BM_SWF_SHORT
+};
+
+/* Definitions */
+
+/* Shared Packet Processor resources */
+struct mvpp2 {
+	/* Shared registers' base addresses */
+	void __iomem *base;
+	void __iomem *lms_base;
+
+	/* Common clocks */
+	struct clk *pp_clk;
+	struct clk *gop_clk;
+
+	/* List of pointers to port structures */
+	struct mvpp2_port **port_list;
+
+	/* Aggregated TXQs */
+	struct mvpp2_tx_queue *aggr_txqs;
+
+	/* BM pools */
+	struct mvpp2_bm_pool *bm_pools;
+
+	/* PRS shadow table */
+	struct mvpp2_prs_shadow *prs_shadow;
+	/* PRS auxiliary table for double vlan entries control */
+	bool *prs_double_vlans;
+
+	/* Tclk value */
+	u32 tclk;
+};
+
+struct mvpp2_pcpu_stats {
+	struct	u64_stats_sync syncp;
+	u64	rx_packets;
+	u64	rx_bytes;
+	u64	tx_packets;
+	u64	tx_bytes;
+};
+
+/* Per-CPU port control */
+struct mvpp2_port_pcpu {
+	struct hrtimer tx_done_timer;
+	bool timer_scheduled;
+	/* Tasklet for egress finalization */
+	struct tasklet_struct tx_done_tasklet;
+};
+
+struct mvpp2_port {
+	u8 id;
+
+	int irq;
+
+	struct mvpp2 *priv;
+
+	/* Per-port registers' base address */
+	void __iomem *base;
+
+	struct mvpp2_rx_queue **rxqs;
+	struct mvpp2_tx_queue **txqs;
+	struct net_device *dev;
+
+	int pkt_size;
+
+	u32 pending_cause_rx;
+	struct napi_struct napi;
+
+	/* Per-CPU port control */
+	struct mvpp2_port_pcpu __percpu *pcpu;
+
+	/* Flags */
+	unsigned long flags;
+
+	u16 tx_ring_size;
+	u16 rx_ring_size;
+	struct mvpp2_pcpu_stats __percpu *stats;
+
+	struct phy_device *phy_dev;
+	phy_interface_t phy_interface;
+	struct device_node *phy_node;
+	unsigned int link;
+	unsigned int duplex;
+	unsigned int speed;
+
+	struct mvpp2_bm_pool *pool_long;
+	struct mvpp2_bm_pool *pool_short;
+
+	/* Index of first port's physical RXQ */
+	u8 first_rxq;
+};
+
+/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
+ * layout of the transmit and reception DMA descriptors, and their
+ * layout is therefore defined by the hardware design
+ */
+
+#define MVPP2_TXD_L3_OFF_SHIFT		0
+#define MVPP2_TXD_IP_HLEN_SHIFT		8
+#define MVPP2_TXD_L4_CSUM_FRAG		BIT(13)
+#define MVPP2_TXD_L4_CSUM_NOT		BIT(14)
+#define MVPP2_TXD_IP_CSUM_DISABLE	BIT(15)
+#define MVPP2_TXD_PADDING_DISABLE	BIT(23)
+#define MVPP2_TXD_L4_UDP		BIT(24)
+#define MVPP2_TXD_L3_IP6		BIT(26)
+#define MVPP2_TXD_L_DESC		BIT(28)
+#define MVPP2_TXD_F_DESC		BIT(29)
+
+#define MVPP2_RXD_ERR_SUMMARY		BIT(15)
+#define MVPP2_RXD_ERR_CODE_MASK		(BIT(13) | BIT(14))
+#define MVPP2_RXD_ERR_CRC		0x0
+#define MVPP2_RXD_ERR_OVERRUN		BIT(13)
+#define MVPP2_RXD_ERR_RESOURCE		(BIT(13) | BIT(14))
+#define MVPP2_RXD_BM_POOL_ID_OFFS	16
+#define MVPP2_RXD_BM_POOL_ID_MASK	(BIT(16) | BIT(17) | BIT(18))
+#define MVPP2_RXD_HWF_SYNC		BIT(21)
+#define MVPP2_RXD_L4_CSUM_OK		BIT(22)
+#define MVPP2_RXD_IP4_HEADER_ERR	BIT(24)
+#define MVPP2_RXD_L4_TCP		BIT(25)
+#define MVPP2_RXD_L4_UDP		BIT(26)
+#define MVPP2_RXD_L3_IP4		BIT(28)
+#define MVPP2_RXD_L3_IP6		BIT(30)
+#define MVPP2_RXD_BUF_HDR		BIT(31)
+
+struct mvpp2_tx_desc {
+	u32 command;		/* Options used by HW for packet transmitting.*/
+	u8  packet_offset;	/* the offset from the buffer beginning	*/
+	u8  phys_txq;		/* destination queue ID			*/
+	u16 data_size;		/* data size of transmitted packet in bytes */
+	u32 buf_phys_addr;	/* physical addr of transmitted buffer	*/
+	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
+	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
+	u32 reserved2;		/* reserved (for future use)		*/
+};
+
+struct mvpp2_rx_desc {
+	u32 status;		/* info about received packet		*/
+	u16 reserved1;		/* parser_info (for future use, PnC)	*/
+	u16 data_size;		/* size of received packet in bytes	*/
+	u32 buf_phys_addr;	/* physical address of the buffer	*/
+	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
+	u16 reserved2;		/* gem_port_id (for future use, PON)	*/
+	u16 reserved3;		/* csum_l4 (for future use, PnC)	*/
+	u8  reserved4;		/* bm_qset (for future use, BM)		*/
+	u8  reserved5;
+	u16 reserved6;		/* classify_info (for future use, PnC)	*/
+	u32 reserved7;		/* flow_id (for future use, PnC) */
+	u32 reserved8;
+};
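+/* Both descriptor layouts above are exactly 32 bytes, i.e.
+ * MVPP2_DESC_ALIGNED_SIZE.
+ */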
+
+struct mvpp2_txq_pcpu_buf {
+	/* Transmitted SKB */
+	struct sk_buff *skb;
+
+	/* Physical address of transmitted buffer */
+	dma_addr_t phys;
+
+	/* Size transmitted */
+	size_t size;
+};
+
+/* Per-CPU Tx queue control */
+struct mvpp2_txq_pcpu {
+	int cpu;
+
+	/* Number of Tx DMA descriptors in the descriptor ring */
+	int size;
+
+	/* Number of currently used Tx DMA descriptor in the
+	 * descriptor ring
+	 */
+	int count;
+
+	/* Number of Tx DMA descriptors reserved for each CPU */
+	int reserved_num;
+
+	/* Infos about transmitted buffers */
+	struct mvpp2_txq_pcpu_buf *buffs;
+
+	/* Index of last TX DMA descriptor that was inserted */
+	int txq_put_index;
+
+	/* Index of the TX DMA descriptor to be cleaned up */
+	int txq_get_index;
+};
+
+struct mvpp2_tx_queue {
+	/* Physical number of this Tx queue */
+	u8 id;
+
+	/* Logical number of this Tx queue */
+	u8 log_id;
+
+	/* Number of Tx DMA descriptors in the descriptor ring */
+	int size;
+
+	/* Number of currently used Tx DMA descriptors in the descriptor ring */
+	int count;
+
+	/* Per-CPU control of physical Tx queues */
+	struct mvpp2_txq_pcpu __percpu *pcpu;
+
+	/* Array of transmitted skbs */
+	struct sk_buff **tx_skb;
+
+	u32 done_pkts_coal;
+
+	/* Virtual address of the Tx DMA descriptors array */
+	struct mvpp2_tx_desc *descs;
+
+	/* DMA address of the Tx DMA descriptors array */
+	dma_addr_t descs_phys;
+
+	/* Index of the last Tx DMA descriptor */
+	int last_desc;
+
+	/* Index of the next Tx DMA descriptor to process */
+	int next_desc_to_proc;
+};
+
+struct mvpp2_rx_queue {
+	/* RX queue number, in the range 0-31 for physical RXQs */
+	u8 id;
+
+	/* Num of rx descriptors in the rx descriptor ring */
+	int size;
+
+	u32 pkts_coal;
+	u32 time_coal;
+
+	/* Virtual address of the RX DMA descriptors array */
+	struct mvpp2_rx_desc *descs;
+
+	/* DMA address of the RX DMA descriptors array */
+	dma_addr_t descs_phys;
+
+	/* Index of the last RX DMA descriptor */
+	int last_desc;
+
+	/* Index of the next RX DMA descriptor to process */
+	int next_desc_to_proc;
+
+	/* ID of port to which physical RXQ is mapped */
+	int port;
+
+	/* Port's logical RXQ number to which physical RXQ is mapped */
+	int logic_rxq;
+};
+
+union mvpp2_prs_tcam_entry {
+	u32 word[MVPP2_PRS_TCAM_WORDS];
+	u8  byte[MVPP2_PRS_TCAM_WORDS * 4];
+};
+
+union mvpp2_prs_sram_entry {
+	u32 word[MVPP2_PRS_SRAM_WORDS];
+	u8  byte[MVPP2_PRS_SRAM_WORDS * 4];
+};
+
+struct mvpp2_prs_entry {
+	u32 index;
+	union mvpp2_prs_tcam_entry tcam;
+	union mvpp2_prs_sram_entry sram;
+};
+
+struct mvpp2_prs_shadow {
+	bool valid;
+	bool finish;
+
+	/* Lookup ID */
+	int lu;
+
+	/* User defined offset */
+	int udf;
+
+	/* Result info */
+	u32 ri;
+	u32 ri_mask;
+};
+
+struct mvpp2_cls_flow_entry {
+	u32 index;
+	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
+};
+
+struct mvpp2_cls_lookup_entry {
+	u32 lkpid;
+	u32 way;
+	u32 data;
+};
+
+struct mvpp2_bm_pool {
+	/* Pool number in the range 0-7 */
+	int id;
+	enum mvpp2_bm_type type;
+
+	/* Buffer Pointers Pool External (BPPE) size */
+	int size;
+	/* Number of buffers for this pool */
+	int buf_num;
+	/* Pool buffer size */
+	int buf_size;
+	/* Packet size */
+	int pkt_size;
+
+	/* BPPE virtual base address */
+	u32 *virt_addr;
+	/* BPPE physical base address */
+	dma_addr_t phys_addr;
+
+	/* Ports using BM pool */
+	u32 port_map;
+
+	/* Occupied buffers indicator */
+	atomic_t in_use;
+	int in_use_thresh;
+};
+
+struct mvpp2_buff_hdr {
+	u32 next_buff_phys_addr;
+	u32 next_buff_virt_addr;
+	u16 byte_count;
+	u16 info;
+	u8  reserved1;		/* bm_qset (for future use, BM)		*/
+};
+
+/* Buffer header info bits */
+#define MVPP2_B_HDR_INFO_MC_ID_MASK	0xfff
+#define MVPP2_B_HDR_INFO_MC_ID(info)	((info) & MVPP2_B_HDR_INFO_MC_ID_MASK)
+#define MVPP2_B_HDR_INFO_LAST_OFFS	12
+#define MVPP2_B_HDR_INFO_LAST_MASK	BIT(12)
+#define MVPP2_B_HDR_INFO_IS_LAST(info) \
+	(((info) & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS)
+
+/* Static declarations */
+
+/* Number of RXQs used by single port */
+static int rxq_number = MVPP2_DEFAULT_RXQ;
+/* Number of TXQs used by single port */
+static int txq_number = MVPP2_MAX_TXQ;
+
+#define MVPP2_DRIVER_NAME "mvpp2"
+#define MVPP2_DRIVER_VERSION "1.0"
+
+/* Utility/helper methods */
+
+static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
+{
+	writel(data, priv->base + offset);
+}
+
+static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
+{
+	return readl(priv->base + offset);
+}
+
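+/* Advance the per-CPU Tx "get" (cleanup) index, wrapping at the ring size */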
+static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
+{
+	txq_pcpu->txq_get_index++;
+	if (txq_pcpu->txq_get_index == txq_pcpu->size)
+		txq_pcpu->txq_get_index = 0;
+}
+
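+/* Record info about a transmitted buffer and advance the per-CPU Tx "put"
+ * index, wrapping at the ring size
+ */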
+static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
+			      struct sk_buff *skb,
+			      struct mvpp2_tx_desc *tx_desc)
+{
+	struct mvpp2_txq_pcpu_buf *tx_buf =
+		txq_pcpu->buffs + txq_pcpu->txq_put_index;
+	tx_buf->skb = skb;
+	tx_buf->size = tx_desc->data_size;
+	tx_buf->phys = tx_desc->buf_phys_addr + tx_desc->packet_offset;
+	txq_pcpu->txq_put_index++;
+	if (txq_pcpu->txq_put_index == txq_pcpu->size)
+		txq_pcpu->txq_put_index = 0;
+}
+
+/* Get number of physical egress port */
+static inline int mvpp2_egress_port(struct mvpp2_port *port)
+{
+	return MVPP2_MAX_TCONT + port->id;
+}
+
+/* Get number of physical TXQ */
+static inline int mvpp2_txq_phys(int port, int txq)
+{
+	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
+}
+
+/* Parser configuration routines */
+
+/* Update parser tcam and sram hw entries */
+static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
+{
+	int i;
+
+	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
+		return -EINVAL;
+
+	/* Clear entry invalidation bit */
+	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
+
+	/* Write tcam index - indirect access */
+	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
+	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
+		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
+
+	/* Write sram index - indirect access */
+	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
+	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
+		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);
+
+	return 0;
+}
+
+/* Read tcam entry from hw */
+static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
+{
+	int i;
+
+	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
+		return -EINVAL;
+
+	/* Write tcam index - indirect access */
+	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
+
+	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
+			      MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
+	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
+		return MVPP2_PRS_TCAM_ENTRY_INVALID;
+
+	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
+		pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
+
+	/* Write sram index - indirect access */
+	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
+	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
+		pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
+
+	return 0;
+}
+
+/* Invalidate tcam hw entry */
+static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
+{
+	/* Write index - indirect access */
+	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
+	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
+		    MVPP2_PRS_TCAM_INV_MASK);
+}
+
+/* Enable shadow table entry and set its lookup ID */
+static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
+{
+	priv->prs_shadow[index].valid = true;
+	priv->prs_shadow[index].lu = lu;
+}
+
+/* Update ri fields in shadow table entry */
+static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
+				    unsigned int ri, unsigned int ri_mask)
+{
+	priv->prs_shadow[index].ri_mask = ri_mask;
+	priv->prs_shadow[index].ri = ri;
+}
+
+/* Update lookup field in tcam sw entry */
+static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
+{
+	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
+
+	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
+	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
+}
+
+/* Update mask for single port in tcam sw entry */
+static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
+				    unsigned int port, bool add)
+{
+	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
+
+	if (add)
+		pe->tcam.byte[enable_off] &= ~(1 << port);
+	else
+		pe->tcam.byte[enable_off] |= 1 << port;
+}
+
+/* Update port map in tcam sw entry */
+static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
+					unsigned int ports)
+{
+	unsigned char port_mask = MVPP2_PRS_PORT_MASK;
+	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
+
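+	/* The per-port enable bits are active-low: a port takes part in the
+	 * lookup when its bit is cleared, so the port map is stored inverted
+	 * (and inverted back by mvpp2_prs_tcam_port_map_get()).
+	 */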
+	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
+	pe->tcam.byte[enable_off] &= ~port_mask;
+	pe->tcam.byte[enable_off] |= ~ports & port_mask;
+}
+
+/* Obtain port map from tcam sw entry */
+static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
+{
+	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
+
+	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
+}
+
+/* Set byte of data and its enable bits in tcam sw entry */
+static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
+					 unsigned int offs, unsigned char byte,
+					 unsigned char enable)
+{
+	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
+	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
+}
+
+/* Get byte of data and its enable bits from tcam sw entry */
+static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
+					 unsigned int offs, unsigned char *byte,
+					 unsigned char *enable)
+{
+	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
+	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
+}
+
+/* Compare tcam data bytes with a pattern */
+static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
+				    u16 data)
+{
+	int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
+	u16 tcam_data;
+
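+	/* TCAM data bytes are stored in network order (see
+	 * mvpp2_prs_match_etype()), while the value is assembled here
+	 * low-byte-first, so callers compare against swab16() of the
+	 * host-order value.
+	 */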
+	tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
+	if (tcam_data != data)
+		return false;
+	return true;
+}
+
+/* Update ai bits in tcam sw entry */
+static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
+				     unsigned int bits, unsigned int enable)
+{
+	int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
+
+	for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
+		if (!(enable & BIT(i)))
+			continue;
+
+		if (bits & BIT(i))
+			pe->tcam.byte[ai_idx] |= 1 << i;
+		else
+			pe->tcam.byte[ai_idx] &= ~(1 << i);
+	}
+
+	pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
+}
+
+/* Get ai bits from tcam sw entry */
+static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
+{
+	return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
+}
+
+/* Set ethertype in tcam sw entry */
+static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
+				  unsigned short ethertype)
+{
+	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
+	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
+}
+
+/* Set bits in sram sw entry */
+static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
+				    int val)
+{
+	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
+}
+
+/* Clear bits in sram sw entry */
+static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
+				      int val)
+{
+	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
+}
+
+/* Update ri bits in sram sw entry */
+static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
+				     unsigned int bits, unsigned int mask)
+{
+	unsigned int i;
+
+	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
+		int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
+
+		if (!(mask & BIT(i)))
+			continue;
+
+		if (bits & BIT(i))
+			mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
+		else
+			mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
+
+		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
+	}
+}
+
+/* Obtain ri bits from sram sw entry */
+static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
+{
+	return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
+}
+
+/* Update ai bits in sram sw entry */
+static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
+				     unsigned int bits, unsigned int mask)
+{
+	unsigned int i;
+	int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
+
+	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
+		if (!(mask & BIT(i)))
+			continue;
+
+		if (bits & BIT(i))
+			mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
+		else
+			mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
+
+		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
+	}
+}
+
+/* Read ai bits from sram sw entry */
+static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
+{
+	u8 bits;
+	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
+	int ai_en_off = ai_off + 1;
+	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
+
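+	/* The AI field may straddle a byte boundary: combine the high bits of
+	 * the first byte with the low bits of the next one.
+	 */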
+	bits = (pe->sram.byte[ai_off] >> ai_shift) |
+	       (pe->sram.byte[ai_en_off] << (8 - ai_shift));
+
+	return bits;
+}
+
+/* In sram sw entry set lookup ID field of the tcam key to be used in the next
+ * lookup iteration
+ */
+static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
+				       unsigned int lu)
+{
+	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
+
+	mvpp2_prs_sram_bits_clear(pe, sram_next_off,
+				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
+	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
+}
+
+/* In the sram sw entry set sign and value of the next lookup offset
+ * and the offset value generated to the classifier
+ */
+static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
+				     unsigned int op)
+{
+	/* Set sign */
+	if (shift < 0) {
+		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
+		shift = 0 - shift;
+	} else {
+		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
+	}
+
+	/* Set value */
+	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
+							   (unsigned char)shift;
+
+	/* Reset and set operation */
+	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
+				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
+	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
+
+	/* Set base offset as current */
+	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
+}
+
+/* In the sram sw entry set sign and value of the user defined offset
+ * generated to the classifier
+ */
+static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
+				      unsigned int type, int offset,
+				      unsigned int op)
+{
+	/* Set sign */
+	if (offset < 0) {
+		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
+		offset = 0 - offset;
+	} else {
+		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
+	}
+
+	/* Set value */
+	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
+				  MVPP2_PRS_SRAM_UDF_MASK);
+	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
+	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
+					MVPP2_PRS_SRAM_UDF_BITS)] &=
+	      ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
+	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
+					MVPP2_PRS_SRAM_UDF_BITS)] |=
+				(offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
+
+	/* Set offset type */
+	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
+				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
+	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
+
+	/* Set offset operation */
+	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
+				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
+	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
+
+	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
+					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
+					     ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
+				    (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
+
+	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
+					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
+			     (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
+
+	/* Set base offset as current */
+	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
+}
+
+/* Find parser flow entry */
+static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
+{
+	struct mvpp2_prs_entry *pe;
+	int tid;
+
+	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+	if (!pe)
+		return NULL;
+	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
+
+	/* Go through all entries with MVPP2_PRS_LU_FLOWS */
+	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
+		u8 bits;
+
+		if (!priv->prs_shadow[tid].valid ||
+		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
+			continue;
+
+		pe->index = tid;
+		mvpp2_prs_hw_read(priv, pe);
+		bits = mvpp2_prs_sram_ai_get(pe);
+
+		/* SRAM stores the classification lookup ID in AI bits [5:0] */
+		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
+			return pe;
+	}
+	kfree(pe);
+
+	return NULL;
+}
+
+/* Return first free tcam index, seeking from start to end */
+static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
+				     unsigned char end)
+{
+	int tid;
+
+	if (start > end)
+		swap(start, end);
+
+	if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
+		end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
+
+	for (tid = start; tid <= end; tid++) {
+		if (!priv->prs_shadow[tid].valid)
+			return tid;
+	}
+
+	return -EINVAL;
+}
+
+/* Enable/disable dropping all mac da's */
+static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
+{
+	struct mvpp2_prs_entry pe;
+
+	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
+		/* Entry exists - update port only */
+		pe.index = MVPP2_PE_DROP_ALL;
+		mvpp2_prs_hw_read(priv, &pe);
+	} else {
+		/* Entry doesn't exist - create new */
+		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+		pe.index = MVPP2_PE_DROP_ALL;
+
+		/* Non-promiscuous mode for all ports - DROP unknown packets */
+		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
+					 MVPP2_PRS_RI_DROP_MASK);
+
+		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+
+		/* Update shadow table */
+		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+
+		/* Mask all ports */
+		mvpp2_prs_tcam_port_map_set(&pe, 0);
+	}
+
+	/* Update port mask */
+	mvpp2_prs_tcam_port_set(&pe, port, add);
+
+	mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Set port to promiscuous mode */
+static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
+{
+	struct mvpp2_prs_entry pe;
+
+	/* Promiscuous mode - Accept unknown packets */
+
+	if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
+		/* Entry exists - update port only */
+		pe.index = MVPP2_PE_MAC_PROMISCUOUS;
+		mvpp2_prs_hw_read(priv, &pe);
+	} else {
+		/* Entry doesn't exist - create new */
+		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+		pe.index = MVPP2_PE_MAC_PROMISCUOUS;
+
+		/* Continue - set next lookup */
+		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
+
+		/* Set result info bits */
+		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
+					 MVPP2_PRS_RI_L2_CAST_MASK);
+
+		/* Shift to ethertype */
+		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
+					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+		/* Mask all ports */
+		mvpp2_prs_tcam_port_map_set(&pe, 0);
+
+		/* Update shadow table */
+		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+	}
+
+	/* Update port mask */
+	mvpp2_prs_tcam_port_set(&pe, port, add);
+
+	mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Accept multicast */
+static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
+				    bool add)
+{
+	struct mvpp2_prs_entry pe;
+	unsigned char da_mc;
+
+	/* Ethernet multicast address first byte is
+	 * 0x01 for IPv4 and 0x33 for IPv6
+	 */
+	da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;
+
+	if (priv->prs_shadow[index].valid) {
+		/* Entry exists - update port only */
+		pe.index = index;
+		mvpp2_prs_hw_read(priv, &pe);
+	} else {
+		/* Entry doesn't exist - create new */
+		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+		pe.index = index;
+
+		/* Continue - set next lookup */
+		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
+
+		/* Set result info bits */
+		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
+					 MVPP2_PRS_RI_L2_CAST_MASK);
+
+		/* Update tcam entry data first byte */
+		mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);
+
+		/* Shift to ethertype */
+		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
+					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+		/* Mask all ports */
+		mvpp2_prs_tcam_port_map_set(&pe, 0);
+
+		/* Update shadow table */
+		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+	}
+
+	/* Update port mask */
+	mvpp2_prs_tcam_port_set(&pe, port, add);
+
+	mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Set entry for dsa packets */
+static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
+				  bool tagged, bool extend)
+{
+	struct mvpp2_prs_entry pe;
+	int tid, shift;
+
+	if (extend) {
+		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
+		shift = 8;
+	} else {
+		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
+		shift = 4;
+	}
+
+	if (priv->prs_shadow[tid].valid) {
+		/* Entry exists - update port only */
+		pe.index = tid;
+		mvpp2_prs_hw_read(priv, &pe);
+	} else {
+		/* Entry doesn't exist - create new */
+		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
+		pe.index = tid;
+
+		/* Shift 4 bytes for a DSA tag or 8 bytes for an EDSA tag */
+		mvpp2_prs_sram_shift_set(&pe, shift,
+					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+		/* Update shadow table */
+		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
+
+		if (tagged) {
+			/* Set tagged bit in DSA tag */
+			mvpp2_prs_tcam_data_byte_set(&pe, 0,
+						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
+						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
+			/* Clear all ai bits for next iteration */
+			mvpp2_prs_sram_ai_update(&pe, 0,
+						 MVPP2_PRS_SRAM_AI_MASK);
+			/* If packet is tagged continue check vlans */
+			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+		} else {
+			/* Set result info bits to 'no vlans' */
+			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
+						 MVPP2_PRS_RI_VLAN_MASK);
+			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+		}
+
+		/* Mask all ports */
+		mvpp2_prs_tcam_port_map_set(&pe, 0);
+	}
+
+	/* Update port mask */
+	mvpp2_prs_tcam_port_set(&pe, port, add);
+
+	mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Set entry for dsa ethertype */
+static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
+					    bool add, bool tagged, bool extend)
+{
+	struct mvpp2_prs_entry pe;
+	int tid, shift, port_mask;
+
+	if (extend) {
+		tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
+		      MVPP2_PE_ETYPE_EDSA_UNTAGGED;
+		port_mask = 0;
+		shift = 8;
+	} else {
+		tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
+		      MVPP2_PE_ETYPE_DSA_UNTAGGED;
+		port_mask = MVPP2_PRS_PORT_MASK;
+		shift = 4;
+	}
+
+	if (priv->prs_shadow[tid].valid) {
+		/* Entry exists - update port only */
+		pe.index = tid;
+		mvpp2_prs_hw_read(priv, &pe);
+	} else {
+		/* Entry doesn't exist - create new */
+		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
+		pe.index = tid;
+
+		/* Set ethertype */
+		mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
+		mvpp2_prs_match_etype(&pe, 2, 0);
+
+		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
+					 MVPP2_PRS_RI_DSA_MASK);
+		/* Shift past ethertype + 2 reserved bytes + tag */
+		mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
+					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+		/* Update shadow table */
+		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
+
+		if (tagged) {
+			/* Set tagged bit in DSA tag */
+			mvpp2_prs_tcam_data_byte_set(&pe,
+						     MVPP2_ETH_TYPE_LEN + 2 + 3,
+						 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
+						 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
+			/* Clear all ai bits for next iteration */
+			mvpp2_prs_sram_ai_update(&pe, 0,
+						 MVPP2_PRS_SRAM_AI_MASK);
+			/* If packet is tagged continue check vlans */
+			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+		} else {
+			/* Set result info bits to 'no vlans' */
+			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
+						 MVPP2_PRS_RI_VLAN_MASK);
+			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+		}
+		/* Mask/unmask all ports, depending on dsa type */
+		mvpp2_prs_tcam_port_map_set(&pe, port_mask);
+	}
+
+	/* Update port mask */
+	mvpp2_prs_tcam_port_set(&pe, port, add);
+
+	mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Search for existing single/triple vlan entry */
+static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv,
+						   unsigned short tpid, int ai)
+{
+	struct mvpp2_prs_entry *pe;
+	int tid;
+
+	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+	if (!pe)
+		return NULL;
+	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
+
+	/* Go through all entries with MVPP2_PRS_LU_VLAN */
+	for (tid = MVPP2_PE_FIRST_FREE_TID;
+	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
+		unsigned int ri_bits, ai_bits;
+		bool match;
+
+		if (!priv->prs_shadow[tid].valid ||
+		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
+			continue;
+
+		pe->index = tid;
+
+		mvpp2_prs_hw_read(priv, pe);
+		match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid));
+		if (!match)
+			continue;
+
+		/* Get vlan type */
+		ri_bits = mvpp2_prs_sram_ri_get(pe);
+		ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
+
+		/* Get current ai value from tcam */
+		ai_bits = mvpp2_prs_tcam_ai_get(pe);
+		/* Clear double vlan bit */
+		ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
+
+		if (ai != ai_bits)
+			continue;
+
+		if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
+		    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
+			return pe;
+	}
+	kfree(pe);
+
+	return NULL;
+}
+
+/* Add/update single/triple vlan entry */
+static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
+			      unsigned int port_map)
+{
+	struct mvpp2_prs_entry *pe;
+	int tid_aux, tid;
+	int ret = 0;
+
+	pe = mvpp2_prs_vlan_find(priv, tpid, ai);
+
+	if (!pe) {
+		/* Create new tcam entry */
+		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
+						MVPP2_PE_FIRST_FREE_TID);
+		if (tid < 0)
+			return tid;
+
+		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+		if (!pe)
+			return -ENOMEM;
+
+		/* Get last double vlan tid */
+		for (tid_aux = MVPP2_PE_LAST_FREE_TID;
+		     tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
+			unsigned int ri_bits;
+
+			if (!priv->prs_shadow[tid_aux].valid ||
+			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
+				continue;
+
+			pe->index = tid_aux;
+			mvpp2_prs_hw_read(priv, pe);
+			ri_bits = mvpp2_prs_sram_ri_get(pe);
+			if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
+			    MVPP2_PRS_RI_VLAN_DOUBLE)
+				break;
+		}
+
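+		/* The new single/triple VLAN entry must land at a higher TCAM
+		 * index than any double VLAN entry, keeping double-tag
+		 * matches ahead in the lookup order.
+		 */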
+		if (tid <= tid_aux) {
+			ret = -EINVAL;
+			goto error;
+		}
+
+		memset(pe, 0, sizeof(struct mvpp2_prs_entry));
+		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
+		pe->index = tid;
+
+		mvpp2_prs_match_etype(pe, 0, tpid);
+
+		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
+		/* Shift 4 bytes - skip 1 vlan tag */
+		mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
+					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+		/* Clear all ai bits for next iteration */
+		mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+
+		if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
+			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
+						 MVPP2_PRS_RI_VLAN_MASK);
+		} else {
+			ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
+			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
+						 MVPP2_PRS_RI_VLAN_MASK);
+		}
+		mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);
+
+		mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
+	}
+	/* Update ports' mask */
+	mvpp2_prs_tcam_port_map_set(pe, port_map);
+
+	mvpp2_prs_hw_write(priv, pe);
+
+error:
+	kfree(pe);
+
+	return ret;
+}
+
+/* Get first free double vlan ai number */
+static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
+{
+	int i;
+
+	for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
+		if (!priv->prs_double_vlans[i])
+			return i;
+	}
+
+	return -EINVAL;
+}
+
+/* Search for existing double vlan entry */
+static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv,
+							  unsigned short tpid1,
+							  unsigned short tpid2)
+{
+	struct mvpp2_prs_entry *pe;
+	int tid;
+
+	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+	if (!pe)
+		return NULL;
+	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
+
+	/* Go through all entries with MVPP2_PRS_LU_VLAN */
+	for (tid = MVPP2_PE_FIRST_FREE_TID;
+	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
+		unsigned int ri_mask;
+		bool match;
+
+		if (!priv->prs_shadow[tid].valid ||
+		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
+			continue;
+
+		pe->index = tid;
+		mvpp2_prs_hw_read(priv, pe);
+
+		match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid1)) &&
+			mvpp2_prs_tcam_data_cmp(pe, 4, swab16(tpid2));
+
+		if (!match)
+			continue;
+
+		ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
+		if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
+			return pe;
+	}
+	kfree(pe);
+
+	return NULL;
+}
+
+/* Add or update double vlan entry */
+static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
+				     unsigned short tpid2,
+				     unsigned int port_map)
+{
+	struct mvpp2_prs_entry *pe;
+	int tid_aux, tid, ai, ret = 0;
+
+	pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
+
+	if (!pe) {
+		/* Create new tcam entry */
+		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+				MVPP2_PE_LAST_FREE_TID);
+		if (tid < 0)
+			return tid;
+
+		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+		if (!pe)
+			return -ENOMEM;
+
+		/* Set ai value for new double vlan entry */
+		ai = mvpp2_prs_double_vlan_ai_free_get(priv);
+		if (ai < 0) {
+			ret = ai;
+			goto error;
+		}
+
+		/* Get first single/triple vlan tid */
+		for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
+		     tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
+			unsigned int ri_bits;
+
+			if (!priv->prs_shadow[tid_aux].valid ||
+			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
+				continue;
+
+			pe->index = tid_aux;
+			mvpp2_prs_hw_read(priv, pe);
+			ri_bits = mvpp2_prs_sram_ri_get(pe);
+			ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
+			if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
+			    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
+				break;
+		}
+
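+		/* Conversely, the new double VLAN entry must land at a lower
+		 * TCAM index than any existing single/triple VLAN entry.
+		 */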
+		if (tid >= tid_aux) {
+			ret = -ERANGE;
+			goto error;
+		}
+
+		memset(pe, 0, sizeof(struct mvpp2_prs_entry));
+		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
+		pe->index = tid;
+
+		priv->prs_double_vlans[ai] = true;
+
+		mvpp2_prs_match_etype(pe, 0, tpid1);
+		mvpp2_prs_match_etype(pe, 4, tpid2);
+
+		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
+		/* Shift 8 bytes - skip 2 vlan tags */
+		mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
+					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+		mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
+					 MVPP2_PRS_RI_VLAN_MASK);
+		mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
+					 MVPP2_PRS_SRAM_AI_MASK);
+
+		mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
+	}
+
+	/* Update ports' mask */
+	mvpp2_prs_tcam_port_map_set(pe, port_map);
+	mvpp2_prs_hw_write(priv, pe);
+
+error:
+	kfree(pe);
+	return ret;
+}
+
+/* IPv4 header parsing for fragmentation and L4 offset */
+static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
+			       unsigned int ri, unsigned int ri_mask)
+{
+	struct mvpp2_prs_entry pe;
+	int tid;
+
+	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
+	    (proto != IPPROTO_IGMP))
+		return -EINVAL;
+
+	/* Fragmented packet */
+	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid < 0)
+		return tid;
+
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
+	pe.index = tid;
+
+	/* Set next lu to IPv4 */
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+	/* Set L4 offset */
+	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
+				  sizeof(struct iphdr) - 4,
+				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+				 MVPP2_PRS_IPV4_DIP_AI_BIT);
+	mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
+				 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
+
+	mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
+	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
+	/* Unmask all ports */
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	/* Not fragmented packet */
+	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid < 0)
+		return tid;
+
+	pe.index = tid;
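+	/* Reuse the previous entry's TCAM/SRAM setup; only ri and the
+	 * fragmentation match bytes change below.
+	 */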
+	/* Clear ri before updating */
+	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
+
+	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
+	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	return 0;
+}
+
+/* IPv4 L3 multicast or broadcast */
+static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
+{
+	struct mvpp2_prs_entry pe;
+	int mask, tid;
+
+	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid < 0)
+		return tid;
+
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
+	pe.index = tid;
+
+	switch (l3_cast) {
+	case MVPP2_PRS_L3_MULTI_CAST:
+		mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
+					     MVPP2_PRS_IPV4_MC_MASK);
+		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
+					 MVPP2_PRS_RI_L3_ADDR_MASK);
+		break;
+	case MVPP2_PRS_L3_BROAD_CAST:
+		mask = MVPP2_PRS_IPV4_BC_MASK;
+		mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
+		mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
+		mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
+		mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
+		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
+					 MVPP2_PRS_RI_L3_ADDR_MASK);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Finished: go to flowid generation */
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+
+	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+				 MVPP2_PRS_IPV4_DIP_AI_BIT);
+	/* Unmask all ports */
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	return 0;
+}
+
+/* Set entries for protocols over IPv6 */
+static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
+			       unsigned int ri, unsigned int ri_mask)
+{
+	struct mvpp2_prs_entry pe;
+	int tid;
+
+	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
+	    (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
+		return -EINVAL;
+
+	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid < 0)
+		return tid;
+
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+	pe.index = tid;
+
+	/* Finished: go to flowid generation */
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
+	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
+				  sizeof(struct ipv6hdr) - 6,
+				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+	mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
+	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+	/* Unmask all ports */
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	/* Write HW */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	return 0;
+}
+
+/* IPv6 L3 multicast entry */
+static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
+{
+	struct mvpp2_prs_entry pe;
+	int tid;
+
+	if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
+		return -EINVAL;
+
+	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid < 0)
+		return tid;
+
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+	pe.index = tid;
+
+	/* Finished: go to flowid generation */
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
+				 MVPP2_PRS_RI_L3_ADDR_MASK);
+	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+	/* Shift back to IPv6 NH */
+	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+	mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
+				     MVPP2_PRS_IPV6_MC_MASK);
+	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+	/* Unmask all ports */
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	return 0;
+}
+
+/* Parser per-port initialization */
+static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
+				   int lu_max, int offset)
+{
+	u32 val;
+
+	/* Set lookup ID */
+	val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
+	val &= ~MVPP2_PRS_PORT_LU_MASK(port);
+	val |=  MVPP2_PRS_PORT_LU_VAL(port, lu_first);
+	mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
+
+	/* Set maximum number of loops for packet received from port */
+	val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
+	val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
+	val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
+	mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
+
+	/* Set initial offset for packet header extraction for the first
+	 * searching loop
+	 */
+	val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
+	val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
+	val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
+	mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
+}
+
+/* Default flow entries initialization for all ports */
+static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
+{
+	struct mvpp2_prs_entry pe;
+	int port;
+
+	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
+		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
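+		/* One default flow entry per port, allocated downwards from
+		 * MVPP2_PE_FIRST_DEFAULT_FLOW
+		 */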
+		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
+
+		/* Mask all ports */
+		mvpp2_prs_tcam_port_map_set(&pe, 0);
+
+		/* Set flow ID */
+		mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
+		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
+
+		/* Update shadow table and hw entry */
+		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
+		mvpp2_prs_hw_write(priv, &pe);
+	}
+}
+
+/* Set default entry for Marvell Header field */
+static void mvpp2_prs_mh_init(struct mvpp2 *priv)
+{
+	struct mvpp2_prs_entry pe;
+
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+
+	pe.index = MVPP2_PE_MH_DEFAULT;
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
+	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
+				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
+
+	/* Unmask all ports */
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
+	mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Set default entries (placeholders) for promiscuous, non-promiscuous and
+ * multicast MAC addresses
+ */
+static void mvpp2_prs_mac_init(struct mvpp2 *priv)
+{
+	struct mvpp2_prs_entry pe;
+
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+
+	/* Non-promiscuous mode for all ports - DROP unknown packets */
+	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
+				 MVPP2_PRS_RI_DROP_MASK);
+	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+
+	/* Unmask all ports */
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	/* Placeholders only - no ports */
+	mvpp2_prs_mac_drop_all_set(priv, 0, false);
+	mvpp2_prs_mac_promisc_set(priv, 0, false);
+	mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
+	mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
+}
+
+/* Set default entries for various types of dsa packets */
+static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
+{
+	struct mvpp2_prs_entry pe;
+
+	/* Untagged EDSA entry - placeholder */
+	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
+			      MVPP2_PRS_EDSA);
+
+	/* Tagged EDSA entry - placeholder */
+	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+
+	/* Untagged DSA entry - placeholder */
+	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
+			      MVPP2_PRS_DSA);
+
+	/* Tagged DSA entry - placeholder */
+	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+
+	/* Untagged EDSA ethertype entry - placeholder */
+	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
+					MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+
+	/* Tagged EDSA ethertype entry - placeholder */
+	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
+					MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+
+	/* Untagged DSA ethertype entry */
+	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
+					MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
+
+	/* Tagged DSA ethertype entry */
+	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
+					MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+
+	/* Set default entry in case no DSA or EDSA tag is found */
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
+	pe.index = MVPP2_PE_DSA_DEFAULT;
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+
+	/* Shift 0 bytes */
+	mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+
+	/* Clear all sram ai bits for next iteration */
+	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+
+	/* Unmask all ports */
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Match basic ethertypes */
+static int mvpp2_prs_etype_init(struct mvpp2 *priv)
+{
+	struct mvpp2_prs_entry pe;
+	int tid;
+
+	/* Ethertype: PPPoE */
+	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid < 0)
+		return tid;
+
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+	pe.index = tid;
+
+	mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);
+
+	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
+				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
+				 MVPP2_PRS_RI_PPPOE_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+	priv->prs_shadow[pe.index].finish = false;
+	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
+				MVPP2_PRS_RI_PPPOE_MASK);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	/* Ethertype: ARP */
+	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid < 0)
+		return tid;
+
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+	pe.index = tid;
+
+	mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);
+
+	/* Generate flow in the next iteration */
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
+				 MVPP2_PRS_RI_L3_PROTO_MASK);
+	/* Set L3 offset */
+	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+				  MVPP2_ETH_TYPE_LEN,
+				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+	priv->prs_shadow[pe.index].finish = true;
+	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
+				MVPP2_PRS_RI_L3_PROTO_MASK);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	/* Ethertype: LBTD */
+	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid < 0)
+		return tid;
+
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+	pe.index = tid;
+
+	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
+
+	/* Generate flow in the next iteration */
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
+				 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
+				 MVPP2_PRS_RI_CPU_CODE_MASK |
+				 MVPP2_PRS_RI_UDF3_MASK);
+	/* Set L3 offset */
+	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+				  MVPP2_ETH_TYPE_LEN,
+				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+	priv->prs_shadow[pe.index].finish = true;
+	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
+				MVPP2_PRS_RI_UDF3_RX_SPECIAL,
+				MVPP2_PRS_RI_CPU_CODE_MASK |
+				MVPP2_PRS_RI_UDF3_MASK);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	/* Ethertype: IPv4 without options */
+	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid < 0)
+		return tid;
+
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+	pe.index = tid;
+
+	mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
+	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
+				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
+				     MVPP2_PRS_IPV4_HEAD_MASK |
+				     MVPP2_PRS_IPV4_IHL_MASK);
+
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
+				 MVPP2_PRS_RI_L3_PROTO_MASK);
+	/* Skip eth_type + 4 bytes of IP header */
+	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+	/* Set L3 offset */
+	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+				  MVPP2_ETH_TYPE_LEN,
+				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+	priv->prs_shadow[pe.index].finish = false;
+	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
+				MVPP2_PRS_RI_L3_PROTO_MASK);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	/* Ethertype: IPv4 with options */
+	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid < 0)
+		return tid;
+
+	pe.index = tid;
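+	/* Reuse the previous IPv4 entry; drop the IHL match and mark ri as
+	 * IPv4 with options below.
+	 */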
+
+	/* Clear tcam data before updating */
+	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
+	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
+
+	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
+				     MVPP2_PRS_IPV4_HEAD,
+				     MVPP2_PRS_IPV4_HEAD_MASK);
+
+	/* Clear ri before updating */
+	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
+				 MVPP2_PRS_RI_L3_PROTO_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+	priv->prs_shadow[pe.index].finish = false;
+	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
+				MVPP2_PRS_RI_L3_PROTO_MASK);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	/* Ethertype: IPv6 without options */
+	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid < 0)
+		return tid;
+
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+	pe.index = tid;
+
+	mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);
+
+	/* Skip DIP of IPV6 header */
+	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
+				 MVPP2_MAX_L3_ADDR_SIZE,
+				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
+				 MVPP2_PRS_RI_L3_PROTO_MASK);
+	/* Set L3 offset */
+	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+				  MVPP2_ETH_TYPE_LEN,
+				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+	priv->prs_shadow[pe.index].finish = false;
+	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
+				MVPP2_PRS_RI_L3_PROTO_MASK);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	/* Default entry for MVPP2_PRS_LU_L2 - unknown ethertype */
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+	pe.index = MVPP2_PE_ETH_TYPE_UN;
+
+	/* Unmask all ports */
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	/* Generate flow in the next iteration */
+	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
+				 MVPP2_PRS_RI_L3_PROTO_MASK);
+	/* Set L3 offset even if it's unknown L3 */
+	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+				  MVPP2_ETH_TYPE_LEN,
+				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+	priv->prs_shadow[pe.index].finish = true;
+	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
+				MVPP2_PRS_RI_L3_PROTO_MASK);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	return 0;
+}
+
+/* Configure vlan entries and detect up to 2 successive VLAN tags.
+ * Possible options:
+ * 0x8100, 0x88A8
+ * 0x8100, 0x8100
+ * 0x8100
+ * 0x88A8
+ */
+static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
+{
+	struct mvpp2_prs_entry pe;
+	int err;
+
+	priv->prs_double_vlans = devm_kcalloc(&pdev->dev,
+					      MVPP2_PRS_DBL_VLANS_MAX,
+					      sizeof(bool), GFP_KERNEL);
+	if (!priv->prs_double_vlans)
+		return -ENOMEM;
+
+	/* Double VLAN: 0x8100, 0x88A8 */
+	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
+					MVPP2_PRS_PORT_MASK);
+	if (err)
+		return err;
+
+	/* Double VLAN: 0x8100, 0x8100 */
+	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
+					MVPP2_PRS_PORT_MASK);
+	if (err)
+		return err;
+
+	/* Single VLAN: 0x88a8 */
+	err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
+				 MVPP2_PRS_PORT_MASK);
+	if (err)
+		return err;
+
+	/* Single VLAN: 0x8100 */
+	err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
+				 MVPP2_PRS_PORT_MASK);
+	if (err)
+		return err;
+
+	/* Set default double vlan entry */
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+	pe.index = MVPP2_PE_VLAN_DBL;
+
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+	/* Clear ai for next iterations */
+	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
+				 MVPP2_PRS_RI_VLAN_MASK);
+
+	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
+				 MVPP2_PRS_DBL_VLAN_AI_BIT);
+	/* Unmask all ports */
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	/* Set default vlan none entry */
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+	pe.index = MVPP2_PE_VLAN_NONE;
+
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
+				 MVPP2_PRS_RI_VLAN_MASK);
+
+	/* Unmask all ports */
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	return 0;
+}
+
+/* Set entries for PPPoE ethertype */
+static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
+{
+	struct mvpp2_prs_entry pe;
+	int tid;
+
+	/* IPv4 over PPPoE with options */
+	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid < 0)
+		return tid;
+
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
+	pe.index = tid;
+
+	mvpp2_prs_match_etype(&pe, 0, PPP_IP);
+
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
+				 MVPP2_PRS_RI_L3_PROTO_MASK);
+	/* Skip eth_type + 4 bytes of IP header */
+	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+	/* Set L3 offset */
+	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+				  MVPP2_ETH_TYPE_LEN,
+				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	/* IPv4 over PPPoE without options */
+	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid < 0)
+		return tid;
+
+	pe.index = tid;
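+	/* Reuse the previous entry; add the IHL match and mark ri as IPv4
+	 * without options below.
+	 */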
+
+	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
+				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
+				     MVPP2_PRS_IPV4_HEAD_MASK |
+				     MVPP2_PRS_IPV4_IHL_MASK);
+
+	/* Clear ri before updating */
+	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
+				 MVPP2_PRS_RI_L3_PROTO_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	/* IPv6 over PPPoE */
+	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid < 0)
+		return tid;
+
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
+	pe.index = tid;
+
+	mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);
+
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
+				 MVPP2_PRS_RI_L3_PROTO_MASK);
+	/* Skip eth_type + 4 bytes of IPv6 header */
+	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+	/* Set L3 offset */
+	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+				  MVPP2_ETH_TYPE_LEN,
+				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	/* Non-IP over PPPoE */
+	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid < 0)
+		return tid;
+
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
+	pe.index = tid;
+
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
+				 MVPP2_PRS_RI_L3_PROTO_MASK);
+
+	/* Finished: go to flowid generation */
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+	/* Set L3 offset even if it's unknown L3 */
+	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+				  MVPP2_ETH_TYPE_LEN,
+				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	return 0;
+}
+
+/* Initialize entries for IPv4 */
+static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
+{
+	struct mvpp2_prs_entry pe;
+	int err;
+
+	/* Set entries for TCP, UDP and IGMP over IPv4 */
+	err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
+				  MVPP2_PRS_RI_L4_PROTO_MASK);
+	if (err)
+		return err;
+
+	err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
+				  MVPP2_PRS_RI_L4_PROTO_MASK);
+	if (err)
+		return err;
+
+	err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
+				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
+				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
+				  MVPP2_PRS_RI_CPU_CODE_MASK |
+				  MVPP2_PRS_RI_UDF3_MASK);
+	if (err)
+		return err;
+
+	/* IPv4 Broadcast */
+	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
+	if (err)
+		return err;
+
+	/* IPv4 Multicast */
+	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
+	if (err)
+		return err;
+
+	/* Default IPv4 entry for unknown protocols */
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
+	pe.index = MVPP2_PE_IP4_PROTO_UN;
+
+	/* Set next lu to IPv4 */
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+	/* Set L4 offset */
+	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
+				  sizeof(struct iphdr) - 4,
+				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+				 MVPP2_PRS_IPV4_DIP_AI_BIT);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
+				 MVPP2_PRS_RI_L4_PROTO_MASK);
+
+	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
+	/* Unmask all ports */
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	/* Default IPv4 entry for unicast address */
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
+	pe.index = MVPP2_PE_IP4_ADDR_UN;
+
+	/* Finished: go to flowid generation */
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
+				 MVPP2_PRS_RI_L3_ADDR_MASK);
+
+	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+				 MVPP2_PRS_IPV4_DIP_AI_BIT);
+	/* Unmask all ports */
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	return 0;
+}
+
+/* Initialize entries for IPv6 */
+static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
+{
+	struct mvpp2_prs_entry pe;
+	int tid, err;
+
+	/* Set entries for TCP, UDP and ICMP over IPv6 */
+	err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
+				  MVPP2_PRS_RI_L4_TCP,
+				  MVPP2_PRS_RI_L4_PROTO_MASK);
+	if (err)
+		return err;
+
+	err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
+				  MVPP2_PRS_RI_L4_UDP,
+				  MVPP2_PRS_RI_L4_PROTO_MASK);
+	if (err)
+		return err;
+
+	err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
+				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
+				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
+				  MVPP2_PRS_RI_CPU_CODE_MASK |
+				  MVPP2_PRS_RI_UDF3_MASK);
+	if (err)
+		return err;
+
+	/* IPv4 is the last header. This is similar to the 6 (TCP) and
+	 * 17 (UDP) cases.
+	 * Result Info: UDF7=1, DS lite
+	 */
+	err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
+				  MVPP2_PRS_RI_UDF7_IP6_LITE,
+				  MVPP2_PRS_RI_UDF7_MASK);
+	if (err)
+		return err;
+
+	/* IPv6 multicast */
+	err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
+	if (err)
+		return err;
+
+	/* Entry for checking hop limit */
+	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid < 0)
+		return tid;
+
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+	pe.index = tid;
+
+	/* Finished: go to flowid generation */
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
+				 MVPP2_PRS_RI_DROP_MASK,
+				 MVPP2_PRS_RI_L3_PROTO_MASK |
+				 MVPP2_PRS_RI_DROP_MASK);
+
+	mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
+	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	/* Default IPv6 entry for unknown protocols */
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+	pe.index = MVPP2_PE_IP6_PROTO_UN;
+
+	/* Finished: go to flowid generation */
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
+				 MVPP2_PRS_RI_L4_PROTO_MASK);
+	/* Set L4 offset relative to the current position */
+	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
+				  sizeof(struct ipv6hdr) - 4,
+				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+	/* Unmask all ports */
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	/* Default IPv6 entry for unknown ext protocols */
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+	pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
+
+	/* Finished: go to flowid generation */
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
+				 MVPP2_PRS_RI_L4_PROTO_MASK);
+
+	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
+				 MVPP2_PRS_IPV6_EXT_AI_BIT);
+	/* Unmask all ports */
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	/* Default IPv6 entry for unicast address */
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+	pe.index = MVPP2_PE_IP6_ADDR_UN;
+
+	/* Finished: go to IPv6 again */
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
+				 MVPP2_PRS_RI_L3_ADDR_MASK);
+	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+	/* Shift back to IPV6 NH */
+	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+	/* Unmask all ports */
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	return 0;
+}
+
+/* Parser default initialization */
+static int mvpp2_prs_default_init(struct platform_device *pdev,
+				  struct mvpp2 *priv)
+{
+	int err, index, i;
+
+	/* Enable tcam table */
+	mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
+
+	/* Clear all tcam and sram entries */
+	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
+		mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
+		for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
+			mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
+
+		mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
+		for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
+			mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
+	}
+
+	/* Invalidate all tcam entries */
+	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
+		mvpp2_prs_hw_inv(priv, index);
+
+	priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
+					sizeof(struct mvpp2_prs_shadow),
+					GFP_KERNEL);
+	if (!priv->prs_shadow)
+		return -ENOMEM;
+
+	/* Always start from lookup = 0 */
+	for (index = 0; index < MVPP2_MAX_PORTS; index++)
+		mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
+				       MVPP2_PRS_PORT_LU_MAX, 0);
+
+	mvpp2_prs_def_flow_init(priv);
+
+	mvpp2_prs_mh_init(priv);
+
+	mvpp2_prs_mac_init(priv);
+
+	mvpp2_prs_dsa_init(priv);
+
+	err = mvpp2_prs_etype_init(priv);
+	if (err)
+		return err;
+
+	err = mvpp2_prs_vlan_init(pdev, priv);
+	if (err)
+		return err;
+
+	err = mvpp2_prs_pppoe_init(priv);
+	if (err)
+		return err;
+
+	err = mvpp2_prs_ip6_init(priv);
+	if (err)
+		return err;
+
+	err = mvpp2_prs_ip4_init(priv);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+/* Compare MAC DA with tcam entry data */
+static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
+				       const u8 *da, unsigned char *mask)
+{
+	unsigned char tcam_byte, tcam_mask;
+	int index;
+
+	for (index = 0; index < ETH_ALEN; index++) {
+		mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
+		if (tcam_mask != mask[index])
+			return false;
+
+		if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
+			return false;
+	}
+
+	return true;
+}
+
+/* Find tcam entry with matched pair <MAC DA, port> */
+static struct mvpp2_prs_entry *
+mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
+			    unsigned char *mask, int udf_type)
+{
+	struct mvpp2_prs_entry *pe;
+	int tid;
+
+	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+	if (!pe)
+		return NULL;
+	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
+
+	/* Go through all entries with MVPP2_PRS_LU_MAC */
+	for (tid = MVPP2_PE_FIRST_FREE_TID;
+	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
+		unsigned int entry_pmap;
+
+		if (!priv->prs_shadow[tid].valid ||
+		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
+		    (priv->prs_shadow[tid].udf != udf_type))
+			continue;
+
+		pe->index = tid;
+		mvpp2_prs_hw_read(priv, pe);
+		entry_pmap = mvpp2_prs_tcam_port_map_get(pe);
+
+		if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
+		    entry_pmap == pmap)
+			return pe;
+	}
+	kfree(pe);
+
+	return NULL;
+}
+
+/* Update parser's mac da entry */
+static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
+				   const u8 *da, bool add)
+{
+	struct mvpp2_prs_entry *pe;
+	unsigned int pmap, len, ri;
+	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+	int tid;
+
+	/* Scan TCAM and see if entry with this <MAC DA, port> already exists */
+	pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
+					 MVPP2_PRS_UDF_MAC_DEF);
+
+	/* No such entry */
+	if (!pe) {
+		if (!add)
+			return 0;
+
+		/* Create new TCAM entry */
+		/* Find first range mac entry */
+		for (tid = MVPP2_PE_FIRST_FREE_TID;
+		     tid <= MVPP2_PE_LAST_FREE_TID; tid++)
+			if (priv->prs_shadow[tid].valid &&
+			    (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
+			    (priv->prs_shadow[tid].udf ==
+						       MVPP2_PRS_UDF_MAC_RANGE))
+				break;
+
+		/* Go through all entries from first to last */
+		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+						tid - 1);
+		if (tid < 0)
+			return tid;
+
+		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+		if (!pe)
+			return -ENOMEM;
+		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
+		pe->index = tid;
+
+		/* Mask all ports */
+		mvpp2_prs_tcam_port_map_set(pe, 0);
+	}
+
+	/* Update port mask */
+	mvpp2_prs_tcam_port_set(pe, port, add);
+
+	/* Invalidate the entry if no ports are left enabled */
+	pmap = mvpp2_prs_tcam_port_map_get(pe);
+	if (pmap == 0) {
+		if (add) {
+			kfree(pe);
+			return -EINVAL;
+		}
+		mvpp2_prs_hw_inv(priv, pe->index);
+		priv->prs_shadow[pe->index].valid = false;
+		kfree(pe);
+		return 0;
+	}
+
+	/* Continue - set next lookup */
+	mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
+
+	/* Set match on DA */
+	len = ETH_ALEN;
+	while (len--)
+		mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
+
+	/* Set result info bits */
+	if (is_broadcast_ether_addr(da))
+		ri = MVPP2_PRS_RI_L2_BCAST;
+	else if (is_multicast_ether_addr(da))
+		ri = MVPP2_PRS_RI_L2_MCAST;
+	else
+		ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;
+
+	mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
+				 MVPP2_PRS_RI_MAC_ME_MASK);
+	mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
+				MVPP2_PRS_RI_MAC_ME_MASK);
+
+	/* Shift to ethertype */
+	mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
+				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+	/* Update shadow table and hw entry */
+	priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
+	mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
+	mvpp2_prs_hw_write(priv, pe);
+
+	kfree(pe);
+
+	return 0;
+}
+
+static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
+{
+	struct mvpp2_port *port = netdev_priv(dev);
+	int err;
+
+	/* Remove old parser entry */
+	err = mvpp2_prs_mac_da_accept(port->priv, port->id, dev->dev_addr,
+				      false);
+	if (err)
+		return err;
+
+	/* Add new parser entry */
+	err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
+	if (err)
+		return err;
+
+	/* Set addr in the device */
+	ether_addr_copy(dev->dev_addr, da);
+
+	return 0;
+}
+
+/* Delete all port's multicast simple (not range) entries */
+static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port)
+{
+	struct mvpp2_prs_entry pe;
+	int index, tid;
+
+	for (tid = MVPP2_PE_FIRST_FREE_TID;
+	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
+		unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
+
+		if (!priv->prs_shadow[tid].valid ||
+		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
+		    (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
+			continue;
+
+		/* Only simple mac entries */
+		pe.index = tid;
+		mvpp2_prs_hw_read(priv, &pe);
+
+		/* Read mac addr from entry */
+		for (index = 0; index < ETH_ALEN; index++)
+			mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
+						     &da_mask[index]);
+
+		if (is_multicast_ether_addr(da) && !is_broadcast_ether_addr(da))
+			/* Delete this entry */
+			mvpp2_prs_mac_da_accept(priv, port, da, false);
+	}
+}
+
+static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
+{
+	switch (type) {
+	case MVPP2_TAG_TYPE_EDSA:
+		/* Add port to EDSA entries */
+		mvpp2_prs_dsa_tag_set(priv, port, true,
+				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+		mvpp2_prs_dsa_tag_set(priv, port, true,
+				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+		/* Remove port from DSA entries */
+		mvpp2_prs_dsa_tag_set(priv, port, false,
+				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+		mvpp2_prs_dsa_tag_set(priv, port, false,
+				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
+		break;
+
+	case MVPP2_TAG_TYPE_DSA:
+		/* Add port to DSA entries */
+		mvpp2_prs_dsa_tag_set(priv, port, true,
+				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+		mvpp2_prs_dsa_tag_set(priv, port, true,
+				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
+		/* Remove port from EDSA entries */
+		mvpp2_prs_dsa_tag_set(priv, port, false,
+				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+		mvpp2_prs_dsa_tag_set(priv, port, false,
+				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+		break;
+
+	case MVPP2_TAG_TYPE_MH:
+	case MVPP2_TAG_TYPE_NONE:
+		/* Remove port from EDSA and DSA entries */
+		mvpp2_prs_dsa_tag_set(priv, port, false,
+				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+		mvpp2_prs_dsa_tag_set(priv, port, false,
+				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
+		mvpp2_prs_dsa_tag_set(priv, port, false,
+				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+		mvpp2_prs_dsa_tag_set(priv, port, false,
+				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+		break;
+
+	default:
+		if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Set prs flow for the port */
+static int mvpp2_prs_def_flow(struct mvpp2_port *port)
+{
+	struct mvpp2_prs_entry *pe;
+	int tid;
+
+	pe = mvpp2_prs_flow_find(port->priv, port->id);
+
+	/* No such entry exists */
+	if (!pe) {
+		/* Go through all entries from last to first */
+		tid = mvpp2_prs_tcam_first_free(port->priv,
+						MVPP2_PE_LAST_FREE_TID,
+						MVPP2_PE_FIRST_FREE_TID);
+		if (tid < 0)
+			return tid;
+
+		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+		if (!pe)
+			return -ENOMEM;
+
+		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
+		pe->index = tid;
+
+		/* Set flow ID */
+		mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
+		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
+
+		/* Update shadow table */
+		mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
+	}
+
+	mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
+	mvpp2_prs_hw_write(port->priv, pe);
+	kfree(pe);
+
+	return 0;
+}
+
+/* Classifier configuration routines */
+
+/* Update classification flow table registers */
+static void mvpp2_cls_flow_write(struct mvpp2 *priv,
+				 struct mvpp2_cls_flow_entry *fe)
+{
+	mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
+	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG,  fe->data[0]);
+	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG,  fe->data[1]);
+	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG,  fe->data[2]);
+}
+
+/* Update classification lookup table register */
+static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
+				   struct mvpp2_cls_lookup_entry *le)
+{
+	u32 val;
+
+	val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
+	mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
+	mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
+}
+
+/* Classifier default initialization */
+static void mvpp2_cls_init(struct mvpp2 *priv)
+{
+	struct mvpp2_cls_lookup_entry le;
+	struct mvpp2_cls_flow_entry fe;
+	int index;
+
+	/* Enable classifier */
+	mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
+
+	/* Clear classifier flow table */
+	memset(&fe.data, 0, sizeof(fe.data));
+	for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
+		fe.index = index;
+		mvpp2_cls_flow_write(priv, &fe);
+	}
+
+	/* Clear classifier lookup table */
+	le.data = 0;
+	for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
+		le.lkpid = index;
+		le.way = 0;
+		mvpp2_cls_lookup_write(priv, &le);
+
+		le.way = 1;
+		mvpp2_cls_lookup_write(priv, &le);
+	}
+}
+
+static void mvpp2_cls_port_config(struct mvpp2_port *port)
+{
+	struct mvpp2_cls_lookup_entry le;
+	u32 val;
+
+	/* Set way for the port */
+	val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
+	val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
+	mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
+
+	/* Pick the entry to be accessed in lookup ID decoding table
+	 * according to the way and lkpid.
+	 */
+	le.lkpid = port->id;
+	le.way = 0;
+	le.data = 0;
+
+	/* Set initial CPU queue for receiving packets */
+	le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
+	le.data |= port->first_rxq;
+
+	/* Disable classification engines */
+	le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
+
+	/* Update lookup ID table entry */
+	mvpp2_cls_lookup_write(port->priv, &le);
+}
+
+/* Set CPU queue number for oversize packets */
+static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
+{
+	u32 val;
+
+	mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
+		    port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
+
+	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
+		    (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
+
+	val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
+	val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
+	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
+}
+
+/* Buffer Manager configuration routines */
+
+/* Create pool */
+static int mvpp2_bm_pool_create(struct platform_device *pdev,
+				struct mvpp2 *priv,
+				struct mvpp2_bm_pool *bm_pool, int size)
+{
+	int size_bytes;
+	u32 val;
+
+	size_bytes = sizeof(u32) * size;
+	bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
+						&bm_pool->phys_addr,
+						GFP_KERNEL);
+	if (!bm_pool->virt_addr)
+		return -ENOMEM;
+
+	if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVPP2_BM_POOL_PTR_ALIGN)) {
+		dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
+				  bm_pool->phys_addr);
+		dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
+			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
+		return -ENOMEM;
+	}
+
+	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
+		    bm_pool->phys_addr);
+	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
+
+	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
+	val |= MVPP2_BM_START_MASK;
+	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
+
+	bm_pool->type = MVPP2_BM_FREE;
+	bm_pool->size = size;
+	bm_pool->pkt_size = 0;
+	bm_pool->buf_num = 0;
+	atomic_set(&bm_pool->in_use, 0);
+
+	return 0;
+}
+
+/* Set pool buffer size */
+static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
+				      struct mvpp2_bm_pool *bm_pool,
+				      int buf_size)
+{
+	u32 val;
+
+	bm_pool->buf_size = buf_size;
+
+	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
+	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
+}
+
+/* Free all buffers from the pool */
+static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
+			       struct mvpp2_bm_pool *bm_pool)
+{
+	int i;
+
+	for (i = 0; i < bm_pool->buf_num; i++) {
+		dma_addr_t buf_phys_addr;
+		u32 vaddr;
+
+		/* Get buffer virtual address (indirect access) */
+		buf_phys_addr = mvpp2_read(priv,
+					   MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
+		vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG);
+
+		dma_unmap_single(dev, buf_phys_addr,
+				 bm_pool->buf_size, DMA_FROM_DEVICE);
+
+		if (!vaddr)
+			break;
+		dev_kfree_skb_any((struct sk_buff *)vaddr);
+	}
+
+	/* Update BM driver with number of buffers removed from pool */
+	bm_pool->buf_num -= i;
+}
+
+/* Cleanup pool */
+static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
+				 struct mvpp2 *priv,
+				 struct mvpp2_bm_pool *bm_pool)
+{
+	u32 val;
+
+	mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool);
+	if (bm_pool->buf_num) {
+		WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
+		return 0;
+	}
+
+	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
+	val |= MVPP2_BM_STOP_MASK;
+	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
+
+	dma_free_coherent(&pdev->dev, sizeof(u32) * bm_pool->size,
+			  bm_pool->virt_addr,
+			  bm_pool->phys_addr);
+	return 0;
+}
+
+static int mvpp2_bm_pools_init(struct platform_device *pdev,
+			       struct mvpp2 *priv)
+{
+	int i, err, size;
+	struct mvpp2_bm_pool *bm_pool;
+
+	/* Create all pools with maximum size */
+	size = MVPP2_BM_POOL_SIZE_MAX;
+	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
+		bm_pool = &priv->bm_pools[i];
+		bm_pool->id = i;
+		err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
+		if (err)
+			goto err_unroll_pools;
+		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
+	}
+	return 0;
+
+err_unroll_pools:
+	dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
+	for (i = i - 1; i >= 0; i--)
+		mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
+	return err;
+}
+
+static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
+{
+	int i, err;
+
+	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
+		/* Mask BM all interrupts */
+		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
+		/* Clear BM cause register */
+		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
+	}
+
+	/* Allocate and initialize BM pools */
+	priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
+				     sizeof(struct mvpp2_bm_pool), GFP_KERNEL);
+	if (!priv->bm_pools)
+		return -ENOMEM;
+
+	err = mvpp2_bm_pools_init(pdev, priv);
+	if (err < 0)
+		return err;
+	return 0;
+}
+
+/* Attach long pool to rxq */
+static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
+				    int lrxq, int long_pool)
+{
+	u32 val;
+	int prxq;
+
+	/* Get queue physical ID */
+	prxq = port->rxqs[lrxq]->id;
+
+	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
+	val &= ~MVPP2_RXQ_POOL_LONG_MASK;
+	val |= ((long_pool << MVPP2_RXQ_POOL_LONG_OFFS) &
+		    MVPP2_RXQ_POOL_LONG_MASK);
+
+	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
+}
+
+/* Attach short pool to rxq */
+static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
+				     int lrxq, int short_pool)
+{
+	u32 val;
+	int prxq;
+
+	/* Get queue physical ID */
+	prxq = port->rxqs[lrxq]->id;
+
+	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
+	val &= ~MVPP2_RXQ_POOL_SHORT_MASK;
+	val |= ((short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) &
+		    MVPP2_RXQ_POOL_SHORT_MASK);
+
+	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
+}
+
+/* Allocate skb for BM pool */
+static struct sk_buff *mvpp2_skb_alloc(struct mvpp2_port *port,
+				       struct mvpp2_bm_pool *bm_pool,
+				       dma_addr_t *buf_phys_addr,
+				       gfp_t gfp_mask)
+{
+	struct sk_buff *skb;
+	dma_addr_t phys_addr;
+
+	skb = __dev_alloc_skb(bm_pool->pkt_size, gfp_mask);
+	if (!skb)
+		return NULL;
+
+	phys_addr = dma_map_single(port->dev->dev.parent, skb->head,
+				   MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
+				    DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(port->dev->dev.parent, phys_addr))) {
+		dev_kfree_skb_any(skb);
+		return NULL;
+	}
+	*buf_phys_addr = phys_addr;
+
+	return skb;
+}
+
+/* Set pool number in a BM cookie */
+static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
+{
+	u32 bm;
+
+	bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
+	bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);
+
+	return bm;
+}
+
+/* Get pool number from a BM cookie */
+static inline int mvpp2_bm_cookie_pool_get(u32 cookie)
+{
+	return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
+}
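+
+/* Illustrative round trip: the pool number stored by
+ * mvpp2_bm_cookie_pool_set() is recovered unchanged, e.g.
+ * mvpp2_bm_cookie_pool_get(mvpp2_bm_cookie_pool_set(0, 3)) == 3
+ */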
+
+/* Release buffer to BM */
+static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
+				     u32 buf_phys_addr, u32 buf_virt_addr)
+{
+	mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_virt_addr);
+	mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_phys_addr);
+}
+
+/* Release multicast buffer */
+static void mvpp2_bm_pool_mc_put(struct mvpp2_port *port, int pool,
+				 u32 buf_phys_addr, u32 buf_virt_addr,
+				 int mc_id)
+{
+	u32 val = 0;
+
+	val |= (mc_id & MVPP2_BM_MC_ID_MASK);
+	mvpp2_write(port->priv, MVPP2_BM_MC_RLS_REG, val);
+
+	mvpp2_bm_pool_put(port, pool,
+			  buf_phys_addr | MVPP2_BM_PHY_RLS_MC_BUFF_MASK,
+			  buf_virt_addr);
+}
+
+/* Refill BM pool */
+static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
+			      u32 phys_addr, u32 cookie)
+{
+	int pool = mvpp2_bm_cookie_pool_get(bm);
+
+	mvpp2_bm_pool_put(port, pool, phys_addr, cookie);
+}
+
+/* Allocate buffers for the pool */
+static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
+			     struct mvpp2_bm_pool *bm_pool, int buf_num)
+{
+	struct sk_buff *skb;
+	int i, buf_size, total_size;
+	u32 bm;
+	dma_addr_t phys_addr;
+
+	buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
+	total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
+
+	if (buf_num < 0 ||
+	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
+		netdev_err(port->dev,
+			   "cannot allocate %d buffers for pool %d\n",
+			   buf_num, bm_pool->id);
+		return 0;
+	}
+
+	bm = mvpp2_bm_cookie_pool_set(0, bm_pool->id);
+	for (i = 0; i < buf_num; i++) {
+		skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_KERNEL);
+		if (!skb)
+			break;
+
+		mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb);
+	}
+
+	/* Update BM driver with number of buffers added to pool */
+	bm_pool->buf_num += i;
+	bm_pool->in_use_thresh = bm_pool->buf_num / 4;
+
+	netdev_dbg(port->dev,
+		   "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
+		   bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
+		   bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
+
+	netdev_dbg(port->dev,
+		   "%s pool %d: %d of %d buffers added\n",
+		   bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
+		   bm_pool->id, i, buf_num);
+	return i;
+}
+
+/* Notify the driver that BM pool is being used as specific type and return the
+ * pool pointer on success
+ */
+static struct mvpp2_bm_pool *
+mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
+		  int pkt_size)
+{
+	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
+	int num;
+
+	if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
+		netdev_err(port->dev, "mixing pool types is forbidden\n");
+		return NULL;
+	}
+
+	if (new_pool->type == MVPP2_BM_FREE)
+		new_pool->type = type;
+
+	/* Allocate buffers in case BM pool is used as long pool, but packet
+	 * size doesn't match MTU or BM pool hasn't been used yet
+	 */
+	if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
+	    (new_pool->pkt_size == 0)) {
+		int pkts_num;
+
+		/* Set default buffer number or free all the buffers in case
+		 * the pool is not empty
+		 */
+		pkts_num = new_pool->buf_num;
+		if (pkts_num == 0)
+			pkts_num = type == MVPP2_BM_SWF_LONG ?
+				   MVPP2_BM_LONG_BUF_NUM :
+				   MVPP2_BM_SHORT_BUF_NUM;
+		else
+			mvpp2_bm_bufs_free(port->dev->dev.parent,
+					   port->priv, new_pool);
+
+		new_pool->pkt_size = pkt_size;
+
+		/* Allocate buffers for this pool */
+		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
+		if (num != pkts_num) {
+			WARN(1, "pool %d: %d of %d allocated\n",
+			     new_pool->id, num, pkts_num);
+			return NULL;
+		}
+	}
+
+	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
+				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
+
+	return new_pool;
+}
+
+/* Initialize pools for swf */
+static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
+{
+	int rxq;
+
+	if (!port->pool_long) {
+		port->pool_long =
+		       mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
+					 MVPP2_BM_SWF_LONG,
+					 port->pkt_size);
+		if (!port->pool_long)
+			return -ENOMEM;
+
+		port->pool_long->port_map |= (1 << port->id);
+
+		for (rxq = 0; rxq < rxq_number; rxq++)
+			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
+	}
+
+	if (!port->pool_short) {
+		port->pool_short =
+			mvpp2_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL,
+					  MVPP2_BM_SWF_SHORT,
+					  MVPP2_BM_SHORT_PKT_SIZE);
+		if (!port->pool_short)
+			return -ENOMEM;
+
+		port->pool_short->port_map |= (1 << port->id);
+
+		for (rxq = 0; rxq < rxq_number; rxq++)
+			mvpp2_rxq_short_pool_set(port, rxq,
+						 port->pool_short->id);
+	}
+
+	return 0;
+}
+
+static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
+{
+	struct mvpp2_port *port = netdev_priv(dev);
+	struct mvpp2_bm_pool *port_pool = port->pool_long;
+	int num, pkts_num = port_pool->buf_num;
+	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
+
+	/* Update BM pool with new buffer size */
+	mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool);
+	if (port_pool->buf_num) {
+		WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
+		return -EIO;
+	}
+
+	port_pool->pkt_size = pkt_size;
+	num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
+	if (num != pkts_num) {
+		WARN(1, "pool %d: %d of %d allocated\n",
+		     port_pool->id, num, pkts_num);
+		return -EIO;
+	}
+
+	mvpp2_bm_pool_bufsize_set(port->priv, port_pool,
+				  MVPP2_RX_BUF_SIZE(port_pool->pkt_size));
+	dev->mtu = mtu;
+	netdev_update_features(dev);
+	return 0;
+}
+
+static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
+{
+	int cpu, cpu_mask = 0;
+
+	for_each_present_cpu(cpu)
+		cpu_mask |= 1 << cpu;
+	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
+		    MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask));
+}
+
+static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
+{
+	int cpu, cpu_mask = 0;
+
+	for_each_present_cpu(cpu)
+		cpu_mask |= 1 << cpu;
+	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
+		    MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask));
+}
+
+/* Mask the current CPU's Rx/Tx interrupts */
+static void mvpp2_interrupts_mask(void *arg)
+{
+	struct mvpp2_port *port = arg;
+
+	mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
+}
+
+/* Unmask the current CPU's Rx/Tx interrupts */
+static void mvpp2_interrupts_unmask(void *arg)
+{
+	struct mvpp2_port *port = arg;
+
+	mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
+		    (MVPP2_CAUSE_MISC_SUM_MASK |
+		     MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
+}
+
+/* Port configuration routines */
+
+static void mvpp2_port_mii_set(struct mvpp2_port *port)
+{
+	u32 val;
+
+	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
+
+	switch (port->phy_interface) {
+	case PHY_INTERFACE_MODE_SGMII:
+		val |= MVPP2_GMAC_INBAND_AN_MASK;
+		break;
+	case PHY_INTERFACE_MODE_RGMII:
+		val |= MVPP2_GMAC_PORT_RGMII_MASK;
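+		/* fall through: RGMII also needs the PCS disabled */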
+	default:
+		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
+	}
+
+	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
+}
+
+static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
+{
+	u32 val;
+
+	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+	val |= MVPP2_GMAC_FC_ADV_EN;
+	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+}
+
+static void mvpp2_port_enable(struct mvpp2_port *port)
+{
+	u32 val;
+
+	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
+	val |= MVPP2_GMAC_PORT_EN_MASK;
+	val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
+	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
+}
+
+static void mvpp2_port_disable(struct mvpp2_port *port)
+{
+	u32 val;
+
+	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
+	val &= ~(MVPP2_GMAC_PORT_EN_MASK);
+	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
+}
+
+/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
+static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
+{
+	u32 val;
+
+	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
+		    ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
+	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
+}
+
+/* Configure loopback port */
+static void mvpp2_port_loopback_set(struct mvpp2_port *port)
+{
+	u32 val;
+
+	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
+
+	if (port->speed == 1000)
+		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
+	else
+		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
+
+	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
+		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
+	else
+		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
+
+	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
+}
+
+static void mvpp2_port_reset(struct mvpp2_port *port)
+{
+	u32 val;
+
+	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
+		    ~MVPP2_GMAC_PORT_RESET_MASK;
+	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
+
+	while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
+	       MVPP2_GMAC_PORT_RESET_MASK)
+		continue;
+}
+
+/* Change maximum receive size of the port */
+static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
+{
+	u32 val;
+
+	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
+	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
+	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
+		    MVPP2_GMAC_MAX_RX_SIZE_OFFS);
+	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
+}
+
+/* Set defaults to the MVPP2 port */
+static void mvpp2_defaults_set(struct mvpp2_port *port)
+{
+	int tx_port_num, val, queue, ptxq, lrxq;
+
+	/* Configure port to loopback if needed */
+	if (port->flags & MVPP2_F_LOOPBACK)
+		mvpp2_port_loopback_set(port);
+
+	/* Update TX FIFO MIN Threshold */
+	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
+	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
+	/* Min. TX threshold must be less than minimal packet length */
+	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
+	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
+
+	/* Disable Legacy WRR, Disable EJP, Release from reset */
+	tx_port_num = mvpp2_egress_port(port);
+	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
+		    tx_port_num);
+	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
+
+	/* Close bandwidth for all queues */
+	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
+		ptxq = mvpp2_txq_phys(port->id, queue);
+		mvpp2_write(port->priv,
+			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
+	}
+
+	/* Set refill period to 1 usec, refill tokens
+	 * and bucket size to maximum
+	 */
+	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
+		    port->priv->tclk / USEC_PER_SEC);
+	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
+	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
+	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
+	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
+	mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
+	val = MVPP2_TXP_TOKEN_SIZE_MAX;
+	mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
+
+	/* Set MaximumLowLatencyPacketSize value to 256 */
+	mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
+		    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
+		    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
+
+	/* Enable Rx cache snoop */
+	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
+		queue = port->rxqs[lrxq]->id;
+		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
+		val |= MVPP2_SNOOP_PKT_SIZE_MASK |
+			   MVPP2_SNOOP_BUF_HDR_MASK;
+		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
+	}
+
+	/* By default, mask all interrupts for all present CPUs */
+	mvpp2_interrupts_disable(port);
+}
+
+/* Enable/disable receiving packets */
+static void mvpp2_ingress_enable(struct mvpp2_port *port)
+{
+	u32 val;
+	int lrxq, queue;
+
+	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
+		queue = port->rxqs[lrxq]->id;
+		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
+		val &= ~MVPP2_RXQ_DISABLE_MASK;
+		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
+	}
+}
+
+static void mvpp2_ingress_disable(struct mvpp2_port *port)
+{
+	u32 val;
+	int lrxq, queue;
+
+	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
+		queue = port->rxqs[lrxq]->id;
+		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
+		val |= MVPP2_RXQ_DISABLE_MASK;
+		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
+	}
+}
+
+/* Enable transmit via physical egress queue
+ * - HW starts taking descriptors from DRAM
+ */
+static void mvpp2_egress_enable(struct mvpp2_port *port)
+{
+	u32 qmap;
+	int queue;
+	int tx_port_num = mvpp2_egress_port(port);
+
+	/* Enable all initialized TXs. */
+	qmap = 0;
+	for (queue = 0; queue < txq_number; queue++) {
+		struct mvpp2_tx_queue *txq = port->txqs[queue];
+
+		if (txq->descs != NULL)
+			qmap |= (1 << queue);
+	}
+
+	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
+	mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
+}
+
+/* Disable transmit via physical egress queue
+ * - HW doesn't take descriptors from DRAM
+ */
+static void mvpp2_egress_disable(struct mvpp2_port *port)
+{
+	u32 reg_data;
+	int delay;
+	int tx_port_num = mvpp2_egress_port(port);
+
+	/* Issue stop command for active channels only */
+	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
+	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
+		    MVPP2_TXP_SCHED_ENQ_MASK;
+	if (reg_data != 0)
+		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
+			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
+
+	/* Wait for all Tx activity to terminate. */
+	delay = 0;
+	do {
+		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
+			netdev_warn(port->dev,
+				    "Tx stop timed out, status=0x%08x\n",
+				    reg_data);
+			break;
+		}
+		mdelay(1);
+		delay++;
+
+		/* Check port TX Command register that all
+		 * Tx queues are stopped
+		 */
+		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
+	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
+}
+
+/* Rx descriptors helper methods */
+
+/* Get number of Rx descriptors occupied by received packets */
+static inline int
+mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
+{
+	u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
+
+	return val & MVPP2_RXQ_OCCUPIED_MASK;
+}
+
+/* Update Rx queue status with the number of occupied and available
+ * Rx descriptor slots.
+ */
+static inline void
+mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
+			int used_count, int free_count)
+{
+	/* Decrement the number of used descriptors and increment the
+	 * number of free descriptors.
+	 */
+	u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
+
+	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
+}
+
+/* Get pointer to next RX descriptor to be processed by SW */
+static inline struct mvpp2_rx_desc *
+mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
+{
+	int rx_desc = rxq->next_desc_to_proc;
+
+	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
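+	/* Warm the cache with the descriptor we expect to process next */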
+	prefetch(rxq->descs + rxq->next_desc_to_proc);
+	return rxq->descs + rx_desc;
+}
+
+/* Set rx queue offset */
+static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
+				 int prxq, int offset)
+{
+	u32 val;
+
+	/* Convert offset from bytes to units of 32 bytes */
+	offset = offset >> 5;
+
+	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
+	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
+
+	/* Offset is now in units of 32 bytes */
+	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
+		    MVPP2_RXQ_PACKET_OFFSET_MASK);
+
+	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
+}
+
+/* Obtain BM cookie information from descriptor */
+static u32 mvpp2_bm_cookie_build(struct mvpp2_rx_desc *rx_desc)
+{
+	int pool = (rx_desc->status & MVPP2_RXD_BM_POOL_ID_MASK) >>
+		   MVPP2_RXD_BM_POOL_ID_OFFS;
+	int cpu = smp_processor_id();
+
+	return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
+	       ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
+}
+
+/* Tx descriptors helper methods */
+
+/* Get number of Tx descriptors waiting to be transmitted by HW */
+static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
+				       struct mvpp2_tx_queue *txq)
+{
+	u32 val;
+
+	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
+	val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
+
+	return val & MVPP2_TXQ_PENDING_MASK;
+}
+
+/* Get pointer to next Tx descriptor to be processed (send) by HW */
+static struct mvpp2_tx_desc *
+mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
+{
+	int tx_desc = txq->next_desc_to_proc;
+
+	txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
+	return txq->descs + tx_desc;
+}
+
+/* Update HW with number of aggregated Tx descriptors to be sent */
+static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
+{
+	/* aggregated access - relevant TXQ number is written in TX desc */
+	mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
+}
+
+/* Check if there are enough free descriptors in aggregated txq.
+ * If not, update the number of occupied descriptors and repeat the check.
+ */
+static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
+				     struct mvpp2_tx_queue *aggr_txq, int num)
+{
+	if ((aggr_txq->count + num) > aggr_txq->size) {
+		/* Update number of occupied aggregated Tx descriptors */
+		int cpu = smp_processor_id();
+		u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu));
+
+		aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
+	}
+
+	if ((aggr_txq->count + num) > aggr_txq->size)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/* Reserved Tx descriptors allocation request */
+static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
+					 struct mvpp2_tx_queue *txq, int num)
+{
+	u32 val;
+
+	val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
+	mvpp2_write(priv, MVPP2_TXQ_RSVD_REQ_REG, val);
+
+	val = mvpp2_read(priv, MVPP2_TXQ_RSVD_RSLT_REG);
+
+	return val & MVPP2_TXQ_RSVD_RSLT_MASK;
+}
+
+/* Check if there are enough reserved descriptors for transmission.
+ * If not, request chunk of reserved descriptors and check again.
+ */
+static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
+					    struct mvpp2_tx_queue *txq,
+					    struct mvpp2_txq_pcpu *txq_pcpu,
+					    int num)
+{
+	int req, cpu, desc_count;
+
+	if (txq_pcpu->reserved_num >= num)
+		return 0;
+
+	/* Not enough descriptors reserved! Update the reserved descriptor
+	 * count and check again.
+	 */
+
+	desc_count = 0;
+	/* Compute total of used descriptors */
+	for_each_present_cpu(cpu) {
+		struct mvpp2_txq_pcpu *txq_pcpu_aux;
+
+		txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
+		desc_count += txq_pcpu_aux->count;
+		desc_count += txq_pcpu_aux->reserved_num;
+	}
+
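+	/* Request at least a full chunk; e.g. with MVPP2_CPU_DESC_CHUNK == 64
+	 * (illustrative value), num = 70 and reserved_num = 10 yield req = 64.
+	 */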
+	req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
+	desc_count += req;
+
+	if (desc_count >
+	   (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
+		return -ENOMEM;
+
+	txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);
+
+	/* OK, the descriptor count has been updated: check again. */
+	if (txq_pcpu->reserved_num < num)
+		return -ENOMEM;
+	return 0;
+}
+
+/* Release the last allocated Tx descriptor. Useful to handle DMA
+ * mapping failures in the Tx path.
+ */
+static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
+{
+	if (txq->next_desc_to_proc == 0)
+		txq->next_desc_to_proc = txq->last_desc - 1;
+	else
+		txq->next_desc_to_proc--;
+}
+
+/* Set Tx descriptors fields relevant for CSUM calculation */
+static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
+			       int ip_hdr_len, int l4_proto)
+{
+	u32 command;
+
+	/* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
+	 * G_L4_chk, L4_type required only for checksum calculation
+	 */
+	command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
+	command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
+	command |= MVPP2_TXD_IP_CSUM_DISABLE;
+
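+	/* l3_proto is presumably in network byte order (e.g. taken from
+	 * skb->protocol), hence the swab16() on the host-order ETH_P_IP.
+	 */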
+	if (l3_proto == swab16(ETH_P_IP)) {
+		command &= ~MVPP2_TXD_IP_CSUM_DISABLE;	/* enable IPv4 csum */
+		command &= ~MVPP2_TXD_L3_IP6;		/* enable IPv4 */
+	} else {
+		command |= MVPP2_TXD_L3_IP6;		/* enable IPv6 */
+	}
+
+	if (l4_proto == IPPROTO_TCP) {
+		command &= ~MVPP2_TXD_L4_UDP;		/* enable TCP */
+		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
+	} else if (l4_proto == IPPROTO_UDP) {
+		command |= MVPP2_TXD_L4_UDP;		/* enable UDP */
+		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
+	} else {
+		command |= MVPP2_TXD_L4_CSUM_NOT;
+	}
+
+	return command;
+}
+
+/* Get number of sent descriptors and decrement counter.
+ * The number of sent descriptors is returned.
+ * Per-CPU access
+ */
+static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
+					   struct mvpp2_tx_queue *txq)
+{
+	u32 val;
+
+	/* Reading status reg resets transmitted descriptor counter */
+	val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));
+
+	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
+		MVPP2_TRANSMITTED_COUNT_OFFSET;
+}
+
+static void mvpp2_txq_sent_counter_clear(void *arg)
+{
+	struct mvpp2_port *port = arg;
+	int queue;
+
+	for (queue = 0; queue < txq_number; queue++) {
+		int id = port->txqs[queue]->id;
+
+		mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
+	}
+}
+
+/* Set max sizes for Tx queues */
+static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
+{
+	u32	val, size, mtu;
+	int	txq, tx_port_num;
+
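+	/* The egress shaper appears to account for the MTU in bits,
+	 * hence the "* 8" below.
+	 */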
+	mtu = port->pkt_size * 8;
+	if (mtu > MVPP2_TXP_MTU_MAX)
+		mtu = MVPP2_TXP_MTU_MAX;
+
+	/* Workaround for wrong token bucket update: MTU = 3 * real MTU */
+	mtu = 3 * mtu;
+
+	/* Indirect access to registers */
+	tx_port_num = mvpp2_egress_port(port);
+	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
+
+	/* Set MTU */
+	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
+	val &= ~MVPP2_TXP_MTU_MAX;
+	val |= mtu;
+	mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
+
+	/* TXP token size and all TXQs token size must be larger than the MTU */
+	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
+	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
+	if (size < mtu) {
+		size = mtu;
+		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
+		val |= size;
+		mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
+	}
+
+	for (txq = 0; txq < txq_number; txq++) {
+		val = mvpp2_read(port->priv,
+				 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
+		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
+
+		if (size < mtu) {
+			size = mtu;
+			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
+			val |= size;
+			mvpp2_write(port->priv,
+				    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
+				    val);
+		}
+	}
+}
+
+/* Set the number of packets that will be received before Rx interrupt
+ * will be generated by HW.
+ */
+static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
+				   struct mvpp2_rx_queue *rxq, u32 pkts)
+{
+	u32 val;
+
+	val = (pkts & MVPP2_OCCUPIED_THRESH_MASK);
+	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
+	mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val);
+
+	rxq->pkts_coal = pkts;
+}
+
+/* Set the time delay in usec before Rx interrupt */
+static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
+				   struct mvpp2_rx_queue *rxq, u32 usec)
+{
+	u32 val;
+
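+	/* Convert usec to tclk cycles; e.g. with a 250 MHz tclk
+	 * (illustrative), usec = 32 gives val = 250 * 32 = 8000 cycles.
+	 */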
+	val = (port->priv->tclk / USEC_PER_SEC) * usec;
+	mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
+
+	rxq->time_coal = usec;
+}
+
+/* Free Tx queue skbuffs */
+static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
+				struct mvpp2_tx_queue *txq,
+				struct mvpp2_txq_pcpu *txq_pcpu, int num)
+{
+	int i;
+
+	for (i = 0; i < num; i++) {
+		struct mvpp2_txq_pcpu_buf *tx_buf =
+			txq_pcpu->buffs + txq_pcpu->txq_get_index;
+
+		dma_unmap_single(port->dev->dev.parent, tx_buf->phys,
+				 tx_buf->size, DMA_TO_DEVICE);
+		if (tx_buf->skb)
+			dev_kfree_skb_any(tx_buf->skb);
+
+		mvpp2_txq_inc_get(txq_pcpu);
+	}
+}
+
+static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
+							u32 cause)
+{
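+	/* cause is a bitmap of pending queues; take the highest set bit */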
+	int queue = fls(cause) - 1;
+
+	return port->rxqs[queue];
+}
+
+static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
+							u32 cause)
+{
+	int queue = fls(cause) - 1;
+
+	return port->txqs[queue];
+}
+
+/* Handle end of transmission */
+static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
+			   struct mvpp2_txq_pcpu *txq_pcpu)
+{
+	struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
+	int tx_done;
+
+	if (txq_pcpu->cpu != smp_processor_id())
+		netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");
+
+	tx_done = mvpp2_txq_sent_desc_proc(port, txq);
+	if (!tx_done)
+		return;
+	mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);
+
+	txq_pcpu->count -= tx_done;
+
+	if (netif_tx_queue_stopped(nq))
+		if (txq_pcpu->size - txq_pcpu->count >= MAX_SKB_FRAGS + 1)
+			netif_tx_wake_queue(nq);
+}
+
+static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause)
+{
+	struct mvpp2_tx_queue *txq;
+	struct mvpp2_txq_pcpu *txq_pcpu;
+	unsigned int tx_todo = 0;
+
+	while (cause) {
+		txq = mvpp2_get_tx_queue(port, cause);
+		if (!txq)
+			break;
+
+		txq_pcpu = this_cpu_ptr(txq->pcpu);
+
+		if (txq_pcpu->count) {
+			mvpp2_txq_done(port, txq, txq_pcpu);
+			tx_todo += txq_pcpu->count;
+		}
+
+		cause &= ~(1 << txq->log_id);
+	}
+	return tx_todo;
+}
+
+/* Rx/Tx queue initialization/cleanup methods */
+
+/* Allocate and initialize descriptors for aggr TXQ */
+static int mvpp2_aggr_txq_init(struct platform_device *pdev,
+			       struct mvpp2_tx_queue *aggr_txq,
+			       int desc_num, int cpu,
+			       struct mvpp2 *priv)
+{
+	/* Allocate memory for TX descriptors */
+	aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
+				desc_num * MVPP2_DESC_ALIGNED_SIZE,
+				&aggr_txq->descs_phys, GFP_KERNEL);
+	if (!aggr_txq->descs)
+		return -ENOMEM;
+
+	/* Make sure descriptor address is cache line size aligned */
+	BUG_ON(aggr_txq->descs !=
+	       PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
+
+	aggr_txq->last_desc = aggr_txq->size - 1;
+
+	/* Workaround: the aggregated TXQ is not reset, so resume from
+	 * the index currently held by HW
+	 */
+	aggr_txq->next_desc_to_proc = mvpp2_read(priv,
+						 MVPP2_AGGR_TXQ_INDEX_REG(cpu));
+
+	/* Set Tx descriptors queue starting address */
+	/* indirect access */
+	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu),
+		    aggr_txq->descs_phys);
+	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);
+
+	return 0;
+}
+
+/* Create a specified Rx queue */
+static int mvpp2_rxq_init(struct mvpp2_port *port,
+			  struct mvpp2_rx_queue *rxq)
+
+{
+	rxq->size = port->rx_ring_size;
+
+	/* Allocate memory for RX descriptors */
+	rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
+					rxq->size * MVPP2_DESC_ALIGNED_SIZE,
+					&rxq->descs_phys, GFP_KERNEL);
+	if (!rxq->descs)
+		return -ENOMEM;
+
+	BUG_ON(rxq->descs !=
+	       PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
+
+	rxq->last_desc = rxq->size - 1;
+
+	/* Zero occupied and non-occupied counters - direct access */
+	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
+
+	/* Set Rx descriptors queue starting address - indirect access */
+	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
+	mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_phys);
+	mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
+	mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);
+
+	/* Set Offset */
+	mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
+
+	/* Set coalescing pkts and time */
+	mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal);
+	mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal);
+
+	/* Add number of descriptors ready for receiving packets */
+	mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
+
+	return 0;
+}
+
+/* Push packets received by the RXQ to BM pool */
+static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
+				struct mvpp2_rx_queue *rxq)
+{
+	int rx_received, i;
+
+	rx_received = mvpp2_rxq_received(port, rxq->id);
+	if (!rx_received)
+		return;
+
+	for (i = 0; i < rx_received; i++) {
+		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
+		u32 bm = mvpp2_bm_cookie_build(rx_desc);
+
+		mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
+				  rx_desc->buf_cookie);
+	}
+	mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
+}
+
+/* Cleanup Rx queue */
+static void mvpp2_rxq_deinit(struct mvpp2_port *port,
+			     struct mvpp2_rx_queue *rxq)
+{
+	mvpp2_rxq_drop_pkts(port, rxq);
+
+	if (rxq->descs)
+		dma_free_coherent(port->dev->dev.parent,
+				  rxq->size * MVPP2_DESC_ALIGNED_SIZE,
+				  rxq->descs,
+				  rxq->descs_phys);
+
+	rxq->descs             = NULL;
+	rxq->last_desc         = 0;
+	rxq->next_desc_to_proc = 0;
+	rxq->descs_phys        = 0;
+
+	/* Clear Rx descriptors queue starting address and size;
+	 * free descriptor number
+	 */
+	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
+	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
+	mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
+	mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
+}
+
+/* Create and initialize a Tx queue */
+static int mvpp2_txq_init(struct mvpp2_port *port,
+			  struct mvpp2_tx_queue *txq)
+{
+	u32 val;
+	int cpu, desc, desc_per_txq, tx_port_num;
+	struct mvpp2_txq_pcpu *txq_pcpu;
+
+	txq->size = port->tx_ring_size;
+
+	/* Allocate memory for Tx descriptors */
+	txq->descs = dma_alloc_coherent(port->dev->dev.parent,
+				txq->size * MVPP2_DESC_ALIGNED_SIZE,
+				&txq->descs_phys, GFP_KERNEL);
+	if (!txq->descs)
+		return -ENOMEM;
+
+	/* Make sure descriptor address is cache line size aligned */
+	BUG_ON(txq->descs !=
+	       PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
+
+	txq->last_desc = txq->size - 1;
+
+	/* Set Tx descriptors queue starting address - indirect access */
+	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
+	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_phys);
+	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
+					     MVPP2_TXQ_DESC_SIZE_MASK);
+	mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
+	mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
+		    txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
+	val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
+	val &= ~MVPP2_TXQ_PENDING_MASK;
+	mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);
+
+	/* Calculate base address in prefetch buffer. We reserve 16 descriptors
+	 * for each existing TXQ.
+	 * TCONTs for the PON port must be continuous from 0 to MVPP2_MAX_TCONT.
+	 * GBE ports are assumed to be continuous from 0 to MVPP2_MAX_PORTS.
+	 */
+	desc_per_txq = 16;
+	desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
+	       (txq->log_id * desc_per_txq);
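+	/* For example, assuming MVPP2_MAX_TXQ == 8: port 1, txq 2 maps to
+	 * desc = 1 * 8 * 16 + 2 * 16 = 160
+	 */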
+
+	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
+		    MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
+		    MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
+
+	/* WRR / EJP configuration - indirect access */
+	tx_port_num = mvpp2_egress_port(port);
+	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
+
+	val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
+	val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
+	val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
+	val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
+	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
+
+	val = MVPP2_TXQ_TOKEN_SIZE_MAX;
+	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
+		    val);
+
+	for_each_present_cpu(cpu) {
+		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+		txq_pcpu->size = txq->size;
+		txq_pcpu->buffs = kmalloc(txq_pcpu->size *
+					  sizeof(struct mvpp2_txq_pcpu_buf),
+					  GFP_KERNEL);
+		if (!txq_pcpu->buffs)
+			goto error;
+
+		txq_pcpu->count = 0;
+		txq_pcpu->reserved_num = 0;
+		txq_pcpu->txq_put_index = 0;
+		txq_pcpu->txq_get_index = 0;
+	}
+
+	return 0;
+
+error:
+	for_each_present_cpu(cpu) {
+		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+		kfree(txq_pcpu->buffs);
+	}
+
+	dma_free_coherent(port->dev->dev.parent,
+			  txq->size * MVPP2_DESC_ALIGNED_SIZE,
+			  txq->descs, txq->descs_phys);
+
+	return -ENOMEM;
+}
+
+/* Free allocated TXQ resources */
+static void mvpp2_txq_deinit(struct mvpp2_port *port,
+			     struct mvpp2_tx_queue *txq)
+{
+	struct mvpp2_txq_pcpu *txq_pcpu;
+	int cpu;
+
+	for_each_present_cpu(cpu) {
+		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+		kfree(txq_pcpu->buffs);
+	}
+
+	if (txq->descs)
+		dma_free_coherent(port->dev->dev.parent,
+				  txq->size * MVPP2_DESC_ALIGNED_SIZE,
+				  txq->descs, txq->descs_phys);
+
+	txq->descs             = NULL;
+	txq->last_desc         = 0;
+	txq->next_desc_to_proc = 0;
+	txq->descs_phys        = 0;
+
+	/* Set minimum bandwidth for disabled TXQs */
+	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
+
+	/* Set Tx descriptors queue starting address and size */
+	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
+	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
+	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
+}
+
+/* Drain a Tx queue and release its pending buffers */
+static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
+{
+	struct mvpp2_txq_pcpu *txq_pcpu;
+	int delay, pending, cpu;
+	u32 val;
+
+	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
+	val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
+	val |= MVPP2_TXQ_DRAIN_EN_MASK;
+	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
+
+	/* The napi queue has been stopped so wait for all packets
+	 * to be transmitted.
+	 */
+	delay = 0;
+	do {
+		if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
+			netdev_warn(port->dev,
+				    "port %d: cleaning queue %d timed out\n",
+				    port->id, txq->log_id);
+			break;
+		}
+		mdelay(1);
+		delay++;
+
+		pending = mvpp2_txq_pend_desc_num_get(port, txq);
+	} while (pending);
+
+	val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
+	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
+
+	for_each_present_cpu(cpu) {
+		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+
+		/* Release all packets */
+		mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
+
+		/* Reset queue */
+		txq_pcpu->count = 0;
+		txq_pcpu->txq_put_index = 0;
+		txq_pcpu->txq_get_index = 0;
+	}
+}
+
+/* Cleanup all Tx queues */
+static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
+{
+	struct mvpp2_tx_queue *txq;
+	int queue;
+	u32 val;
+
+	val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
+
+	/* Reset Tx ports and delete Tx queues */
+	val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
+	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
+
+	for (queue = 0; queue < txq_number; queue++) {
+		txq = port->txqs[queue];
+		mvpp2_txq_clean(port, txq);
+		mvpp2_txq_deinit(port, txq);
+	}
+
+	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
+
+	val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
+	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
+}
+
+/* Cleanup all Rx queues */
+static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
+{
+	int queue;
+
+	for (queue = 0; queue < rxq_number; queue++)
+		mvpp2_rxq_deinit(port, port->rxqs[queue]);
+}
+
+/* Init all Rx queues for port */
+static int mvpp2_setup_rxqs(struct mvpp2_port *port)
+{
+	int queue, err;
+
+	for (queue = 0; queue < rxq_number; queue++) {
+		err = mvpp2_rxq_init(port, port->rxqs[queue]);
+		if (err)
+			goto err_cleanup;
+	}
+	return 0;
+
+err_cleanup:
+	mvpp2_cleanup_rxqs(port);
+	return err;
+}
+
+/* Init all tx queues for port */
+static int mvpp2_setup_txqs(struct mvpp2_port *port)
+{
+	struct mvpp2_tx_queue *txq;
+	int queue, err;
+
+	for (queue = 0; queue < txq_number; queue++) {
+		txq = port->txqs[queue];
+		err = mvpp2_txq_init(port, txq);
+		if (err)
+			goto err_cleanup;
+	}
+
+	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
+	return 0;
+
+err_cleanup:
+	mvpp2_cleanup_txqs(port);
+	return err;
+}
+
+/* The callback for per-port interrupt */
+static irqreturn_t mvpp2_isr(int irq, void *dev_id)
+{
+	struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
+
+	mvpp2_interrupts_disable(port);
+
+	napi_schedule(&port->napi);
+
+	return IRQ_HANDLED;
+}
+
+/* Adjust link */
+static void mvpp2_link_event(struct net_device *dev)
+{
+	struct mvpp2_port *port = netdev_priv(dev);
+	struct phy_device *phydev = port->phy_dev;
+	int status_change = 0;
+	u32 val;
+
+	if (phydev->link) {
+		if ((port->speed != phydev->speed) ||
+		    (port->duplex != phydev->duplex)) {
+			u32 val;
+
+			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+			val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
+				 MVPP2_GMAC_CONFIG_GMII_SPEED |
+				 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
+				 MVPP2_GMAC_AN_SPEED_EN |
+				 MVPP2_GMAC_AN_DUPLEX_EN);
+
+			if (phydev->duplex)
+				val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
+
+			if (phydev->speed == SPEED_1000)
+				val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
+			else if (phydev->speed == SPEED_100)
+				val |= MVPP2_GMAC_CONFIG_MII_SPEED;
+
+			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+
+			port->duplex = phydev->duplex;
+			port->speed  = phydev->speed;
+		}
+	}
+
+	if (phydev->link != port->link) {
+		if (!phydev->link) {
+			port->duplex = -1;
+			port->speed = 0;
+		}
+
+		port->link = phydev->link;
+		status_change = 1;
+	}
+
+	if (status_change) {
+		if (phydev->link) {
+			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+			val |= (MVPP2_GMAC_FORCE_LINK_PASS |
+				MVPP2_GMAC_FORCE_LINK_DOWN);
+			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+			mvpp2_egress_enable(port);
+			mvpp2_ingress_enable(port);
+		} else {
+			mvpp2_ingress_disable(port);
+			mvpp2_egress_disable(port);
+		}
+		phy_print_status(phydev);
+	}
+}
+
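+/* Deferred tx-done processing: mvpp2_tx() arms a pinned hrtimer which, on
+ * expiry, schedules a tasklet (mvpp2_tx_proc_cb) that reaps completed Tx
+ * descriptors outside hard-irq context and re-arms the timer while work
+ * remains. A sketch of the chain:
+ *
+ *   mvpp2_tx() -> mvpp2_timer_set() -> hrtimer -> tasklet -> mvpp2_tx_done()
+ */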
+static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
+{
+	ktime_t interval;
+
+	if (!port_pcpu->timer_scheduled) {
+		port_pcpu->timer_scheduled = true;
+		interval = ktime_set(0, MVPP2_TXDONE_HRTIMER_PERIOD_NS);
+		hrtimer_start(&port_pcpu->tx_done_timer, interval,
+			      HRTIMER_MODE_REL_PINNED);
+	}
+}
+
+static void mvpp2_tx_proc_cb(unsigned long data)
+{
+	struct net_device *dev = (struct net_device *)data;
+	struct mvpp2_port *port = netdev_priv(dev);
+	struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
+	unsigned int tx_todo, cause;
+
+	if (!netif_running(dev))
+		return;
+	port_pcpu->timer_scheduled = false;
+
+	/* Process all the Tx queues */
+	cause = (1 << txq_number) - 1;
+	tx_todo = mvpp2_tx_done(port, cause);
+
+	/* Set the timer in case not all the packets were processed */
+	if (tx_todo)
+		mvpp2_timer_set(port_pcpu);
+}
+
+static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
+{
+	struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
+							 struct mvpp2_port_pcpu,
+							 tx_done_timer);
+
+	tasklet_schedule(&port_pcpu->tx_done_tasklet);
+
+	return HRTIMER_NORESTART;
+}
+
+/* Main RX/TX processing routines */
+
+/* Display more error info */
+static void mvpp2_rx_error(struct mvpp2_port *port,
+			   struct mvpp2_rx_desc *rx_desc)
+{
+	u32 status = rx_desc->status;
+
+	switch (status & MVPP2_RXD_ERR_CODE_MASK) {
+	case MVPP2_RXD_ERR_CRC:
+		netdev_err(port->dev, "bad rx status %08x (crc error), size=%d\n",
+			   status, rx_desc->data_size);
+		break;
+	case MVPP2_RXD_ERR_OVERRUN:
+		netdev_err(port->dev, "bad rx status %08x (overrun error), size=%d\n",
+			   status, rx_desc->data_size);
+		break;
+	case MVPP2_RXD_ERR_RESOURCE:
+		netdev_err(port->dev, "bad rx status %08x (resource error), size=%d\n",
+			   status, rx_desc->data_size);
+		break;
+	}
+}
+
+/* Handle RX checksum offload */
+static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
+			  struct sk_buff *skb)
+{
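+	/* Only trust the hardware checksum when the descriptor reports a
+	 * sane L3 header (IPv4 without header error, or IPv6) and a
+	 * verified TCP/UDP L4 checksum; otherwise fall through to
+	 * CHECKSUM_NONE.
+	 */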
+	if (((status & MVPP2_RXD_L3_IP4) &&
+	     !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
+	    (status & MVPP2_RXD_L3_IP6))
+		if (((status & MVPP2_RXD_L4_UDP) ||
+		     (status & MVPP2_RXD_L4_TCP)) &&
+		     (status & MVPP2_RXD_L4_CSUM_OK)) {
+			skb->csum = 0;
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+			return;
+		}
+
+	skb->ip_summed = CHECKSUM_NONE;
+}
+
+/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
+static int mvpp2_rx_refill(struct mvpp2_port *port,
+			   struct mvpp2_bm_pool *bm_pool,
+			   u32 bm, int is_recycle)
+{
+	struct sk_buff *skb;
+	dma_addr_t phys_addr;
+
+	if (is_recycle &&
+	    (atomic_read(&bm_pool->in_use) < bm_pool->in_use_thresh))
+		return 0;
+
+	/* No recycle or too many buffers are in use, so allocate a new skb */
+	skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_ATOMIC);
+	if (!skb)
+		return -ENOMEM;
+
+	mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb);
+	atomic_dec(&bm_pool->in_use);
+	return 0;
+}
+
+/* Handle tx checksum */
+static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
+{
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		int ip_hdr_len = 0;
+		u8 l4_proto;
+
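+		/* ip_hdr_len is counted in 32-bit words: IPv4's ihl field
+		 * already uses that unit, and the IPv6 path below converts
+		 * bytes to words with ">> 2".
+		 */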
+		if (skb->protocol == htons(ETH_P_IP)) {
+			struct iphdr *ip4h = ip_hdr(skb);
+
+			/* Calculate IPv4 checksum and L4 checksum */
+			ip_hdr_len = ip4h->ihl;
+			l4_proto = ip4h->protocol;
+		} else if (skb->protocol == htons(ETH_P_IPV6)) {
+			struct ipv6hdr *ip6h = ipv6_hdr(skb);
+
+			/* Read l4_protocol from one of the IPv6 extra headers */
+			if (skb_network_header_len(skb) > 0)
+				ip_hdr_len = (skb_network_header_len(skb) >> 2);
+			l4_proto = ip6h->nexthdr;
+		} else {
+			return MVPP2_TXD_L4_CSUM_NOT;
+		}
+
+		return mvpp2_txq_desc_csum(skb_network_offset(skb),
+				skb->protocol, ip_hdr_len, l4_proto);
+	}
+
+	return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
+}
+
+static void mvpp2_buff_hdr_rx(struct mvpp2_port *port,
+			      struct mvpp2_rx_desc *rx_desc)
+{
+	struct mvpp2_buff_hdr *buff_hdr;
+	struct sk_buff *skb;
+	u32 rx_status = rx_desc->status;
+	u32 buff_phys_addr;
+	u32 buff_virt_addr;
+	u32 buff_phys_addr_next;
+	u32 buff_virt_addr_next;
+	int mc_id;
+	int pool_id;
+
+	pool_id = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
+		   MVPP2_RXD_BM_POOL_ID_OFFS;
+	buff_phys_addr = rx_desc->buf_phys_addr;
+	buff_virt_addr = rx_desc->buf_cookie;
+
+	do {
+		skb = (struct sk_buff *)buff_virt_addr;
+		buff_hdr = (struct mvpp2_buff_hdr *)skb->head;
+
+		mc_id = MVPP2_B_HDR_INFO_MC_ID(buff_hdr->info);
+
+		buff_phys_addr_next = buff_hdr->next_buff_phys_addr;
+		buff_virt_addr_next = buff_hdr->next_buff_virt_addr;
+
+		/* Release buffer */
+		mvpp2_bm_pool_mc_put(port, pool_id, buff_phys_addr,
+				     buff_virt_addr, mc_id);
+
+		buff_phys_addr = buff_phys_addr_next;
+		buff_virt_addr = buff_virt_addr_next;
+
+	} while (!MVPP2_B_HDR_INFO_IS_LAST(buff_hdr->info));
+}
+
+/* Main rx processing */
+static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
+		    struct mvpp2_rx_queue *rxq)
+{
+	struct net_device *dev = port->dev;
+	int rx_received;
+	int rx_done = 0;
+	u32 rcvd_pkts = 0;
+	u32 rcvd_bytes = 0;
+
+	/* Get number of received packets and clamp the to-do */
+	rx_received = mvpp2_rxq_received(port, rxq->id);
+	if (rx_todo > rx_received)
+		rx_todo = rx_received;
+
+	while (rx_done < rx_todo) {
+		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
+		struct mvpp2_bm_pool *bm_pool;
+		struct sk_buff *skb;
+		dma_addr_t phys_addr;
+		u32 bm, rx_status;
+		int pool, rx_bytes, err;
+
+		rx_done++;
+		rx_status = rx_desc->status;
+		rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE;
+		phys_addr = rx_desc->buf_phys_addr;
+
+		bm = mvpp2_bm_cookie_build(rx_desc);
+		pool = mvpp2_bm_cookie_pool_get(bm);
+		bm_pool = &port->priv->bm_pools[pool];
+		/* Check if buffer header is used */
+		if (rx_status & MVPP2_RXD_BUF_HDR) {
+			mvpp2_buff_hdr_rx(port, rx_desc);
+			continue;
+		}
+
+		/* In case of an error, release the buffer pointer back to
+		 * the Buffer Manager. This release process is controlled by
+		 * the hardware, and the information about the buffer is
+		 * taken from the RX descriptor.
+		 */
+		if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
+		err_drop_frame:
+			dev->stats.rx_errors++;
+			mvpp2_rx_error(port, rx_desc);
+			/* Return the buffer to the pool */
+			mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
+					  rx_desc->buf_cookie);
+			continue;
+		}
+
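+		/* The BM cookie holds the skb pointer stored at refill time;
+		 * on this 32-bit SoC the virtual address fits the 32-bit
+		 * buf_cookie field, so no lookup table is needed.
+		 */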
+		skb = (struct sk_buff *)rx_desc->buf_cookie;
+
+		err = mvpp2_rx_refill(port, bm_pool, bm, 0);
+		if (err) {
+			netdev_err(port->dev, "failed to refill BM pools\n");
+			goto err_drop_frame;
+		}
+
+		dma_unmap_single(dev->dev.parent, phys_addr,
+				 bm_pool->buf_size, DMA_FROM_DEVICE);
+
+		rcvd_pkts++;
+		rcvd_bytes += rx_bytes;
+		atomic_inc(&bm_pool->in_use);
+
+		skb_reserve(skb, MVPP2_MH_SIZE);
+		skb_put(skb, rx_bytes);
+		skb->protocol = eth_type_trans(skb, dev);
+		mvpp2_rx_csum(port, rx_status, skb);
+
+		napi_gro_receive(&port->napi, skb);
+	}
+
+	if (rcvd_pkts) {
+		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
+
+		u64_stats_update_begin(&stats->syncp);
+		stats->rx_packets += rcvd_pkts;
+		stats->rx_bytes   += rcvd_bytes;
+		u64_stats_update_end(&stats->syncp);
+	}
+
+	/* Update Rx queue management counters */
+	wmb();
+	mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
+
+	return rx_todo;
+}
+
+static inline void
+tx_desc_unmap_put(struct device *dev, struct mvpp2_tx_queue *txq,
+		  struct mvpp2_tx_desc *desc)
+{
+	dma_unmap_single(dev, desc->buf_phys_addr,
+			 desc->data_size, DMA_TO_DEVICE);
+	mvpp2_txq_desc_put(txq);
+}
+
+/* Handle tx fragmentation processing */
+static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
+				 struct mvpp2_tx_queue *aggr_txq,
+				 struct mvpp2_tx_queue *txq)
+{
+	struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
+	struct mvpp2_tx_desc *tx_desc;
+	int i;
+	dma_addr_t buf_phys_addr;
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		void *addr = page_address(frag->page.p) + frag->page_offset;
+
+		tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
+		tx_desc->phys_txq = txq->id;
+		tx_desc->data_size = frag->size;
+
+		buf_phys_addr = dma_map_single(port->dev->dev.parent, addr,
+					       tx_desc->data_size,
+					       DMA_TO_DEVICE);
+		if (dma_mapping_error(port->dev->dev.parent, buf_phys_addr)) {
+			mvpp2_txq_desc_put(txq);
+			goto error;
+		}
+
+		tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
+		tx_desc->buf_phys_addr = buf_phys_addr & (~MVPP2_TX_DESC_ALIGN);
+
+		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
+			/* Last descriptor */
+			tx_desc->command = MVPP2_TXD_L_DESC;
+			mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
+		} else {
+			/* Descriptor in the middle: Not First, Not Last */
+			tx_desc->command = 0;
+			mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
+		}
+	}
+
+	return 0;
+
+error:
+	/* Release all descriptors that were used to map fragments of
+	 * this packet, as well as the corresponding DMA mappings
+	 */
+	for (i = i - 1; i >= 0; i--) {
+		tx_desc = txq->descs + i;
+		tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
+	}
+
+	return -ENOMEM;
+}
+
+/* Main tx processing */
+static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
+{
+	struct mvpp2_port *port = netdev_priv(dev);
+	struct mvpp2_tx_queue *txq, *aggr_txq;
+	struct mvpp2_txq_pcpu *txq_pcpu;
+	struct mvpp2_tx_desc *tx_desc;
+	dma_addr_t buf_phys_addr;
+	int frags = 0;
+	u16 txq_id;
+	u32 tx_cmd;
+
+	txq_id = skb_get_queue_mapping(skb);
+	txq = port->txqs[txq_id];
+	txq_pcpu = this_cpu_ptr(txq->pcpu);
+	aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];
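+	/* Descriptors are first staged on this CPU's aggregated TXQ; the
+	 * hardware then dispatches them to the per-port physical TXQ
+	 * selected by tx_desc->phys_txq.
+	 */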
+
+	frags = skb_shinfo(skb)->nr_frags + 1;
+
+	/* Check number of available descriptors */
+	if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
+	    mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
+					     txq_pcpu, frags)) {
+		frags = 0;
+		goto out;
+	}
+
+	/* Get a descriptor for the first part of the packet */
+	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
+	tx_desc->phys_txq = txq->id;
+	tx_desc->data_size = skb_headlen(skb);
+
+	buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
+				       tx_desc->data_size, DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev->dev.parent, buf_phys_addr))) {
+		mvpp2_txq_desc_put(txq);
+		frags = 0;
+		goto out;
+	}
+	tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
+	tx_desc->buf_phys_addr = buf_phys_addr & ~MVPP2_TX_DESC_ALIGN;
+
+	tx_cmd = mvpp2_skb_tx_csum(port, skb);
+
+	if (frags == 1) {
+		/* First and Last descriptor */
+		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
+		tx_desc->command = tx_cmd;
+		mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
+	} else {
+		/* First but not Last */
+		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
+		tx_desc->command = tx_cmd;
+		mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
+
+		/* Continue with other skb fragments */
+		if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
+			tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
+			frags = 0;
+			goto out;
+		}
+	}
+
+	txq_pcpu->reserved_num -= frags;
+	txq_pcpu->count += frags;
+	aggr_txq->count += frags;
+
+	/* Enable transmit */
+	wmb();
+	mvpp2_aggr_txq_pend_desc_add(port, frags);
+
+	if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1) {
+		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
+
+		netif_tx_stop_queue(nq);
+	}
+out:
+	if (frags > 0) {
+		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
+
+		u64_stats_update_begin(&stats->syncp);
+		stats->tx_packets++;
+		stats->tx_bytes += skb->len;
+		u64_stats_update_end(&stats->syncp);
+	} else {
+		dev->stats.tx_dropped++;
+		dev_kfree_skb_any(skb);
+	}
+
+	/* Finalize TX processing */
+	if (txq_pcpu->count >= txq->done_pkts_coal)
+		mvpp2_txq_done(port, txq, txq_pcpu);
+
+	/* Set the timer in case not all frags were processed */
+	if (txq_pcpu->count <= frags && txq_pcpu->count > 0) {
+		struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
+
+		mvpp2_timer_set(port_pcpu);
+	}
+
+	return NETDEV_TX_OK;
+}
+
+static inline void mvpp2_cause_error(struct net_device *dev, int cause)
+{
+	if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
+		netdev_err(dev, "FCS error\n");
+	if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
+		netdev_err(dev, "rx fifo overrun error\n");
+	if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
+		netdev_err(dev, "tx fifo underrun error\n");
+}
+
+static int mvpp2_poll(struct napi_struct *napi, int budget)
+{
+	u32 cause_rx_tx, cause_rx, cause_misc;
+	int rx_done = 0;
+	struct mvpp2_port *port = netdev_priv(napi->dev);
+
+	/* Rx/Tx cause register
+	 *
+	 * Bits 0-15: each bit indicates received packets on the Rx queue
+	 * (bit 0 is for Rx queue 0).
+	 *
+	 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
+	 * (bit 16 is for Tx queue 0).
+	 *
+	 * Each CPU has its own Rx/Tx cause register
+	 */
+	cause_rx_tx = mvpp2_read(port->priv,
+				 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
+	cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
+	cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
+
+	if (cause_misc) {
+		mvpp2_cause_error(port->dev, cause_misc);
+
+		/* Clear the cause register */
+		mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
+		mvpp2_write(port->priv, MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
+			    cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
+	}
+
+	cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
+
+	/* Process RX packets */
+	cause_rx |= port->pending_cause_rx;
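+	/* pending_cause_rx carries queue bits left over from a previous
+	 * poll that exhausted its budget; fold them in so those queues
+	 * are serviced first.
+	 */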
+	while (cause_rx && budget > 0) {
+		int count;
+		struct mvpp2_rx_queue *rxq;
+
+		rxq = mvpp2_get_rx_queue(port, cause_rx);
+		if (!rxq)
+			break;
+
+		count = mvpp2_rx(port, budget, rxq);
+		rx_done += count;
+		budget -= count;
+		if (budget > 0) {
+			/* Clear the bit associated with this Rx queue
+			 * so that the next iteration continues from the
+			 * next Rx queue.
+			 */
+			cause_rx &= ~(1 << rxq->logic_rxq);
+		}
+	}
+
+	if (budget > 0) {
+		cause_rx = 0;
+		napi_complete(napi);
+
+		mvpp2_interrupts_enable(port);
+	}
+	port->pending_cause_rx = cause_rx;
+	return rx_done;
+}
+
+/* Set hw internals when starting port */
+static void mvpp2_start_dev(struct mvpp2_port *port)
+{
+	mvpp2_gmac_max_rx_size_set(port);
+	mvpp2_txp_max_tx_size_set(port);
+
+	napi_enable(&port->napi);
+
+	/* Enable interrupts on all CPUs */
+	mvpp2_interrupts_enable(port);
+
+	mvpp2_port_enable(port);
+	phy_start(port->phy_dev);
+	netif_tx_start_all_queues(port->dev);
+}
+
+/* Set hw internals when stopping port */
+static void mvpp2_stop_dev(struct mvpp2_port *port)
+{
+	/* Stop new packets from arriving to RXQs */
+	mvpp2_ingress_disable(port);
+
+	mdelay(10);
+
+	/* Disable interrupts on all CPUs */
+	mvpp2_interrupts_disable(port);
+
+	napi_disable(&port->napi);
+
+	netif_carrier_off(port->dev);
+	netif_tx_stop_all_queues(port->dev);
+
+	mvpp2_egress_disable(port);
+	mvpp2_port_disable(port);
+	phy_stop(port->phy_dev);
+}
+
+/* Return the (possibly adjusted) MTU if valid, or a negative errno */
+static inline int mvpp2_check_mtu_valid(struct net_device *dev, int mtu)
+{
+	if (mtu < 68) {
+		netdev_err(dev, "cannot change mtu to less than 68\n");
+		return -EINVAL;
+	}
+
+	/* 9676 == 9700 - 20 and rounding to 8 */
+	if (mtu > 9676) {
+		netdev_info(dev, "illegal MTU value %d, round to 9676\n", mtu);
+		mtu = 9676;
+	}
+
+	if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
+		netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
+			    ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
+		mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
+	}
+
+	return mtu;
+}
+
+static int mvpp2_check_ringparam_valid(struct net_device *dev,
+				       struct ethtool_ringparam *ring)
+{
+	u16 new_rx_pending = ring->rx_pending;
+	u16 new_tx_pending = ring->tx_pending;
+
+	if (ring->rx_pending == 0 || ring->tx_pending == 0)
+		return -EINVAL;
+
+	if (ring->rx_pending > MVPP2_MAX_RXD)
+		new_rx_pending = MVPP2_MAX_RXD;
+	else if (!IS_ALIGNED(ring->rx_pending, 16))
+		new_rx_pending = ALIGN(ring->rx_pending, 16);
+
+	if (ring->tx_pending > MVPP2_MAX_TXD)
+		new_tx_pending = MVPP2_MAX_TXD;
+	else if (!IS_ALIGNED(ring->tx_pending, 32))
+		new_tx_pending = ALIGN(ring->tx_pending, 32);
+
+	if (ring->rx_pending != new_rx_pending) {
+		netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
+			    ring->rx_pending, new_rx_pending);
+		ring->rx_pending = new_rx_pending;
+	}
+
+	if (ring->tx_pending != new_tx_pending) {
+		netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
+			    ring->tx_pending, new_tx_pending);
+		ring->tx_pending = new_tx_pending;
+	}
+
+	return 0;
+}
+
+static void mvpp2_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
+{
+	u32 mac_addr_l, mac_addr_m, mac_addr_h;
+
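+	/* The address is spread over three registers: four bytes in
+	 * SRC_ADDR_HIGH, one in SRC_ADDR_MIDDLE and one in the port's
+	 * GMAC_CTRL_1 register.
+	 */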
+	mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
+	mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
+	mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
+	addr[0] = (mac_addr_h >> 24) & 0xFF;
+	addr[1] = (mac_addr_h >> 16) & 0xFF;
+	addr[2] = (mac_addr_h >> 8) & 0xFF;
+	addr[3] = mac_addr_h & 0xFF;
+	addr[4] = mac_addr_m & 0xFF;
+	addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
+}
+
+static int mvpp2_phy_connect(struct mvpp2_port *port)
+{
+	struct phy_device *phy_dev;
+
+	phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
+				 port->phy_interface);
+	if (!phy_dev) {
+		netdev_err(port->dev, "cannot connect to phy\n");
+		return -ENODEV;
+	}
+	phy_dev->supported &= PHY_GBIT_FEATURES;
+	phy_dev->advertising = phy_dev->supported;
+
+	port->phy_dev = phy_dev;
+	port->link    = 0;
+	port->duplex  = 0;
+	port->speed   = 0;
+
+	return 0;
+}
+
+static void mvpp2_phy_disconnect(struct mvpp2_port *port)
+{
+	phy_disconnect(port->phy_dev);
+	port->phy_dev = NULL;
+}
+
+static int mvpp2_open(struct net_device *dev)
+{
+	struct mvpp2_port *port = netdev_priv(dev);
+	unsigned char mac_bcast[ETH_ALEN] = {
+			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+	int err;
+
+	err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
+	if (err) {
+		netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
+		return err;
+	}
+	err = mvpp2_prs_mac_da_accept(port->priv, port->id,
+				      dev->dev_addr, true);
+	if (err) {
+		netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n");
+		return err;
+	}
+	err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
+	if (err) {
+		netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
+		return err;
+	}
+	err = mvpp2_prs_def_flow(port);
+	if (err) {
+		netdev_err(dev, "mvpp2_prs_def_flow failed\n");
+		return err;
+	}
+
+	/* Allocate the Rx/Tx queues */
+	err = mvpp2_setup_rxqs(port);
+	if (err) {
+		netdev_err(port->dev, "cannot allocate Rx queues\n");
+		return err;
+	}
+
+	err = mvpp2_setup_txqs(port);
+	if (err) {
+		netdev_err(port->dev, "cannot allocate Tx queues\n");
+		goto err_cleanup_rxqs;
+	}
+
+	err = request_irq(port->irq, mvpp2_isr, 0, dev->name, port);
+	if (err) {
+		netdev_err(port->dev, "cannot request IRQ %d\n", port->irq);
+		goto err_cleanup_txqs;
+	}
+
+	/* The link is down by default */
+	netif_carrier_off(port->dev);
+
+	err = mvpp2_phy_connect(port);
+	if (err < 0)
+		goto err_free_irq;
+
+	/* Unmask interrupts on all CPUs */
+	on_each_cpu(mvpp2_interrupts_unmask, port, 1);
+
+	mvpp2_start_dev(port);
+
+	return 0;
+
+err_free_irq:
+	free_irq(port->irq, port);
+err_cleanup_txqs:
+	mvpp2_cleanup_txqs(port);
+err_cleanup_rxqs:
+	mvpp2_cleanup_rxqs(port);
+	return err;
+}
+
+static int mvpp2_stop(struct net_device *dev)
+{
+	struct mvpp2_port *port = netdev_priv(dev);
+	struct mvpp2_port_pcpu *port_pcpu;
+	int cpu;
+
+	mvpp2_stop_dev(port);
+	mvpp2_phy_disconnect(port);
+
+	/* Mask interrupts on all CPUs */
+	on_each_cpu(mvpp2_interrupts_mask, port, 1);
+
+	free_irq(port->irq, port);
+	for_each_present_cpu(cpu) {
+		port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+
+		hrtimer_cancel(&port_pcpu->tx_done_timer);
+		port_pcpu->timer_scheduled = false;
+		tasklet_kill(&port_pcpu->tx_done_tasklet);
+	}
+	mvpp2_cleanup_rxqs(port);
+	mvpp2_cleanup_txqs(port);
+
+	return 0;
+}
+
+static void mvpp2_set_rx_mode(struct net_device *dev)
+{
+	struct mvpp2_port *port = netdev_priv(dev);
+	struct mvpp2 *priv = port->priv;
+	struct netdev_hw_addr *ha;
+	int id = port->id;
+	bool allmulti = dev->flags & IFF_ALLMULTI;
+
+	mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
+	mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
+	mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);
+
+	/* Remove all of this port's multicast entries */
+	mvpp2_prs_mcast_del_all(priv, id);
+
+	if (allmulti && !netdev_mc_empty(dev)) {
+		netdev_for_each_mc_addr(ha, dev)
+			mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
+	}
+}
+
+static int mvpp2_set_mac_address(struct net_device *dev, void *p)
+{
+	struct mvpp2_port *port = netdev_priv(dev);
+	const struct sockaddr *addr = p;
+	int err;
+
+	if (!is_valid_ether_addr(addr->sa_data)) {
+		err = -EADDRNOTAVAIL;
+		goto error;
+	}
+
+	if (!netif_running(dev)) {
+		err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
+		if (!err)
+			return 0;
+		/* Reconfigure parser to accept the original MAC address */
+		err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
+		if (err)
+			goto error;
+	}
+
+	mvpp2_stop_dev(port);
+
+	err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
+	if (!err)
+		goto out_start;
+
+	/* Reconfigure parser to accept the original MAC address */
+	err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
+	if (err)
+		goto error;
+out_start:
+	mvpp2_start_dev(port);
+	mvpp2_egress_enable(port);
+	mvpp2_ingress_enable(port);
+	return 0;
+
+error:
+	netdev_err(dev, "failed to change MAC address\n");
+	return err;
+}
+
+static int mvpp2_change_mtu(struct net_device *dev, int mtu)
+{
+	struct mvpp2_port *port = netdev_priv(dev);
+	int err;
+
+	mtu = mvpp2_check_mtu_valid(dev, mtu);
+	if (mtu < 0) {
+		err = mtu;
+		goto error;
+	}
+
+	if (!netif_running(dev)) {
+		err = mvpp2_bm_update_mtu(dev, mtu);
+		if (!err) {
+			port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
+			return 0;
+		}
+
+		/* Reconfigure BM to the original MTU */
+		err = mvpp2_bm_update_mtu(dev, dev->mtu);
+		if (err)
+			goto error;
+	}
+
+	mvpp2_stop_dev(port);
+
+	err = mvpp2_bm_update_mtu(dev, mtu);
+	if (!err) {
+		port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
+		goto out_start;
+	}
+
+	/* Reconfigure BM to the original MTU */
+	err = mvpp2_bm_update_mtu(dev, dev->mtu);
+	if (err)
+		goto error;
+
+out_start:
+	mvpp2_start_dev(port);
+	mvpp2_egress_enable(port);
+	mvpp2_ingress_enable(port);
+
+	return 0;
+
+error:
+	netdev_err(dev, "failed to change MTU\n");
+	return err;
+}
+
+static struct rtnl_link_stats64 *
+mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+	struct mvpp2_port *port = netdev_priv(dev);
+	unsigned int start;
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct mvpp2_pcpu_stats *cpu_stats;
+		u64 rx_packets;
+		u64 rx_bytes;
+		u64 tx_packets;
+		u64 tx_bytes;
+
+		cpu_stats = per_cpu_ptr(port->stats, cpu);
+		do {
+			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+			rx_packets = cpu_stats->rx_packets;
+			rx_bytes   = cpu_stats->rx_bytes;
+			tx_packets = cpu_stats->tx_packets;
+			tx_bytes   = cpu_stats->tx_bytes;
+		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
+
+		stats->rx_packets += rx_packets;
+		stats->rx_bytes   += rx_bytes;
+		stats->tx_packets += tx_packets;
+		stats->tx_bytes   += tx_bytes;
+	}
+
+	stats->rx_errors	= dev->stats.rx_errors;
+	stats->rx_dropped	= dev->stats.rx_dropped;
+	stats->tx_dropped	= dev->stats.tx_dropped;
+
+	return stats;
+}
+
+static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct mvpp2_port *port = netdev_priv(dev);
+	int ret;
+
+	if (!port->phy_dev)
+		return -ENOTSUPP;
+
+	ret = phy_mii_ioctl(port->phy_dev, ifr, cmd);
+	if (!ret)
+		mvpp2_link_event(dev);
+
+	return ret;
+}
+
+/* Ethtool methods */
+
+/* Get settings (phy address, speed) for ethtool */
+static int mvpp2_ethtool_get_settings(struct net_device *dev,
+				      struct ethtool_cmd *cmd)
+{
+	struct mvpp2_port *port = netdev_priv(dev);
+
+	if (!port->phy_dev)
+		return -ENODEV;
+	return phy_ethtool_gset(port->phy_dev, cmd);
+}
+
+/* Set settings (phy address, speed) for ethtool */
+static int mvpp2_ethtool_set_settings(struct net_device *dev,
+				      struct ethtool_cmd *cmd)
+{
+	struct mvpp2_port *port = netdev_priv(dev);
+
+	if (!port->phy_dev)
+		return -ENODEV;
+	return phy_ethtool_sset(port->phy_dev, cmd);
+}
+
+/* Set interrupt coalescing for ethtool */
+static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
+				      struct ethtool_coalesce *c)
+{
+	struct mvpp2_port *port = netdev_priv(dev);
+	int queue;
+
+	for (queue = 0; queue < rxq_number; queue++) {
+		struct mvpp2_rx_queue *rxq = port->rxqs[queue];
+
+		rxq->time_coal = c->rx_coalesce_usecs;
+		rxq->pkts_coal = c->rx_max_coalesced_frames;
+		mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal);
+		mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal);
+	}
+
+	for (queue = 0; queue < txq_number; queue++) {
+		struct mvpp2_tx_queue *txq = port->txqs[queue];
+
+		txq->done_pkts_coal = c->tx_max_coalesced_frames;
+	}
+
+	return 0;
+}
+
+/* Get interrupt coalescing for ethtool */
+static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
+				      struct ethtool_coalesce *c)
+{
+	struct mvpp2_port *port = netdev_priv(dev);
+
+	c->rx_coalesce_usecs        = port->rxqs[0]->time_coal;
+	c->rx_max_coalesced_frames  = port->rxqs[0]->pkts_coal;
+	c->tx_max_coalesced_frames  = port->txqs[0]->done_pkts_coal;
+	return 0;
+}
+
+static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
+				      struct ethtool_drvinfo *drvinfo)
+{
+	strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
+		sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
+		sizeof(drvinfo->version));
+	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
+		sizeof(drvinfo->bus_info));
+}
+
+static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
+					struct ethtool_ringparam *ring)
+{
+	struct mvpp2_port *port = netdev_priv(dev);
+
+	ring->rx_max_pending = MVPP2_MAX_RXD;
+	ring->tx_max_pending = MVPP2_MAX_TXD;
+	ring->rx_pending = port->rx_ring_size;
+	ring->tx_pending = port->tx_ring_size;
+}
+
+static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
+				       struct ethtool_ringparam *ring)
+{
+	struct mvpp2_port *port = netdev_priv(dev);
+	u16 prev_rx_ring_size = port->rx_ring_size;
+	u16 prev_tx_ring_size = port->tx_ring_size;
+	int err;
+
+	err = mvpp2_check_ringparam_valid(dev, ring);
+	if (err)
+		return err;
+
+	if (!netif_running(dev)) {
+		port->rx_ring_size = ring->rx_pending;
+		port->tx_ring_size = ring->tx_pending;
+		return 0;
+	}
+
+	/* The interface is running, so we have to force a
+	 * reallocation of the queues
+	 */
+	mvpp2_stop_dev(port);
+	mvpp2_cleanup_rxqs(port);
+	mvpp2_cleanup_txqs(port);
+
+	port->rx_ring_size = ring->rx_pending;
+	port->tx_ring_size = ring->tx_pending;
+
+	err = mvpp2_setup_rxqs(port);
+	if (err) {
+		/* Reallocate Rx queues with the original ring size */
+		port->rx_ring_size = prev_rx_ring_size;
+		ring->rx_pending = prev_rx_ring_size;
+		err = mvpp2_setup_rxqs(port);
+		if (err)
+			goto err_out;
+	}
+	err = mvpp2_setup_txqs(port);
+	if (err) {
+		/* Reallocate Tx queues with the original ring size */
+		port->tx_ring_size = prev_tx_ring_size;
+		ring->tx_pending = prev_tx_ring_size;
+		err = mvpp2_setup_txqs(port);
+		if (err)
+			goto err_clean_rxqs;
+	}
+
+	mvpp2_start_dev(port);
+	mvpp2_egress_enable(port);
+	mvpp2_ingress_enable(port);
+
+	return 0;
+
+err_clean_rxqs:
+	mvpp2_cleanup_rxqs(port);
+err_out:
+	netdev_err(dev, "failed to change ring parameters\n");
+	return err;
+}
+
+/* Device ops */
+
+static const struct net_device_ops mvpp2_netdev_ops = {
+	.ndo_open		= mvpp2_open,
+	.ndo_stop		= mvpp2_stop,
+	.ndo_start_xmit		= mvpp2_tx,
+	.ndo_set_rx_mode	= mvpp2_set_rx_mode,
+	.ndo_set_mac_address	= mvpp2_set_mac_address,
+	.ndo_change_mtu		= mvpp2_change_mtu,
+	.ndo_get_stats64	= mvpp2_get_stats64,
+	.ndo_do_ioctl		= mvpp2_ioctl,
+};
+
+static const struct ethtool_ops mvpp2_eth_tool_ops = {
+	.get_link	= ethtool_op_get_link,
+	.get_settings	= mvpp2_ethtool_get_settings,
+	.set_settings	= mvpp2_ethtool_set_settings,
+	.set_coalesce	= mvpp2_ethtool_set_coalesce,
+	.get_coalesce	= mvpp2_ethtool_get_coalesce,
+	.get_drvinfo	= mvpp2_ethtool_get_drvinfo,
+	.get_ringparam	= mvpp2_ethtool_get_ringparam,
+	.set_ringparam	= mvpp2_ethtool_set_ringparam,
+};
+
+/* Driver initialization */
+
+static void mvpp2_port_power_up(struct mvpp2_port *port)
+{
+	mvpp2_port_mii_set(port);
+	mvpp2_port_periodic_xon_disable(port);
+	mvpp2_port_fc_adv_enable(port);
+	mvpp2_port_reset(port);
+}
+
+/* Initialize port HW */
+static int mvpp2_port_init(struct mvpp2_port *port)
+{
+	struct device *dev = port->dev->dev.parent;
+	struct mvpp2 *priv = port->priv;
+	struct mvpp2_txq_pcpu *txq_pcpu;
+	int queue, cpu, err;
+
+	if (port->first_rxq + rxq_number > MVPP2_RXQ_TOTAL_NUM)
+		return -EINVAL;
+
+	/* Disable port */
+	mvpp2_egress_disable(port);
+	mvpp2_port_disable(port);
+
+	port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
+				  GFP_KERNEL);
+	if (!port->txqs)
+		return -ENOMEM;
+
+	/* Associate physical Tx queues with this port and initialize them.
+	 * The mapping is predefined.
+	 */
+	for (queue = 0; queue < txq_number; queue++) {
+		int queue_phy_id = mvpp2_txq_phys(port->id, queue);
+		struct mvpp2_tx_queue *txq;
+
+		txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
+		if (!txq)
+			return -ENOMEM;
+
+		txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
+		if (!txq->pcpu) {
+			err = -ENOMEM;
+			goto err_free_percpu;
+		}
+
+		txq->id = queue_phy_id;
+		txq->log_id = queue;
+		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
+		for_each_present_cpu(cpu) {
+			txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+			txq_pcpu->cpu = cpu;
+		}
+
+		port->txqs[queue] = txq;
+	}
+
+	port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
+				  GFP_KERNEL);
+	if (!port->rxqs) {
+		err = -ENOMEM;
+		goto err_free_percpu;
+	}
+
+	/* Allocate and initialize the Rx queues for this port */
+	for (queue = 0; queue < rxq_number; queue++) {
+		struct mvpp2_rx_queue *rxq;
+
+		/* Map physical Rx queue to port's logical Rx queue */
+		rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
+		if (!rxq)
+			goto err_free_percpu;
+		/* Map this Rx queue to a physical queue */
+		rxq->id = port->first_rxq + queue;
+		rxq->port = port->id;
+		rxq->logic_rxq = queue;
+
+		port->rxqs[queue] = rxq;
+	}
+
+	/* Configure Rx queue group interrupt for this port */
+	mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(port->id), rxq_number);
+
+	/* Create Rx descriptor rings */
+	for (queue = 0; queue < rxq_number; queue++) {
+		struct mvpp2_rx_queue *rxq = port->rxqs[queue];
+
+		rxq->size = port->rx_ring_size;
+		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
+		rxq->time_coal = MVPP2_RX_COAL_USEC;
+	}
+
+	mvpp2_ingress_disable(port);
+
+	/* Port default configuration */
+	mvpp2_defaults_set(port);
+
+	/* Port's classifier configuration */
+	mvpp2_cls_oversize_rxq_set(port);
+	mvpp2_cls_port_config(port);
+
+	/* Provide an initial Rx packet size */
+	port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
+
+	/* Initialize pools for software forwarding (swf) */
+	err = mvpp2_swf_bm_pool_init(port);
+	if (err)
+		goto err_free_percpu;
+
+	return 0;
+
+err_free_percpu:
+	for (queue = 0; queue < txq_number; queue++) {
+		if (!port->txqs[queue])
+			continue;
+		free_percpu(port->txqs[queue]->pcpu);
+	}
+	return err;
+}
+
+/* Ports initialization */
+static int mvpp2_port_probe(struct platform_device *pdev,
+			    struct device_node *port_node,
+			    struct mvpp2 *priv,
+			    int *next_first_rxq)
+{
+	struct device_node *phy_node;
+	struct mvpp2_port *port;
+	struct mvpp2_port_pcpu *port_pcpu;
+	struct net_device *dev;
+	struct resource *res;
+	const char *dt_mac_addr;
+	const char *mac_from;
+	char hw_mac_addr[ETH_ALEN];
+	u32 id;
+	int features;
+	int phy_mode;
+	int priv_common_regs_num = 2;
+	int err, i, cpu;
+
+	dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
+				 rxq_number);
+	if (!dev)
+		return -ENOMEM;
+
+	phy_node = of_parse_phandle(port_node, "phy", 0);
+	if (!phy_node) {
+		dev_err(&pdev->dev, "missing phy\n");
+		err = -ENODEV;
+		goto err_free_netdev;
+	}
+
+	phy_mode = of_get_phy_mode(port_node);
+	if (phy_mode < 0) {
+		dev_err(&pdev->dev, "incorrect phy mode\n");
+		err = phy_mode;
+		goto err_free_netdev;
+	}
+
+	if (of_property_read_u32(port_node, "port-id", &id)) {
+		err = -EINVAL;
+		dev_err(&pdev->dev, "missing port-id value\n");
+		goto err_free_netdev;
+	}
+
+	dev->tx_queue_len = MVPP2_MAX_TXD;
+	dev->watchdog_timeo = 5 * HZ;
+	dev->netdev_ops = &mvpp2_netdev_ops;
+	dev->ethtool_ops = &mvpp2_eth_tool_ops;
+
+	port = netdev_priv(dev);
+
+	port->irq = irq_of_parse_and_map(port_node, 0);
+	if (port->irq <= 0) {
+		err = -EINVAL;
+		goto err_free_netdev;
+	}
+
+	if (of_property_read_bool(port_node, "marvell,loopback"))
+		port->flags |= MVPP2_F_LOOPBACK;
+
+	port->priv = priv;
+	port->id = id;
+	port->first_rxq = *next_first_rxq;
+	port->phy_node = phy_node;
+	port->phy_interface = phy_mode;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM,
+				    priv_common_regs_num + id);
+	port->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(port->base)) {
+		err = PTR_ERR(port->base);
+		goto err_free_irq;
+	}
+
+	/* Alloc per-cpu stats */
+	port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
+	if (!port->stats) {
+		err = -ENOMEM;
+		goto err_free_irq;
+	}
+
+	dt_mac_addr = of_get_mac_address(port_node);
+	if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
+		mac_from = "device tree";
+		ether_addr_copy(dev->dev_addr, dt_mac_addr);
+	} else {
+		mvpp2_get_mac_address(port, hw_mac_addr);
+		if (is_valid_ether_addr(hw_mac_addr)) {
+			mac_from = "hardware";
+			ether_addr_copy(dev->dev_addr, hw_mac_addr);
+		} else {
+			mac_from = "random";
+			eth_hw_addr_random(dev);
+		}
+	}
+
+	port->tx_ring_size = MVPP2_MAX_TXD;
+	port->rx_ring_size = MVPP2_MAX_RXD;
+	port->dev = dev;
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	err = mvpp2_port_init(port);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to init port %d\n", id);
+		goto err_free_stats;
+	}
+	mvpp2_port_power_up(port);
+
+	port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
+	if (!port->pcpu) {
+		err = -ENOMEM;
+		goto err_free_txq_pcpu;
+	}
+
+	for_each_present_cpu(cpu) {
+		port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+
+		hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
+			     HRTIMER_MODE_REL_PINNED);
+		port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
+		port_pcpu->timer_scheduled = false;
+
+		tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb,
+			     (unsigned long)dev);
+	}
+
+	netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
+	features = NETIF_F_SG | NETIF_F_IP_CSUM;
+	dev->features = features | NETIF_F_RXCSUM;
+	dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
+	dev->vlan_features |= features;
+
+	err = register_netdev(dev);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to register netdev\n");
+		goto err_free_port_pcpu;
+	}
+	netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
+
+	/* Increment the first Rx queue number to be used by the next port */
+	*next_first_rxq += rxq_number;
+	priv->port_list[id] = port;
+	return 0;
+
+err_free_port_pcpu:
+	free_percpu(port->pcpu);
+err_free_txq_pcpu:
+	for (i = 0; i < txq_number; i++)
+		free_percpu(port->txqs[i]->pcpu);
+err_free_stats:
+	free_percpu(port->stats);
+err_free_irq:
+	irq_dispose_mapping(port->irq);
+err_free_netdev:
+	free_netdev(dev);
+	return err;
+}
+
+/* Port removal routine */
+static void mvpp2_port_remove(struct mvpp2_port *port)
+{
+	int i;
+
+	unregister_netdev(port->dev);
+	free_percpu(port->pcpu);
+	free_percpu(port->stats);
+	for (i = 0; i < txq_number; i++)
+		free_percpu(port->txqs[i]->pcpu);
+	irq_dispose_mapping(port->irq);
+	free_netdev(port->dev);
+}
+
+/* Initialize decoding windows */
+static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
+				    struct mvpp2 *priv)
+{
+	u32 win_enable;
+	int i;
+
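+	/* Clear all six address-decoding windows (only the first four have
+	 * remap registers), then program one window per DRAM chip select
+	 * and enable the programmed ones.
+	 */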
+	for (i = 0; i < 6; i++) {
+		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
+		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
+
+		if (i < 4)
+			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
+	}
+
+	win_enable = 0;
+
+	for (i = 0; i < dram->num_cs; i++) {
+		const struct mbus_dram_window *cs = dram->cs + i;
+
+		mvpp2_write(priv, MVPP2_WIN_BASE(i),
+			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
+			    dram->mbus_dram_target_id);
+
+		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
+			    (cs->size - 1) & 0xffff0000);
+
+		win_enable |= (1 << i);
+	}
+
+	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
+}
+
+/* Initialize Rx FIFOs */
+static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
+{
+	int port;
+
+	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
+		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
+			    MVPP2_RX_FIFO_PORT_DATA_SIZE);
+		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
+			    MVPP2_RX_FIFO_PORT_ATTR_SIZE);
+	}
+
+	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
+		    MVPP2_RX_FIFO_PORT_MIN_PKT);
+	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
+}
+
+/* Initialize network controller common part HW */
+static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
+{
+	const struct mbus_dram_target_info *dram_target_info;
+	int err, i;
+	u32 val;
+
+	/* Checks for hardware constraints */
+	if (rxq_number % 4 || (rxq_number > MVPP2_MAX_RXQ) ||
+	    (txq_number > MVPP2_MAX_TXQ)) {
+		dev_err(&pdev->dev, "invalid queue size parameter\n");
+		return -EINVAL;
+	}
+
+	/* MBUS windows configuration */
+	dram_target_info = mv_mbus_dram_info();
+	if (dram_target_info)
+		mvpp2_conf_mbus_windows(dram_target_info, priv);
+
+	/* Disable HW PHY polling */
+	val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
+	val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
+	writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
+
+	/* Allocate and initialize aggregated TXQs */
+	priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
+				       sizeof(struct mvpp2_tx_queue),
+				       GFP_KERNEL);
+	if (!priv->aggr_txqs)
+		return -ENOMEM;
+
+	for_each_present_cpu(i) {
+		priv->aggr_txqs[i].id = i;
+		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
+		err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i],
+					  MVPP2_AGGR_TXQ_SIZE, i, priv);
+		if (err < 0)
+			return err;
+	}
+
+	/* Rx Fifo Init */
+	mvpp2_rx_fifo_init(priv);
+
+	/* Reset Rx queue group interrupt configuration */
+	for (i = 0; i < MVPP2_MAX_PORTS; i++)
+		mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(i), rxq_number);
+
+	writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
+	       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
+
+	/* Allow cache snoop when transmitting packets */
+	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
+
+	/* Buffer Manager initialization */
+	err = mvpp2_bm_init(pdev, priv);
+	if (err < 0)
+		return err;
+
+	/* Parser default initialization */
+	err = mvpp2_prs_default_init(pdev, priv);
+	if (err < 0)
+		return err;
+
+	/* Classifier default initialization */
+	mvpp2_cls_init(priv);
+
+	return 0;
+}
+
+static int mvpp2_probe(struct platform_device *pdev)
+{
+	struct device_node *dn = pdev->dev.of_node;
+	struct device_node *port_node;
+	struct mvpp2 *priv;
+	struct resource *res;
+	int port_count, first_rxq;
+	int err;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(struct mvpp2), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(priv->base))
+		return PTR_ERR(priv->base);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(priv->lms_base))
+		return PTR_ERR(priv->lms_base);
+
+	priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
+	if (IS_ERR(priv->pp_clk))
+		return PTR_ERR(priv->pp_clk);
+	err = clk_prepare_enable(priv->pp_clk);
+	if (err < 0)
+		return err;
+
+	priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
+	if (IS_ERR(priv->gop_clk)) {
+		err = PTR_ERR(priv->gop_clk);
+		goto err_pp_clk;
+	}
+	err = clk_prepare_enable(priv->gop_clk);
+	if (err < 0)
+		goto err_pp_clk;
+
+	/* Get system's tclk rate */
+	priv->tclk = clk_get_rate(priv->pp_clk);
+
+	/* Initialize network controller */
+	err = mvpp2_init(pdev, priv);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to initialize controller\n");
+		goto err_gop_clk;
+	}
+
+	port_count = of_get_available_child_count(dn);
+	if (port_count == 0) {
+		dev_err(&pdev->dev, "no ports enabled\n");
+		err = -ENODEV;
+		goto err_gop_clk;
+	}
+
+	priv->port_list = devm_kcalloc(&pdev->dev, port_count,
+				      sizeof(struct mvpp2_port *),
+				      GFP_KERNEL);
+	if (!priv->port_list) {
+		err = -ENOMEM;
+		goto err_gop_clk;
+	}
+
+	/* Initialize ports */
+	first_rxq = 0;
+	for_each_available_child_of_node(dn, port_node) {
+		err = mvpp2_port_probe(pdev, port_node, priv, &first_rxq);
+		if (err < 0)
+			goto err_gop_clk;
+	}
+
+	platform_set_drvdata(pdev, priv);
+	return 0;
+
+err_gop_clk:
+	clk_disable_unprepare(priv->gop_clk);
+err_pp_clk:
+	clk_disable_unprepare(priv->pp_clk);
+	return err;
+}
+
+static int mvpp2_remove(struct platform_device *pdev)
+{
+	struct mvpp2 *priv = platform_get_drvdata(pdev);
+	struct device_node *dn = pdev->dev.of_node;
+	struct device_node *port_node;
+	int i = 0;
+
+	for_each_available_child_of_node(dn, port_node) {
+		if (priv->port_list[i])
+			mvpp2_port_remove(priv->port_list[i]);
+		i++;
+	}
+
+	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
+		struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
+
+		mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
+	}
+
+	for_each_present_cpu(i) {
+		struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];
+
+		dma_free_coherent(&pdev->dev,
+				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
+				  aggr_txq->descs,
+				  aggr_txq->descs_phys);
+	}
+
+	clk_disable_unprepare(priv->pp_clk);
+	clk_disable_unprepare(priv->gop_clk);
+
+	return 0;
+}
+
+static const struct of_device_id mvpp2_match[] = {
+	{ .compatible = "marvell,armada-375-pp2" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, mvpp2_match);
+
+static struct platform_driver mvpp2_driver = {
+	.probe = mvpp2_probe,
+	.remove = mvpp2_remove,
+	.driver = {
+		.name = MVPP2_DRIVER_NAME,
+		.of_match_table = mvpp2_match,
+	},
+};
+
+module_platform_driver(mvpp2_driver);
+
+MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
+MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
new file mode 100644
index 0000000..7ace07d
--- /dev/null
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -0,0 +1,1630 @@
+/*
+ * PXA168 ethernet driver.
+ * Most of the code is derived from mv643xx ethernet driver.
+ *
+ * Copyright (C) 2010 Marvell International Ltd.
+ *		Sachin Sanap <ssanap@marvell.com>
+ *		Zhangfei Gao <zgao6@marvell.com>
+ *		Philip Rakity <prakity@marvell.com>
+ *		Mark Brown <markb@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/in.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pxa168_eth.h>
+#include <linux/tcp.h>
+#include <linux/types.h>
+#include <linux/udp.h>
+#include <linux/workqueue.h>
+
+#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
+
+#define DRIVER_NAME	"pxa168-eth"
+#define DRIVER_VERSION	"0.3"
+
+/*
+ * Registers
+ */
+
+#define PHY_ADDRESS		0x0000
+#define SMI			0x0010
+#define PORT_CONFIG		0x0400
+#define PORT_CONFIG_EXT		0x0408
+#define PORT_COMMAND		0x0410
+#define PORT_STATUS		0x0418
+#define HTPR			0x0428
+#define MAC_ADDR_LOW		0x0430
+#define MAC_ADDR_HIGH		0x0438
+#define SDMA_CONFIG		0x0440
+#define SDMA_CMD		0x0448
+#define INT_CAUSE		0x0450
+#define INT_W_CLEAR		0x0454
+#define INT_MASK		0x0458
+#define ETH_F_RX_DESC_0		0x0480
+#define ETH_C_RX_DESC_0		0x04A0
+#define ETH_C_TX_DESC_1		0x04E4
+
+/* smi register */
+#define SMI_BUSY		(1 << 28)	/* 0 - Write, 1 - Read  */
+#define SMI_R_VALID		(1 << 27)	/* 0 - Write, 1 - Read  */
+#define SMI_OP_W		(0 << 26)	/* Write operation      */
+#define SMI_OP_R		(1 << 26)	/* Read operation */
+
+#define PHY_WAIT_ITERATIONS	10
+
+#define PXA168_ETH_PHY_ADDR_DEFAULT	0
+/* RX & TX descriptor command */
+#define BUF_OWNED_BY_DMA	(1 << 31)
+
+/* RX descriptor status */
+#define RX_EN_INT		(1 << 23)
+#define RX_FIRST_DESC		(1 << 17)
+#define RX_LAST_DESC		(1 << 16)
+#define RX_ERROR		(1 << 15)
+
+/* TX descriptor command */
+#define TX_EN_INT		(1 << 23)
+#define TX_GEN_CRC		(1 << 22)
+#define TX_ZERO_PADDING		(1 << 18)
+#define TX_FIRST_DESC		(1 << 17)
+#define TX_LAST_DESC		(1 << 16)
+#define TX_ERROR		(1 << 15)
+
+/* SDMA_CMD */
+#define SDMA_CMD_AT		(1 << 31)
+#define SDMA_CMD_TXDL		(1 << 24)
+#define SDMA_CMD_TXDH		(1 << 23)
+#define SDMA_CMD_AR		(1 << 15)
+#define SDMA_CMD_ERD		(1 << 7)
+
+/* Bit definitions of the Port Config Reg */
+#define PCR_DUPLEX_FULL		(1 << 15)
+#define PCR_HS			(1 << 12)
+#define PCR_EN			(1 << 7)
+#define PCR_PM			(1 << 0)
+
+/* Bit definitions of the Port Config Extend Reg */
+#define PCXR_2BSM		(1 << 28)
+#define PCXR_DSCP_EN		(1 << 21)
+#define PCXR_RMII_EN		(1 << 20)
+#define PCXR_AN_SPEED_DIS	(1 << 19)
+#define PCXR_SPEED_100		(1 << 18)
+#define PCXR_MFL_1518		(0 << 14)
+#define PCXR_MFL_1536		(1 << 14)
+#define PCXR_MFL_2048		(2 << 14)
+#define PCXR_MFL_64K		(3 << 14)
+#define PCXR_FLOWCTL_DIS	(1 << 12)
+#define PCXR_FLP		(1 << 11)
+#define PCXR_AN_FLOWCTL_DIS	(1 << 10)
+#define PCXR_AN_DUPLEX_DIS	(1 << 9)
+#define PCXR_PRIO_TX_OFF	3
+#define PCXR_TX_HIGH_PRI	(7 << PCXR_PRIO_TX_OFF)
+
+/* Bit definitions of the SDMA Config Reg */
+#define SDCR_BSZ_OFF		12
+#define SDCR_BSZ8		(3 << SDCR_BSZ_OFF)
+#define SDCR_BSZ4		(2 << SDCR_BSZ_OFF)
+#define SDCR_BSZ2		(1 << SDCR_BSZ_OFF)
+#define SDCR_BSZ1		(0 << SDCR_BSZ_OFF)
+#define SDCR_BLMR		(1 << 6)
+#define SDCR_BLMT		(1 << 7)
+#define SDCR_RIFB		(1 << 9)
+#define SDCR_RC_OFF		2
+#define SDCR_RC_MAX_RETRANS	(0xf << SDCR_RC_OFF)
+
+/*
+ * The bit definitions of the Interrupt Cause Reg and
+ * the Interrupt Mask Reg are the same
+ */
+#define ICR_RXBUF		(1 << 0)
+#define ICR_TXBUF_H		(1 << 2)
+#define ICR_TXBUF_L		(1 << 3)
+#define ICR_TXEND_H		(1 << 6)
+#define ICR_TXEND_L		(1 << 7)
+#define ICR_RXERR		(1 << 8)
+#define ICR_TXERR_H		(1 << 10)
+#define ICR_TXERR_L		(1 << 11)
+#define ICR_TX_UDR		(1 << 13)
+#define ICR_MII_CH		(1 << 28)
+
+#define ALL_INTS (ICR_TXBUF_H  | ICR_TXBUF_L  | ICR_TX_UDR |\
+				ICR_TXERR_H  | ICR_TXERR_L |\
+				ICR_TXEND_H  | ICR_TXEND_L |\
+				ICR_RXBUF | ICR_RXERR  | ICR_MII_CH)
+
+#define ETH_HW_IP_ALIGN		2	/* hw aligns IP header */
+
+#define NUM_RX_DESCS		64
+#define NUM_TX_DESCS		64
+
+#define HASH_ADD		0
+#define HASH_DELETE		1
+#define HASH_ADDR_TABLE_SIZE	0x4000	/* 16K (1/2K address - PCR_HS == 1) */
+#define HOP_NUMBER		12
+
+/* Bit definitions for Port status */
+#define PORT_SPEED_100		(1 << 0)
+#define FULL_DUPLEX		(1 << 1)
+#define FLOW_CONTROL_DISABLED	(1 << 2)
+#define LINK_UP			(1 << 3)
+
+/* Bit definitions for work to be done */
+#define WORK_TX_DONE		(1 << 1)
+
+/*
+ * Misc definitions.
+ */
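+/* Extra headroom to reserve after netdev_alloc_skb() so that skb->data can
+ * end up cache-line aligned for DMA: NET_SKB_PAD plus this value adds up to
+ * a multiple of SMP_CACHE_BYTES (PAGE_SIZE itself is cache-line aligned).
+ */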
+#define SKB_DMA_REALIGN		((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
+
+struct rx_desc {
+	u32 cmd_sts;		/* Descriptor command status            */
+	u16 byte_cnt;		/* Descriptor buffer byte count         */
+	u16 buf_size;		/* Buffer size                          */
+	u32 buf_ptr;		/* Descriptor buffer pointer            */
+	u32 next_desc_ptr;	/* Next descriptor pointer              */
+};
+
+struct tx_desc {
+	u32 cmd_sts;		/* Command/status field                 */
+	u16 reserved;
+	u16 byte_cnt;		/* buffer byte count                    */
+	u32 buf_ptr;		/* pointer to buffer for this descriptor */
+	u32 next_desc_ptr;	/* Pointer to next descriptor           */
+};
+
+struct pxa168_eth_private {
+	int port_num;		/* User Ethernet port number    */
+	int phy_addr;
+	int phy_speed;
+	int phy_duplex;
+	phy_interface_t phy_intf;
+
+	int rx_resource_err;	/* Rx ring resource error flag */
+
+	/* Next available and first returning Rx resource */
+	int rx_curr_desc_q, rx_used_desc_q;
+
+	/* Next available and first returning Tx resource */
+	int tx_curr_desc_q, tx_used_desc_q;
+
+	struct rx_desc *p_rx_desc_area;
+	dma_addr_t rx_desc_dma;
+	int rx_desc_area_size;
+	struct sk_buff **rx_skb;
+
+	struct tx_desc *p_tx_desc_area;
+	dma_addr_t tx_desc_dma;
+	int tx_desc_area_size;
+	struct sk_buff **tx_skb;
+
+	struct work_struct tx_timeout_task;
+
+	struct net_device *dev;
+	struct napi_struct napi;
+	u8 work_todo;
+	int skb_size;
+
+	/* Size of Tx Ring per queue */
+	int tx_ring_size;
+	/* Number of tx descriptors in use */
+	int tx_desc_count;
+	/* Size of Rx Ring per queue */
+	int rx_ring_size;
+	/* Number of rx descriptors in use */
+	int rx_desc_count;
+
+	/*
+	 * Used in case the RX ring is empty, which can occur when the
+	 * system runs out of resources (skbs)
+	 */
+	struct timer_list timeout;
+	struct mii_bus *smi_bus;
+	struct phy_device *phy;
+
+	/* clock */
+	struct clk *clk;
+	struct pxa168_eth_platform_data *pd;
+	/*
+	 * Ethernet controller base address.
+	 */
+	void __iomem *base;
+
+	/* Pointer to the hardware address filter table */
+	void *htpr;
+	dma_addr_t htpr_dma;
+};
+
+struct addr_table_entry {
+	__le32 lo;
+	__le32 hi;
+};
+
+/* Bit fields of a Hash Table Entry */
+enum hash_table_entry {
+	HASH_ENTRY_VALID = 1,
+	SKIP = 2,
+	HASH_ENTRY_RECEIVE_DISCARD = 4,
+	HASH_ENTRY_RECEIVE_DISCARD_BIT = 2
+};
+
+static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
+static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd);
+static int pxa168_init_hw(struct pxa168_eth_private *pep);
+static int pxa168_init_phy(struct net_device *dev);
+static void eth_port_reset(struct net_device *dev);
+static void eth_port_start(struct net_device *dev);
+static int pxa168_eth_open(struct net_device *dev);
+static int pxa168_eth_stop(struct net_device *dev);
+
+static inline u32 rdl(struct pxa168_eth_private *pep, int offset)
+{
+	return readl(pep->base + offset);
+}
+
+static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data)
+{
+	writel(data, pep->base + offset);
+}
+
+static void abort_dma(struct pxa168_eth_private *pep)
+{
+	int delay;
+	int max_retries = 40;
+
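+	/* Worst case this busy-waits about 40 * (100us + 10 * 10us) = 8ms
+	 * before giving up and reporting the DMA engine stuck.
+	 */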
+	do {
+		wrl(pep, SDMA_CMD, SDMA_CMD_AR | SDMA_CMD_AT);
+		udelay(100);
+
+		delay = 10;
+		while ((rdl(pep, SDMA_CMD) & (SDMA_CMD_AR | SDMA_CMD_AT))
+		       && delay-- > 0) {
+			udelay(10);
+		}
+	} while (max_retries-- > 0 && delay <= 0);
+
+	if (max_retries <= 0)
+		netdev_err(pep->dev, "%s : DMA Stuck\n", __func__);
+}
+
+static void rxq_refill(struct net_device *dev)
+{
+	struct pxa168_eth_private *pep = netdev_priv(dev);
+	struct sk_buff *skb;
+	struct rx_desc *p_used_rx_desc;
+	int used_rx_desc;
+
+	while (pep->rx_desc_count < pep->rx_ring_size) {
+		int size;
+
+		skb = netdev_alloc_skb(dev, pep->skb_size);
+		if (!skb)
+			break;
+		if (SKB_DMA_REALIGN)
+			skb_reserve(skb, SKB_DMA_REALIGN);
+		pep->rx_desc_count++;
+		/* Get 'used' Rx descriptor */
+		used_rx_desc = pep->rx_used_desc_q;
+		p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc];
+		size = skb_end_pointer(skb) - skb->data;
+		p_used_rx_desc->buf_ptr = dma_map_single(NULL,
+							 skb->data,
+							 size,
+							 DMA_FROM_DEVICE);
+		p_used_rx_desc->buf_size = size;
+		pep->rx_skb[used_rx_desc] = skb;
+
+		/* Return the descriptor to DMA ownership */
+		wmb();
+		p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT;
+		wmb();
+
+		/* Move the used descriptor pointer to the next descriptor */
+		pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size;
+
+		/* Any Rx return cancels the Rx resource error status */
+		pep->rx_resource_err = 0;
+
+		skb_reserve(skb, ETH_HW_IP_ALIGN);
+	}
+
+	/*
+	 * If the RX ring is empty of skbs, set a timer to try
+	 * allocating again at a later time.
+	 */
+	if (pep->rx_desc_count == 0) {
+		pep->timeout.expires = jiffies + (HZ / 10);
+		add_timer(&pep->timeout);
+	}
+}
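+
+/*
+ * Note on the refill protocol above: the descriptor fields (buf_ptr and
+ * buf_size) must be visible to the device before ownership is handed
+ * over, hence the wmb() barriers around the cmd_sts write.  If no skb
+ * could be allocated at all, a one-shot timer retries the refill a
+ * tenth of a second later (HZ / 10 jiffies).
+ */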
+
+static inline void rxq_refill_timer_wrapper(unsigned long data)
+{
+	struct pxa168_eth_private *pep = (void *)data;
+	napi_schedule(&pep->napi);
+}
+
+static inline u8 flip_8_bits(u8 x)
+{
+	return (((x) & 0x01) << 3) | (((x) & 0x02) << 1)
+	    | (((x) & 0x04) >> 1) | (((x) & 0x08) >> 3)
+	    | (((x) & 0x10) << 3) | (((x) & 0x20) << 1)
+	    | (((x) & 0x40) >> 1) | (((x) & 0x80) >> 3);
+}
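+
+/*
+ * flip_8_bits() reverses the bit order within each nibble independently:
+ * bits 3..0 become 0..3 and bits 7..4 become 4..7.  For example,
+ * 0x12 (0001 0010b) becomes 0x84 (1000 0100b).
+ */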
+
+static void nibble_swap_every_byte(unsigned char *mac_addr)
+{
+	int i;
+	for (i = 0; i < ETH_ALEN; i++) {
+		mac_addr[i] = ((mac_addr[i] & 0x0f) << 4) |
+				((mac_addr[i] & 0xf0) >> 4);
+	}
+}
+
+static void inverse_every_nibble(unsigned char *mac_addr)
+{
+	int i;
+	for (i = 0; i < ETH_ALEN; i++)
+		mac_addr[i] = flip_8_bits(mac_addr[i]);
+}
+
+/*
+ * ----------------------------------------------------------------------------
+ * This function calculates the hash of the given MAC address.
+ * Inputs
+ * mac_addr_orig    - MAC address.
+ * Outputs
+ * return the calculated hash table index.
+ */
+static u32 hash_function(unsigned char *mac_addr_orig)
+{
+	u32 hash_result;
+	u32 addr0;
+	u32 addr1;
+	u32 addr2;
+	u32 addr3;
+	unsigned char mac_addr[ETH_ALEN];
+
+	/* Make a copy of the MAC address since we are going to perform
+	 * bit operations on it
+	 */
+	memcpy(mac_addr, mac_addr_orig, ETH_ALEN);
+
+	nibble_swap_every_byte(mac_addr);
+	inverse_every_nibble(mac_addr);
+
+	addr0 = (mac_addr[5] >> 2) & 0x3f;
+	addr1 = (mac_addr[5] & 0x03) | (((mac_addr[4] & 0x7f)) << 2);
+	addr2 = ((mac_addr[4] & 0x80) >> 7) | mac_addr[3] << 1;
+	addr3 = (mac_addr[2] & 0xff) | ((mac_addr[1] & 1) << 8);
+
+	hash_result = (addr0 << 9) | (addr1 ^ addr2 ^ addr3);
+	hash_result = hash_result & 0x07ff;
+	return hash_result;
+}
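+
+/*
+ * The result above is an 11-bit value (masked with 0x07ff), i.e. an
+ * index into the 2048-entry table used in the 1/2kB hash mode; with
+ * 8-byte entries that is the 16kB of DRAM mentioned in
+ * init_hash_table() below.
+ */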
+
+/*
+ * ----------------------------------------------------------------------------
+ * This function will add/del an entry to the address table.
+ * Inputs
+ * pep - port private data.
+ * mac_addr - MAC address.
+ * skip - if 1, skip this address. Used when deleting an entry which is
+ *	  part of a chain in the hash table. We can't just delete the
+ *	  entry since that would break the chain. We need to defragment
+ *	  the tables from time to time.
+ * rd   - 0 Discard packet upon match.
+ *	- 1 Receive packet upon match.
+ * Outputs
+ * address table entry is added/deleted.
+ * 0 if success.
+ * -ENOSPC if table full
+ */
+static int add_del_hash_entry(struct pxa168_eth_private *pep,
+			      unsigned char *mac_addr,
+			      u32 rd, u32 skip, int del)
+{
+	struct addr_table_entry *entry, *start;
+	u32 new_high;
+	u32 new_low;
+	u32 i;
+
+	new_low = (((mac_addr[1] >> 4) & 0xf) << 15)
+	    | (((mac_addr[1] >> 0) & 0xf) << 11)
+	    | (((mac_addr[0] >> 4) & 0xf) << 7)
+	    | (((mac_addr[0] >> 0) & 0xf) << 3)
+	    | (((mac_addr[3] >> 4) & 0x1) << 31)
+	    | (((mac_addr[3] >> 0) & 0xf) << 27)
+	    | (((mac_addr[2] >> 4) & 0xf) << 23)
+	    | (((mac_addr[2] >> 0) & 0xf) << 19)
+	    | (skip << SKIP) | (rd << HASH_ENTRY_RECEIVE_DISCARD_BIT)
+	    | HASH_ENTRY_VALID;
+
+	new_high = (((mac_addr[5] >> 4) & 0xf) << 15)
+	    | (((mac_addr[5] >> 0) & 0xf) << 11)
+	    | (((mac_addr[4] >> 4) & 0xf) << 7)
+	    | (((mac_addr[4] >> 0) & 0xf) << 3)
+	    | (((mac_addr[3] >> 5) & 0x7) << 0);
+
+	/*
+	 * Pick the appropriate table, start scanning for free/reusable
+	 * entries at the index obtained by hashing the specified MAC address
+	 */
+	start = pep->htpr;
+	entry = start + hash_function(mac_addr);
+	for (i = 0; i < HOP_NUMBER; i++) {
+		if (!(le32_to_cpu(entry->lo) & HASH_ENTRY_VALID)) {
+			break;
+		} else {
+			/* if same address put in same position */
+			if (((le32_to_cpu(entry->lo) & 0xfffffff8) ==
+				(new_low & 0xfffffff8)) &&
+				(le32_to_cpu(entry->hi) == new_high)) {
+				break;
+			}
+		}
+		if (entry == start + 0x7ff)
+			entry = start;
+		else
+			entry++;
+	}
+
+	if (((le32_to_cpu(entry->lo) & 0xfffffff8) != (new_low & 0xfffffff8)) &&
+	    (le32_to_cpu(entry->hi) != new_high) && del)
+		return 0;
+
+	if (i == HOP_NUMBER) {
+		if (!del) {
+			netdev_info(pep->dev,
+				    "%s: table section is full, need to "
+				    "move to 16kB implementation?\n",
+				    __FILE__);
+			return -ENOSPC;
+		} else
+			return 0;
+	}
+
+	/*
+	 * Update the selected entry
+	 */
+	if (del) {
+		entry->hi = 0;
+		entry->lo = 0;
+	} else {
+		entry->hi = cpu_to_le32(new_high);
+		entry->lo = cpu_to_le32(new_low);
+	}
+
+	return 0;
+}
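+
+/*
+ * The lookup above is open addressing with linear probing: scanning
+ * starts at the hashed index, wraps around at entry 0x7ff, and gives up
+ * after HOP_NUMBER slots.  The low three bits of the 'lo' word hold the
+ * valid/skip/receive-discard flags, which is why address comparisons
+ * mask with 0xfffffff8.
+ */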
+
+/*
+ * ----------------------------------------------------------------------------
+ *  Create an address table entry from the MAC address info
+ *  found in the specified net_device struct
+ *
+ *  Input : pointer to ethernet interface network device structure
+ *  Output : N/A
+ */
+static void update_hash_table_mac_address(struct pxa168_eth_private *pep,
+					  unsigned char *oaddr,
+					  unsigned char *addr)
+{
+	/* Delete old entry */
+	if (oaddr)
+		add_del_hash_entry(pep, oaddr, 1, 0, HASH_DELETE);
+	/* Add new entry */
+	add_del_hash_entry(pep, addr, 1, 0, HASH_ADD);
+}
+
+static int init_hash_table(struct pxa168_eth_private *pep)
+{
+	/*
+	 * Hardware expects CPU to build a hash table based on a predefined
+	 * hash function and populate it based on hardware address. The
+	 * location of the hash table is identified by a 32-bit pointer
+	 * stored in the HTPR internal register. Two possible sizes exist
+	 * for the hash table: 8kB (256kB of DRAM required (4 x 64 kB
+	 * banks)) and 1/2kB (16kB of DRAM required (4 x 4 kB banks)). We
+	 * currently only support the 1/2kB size.
+	 */
+	/* TODO: Add support for the 8kB hash table and alternative hash
+	 * function. The driver can dynamically switch to them if the 1/2kB
+	 * hash table is full.
+	 */
+	if (pep->htpr == NULL) {
+		pep->htpr = dma_zalloc_coherent(pep->dev->dev.parent,
+						HASH_ADDR_TABLE_SIZE,
+						&pep->htpr_dma, GFP_KERNEL);
+		if (pep->htpr == NULL)
+			return -ENOMEM;
+	} else {
+		memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
+	}
+	wrl(pep, HTPR, pep->htpr_dma);
+	return 0;
+}
+
+static void pxa168_eth_set_rx_mode(struct net_device *dev)
+{
+	struct pxa168_eth_private *pep = netdev_priv(dev);
+	struct netdev_hw_addr *ha;
+	u32 val;
+
+	val = rdl(pep, PORT_CONFIG);
+	if (dev->flags & IFF_PROMISC)
+		val |= PCR_PM;
+	else
+		val &= ~PCR_PM;
+	wrl(pep, PORT_CONFIG, val);
+
+	/*
+	 * Remove the old list of MAC addresses and add dev->dev_addr
+	 * and the multicast addresses.
+	 */
+	memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
+	update_hash_table_mac_address(pep, NULL, dev->dev_addr);
+
+	netdev_for_each_mc_addr(ha, dev)
+		update_hash_table_mac_address(pep, NULL, ha->addr);
+}
+
+static void pxa168_eth_get_mac_address(struct net_device *dev,
+				       unsigned char *addr)
+{
+	struct pxa168_eth_private *pep = netdev_priv(dev);
+	unsigned int mac_h = rdl(pep, MAC_ADDR_HIGH);
+	unsigned int mac_l = rdl(pep, MAC_ADDR_LOW);
+
+	addr[0] = (mac_h >> 24) & 0xff;
+	addr[1] = (mac_h >> 16) & 0xff;
+	addr[2] = (mac_h >> 8) & 0xff;
+	addr[3] = mac_h & 0xff;
+	addr[4] = (mac_l >> 8) & 0xff;
+	addr[5] = mac_l & 0xff;
+}
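+
+/*
+ * MAC address register layout, as read back above and programmed in
+ * pxa168_eth_set_mac_address() below: MAC_ADDR_HIGH holds address
+ * bytes 0-3 (byte 0 in bits 31:24), MAC_ADDR_LOW holds bytes 4-5 in
+ * its low 16 bits.
+ */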
+
+static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr)
+{
+	struct sockaddr *sa = addr;
+	struct pxa168_eth_private *pep = netdev_priv(dev);
+	unsigned char oldMac[ETH_ALEN];
+	u32 mac_h, mac_l;
+
+	if (!is_valid_ether_addr(sa->sa_data))
+		return -EADDRNOTAVAIL;
+	memcpy(oldMac, dev->dev_addr, ETH_ALEN);
+	memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
+
+	mac_h = dev->dev_addr[0] << 24;
+	mac_h |= dev->dev_addr[1] << 16;
+	mac_h |= dev->dev_addr[2] << 8;
+	mac_h |= dev->dev_addr[3];
+	mac_l = dev->dev_addr[4] << 8;
+	mac_l |= dev->dev_addr[5];
+	wrl(pep, MAC_ADDR_HIGH, mac_h);
+	wrl(pep, MAC_ADDR_LOW, mac_l);
+
+	netif_addr_lock_bh(dev);
+	update_hash_table_mac_address(pep, oldMac, dev->dev_addr);
+	netif_addr_unlock_bh(dev);
+	return 0;
+}
+
+static void eth_port_start(struct net_device *dev)
+{
+	unsigned int val = 0;
+	struct pxa168_eth_private *pep = netdev_priv(dev);
+	int tx_curr_desc, rx_curr_desc;
+
+	phy_start(pep->phy);
+
+	/* Assignment of Tx CTRP of given queue */
+	tx_curr_desc = pep->tx_curr_desc_q;
+	wrl(pep, ETH_C_TX_DESC_1,
+	    (u32) (pep->tx_desc_dma + tx_curr_desc * sizeof(struct tx_desc)));
+
+	/* Assignment of Rx CRDP of given queue */
+	rx_curr_desc = pep->rx_curr_desc_q;
+	wrl(pep, ETH_C_RX_DESC_0,
+	    (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));
+
+	wrl(pep, ETH_F_RX_DESC_0,
+	    (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));
+
+	/* Clear all interrupts */
+	wrl(pep, INT_CAUSE, 0);
+
+	/* Enable all interrupts for receive, transmit and error. */
+	wrl(pep, INT_MASK, ALL_INTS);
+
+	val = rdl(pep, PORT_CONFIG);
+	val |= PCR_EN;
+	wrl(pep, PORT_CONFIG, val);
+
+	/* Start RX DMA engine */
+	val = rdl(pep, SDMA_CMD);
+	val |= SDMA_CMD_ERD;
+	wrl(pep, SDMA_CMD, val);
+}
+
+static void eth_port_reset(struct net_device *dev)
+{
+	struct pxa168_eth_private *pep = netdev_priv(dev);
+	unsigned int val = 0;
+
+	/* Stop all interrupts for receive, transmit and error. */
+	wrl(pep, INT_MASK, 0);
+
+	/* Clear all interrupts */
+	wrl(pep, INT_CAUSE, 0);
+
+	/* Stop RX DMA */
+	val = rdl(pep, SDMA_CMD);
+	val &= ~SDMA_CMD_ERD;	/* abort dma command */
+
+	/* Abort any transmit and receive operations and put DMA
+	 * in idle state.
+	 */
+	abort_dma(pep);
+
+	/* Disable port */
+	val = rdl(pep, PORT_CONFIG);
+	val &= ~PCR_EN;
+	wrl(pep, PORT_CONFIG, val);
+
+	phy_stop(pep->phy);
+}
+
+/*
+ * txq_reclaim - Free the tx desc data for completed descriptors
+ * If force is non-zero, frees uncompleted descriptors as well
+ */
+static int txq_reclaim(struct net_device *dev, int force)
+{
+	struct pxa168_eth_private *pep = netdev_priv(dev);
+	struct tx_desc *desc;
+	u32 cmd_sts;
+	struct sk_buff *skb;
+	int tx_index;
+	dma_addr_t addr;
+	int count;
+	int released = 0;
+
+	netif_tx_lock(dev);
+
+	pep->work_todo &= ~WORK_TX_DONE;
+	while (pep->tx_desc_count > 0) {
+		tx_index = pep->tx_used_desc_q;
+		desc = &pep->p_tx_desc_area[tx_index];
+		cmd_sts = desc->cmd_sts;
+		if (!force && (cmd_sts & BUF_OWNED_BY_DMA)) {
+			if (released > 0) {
+				goto txq_reclaim_end;
+			} else {
+				released = -1;
+				goto txq_reclaim_end;
+			}
+		}
+		pep->tx_used_desc_q = (tx_index + 1) % pep->tx_ring_size;
+		pep->tx_desc_count--;
+		addr = desc->buf_ptr;
+		count = desc->byte_cnt;
+		skb = pep->tx_skb[tx_index];
+		if (skb)
+			pep->tx_skb[tx_index] = NULL;
+
+		if (cmd_sts & TX_ERROR) {
+			if (net_ratelimit())
+				netdev_err(dev, "Error in TX\n");
+			dev->stats.tx_errors++;
+		}
+		dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
+		if (skb)
+			dev_kfree_skb_irq(skb);
+		released++;
+	}
+txq_reclaim_end:
+	netif_tx_unlock(dev);
+	return released;
+}
+
+static void pxa168_eth_tx_timeout(struct net_device *dev)
+{
+	struct pxa168_eth_private *pep = netdev_priv(dev);
+
+	netdev_info(dev, "TX timeout  desc_count %d\n", pep->tx_desc_count);
+
+	schedule_work(&pep->tx_timeout_task);
+}
+
+static void pxa168_eth_tx_timeout_task(struct work_struct *work)
+{
+	struct pxa168_eth_private *pep = container_of(work,
+						 struct pxa168_eth_private,
+						 tx_timeout_task);
+	struct net_device *dev = pep->dev;
+	pxa168_eth_stop(dev);
+	pxa168_eth_open(dev);
+}
+
+static int rxq_process(struct net_device *dev, int budget)
+{
+	struct pxa168_eth_private *pep = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
+	unsigned int received_packets = 0;
+	struct sk_buff *skb;
+
+	while (budget-- > 0) {
+		int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
+		struct rx_desc *rx_desc;
+		unsigned int cmd_sts;
+
+		/* Do not process Rx ring in case of Rx ring resource error */
+		if (pep->rx_resource_err)
+			break;
+		rx_curr_desc = pep->rx_curr_desc_q;
+		rx_used_desc = pep->rx_used_desc_q;
+		rx_desc = &pep->p_rx_desc_area[rx_curr_desc];
+		cmd_sts = rx_desc->cmd_sts;
+		rmb();
+		if (cmd_sts & (BUF_OWNED_BY_DMA))
+			break;
+		skb = pep->rx_skb[rx_curr_desc];
+		pep->rx_skb[rx_curr_desc] = NULL;
+
+		rx_next_curr_desc = (rx_curr_desc + 1) % pep->rx_ring_size;
+		pep->rx_curr_desc_q = rx_next_curr_desc;
+
+		/* Rx descriptors exhausted. */
+		/* Set the Rx ring resource error flag */
+		if (rx_next_curr_desc == rx_used_desc)
+			pep->rx_resource_err = 1;
+		pep->rx_desc_count--;
+		dma_unmap_single(NULL, rx_desc->buf_ptr,
+				 rx_desc->buf_size,
+				 DMA_FROM_DEVICE);
+		received_packets++;
+		/*
+		 * Update statistics.
+		 * Note: the byte count includes the 4-byte CRC
+		 */
+		stats->rx_packets++;
+		stats->rx_bytes += rx_desc->byte_cnt;
+		/*
+		 * If we received a packet without the first/last bits set,
+		 * or with the error summary bit set, the packet needs to
+		 * be dropped.
+		 */
+		if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
+		     (RX_FIRST_DESC | RX_LAST_DESC))
+		    || (cmd_sts & RX_ERROR)) {
+
+			stats->rx_dropped++;
+			if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
+			    (RX_FIRST_DESC | RX_LAST_DESC)) {
+				if (net_ratelimit())
+					netdev_err(dev,
+						   "Rx pkt on multiple desc\n");
+			}
+			if (cmd_sts & RX_ERROR)
+				stats->rx_errors++;
+			dev_kfree_skb_irq(skb);
+		} else {
+			/*
+			 * The -4 is for the CRC in the trailer of the
+			 * received packet
+			 */
+			skb_put(skb, rx_desc->byte_cnt - 4);
+			skb->protocol = eth_type_trans(skb, dev);
+			netif_receive_skb(skb);
+		}
+	}
+	/* Fill RX ring with skb's */
+	rxq_refill(dev);
+	return received_packets;
+}
+
+static int pxa168_eth_collect_events(struct pxa168_eth_private *pep,
+				     struct net_device *dev)
+{
+	u32 icr;
+	int ret = 0;
+
+	icr = rdl(pep, INT_CAUSE);
+	if (icr == 0)
+		return IRQ_NONE;
+
+	wrl(pep, INT_CAUSE, ~icr);
+	if (icr & (ICR_TXBUF_H | ICR_TXBUF_L)) {
+		pep->work_todo |= WORK_TX_DONE;
+		ret = 1;
+	}
+	if (icr & ICR_RXBUF)
+		ret = 1;
+	return ret;
+}
+
+static irqreturn_t pxa168_eth_int_handler(int irq, void *dev_id)
+{
+	struct net_device *dev = (struct net_device *)dev_id;
+	struct pxa168_eth_private *pep = netdev_priv(dev);
+
+	if (unlikely(!pxa168_eth_collect_events(pep, dev)))
+		return IRQ_NONE;
+	/* Disable interrupts */
+	wrl(pep, INT_MASK, 0);
+	napi_schedule(&pep->napi);
+	return IRQ_HANDLED;
+}
+
+static void pxa168_eth_recalc_skb_size(struct pxa168_eth_private *pep)
+{
+	int skb_size;
+
+	/*
+	 * Reserve 2+14 bytes for an ethernet header (the hardware
+	 * automatically prepends 2 bytes of dummy data to each
+	 * received packet), 16 bytes for up to four VLAN tags, and
+	 * 4 bytes for the trailing FCS -- 36 bytes total.
+	 */
+	skb_size = pep->dev->mtu + 36;
+
+	/*
+	 * Make sure that the skb size is a multiple of 8 bytes, as
+	 * the lower three bits of the receive descriptor's buffer
+	 * size field are ignored by the hardware.
+	 */
+	pep->skb_size = (skb_size + 7) & ~7;
+
+	/*
+	 * If NET_SKB_PAD is smaller than a cache line,
+	 * netdev_alloc_skb() will cause skb->data to be misaligned
+	 * to a cache line boundary.  If this is the case, include
+	 * some extra space to allow re-aligning the data area.
+	 */
+	pep->skb_size += SKB_DMA_REALIGN;
+
+}
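+
+/*
+ * Worked example for the sizing above: with the default 1500-byte MTU,
+ * skb_size = 1500 + 36 = 1536, which is already a multiple of 8, plus
+ * whatever SKB_DMA_REALIGN evaluates to (zero when NET_SKB_PAD already
+ * spans a cache line, per the comment above).
+ */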
+
+static int set_port_config_ext(struct pxa168_eth_private *pep)
+{
+	int skb_size;
+
+	pxa168_eth_recalc_skb_size(pep);
+	if  (pep->skb_size <= 1518)
+		skb_size = PCXR_MFL_1518;
+	else if (pep->skb_size <= 1536)
+		skb_size = PCXR_MFL_1536;
+	else if (pep->skb_size <= 2048)
+		skb_size = PCXR_MFL_2048;
+	else
+		skb_size = PCXR_MFL_64K;
+
+	/* Extended Port Configuration */
+	wrl(pep, PORT_CONFIG_EXT,
+	    PCXR_AN_SPEED_DIS |		 /* Disable HW AN */
+	    PCXR_AN_DUPLEX_DIS |
+	    PCXR_AN_FLOWCTL_DIS |
+	    PCXR_2BSM |			 /* Two byte prefix aligns IP hdr */
+	    PCXR_DSCP_EN |		 /* Enable DSCP in IP */
+	    skb_size | PCXR_FLP |	 /* do not force link pass */
+	    PCXR_TX_HIGH_PRI);		 /* Transmit - high priority queue */
+
+	return 0;
+}
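+
+/*
+ * The thresholds above select the smallest maximum-frame-length setting
+ * that still fits the receive buffer: e.g. a 1536-byte skb_size picks
+ * PCXR_MFL_1536, anything above 2048 bytes falls back to PCXR_MFL_64K.
+ */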
+
+static void pxa168_eth_adjust_link(struct net_device *dev)
+{
+	struct pxa168_eth_private *pep = netdev_priv(dev);
+	struct phy_device *phy = pep->phy;
+	u32 cfg, cfg_o = rdl(pep, PORT_CONFIG);
+	u32 cfgext, cfgext_o = rdl(pep, PORT_CONFIG_EXT);
+
+	cfg = cfg_o & ~PCR_DUPLEX_FULL;
+	cfgext = cfgext_o & ~(PCXR_SPEED_100 | PCXR_FLOWCTL_DIS | PCXR_RMII_EN);
+
+	if (phy->interface == PHY_INTERFACE_MODE_RMII)
+		cfgext |= PCXR_RMII_EN;
+	if (phy->speed == SPEED_100)
+		cfgext |= PCXR_SPEED_100;
+	if (phy->duplex)
+		cfg |= PCR_DUPLEX_FULL;
+	if (!phy->pause)
+		cfgext |= PCXR_FLOWCTL_DIS;
+
+	/* Bail out if nothing has changed */
+	if (cfg == cfg_o && cfgext == cfgext_o)
+		return;
+
+	wrl(pep, PORT_CONFIG, cfg);
+	wrl(pep, PORT_CONFIG_EXT, cfgext);
+
+	phy_print_status(phy);
+}
+
+static int pxa168_init_phy(struct net_device *dev)
+{
+	struct pxa168_eth_private *pep = netdev_priv(dev);
+	struct ethtool_cmd cmd;
+	int err;
+
+	if (pep->phy)
+		return 0;
+
+	pep->phy = mdiobus_scan(pep->smi_bus, pep->phy_addr);
+	if (!pep->phy)
+		return -ENODEV;
+
+	err = phy_connect_direct(dev, pep->phy, pxa168_eth_adjust_link,
+				 pep->phy_intf);
+	if (err)
+		return err;
+
+	err = pxa168_get_settings(dev, &cmd);
+	if (err)
+		return err;
+
+	cmd.phy_address = pep->phy_addr;
+	cmd.speed = pep->phy_speed;
+	cmd.duplex = pep->phy_duplex;
+	cmd.advertising = PHY_BASIC_FEATURES;
+	cmd.autoneg = AUTONEG_ENABLE;
+
+	if (cmd.speed != 0)
+		cmd.autoneg = AUTONEG_DISABLE;
+
+	return pxa168_set_settings(dev, &cmd);
+}
+
+static int pxa168_init_hw(struct pxa168_eth_private *pep)
+{
+	int err = 0;
+
+	/* Disable interrupts */
+	wrl(pep, INT_MASK, 0);
+	wrl(pep, INT_CAUSE, 0);
+	/* Write to ICR to clear interrupts. */
+	wrl(pep, INT_W_CLEAR, 0);
+	/* Abort any transmit and receive operations and put DMA
+	 * in idle state.
+	 */
+	abort_dma(pep);
+	/* Initialize address hash table */
+	err = init_hash_table(pep);
+	if (err)
+		return err;
+	/* SDMA configuration */
+	wrl(pep, SDMA_CONFIG, SDCR_BSZ8 |	/* Burst size = 32 bytes */
+	    SDCR_RIFB |				/* Rx interrupt on frame */
+	    SDCR_BLMT |				/* Little endian transmit */
+	    SDCR_BLMR |				/* Little endian receive */
+	    SDCR_RC_MAX_RETRANS);		/* Max retransmit count */
+	/* Port Configuration */
+	wrl(pep, PORT_CONFIG, PCR_HS);		/* Hash size is 1/2kb */
+	set_port_config_ext(pep);
+
+	return err;
+}
+
+static int rxq_init(struct net_device *dev)
+{
+	struct pxa168_eth_private *pep = netdev_priv(dev);
+	struct rx_desc *p_rx_desc;
+	int size = 0, i = 0;
+	int rx_desc_num = pep->rx_ring_size;
+
+	/* Allocate RX skb rings */
+	pep->rx_skb = kzalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size,
+			     GFP_KERNEL);
+	if (!pep->rx_skb)
+		return -ENOMEM;
+
+	/* Allocate RX ring */
+	pep->rx_desc_count = 0;
+	size = pep->rx_ring_size * sizeof(struct rx_desc);
+	pep->rx_desc_area_size = size;
+	pep->p_rx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size,
+						  &pep->rx_desc_dma,
+						  GFP_KERNEL);
+	if (!pep->p_rx_desc_area)
+		goto out;
+
+	/* initialize the next_desc_ptr links in the Rx descriptors ring */
+	p_rx_desc = pep->p_rx_desc_area;
+	for (i = 0; i < rx_desc_num; i++) {
+		p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma +
+		    ((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
+	}
+	/* Save Rx desc pointer to driver struct. */
+	pep->rx_curr_desc_q = 0;
+	pep->rx_used_desc_q = 0;
+	pep->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc);
+	return 0;
+out:
+	kfree(pep->rx_skb);
+	return -ENOMEM;
+}
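+
+/*
+ * The next_desc_ptr linking above is circular: descriptor i points at
+ * descriptor (i + 1) % ring_size, so the last entry points back to the
+ * first and the hardware can walk the ring indefinitely.  The same
+ * scheme is used for the Tx ring in txq_init() below.
+ */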
+
+static void rxq_deinit(struct net_device *dev)
+{
+	struct pxa168_eth_private *pep = netdev_priv(dev);
+	int curr;
+
+	/* Free preallocated skb's on RX rings */
+	for (curr = 0; pep->rx_desc_count && curr < pep->rx_ring_size; curr++) {
+		if (pep->rx_skb[curr]) {
+			dev_kfree_skb(pep->rx_skb[curr]);
+			pep->rx_desc_count--;
+		}
+	}
+	if (pep->rx_desc_count)
+		netdev_err(dev, "Error in freeing Rx Ring. %d skb's still\n",
+			   pep->rx_desc_count);
+	/* Free RX ring */
+	if (pep->p_rx_desc_area)
+		dma_free_coherent(pep->dev->dev.parent, pep->rx_desc_area_size,
+				  pep->p_rx_desc_area, pep->rx_desc_dma);
+	kfree(pep->rx_skb);
+}
+
+static int txq_init(struct net_device *dev)
+{
+	struct pxa168_eth_private *pep = netdev_priv(dev);
+	struct tx_desc *p_tx_desc;
+	int size = 0, i = 0;
+	int tx_desc_num = pep->tx_ring_size;
+
+	pep->tx_skb = kzalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size,
+			     GFP_KERNEL);
+	if (!pep->tx_skb)
+		return -ENOMEM;
+
+	/* Allocate TX ring */
+	pep->tx_desc_count = 0;
+	size = pep->tx_ring_size * sizeof(struct tx_desc);
+	pep->tx_desc_area_size = size;
+	pep->p_tx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size,
+						  &pep->tx_desc_dma,
+						  GFP_KERNEL);
+	if (!pep->p_tx_desc_area)
+		goto out;
+	/* Initialize the next_desc_ptr links in the Tx descriptors ring */
+	p_tx_desc = pep->p_tx_desc_area;
+	for (i = 0; i < tx_desc_num; i++) {
+		p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma +
+		    ((i + 1) % tx_desc_num) * sizeof(struct tx_desc);
+	}
+	pep->tx_curr_desc_q = 0;
+	pep->tx_used_desc_q = 0;
+	pep->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc);
+	return 0;
+out:
+	kfree(pep->tx_skb);
+	return -ENOMEM;
+}
+
+static void txq_deinit(struct net_device *dev)
+{
+	struct pxa168_eth_private *pep = netdev_priv(dev);
+
+	/* Free outstanding skb's on TX ring */
+	txq_reclaim(dev, 1);
+	BUG_ON(pep->tx_used_desc_q != pep->tx_curr_desc_q);
+	/* Free TX ring */
+	if (pep->p_tx_desc_area)
+		dma_free_coherent(pep->dev->dev.parent, pep->tx_desc_area_size,
+				  pep->p_tx_desc_area, pep->tx_desc_dma);
+	kfree(pep->tx_skb);
+}
+
+static int pxa168_eth_open(struct net_device *dev)
+{
+	struct pxa168_eth_private *pep = netdev_priv(dev);
+	int err;
+
+	err = pxa168_init_phy(dev);
+	if (err)
+		return err;
+
+	err = request_irq(dev->irq, pxa168_eth_int_handler, 0, dev->name, dev);
+	if (err) {
+		dev_err(&dev->dev, "can't assign irq\n");
+		return -EAGAIN;
+	}
+	pep->rx_resource_err = 0;
+	err = rxq_init(dev);
+	if (err != 0)
+		goto out_free_irq;
+	err = txq_init(dev);
+	if (err != 0)
+		goto out_free_rx_skb;
+	pep->rx_used_desc_q = 0;
+	pep->rx_curr_desc_q = 0;
+
+	/* Fill RX ring with skb's */
+	rxq_refill(dev);
+	pep->rx_used_desc_q = 0;
+	pep->rx_curr_desc_q = 0;
+	netif_carrier_off(dev);
+	napi_enable(&pep->napi);
+	eth_port_start(dev);
+	return 0;
+out_free_rx_skb:
+	rxq_deinit(dev);
+out_free_irq:
+	free_irq(dev->irq, dev);
+	return err;
+}
+
+static int pxa168_eth_stop(struct net_device *dev)
+{
+	struct pxa168_eth_private *pep = netdev_priv(dev);
+	eth_port_reset(dev);
+
+	/* Disable interrupts */
+	wrl(pep, INT_MASK, 0);
+	wrl(pep, INT_CAUSE, 0);
+	/* Write to ICR to clear interrupts. */
+	wrl(pep, INT_W_CLEAR, 0);
+	napi_disable(&pep->napi);
+	del_timer_sync(&pep->timeout);
+	netif_carrier_off(dev);
+	free_irq(dev->irq, dev);
+	rxq_deinit(dev);
+	txq_deinit(dev);
+
+	return 0;
+}
+
+static int pxa168_eth_change_mtu(struct net_device *dev, int mtu)
+{
+	int retval;
+	struct pxa168_eth_private *pep = netdev_priv(dev);
+
+	if ((mtu > 9500) || (mtu < 68))
+		return -EINVAL;
+
+	dev->mtu = mtu;
+	retval = set_port_config_ext(pep);
+
+	if (!netif_running(dev))
+		return 0;
+
+	/*
+	 * Stop and then re-open the interface. This will allocate RX
+	 * skbs of the new MTU.
+	 * There is a possible danger that the open will not succeed,
+	 * due to memory being full.
+	 */
+	pxa168_eth_stop(dev);
+	if (pxa168_eth_open(dev)) {
+		dev_err(&dev->dev,
+			"fatal error on re-opening device after MTU change\n");
+	}
+
+	return 0;
+}
+
+static int eth_alloc_tx_desc_index(struct pxa168_eth_private *pep)
+{
+	int tx_desc_curr;
+
+	tx_desc_curr = pep->tx_curr_desc_q;
+	pep->tx_curr_desc_q = (tx_desc_curr + 1) % pep->tx_ring_size;
+	BUG_ON(pep->tx_curr_desc_q == pep->tx_used_desc_q);
+	pep->tx_desc_count++;
+
+	return tx_desc_curr;
+}
+
+static int pxa168_rx_poll(struct napi_struct *napi, int budget)
+{
+	struct pxa168_eth_private *pep =
+	    container_of(napi, struct pxa168_eth_private, napi);
+	struct net_device *dev = pep->dev;
+	int work_done = 0;
+
+	/*
+	 * We call txq_reclaim on every poll, since interrupts are disabled
+	 * during NAPI and we would otherwise miss the TX_DONE interrupt,
+	 * which is not updated in the interrupt status register.
+	 */
+	txq_reclaim(dev, 0);
+	if (netif_queue_stopped(dev)
+	    && pep->tx_ring_size - pep->tx_desc_count > 1) {
+		netif_wake_queue(dev);
+	}
+	work_done = rxq_process(dev, budget);
+	if (work_done < budget) {
+		napi_complete(napi);
+		wrl(pep, INT_MASK, ALL_INTS);
+	}
+
+	return work_done;
+}
+
+static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct pxa168_eth_private *pep = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
+	struct tx_desc *desc;
+	int tx_index;
+	int length;
+
+	tx_index = eth_alloc_tx_desc_index(pep);
+	desc = &pep->p_tx_desc_area[tx_index];
+	length = skb->len;
+	pep->tx_skb[tx_index] = skb;
+	desc->byte_cnt = length;
+	desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
+
+	skb_tx_timestamp(skb);
+
+	wmb();
+	desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC |
+			TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT;
+	wmb();
+	wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD);
+
+	stats->tx_bytes += length;
+	stats->tx_packets++;
+	dev->trans_start = jiffies;
+	if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
+		/* We handled the current skb, but now we are out of space. */
+		netif_stop_queue(dev);
+	}
+
+	return NETDEV_TX_OK;
+}
+
+static int smi_wait_ready(struct pxa168_eth_private *pep)
+{
+	int i = 0;
+
+	/* wait for the SMI register to become available */
+	for (i = 0; rdl(pep, SMI) & SMI_BUSY; i++) {
+		if (i == PHY_WAIT_ITERATIONS)
+			return -ETIMEDOUT;
+		msleep(10);
+	}
+
+	return 0;
+}
+
+static int pxa168_smi_read(struct mii_bus *bus, int phy_addr, int regnum)
+{
+	struct pxa168_eth_private *pep = bus->priv;
+	int i = 0;
+	int val;
+
+	if (smi_wait_ready(pep)) {
+		netdev_warn(pep->dev, "pxa168_eth: SMI bus busy timeout\n");
+		return -ETIMEDOUT;
+	}
+	wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | SMI_OP_R);
+	/* now wait for the data to be valid */
+	for (i = 0; !((val = rdl(pep, SMI)) & SMI_R_VALID); i++) {
+		if (i == PHY_WAIT_ITERATIONS) {
+			netdev_warn(pep->dev,
+				    "pxa168_eth: SMI bus read not valid\n");
+			return -ENODEV;
+		}
+		msleep(10);
+	}
+
+	return val & 0xffff;
+}
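+
+/*
+ * SMI register layout, as used above and in pxa168_smi_write() below:
+ * bits 25:21 carry the PHY register number, bits 20:16 the PHY address,
+ * the opcode is SMI_OP_R or SMI_OP_W, and the low 16 bits carry the
+ * data.  Completion is polled: SMI_BUSY clears when the bus is free and
+ * SMI_R_VALID is set when read data is available, each waited on for up
+ * to PHY_WAIT_ITERATIONS x 10ms.
+ */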
+
+static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum,
+			    u16 value)
+{
+	struct pxa168_eth_private *pep = bus->priv;
+
+	if (smi_wait_ready(pep)) {
+		netdev_warn(pep->dev, "pxa168_eth: SMI bus busy timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) |
+	    SMI_OP_W | (value & 0xffff));
+
+	if (smi_wait_ready(pep)) {
+		netdev_err(pep->dev, "pxa168_eth: SMI bus busy timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr,
+			       int cmd)
+{
+	struct pxa168_eth_private *pep = netdev_priv(dev);
+	if (pep->phy != NULL)
+		return phy_mii_ioctl(pep->phy, ifr, cmd);
+
+	return -EOPNOTSUPP;
+}
+
+static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct pxa168_eth_private *pep = netdev_priv(dev);
+	int err;
+
+	err = phy_read_status(pep->phy);
+	if (err == 0)
+		err = phy_ethtool_gset(pep->phy, cmd);
+
+	return err;
+}
+
+static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct pxa168_eth_private *pep = netdev_priv(dev);
+
+	return phy_ethtool_sset(pep->phy, cmd);
+}
+
+static void pxa168_get_drvinfo(struct net_device *dev,
+			       struct ethtool_drvinfo *info)
+{
+	strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
+	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
+	strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
+}
+
+static const struct ethtool_ops pxa168_ethtool_ops = {
+	.get_settings	= pxa168_get_settings,
+	.set_settings	= pxa168_set_settings,
+	.get_drvinfo	= pxa168_get_drvinfo,
+	.get_link	= ethtool_op_get_link,
+	.get_ts_info	= ethtool_op_get_ts_info,
+};
+
+static const struct net_device_ops pxa168_eth_netdev_ops = {
+	.ndo_open		= pxa168_eth_open,
+	.ndo_stop		= pxa168_eth_stop,
+	.ndo_start_xmit		= pxa168_eth_start_xmit,
+	.ndo_set_rx_mode	= pxa168_eth_set_rx_mode,
+	.ndo_set_mac_address	= pxa168_eth_set_mac_address,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_do_ioctl		= pxa168_eth_do_ioctl,
+	.ndo_change_mtu		= pxa168_eth_change_mtu,
+	.ndo_tx_timeout		= pxa168_eth_tx_timeout,
+};
+
+static int pxa168_eth_probe(struct platform_device *pdev)
+{
+	struct pxa168_eth_private *pep = NULL;
+	struct net_device *dev = NULL;
+	struct resource *res;
+	struct clk *clk;
+	struct device_node *np;
+	const unsigned char *mac_addr = NULL;
+	int err;
+
+	printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n");
+
+	clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(clk)) {
+		dev_err(&pdev->dev, "Fast Ethernet failed to get clock\n");
+		return -ENODEV;
+	}
+	clk_prepare_enable(clk);
+
+	dev = alloc_etherdev(sizeof(struct pxa168_eth_private));
+	if (!dev) {
+		err = -ENOMEM;
+		goto err_clk;
+	}
+
+	platform_set_drvdata(pdev, dev);
+	pep = netdev_priv(dev);
+	pep->dev = dev;
+	pep->clk = clk;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	pep->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(pep->base)) {
+		err = -ENOMEM;
+		goto err_netdev;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	BUG_ON(!res);
+	dev->irq = res->start;
+	dev->netdev_ops = &pxa168_eth_netdev_ops;
+	dev->watchdog_timeo = 2 * HZ;
+	dev->base_addr = 0;
+	dev->ethtool_ops = &pxa168_ethtool_ops;
+
+	INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);
+
+	if (pdev->dev.of_node)
+		mac_addr = of_get_mac_address(pdev->dev.of_node);
+
+	if (mac_addr && is_valid_ether_addr(mac_addr)) {
+		ether_addr_copy(dev->dev_addr, mac_addr);
+	} else {
+		/* try reading the mac address, if set by the bootloader */
+		pxa168_eth_get_mac_address(dev, dev->dev_addr);
+		if (!is_valid_ether_addr(dev->dev_addr)) {
+			dev_info(&pdev->dev, "Using random mac address\n");
+			eth_hw_addr_random(dev);
+		}
+	}
+
+	pep->rx_ring_size = NUM_RX_DESCS;
+	pep->tx_ring_size = NUM_TX_DESCS;
+
+	pep->pd = dev_get_platdata(&pdev->dev);
+	if (pep->pd) {
+		if (pep->pd->rx_queue_size)
+			pep->rx_ring_size = pep->pd->rx_queue_size;
+
+		if (pep->pd->tx_queue_size)
+			pep->tx_ring_size = pep->pd->tx_queue_size;
+
+		pep->port_num = pep->pd->port_number;
+		pep->phy_addr = pep->pd->phy_addr;
+		pep->phy_speed = pep->pd->speed;
+		pep->phy_duplex = pep->pd->duplex;
+		pep->phy_intf = pep->pd->intf;
+
+		if (pep->pd->init)
+			pep->pd->init();
+	} else if (pdev->dev.of_node) {
+		of_property_read_u32(pdev->dev.of_node, "port-id",
+				     &pep->port_num);
+
+		np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+		if (!np) {
+			dev_err(&pdev->dev, "missing phy-handle\n");
+			err = -EINVAL;
+			goto err_netdev;
+		}
+		of_property_read_u32(np, "reg", &pep->phy_addr);
+		pep->phy_intf = of_get_phy_mode(pdev->dev.of_node);
+	}
+
+	/* Hardware supports only 3 ports */
+	BUG_ON(pep->port_num > 2);
+	netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size);
+
+	memset(&pep->timeout, 0, sizeof(struct timer_list));
+	init_timer(&pep->timeout);
+	pep->timeout.function = rxq_refill_timer_wrapper;
+	pep->timeout.data = (unsigned long)pep;
+
+	pep->smi_bus = mdiobus_alloc();
+	if (pep->smi_bus == NULL) {
+		err = -ENOMEM;
+		goto err_netdev;
+	}
+	pep->smi_bus->priv = pep;
+	pep->smi_bus->name = "pxa168_eth smi";
+	pep->smi_bus->read = pxa168_smi_read;
+	pep->smi_bus->write = pxa168_smi_write;
+	snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%s-%d",
+		pdev->name, pdev->id);
+	pep->smi_bus->parent = &pdev->dev;
+	pep->smi_bus->phy_mask = 0xffffffff;
+	err = mdiobus_register(pep->smi_bus);
+	if (err)
+		goto err_free_mdio;
+
+	SET_NETDEV_DEV(dev, &pdev->dev);
+	pxa168_init_hw(pep);
+	err = register_netdev(dev);
+	if (err)
+		goto err_mdiobus;
+	return 0;
+
+err_mdiobus:
+	mdiobus_unregister(pep->smi_bus);
+err_free_mdio:
+	mdiobus_free(pep->smi_bus);
+err_netdev:
+	free_netdev(dev);
+err_clk:
+	clk_disable_unprepare(clk);
+	return err;
+}
+
+static int pxa168_eth_remove(struct platform_device *pdev)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+	struct pxa168_eth_private *pep = netdev_priv(dev);
+
+	if (pep->htpr) {
+		dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE,
+				  pep->htpr, pep->htpr_dma);
+		pep->htpr = NULL;
+	}
+	if (pep->phy)
+		phy_disconnect(pep->phy);
+	if (pep->clk) {
+		clk_disable_unprepare(pep->clk);
+	}
+
+	mdiobus_unregister(pep->smi_bus);
+	mdiobus_free(pep->smi_bus);
+	unregister_netdev(dev);
+	cancel_work_sync(&pep->tx_timeout_task);
+	free_netdev(dev);
+	return 0;
+}
+
+static void pxa168_eth_shutdown(struct platform_device *pdev)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+	eth_port_reset(dev);
+}
+
+#ifdef CONFIG_PM
+static int pxa168_eth_resume(struct platform_device *pdev)
+{
+	return -ENOSYS;
+}
+
+static int pxa168_eth_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	return -ENOSYS;
+}
+
+#else
+#define pxa168_eth_resume NULL
+#define pxa168_eth_suspend NULL
+#endif
+
+static const struct of_device_id pxa168_eth_of_match[] = {
+	{ .compatible = "marvell,pxa168-eth" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, pxa168_eth_of_match);
+
+static struct platform_driver pxa168_eth_driver = {
+	.probe = pxa168_eth_probe,
+	.remove = pxa168_eth_remove,
+	.shutdown = pxa168_eth_shutdown,
+	.resume = pxa168_eth_resume,
+	.suspend = pxa168_eth_suspend,
+	.driver = {
+		.name		= DRIVER_NAME,
+		.of_match_table	= of_match_ptr(pxa168_eth_of_match),
+	},
+};
+
+module_platform_driver(pxa168_eth_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168");
+MODULE_ALIAS("platform:pxa168_eth");
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
new file mode 100644
index 0000000..7173836
--- /dev/null
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -0,0 +1,4226 @@
+/*
+ * New driver for Marvell Yukon chipset and SysKonnect Gigabit
+ * Ethernet adapters. Based on earlier sk98lin, e100 and
+ * FreeBSD if_sk drivers.
+ *
+ * This driver intentionally does not support all the features
+ * of the original driver such as link fail-over and link management because
+ * those should be done at higher levels.
+ *
+ * Copyright (C) 2004, 2005 Stephen Hemminger <shemminger@osdl.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/in.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/pci.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/delay.h>
+#include <linux/crc32.h>
+#include <linux/dma-mapping.h>
+#include <linux/debugfs.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/mii.h>
+#include <linux/slab.h>
+#include <linux/dmi.h>
+#include <linux/prefetch.h>
+#include <asm/irq.h>
+
+#include "skge.h"
+
+#define DRV_NAME		"skge"
+#define DRV_VERSION		"1.14"
+
+#define DEFAULT_TX_RING_SIZE	128
+#define DEFAULT_RX_RING_SIZE	512
+#define MAX_TX_RING_SIZE	1024
+#define TX_LOW_WATER		(MAX_SKB_FRAGS + 1)
+#define MAX_RX_RING_SIZE	4096
+#define RX_COPY_THRESHOLD	128
+#define RX_BUF_SIZE		1536
+#define PHY_RETRIES	        1000
+#define ETH_JUMBO_MTU		9000
+#define TX_WATCHDOG		(5 * HZ)
+#define NAPI_WEIGHT		64
+#define BLINK_MS		250
+#define LINK_HZ			HZ
+
+#define SKGE_EEPROM_MAGIC	0x9933aabb
+
+
+MODULE_DESCRIPTION("SysKonnect Gigabit Ethernet driver");
+MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static const u32 default_msg = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
+				NETIF_MSG_LINK | NETIF_MSG_IFUP |
+				NETIF_MSG_IFDOWN);
+
+static int debug = -1;	/* defaults above */
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+static const struct pci_device_id skge_id_table[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_3COM, 0x1700) },	  /* 3Com 3C940 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_3COM, 0x80EB) },	  /* 3Com 3C940B */
+#ifdef CONFIG_SKGE_GENESIS
+	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x4300) }, /* SK-9xx */
+#endif
+	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x4320) }, /* SK-98xx V2.0 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) },	  /* D-Link DGE-530T (rev.B) */
+	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4c00) },	  /* D-Link DGE-530T */
+	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302) },	  /* D-Link DGE-530T Rev C1 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) },	  /* Marvell Yukon 88E8001/8003/8010 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) },	  /* Belkin */
+	{ PCI_DEVICE(PCI_VENDOR_ID_CNET, 0x434E) }, 	  /* CNet PowerG-2000 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, 0x1064) },	  /* Linksys EG1064 v2 */
+	{ PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0015 }, /* Linksys EG1032 v2 */
+	{ 0 }
+};
+MODULE_DEVICE_TABLE(pci, skge_id_table);
+
+static int skge_up(struct net_device *dev);
+static int skge_down(struct net_device *dev);
+static void skge_phy_reset(struct skge_port *skge);
+static void skge_tx_clean(struct net_device *dev);
+static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
+static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
+static void genesis_get_stats(struct skge_port *skge, u64 *data);
+static void yukon_get_stats(struct skge_port *skge, u64 *data);
+static void yukon_init(struct skge_hw *hw, int port);
+static void genesis_mac_init(struct skge_hw *hw, int port);
+static void genesis_link_up(struct skge_port *skge);
+static void skge_set_multicast(struct net_device *dev);
+static irqreturn_t skge_intr(int irq, void *dev_id);
+
+/* Avoid conditionals by using array */
+static const int txqaddr[] = { Q_XA1, Q_XA2 };
+static const int rxqaddr[] = { Q_R1, Q_R2 };
+static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
+static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
+static const u32 napimask[] = { IS_R1_F|IS_XA1_F, IS_R2_F|IS_XA2_F };
+static const u32 portmask[] = { IS_PORT_1, IS_PORT_2 };
+
+static inline bool is_genesis(const struct skge_hw *hw)
+{
+#ifdef CONFIG_SKGE_GENESIS
+	return hw->chip_id == CHIP_ID_GENESIS;
+#else
+	return false;
+#endif
+}
+
+static int skge_get_regs_len(struct net_device *dev)
+{
+	return 0x4000;
+}
+
+/*
+ * Returns copy of whole control register region
+ * Note: skip RAM address register because accessing it will
+ * 	 cause bus hangs!
+ */
+static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+			  void *p)
+{
+	const struct skge_port *skge = netdev_priv(dev);
+	const void __iomem *io = skge->hw->regs;
+
+	regs->version = 1;
+	memset(p, 0, regs->len);
+	memcpy_fromio(p, io, B3_RAM_ADDR);
+
+	memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
+		      regs->len - B3_RI_WTO_R1);
+}
+
+/* Wake on LAN is only supported on Yukon chips with rev 1 or above */
+static u32 wol_supported(const struct skge_hw *hw)
+{
+	if (is_genesis(hw))
+		return 0;
+
+	if (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0)
+		return 0;
+
+	return WAKE_MAGIC | WAKE_PHY;
+}
+
+static void skge_wol_init(struct skge_port *skge)
+{
+	struct skge_hw *hw = skge->hw;
+	int port = skge->port;
+	u16 ctrl;
+
+	skge_write16(hw, B0_CTST, CS_RST_CLR);
+	skge_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR);
+
+	/* Turn on Vaux */
+	skge_write8(hw, B0_POWER_CTRL,
+		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
+
+	/* Workaround code for COMA mode -- clear PHY reset */
+	if (hw->chip_id == CHIP_ID_YUKON_LITE &&
+	    hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
+		u32 reg = skge_read32(hw, B2_GP_IO);
+		reg |= GP_DIR_9;
+		reg &= ~GP_IO_9;
+		skge_write32(hw, B2_GP_IO, reg);
+	}
+
+	skge_write32(hw, SK_REG(port, GPHY_CTRL),
+		     GPC_DIS_SLEEP |
+		     GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0 |
+		     GPC_ANEG_1 | GPC_RST_SET);
+
+	skge_write32(hw, SK_REG(port, GPHY_CTRL),
+		     GPC_DIS_SLEEP |
+		     GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0 |
+		     GPC_ANEG_1 | GPC_RST_CLR);
+
+	skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
+
+	/* Force to 10/100; skge_reset will re-enable on resume */
+	gm_phy_write(hw, port, PHY_MARV_AUNE_ADV,
+		     (PHY_AN_100FULL | PHY_AN_100HALF |
+		      PHY_AN_10FULL | PHY_AN_10HALF | PHY_AN_CSMA));
+	/* no 1000 HD/FD */
+	gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, 0);
+	gm_phy_write(hw, port, PHY_MARV_CTRL,
+		     PHY_CT_RESET | PHY_CT_SPS_LSB | PHY_CT_ANE |
+		     PHY_CT_RE_CFG | PHY_CT_DUP_MD);
+
+
+	/* Set GMAC to no flow control and auto update for speed/duplex */
+	gma_write16(hw, port, GM_GP_CTRL,
+		    GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA|
+		    GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS);
+
+	/* Set WOL address */
+	memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR),
+		    skge->netdev->dev_addr, ETH_ALEN);
+
+	/* Turn on appropriate WOL control bits */
+	skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT);
+	ctrl = 0;
+	if (skge->wol & WAKE_PHY)
+		ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT;
+	else
+		ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT;
+
+	if (skge->wol & WAKE_MAGIC)
+		ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT;
+	else
+		ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT;
+
+	ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT;
+	skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl);
+
+	/* block receiver */
+	skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
+}
+
+static void skge_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct skge_port *skge = netdev_priv(dev);
+
+	wol->supported = wol_supported(skge->hw);
+	wol->wolopts = skge->wol;
+}
+
+static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct skge_port *skge = netdev_priv(dev);
+	struct skge_hw *hw = skge->hw;
+
+	if ((wol->wolopts & ~wol_supported(hw)) ||
+	    !device_can_wakeup(&hw->pdev->dev))
+		return -EOPNOTSUPP;
+
+	skge->wol = wol->wolopts;
+
+	device_set_wakeup_enable(&hw->pdev->dev, skge->wol);
+
+	return 0;
+}
+
+/* Determine supported/advertised modes based on hardware.
+ * Note: ethtool ADVERTISED_xxx == SUPPORTED_xxx
+ */
+static u32 skge_supported_modes(const struct skge_hw *hw)
+{
+	u32 supported;
+
+	if (hw->copper) {
+		supported = (SUPPORTED_10baseT_Half |
+			     SUPPORTED_10baseT_Full |
+			     SUPPORTED_100baseT_Half |
+			     SUPPORTED_100baseT_Full |
+			     SUPPORTED_1000baseT_Half |
+			     SUPPORTED_1000baseT_Full |
+			     SUPPORTED_Autoneg |
+			     SUPPORTED_TP);
+
+		if (is_genesis(hw))
+			supported &= ~(SUPPORTED_10baseT_Half |
+				       SUPPORTED_10baseT_Full |
+				       SUPPORTED_100baseT_Half |
+				       SUPPORTED_100baseT_Full);
+
+		else if (hw->chip_id == CHIP_ID_YUKON)
+			supported &= ~SUPPORTED_1000baseT_Half;
+	} else
+		supported = (SUPPORTED_1000baseT_Full |
+			     SUPPORTED_1000baseT_Half |
+			     SUPPORTED_FIBRE |
+			     SUPPORTED_Autoneg);
+
+	return supported;
+}
+
+static int skge_get_settings(struct net_device *dev,
+			     struct ethtool_cmd *ecmd)
+{
+	struct skge_port *skge = netdev_priv(dev);
+	struct skge_hw *hw = skge->hw;
+
+	ecmd->transceiver = XCVR_INTERNAL;
+	ecmd->supported = skge_supported_modes(hw);
+
+	if (hw->copper) {
+		ecmd->port = PORT_TP;
+		ecmd->phy_address = hw->phy_addr;
+	} else
+		ecmd->port = PORT_FIBRE;
+
+	ecmd->advertising = skge->advertising;
+	ecmd->autoneg = skge->autoneg;
+	ethtool_cmd_speed_set(ecmd, skge->speed);
+	ecmd->duplex = skge->duplex;
+	return 0;
+}
+
+static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+	struct skge_port *skge = netdev_priv(dev);
+	const struct skge_hw *hw = skge->hw;
+	u32 supported = skge_supported_modes(hw);
+	int err = 0;
+
+	if (ecmd->autoneg == AUTONEG_ENABLE) {
+		ecmd->advertising = supported;
+		skge->duplex = -1;
+		skge->speed = -1;
+	} else {
+		u32 setting;
+		u32 speed = ethtool_cmd_speed(ecmd);
+
+		switch (speed) {
+		case SPEED_1000:
+			if (ecmd->duplex == DUPLEX_FULL)
+				setting = SUPPORTED_1000baseT_Full;
+			else if (ecmd->duplex == DUPLEX_HALF)
+				setting = SUPPORTED_1000baseT_Half;
+			else
+				return -EINVAL;
+			break;
+		case SPEED_100:
+			if (ecmd->duplex == DUPLEX_FULL)
+				setting = SUPPORTED_100baseT_Full;
+			else if (ecmd->duplex == DUPLEX_HALF)
+				setting = SUPPORTED_100baseT_Half;
+			else
+				return -EINVAL;
+			break;
+
+		case SPEED_10:
+			if (ecmd->duplex == DUPLEX_FULL)
+				setting = SUPPORTED_10baseT_Full;
+			else if (ecmd->duplex == DUPLEX_HALF)
+				setting = SUPPORTED_10baseT_Half;
+			else
+				return -EINVAL;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		if ((setting & supported) == 0)
+			return -EINVAL;
+
+		skge->speed = speed;
+		skge->duplex = ecmd->duplex;
+	}
+
+	skge->autoneg = ecmd->autoneg;
+	skge->advertising = ecmd->advertising;
+
+	if (netif_running(dev)) {
+		skge_down(dev);
+		err = skge_up(dev);
+		if (err) {
+			dev_close(dev);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+static void skge_get_drvinfo(struct net_device *dev,
+			     struct ethtool_drvinfo *info)
+{
+	struct skge_port *skge = netdev_priv(dev);
+
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(skge->hw->pdev),
+		sizeof(info->bus_info));
+}
+
+static const struct skge_stat {
+	char 	   name[ETH_GSTRING_LEN];
+	u16	   xmac_offset;
+	u16	   gma_offset;
+} skge_stats[] = {
+	{ "tx_bytes",		XM_TXO_OK_HI,  GM_TXO_OK_HI },
+	{ "rx_bytes",		XM_RXO_OK_HI,  GM_RXO_OK_HI },
+
+	{ "tx_broadcast",	XM_TXF_BC_OK,  GM_TXF_BC_OK },
+	{ "rx_broadcast",	XM_RXF_BC_OK,  GM_RXF_BC_OK },
+	{ "tx_multicast",	XM_TXF_MC_OK,  GM_TXF_MC_OK },
+	{ "rx_multicast",	XM_RXF_MC_OK,  GM_RXF_MC_OK },
+	{ "tx_unicast",		XM_TXF_UC_OK,  GM_TXF_UC_OK },
+	{ "rx_unicast",		XM_RXF_UC_OK,  GM_RXF_UC_OK },
+	{ "tx_mac_pause",	XM_TXF_MPAUSE, GM_TXF_MPAUSE },
+	{ "rx_mac_pause",	XM_RXF_MPAUSE, GM_RXF_MPAUSE },
+
+	{ "collisions",		XM_TXF_SNG_COL, GM_TXF_SNG_COL },
+	{ "multi_collisions",	XM_TXF_MUL_COL, GM_TXF_MUL_COL },
+	{ "aborted",		XM_TXF_ABO_COL, GM_TXF_ABO_COL },
+	{ "late_collision",	XM_TXF_LAT_COL, GM_TXF_LAT_COL },
+	{ "fifo_underrun",	XM_TXE_FIFO_UR, GM_TXE_FIFO_UR },
+	{ "fifo_overflow",	XM_RXE_FIFO_OV, GM_RXE_FIFO_OV },
+
+	{ "rx_toolong",		XM_RXF_LNG_ERR, GM_RXF_LNG_ERR },
+	{ "rx_jabber",		XM_RXF_JAB_PKT, GM_RXF_JAB_PKT },
+	{ "rx_runt",		XM_RXE_RUNT, 	GM_RXE_FRAG },
+	{ "rx_too_long",	XM_RXF_LNG_ERR, GM_RXF_LNG_ERR },
+	{ "rx_fcs_error",	XM_RXF_FCS_ERR, GM_RXF_FCS_ERR },
+};
+
+static int skge_get_sset_count(struct net_device *dev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return ARRAY_SIZE(skge_stats);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void skge_get_ethtool_stats(struct net_device *dev,
+				   struct ethtool_stats *stats, u64 *data)
+{
+	struct skge_port *skge = netdev_priv(dev);
+
+	if (is_genesis(skge->hw))
+		genesis_get_stats(skge, data);
+	else
+		yukon_get_stats(skge, data);
+}
+
+/* Use hardware MIB variables for critical path statistics and
+ * transmit feedback not reported at interrupt.
+ * Other errors are accounted for in interrupt handler.
+ */
+static struct net_device_stats *skge_get_stats(struct net_device *dev)
+{
+	struct skge_port *skge = netdev_priv(dev);
+	u64 data[ARRAY_SIZE(skge_stats)];
+
+	if (is_genesis(skge->hw))
+		genesis_get_stats(skge, data);
+	else
+		yukon_get_stats(skge, data);
+
+	dev->stats.tx_bytes = data[0];
+	dev->stats.rx_bytes = data[1];
+	dev->stats.tx_packets = data[2] + data[4] + data[6];
+	dev->stats.rx_packets = data[3] + data[5] + data[7];
+	dev->stats.multicast = data[3] + data[5];
+	dev->stats.collisions = data[10];
+	dev->stats.tx_aborted_errors = data[12];
+
+	return &dev->stats;
+}
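+
+/*
+ * The data[] indices above follow the skge_stats[] table order:
+ * 0/1 are the tx/rx byte counts, 2-7 the broadcast/multicast/unicast
+ * frame counts (tx and rx interleaved), 10 the single-collision count
+ * and 12 the aborted-frame count.
+ */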
+
+static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < ARRAY_SIZE(skge_stats); i++)
+			memcpy(data + i * ETH_GSTRING_LEN,
+			       skge_stats[i].name, ETH_GSTRING_LEN);
+		break;
+	}
+}
+
+static void skge_get_ring_param(struct net_device *dev,
+				struct ethtool_ringparam *p)
+{
+	struct skge_port *skge = netdev_priv(dev);
+
+	p->rx_max_pending = MAX_RX_RING_SIZE;
+	p->tx_max_pending = MAX_TX_RING_SIZE;
+
+	p->rx_pending = skge->rx_ring.count;
+	p->tx_pending = skge->tx_ring.count;
+}
+
+static int skge_set_ring_param(struct net_device *dev,
+			       struct ethtool_ringparam *p)
+{
+	struct skge_port *skge = netdev_priv(dev);
+	int err = 0;
+
+	if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE ||
+	    p->tx_pending < TX_LOW_WATER || p->tx_pending > MAX_TX_RING_SIZE)
+		return -EINVAL;
+
+	skge->rx_ring.count = p->rx_pending;
+	skge->tx_ring.count = p->tx_pending;
+
+	if (netif_running(dev)) {
+		skge_down(dev);
+		err = skge_up(dev);
+		if (err)
+			dev_close(dev);
+	}
+
+	return err;
+}
+
+static u32 skge_get_msglevel(struct net_device *netdev)
+{
+	struct skge_port *skge = netdev_priv(netdev);
+	return skge->msg_enable;
+}
+
+static void skge_set_msglevel(struct net_device *netdev, u32 value)
+{
+	struct skge_port *skge = netdev_priv(netdev);
+	skge->msg_enable = value;
+}
+
+static int skge_nway_reset(struct net_device *dev)
+{
+	struct skge_port *skge = netdev_priv(dev);
+
+	if (skge->autoneg != AUTONEG_ENABLE || !netif_running(dev))
+		return -EINVAL;
+
+	skge_phy_reset(skge);
+	return 0;
+}
+
+static void skge_get_pauseparam(struct net_device *dev,
+				struct ethtool_pauseparam *ecmd)
+{
+	struct skge_port *skge = netdev_priv(dev);
+
+	ecmd->rx_pause = ((skge->flow_control == FLOW_MODE_SYMMETRIC) ||
+			  (skge->flow_control == FLOW_MODE_SYM_OR_REM));
+	ecmd->tx_pause = (ecmd->rx_pause ||
+			  (skge->flow_control == FLOW_MODE_LOC_SEND));
+
+	ecmd->autoneg = ecmd->rx_pause || ecmd->tx_pause;
+}
+
+static int skge_set_pauseparam(struct net_device *dev,
+			       struct ethtool_pauseparam *ecmd)
+{
+	struct skge_port *skge = netdev_priv(dev);
+	struct ethtool_pauseparam old;
+	int err = 0;
+
+	skge_get_pauseparam(dev, &old);
+
+	if (ecmd->autoneg != old.autoneg)
+		skge->flow_control = ecmd->autoneg ? FLOW_MODE_NONE : FLOW_MODE_SYMMETRIC;
+	else {
+		if (ecmd->rx_pause && ecmd->tx_pause)
+			skge->flow_control = FLOW_MODE_SYMMETRIC;
+		else if (ecmd->rx_pause && !ecmd->tx_pause)
+			skge->flow_control = FLOW_MODE_SYM_OR_REM;
+		else if (!ecmd->rx_pause && ecmd->tx_pause)
+			skge->flow_control = FLOW_MODE_LOC_SEND;
+		else
+			skge->flow_control = FLOW_MODE_NONE;
+	}
+
+	if (netif_running(dev)) {
+		skge_down(dev);
+		err = skge_up(dev);
+		if (err) {
+			dev_close(dev);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+/* Chip internal frequency for clock calculations */
+static inline u32 hwkhz(const struct skge_hw *hw)
+{
+	return is_genesis(hw) ? 53125 : 78125;
+}
+
+/* Chip HZ to microseconds */
+static inline u32 skge_clk2usec(const struct skge_hw *hw, u32 ticks)
+{
+	return (ticks * 1000) / hwkhz(hw);
+}
+
+/* Microseconds to chip HZ */
+static inline u32 skge_usecs2clk(const struct skge_hw *hw, u32 usec)
+{
+	return hwkhz(hw) * usec / 1000;
+}
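+
+/*
+ * Example conversion: on Yukon (78125 kHz) a 25us coalescing delay is
+ * 78125 * 25 / 1000 = 1953 clock ticks; on Genesis (53125 kHz) the same
+ * delay is 1328 ticks.
+ */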
+
+static int skge_get_coalesce(struct net_device *dev,
+			     struct ethtool_coalesce *ecmd)
+{
+	struct skge_port *skge = netdev_priv(dev);
+	struct skge_hw *hw = skge->hw;
+	int port = skge->port;
+
+	ecmd->rx_coalesce_usecs = 0;
+	ecmd->tx_coalesce_usecs = 0;
+
+	if (skge_read32(hw, B2_IRQM_CTRL) & TIM_START) {
+		u32 delay = skge_clk2usec(hw, skge_read32(hw, B2_IRQM_INI));
+		u32 msk = skge_read32(hw, B2_IRQM_MSK);
+
+		if (msk & rxirqmask[port])
+			ecmd->rx_coalesce_usecs = delay;
+		if (msk & txirqmask[port])
+			ecmd->tx_coalesce_usecs = delay;
+	}
+
+	return 0;
+}
+
+/* Note: interrupt timer is per board, but can turn on/off per port */
+static int skge_set_coalesce(struct net_device *dev,
+			     struct ethtool_coalesce *ecmd)
+{
+	struct skge_port *skge = netdev_priv(dev);
+	struct skge_hw *hw = skge->hw;
+	int port = skge->port;
+	u32 msk = skge_read32(hw, B2_IRQM_MSK);
+	u32 delay = 25;
+
+	if (ecmd->rx_coalesce_usecs == 0)
+		msk &= ~rxirqmask[port];
+	else if (ecmd->rx_coalesce_usecs < 25 ||
+		 ecmd->rx_coalesce_usecs > 33333)
+		return -EINVAL;
+	else {
+		msk |= rxirqmask[port];
+		delay = ecmd->rx_coalesce_usecs;
+	}
+
+	if (ecmd->tx_coalesce_usecs == 0)
+		msk &= ~txirqmask[port];
+	else if (ecmd->tx_coalesce_usecs < 25 ||
+		 ecmd->tx_coalesce_usecs > 33333)
+		return -EINVAL;
+	else {
+		msk |= txirqmask[port];
+		delay = min(delay, ecmd->rx_coalesce_usecs);
+	}
+
+	skge_write32(hw, B2_IRQM_MSK, msk);
+	if (msk == 0)
+		skge_write32(hw, B2_IRQM_CTRL, TIM_STOP);
+	else {
+		skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, delay));
+		skge_write32(hw, B2_IRQM_CTRL, TIM_START);
+	}
+	return 0;
+}
+
+enum led_mode { LED_MODE_OFF, LED_MODE_ON, LED_MODE_TST };
+static void skge_led(struct skge_port *skge, enum led_mode mode)
+{
+	struct skge_hw *hw = skge->hw;
+	int port = skge->port;
+
+	spin_lock_bh(&hw->phy_lock);
+	if (is_genesis(hw)) {
+		switch (mode) {
+		case LED_MODE_OFF:
+			if (hw->phy_type == SK_PHY_BCOM)
+				xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_OFF);
+			else {
+				skge_write32(hw, SK_REG(port, TX_LED_VAL), 0);
+				skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_T_OFF);
+			}
+			skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
+			skge_write32(hw, SK_REG(port, RX_LED_VAL), 0);
+			skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_T_OFF);
+			break;
+
+		case LED_MODE_ON:
+			skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_ON);
+			skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_LINKSYNC_ON);
+
+			skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START);
+			skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START);
+
+			break;
+
+		case LED_MODE_TST:
+			skge_write8(hw, SK_REG(port, RX_LED_TST), LED_T_ON);
+			skge_write32(hw, SK_REG(port, RX_LED_VAL), 100);
+			skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START);
+
+			if (hw->phy_type == SK_PHY_BCOM)
+				xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_ON);
+			else {
+				skge_write8(hw, SK_REG(port, TX_LED_TST), LED_T_ON);
+				skge_write32(hw, SK_REG(port, TX_LED_VAL), 100);
+				skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START);
+			}
+
+		}
+	} else {
+		switch (mode) {
+		case LED_MODE_OFF:
+			gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
+			gm_phy_write(hw, port, PHY_MARV_LED_OVER,
+				     PHY_M_LED_MO_DUP(MO_LED_OFF)  |
+				     PHY_M_LED_MO_10(MO_LED_OFF)   |
+				     PHY_M_LED_MO_100(MO_LED_OFF)  |
+				     PHY_M_LED_MO_1000(MO_LED_OFF) |
+				     PHY_M_LED_MO_RX(MO_LED_OFF));
+			break;
+		case LED_MODE_ON:
+			gm_phy_write(hw, port, PHY_MARV_LED_CTRL,
+				     PHY_M_LED_PULS_DUR(PULS_170MS) |
+				     PHY_M_LED_BLINK_RT(BLINK_84MS) |
+				     PHY_M_LEDC_TX_CTRL |
+				     PHY_M_LEDC_DP_CTRL);
+
+			gm_phy_write(hw, port, PHY_MARV_LED_OVER,
+				     PHY_M_LED_MO_RX(MO_LED_OFF) |
+				     (skge->speed == SPEED_100 ?
+				      PHY_M_LED_MO_100(MO_LED_ON) : 0));
+			break;
+		case LED_MODE_TST:
+			gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
+			gm_phy_write(hw, port, PHY_MARV_LED_OVER,
+				     PHY_M_LED_MO_DUP(MO_LED_ON)  |
+				     PHY_M_LED_MO_10(MO_LED_ON)   |
+				     PHY_M_LED_MO_100(MO_LED_ON)  |
+				     PHY_M_LED_MO_1000(MO_LED_ON) |
+				     PHY_M_LED_MO_RX(MO_LED_ON));
+		}
+	}
+	spin_unlock_bh(&hw->phy_lock);
+}
+
+/* blink LEDs for locating the board */
+static int skge_set_phys_id(struct net_device *dev,
+			    enum ethtool_phys_id_state state)
+{
+	struct skge_port *skge = netdev_priv(dev);
+
+	switch (state) {
+	case ETHTOOL_ID_ACTIVE:
+		return 2;	/* cycle on/off twice per second */
+
+	case ETHTOOL_ID_ON:
+		skge_led(skge, LED_MODE_TST);
+		break;
+
+	case ETHTOOL_ID_OFF:
+		skge_led(skge, LED_MODE_OFF);
+		break;
+
+	case ETHTOOL_ID_INACTIVE:
+		/* back to regular LED state */
+		skge_led(skge, netif_running(dev) ? LED_MODE_ON : LED_MODE_OFF);
+	}
+
+	return 0;
+}
+
+static int skge_get_eeprom_len(struct net_device *dev)
+{
+	struct skge_port *skge = netdev_priv(dev);
+	u32 reg2;
+
+	pci_read_config_dword(skge->hw->pdev, PCI_DEV_REG2, &reg2);
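+	/* the size field selects a power of two: 1 << (field + 8), i.e. 256 bytes minimum */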
+	return 1 << (((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
+}
+
+static u32 skge_vpd_read(struct pci_dev *pdev, int cap, u16 offset)
+{
+	u32 val;
+
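+	/* write the address with the flag bit (PCI_VPD_ADDR_F) clear to
+	 * start a read cycle; the device sets the flag when data is ready
+	 */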
+	pci_write_config_word(pdev, cap + PCI_VPD_ADDR, offset);
+
+	do {
+		pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset);
+	} while (!(offset & PCI_VPD_ADDR_F));
+
+	pci_read_config_dword(pdev, cap + PCI_VPD_DATA, &val);
+	return val;
+}
+
+static void skge_vpd_write(struct pci_dev *pdev, int cap, u16 offset, u32 val)
+{
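+	/* write the data first, then the address with the flag set; the
+	 * device clears the flag once the word has been stored
+	 */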
+	pci_write_config_dword(pdev, cap + PCI_VPD_DATA, val);
+	pci_write_config_word(pdev, cap + PCI_VPD_ADDR,
+			      offset | PCI_VPD_ADDR_F);
+
+	do {
+		pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset);
+	} while (offset & PCI_VPD_ADDR_F);
+}
+
+static int skge_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+			   u8 *data)
+{
+	struct skge_port *skge = netdev_priv(dev);
+	struct pci_dev *pdev = skge->hw->pdev;
+	int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD);
+	int length = eeprom->len;
+	u16 offset = eeprom->offset;
+
+	if (!cap)
+		return -EINVAL;
+
+	eeprom->magic = SKGE_EEPROM_MAGIC;
+
+	while (length > 0) {
+		u32 val = skge_vpd_read(pdev, cap, offset);
+		int n = min_t(int, length, sizeof(val));
+
+		memcpy(data, &val, n);
+		length -= n;
+		data += n;
+		offset += n;
+	}
+	return 0;
+}
+
+static int skge_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+			   u8 *data)
+{
+	struct skge_port *skge = netdev_priv(dev);
+	struct pci_dev *pdev = skge->hw->pdev;
+	int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD);
+	int length = eeprom->len;
+	u16 offset = eeprom->offset;
+
+	if (!cap)
+		return -EINVAL;
+
+	if (eeprom->magic != SKGE_EEPROM_MAGIC)
+		return -EINVAL;
+
+	while (length > 0) {
+		u32 val;
+		int n = min_t(int, length, sizeof(val));
+
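+		/* partial word: read-modify-write to preserve the other bytes */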
+		if (n < sizeof(val))
+			val = skge_vpd_read(pdev, cap, offset);
+		memcpy(&val, data, n);
+
+		skge_vpd_write(pdev, cap, offset, val);
+
+		length -= n;
+		data += n;
+		offset += n;
+	}
+	return 0;
+}
+
+static const struct ethtool_ops skge_ethtool_ops = {
+	.get_settings	= skge_get_settings,
+	.set_settings	= skge_set_settings,
+	.get_drvinfo	= skge_get_drvinfo,
+	.get_regs_len	= skge_get_regs_len,
+	.get_regs	= skge_get_regs,
+	.get_wol	= skge_get_wol,
+	.set_wol	= skge_set_wol,
+	.get_msglevel	= skge_get_msglevel,
+	.set_msglevel	= skge_set_msglevel,
+	.nway_reset	= skge_nway_reset,
+	.get_link	= ethtool_op_get_link,
+	.get_eeprom_len	= skge_get_eeprom_len,
+	.get_eeprom	= skge_get_eeprom,
+	.set_eeprom	= skge_set_eeprom,
+	.get_ringparam	= skge_get_ring_param,
+	.set_ringparam	= skge_set_ring_param,
+	.get_pauseparam = skge_get_pauseparam,
+	.set_pauseparam = skge_set_pauseparam,
+	.get_coalesce	= skge_get_coalesce,
+	.set_coalesce	= skge_set_coalesce,
+	.get_strings	= skge_get_strings,
+	.set_phys_id	= skge_set_phys_id,
+	.get_sset_count = skge_get_sset_count,
+	.get_ethtool_stats = skge_get_ethtool_stats,
+};
+
+/*
+ * Allocate ring elements and chain them together
+ * One-to-one association of board descriptors with ring elements
+ */
+static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base)
+{
+	struct skge_tx_desc *d;
+	struct skge_element *e;
+	int i;
+
+	ring->start = kcalloc(ring->count, sizeof(*e), GFP_KERNEL);
+	if (!ring->start)
+		return -ENOMEM;
+
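+	/* link elements into a circular list; the last descriptor's
+	 * next_offset wraps back to the base of the ring
+	 */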
+	for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) {
+		e->desc = d;
+		if (i == ring->count - 1) {
+			e->next = ring->start;
+			d->next_offset = base;
+		} else {
+			e->next = e + 1;
+			d->next_offset = base + (i+1) * sizeof(*d);
+		}
+	}
+	ring->to_use = ring->to_clean = ring->start;
+
+	return 0;
+}
+
+/* Allocate and setup a new buffer for receiving */
+static int skge_rx_setup(struct skge_port *skge, struct skge_element *e,
+			 struct sk_buff *skb, unsigned int bufsize)
+{
+	struct skge_rx_desc *rd = e->desc;
+	dma_addr_t map;
+
+	map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
+			     PCI_DMA_FROMDEVICE);
+
+	if (pci_dma_mapping_error(skge->hw->pdev, map))
+		return -1;
+
+	rd->dma_lo = lower_32_bits(map);
+	rd->dma_hi = upper_32_bits(map);
+	e->skb = skb;
+	rd->csum1_start = ETH_HLEN;
+	rd->csum2_start = ETH_HLEN;
+	rd->csum1 = 0;
+	rd->csum2 = 0;
+
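+	/* descriptor must be fully written before BMU_OWN hands it to hardware */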
+	wmb();
+
+	rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
+	dma_unmap_addr_set(e, mapaddr, map);
+	dma_unmap_len_set(e, maplen, bufsize);
+	return 0;
+}
+
+/* Resume receiving using the existing skb.
+ * Note: the DMA address is not changed by the chip,
+ *	 and the MTU does not change while the receiver is active.
+ */
+static inline void skge_rx_reuse(struct skge_element *e, unsigned int size)
+{
+	struct skge_rx_desc *rd = e->desc;
+
+	rd->csum2 = 0;
+	rd->csum2_start = ETH_HLEN;
+
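+	/* as in skge_rx_setup(): publish the fields before setting BMU_OWN */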
+	wmb();
+
+	rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | size;
+}
+
+
+/* Free all buffers in the receive ring; assumes the receiver is stopped */
+static void skge_rx_clean(struct skge_port *skge)
+{
+	struct skge_hw *hw = skge->hw;
+	struct skge_ring *ring = &skge->rx_ring;
+	struct skge_element *e;
+
+	e = ring->start;
+	do {
+		struct skge_rx_desc *rd = e->desc;
+		rd->control = 0;
+		if (e->skb) {
+			pci_unmap_single(hw->pdev,
+					 dma_unmap_addr(e, mapaddr),
+					 dma_unmap_len(e, maplen),
+					 PCI_DMA_FROMDEVICE);
+			dev_kfree_skb(e->skb);
+			e->skb = NULL;
+		}
+	} while ((e = e->next) != ring->start);
+}
+
+
+/* Allocate buffers for the receive ring.
+ * For receive: to_clean is the next frame to be received.
+ */
+static int skge_rx_fill(struct net_device *dev)
+{
+	struct skge_port *skge = netdev_priv(dev);
+	struct skge_ring *ring = &skge->rx_ring;
+	struct skge_element *e;
+
+	e = ring->start;
+	do {
+		struct sk_buff *skb;
+
+		skb = __netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN,
+					 GFP_KERNEL);
+		if (!skb)
+			return -ENOMEM;
+
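+		/* reserve NET_IP_ALIGN bytes so the IP header is aligned */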
+		skb_reserve(skb, NET_IP_ALIGN);
+		if (skge_rx_setup(skge, e, skb, skge->rx_buf_size) < 0) {
+			dev_kfree_skb(skb);
+			return -EIO;
+		}
+	} while ((e = e->next) != ring->start);
+
+	ring->to_clean = ring->start;
+	return 0;
+}
+
+static const char *skge_pause(enum pause_status status)
+{
+	switch (status) {
+	case FLOW_STAT_NONE:
+		return "none";
+	case FLOW_STAT_REM_SEND:
+		return "rx only";
+	case FLOW_STAT_LOC_SEND:
+		return "tx_only";
+	case FLOW_STAT_SYMMETRIC:		/* Both stations may send PAUSE */
+		return "both";
+	default:
+		return "indeterminated";
+	}
+}
+
+
+static void skge_link_up(struct skge_port *skge)
+{
+	skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG),
+		    LED_BLK_OFF|LED_SYNC_OFF|LED_ON);
+
+	netif_carrier_on(skge->netdev);
+	netif_wake_queue(skge->netdev);
+
+	netif_info(skge, link, skge->netdev,
+		   "Link is up at %d Mbps, %s duplex, flow control %s\n",
+		   skge->speed,
+		   skge->duplex == DUPLEX_FULL ? "full" : "half",
+		   skge_pause(skge->flow_status));
+}
+
+static void skge_link_down(struct skge_port *skge)
+{
+	skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
+	netif_carrier_off(skge->netdev);
+	netif_stop_queue(skge->netdev);
+
+	netif_info(skge, link, skge->netdev, "Link is down\n");
+}
+
+static void xm_link_down(struct skge_hw *hw, int port)
+{
+	struct net_device *dev = hw->dev[port];
+	struct skge_port *skge = netdev_priv(dev);
+
+	xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE);
+
+	if (netif_carrier_ok(dev))
+		skge_link_down(skge);
+}
+
+static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
+{
+	int i;
+
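+	/* select the register; the internal XMAC PHY answers immediately,
+	 * external PHYs must first report ready via XM_MMU_PHY_RDY
+	 */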
+	xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
+	*val = xm_read16(hw, port, XM_PHY_DATA);
+
+	if (hw->phy_type == SK_PHY_XMAC)
+		goto ready;
+
+	for (i = 0; i < PHY_RETRIES; i++) {
+		if (xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_RDY)
+			goto ready;
+		udelay(1);
+	}
+
+	return -ETIMEDOUT;
+ ready:
+	*val = xm_read16(hw, port, XM_PHY_DATA);
+
+	return 0;
+}
+
+static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg)
+{
+	u16 v = 0;
+	if (__xm_phy_read(hw, port, reg, &v))
+		pr_warn("%s: phy read timed out\n", hw->dev[port]->name);
+	return v;
+}
+
+static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
+{
+	int i;
+
+	xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
+	for (i = 0; i < PHY_RETRIES; i++) {
+		if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
+			goto ready;
+		udelay(1);
+	}
+	return -EIO;
+
+ ready:
+	xm_write16(hw, port, XM_PHY_DATA, val);
+	for (i = 0; i < PHY_RETRIES; i++) {
+		if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
+			return 0;
+		udelay(1);
+	}
+	return -ETIMEDOUT;
+}
+
+static void genesis_init(struct skge_hw *hw)
+{
+	/* set blink source counter */
+	skge_write32(hw, B2_BSC_INI, (SK_BLK_DUR * SK_FACT_53) / 100);
+	skge_write8(hw, B2_BSC_CTRL, BSC_START);
+
+	/* configure mac arbiter */
+	skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);
+
+	/* configure mac arbiter timeout values */
+	skge_write8(hw, B3_MA_TOINI_RX1, SK_MAC_TO_53);
+	skge_write8(hw, B3_MA_TOINI_RX2, SK_MAC_TO_53);
+	skge_write8(hw, B3_MA_TOINI_TX1, SK_MAC_TO_53);
+	skge_write8(hw, B3_MA_TOINI_TX2, SK_MAC_TO_53);
+
+	skge_write8(hw, B3_MA_RCINI_RX1, 0);
+	skge_write8(hw, B3_MA_RCINI_RX2, 0);
+	skge_write8(hw, B3_MA_RCINI_TX1, 0);
+	skge_write8(hw, B3_MA_RCINI_TX2, 0);
+
+	/* configure packet arbiter timeout */
+	skge_write16(hw, B3_PA_CTRL, PA_RST_CLR);
+	skge_write16(hw, B3_PA_TOINI_RX1, SK_PKT_TO_MAX);
+	skge_write16(hw, B3_PA_TOINI_TX1, SK_PKT_TO_MAX);
+	skge_write16(hw, B3_PA_TOINI_RX2, SK_PKT_TO_MAX);
+	skge_write16(hw, B3_PA_TOINI_TX2, SK_PKT_TO_MAX);
+}
+
+static void genesis_reset(struct skge_hw *hw, int port)
+{
+	static const u8 zero[8]  = { 0 };
+	u32 reg;
+
+	skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
+
+	/* reset the statistics module */
+	xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT);
+	xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE);
+	xm_write32(hw, port, XM_MODE, 0);		/* clear Mode Reg */
+	xm_write16(hw, port, XM_TX_CMD, 0);	/* reset TX CMD Reg */
+	xm_write16(hw, port, XM_RX_CMD, 0);	/* reset RX CMD Reg */
+
+	/* disable Broadcom PHY IRQ */
+	if (hw->phy_type == SK_PHY_BCOM)
+		xm_write16(hw, port, PHY_BCOM_INT_MASK, 0xffff);
+
+	xm_outhash(hw, port, XM_HSM, zero);
+
+	/* Flush TX and RX fifo */
+	reg = xm_read32(hw, port, XM_MODE);
+	xm_write32(hw, port, XM_MODE, reg | XM_MD_FTF);
+	xm_write32(hw, port, XM_MODE, reg | XM_MD_FRF);
+}
+
+/* Convert mode to MII values */
+static const u16 phy_pause_map[] = {
+	[FLOW_MODE_NONE] =	0,
+	[FLOW_MODE_LOC_SEND] =	PHY_AN_PAUSE_ASYM,
+	[FLOW_MODE_SYMMETRIC] = PHY_AN_PAUSE_CAP,
+	[FLOW_MODE_SYM_OR_REM]  = PHY_AN_PAUSE_CAP | PHY_AN_PAUSE_ASYM,
+};
+
+/* special defines for FIBER (88E1011S only) */
+static const u16 fiber_pause_map[] = {
+	[FLOW_MODE_NONE]	= PHY_X_P_NO_PAUSE,
+	[FLOW_MODE_LOC_SEND]	= PHY_X_P_ASYM_MD,
+	[FLOW_MODE_SYMMETRIC]	= PHY_X_P_SYM_MD,
+	[FLOW_MODE_SYM_OR_REM]	= PHY_X_P_BOTH_MD,
+};
+
+
+/* Check status of Broadcom phy link */
+static void bcom_check_link(struct skge_hw *hw, int port)
+{
+	struct net_device *dev = hw->dev[port];
+	struct skge_port *skge = netdev_priv(dev);
+	u16 status;
+
+	/* read twice because of latch */
+	xm_phy_read(hw, port, PHY_BCOM_STAT);
+	status = xm_phy_read(hw, port, PHY_BCOM_STAT);
+
+	if ((status & PHY_ST_LSYNC) == 0) {
+		xm_link_down(hw, port);
+		return;
+	}
+
+	if (skge->autoneg == AUTONEG_ENABLE) {
+		u16 lpa, aux;
+
+		if (!(status & PHY_ST_AN_OVER))
+			return;
+
+		lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP);
+		if (lpa & PHY_B_AN_RF) {
+			netdev_notice(dev, "remote fault\n");
+			return;
+		}
+
+		aux = xm_phy_read(hw, port, PHY_BCOM_AUX_STAT);
+
+		/* Check Duplex mismatch */
+		switch (aux & PHY_B_AS_AN_RES_MSK) {
+		case PHY_B_RES_1000FD:
+			skge->duplex = DUPLEX_FULL;
+			break;
+		case PHY_B_RES_1000HD:
+			skge->duplex = DUPLEX_HALF;
+			break;
+		default:
+			netdev_notice(dev, "duplex mismatch\n");
+			return;
+		}
+
+		/* We are using IEEE 802.3z/D5.0 Table 37-4 */
+		switch (aux & PHY_B_AS_PAUSE_MSK) {
+		case PHY_B_AS_PAUSE_MSK:
+			skge->flow_status = FLOW_STAT_SYMMETRIC;
+			break;
+		case PHY_B_AS_PRR:
+			skge->flow_status = FLOW_STAT_REM_SEND;
+			break;
+		case PHY_B_AS_PRT:
+			skge->flow_status = FLOW_STAT_LOC_SEND;
+			break;
+		default:
+			skge->flow_status = FLOW_STAT_NONE;
+		}
+		skge->speed = SPEED_1000;
+	}
+
+	if (!netif_carrier_ok(dev))
+		genesis_link_up(skge);
+}
+
+/* Broadcom 5400 only supports gigabit! SysKonnect did not put an additional
+ * PHY on for 100 or 10 Mbit operation
+ */
+static void bcom_phy_init(struct skge_port *skge)
+{
+	struct skge_hw *hw = skge->hw;
+	int port = skge->port;
+	int i;
+	u16 id1, r, ext, ctl;
+
+	/* magic workaround patterns for Broadcom */
+	static const struct {
+		u16 reg;
+		u16 val;
+	} A1hack[] = {
+		{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 },
+		{ 0x17, 0x0013 }, { 0x15, 0x0404 }, { 0x17, 0x8006 },
+		{ 0x15, 0x0132 }, { 0x17, 0x8006 }, { 0x15, 0x0232 },
+		{ 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
+	}, C0hack[] = {
+		{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 },
+		{ 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 },
+	};
+
+	/* read Id from external PHY (all have the same address) */
+	id1 = xm_phy_read(hw, port, PHY_XMAC_ID1);
+
+	/* Optimize MDIO transfer by suppressing preamble. */
+	r = xm_read16(hw, port, XM_MMU_CMD);
+	r |=  XM_MMU_NO_PRE;
+	xm_write16(hw, port, XM_MMU_CMD, r);
+
+	switch (id1) {
+	case PHY_BCOM_ID1_C0:
+		/*
+		 * Workaround BCOM Errata for the C0 type.
+		 * Write magic patterns to reserved registers.
+		 */
+		for (i = 0; i < ARRAY_SIZE(C0hack); i++)
+			xm_phy_write(hw, port,
+				     C0hack[i].reg, C0hack[i].val);
+
+		break;
+	case PHY_BCOM_ID1_A1:
+		/*
+		 * Workaround BCOM Errata for the A1 type.
+		 * Write magic patterns to reserved registers.
+		 */
+		for (i = 0; i < ARRAY_SIZE(A1hack); i++)
+			xm_phy_write(hw, port,
+				     A1hack[i].reg, A1hack[i].val);
+		break;
+	}
+
+	/*
+	 * Workaround BCOM Errata (#10523) for all BCom PHYs.
+	 * Disable Power Management after reset.
+	 */
+	r = xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL);
+	r |= PHY_B_AC_DIS_PM;
+	xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, r);
+
+	/* Dummy read */
+	xm_read16(hw, port, XM_ISRC);
+
+	ext = PHY_B_PEC_EN_LTR; /* enable tx led */
+	ctl = PHY_CT_SP1000;	/* always 1000mbit */
+
+	if (skge->autoneg == AUTONEG_ENABLE) {
+		/*
+		 * Workaround BCOM Errata #1 for the C5 type.
+		 * 1000Base-T Link Acquisition Failure in Slave Mode
+		 * Set Repeater/DTE bit 10 of the 1000Base-T Control Register
+		 */
+		u16 adv = PHY_B_1000C_RD;
+		if (skge->advertising & ADVERTISED_1000baseT_Half)
+			adv |= PHY_B_1000C_AHD;
+		if (skge->advertising & ADVERTISED_1000baseT_Full)
+			adv |= PHY_B_1000C_AFD;
+		xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, adv);
+
+		ctl |= PHY_CT_ANE | PHY_CT_RE_CFG;
+	} else {
+		if (skge->duplex == DUPLEX_FULL)
+			ctl |= PHY_CT_DUP_MD;
+		/* Force to slave */
+		xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, PHY_B_1000C_MSE);
+	}
+
+	/* Set autonegotiation pause parameters */
+	xm_phy_write(hw, port, PHY_BCOM_AUNE_ADV,
+		     phy_pause_map[skge->flow_control] | PHY_AN_CSMA);
+
+	/* Handle Jumbo frames */
+	if (hw->dev[port]->mtu > ETH_DATA_LEN) {
+		xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL,
+			     PHY_B_AC_TX_TST | PHY_B_AC_LONG_PACK);
+
+		ext |= PHY_B_PEC_HIGH_LA;
+
+	}
+
+	xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, ext);
+	xm_phy_write(hw, port, PHY_BCOM_CTRL, ctl);
+
+	/* Use link status change interrupt */
+	xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK);
+}
+
+static void xm_phy_init(struct skge_port *skge)
+{
+	struct skge_hw *hw = skge->hw;
+	int port = skge->port;
+	u16 ctrl = 0;
+
+	if (skge->autoneg == AUTONEG_ENABLE) {
+		if (skge->advertising & ADVERTISED_1000baseT_Half)
+			ctrl |= PHY_X_AN_HD;
+		if (skge->advertising & ADVERTISED_1000baseT_Full)
+			ctrl |= PHY_X_AN_FD;
+
+		ctrl |= fiber_pause_map[skge->flow_control];
+
+		xm_phy_write(hw, port, PHY_XMAC_AUNE_ADV, ctrl);
+
+		/* Restart Auto-negotiation */
+		ctrl = PHY_CT_ANE | PHY_CT_RE_CFG;
+	} else {
+		/* Set DuplexMode in Config register */
+		if (skge->duplex == DUPLEX_FULL)
+			ctrl |= PHY_CT_DUP_MD;
+		/*
+		 * Do NOT enable Auto-negotiation here. This would hold
+		 * the link down because no IDLEs are transmitted
+		 */
+	}
+
+	xm_phy_write(hw, port, PHY_XMAC_CTRL, ctrl);
+
+	/* Poll PHY for status changes */
+	mod_timer(&skge->link_timer, jiffies + LINK_HZ);
+}
+
+static int xm_check_link(struct net_device *dev)
+{
+	struct skge_port *skge = netdev_priv(dev);
+	struct skge_hw *hw = skge->hw;
+	int port = skge->port;
+	u16 status;
+
+	/* read twice because of latch */
+	xm_phy_read(hw, port, PHY_XMAC_STAT);
+	status = xm_phy_read(hw, port, PHY_XMAC_STAT);
+
+	if ((status & PHY_ST_LSYNC) == 0) {
+		xm_link_down(hw, port);
+		return 0;
+	}
+
+	if (skge->autoneg == AUTONEG_ENABLE) {
+		u16 lpa, res;
+
+		if (!(status & PHY_ST_AN_OVER))
+			return 0;
+
+		lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP);
+		if (lpa & PHY_B_AN_RF) {
+			netdev_notice(dev, "remote fault\n");
+			return 0;
+		}
+
+		res = xm_phy_read(hw, port, PHY_XMAC_RES_ABI);
+
+		/* Check Duplex mismatch */
+		switch (res & (PHY_X_RS_HD | PHY_X_RS_FD)) {
+		case PHY_X_RS_FD:
+			skge->duplex = DUPLEX_FULL;
+			break;
+		case PHY_X_RS_HD:
+			skge->duplex = DUPLEX_HALF;
+			break;
+		default:
+			netdev_notice(dev, "duplex mismatch\n");
+			return 0;
+		}
+
+		/* We are using IEEE 802.3z/D5.0 Table 37-4 */
+		if ((skge->flow_control == FLOW_MODE_SYMMETRIC ||
+		     skge->flow_control == FLOW_MODE_SYM_OR_REM) &&
+		    (lpa & PHY_X_P_SYM_MD))
+			skge->flow_status = FLOW_STAT_SYMMETRIC;
+		else if (skge->flow_control == FLOW_MODE_SYM_OR_REM &&
+			 (lpa & PHY_X_RS_PAUSE) == PHY_X_P_ASYM_MD)
+			/* Enable PAUSE receive, disable PAUSE transmit */
+			skge->flow_status  = FLOW_STAT_REM_SEND;
+		else if (skge->flow_control == FLOW_MODE_LOC_SEND &&
+			 (lpa & PHY_X_RS_PAUSE) == PHY_X_P_BOTH_MD)
+			/* Disable PAUSE receive, enable PAUSE transmit */
+			skge->flow_status = FLOW_STAT_LOC_SEND;
+		else
+			skge->flow_status = FLOW_STAT_NONE;
+
+		skge->speed = SPEED_1000;
+	}
+
+	if (!netif_carrier_ok(dev))
+		genesis_link_up(skge);
+	return 1;
+}
+
+/* Poll to check for link coming up.
+ *
+ * Since the internal PHY is wired to a level-triggered pin, we can't
+ * get an interrupt when carrier is detected, so we need to poll for
+ * the link coming up.
+ */
+static void xm_link_timer(unsigned long arg)
+{
+	struct skge_port *skge = (struct skge_port *) arg;
+	struct net_device *dev = skge->netdev;
+	struct skge_hw *hw = skge->hw;
+	int port = skge->port;
+	int i;
+	unsigned long flags;
+
+	if (!netif_running(dev))
+		return;
+
+	spin_lock_irqsave(&hw->phy_lock, flags);
+
+	/*
+	 * Verify the link by checking the GPIO register three times.
+	 * This pin has the signal from the link_sync pin connected to it.
+	 */
+	for (i = 0; i < 3; i++) {
+		if (xm_read16(hw, port, XM_GP_PORT) & XM_GP_INP_ASS)
+			goto link_down;
+	}
+
+	/* Re-enable interrupt to detect link down */
+	if (xm_check_link(dev)) {
+		u16 msk = xm_read16(hw, port, XM_IMSK);
+		msk &= ~XM_IS_INP_ASS;
+		xm_write16(hw, port, XM_IMSK, msk);
+		xm_read16(hw, port, XM_ISRC);
+	} else {
+link_down:
+		mod_timer(&skge->link_timer,
+			  round_jiffies(jiffies + LINK_HZ));
+	}
+	spin_unlock_irqrestore(&hw->phy_lock, flags);
+}
+
+static void genesis_mac_init(struct skge_hw *hw, int port)
+{
+	struct net_device *dev = hw->dev[port];
+	struct skge_port *skge = netdev_priv(dev);
+	int jumbo = hw->dev[port]->mtu > ETH_DATA_LEN;
+	int i;
+	u32 r;
+	static const u8 zero[6]  = { 0 };
+
+	for (i = 0; i < 10; i++) {
+		skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
+			     MFF_SET_MAC_RST);
+		if (skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST)
+			goto reset_ok;
+		udelay(1);
+	}
+
+	netdev_warn(dev, "genesis reset failed\n");
+
+ reset_ok:
+	/* Unreset the XMAC. */
+	skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);
+
+	/*
+	 * Perform additional initialization for external PHYs,
+	 * namely for the 1000baseTX cards that use the XMAC's
+	 * GMII mode.
+	 */
+	if (hw->phy_type != SK_PHY_XMAC) {
+		/* Take external Phy out of reset */
+		r = skge_read32(hw, B2_GP_IO);
+		if (port == 0)
+			r |= GP_DIR_0|GP_IO_0;
+		else
+			r |= GP_DIR_2|GP_IO_2;
+
+		skge_write32(hw, B2_GP_IO, r);
+
+		/* Enable GMII interface */
+		xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD);
+	}
+
+
+	switch (hw->phy_type) {
+	case SK_PHY_XMAC:
+		xm_phy_init(skge);
+		break;
+	case SK_PHY_BCOM:
+		bcom_phy_init(skge);
+		bcom_check_link(hw, port);
+	}
+
+	/* Set Station Address */
+	xm_outaddr(hw, port, XM_SA, dev->dev_addr);
+
+	/* We don't use match addresses so clear */
+	for (i = 1; i < 16; i++)
+		xm_outaddr(hw, port, XM_EXM(i), zero);
+
+	/* Clear MIB counters */
+	xm_write16(hw, port, XM_STAT_CMD,
+			XM_SC_CLR_RXC | XM_SC_CLR_TXC);
+	/* Clear two times according to Errata #3 */
+	xm_write16(hw, port, XM_STAT_CMD,
+			XM_SC_CLR_RXC | XM_SC_CLR_TXC);
+
+	/* configure Rx High Water Mark (XM_RX_HI_WM) */
+	xm_write16(hw, port, XM_RX_HI_WM, 1450);
+
+	/* We don't need the FCS appended to the packet. */
+	r = XM_RX_LENERR_OK | XM_RX_STRIP_FCS;
+	if (jumbo)
+		r |= XM_RX_BIG_PK_OK;
+
+	if (skge->duplex == DUPLEX_HALF) {
+		/*
+		 * In manual half-duplex mode the other side might be in
+		 * full-duplex mode, so don't complain if a carrier extension
+		 * is not seen on received frames.
+		 */
+		r |= XM_RX_DIS_CEXT;
+	}
+	xm_write16(hw, port, XM_RX_CMD, r);
+
+	/* We want short frames padded to 60 bytes. */
+	xm_write16(hw, port, XM_TX_CMD, XM_TX_AUTO_PAD);
+
+	/* Increase threshold for jumbo frames on dual port */
+	if (hw->ports > 1 && jumbo)
+		xm_write16(hw, port, XM_TX_THR, 1020);
+	else
+		xm_write16(hw, port, XM_TX_THR, 512);
+
+	/*
+	 * Enable the reception of all error frames. This is
+	 * a necessary evil due to the design of the XMAC. The
+	 * XMAC's receive FIFO is only 8K in size, however jumbo
+	 * frames can be up to 9000 bytes in length. When bad
+	 * frame filtering is enabled, the XMAC's RX FIFO operates
+	 * in 'store and forward' mode. For this to work, the
+	 * entire frame has to fit into the FIFO, but that means
+	 * that jumbo frames larger than 8192 bytes will be
+	 * truncated. Disabling all bad frame filtering causes
+	 * the RX FIFO to operate in streaming mode, in which
+	 * case the XMAC will start transferring frames out of the
+	 * RX FIFO as soon as the FIFO threshold is reached.
+	 */
+	xm_write32(hw, port, XM_MODE, XM_DEF_MODE);
+
+
+	/*
+	 * Initialize the Receive Counter Event Mask (XM_RX_EV_MSK)
+	 *	- Enable all bits excepting 'Octets Rx OK Low CntOv'
+	 *	  and 'Octets Rx OK Hi Cnt Ov'.
+	 */
+	xm_write32(hw, port, XM_RX_EV_MSK, XMR_DEF_MSK);
+
+	/*
+	 * Initialize the Transmit Counter Event Mask (XM_TX_EV_MSK)
+	 *	- Enable all bits excepting 'Octets Tx OK Low CntOv'
+	 *	  and 'Octets Tx OK Hi Cnt Ov'.
+	 */
+	xm_write32(hw, port, XM_TX_EV_MSK, XMT_DEF_MSK);
+
+	/* Configure MAC arbiter */
+	skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);
+
+	/* configure timeout values */
+	skge_write8(hw, B3_MA_TOINI_RX1, 72);
+	skge_write8(hw, B3_MA_TOINI_RX2, 72);
+	skge_write8(hw, B3_MA_TOINI_TX1, 72);
+	skge_write8(hw, B3_MA_TOINI_TX2, 72);
+
+	skge_write8(hw, B3_MA_RCINI_RX1, 0);
+	skge_write8(hw, B3_MA_RCINI_RX2, 0);
+	skge_write8(hw, B3_MA_RCINI_TX1, 0);
+	skge_write8(hw, B3_MA_RCINI_TX2, 0);
+
+	/* Configure Rx MAC FIFO */
+	skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_CLR);
+	skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_TIM_PAT);
+	skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_ENA_OP_MD);
+
+	/* Configure Tx MAC FIFO */
+	skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_CLR);
+	skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_TX_CTRL_DEF);
+	skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_ENA_OP_MD);
+
+	if (jumbo) {
+		/* Enable frame flushing if jumbo frames used */
+		skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_FLUSH);
+	} else {
+		/* enable timeout timers if normal frames */
+		skge_write16(hw, B3_PA_CTRL,
+			     (port == 0) ? PA_ENA_TO_TX1 : PA_ENA_TO_TX2);
+	}
+}
+
+static void genesis_stop(struct skge_port *skge)
+{
+	struct skge_hw *hw = skge->hw;
+	int port = skge->port;
+	unsigned retries = 1000;
+	u16 cmd;
+
+	/* Disable Tx and Rx */
+	cmd = xm_read16(hw, port, XM_MMU_CMD);
+	cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX);
+	xm_write16(hw, port, XM_MMU_CMD, cmd);
+
+	genesis_reset(hw, port);
+
+	/* Clear Tx packet arbiter timeout IRQ */
+	skge_write16(hw, B3_PA_CTRL,
+		     port == 0 ? PA_CLR_TO_TX1 : PA_CLR_TO_TX2);
+
+	/* Reset the MAC */
+	skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);
+	do {
+		skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST);
+		if (!(skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST))
+			break;
+	} while (--retries > 0);
+
+	/* For external PHYs there must be special handling */
+	if (hw->phy_type != SK_PHY_XMAC) {
+		u32 reg = skge_read32(hw, B2_GP_IO);
+		if (port == 0) {
+			reg |= GP_DIR_0;
+			reg &= ~GP_IO_0;
+		} else {
+			reg |= GP_DIR_2;
+			reg &= ~GP_IO_2;
+		}
+		skge_write32(hw, B2_GP_IO, reg);
+		skge_read32(hw, B2_GP_IO);
+	}
+
+	xm_write16(hw, port, XM_MMU_CMD,
+			xm_read16(hw, port, XM_MMU_CMD)
+			& ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX));
+
+	xm_read16(hw, port, XM_MMU_CMD);
+}
+
+
+static void genesis_get_stats(struct skge_port *skge, u64 *data)
+{
+	struct skge_hw *hw = skge->hw;
+	int port = skge->port;
+	int i;
+	unsigned long timeout = jiffies + HZ;
+
+	xm_write16(hw, port,
+			XM_STAT_CMD, XM_SC_SNP_TXC | XM_SC_SNP_RXC);
+
+	/* wait for update to complete */
+	while (xm_read16(hw, port, XM_STAT_CMD)
+	       & (XM_SC_SNP_TXC | XM_SC_SNP_RXC)) {
+		if (time_after(jiffies, timeout))
+			break;
+		udelay(10);
+	}
+
+	/* special case for 64 bit octet counter */
+	data[0] = (u64) xm_read32(hw, port, XM_TXO_OK_HI) << 32
+		| xm_read32(hw, port, XM_TXO_OK_LO);
+	data[1] = (u64) xm_read32(hw, port, XM_RXO_OK_HI) << 32
+		| xm_read32(hw, port, XM_RXO_OK_LO);
+
+	for (i = 2; i < ARRAY_SIZE(skge_stats); i++)
+		data[i] = xm_read32(hw, port, skge_stats[i].xmac_offset);
+}
+
+static void genesis_mac_intr(struct skge_hw *hw, int port)
+{
+	struct net_device *dev = hw->dev[port];
+	struct skge_port *skge = netdev_priv(dev);
+	u16 status = xm_read16(hw, port, XM_ISRC);
+
+	netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
+		     "mac interrupt status 0x%x\n", status);
+
+	if (hw->phy_type == SK_PHY_XMAC && (status & XM_IS_INP_ASS)) {
+		xm_link_down(hw, port);
+		mod_timer(&skge->link_timer, jiffies + 1);
+	}
+
+	if (status & XM_IS_TXF_UR) {
+		xm_write32(hw, port, XM_MODE, XM_MD_FTF);
+		++dev->stats.tx_fifo_errors;
+	}
+}
+
+static void genesis_link_up(struct skge_port *skge)
+{
+	struct skge_hw *hw = skge->hw;
+	int port = skge->port;
+	u16 cmd, msk;
+	u32 mode;
+
+	cmd = xm_read16(hw, port, XM_MMU_CMD);
+
+	/*
+	 * Enabling pause frame reception is required for 1000BT
+	 * because the XMAC is not reset when the link goes down.
+	 */
+	if (skge->flow_status == FLOW_STAT_NONE ||
+	    skge->flow_status == FLOW_STAT_LOC_SEND)
+		/* Disable Pause Frame Reception */
+		cmd |= XM_MMU_IGN_PF;
+	else
+		/* Enable Pause Frame Reception */
+		cmd &= ~XM_MMU_IGN_PF;
+
+	xm_write16(hw, port, XM_MMU_CMD, cmd);
+
+	mode = xm_read32(hw, port, XM_MODE);
+	if (skge->flow_status == FLOW_STAT_SYMMETRIC ||
+	    skge->flow_status == FLOW_STAT_LOC_SEND) {
+		/*
+		 * Configure Pause Frame Generation
+		 * Use internal and external Pause Frame Generation.
+		 * Sending pause frames is edge triggered.
+		 * Send a Pause frame with the maximum pause time if an
+		 * internal or external FIFO-full condition occurs.
+		 * Send a zero pause time frame to re-start transmission.
+		 */
+		/* XM_PAUSE_DA = '010000C28001' (default) */
+		/* XM_MAC_PTIME = 0xffff (maximum) */
+		/* remember this value is defined in big endian (!) */
+		xm_write16(hw, port, XM_MAC_PTIME, 0xffff);
+
+		mode |= XM_PAUSE_MODE;
+		skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_PAUSE);
+	} else {
+		/*
+		 * Disabling pause frame generation is required for 1000BT
+		 * because the XMAC is not reset when the link goes down.
+		 */
+		/* Disable Pause Mode in Mode Register */
+		mode &= ~XM_PAUSE_MODE;
+
+		skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_DIS_PAUSE);
+	}
+
+	xm_write32(hw, port, XM_MODE, mode);
+
+	/* Turn on detection of Tx underrun */
+	msk = xm_read16(hw, port, XM_IMSK);
+	msk &= ~XM_IS_TXF_UR;
+	xm_write16(hw, port, XM_IMSK, msk);
+
+	xm_read16(hw, port, XM_ISRC);
+
+	/* get MMU Command Reg. */
+	cmd = xm_read16(hw, port, XM_MMU_CMD);
+	if (hw->phy_type != SK_PHY_XMAC && skge->duplex == DUPLEX_FULL)
+		cmd |= XM_MMU_GMII_FD;
+
+	/*
+	 * Workaround BCOM Errata (#10523) for all BCom Phys
+	 * Enable Power Management after link up
+	 */
+	if (hw->phy_type == SK_PHY_BCOM) {
+		xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL,
+			     xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL)
+			     & ~PHY_B_AC_DIS_PM);
+		xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK);
+	}
+
+	/* enable Rx/Tx */
+	xm_write16(hw, port, XM_MMU_CMD,
+			cmd | XM_MMU_ENA_RX | XM_MMU_ENA_TX);
+	skge_link_up(skge);
+}
+
+
+static inline void bcom_phy_intr(struct skge_port *skge)
+{
+	struct skge_hw *hw = skge->hw;
+	int port = skge->port;
+	u16 isrc;
+
+	isrc = xm_phy_read(hw, port, PHY_BCOM_INT_STAT);
+	netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
+		     "phy interrupt status 0x%x\n", isrc);
+
+	if (isrc & PHY_B_IS_PSE)
+		pr_err("%s: uncorrectable pair swap error\n",
+		       hw->dev[port]->name);
+
+	/* Workaround BCom Errata:
+	 *	enable and disable loopback mode if "NO HCD" occurs.
+	 */
+	if (isrc & PHY_B_IS_NO_HDCL) {
+		u16 ctrl = xm_phy_read(hw, port, PHY_BCOM_CTRL);
+		xm_phy_write(hw, port, PHY_BCOM_CTRL,
+				  ctrl | PHY_CT_LOOP);
+		xm_phy_write(hw, port, PHY_BCOM_CTRL,
+				  ctrl & ~PHY_CT_LOOP);
+	}
+
+	if (isrc & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE))
+		bcom_check_link(hw, port);
+
+}
+
+static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
+{
+	int i;
+
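+	/* load the data, start the SMI write cycle, then poll until the
+	 * controller clears its busy bit
+	 */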
+	gma_write16(hw, port, GM_SMI_DATA, val);
+	gma_write16(hw, port, GM_SMI_CTRL,
+			 GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg));
+	for (i = 0; i < PHY_RETRIES; i++) {
+		udelay(1);
+
+		if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY))
+			return 0;
+	}
+
+	pr_warn("%s: phy write timeout\n", hw->dev[port]->name);
+	return -EIO;
+}
+
+static int __gm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
+{
+	int i;
+
+	gma_write16(hw, port, GM_SMI_CTRL,
+			 GM_SMI_CT_PHY_AD(hw->phy_addr)
+			 | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
+
+	for (i = 0; i < PHY_RETRIES; i++) {
+		udelay(1);
+		if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL)
+			goto ready;
+	}
+
+	return -ETIMEDOUT;
+ ready:
+	*val = gma_read16(hw, port, GM_SMI_DATA);
+	return 0;
+}
+
+static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg)
+{
+	u16 v = 0;
+	if (__gm_phy_read(hw, port, reg, &v))
+		pr_warn("%s: phy read timeout\n", hw->dev[port]->name);
+	return v;
+}
+
+/* Marvell Phy Initialization */
+static void yukon_init(struct skge_hw *hw, int port)
+{
+	struct skge_port *skge = netdev_priv(hw->dev[port]);
+	u16 ctrl, ct1000, adv;
+
+	if (skge->autoneg == AUTONEG_ENABLE) {
+		u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
+
+		ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
+			  PHY_M_EC_MAC_S_MSK);
+		ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);
+
+		ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);
+
+		gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
+	}
+
+	ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
+	if (skge->autoneg == AUTONEG_DISABLE)
+		ctrl &= ~PHY_CT_ANE;
+
+	ctrl |= PHY_CT_RESET;
+	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
+
+	ctrl = 0;
+	ct1000 = 0;
+	adv = PHY_AN_CSMA;
+
+	if (skge->autoneg == AUTONEG_ENABLE) {
+		if (hw->copper) {
+			if (skge->advertising & ADVERTISED_1000baseT_Full)
+				ct1000 |= PHY_M_1000C_AFD;
+			if (skge->advertising & ADVERTISED_1000baseT_Half)
+				ct1000 |= PHY_M_1000C_AHD;
+			if (skge->advertising & ADVERTISED_100baseT_Full)
+				adv |= PHY_M_AN_100_FD;
+			if (skge->advertising & ADVERTISED_100baseT_Half)
+				adv |= PHY_M_AN_100_HD;
+			if (skge->advertising & ADVERTISED_10baseT_Full)
+				adv |= PHY_M_AN_10_FD;
+			if (skge->advertising & ADVERTISED_10baseT_Half)
+				adv |= PHY_M_AN_10_HD;
+
+			/* Set Flow-control capabilities */
+			adv |= phy_pause_map[skge->flow_control];
+		} else {
+			if (skge->advertising & ADVERTISED_1000baseT_Full)
+				adv |= PHY_M_AN_1000X_AFD;
+			if (skge->advertising & ADVERTISED_1000baseT_Half)
+				adv |= PHY_M_AN_1000X_AHD;
+
+			adv |= fiber_pause_map[skge->flow_control];
+		}
+
+		/* Restart Auto-negotiation */
+		ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
+	} else {
+		/* forced speed/duplex settings */
+		ct1000 = PHY_M_1000C_MSE;
+
+		if (skge->duplex == DUPLEX_FULL)
+			ctrl |= PHY_CT_DUP_MD;
+
+		switch (skge->speed) {
+		case SPEED_1000:
+			ctrl |= PHY_CT_SP1000;
+			break;
+		case SPEED_100:
+			ctrl |= PHY_CT_SP100;
+			break;
+		}
+
+		ctrl |= PHY_CT_RESET;
+	}
+
+	gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);
+
+	gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
+	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
+
+	/* Enable phy interrupt on autonegotiation complete (or link up) */
+	if (skge->autoneg == AUTONEG_ENABLE)
+		gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_MSK);
+	else
+		gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK);
+}
+
+static void yukon_reset(struct skge_hw *hw, int port)
+{
+	gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);/* disable PHY IRQs */
+	gma_write16(hw, port, GM_MC_ADDR_H1, 0);	/* clear MC hash */
+	gma_write16(hw, port, GM_MC_ADDR_H2, 0);
+	gma_write16(hw, port, GM_MC_ADDR_H3, 0);
+	gma_write16(hw, port, GM_MC_ADDR_H4, 0);
+
+	gma_write16(hw, port, GM_RX_CTRL,
+			 gma_read16(hw, port, GM_RX_CTRL)
+			 | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
+}
+
+/* Apparently, early versions of Yukon-Lite had wrong chip_id? */
+static int is_yukon_lite_a0(struct skge_hw *hw)
+{
+	u32 reg;
+	int ret;
+
+	if (hw->chip_id != CHIP_ID_YUKON)
+		return 0;
+
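+	/* Probe: presumably the top byte of the Flash Address Register is
+	 * writable on a Yukon-Lite, so a non-zero read-back identifies a
+	 * Lite A0 that reports the plain Yukon chip_id.
+	 */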
+	reg = skge_read32(hw, B2_FAR);
+	skge_write8(hw, B2_FAR + 3, 0xff);
+	ret = (skge_read8(hw, B2_FAR + 3) != 0);
+	skge_write32(hw, B2_FAR, reg);
+	return ret;
+}
+
+static void yukon_mac_init(struct skge_hw *hw, int port)
+{
+	struct skge_port *skge = netdev_priv(hw->dev[port]);
+	int i;
+	u32 reg;
+	const u8 *addr = hw->dev[port]->dev_addr;
+
+	/* WA code for COMA mode -- set PHY reset */
+	if (hw->chip_id == CHIP_ID_YUKON_LITE &&
+	    hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
+		reg = skge_read32(hw, B2_GP_IO);
+		reg |= GP_DIR_9 | GP_IO_9;
+		skge_write32(hw, B2_GP_IO, reg);
+	}
+
+	/* hard reset */
+	skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
+	skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
+
+	/* WA code for COMA mode -- clear PHY reset */
+	if (hw->chip_id == CHIP_ID_YUKON_LITE &&
+	    hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
+		reg = skge_read32(hw, B2_GP_IO);
+		reg |= GP_DIR_9;
+		reg &= ~GP_IO_9;
+		skge_write32(hw, B2_GP_IO, reg);
+	}
+
+	/* Set hardware config mode */
+	reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP |
+		GPC_ENA_XC | GPC_ANEG_ADV_ALL_M | GPC_ENA_PAUSE;
+	reg |= hw->copper ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB;
+
+	/* Clear GMC reset */
+	skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET);
+	skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_CLR);
+	skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR);
+
+	if (skge->autoneg == AUTONEG_DISABLE) {
+		reg = GM_GPCR_AU_ALL_DIS;
+		gma_write16(hw, port, GM_GP_CTRL,
+				 gma_read16(hw, port, GM_GP_CTRL) | reg);
+
+		switch (skge->speed) {
+		case SPEED_1000:
+			reg &= ~GM_GPCR_SPEED_100;
+			reg |= GM_GPCR_SPEED_1000;
+			break;
+		case SPEED_100:
+			reg &= ~GM_GPCR_SPEED_1000;
+			reg |= GM_GPCR_SPEED_100;
+			break;
+		case SPEED_10:
+			reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100);
+			break;
+		}
+
+		if (skge->duplex == DUPLEX_FULL)
+			reg |= GM_GPCR_DUP_FULL;
+	} else
+		reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL;
+
+	switch (skge->flow_control) {
+	case FLOW_MODE_NONE:
+		skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
+		reg |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
+		break;
+	case FLOW_MODE_LOC_SEND:
+		/* disable Rx flow-control */
+		reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
+		break;
+	case FLOW_MODE_SYMMETRIC:
+	case FLOW_MODE_SYM_OR_REM:
+		/* enable Tx & Rx flow-control */
+		break;
+	}
+
+	gma_write16(hw, port, GM_GP_CTRL, reg);
+	skge_read16(hw, SK_REG(port, GMAC_IRQ_SRC));
+
+	yukon_init(hw, port);
+
+	/* MIB clear */
+	reg = gma_read16(hw, port, GM_PHY_ADDR);
+	gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);
+
+	for (i = 0; i < GM_MIB_CNT_SIZE; i++)
+		gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i);
+	gma_write16(hw, port, GM_PHY_ADDR, reg);
+
+	/* transmit control */
+	gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
+
+	/* receive control reg: unicast + multicast + no FCS  */
+	gma_write16(hw, port, GM_RX_CTRL,
+			 GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);
+
+	/* transmit flow control */
+	gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);
+
+	/* transmit parameter */
+	gma_write16(hw, port, GM_TX_PARAM,
+			 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
+			 TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
+			 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF));
+
+	/* configure the Serial Mode Register */
+	reg = DATA_BLIND_VAL(DATA_BLIND_DEF)
+		| GM_SMOD_VLAN_ENA
+		| IPG_DATA_VAL(IPG_DATA_DEF);
+
+	if (hw->dev[port]->mtu > ETH_DATA_LEN)
+		reg |= GM_SMOD_JUMBO_ENA;
+
+	gma_write16(hw, port, GM_SERIAL_MODE, reg);
+
+	/* physical address: used for pause frames */
+	gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);
+	/* virtual address for data */
+	gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);
+
+	/* enable interrupt mask for counter overflows */
+	gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
+	gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
+	gma_write16(hw, port, GM_TR_IRQ_MSK, 0);
+
+	/* Initialize MAC FIFOs */
+
+	/* Configure Rx MAC FIFO */
+	skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK);
+	reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
+
+	/* disable Rx GMAC FIFO Flush for YUKON-Lite Rev. A0 only */
+	if (is_yukon_lite_a0(hw))
+		reg &= ~GMF_RX_F_FL_ON;
+
+	skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
+	skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg);
+	/*
+	 * Because Pause Packet Truncation in the GMAC is not working,
+	 * we have to increase the Flush Threshold to 64 bytes
+	 * in order to flush pause packets from the Rx FIFO on Yukon-1.
+	 */
+	skge_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF+1);
+
+	/* Configure Tx MAC FIFO */
+	skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
+	skge_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
+}
+
+/* Go into power down mode */
+static void yukon_suspend(struct skge_hw *hw, int port)
+{
+	u16 ctrl;
+
+	ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
+	ctrl |= PHY_M_PC_POL_R_DIS;
+	gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
+
+	ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
+	ctrl |= PHY_CT_RESET;
+	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
+
+	/* switch IEEE compatible power down mode on */
+	ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
+	ctrl |= PHY_CT_PDOWN;
+	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
+}
+
+static void yukon_stop(struct skge_port *skge)
+{
+	struct skge_hw *hw = skge->hw;
+	int port = skge->port;
+
+	skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
+	yukon_reset(hw, port);
+
+	gma_write16(hw, port, GM_GP_CTRL,
+			 gma_read16(hw, port, GM_GP_CTRL)
+			 & ~(GM_GPCR_TX_ENA|GM_GPCR_RX_ENA));
+	gma_read16(hw, port, GM_GP_CTRL);
+
+	yukon_suspend(hw, port);
+
+	/* set GPHY Control reset */
+	skge_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
+	skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
+}
+
+static void yukon_get_stats(struct skge_port *skge, u64 *data)
+{
+	struct skge_hw *hw = skge->hw;
+	int port = skge->port;
+	int i;
+
+	data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32
+		| gma_read32(hw, port, GM_TXO_OK_LO);
+	data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32
+		| gma_read32(hw, port, GM_RXO_OK_LO);
+
+	for (i = 2; i < ARRAY_SIZE(skge_stats); i++)
+		data[i] = gma_read32(hw, port,
+					  skge_stats[i].gma_offset);
+}
+
+static void yukon_mac_intr(struct skge_hw *hw, int port)
+{
+	struct net_device *dev = hw->dev[port];
+	struct skge_port *skge = netdev_priv(dev);
+	u8 status = skge_read8(hw, SK_REG(port, GMAC_IRQ_SRC));
+
+	netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
+		     "mac interrupt status 0x%x\n", status);
+
+	if (status & GM_IS_RX_FF_OR) {
+		++dev->stats.rx_fifo_errors;
+		skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
+	}
+
+	if (status & GM_IS_TX_FF_UR) {
+		++dev->stats.tx_fifo_errors;
+		skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
+	}
+
+}
+
+static u16 yukon_speed(const struct skge_hw *hw, u16 aux)
+{
+	switch (aux & PHY_M_PS_SPEED_MSK) {
+	case PHY_M_PS_SPEED_1000:
+		return SPEED_1000;
+	case PHY_M_PS_SPEED_100:
+		return SPEED_100;
+	default:
+		return SPEED_10;
+	}
+}
+
+static void yukon_link_up(struct skge_port *skge)
+{
+	struct skge_hw *hw = skge->hw;
+	int port = skge->port;
+	u16 reg;
+
+	/* Enable Transmit FIFO Underrun */
+	skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);
+
+	reg = gma_read16(hw, port, GM_GP_CTRL);
+	if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE)
+		reg |= GM_GPCR_DUP_FULL;
+
+	/* enable Rx/Tx */
+	reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
+	gma_write16(hw, port, GM_GP_CTRL, reg);
+
+	gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK);
+	skge_link_up(skge);
+}
+
+static void yukon_link_down(struct skge_port *skge)
+{
+	struct skge_hw *hw = skge->hw;
+	int port = skge->port;
+	u16 ctrl;
+
+	ctrl = gma_read16(hw, port, GM_GP_CTRL);
+	ctrl &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
+	gma_write16(hw, port, GM_GP_CTRL, ctrl);
+
+	if (skge->flow_status == FLOW_STAT_REM_SEND) {
+		ctrl = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV);
+		ctrl |= PHY_M_AN_ASP;
+		/* restore Asymmetric Pause bit */
+		gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, ctrl);
+	}
+
+	skge_link_down(skge);
+
+	yukon_init(hw, port);
+}
+
+static void yukon_phy_intr(struct skge_port *skge)
+{
+	struct skge_hw *hw = skge->hw;
+	int port = skge->port;
+	const char *reason = NULL;
+	u16 istatus, phystat;
+
+	istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
+	phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
+
+	netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
+		     "phy interrupt status 0x%x 0x%x\n", istatus, phystat);
+
+	if (istatus & PHY_M_IS_AN_COMPL) {
+		if (gm_phy_read(hw, port, PHY_MARV_AUNE_LP)
+		    & PHY_M_AN_RF) {
+			reason = "remote fault";
+			goto failed;
+		}
+
+		if (gm_phy_read(hw, port, PHY_MARV_1000T_STAT) & PHY_B_1000S_MSF) {
+			reason = "master/slave fault";
+			goto failed;
+		}
+
+		if (!(phystat & PHY_M_PS_SPDUP_RES)) {
+			reason = "speed/duplex";
+			goto failed;
+		}
+
+		skge->duplex = (phystat & PHY_M_PS_FULL_DUP)
+			? DUPLEX_FULL : DUPLEX_HALF;
+		skge->speed = yukon_speed(hw, phystat);
+
+		/* We are using IEEE 802.3z/D5.0 Table 37-4 */
+		switch (phystat & PHY_M_PS_PAUSE_MSK) {
+		case PHY_M_PS_PAUSE_MSK:
+			skge->flow_status = FLOW_STAT_SYMMETRIC;
+			break;
+		case PHY_M_PS_RX_P_EN:
+			skge->flow_status = FLOW_STAT_REM_SEND;
+			break;
+		case PHY_M_PS_TX_P_EN:
+			skge->flow_status = FLOW_STAT_LOC_SEND;
+			break;
+		default:
+			skge->flow_status = FLOW_STAT_NONE;
+		}
+
+		if (skge->flow_status == FLOW_STAT_NONE ||
+		    (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF))
+			skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
+		else
+			skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
+		yukon_link_up(skge);
+		return;
+	}
+
+	if (istatus & PHY_M_IS_LSP_CHANGE)
+		skge->speed = yukon_speed(hw, phystat);
+
+	if (istatus & PHY_M_IS_DUP_CHANGE)
+		skge->duplex = (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
+	if (istatus & PHY_M_IS_LST_CHANGE) {
+		if (phystat & PHY_M_PS_LINK_UP)
+			yukon_link_up(skge);
+		else
+			yukon_link_down(skge);
+	}
+	return;
+ failed:
+	pr_err("%s: autonegotiation failed (%s)\n", skge->netdev->name, reason);
+
+	/* XXX restart autonegotiation? */
+}
+
+static void skge_phy_reset(struct skge_port *skge)
+{
+	struct skge_hw *hw = skge->hw;
+	int port = skge->port;
+	struct net_device *dev = hw->dev[port];
+
+	netif_stop_queue(skge->netdev);
+	netif_carrier_off(skge->netdev);
+
+	spin_lock_bh(&hw->phy_lock);
+	if (is_genesis(hw)) {
+		genesis_reset(hw, port);
+		genesis_mac_init(hw, port);
+	} else {
+		yukon_reset(hw, port);
+		yukon_init(hw, port);
+	}
+	spin_unlock_bh(&hw->phy_lock);
+
+	skge_set_multicast(dev);
+}
+
+/* Basic MII support */
+static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct mii_ioctl_data *data = if_mii(ifr);
+	struct skge_port *skge = netdev_priv(dev);
+	struct skge_hw *hw = skge->hw;
+	int err = -EOPNOTSUPP;
+
+	if (!netif_running(dev))
+		return -ENODEV;	/* Phy still in reset */
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+		data->phy_id = hw->phy_addr;
+
+		/* fallthru */
+	case SIOCGMIIREG: {
+		u16 val = 0;
+		spin_lock_bh(&hw->phy_lock);
+
+		if (is_genesis(hw))
+			err = __xm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
+		else
+			err = __gm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
+		spin_unlock_bh(&hw->phy_lock);
+		data->val_out = val;
+		break;
+	}
+
+	case SIOCSMIIREG:
+		spin_lock_bh(&hw->phy_lock);
+		if (is_genesis(hw))
+			err = xm_phy_write(hw, skge->port, data->reg_num & 0x1f,
+				   data->val_in);
+		else
+			err = gm_phy_write(hw, skge->port, data->reg_num & 0x1f,
+				   data->val_in);
+		spin_unlock_bh(&hw->phy_lock);
+		break;
+	}
+	return err;
+}
+
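+/* Assign a chunk of the on-chip RAM buffer to queue q.
+ * The RAM buffer registers are in units of 64-bit words, hence the
+ * divide-by-8 of the byte-based start address and length.
+ */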
+static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len)
+{
+	u32 end;
+
+	start /= 8;
+	len /= 8;
+	end = start + len - 1;
+
+	skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
+	skge_write32(hw, RB_ADDR(q, RB_START), start);
+	skge_write32(hw, RB_ADDR(q, RB_WP), start);
+	skge_write32(hw, RB_ADDR(q, RB_RP), start);
+	skge_write32(hw, RB_ADDR(q, RB_END), end);
+
+	if (q == Q_R1 || q == Q_R2) {
+		/* Set thresholds on the receive queues (upper at 2/3, lower at 1/3 of the buffer) */
+		skge_write32(hw, RB_ADDR(q, RB_RX_UTPP),
+			     start + (2*len)/3);
+		skge_write32(hw, RB_ADDR(q, RB_RX_LTPP),
+			     start + (len/3));
+	} else {
+		/* Enable store & forward on Tx queue's because
+		 * Tx FIFO is only 4K on Genesis and 1K on Yukon
+		 */
+		skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
+	}
+
+	skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
+}
+
+/* Setup Bus Memory Interface */
+static void skge_qset(struct skge_port *skge, u16 q,
+		      const struct skge_element *e)
+{
+	struct skge_hw *hw = skge->hw;
+	u32 watermark = 0x600;
+	u64 base = skge->dma + (e->desc - skge->mem);
+
+	/* optimization to reduce window on 32-bit/33 MHz PCI */
+	if ((skge_read16(hw, B0_CTST) & (CS_BUS_CLOCK | CS_BUS_SLOT_SZ)) == 0)
+		watermark /= 2;
+
+	skge_write32(hw, Q_ADDR(q, Q_CSR), CSR_CLR_RESET);
+	skge_write32(hw, Q_ADDR(q, Q_F), watermark);
+	skge_write32(hw, Q_ADDR(q, Q_DA_H), (u32)(base >> 32));
+	skge_write32(hw, Q_ADDR(q, Q_DA_L), (u32)base);
+}
+
+static int skge_up(struct net_device *dev)
+{
+	struct skge_port *skge = netdev_priv(dev);
+	struct skge_hw *hw = skge->hw;
+	int port = skge->port;
+	u32 chunk, ram_addr;
+	size_t rx_size, tx_size;
+	int err;
+
+	if (!is_valid_ether_addr(dev->dev_addr))
+		return -EINVAL;
+
+	netif_info(skge, ifup, skge->netdev, "enabling interface\n");
+
+	if (dev->mtu > RX_BUF_SIZE)
+		skge->rx_buf_size = dev->mtu + ETH_HLEN;
+	else
+		skge->rx_buf_size = RX_BUF_SIZE;
+
+
+	rx_size = skge->rx_ring.count * sizeof(struct skge_rx_desc);
+	tx_size = skge->tx_ring.count * sizeof(struct skge_tx_desc);
+	skge->mem_size = tx_size + rx_size;
+	skge->mem = pci_alloc_consistent(hw->pdev, skge->mem_size, &skge->dma);
+	if (!skge->mem)
+		return -ENOMEM;
+
+	BUG_ON(skge->dma & 7);
+
+	if (upper_32_bits(skge->dma) != upper_32_bits(skge->dma + skge->mem_size)) {
+		dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n");
+		err = -EINVAL;
+		goto free_pci_mem;
+	}
+
+	memset(skge->mem, 0, skge->mem_size);
+
+	err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma);
+	if (err)
+		goto free_pci_mem;
+
+	err = skge_rx_fill(dev);
+	if (err)
+		goto free_rx_ring;
+
+	err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size,
+			      skge->dma + rx_size);
+	if (err)
+		goto free_rx_ring;
+
+	if (hw->ports == 1) {
+		err = request_irq(hw->pdev->irq, skge_intr, IRQF_SHARED,
+				  dev->name, hw);
+		if (err) {
+			netdev_err(dev, "Unable to allocate interrupt %d error: %d\n",
+				   hw->pdev->irq, err);
+			goto free_tx_ring;
+		}
+	}
+
+	/* Initialize MAC */
+	netif_carrier_off(dev);
+	spin_lock_bh(&hw->phy_lock);
+	if (is_genesis(hw))
+		genesis_mac_init(hw, port);
+	else
+		yukon_mac_init(hw, port);
+	spin_unlock_bh(&hw->phy_lock);
+
+	/* Configure RAM buffers - split equally between ports and tx/rx */
+	chunk = (hw->ram_size  - hw->ram_offset) / (hw->ports * 2);
+	ram_addr = hw->ram_offset + 2 * chunk * port;
+
+	skge_ramset(hw, rxqaddr[port], ram_addr, chunk);
+	skge_qset(skge, rxqaddr[port], skge->rx_ring.to_clean);
+
+	BUG_ON(skge->tx_ring.to_use != skge->tx_ring.to_clean);
+	skge_ramset(hw, txqaddr[port], ram_addr+chunk, chunk);
+	skge_qset(skge, txqaddr[port], skge->tx_ring.to_use);
+
+	/* Start receiver BMU */
+	wmb();
+	skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F);
+	skge_led(skge, LED_MODE_ON);
+
+	spin_lock_irq(&hw->hw_lock);
+	hw->intr_mask |= portmask[port];
+	skge_write32(hw, B0_IMSK, hw->intr_mask);
+	skge_read32(hw, B0_IMSK);
+	spin_unlock_irq(&hw->hw_lock);
+
+	napi_enable(&skge->napi);
+
+	skge_set_multicast(dev);
+
+	return 0;
+
+ free_tx_ring:
+	kfree(skge->tx_ring.start);
+ free_rx_ring:
+	skge_rx_clean(skge);
+	kfree(skge->rx_ring.start);
+ free_pci_mem:
+	pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma);
+	skge->mem = NULL;
+
+	return err;
+}
+
+/* stop receiver */
+static void skge_rx_stop(struct skge_hw *hw, int port)
+{
+	skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_STOP);
+	skge_write32(hw, RB_ADDR(port ? Q_R2 : Q_R1, RB_CTRL),
+		     RB_RST_SET|RB_DIS_OP_MD);
+	skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET);
+}
+
+static int skge_down(struct net_device *dev)
+{
+	struct skge_port *skge = netdev_priv(dev);
+	struct skge_hw *hw = skge->hw;
+	int port = skge->port;
+
+	if (skge->mem == NULL)
+		return 0;
+
+	netif_info(skge, ifdown, skge->netdev, "disabling interface\n");
+
+	netif_tx_disable(dev);
+
+	if (is_genesis(hw) && hw->phy_type == SK_PHY_XMAC)
+		del_timer_sync(&skge->link_timer);
+
+	napi_disable(&skge->napi);
+	netif_carrier_off(dev);
+
+	spin_lock_irq(&hw->hw_lock);
+	hw->intr_mask &= ~portmask[port];
+	skge_write32(hw, B0_IMSK, (hw->ports == 1) ? 0 : hw->intr_mask);
+	skge_read32(hw, B0_IMSK);
+	spin_unlock_irq(&hw->hw_lock);
+
+	if (hw->ports == 1)
+		free_irq(hw->pdev->irq, hw);
+
+	skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
+	if (is_genesis(hw))
+		genesis_stop(skge);
+	else
+		yukon_stop(skge);
+
+	/* Stop transmitter */
+	skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
+	skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
+		     RB_RST_SET|RB_DIS_OP_MD);
+
+
+	/* Disable Force Sync bit and Enable Alloc bit */
+	skge_write8(hw, SK_REG(port, TXA_CTRL),
+		    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
+
+	/* Stop Interval Timer and Limit Counter of Tx Arbiter */
+	skge_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
+	skge_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);
+
+	/* Reset PCI FIFO */
+	skge_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_SET_RESET);
+	skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);
+
+	/* Reset the RAM Buffer async Tx queue */
+	skge_write8(hw, RB_ADDR(port == 0 ? Q_XA1 : Q_XA2, RB_CTRL), RB_RST_SET);
+
+	skge_rx_stop(hw, port);
+
+	if (is_genesis(hw)) {
+		skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_SET);
+		skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_SET);
+	} else {
+		skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
+		skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
+	}
+
+	skge_led(skge, LED_MODE_OFF);
+
+	netif_tx_lock_bh(dev);
+	skge_tx_clean(dev);
+	netif_tx_unlock_bh(dev);
+
+	skge_rx_clean(skge);
+
+	kfree(skge->rx_ring.start);
+	kfree(skge->tx_ring.start);
+	pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma);
+	skge->mem = NULL;
+	return 0;
+}
+
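+/* Number of free transmit ring slots. One slot is left unused so a
+ * full ring can be distinguished from an empty one; the barrier pairs
+ * with the smp_wmb() in skge_xmit_frame().
+ */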
+static inline int skge_avail(const struct skge_ring *ring)
+{
+	smp_mb();
+	return ((ring->to_clean > ring->to_use) ? 0 : ring->count)
+		+ (ring->to_clean - ring->to_use) - 1;
+}
+
+static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
+				   struct net_device *dev)
+{
+	struct skge_port *skge = netdev_priv(dev);
+	struct skge_hw *hw = skge->hw;
+	struct skge_element *e;
+	struct skge_tx_desc *td;
+	int i;
+	u32 control, len;
+	dma_addr_t map;
+
+	if (skb_padto(skb, ETH_ZLEN))
+		return NETDEV_TX_OK;
+
+	if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1))
+		return NETDEV_TX_BUSY;
+
+	e = skge->tx_ring.to_use;
+	td = e->desc;
+	BUG_ON(td->control & BMU_OWN);
+	e->skb = skb;
+	len = skb_headlen(skb);
+	map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(hw->pdev, map))
+		goto mapping_error;
+
+	dma_unmap_addr_set(e, mapaddr, map);
+	dma_unmap_len_set(e, maplen, len);
+
+	td->dma_lo = lower_32_bits(map);
+	td->dma_hi = upper_32_bits(map);
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		const int offset = skb_checksum_start_offset(skb);
+
+		/* This seems backwards, but it is what the sk98lin
+		 * does.  Looks like hardware is wrong?
+		 */
+		if (ipip_hdr(skb)->protocol == IPPROTO_UDP &&
+		    hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON)
+			control = BMU_TCP_CHECK;
+		else
+			control = BMU_UDP_CHECK;
+
+		td->csum_offs = 0;
+		td->csum_start = offset;
+		td->csum_write = offset + skb->csum_offset;
+	} else
+		control = BMU_CHECK;
+
+	if (!skb_shinfo(skb)->nr_frags) /* single buffer i.e. no fragments */
+		control |= BMU_EOF | BMU_IRQ_EOF;
+	else {
+		struct skge_tx_desc *tf = td;
+
+		control |= BMU_STFWD;
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+			map = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
+					       skb_frag_size(frag), DMA_TO_DEVICE);
+			if (dma_mapping_error(&hw->pdev->dev, map))
+				goto mapping_unwind;
+
+			e = e->next;
+			e->skb = skb;
+			tf = e->desc;
+			BUG_ON(tf->control & BMU_OWN);
+
+			tf->dma_lo = lower_32_bits(map);
+			tf->dma_hi = upper_32_bits(map);
+			dma_unmap_addr_set(e, mapaddr, map);
+			dma_unmap_len_set(e, maplen, skb_frag_size(frag));
+
+			tf->control = BMU_OWN | BMU_SW | control | skb_frag_size(frag);
+		}
+		tf->control |= BMU_EOF | BMU_IRQ_EOF;
+	}
+	/* Make sure all the descriptors are written */
+	wmb();
+	td->control = BMU_OWN | BMU_SW | BMU_STF | control | len;
+	wmb();
+
+	netdev_sent_queue(dev, skb->len);
+
+	skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);
+
+	netif_printk(skge, tx_queued, KERN_DEBUG, skge->netdev,
+		     "tx queued, slot %td, len %d\n",
+		     e - skge->tx_ring.start, skb->len);
+
+	skge->tx_ring.to_use = e->next;
+	smp_wmb();
+
+	if (skge_avail(&skge->tx_ring) <= TX_LOW_WATER) {
+		netdev_dbg(dev, "transmit queue full\n");
+		netif_stop_queue(dev);
+	}
+
+	return NETDEV_TX_OK;
+
+mapping_unwind:
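+	/* unwind: the first element was mapped with pci_map_single()
+	 * (the skb header), later ones with skb_frag_dma_map()
+	 */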
+	e = skge->tx_ring.to_use;
+	pci_unmap_single(hw->pdev,
+			 dma_unmap_addr(e, mapaddr),
+			 dma_unmap_len(e, maplen),
+			 PCI_DMA_TODEVICE);
+	while (i-- > 0) {
+		e = e->next;
+		pci_unmap_page(hw->pdev,
+			       dma_unmap_addr(e, mapaddr),
+			       dma_unmap_len(e, maplen),
+			       PCI_DMA_TODEVICE);
+	}
+
+mapping_error:
+	if (net_ratelimit())
+		dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
+}
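+
+/* Sketch of the descriptor handoff above (commentary, not driver code):
+ * every later descriptor in the chain is filled in and given BMU_OWN
+ * first; only then, after a wmb(), does the first descriptor get
+ * BMU_OWN, so the BMU can never start on a partially written chain:
+ *
+ *	fill td and all fragment descriptors
+ *	wmb()				-- descriptor words visible to device
+ *	td->control = BMU_OWN | ...	-- hand the whole chain over
+ *	wmb()				-- ownership visible before the kick
+ *	skge_write8(..., CSR_START)	-- start the BMU
+ */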
+
+/* Free resources associated with this ring element */
+static inline void skge_tx_unmap(struct pci_dev *pdev, struct skge_element *e,
+				 u32 control)
+{
+	/* skb header vs. fragment */
+	if (control & BMU_STF)
+		pci_unmap_single(pdev, dma_unmap_addr(e, mapaddr),
+				 dma_unmap_len(e, maplen),
+				 PCI_DMA_TODEVICE);
+	else
+		pci_unmap_page(pdev, dma_unmap_addr(e, mapaddr),
+			       dma_unmap_len(e, maplen),
+			       PCI_DMA_TODEVICE);
+}
+
+/* Free all buffers in transmit ring */
+static void skge_tx_clean(struct net_device *dev)
+{
+	struct skge_port *skge = netdev_priv(dev);
+	struct skge_element *e;
+
+	for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
+		struct skge_tx_desc *td = e->desc;
+
+		skge_tx_unmap(skge->hw->pdev, e, td->control);
+
+		if (td->control & BMU_EOF)
+			dev_kfree_skb(e->skb);
+		td->control = 0;
+	}
+
+	netdev_reset_queue(dev);
+	skge->tx_ring.to_clean = e;
+}
+
+static void skge_tx_timeout(struct net_device *dev)
+{
+	struct skge_port *skge = netdev_priv(dev);
+
+	netif_printk(skge, timer, KERN_DEBUG, skge->netdev, "tx timeout\n");
+
+	skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP);
+	skge_tx_clean(dev);
+	netif_wake_queue(dev);
+}
+
+static int skge_change_mtu(struct net_device *dev, int new_mtu)
+{
+	int err;
+
+	if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
+		return -EINVAL;
+
+	if (!netif_running(dev)) {
+		dev->mtu = new_mtu;
+		return 0;
+	}
+
+	skge_down(dev);
+
+	dev->mtu = new_mtu;
+
+	err = skge_up(dev);
+	if (err)
+		dev_close(dev);
+
+	return err;
+}
+
+/* IEEE 802.3x PAUSE frames are addressed to 01:80:c2:00:00:01 */
+static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 };
+
+static void genesis_add_filter(u8 filter[8], const u8 *addr)
+{
+	u32 crc, bit;
+
+	crc = ether_crc_le(ETH_ALEN, addr);
+	bit = ~crc & 0x3f;
+	filter[bit/8] |= 1 << (bit%8);
+}
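+
+/* Worked example (illustrative): the XMAC hash filter is indexed by the
+ * complement of the low six bits of the little-endian CRC-32 of the
+ * address.  If that yields bit 42, then filter[42/8] = filter[5] gets
+ * bit 42%8 = 2 set.  Like any imperfect hash filter, other multicast
+ * addresses may collide onto a set bit and will also be accepted.
+ */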
+
+static void genesis_set_multicast(struct net_device *dev)
+{
+	struct skge_port *skge = netdev_priv(dev);
+	struct skge_hw *hw = skge->hw;
+	int port = skge->port;
+	struct netdev_hw_addr *ha;
+	u32 mode;
+	u8 filter[8];
+
+	mode = xm_read32(hw, port, XM_MODE);
+	mode |= XM_MD_ENA_HASH;
+	if (dev->flags & IFF_PROMISC)
+		mode |= XM_MD_ENA_PROM;
+	else
+		mode &= ~XM_MD_ENA_PROM;
+
+	if (dev->flags & IFF_ALLMULTI)
+		memset(filter, 0xff, sizeof(filter));
+	else {
+		memset(filter, 0, sizeof(filter));
+
+		if (skge->flow_status == FLOW_STAT_REM_SEND ||
+		    skge->flow_status == FLOW_STAT_SYMMETRIC)
+			genesis_add_filter(filter, pause_mc_addr);
+
+		netdev_for_each_mc_addr(ha, dev)
+			genesis_add_filter(filter, ha->addr);
+	}
+
+	xm_write32(hw, port, XM_MODE, mode);
+	xm_outhash(hw, port, XM_HSM, filter);
+}
+
+static void yukon_add_filter(u8 filter[8], const u8 *addr)
+{
+	u32 bit = ether_crc(ETH_ALEN, addr) & 0x3f;
+	filter[bit/8] |= 1 << (bit%8);
+}
+
+static void yukon_set_multicast(struct net_device *dev)
+{
+	struct skge_port *skge = netdev_priv(dev);
+	struct skge_hw *hw = skge->hw;
+	int port = skge->port;
+	struct netdev_hw_addr *ha;
+	int rx_pause = (skge->flow_status == FLOW_STAT_REM_SEND ||
+			skge->flow_status == FLOW_STAT_SYMMETRIC);
+	u16 reg;
+	u8 filter[8];
+
+	memset(filter, 0, sizeof(filter));
+
+	reg = gma_read16(hw, port, GM_RX_CTRL);
+	reg |= GM_RXCR_UCF_ENA;
+
+	if (dev->flags & IFF_PROMISC) 		/* promiscuous */
+		reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
+	else if (dev->flags & IFF_ALLMULTI)	/* all multicast */
+		memset(filter, 0xff, sizeof(filter));
+	else if (netdev_mc_empty(dev) && !rx_pause)	/* no multicast */
+		reg &= ~GM_RXCR_MCF_ENA;
+	else {
+		reg |= GM_RXCR_MCF_ENA;
+
+		if (rx_pause)
+			yukon_add_filter(filter, pause_mc_addr);
+
+		netdev_for_each_mc_addr(ha, dev)
+			yukon_add_filter(filter, ha->addr);
+	}
+
+	gma_write16(hw, port, GM_MC_ADDR_H1,
+			 (u16)filter[0] | ((u16)filter[1] << 8));
+	gma_write16(hw, port, GM_MC_ADDR_H2,
+			 (u16)filter[2] | ((u16)filter[3] << 8));
+	gma_write16(hw, port, GM_MC_ADDR_H3,
+			 (u16)filter[4] | ((u16)filter[5] << 8));
+	gma_write16(hw, port, GM_MC_ADDR_H4,
+			 (u16)filter[6] | ((u16)filter[7] << 8));
+
+	gma_write16(hw, port, GM_RX_CTRL, reg);
+}
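+
+/* Packing example (illustrative): filter[] is written little-endian
+ * into the four 16-bit hash registers above, so hash bit 42
+ * (filter[5] bit 2) ends up as bit 10 of GM_MC_ADDR_H3.
+ */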
+
+static inline u16 phy_length(const struct skge_hw *hw, u32 status)
+{
+	if (is_genesis(hw))
+		return status >> XMR_FS_LEN_SHIFT;
+	else
+		return status >> GMR_FS_LEN_SHIFT;
+}
+
+static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
+{
+	if (is_genesis(hw))
+		return (status & (XMR_FS_ERR | XMR_FS_2L_VLAN)) != 0;
+	else
+		return (status & GMR_FS_ANY_ERR) ||
+			(status & GMR_FS_RX_OK) == 0;
+}
+
+static void skge_set_multicast(struct net_device *dev)
+{
+	struct skge_port *skge = netdev_priv(dev);
+
+	if (is_genesis(skge->hw))
+		genesis_set_multicast(dev);
+	else
+		yukon_set_multicast(dev);
+}
+
+/* Get receive buffer from descriptor.
+ * Handles copy of small buffers and reallocation failures
+ */
+static struct sk_buff *skge_rx_get(struct net_device *dev,
+				   struct skge_element *e,
+				   u32 control, u32 status, u16 csum)
+{
+	struct skge_port *skge = netdev_priv(dev);
+	struct sk_buff *skb;
+	u16 len = control & BMU_BBC;
+
+	netif_printk(skge, rx_status, KERN_DEBUG, skge->netdev,
+		     "rx slot %td status 0x%x len %d\n",
+		     e - skge->rx_ring.start, status, len);
+
+	if (len > skge->rx_buf_size)
+		goto error;
+
+	if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF))
+		goto error;
+
+	if (bad_phy_status(skge->hw, status))
+		goto error;
+
+	if (phy_length(skge->hw, status) != len)
+		goto error;
+
+	if (len < RX_COPY_THRESHOLD) {
+		skb = netdev_alloc_skb_ip_align(dev, len);
+		if (!skb)
+			goto resubmit;
+
+		pci_dma_sync_single_for_cpu(skge->hw->pdev,
+					    dma_unmap_addr(e, mapaddr),
+					    dma_unmap_len(e, maplen),
+					    PCI_DMA_FROMDEVICE);
+		skb_copy_from_linear_data(e->skb, skb->data, len);
+		pci_dma_sync_single_for_device(skge->hw->pdev,
+					       dma_unmap_addr(e, mapaddr),
+					       dma_unmap_len(e, maplen),
+					       PCI_DMA_FROMDEVICE);
+		skge_rx_reuse(e, skge->rx_buf_size);
+	} else {
+		struct skge_element ee;
+		struct sk_buff *nskb;
+
+		nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size);
+		if (!nskb)
+			goto resubmit;
+
+		ee = *e;
+
+		skb = ee.skb;
+		prefetch(skb->data);
+
+		if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) {
+			dev_kfree_skb(nskb);
+			goto resubmit;
+		}
+
+		pci_unmap_single(skge->hw->pdev,
+				 dma_unmap_addr(&ee, mapaddr),
+				 dma_unmap_len(&ee, maplen),
+				 PCI_DMA_FROMDEVICE);
+	}
+
+	skb_put(skb, len);
+
+	if (dev->features & NETIF_F_RXCSUM) {
+		skb->csum = csum;
+		skb->ip_summed = CHECKSUM_COMPLETE;
+	}
+
+	skb->protocol = eth_type_trans(skb, dev);
+
+	return skb;
+error:
+	netif_printk(skge, rx_err, KERN_DEBUG, skge->netdev,
+		     "rx err, slot %td control 0x%x status 0x%x\n",
+		     e - skge->rx_ring.start, control, status);
+
+	if (is_genesis(skge->hw)) {
+		if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
+			dev->stats.rx_length_errors++;
+		if (status & XMR_FS_FRA_ERR)
+			dev->stats.rx_frame_errors++;
+		if (status & XMR_FS_FCS_ERR)
+			dev->stats.rx_crc_errors++;
+	} else {
+		if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE))
+			dev->stats.rx_length_errors++;
+		if (status & GMR_FS_FRAGMENT)
+			dev->stats.rx_frame_errors++;
+		if (status & GMR_FS_CRC_ERR)
+			dev->stats.rx_crc_errors++;
+	}
+
+resubmit:
+	skge_rx_reuse(e, skge->rx_buf_size);
+	return NULL;
+}
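+
+/* Copy-break note: frames shorter than RX_COPY_THRESHOLD are copied
+ * into a small freshly allocated skb so the large mapped receive
+ * buffer can be resubmitted untouched; longer frames instead swap a
+ * new buffer into the ring and pass the original one up the stack,
+ * trading an allocation for the copy.
+ */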
+
+/* Free all buffers in Tx ring which are no longer owned by device */
+static void skge_tx_done(struct net_device *dev)
+{
+	struct skge_port *skge = netdev_priv(dev);
+	struct skge_ring *ring = &skge->tx_ring;
+	struct skge_element *e;
+	unsigned int bytes_compl = 0, pkts_compl = 0;
+
+	skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
+
+	for (e = ring->to_clean; e != ring->to_use; e = e->next) {
+		u32 control = ((const struct skge_tx_desc *) e->desc)->control;
+
+		if (control & BMU_OWN)
+			break;
+
+		skge_tx_unmap(skge->hw->pdev, e, control);
+
+		if (control & BMU_EOF) {
+			netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev,
+				     "tx done slot %td\n",
+				     e - skge->tx_ring.start);
+
+			pkts_compl++;
+			bytes_compl += e->skb->len;
+
+			dev_consume_skb_any(e->skb);
+		}
+	}
+	netdev_completed_queue(dev, pkts_compl, bytes_compl);
+	skge->tx_ring.to_clean = e;
+
+	/* Can run lockless until we need to synchronize to restart queue. */
+	smp_mb();
+
+	if (unlikely(netif_queue_stopped(dev) &&
+		     skge_avail(&skge->tx_ring) > TX_LOW_WATER)) {
+		netif_tx_lock(dev);
+		if (unlikely(netif_queue_stopped(dev) &&
+			     skge_avail(&skge->tx_ring) > TX_LOW_WATER)) {
+			netif_wake_queue(dev);
+		}
+		netif_tx_unlock(dev);
+	}
+}
+
+static int skge_poll(struct napi_struct *napi, int to_do)
+{
+	struct skge_port *skge = container_of(napi, struct skge_port, napi);
+	struct net_device *dev = skge->netdev;
+	struct skge_hw *hw = skge->hw;
+	struct skge_ring *ring = &skge->rx_ring;
+	struct skge_element *e;
+	int work_done = 0;
+
+	skge_tx_done(dev);
+
+	skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
+
+	for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) {
+		struct skge_rx_desc *rd = e->desc;
+		struct sk_buff *skb;
+		u32 control;
+
+		rmb();
+		control = rd->control;
+		if (control & BMU_OWN)
+			break;
+
+		skb = skge_rx_get(dev, e, control, rd->status, rd->csum2);
+		if (likely(skb)) {
+			napi_gro_receive(napi, skb);
+			++work_done;
+		}
+	}
+	ring->to_clean = e;
+
+	/* restart receiver */
+	wmb();
+	skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START);
+
+	if (work_done < to_do) {
+		unsigned long flags;
+
+		napi_gro_flush(napi, false);
+		spin_lock_irqsave(&hw->hw_lock, flags);
+		__napi_complete(napi);
+		hw->intr_mask |= napimask[skge->port];
+		skge_write32(hw, B0_IMSK, hw->intr_mask);
+		skge_read32(hw, B0_IMSK);
+		spin_unlock_irqrestore(&hw->hw_lock, flags);
+	}
+
+	return work_done;
+}
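+
+/* NAPI completion note: the port's interrupt sources are unmasked only
+ * after __napi_complete(), under hw_lock, and the B0_IMSK read back
+ * flushes the posted write; otherwise a device interrupt could arrive
+ * for a NAPI context that still appears scheduled.
+ */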
+
+/* Parity errors seem to happen when a Genesis board is connected to a
+ * switch with no other ports present.  Heartbeat error?
+ */
+static void skge_mac_parity(struct skge_hw *hw, int port)
+{
+	struct net_device *dev = hw->dev[port];
+
+	++dev->stats.tx_heartbeat_errors;
+
+	if (is_genesis(hw))
+		skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
+			     MFF_CLR_PERR);
+	else
+		/* HW-Bug #8: cleared by GMF_CLI_TX_FC instead of GMF_CLI_TX_PE */
+		skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T),
+			    (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0)
+			    ? GMF_CLI_TX_FC : GMF_CLI_TX_PE);
+}
+
+static void skge_mac_intr(struct skge_hw *hw, int port)
+{
+	if (is_genesis(hw))
+		genesis_mac_intr(hw, port);
+	else
+		yukon_mac_intr(hw, port);
+}
+
+/* Handle device specific framing and timeout interrupts */
+static void skge_error_irq(struct skge_hw *hw)
+{
+	struct pci_dev *pdev = hw->pdev;
+	u32 hwstatus = skge_read32(hw, B0_HWE_ISRC);
+
+	if (is_genesis(hw)) {
+		/* clear xmac errors */
+		if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1))
+			skge_write16(hw, RX_MFF_CTRL1, MFF_CLR_INSTAT);
+		if (hwstatus & (IS_NO_STAT_M2|IS_NO_TIST_M2))
+			skge_write16(hw, RX_MFF_CTRL2, MFF_CLR_INSTAT);
+	} else {
+		/* Timestamp (unused) overflow */
+		if (hwstatus & IS_IRQ_TIST_OV)
+			skge_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
+	}
+
+	if (hwstatus & IS_RAM_RD_PAR) {
+		dev_err(&pdev->dev, "Ram read data parity error\n");
+		skge_write16(hw, B3_RI_CTRL, RI_CLR_RD_PERR);
+	}
+
+	if (hwstatus & IS_RAM_WR_PAR) {
+		dev_err(&pdev->dev, "Ram write data parity error\n");
+		skge_write16(hw, B3_RI_CTRL, RI_CLR_WR_PERR);
+	}
+
+	if (hwstatus & IS_M1_PAR_ERR)
+		skge_mac_parity(hw, 0);
+
+	if (hwstatus & IS_M2_PAR_ERR)
+		skge_mac_parity(hw, 1);
+
+	if (hwstatus & IS_R1_PAR_ERR) {
+		dev_err(&pdev->dev, "%s: receive queue parity error\n",
+			hw->dev[0]->name);
+		skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P);
+	}
+
+	if (hwstatus & IS_R2_PAR_ERR) {
+		dev_err(&pdev->dev, "%s: receive queue parity error\n",
+			hw->dev[1]->name);
+		skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P);
+	}
+
+	if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) {
+		u16 pci_status, pci_cmd;
+
+		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
+		pci_read_config_word(pdev, PCI_STATUS, &pci_status);
+
+		dev_err(&pdev->dev, "PCI error cmd=%#x status=%#x\n",
+			pci_cmd, pci_status);
+
+		/* Write the error bits back to clear them. */
+		pci_status &= PCI_STATUS_ERROR_BITS;
+		skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+		pci_write_config_word(pdev, PCI_COMMAND,
+				      pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
+		pci_write_config_word(pdev, PCI_STATUS, pci_status);
+		skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+
+		/* If the error is still set, just ignore it */
+		hwstatus = skge_read32(hw, B0_HWE_ISRC);
+		if (hwstatus & IS_IRQ_STAT) {
+			dev_warn(&hw->pdev->dev, "unable to clear error (so ignoring it)\n");
+			hw->intr_mask &= ~IS_HW_ERR;
+		}
+	}
+}
+
+/*
+ * Interrupts from the PHY are handled in a tasklet (softirq)
+ * because accessing PHY registers requires a spin wait, which might
+ * cause excessive interrupt latency.
+ */
+static void skge_extirq(unsigned long arg)
+{
+	struct skge_hw *hw = (struct skge_hw *) arg;
+	int port;
+
+	for (port = 0; port < hw->ports; port++) {
+		struct net_device *dev = hw->dev[port];
+
+		if (netif_running(dev)) {
+			struct skge_port *skge = netdev_priv(dev);
+
+			spin_lock(&hw->phy_lock);
+			if (!is_genesis(hw))
+				yukon_phy_intr(skge);
+			else if (hw->phy_type == SK_PHY_BCOM)
+				bcom_phy_intr(skge);
+			spin_unlock(&hw->phy_lock);
+		}
+	}
+
+	spin_lock_irq(&hw->hw_lock);
+	hw->intr_mask |= IS_EXT_REG;
+	skge_write32(hw, B0_IMSK, hw->intr_mask);
+	skge_read32(hw, B0_IMSK);
+	spin_unlock_irq(&hw->hw_lock);
+}
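+
+/* Note: Genesis boards using the XMAC-internal PHY never get here;
+ * IS_EXT_REG is not unmasked for them in skge_reset(), and their link
+ * state is instead polled from the link_timer set up in skge_devinit().
+ */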
+
+static irqreturn_t skge_intr(int irq, void *dev_id)
+{
+	struct skge_hw *hw = dev_id;
+	u32 status;
+	int handled = 0;
+
+	spin_lock(&hw->hw_lock);
+	/* Reading this register masks IRQ */
+	status = skge_read32(hw, B0_SP_ISRC);
+	if (status == 0 || status == ~0)
+		goto out;
+
+	handled = 1;
+	status &= hw->intr_mask;
+	if (status & IS_EXT_REG) {
+		hw->intr_mask &= ~IS_EXT_REG;
+		tasklet_schedule(&hw->phy_task);
+	}
+
+	if (status & (IS_XA1_F|IS_R1_F)) {
+		struct skge_port *skge = netdev_priv(hw->dev[0]);
+		hw->intr_mask &= ~(IS_XA1_F|IS_R1_F);
+		napi_schedule(&skge->napi);
+	}
+
+	if (status & IS_PA_TO_TX1)
+		skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1);
+
+	if (status & IS_PA_TO_RX1) {
+		++hw->dev[0]->stats.rx_over_errors;
+		skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1);
+	}
+
+	if (status & IS_MAC1)
+		skge_mac_intr(hw, 0);
+
+	if (hw->dev[1]) {
+		struct skge_port *skge = netdev_priv(hw->dev[1]);
+
+		if (status & (IS_XA2_F|IS_R2_F)) {
+			hw->intr_mask &= ~(IS_XA2_F|IS_R2_F);
+			napi_schedule(&skge->napi);
+		}
+
+		if (status & IS_PA_TO_RX2) {
+			++hw->dev[1]->stats.rx_over_errors;
+			skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2);
+		}
+
+		if (status & IS_PA_TO_TX2)
+			skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2);
+
+		if (status & IS_MAC2)
+			skge_mac_intr(hw, 1);
+	}
+
+	if (status & IS_HW_ERR)
+		skge_error_irq(hw);
+out:
+	skge_write32(hw, B0_IMSK, hw->intr_mask);
+	skge_read32(hw, B0_IMSK);
+	spin_unlock(&hw->hw_lock);
+
+	return IRQ_RETVAL(handled);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void skge_netpoll(struct net_device *dev)
+{
+	struct skge_port *skge = netdev_priv(dev);
+
+	disable_irq(dev->irq);
+	skge_intr(dev->irq, skge->hw);
+	enable_irq(dev->irq);
+}
+#endif
+
+static int skge_set_mac_address(struct net_device *dev, void *p)
+{
+	struct skge_port *skge = netdev_priv(dev);
+	struct skge_hw *hw = skge->hw;
+	unsigned port = skge->port;
+	const struct sockaddr *addr = p;
+	u16 ctrl;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+
+	if (!netif_running(dev)) {
+		memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN);
+		memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN);
+	} else {
+		/* disable Rx */
+		spin_lock_bh(&hw->phy_lock);
+		ctrl = gma_read16(hw, port, GM_GP_CTRL);
+		gma_write16(hw, port, GM_GP_CTRL, ctrl & ~GM_GPCR_RX_ENA);
+
+		memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN);
+		memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN);
+
+		if (is_genesis(hw))
+			xm_outaddr(hw, port, XM_SA, dev->dev_addr);
+		else {
+			gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
+			gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);
+		}
+
+		gma_write16(hw, port, GM_GP_CTRL, ctrl);
+		spin_unlock_bh(&hw->phy_lock);
+	}
+
+	return 0;
+}
+
+static const struct {
+	u8 id;
+	const char *name;
+} skge_chips[] = {
+	{ CHIP_ID_GENESIS,	"Genesis" },
+	{ CHIP_ID_YUKON,	 "Yukon" },
+	{ CHIP_ID_YUKON_LITE,	 "Yukon-Lite"},
+	{ CHIP_ID_YUKON_LP,	 "Yukon-LP"},
+};
+
+static const char *skge_board_name(const struct skge_hw *hw)
+{
+	int i;
+	static char buf[16];
+
+	for (i = 0; i < ARRAY_SIZE(skge_chips); i++)
+		if (skge_chips[i].id == hw->chip_id)
+			return skge_chips[i].name;
+
+	snprintf(buf, sizeof buf, "chipid 0x%x", hw->chip_id);
+	return buf;
+}
+
+
+/*
+ * Setup the board data structure, but don't bring up
+ * the port(s)
+ */
+static int skge_reset(struct skge_hw *hw)
+{
+	u32 reg;
+	u16 ctst, pci_status;
+	u8 t8, mac_cfg, pmd_type;
+	int i;
+
+	ctst = skge_read16(hw, B0_CTST);
+
+	/* do a SW reset */
+	skge_write8(hw, B0_CTST, CS_RST_SET);
+	skge_write8(hw, B0_CTST, CS_RST_CLR);
+
+	/* clear PCI errors, if any */
+	skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+	skge_write8(hw, B2_TST_CTRL2, 0);
+
+	pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status);
+	pci_write_config_word(hw->pdev, PCI_STATUS,
+			      pci_status | PCI_STATUS_ERROR_BITS);
+	skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+	skge_write8(hw, B0_CTST, CS_MRST_CLR);
+
+	/* restore CLK_RUN bits (for Yukon-Lite) */
+	skge_write16(hw, B0_CTST,
+		     ctst & (CS_CLK_RUN_HOT|CS_CLK_RUN_RST|CS_CLK_RUN_ENA));
+
+	hw->chip_id = skge_read8(hw, B2_CHIP_ID);
+	hw->phy_type = skge_read8(hw, B2_E_1) & 0xf;
+	pmd_type = skge_read8(hw, B2_PMD_TYP);
+	hw->copper = (pmd_type == 'T' || pmd_type == '1');
+
+	switch (hw->chip_id) {
+	case CHIP_ID_GENESIS:
+#ifdef CONFIG_SKGE_GENESIS
+		switch (hw->phy_type) {
+		case SK_PHY_XMAC:
+			hw->phy_addr = PHY_ADDR_XMAC;
+			break;
+		case SK_PHY_BCOM:
+			hw->phy_addr = PHY_ADDR_BCOM;
+			break;
+		default:
+			dev_err(&hw->pdev->dev, "unsupported phy type 0x%x\n",
+			       hw->phy_type);
+			return -EOPNOTSUPP;
+		}
+		break;
+#else
+		dev_err(&hw->pdev->dev, "Genesis chip detected but not configured\n");
+		return -EOPNOTSUPP;
+#endif
+
+	case CHIP_ID_YUKON:
+	case CHIP_ID_YUKON_LITE:
+	case CHIP_ID_YUKON_LP:
+		if (hw->phy_type < SK_PHY_MARV_COPPER && pmd_type != 'S')
+			hw->copper = 1;
+
+		hw->phy_addr = PHY_ADDR_MARV;
+		break;
+
+	default:
+		dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n",
+		       hw->chip_id);
+		return -EOPNOTSUPP;
+	}
+
+	mac_cfg = skge_read8(hw, B2_MAC_CFG);
+	hw->ports = (mac_cfg & CFG_SNG_MAC) ? 1 : 2;
+	hw->chip_rev = (mac_cfg & CFG_CHIP_R_MSK) >> 4;
+
+	/* read the adapter's RAM size */
+	t8 = skge_read8(hw, B2_E_0);
+	if (is_genesis(hw)) {
+		if (t8 == 3) {
+			/* special case: 4 x 64k x 36, offset = 0x80000 */
+			hw->ram_size = 0x100000;
+			hw->ram_offset = 0x80000;
+		} else
+			hw->ram_size = t8 * 512;
+	} else if (t8 == 0)
+		hw->ram_size = 0x20000;
+	else
+		hw->ram_size = t8 * 4096;
+
+	hw->intr_mask = IS_HW_ERR;
+
+	/* Use the PHY IRQ for all but fiber-based Genesis boards */
+	if (!(is_genesis(hw) && hw->phy_type == SK_PHY_XMAC))
+		hw->intr_mask |= IS_EXT_REG;
+
+	if (is_genesis(hw))
+		genesis_init(hw);
+	else {
+		/* switch power to VCC (WA for VAUX problem) */
+		skge_write8(hw, B0_POWER_CTRL,
+			    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
+
+		/* avoid boards with stuck hardware error bits */
+		if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) &&
+		    (skge_read32(hw, B0_HWE_ISRC) & IS_IRQ_SENSOR)) {
+			dev_warn(&hw->pdev->dev, "stuck hardware sensor bit\n");
+			hw->intr_mask &= ~IS_HW_ERR;
+		}
+
+		/* Clear PHY COMA */
+		skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+		pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg);
+		reg &= ~PCI_PHY_COMA;
+		pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg);
+		skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+
+		for (i = 0; i < hw->ports; i++) {
+			skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
+			skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
+		}
+	}
+
+	/* turn off hardware timer (unused) */
+	skge_write8(hw, B2_TI_CTRL, TIM_STOP);
+	skge_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);
+	skge_write8(hw, B0_LED, LED_STAT_ON);
+
+	/* enable the Tx Arbiters */
+	for (i = 0; i < hw->ports; i++)
+		skge_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);
+
+	/* Initialize ram interface */
+	skge_write16(hw, B3_RI_CTRL, RI_RST_CLR);
+
+	skge_write8(hw, B3_RI_WTO_R1, SK_RI_TO_53);
+	skge_write8(hw, B3_RI_WTO_XA1, SK_RI_TO_53);
+	skge_write8(hw, B3_RI_WTO_XS1, SK_RI_TO_53);
+	skge_write8(hw, B3_RI_RTO_R1, SK_RI_TO_53);
+	skge_write8(hw, B3_RI_RTO_XA1, SK_RI_TO_53);
+	skge_write8(hw, B3_RI_RTO_XS1, SK_RI_TO_53);
+	skge_write8(hw, B3_RI_WTO_R2, SK_RI_TO_53);
+	skge_write8(hw, B3_RI_WTO_XA2, SK_RI_TO_53);
+	skge_write8(hw, B3_RI_WTO_XS2, SK_RI_TO_53);
+	skge_write8(hw, B3_RI_RTO_R2, SK_RI_TO_53);
+	skge_write8(hw, B3_RI_RTO_XA2, SK_RI_TO_53);
+	skge_write8(hw, B3_RI_RTO_XS2, SK_RI_TO_53);
+
+	skge_write32(hw, B0_HWE_IMSK, IS_ERR_MSK);
+
+	/* Set interrupt moderation for transmit only;
+	 * receive interrupts are avoided by NAPI
+	 */
+	skge_write32(hw, B2_IRQM_MSK, IS_XA1_F|IS_XA2_F);
+	skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100));
+	skge_write32(hw, B2_IRQM_CTRL, TIM_START);
+
+	/* Leave irq disabled until first port is brought up. */
+	skge_write32(hw, B0_IMSK, 0);
+
+	for (i = 0; i < hw->ports; i++) {
+		if (is_genesis(hw))
+			genesis_reset(hw, i);
+		else
+			yukon_reset(hw, i);
+	}
+
+	return 0;
+}
+
+
+#ifdef CONFIG_SKGE_DEBUG
+
+static struct dentry *skge_debug;
+
+static int skge_debug_show(struct seq_file *seq, void *v)
+{
+	struct net_device *dev = seq->private;
+	const struct skge_port *skge = netdev_priv(dev);
+	const struct skge_hw *hw = skge->hw;
+	const struct skge_element *e;
+
+	if (!netif_running(dev))
+		return -ENETDOWN;
+
+	seq_printf(seq, "IRQ src=%x mask=%x\n", skge_read32(hw, B0_ISRC),
+		   skge_read32(hw, B0_IMSK));
+
+	seq_printf(seq, "Tx Ring: (%d)\n", skge_avail(&skge->tx_ring));
+	for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
+		const struct skge_tx_desc *t = e->desc;
+		seq_printf(seq, "%#x dma=%#x%08x %#x csum=%#x/%x/%x\n",
+			   t->control, t->dma_hi, t->dma_lo, t->status,
+			   t->csum_offs, t->csum_write, t->csum_start);
+	}
+
+	seq_printf(seq, "\nRx Ring:\n");
+	for (e = skge->rx_ring.to_clean; ; e = e->next) {
+		const struct skge_rx_desc *r = e->desc;
+
+		if (r->control & BMU_OWN)
+			break;
+
+		seq_printf(seq, "%#x dma=%#x%08x %#x %#x csum=%#x/%x\n",
+			   r->control, r->dma_hi, r->dma_lo, r->status,
+			   r->timestamp, r->csum1, r->csum1_start);
+	}
+
+	return 0;
+}
+
+static int skge_debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, skge_debug_show, inode->i_private);
+}
+
+static const struct file_operations skge_debug_fops = {
+	.owner		= THIS_MODULE,
+	.open		= skge_debug_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+/*
+ * Use network device events to create/remove/rename
+ * debugfs file entries
+ */
+static int skge_device_event(struct notifier_block *unused,
+			     unsigned long event, void *ptr)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct skge_port *skge;
+	struct dentry *d;
+
+	if (dev->netdev_ops->ndo_open != &skge_up || !skge_debug)
+		goto done;
+
+	skge = netdev_priv(dev);
+	switch (event) {
+	case NETDEV_CHANGENAME:
+		if (skge->debugfs) {
+			d = debugfs_rename(skge_debug, skge->debugfs,
+					   skge_debug, dev->name);
+			if (d)
+				skge->debugfs = d;
+			else {
+				netdev_info(dev, "rename failed\n");
+				debugfs_remove(skge->debugfs);
+			}
+		}
+		break;
+
+	case NETDEV_GOING_DOWN:
+		if (skge->debugfs) {
+			debugfs_remove(skge->debugfs);
+			skge->debugfs = NULL;
+		}
+		break;
+
+	case NETDEV_UP:
+		d = debugfs_create_file(dev->name, S_IRUGO,
+					skge_debug, dev,
+					&skge_debug_fops);
+		if (!d || IS_ERR(d))
+			netdev_info(dev, "debugfs create failed\n");
+		else
+			skge->debugfs = d;
+		break;
+	}
+
+done:
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block skge_notifier = {
+	.notifier_call = skge_device_event,
+};
+
+
+static __init void skge_debug_init(void)
+{
+	struct dentry *ent;
+
+	ent = debugfs_create_dir("skge", NULL);
+	if (!ent || IS_ERR(ent)) {
+		pr_info("debugfs create directory failed\n");
+		return;
+	}
+
+	skge_debug = ent;
+	register_netdevice_notifier(&skge_notifier);
+}
+
+static __exit void skge_debug_cleanup(void)
+{
+	if (skge_debug) {
+		unregister_netdevice_notifier(&skge_notifier);
+		debugfs_remove(skge_debug);
+		skge_debug = NULL;
+	}
+}
+
+#else
+#define skge_debug_init()
+#define skge_debug_cleanup()
+#endif
+
+static const struct net_device_ops skge_netdev_ops = {
+	.ndo_open		= skge_up,
+	.ndo_stop		= skge_down,
+	.ndo_start_xmit		= skge_xmit_frame,
+	.ndo_do_ioctl		= skge_ioctl,
+	.ndo_get_stats		= skge_get_stats,
+	.ndo_tx_timeout		= skge_tx_timeout,
+	.ndo_change_mtu		= skge_change_mtu,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_rx_mode	= skge_set_multicast,
+	.ndo_set_mac_address	= skge_set_mac_address,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= skge_netpoll,
+#endif
+};
+
+
+/* Initialize network device */
+static struct net_device *skge_devinit(struct skge_hw *hw, int port,
+				       int highmem)
+{
+	struct skge_port *skge;
+	struct net_device *dev = alloc_etherdev(sizeof(*skge));
+
+	if (!dev)
+		return NULL;
+
+	SET_NETDEV_DEV(dev, &hw->pdev->dev);
+	dev->netdev_ops = &skge_netdev_ops;
+	dev->ethtool_ops = &skge_ethtool_ops;
+	dev->watchdog_timeo = TX_WATCHDOG;
+	dev->irq = hw->pdev->irq;
+
+	if (highmem)
+		dev->features |= NETIF_F_HIGHDMA;
+
+	skge = netdev_priv(dev);
+	netif_napi_add(dev, &skge->napi, skge_poll, NAPI_WEIGHT);
+	skge->netdev = dev;
+	skge->hw = hw;
+	skge->msg_enable = netif_msg_init(debug, default_msg);
+
+	skge->tx_ring.count = DEFAULT_TX_RING_SIZE;
+	skge->rx_ring.count = DEFAULT_RX_RING_SIZE;
+
+	/* Auto speed and flow control */
+	skge->autoneg = AUTONEG_ENABLE;
+	skge->flow_control = FLOW_MODE_SYM_OR_REM;
+	skge->duplex = -1;
+	skge->speed = -1;
+	skge->advertising = skge_supported_modes(hw);
+
+	if (device_can_wakeup(&hw->pdev->dev)) {
+		skge->wol = wol_supported(hw) & WAKE_MAGIC;
+		device_set_wakeup_enable(&hw->pdev->dev, skge->wol);
+	}
+
+	hw->dev[port] = dev;
+
+	skge->port = port;
+
+	/* Only used for Genesis XMAC */
+	if (is_genesis(hw))
+		setup_timer(&skge->link_timer, xm_link_timer, (unsigned long) skge);
+	else {
+		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
+		                   NETIF_F_RXCSUM;
+		dev->features |= dev->hw_features;
+	}
+
+	/* read the mac address */
+	memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
+
+	return dev;
+}
+
+static void skge_show_addr(struct net_device *dev)
+{
+	const struct skge_port *skge = netdev_priv(dev);
+
+	netif_info(skge, probe, skge->netdev, "addr %pM\n", dev->dev_addr);
+}
+
+static int only_32bit_dma;
+
+static int skge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct net_device *dev, *dev1;
+	struct skge_hw *hw;
+	int err, using_dac = 0;
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "cannot enable PCI device\n");
+		goto err_out;
+	}
+
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err) {
+		dev_err(&pdev->dev, "cannot obtain PCI resources\n");
+		goto err_out_disable_pdev;
+	}
+
+	pci_set_master(pdev);
+
+	if (!only_32bit_dma && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		using_dac = 1;
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	} else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+		using_dac = 0;
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+	}
+
+	if (err) {
+		dev_err(&pdev->dev, "no usable DMA configuration\n");
+		goto err_out_free_regions;
+	}
+
+#ifdef __BIG_ENDIAN
+	/* byte swap descriptors in hardware */
+	{
+		u32 reg;
+
+		pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
+		reg |= PCI_REV_DESC;
+		pci_write_config_dword(pdev, PCI_DEV_REG2, reg);
+	}
+#endif
+
+	err = -ENOMEM;
+	/* extra space for the IRQ name, e.g. "skge@pci:0000:04:00.0" */
+	hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:")
+		     + strlen(pci_name(pdev)) + 1, GFP_KERNEL);
+	if (!hw)
+		goto err_out_free_regions;
+
+	sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));
+
+	hw->pdev = pdev;
+	spin_lock_init(&hw->hw_lock);
+	spin_lock_init(&hw->phy_lock);
+	tasklet_init(&hw->phy_task, skge_extirq, (unsigned long) hw);
+
+	hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
+	if (!hw->regs) {
+		dev_err(&pdev->dev, "cannot map device registers\n");
+		goto err_out_free_hw;
+	}
+
+	err = skge_reset(hw);
+	if (err)
+		goto err_out_iounmap;
+
+	pr_info("%s addr 0x%llx irq %d chip %s rev %d\n",
+		DRV_VERSION,
+		(unsigned long long)pci_resource_start(pdev, 0), pdev->irq,
+		skge_board_name(hw), hw->chip_rev);
+
+	dev = skge_devinit(hw, 0, using_dac);
+	if (!dev) {
+		err = -ENOMEM;
+		goto err_out_led_off;
+	}
+
+	/* Some motherboards are broken and have an all-zero address in ROM. */
+	if (!is_valid_ether_addr(dev->dev_addr))
+		dev_warn(&pdev->dev, "bad (zero?) ethernet address in rom\n");
+
+	err = register_netdev(dev);
+	if (err) {
+		dev_err(&pdev->dev, "cannot register net device\n");
+		goto err_out_free_netdev;
+	}
+
+	skge_show_addr(dev);
+
+	if (hw->ports > 1) {
+		dev1 = skge_devinit(hw, 1, using_dac);
+		if (!dev1) {
+			err = -ENOMEM;
+			goto err_out_unregister;
+		}
+
+		err = register_netdev(dev1);
+		if (err) {
+			dev_err(&pdev->dev, "cannot register second net device\n");
+			goto err_out_free_dev1;
+		}
+
+		err = request_irq(pdev->irq, skge_intr, IRQF_SHARED,
+				  hw->irq_name, hw);
+		if (err) {
+			dev_err(&pdev->dev, "cannot assign irq %d\n",
+				pdev->irq);
+			goto err_out_unregister_dev1;
+		}
+
+		skge_show_addr(dev1);
+	}
+	pci_set_drvdata(pdev, hw);
+
+	return 0;
+
+err_out_unregister_dev1:
+	unregister_netdev(dev1);
+err_out_free_dev1:
+	free_netdev(dev1);
+err_out_unregister:
+	unregister_netdev(dev);
+err_out_free_netdev:
+	free_netdev(dev);
+err_out_led_off:
+	skge_write16(hw, B0_LED, LED_STAT_OFF);
+err_out_iounmap:
+	iounmap(hw->regs);
+err_out_free_hw:
+	kfree(hw);
+err_out_free_regions:
+	pci_release_regions(pdev);
+err_out_disable_pdev:
+	pci_disable_device(pdev);
+err_out:
+	return err;
+}
+
+static void skge_remove(struct pci_dev *pdev)
+{
+	struct skge_hw *hw  = pci_get_drvdata(pdev);
+	struct net_device *dev0, *dev1;
+
+	if (!hw)
+		return;
+
+	dev1 = hw->dev[1];
+	if (dev1)
+		unregister_netdev(dev1);
+	dev0 = hw->dev[0];
+	unregister_netdev(dev0);
+
+	tasklet_kill(&hw->phy_task);
+
+	spin_lock_irq(&hw->hw_lock);
+	hw->intr_mask = 0;
+
+	if (hw->ports > 1) {
+		skge_write32(hw, B0_IMSK, 0);
+		skge_read32(hw, B0_IMSK);
+	}
+	spin_unlock_irq(&hw->hw_lock);
+	spin_unlock_irq(&hw->hw_lock);
+
+	skge_write16(hw, B0_LED, LED_STAT_OFF);
+	skge_write8(hw, B0_CTST, CS_RST_SET);
+
+	if (hw->ports > 1)
+		free_irq(pdev->irq, hw);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	if (dev1)
+		free_netdev(dev1);
+	free_netdev(dev0);
+
+	iounmap(hw->regs);
+	kfree(hw);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int skge_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct skge_hw *hw  = pci_get_drvdata(pdev);
+	int i;
+
+	if (!hw)
+		return 0;
+
+	for (i = 0; i < hw->ports; i++) {
+		struct net_device *dev = hw->dev[i];
+		struct skge_port *skge = netdev_priv(dev);
+
+		if (netif_running(dev))
+			skge_down(dev);
+
+		if (skge->wol)
+			skge_wol_init(skge);
+	}
+
+	skge_write32(hw, B0_IMSK, 0);
+
+	return 0;
+}
+
+static int skge_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct skge_hw *hw  = pci_get_drvdata(pdev);
+	int i, err;
+
+	if (!hw)
+		return 0;
+
+	err = skge_reset(hw);
+	if (err)
+		goto out;
+
+	for (i = 0; i < hw->ports; i++) {
+		struct net_device *dev = hw->dev[i];
+
+		if (netif_running(dev)) {
+			err = skge_up(dev);
+
+			if (err) {
+				netdev_err(dev, "could not up: %d\n", err);
+				dev_close(dev);
+				goto out;
+			}
+		}
+	}
+out:
+	return err;
+}
+
+static SIMPLE_DEV_PM_OPS(skge_pm_ops, skge_suspend, skge_resume);
+#define SKGE_PM_OPS (&skge_pm_ops)
+
+#else
+
+#define SKGE_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
+static void skge_shutdown(struct pci_dev *pdev)
+{
+	struct skge_hw *hw  = pci_get_drvdata(pdev);
+	int i;
+
+	if (!hw)
+		return;
+
+	for (i = 0; i < hw->ports; i++) {
+		struct net_device *dev = hw->dev[i];
+		struct skge_port *skge = netdev_priv(dev);
+
+		if (skge->wol)
+			skge_wol_init(skge);
+	}
+
+	pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
+	pci_set_power_state(pdev, PCI_D3hot);
+}
+
+static struct pci_driver skge_driver = {
+	.name =         DRV_NAME,
+	.id_table =     skge_id_table,
+	.probe =        skge_probe,
+	.remove =       skge_remove,
+	.shutdown =	skge_shutdown,
+	.driver.pm =	SKGE_PM_OPS,
+};
+
+static struct dmi_system_id skge_32bit_dma_boards[] = {
+	{
+		.ident = "Gigabyte nForce boards",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co"),
+			DMI_MATCH(DMI_BOARD_NAME, "nForce"),
+		},
+	},
+	{
+		.ident = "ASUS P5NSLI",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+			DMI_MATCH(DMI_BOARD_NAME, "P5NSLI")
+		},
+	},
+	{
+		.ident = "FUJITSU SIEMENS A8NE-FM",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."),
+			DMI_MATCH(DMI_BOARD_NAME, "A8NE-FM")
+		},
+	},
+	{}
+};
+
+static int __init skge_init_module(void)
+{
+	if (dmi_check_system(skge_32bit_dma_boards))
+		only_32bit_dma = 1;
+	skge_debug_init();
+	return pci_register_driver(&skge_driver);
+}
+
+static void __exit skge_cleanup_module(void)
+{
+	pci_unregister_driver(&skge_driver);
+	skge_debug_cleanup();
+}
+
+module_init(skge_init_module);
+module_exit(skge_cleanup_module);
diff --git a/drivers/net/ethernet/marvell/skge.h b/drivers/net/ethernet/marvell/skge.h
new file mode 100644
index 0000000..a2eb341
--- /dev/null
+++ b/drivers/net/ethernet/marvell/skge.h
@@ -0,0 +1,2584 @@
+/*
+ * Definitions for the new Marvell Yukon / SysKonnect driver.
+ */
+#ifndef _SKGE_H
+#define _SKGE_H
+#include <linux/interrupt.h>
+
+/* PCI config registers */
+#define PCI_DEV_REG1	0x40
+#define  PCI_PHY_COMA	0x8000000
+#define  PCI_VIO	0x2000000
+
+#define PCI_DEV_REG2	0x44
+#define  PCI_VPD_ROM_SZ	7L<<14	/* VPD ROM size 0=256, 1=512, ... */
+#define  PCI_REV_DESC	1<<2	/* Reverse Descriptor bytes */
+
+#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
+			       PCI_STATUS_SIG_SYSTEM_ERROR | \
+			       PCI_STATUS_REC_MASTER_ABORT | \
+			       PCI_STATUS_REC_TARGET_ABORT | \
+			       PCI_STATUS_PARITY)
+
+enum csr_regs {
+	B0_RAP	= 0x0000,
+	B0_CTST	= 0x0004,
+	B0_LED	= 0x0006,
+	B0_POWER_CTRL	= 0x0007,
+	B0_ISRC	= 0x0008,
+	B0_IMSK	= 0x000c,
+	B0_HWE_ISRC	= 0x0010,
+	B0_HWE_IMSK	= 0x0014,
+	B0_SP_ISRC	= 0x0018,
+	B0_XM1_IMSK	= 0x0020,
+	B0_XM1_ISRC	= 0x0028,
+	B0_XM1_PHY_ADDR	= 0x0030,
+	B0_XM1_PHY_DATA	= 0x0034,
+	B0_XM2_IMSK	= 0x0040,
+	B0_XM2_ISRC	= 0x0048,
+	B0_XM2_PHY_ADDR	= 0x0050,
+	B0_XM2_PHY_DATA	= 0x0054,
+	B0_R1_CSR	= 0x0060,
+	B0_R2_CSR	= 0x0064,
+	B0_XS1_CSR	= 0x0068,
+	B0_XA1_CSR	= 0x006c,
+	B0_XS2_CSR	= 0x0070,
+	B0_XA2_CSR	= 0x0074,
+
+	B2_MAC_1	= 0x0100,
+	B2_MAC_2	= 0x0108,
+	B2_MAC_3	= 0x0110,
+	B2_CONN_TYP	= 0x0118,
+	B2_PMD_TYP	= 0x0119,
+	B2_MAC_CFG	= 0x011a,
+	B2_CHIP_ID	= 0x011b,
+	B2_E_0		= 0x011c,
+	B2_E_1		= 0x011d,
+	B2_E_2		= 0x011e,
+	B2_E_3		= 0x011f,
+	B2_FAR		= 0x0120,
+	B2_FDP		= 0x0124,
+	B2_LD_CTRL	= 0x0128,
+	B2_LD_TEST	= 0x0129,
+	B2_TI_INI	= 0x0130,
+	B2_TI_VAL	= 0x0134,
+	B2_TI_CTRL	= 0x0138,
+	B2_TI_TEST	= 0x0139,
+	B2_IRQM_INI	= 0x0140,
+	B2_IRQM_VAL	= 0x0144,
+	B2_IRQM_CTRL	= 0x0148,
+	B2_IRQM_TEST	= 0x0149,
+	B2_IRQM_MSK	= 0x014c,
+	B2_IRQM_HWE_MSK	= 0x0150,
+	B2_TST_CTRL1	= 0x0158,
+	B2_TST_CTRL2	= 0x0159,
+	B2_GP_IO	= 0x015c,
+	B2_I2C_CTRL	= 0x0160,
+	B2_I2C_DATA	= 0x0164,
+	B2_I2C_IRQ	= 0x0168,
+	B2_I2C_SW	= 0x016c,
+	B2_BSC_INI	= 0x0170,
+	B2_BSC_VAL	= 0x0174,
+	B2_BSC_CTRL	= 0x0178,
+	B2_BSC_STAT	= 0x0179,
+	B2_BSC_TST	= 0x017a,
+
+	B3_RAM_ADDR	= 0x0180,
+	B3_RAM_DATA_LO	= 0x0184,
+	B3_RAM_DATA_HI	= 0x0188,
+	B3_RI_WTO_R1	= 0x0190,
+	B3_RI_WTO_XA1	= 0x0191,
+	B3_RI_WTO_XS1	= 0x0192,
+	B3_RI_RTO_R1	= 0x0193,
+	B3_RI_RTO_XA1	= 0x0194,
+	B3_RI_RTO_XS1	= 0x0195,
+	B3_RI_WTO_R2	= 0x0196,
+	B3_RI_WTO_XA2	= 0x0197,
+	B3_RI_WTO_XS2	= 0x0198,
+	B3_RI_RTO_R2	= 0x0199,
+	B3_RI_RTO_XA2	= 0x019a,
+	B3_RI_RTO_XS2	= 0x019b,
+	B3_RI_TO_VAL	= 0x019c,
+	B3_RI_CTRL	= 0x01a0,
+	B3_RI_TEST	= 0x01a2,
+	B3_MA_TOINI_RX1	= 0x01b0,
+	B3_MA_TOINI_RX2	= 0x01b1,
+	B3_MA_TOINI_TX1	= 0x01b2,
+	B3_MA_TOINI_TX2	= 0x01b3,
+	B3_MA_TOVAL_RX1	= 0x01b4,
+	B3_MA_TOVAL_RX2	= 0x01b5,
+	B3_MA_TOVAL_TX1	= 0x01b6,
+	B3_MA_TOVAL_TX2	= 0x01b7,
+	B3_MA_TO_CTRL	= 0x01b8,
+	B3_MA_TO_TEST	= 0x01ba,
+	B3_MA_RCINI_RX1	= 0x01c0,
+	B3_MA_RCINI_RX2	= 0x01c1,
+	B3_MA_RCINI_TX1	= 0x01c2,
+	B3_MA_RCINI_TX2	= 0x01c3,
+	B3_MA_RCVAL_RX1	= 0x01c4,
+	B3_MA_RCVAL_RX2	= 0x01c5,
+	B3_MA_RCVAL_TX1	= 0x01c6,
+	B3_MA_RCVAL_TX2	= 0x01c7,
+	B3_MA_RC_CTRL	= 0x01c8,
+	B3_MA_RC_TEST	= 0x01ca,
+	B3_PA_TOINI_RX1	= 0x01d0,
+	B3_PA_TOINI_RX2	= 0x01d4,
+	B3_PA_TOINI_TX1	= 0x01d8,
+	B3_PA_TOINI_TX2	= 0x01dc,
+	B3_PA_TOVAL_RX1	= 0x01e0,
+	B3_PA_TOVAL_RX2	= 0x01e4,
+	B3_PA_TOVAL_TX1	= 0x01e8,
+	B3_PA_TOVAL_TX2	= 0x01ec,
+	B3_PA_CTRL	= 0x01f0,
+	B3_PA_TEST	= 0x01f2,
+};
+
+/*	B0_CTST			16 bit	Control/Status register */
+enum {
+	CS_CLK_RUN_HOT	= 1<<13,/* CLK_RUN hot m. (YUKON-Lite only) */
+	CS_CLK_RUN_RST	= 1<<12,/* CLK_RUN reset  (YUKON-Lite only) */
+	CS_CLK_RUN_ENA	= 1<<11,/* CLK_RUN enable (YUKON-Lite only) */
+	CS_VAUX_AVAIL	= 1<<10,/* VAUX available (YUKON only) */
+	CS_BUS_CLOCK	= 1<<9,	/* Bus Clock 0/1 = 33/66 MHz */
+	CS_BUS_SLOT_SZ	= 1<<8,	/* Slot Size 0/1 = 32/64 bit slot */
+	CS_ST_SW_IRQ	= 1<<7,	/* Set IRQ SW Request */
+	CS_CL_SW_IRQ	= 1<<6,	/* Clear IRQ SW Request */
+	CS_STOP_DONE	= 1<<5,	/* Stop Master is finished */
+	CS_STOP_MAST	= 1<<4,	/* Command Bit to stop the master */
+	CS_MRST_CLR	= 1<<3,	/* Clear Master reset	*/
+	CS_MRST_SET	= 1<<2,	/* Set Master reset	*/
+	CS_RST_CLR	= 1<<1,	/* Clear Software reset	*/
+	CS_RST_SET	= 1,	/* Set   Software reset	*/
+
+/*	B0_LED			 8 Bit	LED register */
+/* Bit  7.. 2:	reserved */
+	LED_STAT_ON	= 1<<1,	/* Status LED on	*/
+	LED_STAT_OFF	= 1,		/* Status LED off	*/
+
+/*	B0_POWER_CTRL	 8 Bit	Power Control reg (YUKON only) */
+	PC_VAUX_ENA	= 1<<7,	/* Switch VAUX Enable  */
+	PC_VAUX_DIS	= 1<<6,	/* Switch VAUX Disable */
+	PC_VCC_ENA	= 1<<5,	/* Switch VCC Enable  */
+	PC_VCC_DIS	= 1<<4,	/* Switch VCC Disable */
+	PC_VAUX_ON	= 1<<3,	/* Switch VAUX On  */
+	PC_VAUX_OFF	= 1<<2,	/* Switch VAUX Off */
+	PC_VCC_ON	= 1<<1,	/* Switch VCC On  */
+	PC_VCC_OFF	= 1<<0,	/* Switch VCC Off */
+};
+
+/*	B2_IRQM_MSK 	32 bit	IRQ Moderation Mask */
+enum {
+	IS_ALL_MSK	= 0xbffffffful,	/* All Interrupt bits */
+	IS_HW_ERR	= 1<<31,	/* Interrupt HW Error */
+					/* Bit 30:	reserved */
+	IS_PA_TO_RX1	= 1<<29,	/* Packet Arb Timeout Rx1 */
+	IS_PA_TO_RX2	= 1<<28,	/* Packet Arb Timeout Rx2 */
+	IS_PA_TO_TX1	= 1<<27,	/* Packet Arb Timeout Tx1 */
+	IS_PA_TO_TX2	= 1<<26,	/* Packet Arb Timeout Tx2 */
+	IS_I2C_READY	= 1<<25,	/* IRQ on end of I2C Tx */
+	IS_IRQ_SW	= 1<<24,	/* SW forced IRQ	*/
+	IS_EXT_REG	= 1<<23,	/* IRQ from LM80 or PHY (GENESIS only) */
+					/* IRQ from PHY (YUKON only) */
+	IS_TIMINT	= 1<<22,	/* IRQ from Timer	*/
+	IS_MAC1		= 1<<21,	/* IRQ from MAC 1	*/
+	IS_LNK_SYNC_M1	= 1<<20,	/* Link Sync Cnt wrap MAC 1 */
+	IS_MAC2		= 1<<19,	/* IRQ from MAC 2	*/
+	IS_LNK_SYNC_M2	= 1<<18,	/* Link Sync Cnt wrap MAC 2 */
+/* Receive Queue 1 */
+	IS_R1_B		= 1<<17,	/* Q_R1 End of Buffer */
+	IS_R1_F		= 1<<16,	/* Q_R1 End of Frame */
+	IS_R1_C		= 1<<15,	/* Q_R1 Encoding Error */
+/* Receive Queue 2 */
+	IS_R2_B		= 1<<14,	/* Q_R2 End of Buffer */
+	IS_R2_F		= 1<<13,	/* Q_R2 End of Frame */
+	IS_R2_C		= 1<<12,	/* Q_R2 Encoding Error */
+/* Synchronous Transmit Queue 1 */
+	IS_XS1_B	= 1<<11,	/* Q_XS1 End of Buffer */
+	IS_XS1_F	= 1<<10,	/* Q_XS1 End of Frame */
+	IS_XS1_C	= 1<<9,		/* Q_XS1 Encoding Error */
+/* Asynchronous Transmit Queue 1 */
+	IS_XA1_B	= 1<<8,		/* Q_XA1 End of Buffer */
+	IS_XA1_F	= 1<<7,		/* Q_XA1 End of Frame */
+	IS_XA1_C	= 1<<6,		/* Q_XA1 Encoding Error */
+/* Synchronous Transmit Queue 2 */
+	IS_XS2_B	= 1<<5,		/* Q_XS2 End of Buffer */
+	IS_XS2_F	= 1<<4,		/* Q_XS2 End of Frame */
+	IS_XS2_C	= 1<<3,		/* Q_XS2 Encoding Error */
+/* Asynchronous Transmit Queue 2 */
+	IS_XA2_B	= 1<<2,		/* Q_XA2 End of Buffer */
+	IS_XA2_F	= 1<<1,		/* Q_XA2 End of Frame */
+	IS_XA2_C	= 1<<0,		/* Q_XA2 Encoding Error */
+
+	IS_TO_PORT1	= IS_PA_TO_RX1 | IS_PA_TO_TX1,
+	IS_TO_PORT2	= IS_PA_TO_RX2 | IS_PA_TO_TX2,
+
+	IS_PORT_1	= IS_XA1_F| IS_R1_F | IS_TO_PORT1 | IS_MAC1,
+	IS_PORT_2	= IS_XA2_F| IS_R2_F | IS_TO_PORT2 | IS_MAC2,
+};
+
+
+/*	B2_IRQM_HWE_MSK	32 bit	IRQ Moderation HW Error Mask */
+enum {
+	IS_IRQ_TIST_OV	= 1<<13, /* Time Stamp Timer Overflow (YUKON only) */
+	IS_IRQ_SENSOR	= 1<<12, /* IRQ from Sensor (YUKON only) */
+	IS_IRQ_MST_ERR	= 1<<11, /* IRQ master error detected */
+	IS_IRQ_STAT	= 1<<10, /* IRQ status exception */
+	IS_NO_STAT_M1	= 1<<9,	/* No Rx Status from MAC 1 */
+	IS_NO_STAT_M2	= 1<<8,	/* No Rx Status from MAC 2 */
+	IS_NO_TIST_M1	= 1<<7,	/* No Time Stamp from MAC 1 */
+	IS_NO_TIST_M2	= 1<<6,	/* No Time Stamp from MAC 2 */
+	IS_RAM_RD_PAR	= 1<<5,	/* RAM Read  Parity Error */
+	IS_RAM_WR_PAR	= 1<<4,	/* RAM Write Parity Error */
+	IS_M1_PAR_ERR	= 1<<3,	/* MAC 1 Parity Error */
+	IS_M2_PAR_ERR	= 1<<2,	/* MAC 2 Parity Error */
+	IS_R1_PAR_ERR	= 1<<1,	/* Queue R1 Parity Error */
+	IS_R2_PAR_ERR	= 1<<0,	/* Queue R2 Parity Error */
+
+	IS_ERR_MSK	= IS_IRQ_MST_ERR | IS_IRQ_STAT
+			| IS_RAM_RD_PAR | IS_RAM_WR_PAR
+			| IS_M1_PAR_ERR | IS_M2_PAR_ERR
+			| IS_R1_PAR_ERR | IS_R2_PAR_ERR,
+};
+
+/*	B2_TST_CTRL1	 8 bit	Test Control Register 1 */
+enum {
+	TST_FRC_DPERR_MR = 1<<7, /* force DATAPERR on MST RD */
+	TST_FRC_DPERR_MW = 1<<6, /* force DATAPERR on MST WR */
+	TST_FRC_DPERR_TR = 1<<5, /* force DATAPERR on TRG RD */
+	TST_FRC_DPERR_TW = 1<<4, /* force DATAPERR on TRG WR */
+	TST_FRC_APERR_M	 = 1<<3, /* force ADDRPERR on MST */
+	TST_FRC_APERR_T	 = 1<<2, /* force ADDRPERR on TRG */
+	TST_CFG_WRITE_ON = 1<<1, /* Enable  Config Reg WR */
+	TST_CFG_WRITE_OFF= 1<<0, /* Disable Config Reg WR */
+};
+
+/*	B2_MAC_CFG		 8 bit	MAC Configuration / Chip Revision */
+enum {
+	CFG_CHIP_R_MSK	  = 0xf<<4,	/* Bit 7.. 4: Chip Revision */
+					/* Bit 3.. 2:	reserved */
+	CFG_DIS_M2_CLK	  = 1<<1,	/* Disable Clock for 2nd MAC */
+	CFG_SNG_MAC	  = 1<<0,	/* MAC Config: 0=2 MACs / 1=1 MAC*/
+};
+
+/*	B2_CHIP_ID		 8 bit 	Chip Identification Number */
+enum {
+	CHIP_ID_GENESIS	   = 0x0a, /* Chip ID for GENESIS */
+	CHIP_ID_YUKON	   = 0xb0, /* Chip ID for YUKON */
+	CHIP_ID_YUKON_LITE = 0xb1, /* Chip ID for YUKON-Lite (Rev. A1-A3) */
+	CHIP_ID_YUKON_LP   = 0xb2, /* Chip ID for YUKON-LP */
+	CHIP_ID_YUKON_XL   = 0xb3, /* Chip ID for YUKON-2 XL */
+	CHIP_ID_YUKON_EC   = 0xb6, /* Chip ID for YUKON-2 EC */
+	CHIP_ID_YUKON_FE   = 0xb7, /* Chip ID for YUKON-2 FE */
+
+	CHIP_REV_YU_LITE_A1  = 3,	/* Chip Rev. for YUKON-Lite A1,A2 */
+	CHIP_REV_YU_LITE_A3  = 7,	/* Chip Rev. for YUKON-Lite A3 */
+};
+
+/*	B2_TI_CTRL		 8 bit	Timer control */
+/*	B2_IRQM_CTRL	 8 bit	IRQ Moderation Timer Control */
+enum {
+	TIM_START	= 1<<2,	/* Start Timer */
+	TIM_STOP	= 1<<1,	/* Stop  Timer */
+	TIM_CLR_IRQ	= 1<<0,	/* Clear Timer IRQ (!IRQM) */
+};
+
+/*	B2_TI_TEST		 8 Bit	Timer Test */
+/*	B2_IRQM_TEST	 8 bit	IRQ Moderation Timer Test */
+/*	B28_DPT_TST		 8 bit	Descriptor Poll Timer Test Reg */
+enum {
+	TIM_T_ON	= 1<<2,	/* Test mode on */
+	TIM_T_OFF	= 1<<1,	/* Test mode off */
+	TIM_T_STEP	= 1<<0,	/* Test step */
+};
+
+/*	B2_GP_IO		32 bit	General Purpose I/O Register */
+enum {
+	GP_DIR_9 = 1<<25, /* IO_9 direct, 0=In/1=Out */
+	GP_DIR_8 = 1<<24, /* IO_8 direct, 0=In/1=Out */
+	GP_DIR_7 = 1<<23, /* IO_7 direct, 0=In/1=Out */
+	GP_DIR_6 = 1<<22, /* IO_6 direct, 0=In/1=Out */
+	GP_DIR_5 = 1<<21, /* IO_5 direct, 0=In/1=Out */
+	GP_DIR_4 = 1<<20, /* IO_4 direct, 0=In/1=Out */
+	GP_DIR_3 = 1<<19, /* IO_3 direct, 0=In/1=Out */
+	GP_DIR_2 = 1<<18, /* IO_2 direct, 0=In/1=Out */
+	GP_DIR_1 = 1<<17, /* IO_1 direct, 0=In/1=Out */
+	GP_DIR_0 = 1<<16, /* IO_0 direct, 0=In/1=Out */
+
+	GP_IO_9	= 1<<9,	/* IO_9 pin */
+	GP_IO_8	= 1<<8,	/* IO_8 pin */
+	GP_IO_7	= 1<<7,	/* IO_7 pin */
+	GP_IO_6	= 1<<6,	/* IO_6 pin */
+	GP_IO_5	= 1<<5,	/* IO_5 pin */
+	GP_IO_4	= 1<<4,	/* IO_4 pin */
+	GP_IO_3	= 1<<3,	/* IO_3 pin */
+	GP_IO_2	= 1<<2,	/* IO_2 pin */
+	GP_IO_1	= 1<<1,	/* IO_1 pin */
+	GP_IO_0	= 1<<0,	/* IO_0 pin */
+};
+
+/* Descriptor Bit Definition */
+/*	TxCtrl		Transmit Buffer Control Field */
+/*	RxCtrl		Receive  Buffer Control Field */
+enum {
+	BMU_OWN		= 1<<31, /* OWN bit: 0=host/1=BMU */
+	BMU_STF		= 1<<30, /* Start of Frame */
+	BMU_EOF		= 1<<29, /* End of Frame */
+	BMU_IRQ_EOB	= 1<<28, /* Req "End of Buffer" IRQ */
+	BMU_IRQ_EOF	= 1<<27, /* Req "End of Frame" IRQ */
+				/* TxCtrl specific bits */
+	BMU_STFWD	= 1<<26, /* (Tx)	Store & Forward Frame */
+	BMU_NO_FCS	= 1<<25, /* (Tx) Disable MAC FCS (CRC) generation */
+	BMU_SW	= 1<<24, /* (Tx)	1 bit res. for SW use */
+				/* RxCtrl specific bits */
+	BMU_DEV_0	= 1<<26, /* (Rx)	Transfer data to Dev0 */
+	BMU_STAT_VAL	= 1<<25, /* (Rx)	Rx Status Valid */
+	BMU_TIST_VAL	= 1<<24, /* (Rx)	Rx TimeStamp Valid */
+			/* Bit 23..16:	BMU Check Opcodes */
+	BMU_CHECK	= 0x55<<16, /* Default BMU check */
+	BMU_TCP_CHECK	= 0x56<<16, /* Descr with TCP ext */
+	BMU_UDP_CHECK	= 0x57<<16, /* Descr with UDP ext (YUKON only) */
+	BMU_BBC		= 0xffffL, /* Bit 15.. 0:	Buffer Byte Counter */
+};
+
+/*	B2_BSC_CTRL		 8 bit	Blink Source Counter Control */
+enum {
+	BSC_START	= 1<<1,	/* Start Blink Source Counter */
+	BSC_STOP	= 1<<0,	/* Stop  Blink Source Counter */
+};
+
+/*	B2_BSC_STAT		 8 bit	Blink Source Counter Status */
+enum {
+	BSC_SRC		= 1<<0,	/* Blink Source, 0=Off / 1=On */
+};
+
+/*	B2_BSC_TST		16 bit	Blink Source Counter Test Reg */
+enum {
+	BSC_T_ON	= 1<<2,	/* Test mode on */
+	BSC_T_OFF	= 1<<1,	/* Test mode off */
+	BSC_T_STEP	= 1<<0,	/* Test step */
+};
+
+/*	B3_RAM_ADDR		32 bit	RAM Address, to read or write */
+					/* Bit 31..19:	reserved */
+#define RAM_ADR_RAN	0x0007ffffL	/* Bit 18.. 0:	RAM Address Range */
+/* RAM Interface Registers */
+
+/*	B3_RI_CTRL		16 bit	RAM Iface Control Register */
+enum {
+	RI_CLR_RD_PERR	= 1<<9,	/* Clear IRQ RAM Read Parity Err */
+	RI_CLR_WR_PERR	= 1<<8,	/* Clear IRQ RAM Write Parity Err*/
+
+	RI_RST_CLR	= 1<<1,	/* Clear RAM Interface Reset */
+	RI_RST_SET	= 1<<0,	/* Set   RAM Interface Reset */
+};
+
+/* MAC Arbiter Registers */
+/*	B3_MA_TO_CTRL	16 bit	MAC Arbiter Timeout Ctrl Reg */
+enum {
+	MA_FOE_ON	= 1<<3,	/* XMAC Fast Output Enable ON */
+	MA_FOE_OFF	= 1<<2,	/* XMAC Fast Output Enable OFF */
+	MA_RST_CLR	= 1<<1,	/* Clear MAC Arbiter Reset */
+	MA_RST_SET	= 1<<0,	/* Set   MAC Arbiter Reset */
+
+};
+
+/* Timeout values */
+#define SK_MAC_TO_53	72		/* MAC arbiter timeout */
+#define SK_PKT_TO_53	0x2000		/* Packet arbiter timeout */
+#define SK_PKT_TO_MAX	0xffff		/* Maximum value */
+#define SK_RI_TO_53	36		/* RAM interface timeout */
+
+/* Packet Arbiter Registers */
+/*	B3_PA_CTRL		16 bit	Packet Arbiter Ctrl Register */
+enum {
+	PA_CLR_TO_TX2	= 1<<13,/* Clear IRQ Packet Timeout TX2 */
+	PA_CLR_TO_TX1	= 1<<12,/* Clear IRQ Packet Timeout TX1 */
+	PA_CLR_TO_RX2	= 1<<11,/* Clear IRQ Packet Timeout RX2 */
+	PA_CLR_TO_RX1	= 1<<10,/* Clear IRQ Packet Timeout RX1 */
+	PA_ENA_TO_TX2	= 1<<9,	/* Enable  Timeout Timer TX2 */
+	PA_DIS_TO_TX2	= 1<<8,	/* Disable Timeout Timer TX2 */
+	PA_ENA_TO_TX1	= 1<<7,	/* Enable  Timeout Timer TX1 */
+	PA_DIS_TO_TX1	= 1<<6,	/* Disable Timeout Timer TX1 */
+	PA_ENA_TO_RX2	= 1<<5,	/* Enable  Timeout Timer RX2 */
+	PA_DIS_TO_RX2	= 1<<4,	/* Disable Timeout Timer RX2 */
+	PA_ENA_TO_RX1	= 1<<3,	/* Enable  Timeout Timer RX1 */
+	PA_DIS_TO_RX1	= 1<<2,	/* Disable Timeout Timer RX1 */
+	PA_RST_CLR	= 1<<1,	/* Clear MAC Arbiter Reset */
+	PA_RST_SET	= 1<<0,	/* Set   MAC Arbiter Reset */
+};
+
+#define PA_ENA_TO_ALL	(PA_ENA_TO_RX1 | PA_ENA_TO_RX2 |\
+						PA_ENA_TO_TX1 | PA_ENA_TO_TX2)
+
+
+/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */
+/*	TXA_ITI_INI		32 bit	Tx Arb Interval Timer Init Val */
+/*	TXA_ITI_VAL		32 bit	Tx Arb Interval Timer Value */
+/*	TXA_LIM_INI		32 bit	Tx Arb Limit Counter Init Val */
+/*	TXA_LIM_VAL		32 bit	Tx Arb Limit Counter Value */
+
+#define TXA_MAX_VAL	0x00ffffffUL	/* Bit 23.. 0:	Max TXA Timer/Cnt Val */
+
+/*	TXA_CTRL		 8 bit	Tx Arbiter Control Register */
+enum {
+	TXA_ENA_FSYNC	= 1<<7,	/* Enable  force of sync Tx queue */
+	TXA_DIS_FSYNC	= 1<<6,	/* Disable force of sync Tx queue */
+	TXA_ENA_ALLOC	= 1<<5,	/* Enable  alloc of free bandwidth */
+	TXA_DIS_ALLOC	= 1<<4,	/* Disable alloc of free bandwidth */
+	TXA_START_RC	= 1<<3,	/* Start sync Rate Control */
+	TXA_STOP_RC	= 1<<2,	/* Stop  sync Rate Control */
+	TXA_ENA_ARB	= 1<<1,	/* Enable  Tx Arbiter */
+	TXA_DIS_ARB	= 1<<0,	/* Disable Tx Arbiter */
+};
+
+/*
+ *	Bank 4 - 5
+ */
+/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */
+enum {
+	TXA_ITI_INI	= 0x0200,/* 32 bit	Tx Arb Interval Timer Init Val*/
+	TXA_ITI_VAL	= 0x0204,/* 32 bit	Tx Arb Interval Timer Value */
+	TXA_LIM_INI	= 0x0208,/* 32 bit	Tx Arb Limit Counter Init Val */
+	TXA_LIM_VAL	= 0x020c,/* 32 bit	Tx Arb Limit Counter Value */
+	TXA_CTRL	= 0x0210,/*  8 bit	Tx Arbiter Control Register */
+	TXA_TEST	= 0x0211,/*  8 bit	Tx Arbiter Test Register */
+	TXA_STAT	= 0x0212,/*  8 bit	Tx Arbiter Status Register */
+};
+
+
+enum {
+	B6_EXT_REG	= 0x0300,/* External registers (GENESIS only) */
+	B7_CFG_SPC	= 0x0380,/* copy of the Configuration register */
+	B8_RQ1_REGS	= 0x0400,/* Receive Queue 1 */
+	B8_RQ2_REGS	= 0x0480,/* Receive Queue 2 */
+	B8_TS1_REGS	= 0x0600,/* Transmit sync queue 1 */
+	B8_TA1_REGS	= 0x0680,/* Transmit async queue 1 */
+	B8_TS2_REGS	= 0x0700,/* Transmit sync queue 2 */
+	B8_TA2_REGS	= 0x0780,/* Transmit async queue 2 */
+	B16_RAM_REGS	= 0x0800,/* RAM Buffer Registers */
+};
+
+/* Queue Register Offsets, use Q_ADDR() to access */
+enum {
+	B8_Q_REGS = 0x0400, /* base of Queue registers */
+	Q_D	= 0x00,	/* 8*32	bit	Current Descriptor */
+	Q_DA_L	= 0x20,	/* 32 bit	Current Descriptor Address Low dWord */
+	Q_DA_H	= 0x24,	/* 32 bit	Current Descriptor Address High dWord */
+	Q_AC_L	= 0x28,	/* 32 bit	Current Address Counter Low dWord */
+	Q_AC_H	= 0x2c,	/* 32 bit	Current Address Counter High dWord */
+	Q_BC	= 0x30,	/* 32 bit	Current Byte Counter */
+	Q_CSR	= 0x34,	/* 32 bit	BMU Control/Status Register */
+	Q_F	= 0x38,	/* 32 bit	Flag Register */
+	Q_T1	= 0x3c,	/* 32 bit	Test Register 1 */
+	Q_T1_TR	= 0x3c,	/*  8 bit	Test Register 1 Transfer SM */
+	Q_T1_WR	= 0x3d,	/*  8 bit	Test Register 1 Write Descriptor SM */
+	Q_T1_RD	= 0x3e,	/*  8 bit	Test Register 1 Read Descriptor SM */
+	Q_T1_SV	= 0x3f,	/*  8 bit	Test Register 1 Supervisor SM */
+	Q_T2	= 0x40,	/* 32 bit	Test Register 2	*/
+	Q_T3	= 0x44,	/* 32 bit	Test Register 3	*/
+
+};
+#define Q_ADDR(reg, offs) (B8_Q_REGS + (reg) + (offs))
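+
+/* Example: the BMU control/status register of async Tx queue 1 is at
+ * Q_ADDR(Q_XA1, Q_CSR) = 0x0400 + 0x0280 + 0x34 = 0x06b4.
+ */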
+
+/* RAM Buffer Register Offsets */
+enum {
+
+	RB_START= 0x00,/* 32 bit	RAM Buffer Start Address */
+	RB_END	= 0x04,/* 32 bit	RAM Buffer End Address */
+	RB_WP	= 0x08,/* 32 bit	RAM Buffer Write Pointer */
+	RB_RP	= 0x0c,/* 32 bit	RAM Buffer Read Pointer */
+	RB_RX_UTPP= 0x10,/* 32 bit	Rx Upper Threshold, Pause Packet */
+	RB_RX_LTPP= 0x14,/* 32 bit	Rx Lower Threshold, Pause Packet */
+	RB_RX_UTHP= 0x18,/* 32 bit	Rx Upper Threshold, High Prio */
+	RB_RX_LTHP= 0x1c,/* 32 bit	Rx Lower Threshold, High Prio */
+	/* 0x10 - 0x1f:	reserved at Tx RAM Buffer Registers */
+	RB_PC	= 0x20,/* 32 bit	RAM Buffer Packet Counter */
+	RB_LEV	= 0x24,/* 32 bit	RAM Buffer Level Register */
+	RB_CTRL	= 0x28,/* 32 bit	RAM Buffer Control Register */
+	RB_TST1	= 0x29,/*  8 bit	RAM Buffer Test Register 1 */
+	RB_TST2	= 0x2a,/*  8 bit	RAM Buffer Test Register 2 */
+};
+
+/* Receive and Transmit Queues */
+enum {
+	Q_R1	= 0x0000,	/* Receive Queue 1 */
+	Q_R2	= 0x0080,	/* Receive Queue 2 */
+	Q_XS1	= 0x0200,	/* Synchronous Transmit Queue 1 */
+	Q_XA1	= 0x0280,	/* Asynchronous Transmit Queue 1 */
+	Q_XS2	= 0x0300,	/* Synchronous Transmit Queue 2 */
+	Q_XA2	= 0x0380,	/* Asynchronous Transmit Queue 2 */
+};
+
+/* Different MAC Types */
+enum {
+	SK_MAC_XMAC =	0,	/* Xaqti XMAC II */
+	SK_MAC_GMAC =	1,	/* Marvell GMAC */
+};
+
+/* Different PHY Types */
+enum {
+	SK_PHY_XMAC	= 0,/* integrated in XMAC II */
+	SK_PHY_BCOM	= 1,/* Broadcom BCM5400 */
+	SK_PHY_LONE	= 2,/* Level One LXT1000  [not supported]*/
+	SK_PHY_NAT	= 3,/* National DP83891  [not supported] */
+	SK_PHY_MARV_COPPER= 4,/* Marvell 88E1011S */
+	SK_PHY_MARV_FIBER = 5,/* Marvell 88E1011S working on fiber */
+};
+
+/* PHY addresses (bits 12..8 of PHY address reg) */
+enum {
+	PHY_ADDR_XMAC	= 0<<8,
+	PHY_ADDR_BCOM	= 1<<8,
+
+/* GPHY address (bits 15..11 of SMI control reg) */
+	PHY_ADDR_MARV	= 0,
+};
+
+#define RB_ADDR(offs, queue) ((u16)B16_RAM_REGS + (u16)(queue) + (offs))
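+
+/* Example: the RAM buffer control register of async Tx queue 1 is at
+ * RB_ADDR(RB_CTRL, Q_XA1) = 0x0800 + 0x0280 + 0x28 = 0x0aa8.
+ */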
+
+/* Receive MAC FIFO, Receive LED, and Link_Sync regs (GENESIS only) */
+enum {
+	RX_MFF_EA	= 0x0c00,/* 32 bit	Receive MAC FIFO End Address */
+	RX_MFF_WP	= 0x0c04,/* 32 bit	Receive MAC FIFO Write Pointer */
+
+	RX_MFF_RP	= 0x0c0c,/* 32 bit	Receive MAC FIFO Read Pointer */
+	RX_MFF_PC	= 0x0c10,/* 32 bit	Receive MAC FIFO Packet Cnt */
+	RX_MFF_LEV	= 0x0c14,/* 32 bit	Receive MAC FIFO Level */
+	RX_MFF_CTRL1	= 0x0c18,/* 16 bit	Receive MAC FIFO Control Reg 1*/
+	RX_MFF_STAT_TO	= 0x0c1a,/*  8 bit	Receive MAC Status Timeout */
+	RX_MFF_TIST_TO	= 0x0c1b,/*  8 bit	Receive MAC Time Stamp Timeout */
+	RX_MFF_CTRL2	= 0x0c1c,/*  8 bit	Receive MAC FIFO Control Reg 2*/
+	RX_MFF_TST1	= 0x0c1d,/*  8 bit	Receive MAC FIFO Test Reg 1 */
+	RX_MFF_TST2	= 0x0c1e,/*  8 bit	Receive MAC FIFO Test Reg 2 */
+
+	RX_LED_INI	= 0x0c20,/* 32 bit	Receive LED Cnt Init Value */
+	RX_LED_VAL	= 0x0c24,/* 32 bit	Receive LED Cnt Current Value */
+	RX_LED_CTRL	= 0x0c28,/*  8 bit	Receive LED Cnt Control Reg */
+	RX_LED_TST	= 0x0c29,/*  8 bit	Receive LED Cnt Test Register */
+
+	LNK_SYNC_INI	= 0x0c30,/* 32 bit	Link Sync Cnt Init Value */
+	LNK_SYNC_VAL	= 0x0c34,/* 32 bit	Link Sync Cnt Current Value */
+	LNK_SYNC_CTRL	= 0x0c38,/*  8 bit	Link Sync Cnt Control Register */
+	LNK_SYNC_TST	= 0x0c39,/*  8 bit	Link Sync Cnt Test Register */
+	LNK_LED_REG	= 0x0c3c,/*  8 bit	Link LED Register */
+};
+
+/* Receive and Transmit MAC FIFO Registers (GENESIS only) */
+/*	RX_MFF_CTRL1	16 bit	Receive MAC FIFO Control Reg 1 */
+enum {
+	MFF_ENA_RDY_PAT	= 1<<13,	/* Enable  Ready Patch */
+	MFF_DIS_RDY_PAT	= 1<<12,	/* Disable Ready Patch */
+	MFF_ENA_TIM_PAT	= 1<<11,	/* Enable  Timing Patch */
+	MFF_DIS_TIM_PAT	= 1<<10,	/* Disable Timing Patch */
+	MFF_ENA_ALM_FUL	= 1<<9,	/* Enable  AlmostFull Sign */
+	MFF_DIS_ALM_FUL	= 1<<8,	/* Disable AlmostFull Sign */
+	MFF_ENA_PAUSE	= 1<<7,	/* Enable  Pause Signaling */
+	MFF_DIS_PAUSE	= 1<<6,	/* Disable Pause Signaling */
+	MFF_ENA_FLUSH	= 1<<5,	/* Enable  Frame Flushing */
+	MFF_DIS_FLUSH	= 1<<4,	/* Disable Frame Flushing */
+	MFF_ENA_TIST	= 1<<3,	/* Enable  Time Stamp Gener */
+	MFF_DIS_TIST	= 1<<2,	/* Disable Time Stamp Gener */
+	MFF_CLR_INTIST	= 1<<1,	/* Clear IRQ No Time Stamp */
+	MFF_CLR_INSTAT	= 1<<0,	/* Clear IRQ No Status */
+	MFF_RX_CTRL_DEF = MFF_ENA_TIM_PAT,
+};
+
+/*	TX_MFF_CTRL1	16 bit	Transmit MAC FIFO Control Reg 1 */
+enum {
+	MFF_CLR_PERR	= 1<<15, /* Clear Parity Error IRQ */
+
+	MFF_ENA_PKT_REC	= 1<<13, /* Enable  Packet Recovery */
+	MFF_DIS_PKT_REC	= 1<<12, /* Disable Packet Recovery */
+
+	MFF_ENA_W4E	= 1<<7,	/* Enable  Wait for Empty */
+	MFF_DIS_W4E	= 1<<6,	/* Disable Wait for Empty */
+
+	MFF_ENA_LOOPB	= 1<<3,	/* Enable  Loopback */
+	MFF_DIS_LOOPB	= 1<<2,	/* Disable Loopback */
+	MFF_CLR_MAC_RST	= 1<<1,	/* Clear XMAC Reset */
+	MFF_SET_MAC_RST	= 1<<0,	/* Set   XMAC Reset */
+
+	MFF_TX_CTRL_DEF	 = MFF_ENA_PKT_REC | (u16) MFF_ENA_TIM_PAT | MFF_ENA_FLUSH,
+};
+
+
+/*	RX_MFF_TST2	 	 8 bit	Receive MAC FIFO Test Register 2 */
+/*	TX_MFF_TST2	 	 8 bit	Transmit MAC FIFO Test Register 2 */
+enum {
+	MFF_WSP_T_ON	= 1<<6,	/* Tx: Write Shadow Ptr TestOn */
+	MFF_WSP_T_OFF	= 1<<5,	/* Tx: Write Shadow Ptr TstOff */
+	MFF_WSP_INC	= 1<<4,	/* Tx: Write Shadow Ptr Increment */
+	MFF_PC_DEC	= 1<<3,	/* Packet Counter Decrement */
+	MFF_PC_T_ON	= 1<<2,	/* Packet Counter Test On */
+	MFF_PC_T_OFF	= 1<<1,	/* Packet Counter Test Off */
+	MFF_PC_INC	= 1<<0,	/* Packet Counter Increment */
+};
+
+/*	RX_MFF_TST1	 	 8 bit	Receive MAC FIFO Test Register 1 */
+/*	TX_MFF_TST1	 	 8 bit	Transmit MAC FIFO Test Register 1 */
+enum {
+	MFF_WP_T_ON	= 1<<6,	/* Write Pointer Test On */
+	MFF_WP_T_OFF	= 1<<5,	/* Write Pointer Test Off */
+	MFF_WP_INC	= 1<<4,	/* Write Pointer Increm */
+
+	MFF_RP_T_ON	= 1<<2,	/* Read Pointer Test On */
+	MFF_RP_T_OFF	= 1<<1,	/* Read Pointer Test Off */
+	MFF_RP_DEC	= 1<<0,	/* Read Pointer Decrement */
+};
+
+/*	RX_MFF_CTRL2	 8 bit	Receive MAC FIFO Control Reg 2 */
+/*	TX_MFF_CTRL2	 8 bit	Transmit MAC FIFO Control Reg 2 */
+enum {
+	MFF_ENA_OP_MD	= 1<<3,	/* Enable  Operation Mode */
+	MFF_DIS_OP_MD	= 1<<2,	/* Disable Operation Mode */
+	MFF_RST_CLR	= 1<<1,	/* Clear MAC FIFO Reset */
+	MFF_RST_SET	= 1<<0,	/* Set   MAC FIFO Reset */
+};
+
+
+/*	Link LED Counter Registers (GENESIS only) */
+
+/*	RX_LED_CTRL		 8 bit	Receive LED Cnt Control Reg */
+/*	TX_LED_CTRL		 8 bit	Transmit LED Cnt Control Reg */
+/*	LNK_SYNC_CTRL	 8 bit	Link Sync Cnt Control Register */
+enum {
+	LED_START	= 1<<2,	/* Start Timer */
+	LED_STOP	= 1<<1,	/* Stop Timer */
+	LED_STATE	= 1<<0,	/* Rx/Tx: LED State, 1=LED on */
+};
+
+/*	RX_LED_TST		 8 bit	Receive LED Cnt Test Register */
+/*	TX_LED_TST		 8 bit	Transmit LED Cnt Test Register */
+/*	LNK_SYNC_TST	 8 bit	Link Sync Cnt Test Register */
+enum {
+	LED_T_ON	= 1<<2,	/* LED Counter Test mode On */
+	LED_T_OFF	= 1<<1,	/* LED Counter Test mode Off */
+	LED_T_STEP	= 1<<0,	/* LED Counter Step */
+};
+
+/*	LNK_LED_REG	 	 8 bit	Link LED Register */
+enum {
+	LED_BLK_ON	= 1<<5,	/* Link LED Blinking On */
+	LED_BLK_OFF	= 1<<4,	/* Link LED Blinking Off */
+	LED_SYNC_ON	= 1<<3,	/* Use Sync Wire to switch LED */
+	LED_SYNC_OFF	= 1<<2,	/* Disable Sync Wire Input */
+	LED_ON	= 1<<1,	/* switch LED on */
+	LED_OFF	= 1<<0,	/* switch LED off */
+};
+
+/* Receive GMAC FIFO (YUKON) */
+enum {
+	RX_GMF_EA	= 0x0c40,/* 32 bit	Rx GMAC FIFO End Address */
+	RX_GMF_AF_THR	= 0x0c44,/* 32 bit	Rx GMAC FIFO Almost Full Thresh. */
+	RX_GMF_CTRL_T	= 0x0c48,/* 32 bit	Rx GMAC FIFO Control/Test */
+	RX_GMF_FL_MSK	= 0x0c4c,/* 32 bit	Rx GMAC FIFO Flush Mask */
+	RX_GMF_FL_THR	= 0x0c50,/* 32 bit	Rx GMAC FIFO Flush Threshold */
+	RX_GMF_WP	= 0x0c60,/* 32 bit	Rx GMAC FIFO Write Pointer */
+	RX_GMF_WLEV	= 0x0c68,/* 32 bit	Rx GMAC FIFO Write Level */
+	RX_GMF_RP	= 0x0c70,/* 32 bit	Rx GMAC FIFO Read Pointer */
+	RX_GMF_RLEV	= 0x0c78,/* 32 bit	Rx GMAC FIFO Read Level */
+};
+
+
+/*	TXA_TEST		 8 bit	Tx Arbiter Test Register */
+enum {
+	TXA_INT_T_ON	= 1<<5,	/* Tx Arb Interval Timer Test On */
+	TXA_INT_T_OFF	= 1<<4,	/* Tx Arb Interval Timer Test Off */
+	TXA_INT_T_STEP	= 1<<3,	/* Tx Arb Interval Timer Step */
+	TXA_LIM_T_ON	= 1<<2,	/* Tx Arb Limit Timer Test On */
+	TXA_LIM_T_OFF	= 1<<1,	/* Tx Arb Limit Timer Test Off */
+	TXA_LIM_T_STEP	= 1<<0,	/* Tx Arb Limit Timer Step */
+};
+
+/*	TXA_STAT		 8 bit	Tx Arbiter Status Register */
+enum {
+	TXA_PRIO_XS	= 1<<0,	/* sync queue has prio to send */
+};
+
+
+/*	Q_BC			32 bit	Current Byte Counter */
+
+/* BMU Control Status Registers */
+/*	B0_R1_CSR		32 bit	BMU Ctrl/Stat Rx Queue 1 */
+/*	B0_R2_CSR		32 bit	BMU Ctrl/Stat Rx Queue 2 */
+/*	B0_XA1_CSR		32 bit	BMU Ctrl/Stat Async Tx Queue 1 */
+/*	B0_XS1_CSR		32 bit	BMU Ctrl/Stat Sync Tx Queue 1 */
+/*	B0_XA2_CSR		32 bit	BMU Ctrl/Stat Async Tx Queue 2 */
+/*	B0_XS2_CSR		32 bit	BMU Ctrl/Stat Sync Tx Queue 2 */
+/*	Q_CSR			32 bit	BMU Control/Status Register */
+
+enum {
+	CSR_SV_IDLE	= 1<<24,	/* BMU SM Idle */
+
+	CSR_DESC_CLR	= 1<<21,	/* Clear Reset for Descr */
+	CSR_DESC_SET	= 1<<20,	/* Set   Reset for Descr */
+	CSR_FIFO_CLR	= 1<<19,	/* Clear Reset for FIFO */
+	CSR_FIFO_SET	= 1<<18,	/* Set   Reset for FIFO */
+	CSR_HPI_RUN	= 1<<17,	/* Release HPI SM */
+	CSR_HPI_RST	= 1<<16,	/* Reset   HPI SM to Idle */
+	CSR_SV_RUN	= 1<<15,	/* Release Supervisor SM */
+	CSR_SV_RST	= 1<<14,	/* Reset   Supervisor SM */
+	CSR_DREAD_RUN	= 1<<13,	/* Release Descr Read SM */
+	CSR_DREAD_RST	= 1<<12,	/* Reset   Descr Read SM */
+	CSR_DWRITE_RUN	= 1<<11,	/* Release Descr Write SM */
+	CSR_DWRITE_RST	= 1<<10,	/* Reset   Descr Write SM */
+	CSR_TRANS_RUN	= 1<<9,		/* Release Transfer SM */
+	CSR_TRANS_RST	= 1<<8,		/* Reset   Transfer SM */
+	CSR_ENA_POL	= 1<<7,		/* Enable  Descr Polling */
+	CSR_DIS_POL	= 1<<6,		/* Disable Descr Polling */
+	CSR_STOP	= 1<<5,		/* Stop  Rx/Tx Queue */
+	CSR_START	= 1<<4,		/* Start Rx/Tx Queue */
+	CSR_IRQ_CL_P	= 1<<3,		/* (Rx)	Clear Parity IRQ */
+	CSR_IRQ_CL_B	= 1<<2,		/* Clear EOB IRQ */
+	CSR_IRQ_CL_F	= 1<<1,		/* Clear EOF IRQ */
+	CSR_IRQ_CL_C	= 1<<0,		/* Clear ERR IRQ */
+};
+
+#define CSR_SET_RESET	(CSR_DESC_SET | CSR_FIFO_SET | CSR_HPI_RST |\
+			CSR_SV_RST | CSR_DREAD_RST | CSR_DWRITE_RST |\
+			CSR_TRANS_RST)
+#define CSR_CLR_RESET	(CSR_DESC_CLR | CSR_FIFO_CLR | CSR_HPI_RUN |\
+			CSR_SV_RUN | CSR_DREAD_RUN | CSR_DWRITE_RUN |\
+			CSR_TRANS_RUN)
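+
+/*
+ * Usage sketch (hw and the skge_write32() accessor are assumed from
+ * elsewhere in this header): a BMU queue is typically re-initialized
+ * by asserting and then releasing all state machine resets, e.g. for
+ * the Rx queue 1 CSR listed above:
+ *
+ *	skge_write32(hw, B0_R1_CSR, CSR_SET_RESET);
+ *	skge_write32(hw, B0_R1_CSR, CSR_CLR_RESET);
+ */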
+
+/*	Q_F				32 bit	Flag Register */
+enum {
+	F_ALM_FULL	= 1<<27,	/* Rx FIFO: almost full */
+	F_EMPTY		= 1<<27,	/* Tx FIFO: empty flag */
+	F_FIFO_EOF	= 1<<26,	/* Tag (EOF Flag) bit in FIFO */
+	F_WM_REACHED	= 1<<25,	/* Watermark reached */
+
+	F_FIFO_LEVEL	= 0x1fL<<16,	/* Bit 20..16:	# of Qwords in FIFO */
+	F_WATER_MARK	= 0x0007ffL,	/* Bit 10.. 0:	Watermark */
+};
+
+/* RAM Buffer Register Offsets, use RB_ADDR(Offs, Queue) to access */
+/*	RB_START		32 bit	RAM Buffer Start Address */
+/*	RB_END			32 bit	RAM Buffer End Address */
+/*	RB_WP			32 bit	RAM Buffer Write Pointer */
+/*	RB_RP			32 bit	RAM Buffer Read Pointer */
+/*	RB_RX_UTPP		32 bit	Rx Upper Threshold, Pause Pack */
+/*	RB_RX_LTPP		32 bit	Rx Lower Threshold, Pause Pack */
+/*	RB_RX_UTHP		32 bit	Rx Upper Threshold, High Prio */
+/*	RB_RX_LTHP		32 bit	Rx Lower Threshold, High Prio */
+/*	RB_PC			32 bit	RAM Buffer Packet Counter */
+/*	RB_LEV			32 bit	RAM Buffer Level Register */
+
+#define RB_MSK	0x0007ffff	/* Bit 18.. 0:	RAM Buffer Pointer Bits */
+/*	RB_TST2			 8 bit	RAM Buffer Test Register 2 */
+/*	RB_TST1			 8 bit	RAM Buffer Test Register 1 */
+
+/*	RB_CTRL			 8 bit	RAM Buffer Control Register */
+enum {
+	RB_ENA_STFWD	= 1<<5,	/* Enable  Store & Forward */
+	RB_DIS_STFWD	= 1<<4,	/* Disable Store & Forward */
+	RB_ENA_OP_MD	= 1<<3,	/* Enable  Operation Mode */
+	RB_DIS_OP_MD	= 1<<2,	/* Disable Operation Mode */
+	RB_RST_CLR	= 1<<1,	/* Clear RAM Buf STM Reset */
+	RB_RST_SET	= 1<<0,	/* Set   RAM Buf STM Reset */
+};
+
+/* Transmit MAC FIFO and Transmit LED Registers (GENESIS only) */
+enum {
+	TX_MFF_EA	= 0x0d00,/* 32 bit	Transmit MAC FIFO End Address */
+	TX_MFF_WP	= 0x0d04,/* 32 bit	Transmit MAC FIFO WR Pointer */
+	TX_MFF_WSP	= 0x0d08,/* 32 bit	Transmit MAC FIFO WR Shadow Ptr */
+	TX_MFF_RP	= 0x0d0c,/* 32 bit	Transmit MAC FIFO RD Pointer */
+	TX_MFF_PC	= 0x0d10,/* 32 bit	Transmit MAC FIFO Packet Cnt */
+	TX_MFF_LEV	= 0x0d14,/* 32 bit	Transmit MAC FIFO Level */
+	TX_MFF_CTRL1	= 0x0d18,/* 16 bit	Transmit MAC FIFO Ctrl Reg 1 */
+	TX_MFF_WAF	= 0x0d1a,/*  8 bit	Transmit MAC Wait after flush */
+
+	TX_MFF_CTRL2	= 0x0d1c,/*  8 bit	Transmit MAC FIFO Ctrl Reg 2 */
+	TX_MFF_TST1	= 0x0d1d,/*  8 bit	Transmit MAC FIFO Test Reg 1 */
+	TX_MFF_TST2	= 0x0d1e,/*  8 bit	Transmit MAC FIFO Test Reg 2 */
+
+	TX_LED_INI	= 0x0d20,/* 32 bit	Transmit LED Cnt Init Value */
+	TX_LED_VAL	= 0x0d24,/* 32 bit	Transmit LED Cnt Current Val */
+	TX_LED_CTRL	= 0x0d28,/*  8 bit	Transmit LED Cnt Control Reg */
+	TX_LED_TST	= 0x0d29,/*  8 bit	Transmit LED Cnt Test Reg */
+};
+
+/* Counter and Timer constants, for a host clock of 62.5 MHz */
+#define SK_XMIT_DUR		0x002faf08UL	/*  50 ms */
+#define SK_BLK_DUR		0x01dcd650UL	/* 500 ms */
+
+#define SK_DPOLL_DEF	0x00ee6b28UL	/* 250 ms at 62.5 MHz */
+
+#define SK_DPOLL_MAX	0x00ffffffUL	/* 268 ms at 62.5 MHz */
+					/* 215 ms at 78.12 MHz */
+
+#define SK_FACT_62		100	/* clock factor, percent of 62.5 MHz */
+#define SK_FACT_53		 85	/* on GENESIS:	53.12 MHz */
+#define SK_FACT_78		125	/* on YUKON:	78.12 MHz */
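+
+/*
+ * Worked example: SK_XMIT_DUR = 62,500,000 ticks/s * 0.050 s
+ * = 3,125,000 = 0x002faf08, i.e. the 50 ms value above. Durations on
+ * the other host clocks scale by SK_FACT_xx/100 (e.g. 125% on YUKON).
+ */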
+
+
+/* Transmit GMAC FIFO (YUKON only) */
+enum {
+	TX_GMF_EA	= 0x0d40,/* 32 bit	Tx GMAC FIFO End Address */
+	TX_GMF_AE_THR	= 0x0d44,/* 32 bit	Tx GMAC FIFO Almost Empty Thresh.*/
+	TX_GMF_CTRL_T	= 0x0d48,/* 32 bit	Tx GMAC FIFO Control/Test */
+
+	TX_GMF_WP	= 0x0d60,/* 32 bit 	Tx GMAC FIFO Write Pointer */
+	TX_GMF_WSP	= 0x0d64,/* 32 bit 	Tx GMAC FIFO Write Shadow Ptr. */
+	TX_GMF_WLEV	= 0x0d68,/* 32 bit 	Tx GMAC FIFO Write Level */
+
+	TX_GMF_RP	= 0x0d70,/* 32 bit 	Tx GMAC FIFO Read Pointer */
+	TX_GMF_RSTP	= 0x0d74,/* 32 bit 	Tx GMAC FIFO Restart Pointer */
+	TX_GMF_RLEV	= 0x0d78,/* 32 bit 	Tx GMAC FIFO Read Level */
+
+	/* Descriptor Poll Timer Registers */
+	B28_DPT_INI	= 0x0e00,/* 24 bit	Descriptor Poll Timer Init Val */
+	B28_DPT_VAL	= 0x0e04,/* 24 bit	Descriptor Poll Timer Curr Val */
+	B28_DPT_CTRL	= 0x0e08,/*  8 bit	Descriptor Poll Timer Ctrl Reg */
+
+	B28_DPT_TST	= 0x0e0a,/*  8 bit	Descriptor Poll Timer Test Reg */
+
+	/* Time Stamp Timer Registers (YUKON only) */
+	GMAC_TI_ST_VAL	= 0x0e14,/* 32 bit	Time Stamp Timer Curr Val */
+	GMAC_TI_ST_CTRL	= 0x0e18,/*  8 bit	Time Stamp Timer Ctrl Reg */
+	GMAC_TI_ST_TST	= 0x0e1a,/*  8 bit	Time Stamp Timer Test Reg */
+};
+
+
+enum {
+	LINKLED_OFF 	     = 0x01,
+	LINKLED_ON  	     = 0x02,
+	LINKLED_LINKSYNC_OFF = 0x04,
+	LINKLED_LINKSYNC_ON  = 0x08,
+	LINKLED_BLINK_OFF    = 0x10,
+	LINKLED_BLINK_ON     = 0x20,
+};
+
+/* GMAC and GPHY Control Registers (YUKON only) */
+enum {
+	GMAC_CTRL	= 0x0f00,/* 32 bit	GMAC Control Reg */
+	GPHY_CTRL	= 0x0f04,/* 32 bit	GPHY Control Reg */
+	GMAC_IRQ_SRC	= 0x0f08,/*  8 bit	GMAC Interrupt Source Reg */
+	GMAC_IRQ_MSK	= 0x0f0c,/*  8 bit	GMAC Interrupt Mask Reg */
+	GMAC_LINK_CTRL	= 0x0f10,/* 16 bit	Link Control Reg */
+
+/* Wake-up Frame Pattern Match Control Registers (YUKON only) */
+
+	WOL_REG_OFFS	= 0x20,/* HW-Bug: Address is + 0x20 against spec. */
+
+	WOL_CTRL_STAT	= 0x0f20,/* 16 bit	WOL Control/Status Reg */
+	WOL_MATCH_CTL	= 0x0f22,/*  8 bit	WOL Match Control Reg */
+	WOL_MATCH_RES	= 0x0f23,/*  8 bit	WOL Match Result Reg */
+	WOL_MAC_ADDR	= 0x0f24,/* 32 bit	WOL MAC Address */
+	WOL_PATT_RPTR	= 0x0f2c,/*  8 bit	WOL Pattern Read Pointer */
+
+/* WOL Pattern Length Registers (YUKON only) */
+
+	WOL_PATT_LEN_LO	= 0x0f30,/* 32 bit	WOL Pattern Length 3..0 */
+	WOL_PATT_LEN_HI	= 0x0f34,/* 24 bit	WOL Pattern Length 6..4 */
+
+/* WOL Pattern Counter Registers (YUKON only) */
+
+	WOL_PATT_CNT_0	= 0x0f38,/* 32 bit	WOL Pattern Counter 3..0 */
+	WOL_PATT_CNT_4	= 0x0f3c,/* 24 bit	WOL Pattern Counter 6..4 */
+};
+#define WOL_REGS(port, x)	(x + (port)*0x80)
+
+enum {
+	WOL_PATT_RAM_1	= 0x1000,/*  WOL Pattern RAM Link 1 */
+	WOL_PATT_RAM_2	= 0x1400,/*  WOL Pattern RAM Link 2 */
+};
+#define WOL_PATT_RAM_BASE(port)	(WOL_PATT_RAM_1 + (port)*0x400)
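+
+/*
+ * Usage sketch: each port's WOL register block repeats every 0x80
+ * bytes and each port has a 0x400 byte pattern RAM window. Assuming
+ * the skge_write16() accessor from elsewhere in this header
+ * (WOL_CTL_DEFAULT is defined further below):
+ *
+ *	skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_DEFAULT);
+ */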
+
+enum {
+	BASE_XMAC_1	= 0x2000,/* XMAC 1 registers */
+	BASE_GMAC_1	= 0x2800,/* GMAC 1 registers */
+	BASE_XMAC_2	= 0x3000,/* XMAC 2 registers */
+	BASE_GMAC_2	= 0x3800,/* GMAC 2 registers */
+};
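+
+/*
+ * The two ports' MAC register blocks are evenly spaced, so a per-port
+ * base can be computed instead of special-cased, e.g.:
+ *
+ *	base = BASE_GMAC_1 + port * (BASE_GMAC_2 - BASE_GMAC_1);
+ */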
+
+/*
+ * Receive Frame Status Encoding
+ */
+enum {
+	XMR_FS_LEN	= 0x3fff<<18,	/* Bit 31..18:	Rx Frame Length */
+	XMR_FS_LEN_SHIFT = 18,
+	XMR_FS_2L_VLAN	= 1<<17, /* Bit 17:	tagged with 2-level VLAN ID */
+	XMR_FS_1L_VLAN	= 1<<16, /* Bit 16:	tagged with 1-level VLAN ID */
+	XMR_FS_BC	= 1<<15, /* Bit 15:	Broadcast Frame */
+	XMR_FS_MC	= 1<<14, /* Bit 14:	Multicast Frame */
+	XMR_FS_UC	= 1<<13, /* Bit 13:	Unicast Frame */
+
+	XMR_FS_BURST	= 1<<11, /* Bit 11:	Burst Mode */
+	XMR_FS_CEX_ERR	= 1<<10, /* Bit 10:	Carrier Ext. Error */
+	XMR_FS_802_3	= 1<<9, /* Bit  9:	802.3 Frame */
+	XMR_FS_COL_ERR	= 1<<8, /* Bit  8:	Collision Error */
+	XMR_FS_CAR_ERR	= 1<<7, /* Bit  7:	Carrier Event Error */
+	XMR_FS_LEN_ERR	= 1<<6, /* Bit  6:	In-Range Length Error */
+	XMR_FS_FRA_ERR	= 1<<5, /* Bit  5:	Framing Error */
+	XMR_FS_RUNT	= 1<<4, /* Bit  4:	Runt Frame */
+	XMR_FS_LNG_ERR	= 1<<3, /* Bit  3:	Giant (Jumbo) Frame */
+	XMR_FS_FCS_ERR	= 1<<2, /* Bit  2:	Frame Check Sequ Err */
+	XMR_FS_ERR	= 1<<1, /* Bit  1:	Frame Error */
+	XMR_FS_MCTRL	= 1<<0, /* Bit  0:	MAC Control Packet */
+
+/*
+ * XMR_FS_ERR will be set if
+ *	XMR_FS_FCS_ERR, XMR_FS_LNG_ERR, XMR_FS_RUNT,
+ *	XMR_FS_FRA_ERR, XMR_FS_LEN_ERR, or XMR_FS_CEX_ERR
+ * is set. XMR_FS_LNG_ERR and XMR_FS_LEN_ERR will issue
+ * XMR_FS_ERR unless the corresponding bit in the Receive Command
+ * Register is set.
+ */
+};
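+
+/*
+ * Usage sketch: the frame length is packed into the upper bits of the
+ * Rx status word, so a receive path typically does:
+ *
+ *	len = (status & XMR_FS_LEN) >> XMR_FS_LEN_SHIFT;
+ *	if (status & XMR_FS_ERR)
+ *		goto error;
+ */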
+
+/*
+ * XMAC-PHY Registers, indirect addressed over the XMAC
+ */
+enum {
+	PHY_XMAC_CTRL		= 0x00,/* 16 bit r/w	PHY Control Register */
+	PHY_XMAC_STAT		= 0x01,/* 16 bit r/w	PHY Status Register */
+	PHY_XMAC_ID0		= 0x02,/* 16 bit r/o	PHY ID0 Register */
+	PHY_XMAC_ID1		= 0x03,/* 16 bit r/o	PHY ID1 Register */
+	PHY_XMAC_AUNE_ADV	= 0x04,/* 16 bit r/w	Auto-Neg. Advertisement */
+	PHY_XMAC_AUNE_LP	= 0x05,/* 16 bit r/o	Link Partner Abi Reg */
+	PHY_XMAC_AUNE_EXP	= 0x06,/* 16 bit r/o	Auto-Neg. Expansion Reg */
+	PHY_XMAC_NEPG		= 0x07,/* 16 bit r/w	Next Page Register */
+	PHY_XMAC_NEPG_LP	= 0x08,/* 16 bit r/o	Next Page Link Partner */
+
+	PHY_XMAC_EXT_STAT	= 0x0f,/* 16 bit r/o	Ext Status Register */
+	PHY_XMAC_RES_ABI	= 0x10,/* 16 bit r/o	PHY Resolved Ability */
+};
+/*
+ * Broadcom-PHY Registers, indirect addressed over XMAC
+ */
+enum {
+	PHY_BCOM_CTRL		= 0x00,/* 16 bit r/w	PHY Control Register */
+	PHY_BCOM_STAT		= 0x01,/* 16 bit r/o	PHY Status Register */
+	PHY_BCOM_ID0		= 0x02,/* 16 bit r/o	PHY ID0 Register */
+	PHY_BCOM_ID1		= 0x03,/* 16 bit r/o	PHY ID1 Register */
+	PHY_BCOM_AUNE_ADV	= 0x04,/* 16 bit r/w	Auto-Neg. Advertisement */
+	PHY_BCOM_AUNE_LP	= 0x05,/* 16 bit r/o	Link Part Ability Reg */
+	PHY_BCOM_AUNE_EXP	= 0x06,/* 16 bit r/o	Auto-Neg. Expansion Reg */
+	PHY_BCOM_NEPG		= 0x07,/* 16 bit r/w	Next Page Register */
+	PHY_BCOM_NEPG_LP	= 0x08,/* 16 bit r/o	Next Page Link Partner */
+	/* Broadcom-specific registers */
+	PHY_BCOM_1000T_CTRL	= 0x09,/* 16 bit r/w	1000Base-T Control Reg */
+	PHY_BCOM_1000T_STAT	= 0x0a,/* 16 bit r/o	1000Base-T Status Reg */
+	PHY_BCOM_EXT_STAT	= 0x0f,/* 16 bit r/o	Extended Status Reg */
+	PHY_BCOM_P_EXT_CTRL	= 0x10,/* 16 bit r/w	PHY Extended Ctrl Reg */
+	PHY_BCOM_P_EXT_STAT	= 0x11,/* 16 bit r/o	PHY Extended Stat Reg */
+	PHY_BCOM_RE_CTR		= 0x12,/* 16 bit r/w	Receive Error Counter */
+	PHY_BCOM_FC_CTR		= 0x13,/* 16 bit r/w	False Carrier Sense Cnt */
+	PHY_BCOM_RNO_CTR	= 0x14,/* 16 bit r/w	Receiver NOT_OK Cnt */
+
+	PHY_BCOM_AUX_CTRL	= 0x18,/* 16 bit r/w	Auxiliary Control Reg */
+	PHY_BCOM_AUX_STAT	= 0x19,/* 16 bit r/o	Auxiliary Stat Summary */
+	PHY_BCOM_INT_STAT	= 0x1a,/* 16 bit r/o	Interrupt Status Reg */
+	PHY_BCOM_INT_MASK	= 0x1b,/* 16 bit r/w	Interrupt Mask Reg */
+};
+
+/*
+ * Marvell-PHY Registers, indirect addressed over GMAC
+ */
+enum {
+	PHY_MARV_CTRL		= 0x00,/* 16 bit r/w	PHY Control Register */
+	PHY_MARV_STAT		= 0x01,/* 16 bit r/o	PHY Status Register */
+	PHY_MARV_ID0		= 0x02,/* 16 bit r/o	PHY ID0 Register */
+	PHY_MARV_ID1		= 0x03,/* 16 bit r/o	PHY ID1 Register */
+	PHY_MARV_AUNE_ADV	= 0x04,/* 16 bit r/w	Auto-Neg. Advertisement */
+	PHY_MARV_AUNE_LP	= 0x05,/* 16 bit r/o	Link Part Ability Reg */
+	PHY_MARV_AUNE_EXP	= 0x06,/* 16 bit r/o	Auto-Neg. Expansion Reg */
+	PHY_MARV_NEPG		= 0x07,/* 16 bit r/w	Next Page Register */
+	PHY_MARV_NEPG_LP	= 0x08,/* 16 bit r/o	Next Page Link Partner */
+	/* Marvell-specific registers */
+	PHY_MARV_1000T_CTRL	= 0x09,/* 16 bit r/w	1000Base-T Control Reg */
+	PHY_MARV_1000T_STAT	= 0x0a,/* 16 bit r/o	1000Base-T Status Reg */
+	PHY_MARV_EXT_STAT	= 0x0f,/* 16 bit r/o	Extended Status Reg */
+	PHY_MARV_PHY_CTRL	= 0x10,/* 16 bit r/w	PHY Specific Ctrl Reg */
+	PHY_MARV_PHY_STAT	= 0x11,/* 16 bit r/o	PHY Specific Stat Reg */
+	PHY_MARV_INT_MASK	= 0x12,/* 16 bit r/w	Interrupt Mask Reg */
+	PHY_MARV_INT_STAT	= 0x13,/* 16 bit r/o	Interrupt Status Reg */
+	PHY_MARV_EXT_CTRL	= 0x14,/* 16 bit r/w	Ext. PHY Specific Ctrl */
+	PHY_MARV_RXE_CNT	= 0x15,/* 16 bit r/w	Receive Error Counter */
+	PHY_MARV_EXT_ADR	= 0x16,/* 16 bit r/w	Ext. Ad. for Cable Diag. */
+	PHY_MARV_PORT_IRQ	= 0x17,/* 16 bit r/o	Port 0 IRQ (88E1111 only) */
+	PHY_MARV_LED_CTRL	= 0x18,/* 16 bit r/w	LED Control Reg */
+	PHY_MARV_LED_OVER	= 0x19,/* 16 bit r/w	Manual LED Override Reg */
+	PHY_MARV_EXT_CTRL_2	= 0x1a,/* 16 bit r/w	Ext. PHY Specific Ctrl 2 */
+	PHY_MARV_EXT_P_STAT	= 0x1b,/* 16 bit r/w	Ext. PHY Spec. Stat Reg */
+	PHY_MARV_CABLE_DIAG	= 0x1c,/* 16 bit r/o	Cable Diagnostic Reg */
+	PHY_MARV_PAGE_ADDR	= 0x1d,/* 16 bit r/w	Extended Page Address Reg */
+	PHY_MARV_PAGE_DATA	= 0x1e,/* 16 bit r/w	Extended Page Data Reg */
+
+/* for 10/100 Fast Ethernet PHY (88E3082 only) */
+	PHY_MARV_FE_LED_PAR	= 0x16,/* 16 bit r/w	LED Parallel Select Reg. */
+	PHY_MARV_FE_LED_SER	= 0x17,/* 16 bit r/w	LED Stream Select S. LED */
+	PHY_MARV_FE_VCT_TX	= 0x1a,/* 16 bit r/w	VCT Reg. for TXP/N Pins */
+	PHY_MARV_FE_VCT_RX	= 0x1b,/* 16 bit r/o	VCT Reg. for RXP/N Pins */
+	PHY_MARV_FE_SPEC_2	= 0x1c,/* 16 bit r/w	Specific Control Reg. 2 */
+};
+
+enum {
+	PHY_CT_RESET	= 1<<15, /* Bit 15: (sc)	clear all PHY related regs */
+	PHY_CT_LOOP	= 1<<14, /* Bit 14:	enable Loopback over PHY */
+	PHY_CT_SPS_LSB	= 1<<13, /* Bit 13:	Speed select, lower bit */
+	PHY_CT_ANE	= 1<<12, /* Bit 12:	Auto-Negotiation Enabled */
+	PHY_CT_PDOWN	= 1<<11, /* Bit 11:	Power Down Mode */
+	PHY_CT_ISOL	= 1<<10, /* Bit 10:	Isolate Mode */
+	PHY_CT_RE_CFG	= 1<<9, /* Bit  9:	(sc) Restart Auto-Negotiation */
+	PHY_CT_DUP_MD	= 1<<8, /* Bit  8:	Duplex Mode */
+	PHY_CT_COL_TST	= 1<<7, /* Bit  7:	Collision Test enabled */
+	PHY_CT_SPS_MSB	= 1<<6, /* Bit  6:	Speed select, upper bit */
+};
+
+enum {
+	PHY_CT_SP1000	= PHY_CT_SPS_MSB, /* enable speed of 1000 Mbps */
+	PHY_CT_SP100	= PHY_CT_SPS_LSB, /* enable speed of  100 Mbps */
+	PHY_CT_SP10	= 0,		  /* enable speed of   10 Mbps */
+};
+
+enum {
+	PHY_ST_EXT_ST	= 1<<8, /* Bit  8:	Extended Status Present */
+
+	PHY_ST_PRE_SUP	= 1<<6, /* Bit  6:	Preamble Suppression */
+	PHY_ST_AN_OVER	= 1<<5, /* Bit  5:	Auto-Negotiation Over */
+	PHY_ST_REM_FLT	= 1<<4, /* Bit  4:	Remote Fault Condition Occurred */
+	PHY_ST_AN_CAP	= 1<<3, /* Bit  3:	Auto-Negotiation Capability */
+	PHY_ST_LSYNC	= 1<<2, /* Bit  2:	Link Synchronized */
+	PHY_ST_JAB_DET	= 1<<1, /* Bit  1:	Jabber Detected */
+	PHY_ST_EXT_REG	= 1<<0, /* Bit  0:	Extended Register available */
+};
+
+enum {
+	PHY_I1_OUI_MSK	= 0x3f<<10, /* Bit 15..10:	Organization Unique ID */
+	PHY_I1_MOD_NUM	= 0x3f<<4, /* Bit  9.. 4:	Model Number */
+	PHY_I1_REV_MSK	= 0xf, /* Bit  3.. 0:	Revision Number */
+};
+
+/* different Broadcom PHY Ids */
+enum {
+	PHY_BCOM_ID1_A1	= 0x6041,
+	PHY_BCOM_ID1_B2 = 0x6043,
+	PHY_BCOM_ID1_C0	= 0x6044,
+	PHY_BCOM_ID1_C5	= 0x6047,
+};
+
+/* different Marvell PHY Ids */
+enum {
+	PHY_MARV_ID0_VAL= 0x0141, /* Marvell Unique Identifier */
+	PHY_MARV_ID1_B0	= 0x0C23, /* Yukon (PHY 88E1011) */
+	PHY_MARV_ID1_B2	= 0x0C25, /* Yukon-Plus (PHY 88E1011) */
+	PHY_MARV_ID1_C2	= 0x0CC2, /* Yukon-EC (PHY 88E1111) */
+	PHY_MARV_ID1_Y2	= 0x0C91, /* Yukon-2 (PHY 88E1112) */
+};
+
+/* Advertisement register bits */
+enum {
+	PHY_AN_NXT_PG	= 1<<15, /* Bit 15:	Request Next Page */
+	PHY_AN_ACK	= 1<<14, /* Bit 14:	(ro) Acknowledge Received */
+	PHY_AN_RF	= 1<<13, /* Bit 13:	Remote Fault Bits */
+
+	PHY_AN_PAUSE_ASYM = 1<<11,/* Bit 11:	Try for asymmetric */
+	PHY_AN_PAUSE_CAP = 1<<10, /* Bit 10:	Try for pause */
+	PHY_AN_100BASE4	= 1<<9, /* Bit 9:	Try for 100Base-T4 */
+	PHY_AN_100FULL	= 1<<8, /* Bit 8:	Try for 100mbps full-duplex */
+	PHY_AN_100HALF	= 1<<7, /* Bit 7:	Try for 100mbps half-duplex */
+	PHY_AN_10FULL	= 1<<6, /* Bit 6:	Try for 10mbps full-duplex */
+	PHY_AN_10HALF	= 1<<5, /* Bit 5:	Try for 10mbps half-duplex */
+	PHY_AN_CSMA	= 1<<0, /* Bit 0:	Only selector supported */
+	PHY_AN_SEL	= 0x1f, /* Bit 4..0:	Selector Field, 00001=Ethernet*/
+	PHY_AN_FULL	= PHY_AN_100FULL | PHY_AN_10FULL | PHY_AN_CSMA,
+	PHY_AN_ALL	= PHY_AN_10HALF | PHY_AN_10FULL |
+		  	  PHY_AN_100HALF | PHY_AN_100FULL,
+};
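+
+/*
+ * Usage sketch: an advertisement word is ORed together from the bits
+ * above and written to the PHY advertisement register, e.g. offering
+ * all 10/100 modes plus pause:
+ *
+ *	adv = PHY_AN_CSMA | PHY_AN_ALL | PHY_AN_PAUSE_CAP;
+ */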
+
+/* Xmac Specific */
+enum {
+	PHY_X_AN_NXT_PG	= 1<<15, /* Bit 15:	Request Next Page */
+	PHY_X_AN_ACK	= 1<<14, /* Bit 14:	(ro) Acknowledge Received */
+	PHY_X_AN_RFB	= 3<<12,/* Bit 13..12:	Remote Fault Bits */
+
+	PHY_X_AN_PAUSE	= 3<<7,/* Bit  8.. 7:	Pause Bits */
+	PHY_X_AN_HD	= 1<<6, /* Bit  6:	Half Duplex */
+	PHY_X_AN_FD	= 1<<5, /* Bit  5:	Full Duplex */
+};
+
+/* Pause Bits (PHY_X_AN_PAUSE and PHY_X_RS_PAUSE) encoding */
+enum {
+	PHY_X_P_NO_PAUSE= 0<<7,/* Bit  8..7:	no Pause Mode */
+	PHY_X_P_SYM_MD	= 1<<7, /* Bit  8..7:	symmetric Pause Mode */
+	PHY_X_P_ASYM_MD	= 2<<7,/* Bit  8..7:	asymmetric Pause Mode */
+	PHY_X_P_BOTH_MD	= 3<<7,/* Bit  8..7:	both Pause Mode */
+};
+
+
+/*****  PHY_XMAC_EXT_STAT	16 bit r/w	Extended Status Register *****/
+enum {
+	PHY_X_EX_FD	= 1<<15, /* Bit 15:	Device Supports Full Duplex */
+	PHY_X_EX_HD	= 1<<14, /* Bit 14:	Device Supports Half Duplex */
+};
+
+/*****  PHY_XMAC_RES_ABI	16 bit r/o	PHY Resolved Ability *****/
+enum {
+	PHY_X_RS_PAUSE	= 3<<7,	/* Bit  8..7:	selected Pause Mode */
+	PHY_X_RS_HD	= 1<<6,	/* Bit  6:	Half Duplex Mode selected */
+	PHY_X_RS_FD	= 1<<5,	/* Bit  5:	Full Duplex Mode selected */
+	PHY_X_RS_ABLMIS = 1<<4,	/* Bit  4:	duplex or pause cap mismatch */
+	PHY_X_RS_PAUMIS = 1<<3,	/* Bit  3:	pause capability mismatch */
+};
+
+/* Remote Fault Bits (PHY_X_AN_RFB) encoding */
+enum {
+	X_RFB_OK	= 0<<12,/* Bit 13..12	No errors, Link OK */
+	X_RFB_LF	= 1<<12,/* Bit 13..12	Link Failure */
+	X_RFB_OFF	= 2<<12,/* Bit 13..12	Offline */
+	X_RFB_AN_ERR	= 3<<12,/* Bit 13..12	Auto-Negotiation Error */
+};
+
+/* Broadcom-Specific */
+/*****  PHY_BCOM_1000T_CTRL	16 bit r/w	1000Base-T Control Reg *****/
+enum {
+	PHY_B_1000C_TEST	= 7<<13,/* Bit 15..13:	Test Modes */
+	PHY_B_1000C_MSE	= 1<<12, /* Bit 12:	Master/Slave Enable */
+	PHY_B_1000C_MSC	= 1<<11, /* Bit 11:	M/S Configuration */
+	PHY_B_1000C_RD	= 1<<10, /* Bit 10:	Repeater/DTE */
+	PHY_B_1000C_AFD	= 1<<9, /* Bit  9:	Advertise Full Duplex */
+	PHY_B_1000C_AHD	= 1<<8, /* Bit  8:	Advertise Half Duplex */
+};
+
+/*****  PHY_BCOM_1000T_STAT	16 bit r/o	1000Base-T Status Reg *****/
+/*****  PHY_MARV_1000T_STAT	16 bit r/o	1000Base-T Status Reg *****/
+enum {
+	PHY_B_1000S_MSF	= 1<<15, /* Bit 15:	Master/Slave Fault */
+	PHY_B_1000S_MSR	= 1<<14, /* Bit 14:	Master/Slave Result */
+	PHY_B_1000S_LRS	= 1<<13, /* Bit 13:	Local Receiver Status */
+	PHY_B_1000S_RRS	= 1<<12, /* Bit 12:	Remote Receiver Status */
+	PHY_B_1000S_LP_FD	= 1<<11, /* Bit 11:	Link Partner can FD */
+	PHY_B_1000S_LP_HD	= 1<<10, /* Bit 10:	Link Partner can HD */
+									/* Bit  9..8:	reserved */
+	PHY_B_1000S_IEC	= 0xff, /* Bit  7..0:	Idle Error Count */
+};
+
+/*****  PHY_BCOM_EXT_STAT	16 bit r/o	Extended Status Register *****/
+enum {
+	PHY_B_ES_X_FD_CAP	= 1<<15, /* Bit 15:	1000Base-X FD capable */
+	PHY_B_ES_X_HD_CAP	= 1<<14, /* Bit 14:	1000Base-X HD capable */
+	PHY_B_ES_T_FD_CAP	= 1<<13, /* Bit 13:	1000Base-T FD capable */
+	PHY_B_ES_T_HD_CAP	= 1<<12, /* Bit 12:	1000Base-T HD capable */
+};
+
+/*****  PHY_BCOM_P_EXT_CTRL	16 bit r/w	PHY Extended Control Reg *****/
+enum {
+	PHY_B_PEC_MAC_PHY	= 1<<15, /* Bit 15:	10BIT/GMII Interface */
+	PHY_B_PEC_DIS_CROSS	= 1<<14, /* Bit 14:	Disable MDI Crossover */
+	PHY_B_PEC_TX_DIS	= 1<<13, /* Bit 13:	Tx output Disabled */
+	PHY_B_PEC_INT_DIS	= 1<<12, /* Bit 12:	Interrupts Disabled */
+	PHY_B_PEC_F_INT	= 1<<11, /* Bit 11:	Force Interrupt */
+	PHY_B_PEC_BY_45	= 1<<10, /* Bit 10:	Bypass 4B5B-Decoder */
+	PHY_B_PEC_BY_SCR	= 1<<9, /* Bit  9:	Bypass Scrambler */
+	PHY_B_PEC_BY_MLT3	= 1<<8, /* Bit  8:	Bypass MLT3 Encoder */
+	PHY_B_PEC_BY_RXA	= 1<<7, /* Bit  7:	Bypass Rx Alignm. */
+	PHY_B_PEC_RES_SCR	= 1<<6, /* Bit  6:	Reset Scrambler */
+	PHY_B_PEC_EN_LTR	= 1<<5, /* Bit  5:	Ena LED Traffic Mode */
+	PHY_B_PEC_LED_ON	= 1<<4, /* Bit  4:	Force LED's on */
+	PHY_B_PEC_LED_OFF	= 1<<3, /* Bit  3:	Force LED's off */
+	PHY_B_PEC_EX_IPG	= 1<<2, /* Bit  2:	Extend Tx IPG Mode */
+	PHY_B_PEC_3_LED	= 1<<1, /* Bit  1:	Three Link LED mode */
+	PHY_B_PEC_HIGH_LA	= 1<<0, /* Bit  0:	GMII FIFO Elasticity */
+};
+
+/*****  PHY_BCOM_P_EXT_STAT	16 bit r/o	PHY Extended Status Reg *****/
+enum {
+	PHY_B_PES_CROSS_STAT	= 1<<13, /* Bit 13:	MDI Crossover Status */
+	PHY_B_PES_INT_STAT	= 1<<12, /* Bit 12:	Interrupt Status */
+	PHY_B_PES_RRS	= 1<<11, /* Bit 11:	Remote Receiver Stat. */
+	PHY_B_PES_LRS	= 1<<10, /* Bit 10:	Local Receiver Stat. */
+	PHY_B_PES_LOCKED	= 1<<9, /* Bit  9:	Locked */
+	PHY_B_PES_LS	= 1<<8, /* Bit  8:	Link Status */
+	PHY_B_PES_RF	= 1<<7, /* Bit  7:	Remote Fault */
+	PHY_B_PES_CE_ER	= 1<<6, /* Bit  6:	Carrier Ext Error */
+	PHY_B_PES_BAD_SSD	= 1<<5, /* Bit  5:	Bad SSD */
+	PHY_B_PES_BAD_ESD	= 1<<4, /* Bit  4:	Bad ESD */
+	PHY_B_PES_RX_ER	= 1<<3, /* Bit  3:	Receive Error */
+	PHY_B_PES_TX_ER	= 1<<2, /* Bit  2:	Transmit Error */
+	PHY_B_PES_LOCK_ER	= 1<<1, /* Bit  1:	Lock Error */
+	PHY_B_PES_MLT3_ER	= 1<<0, /* Bit  0:	MLT3 code Error */
+};
+
+/*  PHY_BCOM_AUNE_ADV	16 bit r/w	Auto-Negotiation Advertisement *****/
+/*  PHY_BCOM_AUNE_LP	16 bit r/o	Link Partner Ability Reg *****/
+enum {
+	PHY_B_AN_RF	= 1<<13, /* Bit 13:	Remote Fault */
+
+	PHY_B_AN_ASP	= 1<<11, /* Bit 11:	Asymmetric Pause */
+	PHY_B_AN_PC	= 1<<10, /* Bit 10:	Pause Capable */
+};
+
+
+/*****  PHY_BCOM_FC_CTR		16 bit r/w	False Carrier Counter *****/
+enum {
+	PHY_B_FC_CTR	= 0xff, /* Bit  7..0:	False Carrier Counter */
+
+/*****  PHY_BCOM_RNO_CTR	16 bit r/w	Receive NOT_OK Counter *****/
+	PHY_B_RC_LOC_MSK	= 0xff00, /* Bit 15..8:	Local Rx NOT_OK cnt */
+	PHY_B_RC_REM_MSK	= 0x00ff, /* Bit  7..0:	Remote Rx NOT_OK cnt */
+
+/*****  PHY_BCOM_AUX_CTRL	16 bit r/w	Auxiliary Control Reg *****/
+	PHY_B_AC_L_SQE		= 1<<15, /* Bit 15:	Low Squelch */
+	PHY_B_AC_LONG_PACK	= 1<<14, /* Bit 14:	Rx Long Packets */
+	PHY_B_AC_ER_CTRL	= 3<<12,/* Bit 13..12:	Edgerate Control */
+									/* Bit 11:	reserved */
+	PHY_B_AC_TX_TST	= 1<<10, /* Bit 10:	Tx test bit, always 1 */
+									/* Bit  9.. 8:	reserved */
+	PHY_B_AC_DIS_PRF	= 1<<7, /* Bit  7:	disable Partial Resp. Filter */
+									/* Bit  6:	reserved */
+	PHY_B_AC_DIS_PM	= 1<<5, /* Bit  5:	disable Power Management */
+									/* Bit  4:	reserved */
+	PHY_B_AC_DIAG	= 1<<3, /* Bit  3:	Diagnostic Mode */
+};
+
+/*****  PHY_BCOM_AUX_STAT	16 bit r/o	Auxiliary Status Reg *****/
+enum {
+	PHY_B_AS_AN_C	= 1<<15, /* Bit 15:	AutoNeg complete */
+	PHY_B_AS_AN_CA	= 1<<14, /* Bit 14:	AN Complete Ack */
+	PHY_B_AS_ANACK_D	= 1<<13, /* Bit 13:	AN Ack Detect */
+	PHY_B_AS_ANAB_D	= 1<<12, /* Bit 12:	AN Ability Detect */
+	PHY_B_AS_NPW	= 1<<11, /* Bit 11:	AN Next Page Wait */
+	PHY_B_AS_AN_RES_MSK	= 7<<8,/* Bit 10..8:	AN HCD */
+	PHY_B_AS_PDF	= 1<<7, /* Bit  7:	Parallel Detect. Fault */
+	PHY_B_AS_RF	= 1<<6, /* Bit  6:	Remote Fault */
+	PHY_B_AS_ANP_R	= 1<<5, /* Bit  5:	AN Page Received */
+	PHY_B_AS_LP_ANAB	= 1<<4, /* Bit  4:	LP AN Ability */
+	PHY_B_AS_LP_NPAB	= 1<<3, /* Bit  3:	LP Next Page Ability */
+	PHY_B_AS_LS	= 1<<2, /* Bit  2:	Link Status */
+	PHY_B_AS_PRR	= 1<<1, /* Bit  1:	Pause Resolution-Rx */
+	PHY_B_AS_PRT	= 1<<0, /* Bit  0:	Pause Resolution-Tx */
+};
+#define PHY_B_AS_PAUSE_MSK	(PHY_B_AS_PRR | PHY_B_AS_PRT)
+
+/*****  PHY_BCOM_INT_STAT	16 bit r/o	Interrupt Status Reg *****/
+/*****  PHY_BCOM_INT_MASK	16 bit r/w	Interrupt Mask Reg *****/
+enum {
+	PHY_B_IS_PSE	= 1<<14, /* Bit 14:	Pair Swap Error */
+	PHY_B_IS_MDXI_SC	= 1<<13, /* Bit 13:	MDIX Status Change */
+	PHY_B_IS_HCT	= 1<<12, /* Bit 12:	counter above 32k */
+	PHY_B_IS_LCT	= 1<<11, /* Bit 11:	counter above 128 */
+	PHY_B_IS_AN_PR	= 1<<10, /* Bit 10:	Page Received */
+	PHY_B_IS_NO_HDCL	= 1<<9, /* Bit  9:	No HCD Link */
+	PHY_B_IS_NO_HDC	= 1<<8, /* Bit  8:	No HCD */
+	PHY_B_IS_NEG_USHDC	= 1<<7, /* Bit  7:	Negotiated Unsup. HCD */
+	PHY_B_IS_SCR_S_ER	= 1<<6, /* Bit  6:	Scrambler Sync Error */
+	PHY_B_IS_RRS_CHANGE	= 1<<5, /* Bit  5:	Remote Rx Stat Change */
+	PHY_B_IS_LRS_CHANGE	= 1<<4, /* Bit  4:	Local Rx Stat Change */
+	PHY_B_IS_DUP_CHANGE	= 1<<3, /* Bit  3:	Duplex Mode Change */
+	PHY_B_IS_LSP_CHANGE	= 1<<2, /* Bit  2:	Link Speed Change */
+	PHY_B_IS_LST_CHANGE	= 1<<1, /* Bit  1:	Link Status Changed */
+	PHY_B_IS_CRC_ER	= 1<<0, /* Bit  0:	CRC Error */
+};
+#define PHY_B_DEF_MSK	\
+	(~(PHY_B_IS_PSE | PHY_B_IS_AN_PR | PHY_B_IS_DUP_CHANGE | \
+	    PHY_B_IS_LSP_CHANGE | PHY_B_IS_LST_CHANGE))
+
+/* Pause Bits (PHY_B_AN_ASP and PHY_B_AN_PC) encoding */
+enum {
+	PHY_B_P_NO_PAUSE	= 0<<10,/* Bit 11..10:	no Pause Mode */
+	PHY_B_P_SYM_MD	= 1<<10, /* Bit 11..10:	symmetric Pause Mode */
+	PHY_B_P_ASYM_MD	= 2<<10,/* Bit 11..10:	asymmetric Pause Mode */
+	PHY_B_P_BOTH_MD	= 3<<10,/* Bit 11..10:	both Pause Mode */
+};
+/*
+ * Resolved Duplex mode and Capabilities (Aux Status Summary Reg)
+ */
+enum {
+	PHY_B_RES_1000FD	= 7<<8,/* Bit 10..8:	1000Base-T Full Dup. */
+	PHY_B_RES_1000HD	= 6<<8,/* Bit 10..8:	1000Base-T Half Dup. */
+};
+
+/* Marvell-Specific */
+enum {
+	PHY_M_AN_NXT_PG	= 1<<15, /* Request Next Page */
+	PHY_M_AN_ACK	= 1<<14, /* (ro)	Acknowledge Received */
+	PHY_M_AN_RF	= 1<<13, /* Remote Fault */
+
+	PHY_M_AN_ASP	= 1<<11, /* Asymmetric Pause */
+	PHY_M_AN_PC	= 1<<10, /* MAC Pause implemented */
+	PHY_M_AN_100_T4	= 1<<9, /* Not cap. 100Base-T4 (always 0) */
+	PHY_M_AN_100_FD	= 1<<8, /* Advertise 100Base-TX Full Duplex */
+	PHY_M_AN_100_HD	= 1<<7, /* Advertise 100Base-TX Half Duplex */
+	PHY_M_AN_10_FD	= 1<<6, /* Advertise 10Base-TX Full Duplex */
+	PHY_M_AN_10_HD	= 1<<5, /* Advertise 10Base-TX Half Duplex */
+	PHY_M_AN_SEL_MSK = 0x1f,	/* Bit  4.. 0: Selector Field Mask */
+};
+
+/* special defines for FIBER (88E1011S only) */
+enum {
+	PHY_M_AN_ASP_X		= 1<<8, /* Asymmetric Pause */
+	PHY_M_AN_PC_X		= 1<<7, /* MAC Pause implemented */
+	PHY_M_AN_1000X_AHD	= 1<<6, /* Advertise 1000Base-X Half Duplex */
+	PHY_M_AN_1000X_AFD	= 1<<5, /* Advertise 1000Base-X Full Duplex */
+};
+
+/* Pause Bits (PHY_M_AN_ASP_X and PHY_M_AN_PC_X) encoding */
+enum {
+	PHY_M_P_NO_PAUSE_X	= 0<<7,/* Bit  8.. 7:	no Pause Mode */
+	PHY_M_P_SYM_MD_X	= 1<<7, /* Bit  8.. 7:	symmetric Pause Mode */
+	PHY_M_P_ASYM_MD_X	= 2<<7,/* Bit  8.. 7:	asymmetric Pause Mode */
+	PHY_M_P_BOTH_MD_X	= 3<<7,/* Bit  8.. 7:	both Pause Mode */
+};
+
+/*****  PHY_MARV_1000T_CTRL	16 bit r/w	1000Base-T Control Reg *****/
+enum {
+	PHY_M_1000C_TEST= 7<<13,/* Bit 15..13:	Test Modes */
+	PHY_M_1000C_MSE	= 1<<12, /* Manual Master/Slave Enable */
+	PHY_M_1000C_MSC	= 1<<11, /* M/S Configuration (1=Master) */
+	PHY_M_1000C_MPD	= 1<<10, /* Multi-Port Device */
+	PHY_M_1000C_AFD	= 1<<9, /* Advertise Full Duplex */
+	PHY_M_1000C_AHD	= 1<<8, /* Advertise Half Duplex */
+};
+
+/*****  PHY_MARV_PHY_CTRL	16 bit r/w	PHY Specific Ctrl Reg *****/
+enum {
+	PHY_M_PC_TX_FFD_MSK	= 3<<14,/* Bit 15..14: Tx FIFO Depth Mask */
+	PHY_M_PC_RX_FFD_MSK	= 3<<12,/* Bit 13..12: Rx FIFO Depth Mask */
+	PHY_M_PC_ASS_CRS_TX	= 1<<11, /* Assert CRS on Transmit */
+	PHY_M_PC_FL_GOOD	= 1<<10, /* Force Link Good */
+	PHY_M_PC_EN_DET_MSK	= 3<<8,/* Bit  9.. 8: Energy Detect Mask */
+	PHY_M_PC_ENA_EXT_D	= 1<<7, /* Enable Ext. Distance (10BT) */
+	PHY_M_PC_MDIX_MSK	= 3<<5,/* Bit  6.. 5: MDI/MDIX Config. Mask */
+	PHY_M_PC_DIS_125CLK	= 1<<4, /* Disable 125 CLK */
+	PHY_M_PC_MAC_POW_UP	= 1<<3, /* MAC Power up */
+	PHY_M_PC_SQE_T_ENA	= 1<<2, /* SQE Test Enabled */
+	PHY_M_PC_POL_R_DIS	= 1<<1, /* Polarity Reversal Disabled */
+	PHY_M_PC_DIS_JABBER	= 1<<0, /* Disable Jabber */
+};
+
+enum {
+	PHY_M_PC_EN_DET		= 2<<8,	/* Energy Detect (Mode 1) */
+	PHY_M_PC_EN_DET_PLUS	= 3<<8, /* Energy Detect Plus (Mode 2) */
+};
+
+enum {
+	PHY_M_PC_MAN_MDI	= 0, /* 00 = Manual MDI configuration */
+	PHY_M_PC_MAN_MDIX	= 1, /* 01 = Manual MDIX configuration */
+	PHY_M_PC_ENA_AUTO	= 3, /* 11 = Enable Automatic Crossover */
+};
+
+/* for 10/100 Fast Ethernet PHY (88E3082 only) */
+enum {
+	PHY_M_PC_ENA_DTE_DT	= 1<<15, /* Enable Data Terminal Equ. (DTE) Detect */
+	PHY_M_PC_ENA_ENE_DT	= 1<<14, /* Enable Energy Detect (sense & pulse) */
+	PHY_M_PC_DIS_NLP_CK	= 1<<13, /* Disable Normal Link Pulse (NLP) Check */
+	PHY_M_PC_ENA_LIP_NP	= 1<<12, /* Enable Link Partner Next Page Reg. */
+	PHY_M_PC_DIS_NLP_GN	= 1<<11, /* Disable Normal Link Pulse Generation */
+
+	PHY_M_PC_DIS_SCRAMB	= 1<<9, /* Disable Scrambler */
+	PHY_M_PC_DIS_FEFI	= 1<<8, /* Disable Far End Fault Indic. (FEFI) */
+
+	PHY_M_PC_SH_TP_SEL	= 1<<6, /* Shielded Twisted Pair Select */
+	PHY_M_PC_RX_FD_MSK	= 3<<2,/* Bit  3.. 2: Rx FIFO Depth Mask */
+};
+
+/*****  PHY_MARV_PHY_STAT	16 bit r/o	PHY Specific Status Reg *****/
+enum {
+	PHY_M_PS_SPEED_MSK	= 3<<14, /* Bit 15..14: Speed Mask */
+	PHY_M_PS_SPEED_1000	= 1<<15, /*		10 = 1000 Mbps */
+	PHY_M_PS_SPEED_100	= 1<<14, /*		01 =  100 Mbps */
+	PHY_M_PS_SPEED_10	= 0,	 /*		00 =   10 Mbps */
+	PHY_M_PS_FULL_DUP	= 1<<13, /* Full Duplex */
+	PHY_M_PS_PAGE_REC	= 1<<12, /* Page Received */
+	PHY_M_PS_SPDUP_RES	= 1<<11, /* Speed & Duplex Resolved */
+	PHY_M_PS_LINK_UP	= 1<<10, /* Link Up */
+	PHY_M_PS_CABLE_MSK	= 7<<7,  /* Bit  9.. 7: Cable Length Mask */
+	PHY_M_PS_MDI_X_STAT	= 1<<6,  /* MDI Crossover Stat (1=MDIX) */
+	PHY_M_PS_DOWNS_STAT	= 1<<5,  /* Downshift Status (1=downsh.) */
+	PHY_M_PS_ENDET_STAT	= 1<<4,  /* Energy Detect Status (1=act) */
+	PHY_M_PS_TX_P_EN	= 1<<3,  /* Tx Pause Enabled */
+	PHY_M_PS_RX_P_EN	= 1<<2,  /* Rx Pause Enabled */
+	PHY_M_PS_POL_REV	= 1<<1,  /* Polarity Reversed */
+	PHY_M_PS_JABBER		= 1<<0,  /* Jabber */
+};
+
+#define PHY_M_PS_PAUSE_MSK	(PHY_M_PS_TX_P_EN | PHY_M_PS_RX_P_EN)
+
+/* for 10/100 Fast Ethernet PHY (88E3082 only) */
+enum {
+	PHY_M_PS_DTE_DETECT	= 1<<15, /* Data Terminal Equipment (DTE) Detected */
+	PHY_M_PS_RES_SPEED	= 1<<14, /* Resolved Speed (1=100 Mbps, 0=10 Mbps) */
+};
+
+enum {
+	PHY_M_IS_AN_ERROR	= 1<<15, /* Auto-Negotiation Error */
+	PHY_M_IS_LSP_CHANGE	= 1<<14, /* Link Speed Changed */
+	PHY_M_IS_DUP_CHANGE	= 1<<13, /* Duplex Mode Changed */
+	PHY_M_IS_AN_PR		= 1<<12, /* Page Received */
+	PHY_M_IS_AN_COMPL	= 1<<11, /* Auto-Negotiation Completed */
+	PHY_M_IS_LST_CHANGE	= 1<<10, /* Link Status Changed */
+	PHY_M_IS_SYMB_ERROR	= 1<<9, /* Symbol Error */
+	PHY_M_IS_FALSE_CARR	= 1<<8, /* False Carrier */
+	PHY_M_IS_FIFO_ERROR	= 1<<7, /* FIFO Overflow/Underrun Error */
+	PHY_M_IS_MDI_CHANGE	= 1<<6, /* MDI Crossover Changed */
+	PHY_M_IS_DOWNSH_DET	= 1<<5, /* Downshift Detected */
+	PHY_M_IS_END_CHANGE	= 1<<4, /* Energy Detect Changed */
+
+	PHY_M_IS_DTE_CHANGE	= 1<<2, /* DTE Power Det. Status Changed */
+	PHY_M_IS_POL_CHANGE	= 1<<1, /* Polarity Changed */
+	PHY_M_IS_JABBER		= 1<<0, /* Jabber */
+
+	PHY_M_IS_DEF_MSK	= PHY_M_IS_AN_ERROR | PHY_M_IS_LSP_CHANGE |
+				  PHY_M_IS_LST_CHANGE | PHY_M_IS_FIFO_ERROR,
+
+	PHY_M_IS_AN_MSK		= PHY_M_IS_AN_ERROR | PHY_M_IS_AN_COMPL,
+};
+
+/*****  PHY_MARV_EXT_CTRL	16 bit r/w	Ext. PHY Specific Ctrl *****/
+enum {
+	PHY_M_EC_ENA_BC_EXT = 1<<15, /* Enable Block Carr. Ext. (88E1111 only) */
+	PHY_M_EC_ENA_LIN_LB = 1<<14, /* Enable Line Loopback (88E1111 only) */
+
+	PHY_M_EC_DIS_LINK_P = 1<<12, /* Disable Link Pulses (88E1111 only) */
+	PHY_M_EC_M_DSC_MSK  = 3<<10, /* Bit 11..10:	Master Downshift Counter */
+					/* (88E1011 only) */
+	PHY_M_EC_S_DSC_MSK  = 3<<8,  /* Bit  9.. 8:	Slave  Downshift Counter */
+				       /* (88E1011 only) */
+	PHY_M_EC_M_DSC_MSK2  = 7<<9, /* Bit 11.. 9:	Master Downshift Counter */
+					/* (88E1111 only) */
+	PHY_M_EC_DOWN_S_ENA  = 1<<8, /* Downshift Enable (88E1111 only) */
+					/* !!! Errata in spec. (1 = disable) */
+	PHY_M_EC_RX_TIM_CT   = 1<<7, /* RGMII Rx Timing Control*/
+	PHY_M_EC_MAC_S_MSK   = 7<<4, /* Bit  6.. 4:	Def. MAC interface speed */
+	PHY_M_EC_FIB_AN_ENA  = 1<<3, /* Fiber Auto-Neg. Enable (88E1011S only) */
+	PHY_M_EC_DTE_D_ENA   = 1<<2, /* DTE Detect Enable (88E1111 only) */
+	PHY_M_EC_TX_TIM_CT   = 1<<1, /* RGMII Tx Timing Control */
+	PHY_M_EC_TRANS_DIS   = 1<<0, /* Transmitter Disable (88E1111 only) */
+};
+
+#define PHY_M_EC_M_DSC(x)	((u16)(x)<<10) /* 00=1x; 01=2x; 10=3x; 11=4x */
+#define PHY_M_EC_S_DSC(x)	((u16)(x)<<8) /* 00=dis; 01=1x; 10=2x; 11=3x */
+#define PHY_M_EC_MAC_S(x)	((u16)(x)<<4) /* 01X=0; 110=2.5; 111=25 (MHz) */
+
+#define PHY_M_EC_M_DSC_2(x)	((u16)(x)<<9) /* 000=1x; 001=2x; 010=3x; 011=4x */
+											/* 100=5x; 101=6x; 110=7x; 111=8x */
+enum {
+	MAC_TX_CLK_0_MHZ	= 2,
+	MAC_TX_CLK_2_5_MHZ	= 6,
+	MAC_TX_CLK_25_MHZ 	= 7,
+};
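+
+/*
+ * Usage sketch: the shift macros above build the extended control
+ * register fields, e.g. a 25 MHz default MAC interface speed together
+ * with a 2x master downshift counter (encodings as noted above):
+ *
+ *	ectrl = PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ) | PHY_M_EC_M_DSC(1);
+ */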
+
+/*****  PHY_MARV_LED_CTRL	16 bit r/w	LED Control Reg *****/
+enum {
+	PHY_M_LEDC_DIS_LED	= 1<<15, /* Disable LED */
+	PHY_M_LEDC_PULS_MSK	= 7<<12,/* Bit 14..12: Pulse Stretch Mask */
+	PHY_M_LEDC_F_INT	= 1<<11, /* Force Interrupt */
+	PHY_M_LEDC_BL_R_MSK	= 7<<8,/* Bit 10.. 8: Blink Rate Mask */
+	PHY_M_LEDC_DP_C_LSB	= 1<<7, /* Duplex Control (LSB, 88E1111 only) */
+	PHY_M_LEDC_TX_C_LSB	= 1<<6, /* Tx Control (LSB, 88E1111 only) */
+	PHY_M_LEDC_LK_C_MSK	= 7<<3,/* Bit  5.. 3: Link Control Mask */
+					/* (88E1111 only) */
+};
+#define PHY_M_LED_PULS_DUR(x)	(((u16)(x)<<12) & PHY_M_LEDC_PULS_MSK)
+#define PHY_M_LED_BLINK_RT(x)	(((u16)(x)<<8) & PHY_M_LEDC_BL_R_MSK)
+
+enum {
+	PHY_M_LEDC_LINK_MSK	= 3<<3, /* Bit  4.. 3: Link Control Mask */
+					/* (88E1011 only) */
+	PHY_M_LEDC_DP_CTRL	= 1<<2, /* Duplex Control */
+	PHY_M_LEDC_DP_C_MSB	= 1<<2, /* Duplex Control (MSB, 88E1111 only) */
+	PHY_M_LEDC_RX_CTRL	= 1<<1, /* Rx Activity / Link */
+	PHY_M_LEDC_TX_CTRL	= 1<<0, /* Tx Activity / Link */
+	PHY_M_LEDC_TX_C_MSB	= 1<<0, /* Tx Control (MSB, 88E1111 only) */
+};
+
+enum {
+	PULS_NO_STR	= 0, /* no pulse stretching */
+	PULS_21MS	= 1, /* 21 ms to 42 ms */
+	PULS_42MS	= 2, /* 42 ms to 84 ms */
+	PULS_84MS	= 3, /* 84 ms to 170 ms */
+	PULS_170MS	= 4, /* 170 ms to 340 ms */
+	PULS_340MS	= 5, /* 340 ms to 670 ms */
+	PULS_670MS	= 6, /* 670 ms to 1.3 s */
+	PULS_1300MS	= 7, /* 1.3 s to 2.7 s */
+};
+
+
+enum {
+	BLINK_42MS	= 0, /* 42 ms */
+	BLINK_84MS	= 1, /* 84 ms */
+	BLINK_170MS	= 2, /* 170 ms */
+	BLINK_340MS	= 3, /* 340 ms */
+	BLINK_670MS	= 4, /* 670 ms */
+};
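+
+/*
+ * Usage sketch: pulse stretching and blink rate combine into one LED
+ * control value, e.g. a 170 ms stretch with an 84 ms blink rate:
+ *
+ *	ledctrl = PHY_M_LED_PULS_DUR(PULS_170MS) |
+ *		  PHY_M_LED_BLINK_RT(BLINK_84MS);
+ */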
+
+/*****  PHY_MARV_LED_OVER	16 bit r/w	Manual LED Override Reg *****/
+#define PHY_M_LED_MO_SGMII(x)	((x)<<14) /* Bit 15..14:  SGMII AN Timer */
+										/* Bit 13..12:	reserved */
+#define PHY_M_LED_MO_DUP(x)	((x)<<10) /* Bit 11..10:  Duplex */
+#define PHY_M_LED_MO_10(x)	((x)<<8) /* Bit  9.. 8:  Link 10 */
+#define PHY_M_LED_MO_100(x)	((x)<<6) /* Bit  7.. 6:  Link 100 */
+#define PHY_M_LED_MO_1000(x)	((x)<<4) /* Bit  5.. 4:  Link 1000 */
+#define PHY_M_LED_MO_RX(x)	((x)<<2) /* Bit  3.. 2:  Rx */
+#define PHY_M_LED_MO_TX(x)	((x)<<0) /* Bit  1.. 0:  Tx */
+
+enum {
+	MO_LED_NORM	= 0,
+	MO_LED_BLINK	= 1,
+	MO_LED_OFF	= 2,
+	MO_LED_ON	= 3,
+};
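+
+/*
+ * Usage sketch: each two-bit override field takes one of the MO_LED_*
+ * codes, e.g. forcing the link rate LEDs off:
+ *
+ *	over = PHY_M_LED_MO_10(MO_LED_OFF) |
+ *	       PHY_M_LED_MO_100(MO_LED_OFF) |
+ *	       PHY_M_LED_MO_1000(MO_LED_OFF);
+ */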
+
+/*****  PHY_MARV_EXT_CTRL_2	16 bit r/w	Ext. PHY Specific Ctrl 2 *****/
+enum {
+	PHY_M_EC2_FI_IMPED	= 1<<6, /* Fiber Input  Impedance */
+	PHY_M_EC2_FO_IMPED	= 1<<5, /* Fiber Output Impedance */
+	PHY_M_EC2_FO_M_CLK	= 1<<4, /* Fiber Mode Clock Enable */
+	PHY_M_EC2_FO_BOOST	= 1<<3, /* Fiber Output Boost */
+	PHY_M_EC2_FO_AM_MSK	= 7, /* Bit  2.. 0:	Fiber Output Amplitude */
+};
+
+/*****  PHY_MARV_EXT_P_STAT 16 bit r/w	Ext. PHY Specific Status *****/
+enum {
+	PHY_M_FC_AUTO_SEL	= 1<<15, /* Fiber/Copper Auto Sel. Dis. */
+	PHY_M_FC_AN_REG_ACC	= 1<<14, /* Fiber/Copper AN Reg. Access */
+	PHY_M_FC_RESOLUTION	= 1<<13, /* Fiber/Copper Resolution */
+	PHY_M_SER_IF_AN_BP	= 1<<12, /* Ser. IF AN Bypass Enable */
+	PHY_M_SER_IF_BP_ST	= 1<<11, /* Ser. IF AN Bypass Status */
+	PHY_M_IRQ_POLARITY	= 1<<10, /* IRQ polarity */
+	PHY_M_DIS_AUT_MED	= 1<<9, /* Disable Aut. Medium Reg. Selection */
+									/* (88E1111 only) */
+								/* Bit  9.. 4: reserved (88E1011 only) */
+	PHY_M_UNDOC1	= 1<<7, /* undocumented bit !! */
+	PHY_M_DTE_POW_STAT	= 1<<4, /* DTE Power Status (88E1111 only) */
+	PHY_M_MODE_MASK	= 0xf, /* Bit  3.. 0: copy of HWCFG MODE[3:0] */
+};
+
+/*****  PHY_MARV_CABLE_DIAG	16 bit r/o	Cable Diagnostic Reg *****/
+enum {
+	PHY_M_CABD_ENA_TEST	= 1<<15, /* Enable Test (Page 0) */
+	PHY_M_CABD_DIS_WAIT	= 1<<15, /* Disable Waiting Period (Page 1) */
+					/* (88E1111 only) */
+	PHY_M_CABD_STAT_MSK	= 3<<13, /* Bit 14..13: Status Mask */
+	PHY_M_CABD_AMPL_MSK	= 0x1f<<8, /* Bit 12.. 8: Amplitude Mask */
+					/* (88E1111 only) */
+	PHY_M_CABD_DIST_MSK	= 0xff, /* Bit  7.. 0: Distance Mask */
+};
+
+/* values for Cable Diagnostic Status (11=fail; 00=OK; 10=open; 01=short) */
+enum {
+	CABD_STAT_NORMAL= 0,
+	CABD_STAT_SHORT	= 1,
+	CABD_STAT_OPEN	= 2,
+	CABD_STAT_FAIL	= 3,
+};
+
+/* for 10/100 Fast Ethernet PHY (88E3082 only) */
+/*****  PHY_MARV_FE_LED_PAR		16 bit r/w	LED Parallel Select Reg. *****/
+									/* Bit 15..12: reserved (used internally) */
+enum {
+	PHY_M_FELP_LED2_MSK = 0xf<<8,	/* Bit 11.. 8: LED2 Mask (LINK) */
+	PHY_M_FELP_LED1_MSK = 0xf<<4,	/* Bit  7.. 4: LED1 Mask (ACT) */
+	PHY_M_FELP_LED0_MSK = 0xf, /* Bit  3.. 0: LED0 Mask (SPEED) */
+};
+
+#define PHY_M_FELP_LED2_CTRL(x)	(((x)<<8) & PHY_M_FELP_LED2_MSK)
+#define PHY_M_FELP_LED1_CTRL(x)	(((x)<<4) & PHY_M_FELP_LED1_MSK)
+#define PHY_M_FELP_LED0_CTRL(x)	(((x)<<0) & PHY_M_FELP_LED0_MSK)
+
+enum {
+	LED_PAR_CTRL_COLX	= 0x00,
+	LED_PAR_CTRL_ERROR	= 0x01,
+	LED_PAR_CTRL_DUPLEX	= 0x02,
+	LED_PAR_CTRL_DP_COL	= 0x03,
+	LED_PAR_CTRL_SPEED	= 0x04,
+	LED_PAR_CTRL_LINK	= 0x05,
+	LED_PAR_CTRL_TX		= 0x06,
+	LED_PAR_CTRL_RX		= 0x07,
+	LED_PAR_CTRL_ACT	= 0x08,
+	LED_PAR_CTRL_LNK_RX	= 0x09,
+	LED_PAR_CTRL_LNK_AC	= 0x0a,
+	LED_PAR_CTRL_ACT_BL	= 0x0b,
+	LED_PAR_CTRL_TX_BL	= 0x0c,
+	LED_PAR_CTRL_RX_BL	= 0x0d,
+	LED_PAR_CTRL_COL_BL	= 0x0e,
+	LED_PAR_CTRL_INACT	= 0x0f
+};
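+
+/*
+ * Usage sketch: each 4-bit LED field selects one of the LED_PAR_CTRL_*
+ * functions above, e.g. LED2 = link, LED1 = activity, LED0 = speed:
+ *
+ *	val = PHY_M_FELP_LED2_CTRL(LED_PAR_CTRL_LINK) |
+ *	      PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_ACT) |
+ *	      PHY_M_FELP_LED0_CTRL(LED_PAR_CTRL_SPEED);
+ */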
+
+/*****  PHY_MARV_FE_SPEC_2		16 bit r/w	Specific Control Reg. 2 *****/
+enum {
+	PHY_M_FESC_DIS_WAIT	= 1<<2, /* Disable TDR Waiting Period */
+	PHY_M_FESC_ENA_MCLK	= 1<<1, /* Enable MAC Rx Clock in sleep mode */
+	PHY_M_FESC_SEL_CL_A	= 1<<0, /* Select Class A driver (100B-TX) */
+};
+
+
+/*****  PHY_MARV_PHY_CTRL (page 3)		16 bit r/w	LED Control Reg. *****/
+enum {
+	PHY_M_LEDC_LOS_MSK	= 0xf<<12, /* Bit 15..12: LOS LED Ctrl. Mask */
+	PHY_M_LEDC_INIT_MSK	= 0xf<<8, /* Bit 11.. 8: INIT LED Ctrl. Mask */
+	PHY_M_LEDC_STA1_MSK	= 0xf<<4, /* Bit  7.. 4: STAT1 LED Ctrl. Mask */
+	PHY_M_LEDC_STA0_MSK	= 0xf, /* Bit  3.. 0: STAT0 LED Ctrl. Mask */
+};
+
+#define PHY_M_LEDC_LOS_CTRL(x)	(((x)<<12) & PHY_M_LEDC_LOS_MSK)
+#define PHY_M_LEDC_INIT_CTRL(x)	(((x)<<8) & PHY_M_LEDC_INIT_MSK)
+#define PHY_M_LEDC_STA1_CTRL(x)	(((x)<<4) & PHY_M_LEDC_STA1_MSK)
+#define PHY_M_LEDC_STA0_CTRL(x)	(((x)<<0) & PHY_M_LEDC_STA0_MSK)
+
+/* GMAC registers  */
+/* Port Registers */
+enum {
+	GM_GP_STAT	= 0x0000,	/* 16 bit r/o	General Purpose Status */
+	GM_GP_CTRL	= 0x0004,	/* 16 bit r/w	General Purpose Control */
+	GM_TX_CTRL	= 0x0008,	/* 16 bit r/w	Transmit Control Reg. */
+	GM_RX_CTRL	= 0x000c,	/* 16 bit r/w	Receive Control Reg. */
+	GM_TX_FLOW_CTRL	= 0x0010,	/* 16 bit r/w	Transmit Flow-Control */
+	GM_TX_PARAM	= 0x0014,	/* 16 bit r/w	Transmit Parameter Reg. */
+	GM_SERIAL_MODE	= 0x0018,	/* 16 bit r/w	Serial Mode Register */
+/* Source Address Registers */
+	GM_SRC_ADDR_1L	= 0x001c,	/* 16 bit r/w	Source Address 1 (low) */
+	GM_SRC_ADDR_1M	= 0x0020,	/* 16 bit r/w	Source Address 1 (middle) */
+	GM_SRC_ADDR_1H	= 0x0024,	/* 16 bit r/w	Source Address 1 (high) */
+	GM_SRC_ADDR_2L	= 0x0028,	/* 16 bit r/w	Source Address 2 (low) */
+	GM_SRC_ADDR_2M	= 0x002c,	/* 16 bit r/w	Source Address 2 (middle) */
+	GM_SRC_ADDR_2H	= 0x0030,	/* 16 bit r/w	Source Address 2 (high) */
+
+/* Multicast Address Hash Registers */
+	GM_MC_ADDR_H1	= 0x0034,	/* 16 bit r/w	Multicast Address Hash 1 */
+	GM_MC_ADDR_H2	= 0x0038,	/* 16 bit r/w	Multicast Address Hash 2 */
+	GM_MC_ADDR_H3	= 0x003c,	/* 16 bit r/w	Multicast Address Hash 3 */
+	GM_MC_ADDR_H4	= 0x0040,	/* 16 bit r/w	Multicast Address Hash 4 */
+
+/* Interrupt Source Registers */
+	GM_TX_IRQ_SRC	= 0x0044,	/* 16 bit r/o	Tx Overflow IRQ Source */
+	GM_RX_IRQ_SRC	= 0x0048,	/* 16 bit r/o	Rx Overflow IRQ Source */
+	GM_TR_IRQ_SRC	= 0x004c,	/* 16 bit r/o	Tx/Rx Over. IRQ Source */
+
+/* Interrupt Mask Registers */
+	GM_TX_IRQ_MSK	= 0x0050,	/* 16 bit r/w	Tx Overflow IRQ Mask */
+	GM_RX_IRQ_MSK	= 0x0054,	/* 16 bit r/w	Rx Overflow IRQ Mask */
+	GM_TR_IRQ_MSK	= 0x0058,	/* 16 bit r/w	Tx/Rx Over. IRQ Mask */
+
+/* Serial Management Interface (SMI) Registers */
+	GM_SMI_CTRL	= 0x0080,	/* 16 bit r/w	SMI Control Register */
+	GM_SMI_DATA	= 0x0084,	/* 16 bit r/w	SMI Data Register */
+	GM_PHY_ADDR	= 0x0088,	/* 16 bit r/w	GPHY Address Register */
+};
+
+/* MIB Counters */
+#define GM_MIB_CNT_BASE	0x0100		/* Base Address of MIB Counters */
+#define GM_MIB_CNT_SIZE	44		/* Number of MIB Counters */
+
+/*
+ * MIB Counters base address definitions (low word) -
+ * use offset 4 for access to high word	(32 bit r/o)
+ */
+enum {
+	GM_RXF_UC_OK  = GM_MIB_CNT_BASE + 0,	/* Unicast Frames Received OK */
+	GM_RXF_BC_OK	= GM_MIB_CNT_BASE + 8,	/* Broadcast Frames Received OK */
+	GM_RXF_MPAUSE	= GM_MIB_CNT_BASE + 16,	/* Pause MAC Ctrl Frames Received */
+	GM_RXF_MC_OK	= GM_MIB_CNT_BASE + 24,	/* Multicast Frames Received OK */
+	GM_RXF_FCS_ERR	= GM_MIB_CNT_BASE + 32,	/* Rx Frame Check Seq. Error */
+	/* GM_MIB_CNT_BASE + 40:	reserved */
+	GM_RXO_OK_LO	= GM_MIB_CNT_BASE + 48,	/* Octets Received OK Low */
+	GM_RXO_OK_HI	= GM_MIB_CNT_BASE + 56,	/* Octets Received OK High */
+	GM_RXO_ERR_LO	= GM_MIB_CNT_BASE + 64,	/* Octets Received Invalid Low */
+	GM_RXO_ERR_HI	= GM_MIB_CNT_BASE + 72,	/* Octets Received Invalid High */
+	GM_RXF_SHT	= GM_MIB_CNT_BASE + 80,	/* Frames <64 Byte Received OK */
+	GM_RXE_FRAG	= GM_MIB_CNT_BASE + 88,	/* Frames <64 Byte Received with FCS Err */
+	GM_RXF_64B	= GM_MIB_CNT_BASE + 96,	/* 64 Byte Rx Frame */
+	GM_RXF_127B	= GM_MIB_CNT_BASE + 104,	/* 65-127 Byte Rx Frame */
+	GM_RXF_255B	= GM_MIB_CNT_BASE + 112,	/* 128-255 Byte Rx Frame */
+	GM_RXF_511B	= GM_MIB_CNT_BASE + 120,	/* 256-511 Byte Rx Frame */
+	GM_RXF_1023B	= GM_MIB_CNT_BASE + 128,	/* 512-1023 Byte Rx Frame */
+	GM_RXF_1518B	= GM_MIB_CNT_BASE + 136,	/* 1024-1518 Byte Rx Frame */
+	GM_RXF_MAX_SZ	= GM_MIB_CNT_BASE + 144,	/* 1519-MaxSize Byte Rx Frame */
+	GM_RXF_LNG_ERR	= GM_MIB_CNT_BASE + 152,	/* Rx Frame too Long Error */
+	GM_RXF_JAB_PKT	= GM_MIB_CNT_BASE + 160,	/* Rx Jabber Packet Frame */
+	/* GM_MIB_CNT_BASE + 168:	reserved */
+	GM_RXE_FIFO_OV	= GM_MIB_CNT_BASE + 176,	/* Rx FIFO overflow Event */
+	/* GM_MIB_CNT_BASE + 184:	reserved */
+	GM_TXF_UC_OK	= GM_MIB_CNT_BASE + 192,	/* Unicast Frames Xmitted OK */
+	GM_TXF_BC_OK	= GM_MIB_CNT_BASE + 200,	/* Broadcast Frames Xmitted OK */
+	GM_TXF_MPAUSE	= GM_MIB_CNT_BASE + 208,	/* Pause MAC Ctrl Frames Xmitted */
+	GM_TXF_MC_OK	= GM_MIB_CNT_BASE + 216,	/* Multicast Frames Xmitted OK */
+	GM_TXO_OK_LO	= GM_MIB_CNT_BASE + 224,	/* Octets Transmitted OK Low */
+	GM_TXO_OK_HI	= GM_MIB_CNT_BASE + 232,	/* Octets Transmitted OK High */
+	GM_TXF_64B	= GM_MIB_CNT_BASE + 240,	/* 64 Byte Tx Frame */
+	GM_TXF_127B	= GM_MIB_CNT_BASE + 248,	/* 65-127 Byte Tx Frame */
+	GM_TXF_255B	= GM_MIB_CNT_BASE + 256,	/* 128-255 Byte Tx Frame */
+	GM_TXF_511B	= GM_MIB_CNT_BASE + 264,	/* 256-511 Byte Tx Frame */
+	GM_TXF_1023B	= GM_MIB_CNT_BASE + 272,	/* 512-1023 Byte Tx Frame */
+	GM_TXF_1518B	= GM_MIB_CNT_BASE + 280,	/* 1024-1518 Byte Tx Frame */
+	GM_TXF_MAX_SZ	= GM_MIB_CNT_BASE + 288,	/* 1519-MaxSize Byte Tx Frame */
+
+	GM_TXF_COL	= GM_MIB_CNT_BASE + 304,	/* Tx Collision */
+	GM_TXF_LAT_COL	= GM_MIB_CNT_BASE + 312,	/* Tx Late Collision */
+	GM_TXF_ABO_COL	= GM_MIB_CNT_BASE + 320,	/* Tx aborted due to Exces. Col. */
+	GM_TXF_MUL_COL	= GM_MIB_CNT_BASE + 328,	/* Tx Multiple Collision */
+	GM_TXF_SNG_COL	= GM_MIB_CNT_BASE + 336,	/* Tx Single Collision */
+	GM_TXE_FIFO_UR	= GM_MIB_CNT_BASE + 344,	/* Tx FIFO Underrun Event */
+};
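+
+/*
+ * Usage sketch: a full counter is read as two 16-bit halves, with the
+ * high word at offset 4 as noted above (gma_read16()-style per-port
+ * accessor assumed):
+ *
+ *	cnt = (u32)gma_read16(hw, port, GM_RXF_UC_OK)
+ *	    | ((u32)gma_read16(hw, port, GM_RXF_UC_OK + 4) << 16);
+ */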
+
+/* GMAC Bit Definitions */
+/*	GM_GP_STAT	16 bit r/o	General Purpose Status Register */
+enum {
+	GM_GPSR_SPEED		= 1<<15, /* Bit 15:	Port Speed (1 = 100 Mbps) */
+	GM_GPSR_DUPLEX		= 1<<14, /* Bit 14:	Duplex Mode (1 = Full) */
+	GM_GPSR_FC_TX_DIS	= 1<<13, /* Bit 13:	Tx Flow-Control Mode Disabled */
+	GM_GPSR_LINK_UP		= 1<<12, /* Bit 12:	Link Up Status */
+	GM_GPSR_PAUSE		= 1<<11, /* Bit 11:	Pause State */
+	GM_GPSR_TX_ACTIVE	= 1<<10, /* Bit 10:	Tx in Progress */
+	GM_GPSR_EXC_COL		= 1<<9,	/* Bit  9:	Excessive Collisions Occurred */
+	GM_GPSR_LAT_COL		= 1<<8,	/* Bit  8:	Late Collisions Occurred */
+
+	GM_GPSR_PHY_ST_CH	= 1<<5,	/* Bit  5:	PHY Status Change */
+	GM_GPSR_GIG_SPEED	= 1<<4,	/* Bit  4:	Gigabit Speed (1 = 1000 Mbps) */
+	GM_GPSR_PART_MODE	= 1<<3,	/* Bit  3:	Partition mode */
+	GM_GPSR_FC_RX_DIS	= 1<<2,	/* Bit  2:	Rx Flow-Control Mode Disabled */
+	GM_GPSR_PROM_EN		= 1<<1,	/* Bit  1:	Promiscuous Mode Enabled */
+};
+
+/*	GM_GP_CTRL	16 bit r/w	General Purpose Control Register */
+enum {
+	GM_GPCR_PROM_ENA	= 1<<14,	/* Bit 14:	Enable Promiscuous Mode */
+	GM_GPCR_FC_TX_DIS	= 1<<13, /* Bit 13:	Disable Tx Flow-Control Mode */
+	GM_GPCR_TX_ENA		= 1<<12, /* Bit 12:	Enable Transmit */
+	GM_GPCR_RX_ENA		= 1<<11, /* Bit 11:	Enable Receive */
+	GM_GPCR_BURST_ENA	= 1<<10, /* Bit 10:	Enable Burst Mode */
+	GM_GPCR_LOOP_ENA	= 1<<9,	/* Bit  9:	Enable MAC Loopback Mode */
+	GM_GPCR_PART_ENA	= 1<<8,	/* Bit  8:	Enable Partition Mode */
+	GM_GPCR_GIGS_ENA	= 1<<7,	/* Bit  7:	Gigabit Speed (1000 Mbps) */
+	GM_GPCR_FL_PASS		= 1<<6,	/* Bit  6:	Force Link Pass */
+	GM_GPCR_DUP_FULL	= 1<<5,	/* Bit  5:	Full Duplex Mode */
+	GM_GPCR_FC_RX_DIS	= 1<<4,	/* Bit  4:	Disable Rx Flow-Control Mode */
+	GM_GPCR_SPEED_100	= 1<<3,   /* Bit  3:	Port Speed 100 Mbps */
+	GM_GPCR_AU_DUP_DIS	= 1<<2,	/* Bit  2:	Disable Auto-Update Duplex */
+	GM_GPCR_AU_FCT_DIS	= 1<<1,	/* Bit  1:	Disable Auto-Update Flow-C. */
+	GM_GPCR_AU_SPD_DIS	= 1<<0,	/* Bit  0:	Disable Auto-Update Speed */
+};
+
+#define GM_GPCR_SPEED_1000	(GM_GPCR_GIGS_ENA | GM_GPCR_SPEED_100)
+#define GM_GPCR_AU_ALL_DIS	(GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS|GM_GPCR_AU_SPD_DIS)
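+
+/*
+ * Usage sketch: forcing 1000 Mbps full duplex with the auto-update
+ * units disabled (gma_write16()-style accessor assumed):
+ *
+ *	gma_write16(hw, port, GM_GP_CTRL,
+ *		    GM_GPCR_SPEED_1000 | GM_GPCR_DUP_FULL |
+ *		    GM_GPCR_AU_ALL_DIS);
+ */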
+
+/*	GM_TX_CTRL			16 bit r/w	Transmit Control Register */
+enum {
+	GM_TXCR_FORCE_JAM	= 1<<15, /* Bit 15:	Force Jam / Flow-Control */
+	GM_TXCR_CRC_DIS		= 1<<14, /* Bit 14:	Disable insertion of CRC */
+	GM_TXCR_PAD_DIS		= 1<<13, /* Bit 13:	Disable padding of packets */
+	GM_TXCR_COL_THR_MSK	= 7<<10, /* Bit 12..10:	Collision Threshold */
+};
+
+#define TX_COL_THR(x)		(((x)<<10) & GM_TXCR_COL_THR_MSK)
+#define TX_COL_DEF		0x04	/* late collision after 64 byte */
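+
+/*
+ * Usage sketch: the collision threshold field is built with
+ * TX_COL_THR(), e.g. programming the default (late collision after
+ * 64 bytes):
+ *
+ *	gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
+ */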
+
+/*	GM_RX_CTRL			16 bit r/w	Receive Control Register */
+enum {
+	GM_RXCR_UCF_ENA	= 1<<15, /* Bit 15:	Enable Unicast filtering */
+	GM_RXCR_MCF_ENA	= 1<<14, /* Bit 14:	Enable Multicast filtering */
+	GM_RXCR_CRC_DIS	= 1<<13, /* Bit 13:	Remove 4-byte CRC */
+	GM_RXCR_PASS_FC	= 1<<12, /* Bit 12:	Pass FC packets to FIFO */
+};
+
+/*	GM_TX_PARAM		16 bit r/w	Transmit Parameter Register */
+enum {
+	GM_TXPA_JAMLEN_MSK	= 0x03<<14,	/* Bit 15..14:	Jam Length */
+	GM_TXPA_JAMIPG_MSK	= 0x1f<<9,	/* Bit 13..9:	Jam IPG */
+	GM_TXPA_JAMDAT_MSK	= 0x1f<<4,	/* Bit  8..4:	IPG Jam to Data */
+
+	TX_JAM_LEN_DEF		= 0x03,
+	TX_JAM_IPG_DEF		= 0x0b,
+	TX_IPG_JAM_DEF		= 0x1c,
+};
+
+#define TX_JAM_LEN_VAL(x)	(((x)<<14) & GM_TXPA_JAMLEN_MSK)
+#define TX_JAM_IPG_VAL(x)	(((x)<<9)  & GM_TXPA_JAMIPG_MSK)
+#define TX_IPG_JAM_DATA(x)	(((x)<<4)  & GM_TXPA_JAMDAT_MSK)
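+
+/*
+ * Usage sketch: the three jam fields combine into one transmit
+ * parameter value, e.g. using the defaults above:
+ *
+ *	gma_write16(hw, port, GM_TX_PARAM,
+ *		    TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
+ *		    TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
+ *		    TX_IPG_JAM_DATA(TX_IPG_JAM_DEF));
+ */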
+
+
+/*	GM_SERIAL_MODE			16 bit r/w	Serial Mode Register */
+enum {
+	GM_SMOD_DATABL_MSK	= 0x1f<<11, /* Bit 15..11:	Data Blinder (r/o) */
+	GM_SMOD_LIMIT_4		= 1<<10, /* Bit 10:	4 consecutive Tx trials */
+	GM_SMOD_VLAN_ENA	= 1<<9,	/* Bit  9:	Enable VLAN  (Max. Frame Len) */
+	GM_SMOD_JUMBO_ENA	= 1<<8,	/* Bit  8:	Enable Jumbo (Max. Frame Len) */
+	GM_SMOD_IPG_MSK		= 0x1f	/* Bit  4.. 0:	Inter-Packet Gap (IPG) */
+};
+
+#define DATA_BLIND_VAL(x)	(((x)<<11) & GM_SMOD_DATABL_MSK)
+#define DATA_BLIND_DEF		0x04
+
+#define IPG_DATA_VAL(x)		((x) & GM_SMOD_IPG_MSK)
+#define IPG_DATA_DEF		0x1e
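+
+/*
+ * Usage sketch: a typical serial mode setting combines the data
+ * blinder and IPG defaults, optionally widening the max frame length
+ * for VLAN tags or jumbo frames:
+ *
+ *	gma_write16(hw, port, GM_SERIAL_MODE,
+ *		    DATA_BLIND_VAL(DATA_BLIND_DEF) |
+ *		    GM_SMOD_VLAN_ENA |
+ *		    IPG_DATA_VAL(IPG_DATA_DEF));
+ */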
+
+/*	GM_SMI_CTRL			16 bit r/w	SMI Control Register */
+enum {
+	GM_SMI_CT_PHY_A_MSK	= 0x1f<<11, /* Bit 15..11:	PHY Device Address */
+	GM_SMI_CT_REG_A_MSK	= 0x1f<<6, /* Bit 10.. 6:	PHY Register Address */
+	GM_SMI_CT_OP_RD		= 1<<5,	/* Bit  5:	OpCode Read (0=Write)*/
+	GM_SMI_CT_RD_VAL	= 1<<4,	/* Bit  4:	Read Valid (Read completed) */
+	GM_SMI_CT_BUSY		= 1<<3,	/* Bit  3:	Busy (Operation in progress) */
+};
+
+#define GM_SMI_CT_PHY_AD(x)	(((x)<<11) & GM_SMI_CT_PHY_A_MSK)
+#define GM_SMI_CT_REG_AD(x)	(((x)<<6) & GM_SMI_CT_REG_A_MSK)
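+
+/*
+ * Usage sketch of an SMI (MDIO) read: write the PHY and register
+ * addresses with the read opcode, poll for completion, then fetch the
+ * data (gma_read16()/gma_write16()-style accessors assumed; a real
+ * driver would bound the busy-wait):
+ *
+ *	gma_write16(hw, port, GM_SMI_CTRL,
+ *		    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) |
+ *		    GM_SMI_CT_OP_RD);
+ *	while (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL))
+ *		cpu_relax();
+ *	val = gma_read16(hw, port, GM_SMI_DATA);
+ */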
+
+/*	GM_PHY_ADDR				16 bit r/w	GPHY Address Register */
+enum {
+	GM_PAR_MIB_CLR	= 1<<5,	/* Bit  5:	Set MIB Clear Counter Mode */
+	GM_PAR_MIB_TST	= 1<<4,	/* Bit  4:	MIB Load Counter (Test Mode) */
+};
+
+/* Receive Frame Status Encoding */
+enum {
+	GMR_FS_LEN	= 0xffff<<16, /* Bit 31..16:	Rx Frame Length */
+	GMR_FS_LEN_SHIFT = 16,
+	GMR_FS_VLAN	= 1<<13, /* Bit 13:	VLAN Packet */
+	GMR_FS_JABBER	= 1<<12, /* Bit 12:	Jabber Packet */
+	GMR_FS_UN_SIZE	= 1<<11, /* Bit 11:	Undersize Packet */
+	GMR_FS_MC	= 1<<10, /* Bit 10:	Multicast Packet */
+	GMR_FS_BC	= 1<<9, /* Bit  9:	Broadcast Packet */
+	GMR_FS_RX_OK	= 1<<8, /* Bit  8:	Receive OK (Good Packet) */
+	GMR_FS_GOOD_FC	= 1<<7, /* Bit  7:	Good Flow-Control Packet */
+	GMR_FS_BAD_FC	= 1<<6, /* Bit  6:	Bad  Flow-Control Packet */
+	GMR_FS_MII_ERR	= 1<<5, /* Bit  5:	MII Error */
+	GMR_FS_LONG_ERR	= 1<<4, /* Bit  4:	Too Long Packet */
+	GMR_FS_FRAGMENT	= 1<<3, /* Bit  3:	Fragment */
+
+	GMR_FS_CRC_ERR	= 1<<1, /* Bit  1:	CRC Error */
+	GMR_FS_RX_FF_OV	= 1<<0, /* Bit  0:	Rx FIFO Overflow */
+
+/*
+ * GMR_FS_ANY_ERR (analogous to XMR_FS_ANY_ERR)
+ */
+	GMR_FS_ANY_ERR	= GMR_FS_CRC_ERR | GMR_FS_LONG_ERR |
+		  	  GMR_FS_MII_ERR | GMR_FS_BAD_FC | GMR_FS_GOOD_FC |
+			  GMR_FS_JABBER,
+/* Rx GMAC FIFO Flush Mask (default) */
+	RX_FF_FL_DEF_MSK = GMR_FS_CRC_ERR | GMR_FS_RX_FF_OV |GMR_FS_MII_ERR |
+			   GMR_FS_BAD_FC |  GMR_FS_UN_SIZE | GMR_FS_JABBER,
+};
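+
+/*
+ * Usage sketch: the Rx path extracts the length from the upper
+ * halfword and screens the error summary bits:
+ *
+ *	len = (status & GMR_FS_LEN) >> GMR_FS_LEN_SHIFT;
+ *	ok  = (status & GMR_FS_RX_OK) && !(status & GMR_FS_ANY_ERR);
+ */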
+
+/*	RX_GMF_CTRL_T	32 bit	Rx GMAC FIFO Control/Test */
+enum {
+	GMF_WP_TST_ON	= 1<<14,	/* Write Pointer Test On */
+	GMF_WP_TST_OFF	= 1<<13,	/* Write Pointer Test Off */
+	GMF_WP_STEP	= 1<<12,	/* Write Pointer Step/Increment */
+
+	GMF_RP_TST_ON	= 1<<10,	/* Read Pointer Test On */
+	GMF_RP_TST_OFF	= 1<<9,		/* Read Pointer Test Off */
+	GMF_RP_STEP	= 1<<8,		/* Read Pointer Step/Increment */
+	GMF_RX_F_FL_ON	= 1<<7,		/* Rx FIFO Flush Mode On */
+	GMF_RX_F_FL_OFF	= 1<<6,		/* Rx FIFO Flush Mode Off */
+	GMF_CLI_RX_FO	= 1<<5,		/* Clear IRQ Rx FIFO Overrun */
+	GMF_CLI_RX_FC	= 1<<4,		/* Clear IRQ Rx Frame Complete */
+	GMF_OPER_ON	= 1<<3,		/* Operational Mode On */
+	GMF_OPER_OFF	= 1<<2,		/* Operational Mode Off */
+	GMF_RST_CLR	= 1<<1,		/* Clear GMAC FIFO Reset */
+	GMF_RST_SET	= 1<<0,		/* Set   GMAC FIFO Reset */
+
+	RX_GMF_FL_THR_DEF = 0xa,	/* flush threshold (default) */
+};
+
+
+/*	TX_GMF_CTRL_T	32 bit	Tx GMAC FIFO Control/Test */
+enum {
+	GMF_WSP_TST_ON	= 1<<18, /* Write Shadow Pointer Test On */
+	GMF_WSP_TST_OFF	= 1<<17, /* Write Shadow Pointer Test Off */
+	GMF_WSP_STEP	= 1<<16, /* Write Shadow Pointer Step/Increment */
+
+	GMF_CLI_TX_FU	= 1<<6,	/* Clear IRQ Tx FIFO Underrun */
+	GMF_CLI_TX_FC	= 1<<5,	/* Clear IRQ Tx Frame Complete */
+	GMF_CLI_TX_PE	= 1<<4,	/* Clear IRQ Tx Parity Error */
+};
+
+/*	GMAC_TI_ST_CTRL	 8 bit	Time Stamp Timer Ctrl Reg (YUKON only) */
+enum {
+	GMT_ST_START	= 1<<2,	/* Start Time Stamp Timer */
+	GMT_ST_STOP	= 1<<1,	/* Stop  Time Stamp Timer */
+	GMT_ST_CLR_IRQ	= 1<<0,	/* Clear Time Stamp Timer IRQ */
+};
+
+/*	GMAC_CTRL		32 bit	GMAC Control Reg (YUKON only) */
+enum {
+	GMC_H_BURST_ON	= 1<<7,	/* Half Duplex Burst Mode On */
+	GMC_H_BURST_OFF	= 1<<6,	/* Half Duplex Burst Mode Off */
+	GMC_F_LOOPB_ON	= 1<<5,	/* FIFO Loopback On */
+	GMC_F_LOOPB_OFF	= 1<<4,	/* FIFO Loopback Off */
+	GMC_PAUSE_ON	= 1<<3,	/* Pause On */
+	GMC_PAUSE_OFF	= 1<<2,	/* Pause Off */
+	GMC_RST_CLR	= 1<<1,	/* Clear GMAC Reset */
+	GMC_RST_SET	= 1<<0,	/* Set   GMAC Reset */
+};
+
+/*	GPHY_CTRL		32 bit	GPHY Control Reg (YUKON only) */
+enum {
+	GPC_SEL_BDT	= 1<<28, /* Select Bi-Dir. Transfer for MDC/MDIO */
+	GPC_INT_POL_HI	= 1<<27, /* IRQ Polarity is Active HIGH */
+	GPC_75_OHM	= 1<<26, /* Use 75 Ohm Termination instead of 50 */
+	GPC_DIS_FC	= 1<<25, /* Disable Automatic Fiber/Copper Detection */
+	GPC_DIS_SLEEP	= 1<<24, /* Disable Energy Detect */
+	GPC_HWCFG_M_3	= 1<<23, /* HWCFG_MODE[3] */
+	GPC_HWCFG_M_2	= 1<<22, /* HWCFG_MODE[2] */
+	GPC_HWCFG_M_1	= 1<<21, /* HWCFG_MODE[1] */
+	GPC_HWCFG_M_0	= 1<<20, /* HWCFG_MODE[0] */
+	GPC_ANEG_0	= 1<<19, /* ANEG[0] */
+	GPC_ENA_XC	= 1<<18, /* Enable MDI crossover */
+	GPC_DIS_125	= 1<<17, /* Disable 125 MHz clock */
+	GPC_ANEG_3	= 1<<16, /* ANEG[3] */
+	GPC_ANEG_2	= 1<<15, /* ANEG[2] */
+	GPC_ANEG_1	= 1<<14, /* ANEG[1] */
+	GPC_ENA_PAUSE	= 1<<13, /* Enable Pause (SYM_OR_REM) */
+	GPC_PHYADDR_4	= 1<<12, /* Bit 4 of Phy Addr */
+	GPC_PHYADDR_3	= 1<<11, /* Bit 3 of Phy Addr */
+	GPC_PHYADDR_2	= 1<<10, /* Bit 2 of Phy Addr */
+	GPC_PHYADDR_1	= 1<<9,	 /* Bit 1 of Phy Addr */
+	GPC_PHYADDR_0	= 1<<8,	 /* Bit 0 of Phy Addr */
+						/* Bits  7..2:	reserved */
+	GPC_RST_CLR	= 1<<1,	/* Clear GPHY Reset */
+	GPC_RST_SET	= 1<<0,	/* Set   GPHY Reset */
+};
+
+#define GPC_HWCFG_GMII_COP (GPC_HWCFG_M_3|GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0)
+#define GPC_HWCFG_GMII_FIB (GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0)
+#define GPC_ANEG_ADV_ALL_M  (GPC_ANEG_3 | GPC_ANEG_2 | GPC_ANEG_1 | GPC_ANEG_0)
+
+/* forced speed and duplex mode (don't mix with other ANEG bits) */
+#define GPC_FRC10MBIT_HALF	0
+#define GPC_FRC10MBIT_FULL	GPC_ANEG_0
+#define GPC_FRC100MBIT_HALF	GPC_ANEG_1
+#define GPC_FRC100MBIT_FULL	(GPC_ANEG_0 | GPC_ANEG_1)
+
+/* auto-negotiation with limited advertised speeds */
+/* mix only with master/slave settings (for copper) */
+#define GPC_ADV_1000_HALF	GPC_ANEG_2
+#define GPC_ADV_1000_FULL	GPC_ANEG_3
+#define GPC_ADV_ALL		(GPC_ANEG_2 | GPC_ANEG_3)
+
+/* master/slave settings */
+/* only for copper with 1000 Mbps */
+#define GPC_FORCE_MASTER	0
+#define GPC_FORCE_SLAVE		GPC_ANEG_0
+#define GPC_PREF_MASTER		GPC_ANEG_1
+#define GPC_PREF_SLAVE		(GPC_ANEG_1 | GPC_ANEG_0)
+
+/*	GMAC_IRQ_SRC	 8 bit	GMAC Interrupt Source Reg (YUKON only) */
+/*	GMAC_IRQ_MSK	 8 bit	GMAC Interrupt Mask   Reg (YUKON only) */
+enum {
+	GM_IS_TX_CO_OV	= 1<<5,	/* Transmit Counter Overflow IRQ */
+	GM_IS_RX_CO_OV	= 1<<4,	/* Receive Counter Overflow IRQ */
+	GM_IS_TX_FF_UR	= 1<<3,	/* Transmit FIFO Underrun */
+	GM_IS_TX_COMPL	= 1<<2,	/* Frame Transmission Complete */
+	GM_IS_RX_FF_OR	= 1<<1,	/* Receive FIFO Overrun */
+	GM_IS_RX_COMPL	= 1<<0,	/* Frame Reception Complete */
+
+#define GMAC_DEF_MSK	(GM_IS_RX_FF_OR | GM_IS_TX_FF_UR)
+
+/*	GMAC_LINK_CTRL	16 bit	GMAC Link Control Reg (YUKON only) */
+						/* Bits 15.. 2:	reserved */
+	GMLC_RST_CLR	= 1<<1,	/* Clear GMAC Link Reset */
+	GMLC_RST_SET	= 1<<0,	/* Set   GMAC Link Reset */
+
+
+/*	WOL_CTRL_STAT	16 bit	WOL Control/Status Reg */
+	WOL_CTL_LINK_CHG_OCC		= 1<<15,
+	WOL_CTL_MAGIC_PKT_OCC		= 1<<14,
+	WOL_CTL_PATTERN_OCC		= 1<<13,
+	WOL_CTL_CLEAR_RESULT		= 1<<12,
+	WOL_CTL_ENA_PME_ON_LINK_CHG	= 1<<11,
+	WOL_CTL_DIS_PME_ON_LINK_CHG	= 1<<10,
+	WOL_CTL_ENA_PME_ON_MAGIC_PKT	= 1<<9,
+	WOL_CTL_DIS_PME_ON_MAGIC_PKT	= 1<<8,
+	WOL_CTL_ENA_PME_ON_PATTERN	= 1<<7,
+	WOL_CTL_DIS_PME_ON_PATTERN	= 1<<6,
+	WOL_CTL_ENA_LINK_CHG_UNIT	= 1<<5,
+	WOL_CTL_DIS_LINK_CHG_UNIT	= 1<<4,
+	WOL_CTL_ENA_MAGIC_PKT_UNIT	= 1<<3,
+	WOL_CTL_DIS_MAGIC_PKT_UNIT	= 1<<2,
+	WOL_CTL_ENA_PATTERN_UNIT	= 1<<1,
+	WOL_CTL_DIS_PATTERN_UNIT	= 1<<0,
+};
+
+#define WOL_CTL_DEFAULT				\
+	(WOL_CTL_DIS_PME_ON_LINK_CHG |	\
+	WOL_CTL_DIS_PME_ON_PATTERN |	\
+	WOL_CTL_DIS_PME_ON_MAGIC_PKT |	\
+	WOL_CTL_DIS_LINK_CHG_UNIT |		\
+	WOL_CTL_DIS_PATTERN_UNIT |		\
+	WOL_CTL_DIS_MAGIC_PKT_UNIT)
+
+/*	WOL_MATCH_CTL	 8 bit	WOL Match Control Reg */
+#define WOL_CTL_PATT_ENA(x)	(1 << (x))
+
+
+/* XMAC II registers */
+enum {
+	XM_MMU_CMD	= 0x0000, /* 16 bit r/w	MMU Command Register */
+	XM_POFF		= 0x0008, /* 32 bit r/w	Packet Offset Register */
+	XM_BURST	= 0x000c, /* 32 bit r/w	Burst Register for half duplex*/
+	XM_1L_VLAN_TAG	= 0x0010, /* 16 bit r/w	One Level VLAN Tag ID */
+	XM_2L_VLAN_TAG	= 0x0014, /* 16 bit r/w	Two Level VLAN Tag ID */
+	XM_TX_CMD	= 0x0020, /* 16 bit r/w	Transmit Command Register */
+	XM_TX_RT_LIM	= 0x0024, /* 16 bit r/w	Transmit Retry Limit Register */
+	XM_TX_STIME	= 0x0028, /* 16 bit r/w	Transmit Slottime Register */
+	XM_TX_IPG	= 0x002c, /* 16 bit r/w	Transmit Inter Packet Gap */
+	XM_RX_CMD	= 0x0030, /* 16 bit r/w	Receive Command Register */
+	XM_PHY_ADDR	= 0x0034, /* 16 bit r/w	PHY Address Register */
+	XM_PHY_DATA	= 0x0038, /* 16 bit r/w	PHY Data Register */
+	XM_GP_PORT	= 0x0040, /* 32 bit r/w	General Purpose Port Register */
+	XM_IMSK		= 0x0044, /* 16 bit r/w	Interrupt Mask Register */
+	XM_ISRC		= 0x0048, /* 16 bit r/o	Interrupt Status Register */
+	XM_HW_CFG	= 0x004c, /* 16 bit r/w	Hardware Config Register */
+	XM_TX_LO_WM	= 0x0060, /* 16 bit r/w	Tx FIFO Low Water Mark */
+	XM_TX_HI_WM	= 0x0062, /* 16 bit r/w	Tx FIFO High Water Mark */
+	XM_TX_THR	= 0x0064, /* 16 bit r/w	Tx Request Threshold */
+	XM_HT_THR	= 0x0066, /* 16 bit r/w	Host Request Threshold */
+	XM_PAUSE_DA	= 0x0068, /* NA reg r/w	Pause Destination Address */
+	XM_CTL_PARA	= 0x0070, /* 32 bit r/w	Control Parameter Register */
+	XM_MAC_OPCODE	= 0x0074, /* 16 bit r/w	Opcode for MAC control frames */
+	XM_MAC_PTIME	= 0x0076, /* 16 bit r/w	Pause time for MAC ctrl frames*/
+	XM_TX_STAT	= 0x0078, /* 32 bit r/o	Tx Status LIFO Register */
+
+	XM_EXM_START	= 0x0080, /* r/w	Start Address of the EXM Regs */
+#define XM_EXM(reg)	(XM_EXM_START + ((reg) << 3))
+};
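+
+/*
+ * Illustrative note: XM_EXM() spaces the external match address registers
+ * eight bytes apart, so XM_EXM(0) is 0x0080, XM_EXM(1) is 0x0088, and so on.
+ */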
+
+enum {
+	XM_SRC_CHK	= 0x0100, /* NA reg r/w	Source Check Address Register */
+	XM_SA		= 0x0108, /* NA reg r/w	Station Address Register */
+	XM_HSM		= 0x0110, /* 64 bit r/w	Hash Match Address Registers */
+	XM_RX_LO_WM	= 0x0118, /* 16 bit r/w	Receive Low Water Mark */
+	XM_RX_HI_WM	= 0x011a, /* 16 bit r/w	Receive High Water Mark */
+	XM_RX_THR	= 0x011c, /* 32 bit r/w	Receive Request Threshold */
+	XM_DEV_ID	= 0x0120, /* 32 bit r/o	Device ID Register */
+	XM_MODE		= 0x0124, /* 32 bit r/w	Mode Register */
+	XM_LSA		= 0x0128, /* NA reg r/o	Last Source Register */
+	XM_TS_READ	= 0x0130, /* 32 bit r/o	Time Stamp Read Register */
+	XM_TS_LOAD	= 0x0134, /* 32 bit r/o	Time Stamp Load Value */
+	XM_STAT_CMD	= 0x0200, /* 16 bit r/w	Statistics Command Register */
+	XM_RX_CNT_EV	= 0x0204, /* 32 bit r/o	Rx Counter Event Register */
+	XM_TX_CNT_EV	= 0x0208, /* 32 bit r/o	Tx Counter Event Register */
+	XM_RX_EV_MSK	= 0x020c, /* 32 bit r/w	Rx Counter Event Mask */
+	XM_TX_EV_MSK	= 0x0210, /* 32 bit r/w	Tx Counter Event Mask */
+	XM_TXF_OK	= 0x0280, /* 32 bit r/o	Frames Transmitted OK Counter */
+	XM_TXO_OK_HI	= 0x0284, /* 32 bit r/o	Octets Transmitted OK High Cnt*/
+	XM_TXO_OK_LO	= 0x0288, /* 32 bit r/o	Octets Transmitted OK Low Cnt */
+	XM_TXF_BC_OK	= 0x028c, /* 32 bit r/o	Broadcast Frames Xmitted OK */
+	XM_TXF_MC_OK	= 0x0290, /* 32 bit r/o	Multicast Frames Xmitted OK */
+	XM_TXF_UC_OK	= 0x0294, /* 32 bit r/o	Unicast Frames Xmitted OK */
+	XM_TXF_LONG	= 0x0298, /* 32 bit r/o	Tx Long Frame Counter */
+	XM_TXE_BURST	= 0x029c, /* 32 bit r/o	Tx Burst Event Counter */
+	XM_TXF_MPAUSE	= 0x02a0, /* 32 bit r/o	Tx Pause MAC Ctrl Frame Cnt */
+	XM_TXF_MCTRL	= 0x02a4, /* 32 bit r/o	Tx MAC Ctrl Frame Counter */
+	XM_TXF_SNG_COL	= 0x02a8, /* 32 bit r/o	Tx Single Collision Counter */
+	XM_TXF_MUL_COL	= 0x02ac, /* 32 bit r/o	Tx Multiple Collision Counter */
+	XM_TXF_ABO_COL	= 0x02b0, /* 32 bit r/o	Tx aborted due to Exces. Col. */
+	XM_TXF_LAT_COL	= 0x02b4, /* 32 bit r/o	Tx Late Collision Counter */
+	XM_TXF_DEF	= 0x02b8, /* 32 bit r/o	Tx Deferred Frame Counter */
+	XM_TXF_EX_DEF	= 0x02bc, /* 32 bit r/o	Tx Excessive Deferral Counter */
+	XM_TXE_FIFO_UR	= 0x02c0, /* 32 bit r/o	Tx FIFO Underrun Event Cnt */
+	XM_TXE_CS_ERR	= 0x02c4, /* 32 bit r/o	Tx Carrier Sense Error Cnt */
+	XM_TXP_UTIL	= 0x02c8, /* 32 bit r/o	Tx Utilization in % */
+	XM_TXF_64B	= 0x02d0, /* 32 bit r/o	64 Byte Tx Frame Counter */
+	XM_TXF_127B	= 0x02d4, /* 32 bit r/o	65-127 Byte Tx Frame Counter */
+	XM_TXF_255B	= 0x02d8, /* 32 bit r/o	128-255 Byte Tx Frame Counter */
+	XM_TXF_511B	= 0x02dc, /* 32 bit r/o	256-511 Byte Tx Frame Counter */
+	XM_TXF_1023B	= 0x02e0, /* 32 bit r/o	512-1023 Byte Tx Frame Counter*/
+	XM_TXF_MAX_SZ	= 0x02e4, /* 32 bit r/o	1024-MaxSize Byte Tx Frame Cnt*/
+	XM_RXF_OK	= 0x0300, /* 32 bit r/o	Frames Received OK */
+	XM_RXO_OK_HI	= 0x0304, /* 32 bit r/o	Octets Received OK High Cnt */
+	XM_RXO_OK_LO	= 0x0308, /* 32 bit r/o	Octets Received OK Low Counter*/
+	XM_RXF_BC_OK	= 0x030c, /* 32 bit r/o	Broadcast Frames Received OK */
+	XM_RXF_MC_OK	= 0x0310, /* 32 bit r/o	Multicast Frames Received OK */
+	XM_RXF_UC_OK	= 0x0314, /* 32 bit r/o	Unicast Frames Received OK */
+	XM_RXF_MPAUSE	= 0x0318, /* 32 bit r/o	Rx Pause MAC Ctrl Frame Cnt */
+	XM_RXF_MCTRL	= 0x031c, /* 32 bit r/o	Rx MAC Ctrl Frame Counter */
+	XM_RXF_INV_MP	= 0x0320, /* 32 bit r/o	Rx invalid Pause Frame Cnt */
+	XM_RXF_INV_MOC	= 0x0324, /* 32 bit r/o	Rx Frames with inv. MAC Opcode*/
+	XM_RXE_BURST	= 0x0328, /* 32 bit r/o	Rx Burst Event Counter */
+	XM_RXE_FMISS	= 0x032c, /* 32 bit r/o	Rx Missed Frames Event Cnt */
+	XM_RXF_FRA_ERR	= 0x0330, /* 32 bit r/o	Rx Framing Error Counter */
+	XM_RXE_FIFO_OV	= 0x0334, /* 32 bit r/o	Rx FIFO overflow Event Cnt */
+	XM_RXF_JAB_PKT	= 0x0338, /* 32 bit r/o	Rx Jabber Packet Frame Cnt */
+	XM_RXE_CAR_ERR	= 0x033c, /* 32 bit r/o	Rx Carrier Event Error Cnt */
+	XM_RXF_LEN_ERR	= 0x0340, /* 32 bit r/o	Rx in Range Length Error */
+	XM_RXE_SYM_ERR	= 0x0344, /* 32 bit r/o	Rx Symbol Error Counter */
+	XM_RXE_SHT_ERR	= 0x0348, /* 32 bit r/o	Rx Short Event Error Cnt */
+	XM_RXE_RUNT	= 0x034c, /* 32 bit r/o	Rx Runt Event Counter */
+	XM_RXF_LNG_ERR	= 0x0350, /* 32 bit r/o	Rx Frame too Long Error Cnt */
+	XM_RXF_FCS_ERR	= 0x0354, /* 32 bit r/o	Rx Frame Check Seq. Error Cnt */
+	XM_RXF_CEX_ERR	= 0x035c, /* 32 bit r/o	Rx Carrier Ext Error Frame Cnt*/
+	XM_RXP_UTIL	= 0x0360, /* 32 bit r/o	Rx Utilization in % */
+	XM_RXF_64B	= 0x0368, /* 32 bit r/o	64 Byte Rx Frame Counter */
+	XM_RXF_127B	= 0x036c, /* 32 bit r/o	65-127 Byte Rx Frame Counter */
+	XM_RXF_255B	= 0x0370, /* 32 bit r/o	128-255 Byte Rx Frame Counter */
+	XM_RXF_511B	= 0x0374, /* 32 bit r/o	256-511 Byte Rx Frame Counter */
+	XM_RXF_1023B	= 0x0378, /* 32 bit r/o	512-1023 Byte Rx Frame Counter*/
+	XM_RXF_MAX_SZ	= 0x037c, /* 32 bit r/o	1024-MaxSize Byte Rx Frame Cnt*/
+};
+
+/*	XM_MMU_CMD	16 bit r/w	MMU Command Register */
+enum {
+	XM_MMU_PHY_RDY	= 1<<12, /* Bit 12:	PHY Read Ready */
+	XM_MMU_PHY_BUSY	= 1<<11, /* Bit 11:	PHY Busy */
+	XM_MMU_IGN_PF	= 1<<10, /* Bit 10:	Ignore Pause Frame */
+	XM_MMU_MAC_LB	= 1<<9,	 /* Bit  9:	Enable MAC Loopback */
+	XM_MMU_FRC_COL	= 1<<7,	 /* Bit  7:	Force Collision */
+	XM_MMU_SIM_COL	= 1<<6,	 /* Bit  6:	Simulate Collision */
+	XM_MMU_NO_PRE	= 1<<5,	 /* Bit  5:	No MDIO Preamble */
+	XM_MMU_GMII_FD	= 1<<4,	 /* Bit  4:	GMII uses Full Duplex */
+	XM_MMU_RAT_CTRL	= 1<<3,	 /* Bit  3:	Enable Rate Control */
+	XM_MMU_GMII_LOOP = 1<<2, /* Bit  2:	PHY is in Loopback Mode */
+	XM_MMU_ENA_RX	= 1<<1,	 /* Bit  1:	Enable Receiver */
+	XM_MMU_ENA_TX	= 1<<0,	 /* Bit  0:	Enable Transmitter */
+};
+
+
+/*	XM_TX_CMD	16 bit r/w	Transmit Command Register */
+enum {
+	XM_TX_BK2BK	= 1<<6,	/* Bit  6:	Ignore Carrier Sense (Tx Bk2Bk) */
+	XM_TX_ENC_BYP	= 1<<5,	/* Bit  5:	Set Encoder in Bypass Mode */
+	XM_TX_SAM_LINE	= 1<<4,	/* Bit  4: (sc)	Start utilization calculation */
+	XM_TX_NO_GIG_MD	= 1<<3,	/* Bit  3:	Disable Carrier Extension */
+	XM_TX_NO_PRE	= 1<<2,	/* Bit  2:	Disable Preamble Generation */
+	XM_TX_NO_CRC	= 1<<1,	/* Bit  1:	Disable CRC Generation */
+	XM_TX_AUTO_PAD	= 1<<0,	/* Bit  0:	Enable Automatic Padding */
+};
+
+/*	XM_TX_RT_LIM	16 bit r/w	Transmit Retry Limit Register */
+#define XM_RT_LIM_MSK	0x1f	/* Bit  4..0:	Tx Retry Limit */
+
+
+/*	XM_TX_STIME	16 bit r/w	Transmit Slottime Register */
+#define XM_STIME_MSK	0x7f	/* Bit  6..0:	Tx Slottime bits */
+
+
+/*	XM_TX_IPG	16 bit r/w	Transmit Inter Packet Gap */
+#define XM_IPG_MSK		0xff	/* Bit  7..0:	IPG value bits */
+
+
+/*	XM_RX_CMD	16 bit r/w	Receive Command Register */
+enum {
+	XM_RX_LENERR_OK	= 1<<8,	/* Bit  8	don't set Rx Err bit for */
+				/*		in-range error packets */
+	XM_RX_BIG_PK_OK	= 1<<7,	/* Bit  7	don't set Rx Err bit for */
+				/*		jumbo packets */
+	XM_RX_IPG_CAP	= 1<<6,	/* Bit  6	repl. type field with IPG */
+	XM_RX_TP_MD	= 1<<5,	/* Bit  5:	Enable transparent Mode */
+	XM_RX_STRIP_FCS	= 1<<4,	/* Bit  4:	Enable FCS Stripping */
+	XM_RX_SELF_RX	= 1<<3,	/* Bit  3: 	Enable Rx of own packets */
+	XM_RX_SAM_LINE	= 1<<2,	/* Bit  2: (sc)	Start utilization calculation */
+	XM_RX_STRIP_PAD	= 1<<1,	/* Bit  1:	Strip pad bytes of Rx frames */
+	XM_RX_DIS_CEXT	= 1<<0,	/* Bit  0:	Disable carrier ext. check */
+};
+
+
+/*	XM_GP_PORT	32 bit r/w	General Purpose Port Register */
+enum {
+	XM_GP_ANIP	= 1<<6,	/* Bit  6: (ro)	Auto-Neg. in progress */
+	XM_GP_FRC_INT	= 1<<5,	/* Bit  5: (sc)	Force Interrupt */
+	XM_GP_RES_MAC	= 1<<3,	/* Bit  3: (sc)	Reset MAC and FIFOs */
+	XM_GP_RES_STAT	= 1<<2,	/* Bit  2: (sc)	Reset the statistics module */
+	XM_GP_INP_ASS	= 1<<0,	/* Bit  0: (ro) GP Input Pin asserted */
+};
+
+
+/*	XM_IMSK		16 bit r/w	Interrupt Mask Register */
+/*	XM_ISRC		16 bit r/o	Interrupt Status Register */
+enum {
+	XM_IS_LNK_AE	= 1<<14, /* Bit 14:	Link Asynchronous Event */
+	XM_IS_TX_ABORT	= 1<<13, /* Bit 13:	Transmit Abort, late Col. etc */
+	XM_IS_FRC_INT	= 1<<12, /* Bit 12:	Force INT bit set in GP */
+	XM_IS_INP_ASS	= 1<<11, /* Bit 11:	Input Asserted, GP bit 0 set */
+	XM_IS_LIPA_RC	= 1<<10, /* Bit 10:	Link Partner requests config */
+	XM_IS_RX_PAGE	= 1<<9,	/* Bit  9:	Page Received */
+	XM_IS_TX_PAGE	= 1<<8,	/* Bit  8:	Next Page Loaded for Transmit */
+	XM_IS_AND	= 1<<7,	/* Bit  7:	Auto-Negotiation Done */
+	XM_IS_TSC_OV	= 1<<6,	/* Bit  6:	Time Stamp Counter Overflow */
+	XM_IS_RXC_OV	= 1<<5,	/* Bit  5:	Rx Counter Event Overflow */
+	XM_IS_TXC_OV	= 1<<4,	/* Bit  4:	Tx Counter Event Overflow */
+	XM_IS_RXF_OV	= 1<<3,	/* Bit  3:	Receive FIFO Overflow */
+	XM_IS_TXF_UR	= 1<<2,	/* Bit  2:	Transmit FIFO Underrun */
+	XM_IS_TX_COMP	= 1<<1,	/* Bit  1:	Frame Tx Complete */
+	XM_IS_RX_COMP	= 1<<0,	/* Bit  0:	Frame Rx Complete */
+
+	XM_IMSK_DISABLE	= 0xffff,
+};
+
+/*	XM_HW_CFG	16 bit r/w	Hardware Config Register */
+enum {
+	XM_HW_GEN_EOP	= 1<<3,	/* Bit  3:	generate End of Packet pulse */
+	XM_HW_COM4SIG	= 1<<2,	/* Bit  2:	use Comma Detect for Sig. Det.*/
+	XM_HW_GMII_MD	= 1<<0,	/* Bit  0:	GMII Interface selected */
+};
+
+
+/*	XM_TX_LO_WM	16 bit r/w	Tx FIFO Low Water Mark */
+/*	XM_TX_HI_WM	16 bit r/w	Tx FIFO High Water Mark */
+#define XM_TX_WM_MSK	0x01ff	/* Bit  8.. 0	Tx FIFO Watermark bits */
+
+/*	XM_TX_THR	16 bit r/w	Tx Request Threshold */
+/*	XM_HT_THR	16 bit r/w	Host Request Threshold */
+/*	XM_RX_THR	16 bit r/w	Rx Request Threshold */
+#define XM_THR_MSK		0x03ff	/* Bit  9.. 0	Rx/Tx Request Threshold bits */
+
+
+/*	XM_TX_STAT	32 bit r/o	Tx Status LIFO Register */
+enum {
+	XM_ST_VALID	= (1UL<<31),	/* Bit 31:	Status Valid */
+	XM_ST_BYTE_CNT	= (0x3fffL<<17),	/* Bit 30..17:	Tx frame Length */
+	XM_ST_RETRY_CNT	= (0x1fL<<12),	/* Bit 16..12:	Retry Count */
+	XM_ST_EX_COL	= 1<<11,	/* Bit 11:	Excessive Collisions */
+	XM_ST_EX_DEF	= 1<<10,	/* Bit 10:	Excessive Deferral */
+	XM_ST_BURST	= 1<<9,		/* Bit  9:	p. xmitted in burst md*/
+	XM_ST_DEFER	= 1<<8,		/* Bit  8:	packet was deferred */
+	XM_ST_BC	= 1<<7,		/* Bit  7:	Broadcast packet */
+	XM_ST_MC	= 1<<6,		/* Bit  6:	Multicast packet */
+	XM_ST_UC	= 1<<5,		/* Bit  5:	Unicast packet */
+	XM_ST_TX_UR	= 1<<4,		/* Bit  4:	FIFO Underrun occurred */
+	XM_ST_CS_ERR	= 1<<3,		/* Bit  3:	Carrier Sense Error */
+	XM_ST_LAT_COL	= 1<<2,		/* Bit  2:	Late Collision Error */
+	XM_ST_MUL_COL	= 1<<1,		/* Bit  1:	Multiple Collisions */
+	XM_ST_SGN_COL	= 1<<0,		/* Bit  0:	Single Collision */
+};
+
+/*	XM_RX_LO_WM	16 bit r/w	Receive Low Water Mark */
+/*	XM_RX_HI_WM	16 bit r/w	Receive High Water Mark */
+#define XM_RX_WM_MSK	0x03ff		/* Bit  9.. 0:	Rx FIFO Watermark bits */
+
+
+/*	XM_DEV_ID	32 bit r/o	Device ID Register */
+#define XM_DEV_OUI	(0x00ffffffUL<<8)	/* Bit 31..8:	Device OUI */
+#define XM_DEV_REV	(0x07L << 5)		/* Bit  7..5:	Chip Rev Num */
+
+
+/*	XM_MODE		32 bit r/w	Mode Register */
+enum {
+	XM_MD_ENA_REJ	= 1<<26, /* Bit 26:	Enable Frame Reject */
+	XM_MD_SPOE_E	= 1<<25, /* Bit 25:	Send Pause on Edge */
+				 /*		(externally generated) */
+	XM_MD_TX_REP	= 1<<24, /* Bit 24:	Transmit Repeater Mode */
+	XM_MD_SPOFF_I	= 1<<23, /* Bit 23:	Send Pause on FIFO full */
+				 /*		(internally generated) */
+	XM_MD_LE_STW	= 1<<22, /* Bit 22:	Rx Stat Word in Little Endian */
+	XM_MD_TX_CONT	= 1<<21, /* Bit 21:	Send Continuous */
+	XM_MD_TX_PAUSE	= 1<<20, /* Bit 20: (sc)	Send Pause Frame */
+	XM_MD_ATS	= 1<<19, /* Bit 19:	Append Time Stamp */
+	XM_MD_SPOL_I	= 1<<18, /* Bit 18:	Send Pause on Low */
+				 /*		(internally generated) */
+	XM_MD_SPOH_I	= 1<<17, /* Bit 17:	Send Pause on High */
+				 /*		(internally generated) */
+	XM_MD_CAP	= 1<<16, /* Bit 16:	Check Address Pair */
+	XM_MD_ENA_HASH	= 1<<15, /* Bit 15:	Enable Hashing */
+	XM_MD_CSA	= 1<<14, /* Bit 14:	Check Station Address */
+	XM_MD_CAA	= 1<<13, /* Bit 13:	Check Address Array */
+	XM_MD_RX_MCTRL	= 1<<12, /* Bit 12:	Rx MAC Control Frame */
+	XM_MD_RX_RUNT	= 1<<11, /* Bit 11:	Rx Runt Frames */
+	XM_MD_RX_IRLE	= 1<<10, /* Bit 10:	Rx in Range Len Err Frame */
+	XM_MD_RX_LONG	= 1<<9,  /* Bit  9:	Rx Long Frame */
+	XM_MD_RX_CRCE	= 1<<8,  /* Bit  8:	Rx CRC Error Frame */
+	XM_MD_RX_ERR	= 1<<7,  /* Bit  7:	Rx Error Frame */
+	XM_MD_DIS_UC	= 1<<6,  /* Bit  6:	Disable Rx Unicast */
+	XM_MD_DIS_MC	= 1<<5,  /* Bit  5:	Disable Rx Multicast */
+	XM_MD_DIS_BC	= 1<<4,  /* Bit  4:	Disable Rx Broadcast */
+	XM_MD_ENA_PROM	= 1<<3,  /* Bit  3:	Enable Promiscuous */
+	XM_MD_ENA_BE	= 1<<2,  /* Bit  2:	Enable Big Endian */
+	XM_MD_FTF	= 1<<1,  /* Bit  1: (sc)	Flush Tx FIFO */
+	XM_MD_FRF	= 1<<0,  /* Bit  0: (sc)	Flush Rx FIFO */
+};
+
+#define XM_PAUSE_MODE	(XM_MD_SPOE_E | XM_MD_SPOL_I | XM_MD_SPOH_I)
+#define XM_DEF_MODE	(XM_MD_RX_RUNT | XM_MD_RX_IRLE | XM_MD_RX_LONG |\
+			 XM_MD_RX_CRCE | XM_MD_RX_ERR | XM_MD_CSA)
+
+/*	XM_STAT_CMD	16 bit r/w	Statistics Command Register */
+enum {
+	XM_SC_SNP_RXC	= 1<<5,	/* Bit  5: (sc)	Snap Rx Counters */
+	XM_SC_SNP_TXC	= 1<<4,	/* Bit  4: (sc)	Snap Tx Counters */
+	XM_SC_CP_RXC	= 1<<3,	/* Bit  3: 	Copy Rx Counters Continuously */
+	XM_SC_CP_TXC	= 1<<2,	/* Bit  2:	Copy Tx Counters Continuously */
+	XM_SC_CLR_RXC	= 1<<1,	/* Bit  1: (sc)	Clear Rx Counters */
+	XM_SC_CLR_TXC	= 1<<0,	/* Bit  0: (sc) Clear Tx Counters */
+};
+
+
+/*	XM_RX_CNT_EV	32 bit r/o	Rx Counter Event Register */
+/*	XM_RX_EV_MSK	32 bit r/w	Rx Counter Event Mask */
+enum {
+	XMR_MAX_SZ_OV	= 1<<31, /* Bit 31:	1024-MaxSize Rx Cnt Ov*/
+	XMR_1023B_OV	= 1<<30, /* Bit 30:	512-1023Byte Rx Cnt Ov*/
+	XMR_511B_OV	= 1<<29, /* Bit 29:	256-511 Byte Rx Cnt Ov*/
+	XMR_255B_OV	= 1<<28, /* Bit 28:	128-255 Byte Rx Cnt Ov*/
+	XMR_127B_OV	= 1<<27, /* Bit 27:	65-127 Byte Rx Cnt Ov */
+	XMR_64B_OV	= 1<<26, /* Bit 26:	64 Byte Rx Cnt Ov */
+	XMR_UTIL_OV	= 1<<25, /* Bit 25:	Rx Util Cnt Overflow */
+	XMR_UTIL_UR	= 1<<24, /* Bit 24:	Rx Util Cnt Underrun */
+	XMR_CEX_ERR_OV	= 1<<23, /* Bit 23:	CEXT Err Cnt Ov */
+	XMR_FCS_ERR_OV	= 1<<21, /* Bit 21:	Rx FCS Error Cnt Ov */
+	XMR_LNG_ERR_OV	= 1<<20, /* Bit 20:	Rx too Long Err Cnt Ov*/
+	XMR_RUNT_OV	= 1<<19, /* Bit 19:	Runt Event Cnt Ov */
+	XMR_SHT_ERR_OV	= 1<<18, /* Bit 18:	Rx Short Ev Err Cnt Ov*/
+	XMR_SYM_ERR_OV	= 1<<17, /* Bit 17:	Rx Sym Err Cnt Ov */
+	XMR_CAR_ERR_OV	= 1<<15, /* Bit 15:	Rx Carr Ev Err Cnt Ov */
+	XMR_JAB_PKT_OV	= 1<<14, /* Bit 14:	Rx Jabb Packet Cnt Ov */
+	XMR_FIFO_OV	= 1<<13, /* Bit 13:	Rx FIFO Ov Ev Cnt Ov */
+	XMR_FRA_ERR_OV	= 1<<12, /* Bit 12:	Rx Framing Err Cnt Ov */
+	XMR_FMISS_OV	= 1<<11, /* Bit 11:	Rx Missed Ev Cnt Ov */
+	XMR_BURST	= 1<<10, /* Bit 10:	Rx Burst Event Cnt Ov */
+	XMR_INV_MOC	= 1<<9,  /* Bit  9:	Rx with inv. MAC OC Ov*/
+	XMR_INV_MP	= 1<<8,  /* Bit  8:	Rx inv Pause Frame Ov */
+	XMR_MCTRL_OV	= 1<<7,  /* Bit  7:	Rx MAC Ctrl-F Cnt Ov */
+	XMR_MPAUSE_OV	= 1<<6,  /* Bit  6:	Rx Pause MAC Ctrl-F Ov*/
+	XMR_UC_OK_OV	= 1<<5,  /* Bit  5:	Rx Unicast Frame CntOv*/
+	XMR_MC_OK_OV	= 1<<4,  /* Bit  4:	Rx Multicast Cnt Ov */
+	XMR_BC_OK_OV	= 1<<3,  /* Bit  3:	Rx Broadcast Cnt Ov */
+	XMR_OK_LO_OV	= 1<<2,  /* Bit  2:	Octets Rx OK Low CntOv*/
+	XMR_OK_HI_OV	= 1<<1,  /* Bit  1:	Octets Rx OK Hi Cnt Ov*/
+	XMR_OK_OV	= 1<<0,  /* Bit  0:	Frames Received Ok Ov */
+};
+
+#define XMR_DEF_MSK		(XMR_OK_LO_OV | XMR_OK_HI_OV)
+
+/*	XM_TX_CNT_EV	32 bit r/o	Tx Counter Event Register */
+/*	XM_TX_EV_MSK	32 bit r/w	Tx Counter Event Mask */
+enum {
+	XMT_MAX_SZ_OV	= 1<<25,	/* Bit 25:	1024-MaxSize Tx Cnt Ov*/
+	XMT_1023B_OV	= 1<<24,	/* Bit 24:	512-1023Byte Tx Cnt Ov*/
+	XMT_511B_OV	= 1<<23,	/* Bit 23:	256-511 Byte Tx Cnt Ov*/
+	XMT_255B_OV	= 1<<22,	/* Bit 22:	128-255 Byte Tx Cnt Ov*/
+	XMT_127B_OV	= 1<<21,	/* Bit 21:	65-127 Byte Tx Cnt Ov */
+	XMT_64B_OV	= 1<<20,	/* Bit 20:	64 Byte Tx Cnt Ov */
+	XMT_UTIL_OV	= 1<<19,	/* Bit 19:	Tx Util Cnt Overflow */
+	XMT_UTIL_UR	= 1<<18,	/* Bit 18:	Tx Util Cnt Underrun */
+	XMT_CS_ERR_OV	= 1<<17,	/* Bit 17:	Tx Carr Sen Err Cnt Ov*/
+	XMT_FIFO_UR_OV	= 1<<16,	/* Bit 16:	Tx FIFO Ur Ev Cnt Ov */
+	XMT_EX_DEF_OV	= 1<<15,	/* Bit 15:	Tx Ex Deferral Cnt Ov */
+	XMT_DEF		= 1<<14,	/* Bit 14:	Tx Deferred Cnt Ov */
+	XMT_LAT_COL_OV	= 1<<13,	/* Bit 13:	Tx Late Col Cnt Ov */
+	XMT_ABO_COL_OV	= 1<<12,	/* Bit 12:	Tx abort due to Ex Col Ov */
+	XMT_MUL_COL_OV	= 1<<11,	/* Bit 11:	Tx Mult Col Cnt Ov */
+	XMT_SNG_COL	= 1<<10,	/* Bit 10:	Tx Single Col Cnt Ov */
+	XMT_MCTRL_OV	= 1<<9,		/* Bit  9:	Tx MAC Ctrl Counter Ov*/
+	XMT_MPAUSE	= 1<<8,		/* Bit  8:	Tx Pause MAC Ctrl-F Ov*/
+	XMT_BURST	= 1<<7,		/* Bit  7:	Tx Burst Event Cnt Ov */
+	XMT_LONG	= 1<<6,		/* Bit  6:	Tx Long Frame Cnt Ov */
+	XMT_UC_OK_OV	= 1<<5,		/* Bit  5:	Tx Unicast Cnt Ov */
+	XMT_MC_OK_OV	= 1<<4,		/* Bit  4:	Tx Multicast Cnt Ov */
+	XMT_BC_OK_OV	= 1<<3,		/* Bit  3:	Tx Broadcast Cnt Ov */
+	XMT_OK_LO_OV	= 1<<2,		/* Bit  2:	Octets Tx OK Low CntOv*/
+	XMT_OK_HI_OV	= 1<<1,		/* Bit  1:	Octets Tx OK Hi Cnt Ov*/
+	XMT_OK_OV	= 1<<0,		/* Bit  0:	Frames Tx Ok Ov */
+};
+
+#define XMT_DEF_MSK		(XMT_OK_LO_OV | XMT_OK_HI_OV)
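+
+/*
+ * Note: both default masks (XMR_DEF_MSK and XMT_DEF_MSK) watch only the
+ * overflow of the 32-bit "octets OK" counter halves, presumably so the
+ * driver can extend those hardware counters in software when they wrap.
+ */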
+
+struct skge_rx_desc {
+	u32		control;
+	u32		next_offset;
+	u32		dma_lo;
+	u32		dma_hi;
+	u32		status;
+	u32		timestamp;
+	u16		csum2;
+	u16		csum1;
+	u16		csum2_start;
+	u16		csum1_start;
+};
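+
+/*
+ * The paired csum1/csum2 and csum1_start/csum2_start fields suggest the
+ * receive DMA engine can return two hardware checksums per frame, each
+ * computed from a programmable start offset (e.g. one over the IP header
+ * and one over the payload); the exact semantics are hardware-defined.
+ */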
+
+struct skge_tx_desc {
+	u32		control;
+	u32		next_offset;
+	u32		dma_lo;
+	u32		dma_hi;
+	u32		status;
+	u32		csum_offs;
+	u16		csum_write;
+	u16		csum_start;
+	u32		rsvd;
+};
+
+struct skge_element {
+	struct skge_element	*next;
+	void			*desc;
+	struct sk_buff  	*skb;
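+	/* DMA unmap bookkeeping; these macros expand to real fields only
+	 * on configurations that track unmap state, so the element stays
+	 * small on architectures that do not need it.
+	 */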
+	DEFINE_DMA_UNMAP_ADDR(mapaddr);
+	DEFINE_DMA_UNMAP_LEN(maplen);
+};
+
+struct skge_ring {
+	struct skge_element *to_clean;
+	struct skge_element *to_use;
+	struct skge_element *start;
+	unsigned long	    count;
+};
+
+
+struct skge_hw {
+	void __iomem  	     *regs;
+	struct pci_dev	     *pdev;
+	spinlock_t	     hw_lock;
+	u32		     intr_mask;
+	struct net_device    *dev[2];
+
+	u8	     	     chip_id;
+	u8		     chip_rev;
+	u8		     copper;
+	u8		     ports;
+	u8		     phy_type;
+
+	u32	     	     ram_size;
+	u32	     	     ram_offset;
+	u16		     phy_addr;
+	spinlock_t	     phy_lock;
+	struct tasklet_struct phy_task;
+
+	char		     irq_name[0]; /* skge@pci:000:04:00.0 */
+};
+
+enum pause_control {
+	FLOW_MODE_NONE 		= 1, /* No Flow-Control */
+	FLOW_MODE_LOC_SEND	= 2, /* Local station sends PAUSE */
+	FLOW_MODE_SYMMETRIC	= 3, /* Both stations may send PAUSE */
+	FLOW_MODE_SYM_OR_REM	= 4, /* Both stations may send PAUSE or
+				      * just the remote station may send PAUSE
+				      */
+};
+
+enum pause_status {
+	FLOW_STAT_INDETERMINATED = 0,	/* indeterminate */
+	FLOW_STAT_NONE,			/* No Flow Control */
+	FLOW_STAT_REM_SEND,		/* Remote Station sends PAUSE */
+	FLOW_STAT_LOC_SEND,		/* Local station sends PAUSE */
+	FLOW_STAT_SYMMETRIC,		/* Both stations may send PAUSE */
+};
+
+
+struct skge_port {
+	struct skge_hw	     *hw;
+	struct net_device    *netdev;
+	struct napi_struct   napi;
+	int		     port;
+	u32		     msg_enable;
+
+	struct skge_ring     tx_ring;
+
+	struct skge_ring     rx_ring ____cacheline_aligned_in_smp;
+	unsigned int	     rx_buf_size;
+
+	struct timer_list    link_timer;
+	enum pause_control   flow_control;
+	enum pause_status    flow_status;
+	u8		     blink_on;
+	u8		     wol;
+	u8		     autoneg;	/* AUTONEG_ENABLE, AUTONEG_DISABLE */
+	u8		     duplex;	/* DUPLEX_HALF, DUPLEX_FULL */
+	u16		     speed;	/* SPEED_1000, SPEED_100, ... */
+	u32		     advertising;
+
+	void		     *mem;	/* PCI memory for rings */
+	dma_addr_t	     dma;
+	unsigned long	     mem_size;
+#ifdef CONFIG_SKGE_DEBUG
+	struct dentry	     *debugfs;
+#endif
+};
+
+
+/* Register accessor for memory mapped device */
+static inline u32 skge_read32(const struct skge_hw *hw, int reg)
+{
+	return readl(hw->regs + reg);
+}
+
+static inline u16 skge_read16(const struct skge_hw *hw, int reg)
+{
+	return readw(hw->regs + reg);
+}
+
+static inline u8 skge_read8(const struct skge_hw *hw, int reg)
+{
+	return readb(hw->regs + reg);
+}
+
+static inline void skge_write32(const struct skge_hw *hw, int reg, u32 val)
+{
+	writel(val, hw->regs + reg);
+}
+
+static inline void skge_write16(const struct skge_hw *hw, int reg, u16 val)
+{
+	writew(val, hw->regs + reg);
+}
+
+static inline void skge_write8(const struct skge_hw *hw, int reg, u8 val)
+{
+	writeb(val, hw->regs + reg);
+}
+
+/* MAC Related Registers inside the device. */
+#define SK_REG(port,reg)	(((port)<<7)+(u16)(reg))
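+/*
+ * Per-port MAC registers are banked 0x80 bytes apart, so for example
+ * SK_REG(1, reg) addresses the same register as SK_REG(0, reg) plus 0x80.
+ */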
+#define SK_XMAC_REG(port, reg) \
+	((BASE_XMAC_1 + (port) * (BASE_XMAC_2 - BASE_XMAC_1)) | (reg) << 1)
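+/*
+ * The XMAC exposes 16-bit registers; the extra shift places each 16-bit
+ * word on a 32-bit boundary in the mapped BAR, which is why xm_read32()
+ * below stitches a value together from two reads at reg and reg+2.
+ */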
+
+static inline u32 xm_read32(const struct skge_hw *hw, int port, int reg)
+{
+	u32 v;
+	v = skge_read16(hw, SK_XMAC_REG(port, reg));
+	v |= (u32)skge_read16(hw, SK_XMAC_REG(port, reg+2)) << 16;
+	return v;
+}
+
+static inline u16 xm_read16(const struct skge_hw *hw, int port, int reg)
+{
+	return skge_read16(hw, SK_XMAC_REG(port,reg));
+}
+
+static inline void xm_write32(const struct skge_hw *hw, int port, int r, u32 v)
+{
+	skge_write16(hw, SK_XMAC_REG(port,r), v & 0xffff);
+	skge_write16(hw, SK_XMAC_REG(port,r+2), v >> 16);
+}
+
+static inline void xm_write16(const struct skge_hw *hw, int port, int r, u16 v)
+{
+	skge_write16(hw, SK_XMAC_REG(port,r), v);
+}
+
+static inline void xm_outhash(const struct skge_hw *hw, int port, int reg,
+				   const u8 *hash)
+{
+	xm_write16(hw, port, reg,   (u16)hash[0] | ((u16)hash[1] << 8));
+	xm_write16(hw, port, reg+2, (u16)hash[2] | ((u16)hash[3] << 8));
+	xm_write16(hw, port, reg+4, (u16)hash[4] | ((u16)hash[5] << 8));
+	xm_write16(hw, port, reg+6, (u16)hash[6] | ((u16)hash[7] << 8));
+}
+
+static inline void xm_outaddr(const struct skge_hw *hw, int port, int reg,
+				   const u8 *addr)
+{
+	xm_write16(hw, port, reg,   (u16)addr[0] | ((u16)addr[1] << 8));
+	xm_write16(hw, port, reg+2, (u16)addr[2] | ((u16)addr[3] << 8));
+	xm_write16(hw, port, reg+4, (u16)addr[4] | ((u16)addr[5] << 8));
+}
+
+#define SK_GMAC_REG(port,reg) \
+	(BASE_GMAC_1 + (port) * (BASE_GMAC_2-BASE_GMAC_1) + (reg))
+
+static inline u16 gma_read16(const struct skge_hw *hw, int port, int reg)
+{
+	return skge_read16(hw, SK_GMAC_REG(port,reg));
+}
+
+static inline u32 gma_read32(const struct skge_hw *hw, int port, int reg)
+{
+	return (u32) skge_read16(hw, SK_GMAC_REG(port,reg))
+		| ((u32)skge_read16(hw, SK_GMAC_REG(port,reg+4)) << 16);
+}
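+
+/*
+ * Unlike the XMAC, the GMA register map places the two 16-bit halves of
+ * a 32-bit counter four bytes apart, hence the reg+4 above.
+ */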
+
+static inline void gma_write16(const struct skge_hw *hw, int port, int r, u16 v)
+{
+	skge_write16(hw, SK_GMAC_REG(port,r), v);
+}
+
+static inline void gma_set_addr(struct skge_hw *hw, int port, int reg,
+				    const u8 *addr)
+{
+	gma_write16(hw, port, reg,  (u16) addr[0] | ((u16) addr[1] << 8));
+	gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8));
+	gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8));
+}
+
+#endif
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
new file mode 100644
index 0000000..4b62aa1
--- /dev/null
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -0,0 +1,5270 @@
+/*
+ * New driver for Marvell Yukon 2 chipset.
+ * Based on earlier sk98lin, and skge driver.
+ *
+ * This driver intentionally does not support all the features
+ * of the original driver such as link fail-over and link management because
+ * those should be done at higher levels.
+ *
+ * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/crc32.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/slab.h>
+#include <net/ip.h>
+#include <linux/tcp.h>
+#include <linux/in.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/if_vlan.h>
+#include <linux/prefetch.h>
+#include <linux/debugfs.h>
+#include <linux/mii.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
+
+#include <asm/irq.h>
+
+#include "sky2.h"
+
+#define DRV_NAME		"sky2"
+#define DRV_VERSION		"1.30"
+
+/*
+ * The Yukon II chipset takes 64 bit command blocks (called list elements)
+ * that are organized into three different rings (receive, transmit and
+ * status), similar to Tigon3.
+ */
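+/*
+ * Both ports share the single status ring: completion events for receive
+ * and transmit are reported there and demultiplexed by port.
+ */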
+
+#define RX_LE_SIZE	    	1024
+#define RX_LE_BYTES		(RX_LE_SIZE*sizeof(struct sky2_rx_le))
+#define RX_MAX_PENDING		(RX_LE_SIZE/6 - 2)
+#define RX_DEF_PENDING		RX_MAX_PENDING
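+
+/*
+ * The divide-by-six in RX_MAX_PENDING is a conservative bound: a single
+ * receive buffer can consume several list elements (64-bit address
+ * elements plus one element per data fragment), so the number of pending
+ * buffers is capped well below the raw ring size.
+ */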
+
+/* This is the worst case number of transmit list elements for a single skb:
+   VLAN:GSO + CKSUM + Data + skb_frags * DMA */
+#define MAX_SKB_TX_LE	(2 + (sizeof(dma_addr_t)/sizeof(u32))*(MAX_SKB_FRAGS+1))
+#define TX_MIN_PENDING		(MAX_SKB_TX_LE+1)
+#define TX_MAX_PENDING		1024
+#define TX_DEF_PENDING		63
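+
+/*
+ * Worked example (assuming 4 KB pages, where MAX_SKB_FRAGS is 17, and an
+ * 8-byte dma_addr_t): MAX_SKB_TX_LE = 2 + 2 * (17 + 1) = 38, so
+ * TX_MIN_PENDING leaves room for one worst-case skb plus one spare element.
+ */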
+
+#define TX_WATCHDOG		(5 * HZ)
+#define NAPI_WEIGHT		64
+#define PHY_RETRIES		1000
+
+#define SKY2_EEPROM_MAGIC	0x9955aabb
+
+#define RING_NEXT(x, s)	(((x)+1) & ((s)-1))
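+
+/*
+ * RING_NEXT relies on the ring size being a power of two: for example
+ * RING_NEXT(1023, 1024) wraps to 0 because the mask (s-1) keeps only
+ * the low bits of the incremented index.
+ */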
+
+static const u32 default_msg =
+    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
+    | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR
+    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
+
+static int debug = -1;		/* defaults above */
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+static int copybreak __read_mostly = 128;
+module_param(copybreak, int, 0);
+MODULE_PARM_DESC(copybreak, "Receive copy threshold");
+
+static int disable_msi = 0;
+module_param(disable_msi, int, 0);
+MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
+
+static int legacy_pme = 0;
+module_param(legacy_pme, int, 0);
+MODULE_PARM_DESC(legacy_pme, "Legacy power management");
+
+static const struct pci_device_id sky2_id_table[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, /* SK-9Sxx */
+	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */
+	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E01) }, /* SK-9E21M */
+	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) },	/* DGE-560T */
+	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) }, 	/* DGE-550SX */
+	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B02) },	/* DGE-560SX */
+	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B03) },	/* DGE-550T */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) }, /* 88E8021 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) }, /* 88E8022 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) }, /* 88E8061 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4343) }, /* 88E8062 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4344) }, /* 88E8021 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4345) }, /* 88E8022 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4346) }, /* 88E8061 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4347) }, /* 88E8062 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4350) }, /* 88E8035 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4351) }, /* 88E8036 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4352) }, /* 88E8038 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4353) }, /* 88E8039 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4354) }, /* 88E8040 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4355) }, /* 88E8040T */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4356) }, /* 88EC033 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4357) }, /* 88E8042 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x435A) }, /* 88E8048 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4360) }, /* 88E8052 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) }, /* 88E8050 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) }, /* 88E8053 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) }, /* 88E8055 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) }, /* 88E8056 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4365) }, /* 88E8070 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4369) }, /* 88EC042 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436A) }, /* 88E8058 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436B) }, /* 88E8071 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436C) }, /* 88E8072 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436D) }, /* 88E8055 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4382) }, /* 88E8079 */
+	{ 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, sky2_id_table);
+
+/* Avoid conditionals by using array */
+static const unsigned txqaddr[] = { Q_XA1, Q_XA2 };
+static const unsigned rxqaddr[] = { Q_R1, Q_R2 };
+static const u32 portirq_msk[] = { Y2_IS_PORT_1, Y2_IS_PORT_2 };
+
+static void sky2_set_multicast(struct net_device *dev);
+static irqreturn_t sky2_intr(int irq, void *dev_id);
+
+/* Access to PHY via serial interconnect */
+static int gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val)
+{
+	int i;
+
+	gma_write16(hw, port, GM_SMI_DATA, val);
+	gma_write16(hw, port, GM_SMI_CTRL,
+		    GM_SMI_CT_PHY_AD(PHY_ADDR_MARV) | GM_SMI_CT_REG_AD(reg));
+
+	for (i = 0; i < PHY_RETRIES; i++) {
+		u16 ctrl = gma_read16(hw, port, GM_SMI_CTRL);
+		if (ctrl == 0xffff)
+			goto io_error;
+
+		if (!(ctrl & GM_SMI_CT_BUSY))
+			return 0;
+
+		udelay(10);
+	}
+
+	dev_warn(&hw->pdev->dev, "%s: phy write timeout\n", hw->dev[port]->name);
+	return -ETIMEDOUT;
+
+io_error:
+	dev_err(&hw->pdev->dev, "%s: phy I/O error\n", hw->dev[port]->name);
+	return -EIO;
+}
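+
+/*
+ * Polling budget: PHY_RETRIES (1000) iterations of udelay(10) bound the
+ * busy-wait in these accessors at roughly 10 ms before timing out.
+ */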
+
+static int __gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg, u16 *val)
+{
+	int i;
+
+	gma_write16(hw, port, GM_SMI_CTRL, GM_SMI_CT_PHY_AD(PHY_ADDR_MARV)
+		    | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
+
+	for (i = 0; i < PHY_RETRIES; i++) {
+		u16 ctrl = gma_read16(hw, port, GM_SMI_CTRL);
+		if (ctrl == 0xffff)
+			goto io_error;
+
+		if (ctrl & GM_SMI_CT_RD_VAL) {
+			*val = gma_read16(hw, port, GM_SMI_DATA);
+			return 0;
+		}
+
+		udelay(10);
+	}
+
+	dev_warn(&hw->pdev->dev, "%s: phy read timeout\n", hw->dev[port]->name);
+	return -ETIMEDOUT;
+io_error:
+	dev_err(&hw->pdev->dev, "%s: phy I/O error\n", hw->dev[port]->name);
+	return -EIO;
+}
+
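+/*
+ * Convenience wrapper: an I/O error or timeout from __gm_phy_read() is
+ * ignored here, so callers that care about failures should use
+ * __gm_phy_read() directly.
+ */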
+static inline u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
+{
+	u16 v = 0;
+	__gm_phy_read(hw, port, reg, &v);
+	return v;
+}
+
+
+static void sky2_power_on(struct sky2_hw *hw)
+{
+	/* switch power to VCC (WA for VAUX problem) */
+	sky2_write8(hw, B0_POWER_CTRL,
+		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
+
+	/* disable Core Clock Division */
+	sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
+
+	if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1)
+		/* enable bits are inverted */
+		sky2_write8(hw, B2_Y2_CLK_GATE,
+			    Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
+			    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
+			    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
+	else
+		sky2_write8(hw, B2_Y2_CLK_GATE, 0);
+
+	if (hw->flags & SKY2_HW_ADV_POWER_CTL) {
+		u32 reg;
+
+		sky2_pci_write32(hw, PCI_DEV_REG3, 0);
+
+		reg = sky2_pci_read32(hw, PCI_DEV_REG4);
+		/* set all bits to 0 except bits 15..12 and 8 */
+		reg &= P_ASPM_CONTROL_MSK;
+		sky2_pci_write32(hw, PCI_DEV_REG4, reg);
+
+		reg = sky2_pci_read32(hw, PCI_DEV_REG5);
+		/* set all bits to 0 except bits 28 & 27 */
+		reg &= P_CTL_TIM_VMAIN_AV_MSK;
+		sky2_pci_write32(hw, PCI_DEV_REG5, reg);
+
+		sky2_pci_write32(hw, PCI_CFG_REG_1, 0);
+
+		sky2_write16(hw, B0_CTST, Y2_HW_WOL_ON);
+
+		/* Enable workaround for dev 4.107 on Yukon-Ultra & Extreme */
+		reg = sky2_read32(hw, B2_GP_IO);
+		reg |= GLB_GPIO_STAT_RACE_DIS;
+		sky2_write32(hw, B2_GP_IO, reg);
+
+		sky2_read32(hw, B2_GP_IO);
+	}
+
+	/* Turn on "driver loaded" LED */
+	sky2_write16(hw, B0_CTST, Y2_LED_STAT_ON);
+}
+
+static void sky2_power_aux(struct sky2_hw *hw)
+{
+	if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1)
+		sky2_write8(hw, B2_Y2_CLK_GATE, 0);
+	else
+		/* enable bits are inverted */
+		sky2_write8(hw, B2_Y2_CLK_GATE,
+			    Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
+			    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
+			    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
+
+	/* switch power to VAUX if supported and PME from D3cold */
+	if ( (sky2_read32(hw, B0_CTST) & Y2_VAUX_AVAIL) &&
+	     pci_pme_capable(hw->pdev, PCI_D3cold))
+		sky2_write8(hw, B0_POWER_CTRL,
+			    (PC_VAUX_ENA | PC_VCC_ENA |
+			     PC_VAUX_ON | PC_VCC_OFF));
+
+	/* turn off "driver loaded LED" */
+	sky2_write16(hw, B0_CTST, Y2_LED_STAT_OFF);
+}
+
+static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port)
+{
+	u16 reg;
+
+	/* disable all GMAC IRQ's */
+	sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
+
+	gma_write16(hw, port, GM_MC_ADDR_H1, 0);	/* clear MC hash */
+	gma_write16(hw, port, GM_MC_ADDR_H2, 0);
+	gma_write16(hw, port, GM_MC_ADDR_H3, 0);
+	gma_write16(hw, port, GM_MC_ADDR_H4, 0);
+
+	reg = gma_read16(hw, port, GM_RX_CTRL);
+	reg |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA;
+	gma_write16(hw, port, GM_RX_CTRL, reg);
+}
+
+/* flow control to advertise bits */
+static const u16 copper_fc_adv[] = {
+	[FC_NONE]	= 0,
+	[FC_TX]		= PHY_M_AN_ASP,
+	[FC_RX]		= PHY_M_AN_PC,
+	[FC_BOTH]	= PHY_M_AN_PC | PHY_M_AN_ASP,
+};
+
+/* flow control to advertise bits when using 1000BaseX */
+static const u16 fiber_fc_adv[] = {
+	[FC_NONE] = PHY_M_P_NO_PAUSE_X,
+	[FC_TX]   = PHY_M_P_ASYM_MD_X,
+	[FC_RX]	  = PHY_M_P_SYM_MD_X,
+	[FC_BOTH] = PHY_M_P_BOTH_MD_X,
+};
+
+/* flow control to GMA disable bits */
+static const u16 gm_fc_disable[] = {
+	[FC_NONE] = GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS,
+	[FC_TX]	  = GM_GPCR_FC_RX_DIS,
+	[FC_RX]	  = GM_GPCR_FC_TX_DIS,
+	[FC_BOTH] = 0,
+};
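+
+/*
+ * Example: with flow_mode == FC_TX the PHY advertises asymmetric pause
+ * only (PHY_M_AN_ASP on copper), and when pause autonegotiation is
+ * disabled the GMA keeps Rx flow control off (GM_GPCR_FC_RX_DIS).
+ */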
+
+
+static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
+{
+	struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
+	u16 ctrl, ct1000, adv, pg, ledctrl, ledover, reg;
+
+	if ( (sky2->flags & SKY2_FLAG_AUTO_SPEED) &&
+	    !(hw->flags & SKY2_HW_NEWER_PHY)) {
+		u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
+
+		ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
+			   PHY_M_EC_MAC_S_MSK);
+		ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);
+
+		/* on PHY 88E1040 Rev. D0 (and newer) the downshift control changed */
+		if (hw->chip_id == CHIP_ID_YUKON_EC)
+			/* set downshift counter to 3x and enable downshift */
+			ectrl |= PHY_M_EC_DSC_2(2) | PHY_M_EC_DOWN_S_ENA;
+		else
+			/* set master & slave downshift counter to 1x */
+			ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);
+
+		gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
+	}
+
+	ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
+	if (sky2_is_copper(hw)) {
+		if (!(hw->flags & SKY2_HW_GIGABIT)) {
+			/* enable automatic crossover */
+			ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1;
+
+			if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
+			    hw->chip_rev == CHIP_REV_YU_FE2_A0) {
+				u16 spec;
+
+				/* Enable Class A driver for FE+ A0 */
+				spec = gm_phy_read(hw, port, PHY_MARV_FE_SPEC_2);
+				spec |= PHY_M_FESC_SEL_CL_A;
+				gm_phy_write(hw, port, PHY_MARV_FE_SPEC_2, spec);
+			}
+		} else {
+			/* disable energy detect */
+			ctrl &= ~PHY_M_PC_EN_DET_MSK;
+
+			/* enable automatic crossover */
+			ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO);
+
+			/* downshift control on PHY 88E1112 and 88E1149 changed */
+			if ( (sky2->flags & SKY2_FLAG_AUTO_SPEED) &&
+			     (hw->flags & SKY2_HW_NEWER_PHY)) {
+				/* set downshift counter to 3x and enable downshift */
+				ctrl &= ~PHY_M_PC_DSC_MSK;
+				ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA;
+			}
+		}
+	} else {
+		/* workaround for deviation #4.88 (CRC errors) */
+		/* disable Automatic Crossover */
+
+		ctrl &= ~PHY_M_PC_MDIX_MSK;
+	}
+
+	gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
+
+	/* special setup for PHY 88E1112 Fiber */
+	if (hw->chip_id == CHIP_ID_YUKON_XL && (hw->flags & SKY2_HW_FIBRE_PHY)) {
+		pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
+
+		/* Fiber: select 1000BASE-X only mode MAC Specific Ctrl Reg. */
+		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
+		ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
+		ctrl &= ~PHY_M_MAC_MD_MSK;
+		ctrl |= PHY_M_MAC_MODE_SEL(PHY_M_MAC_MD_1000BX);
+		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
+
+		if (hw->pmd_type == 'P') {
+			/* select page 1 to access Fiber registers */
+			gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 1);
+
+			/* for SFP-module set SIGDET polarity to low */
+			ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
+			ctrl |= PHY_M_FIB_SIGD_POL;
+			gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
+		}
+
+		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
+	}
+
+	ctrl = PHY_CT_RESET;
+	ct1000 = 0;
+	adv = PHY_AN_CSMA;
+	reg = 0;
+
+	if (sky2->flags & SKY2_FLAG_AUTO_SPEED) {
+		if (sky2_is_copper(hw)) {
+			if (sky2->advertising & ADVERTISED_1000baseT_Full)
+				ct1000 |= PHY_M_1000C_AFD;
+			if (sky2->advertising & ADVERTISED_1000baseT_Half)
+				ct1000 |= PHY_M_1000C_AHD;
+			if (sky2->advertising & ADVERTISED_100baseT_Full)
+				adv |= PHY_M_AN_100_FD;
+			if (sky2->advertising & ADVERTISED_100baseT_Half)
+				adv |= PHY_M_AN_100_HD;
+			if (sky2->advertising & ADVERTISED_10baseT_Full)
+				adv |= PHY_M_AN_10_FD;
+			if (sky2->advertising & ADVERTISED_10baseT_Half)
+				adv |= PHY_M_AN_10_HD;
+
+		} else {	/* special defines for FIBER (88E1040S only) */
+			if (sky2->advertising & ADVERTISED_1000baseT_Full)
+				adv |= PHY_M_AN_1000X_AFD;
+			if (sky2->advertising & ADVERTISED_1000baseT_Half)
+				adv |= PHY_M_AN_1000X_AHD;
+		}
+
+		/* Restart Auto-negotiation */
+		ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
+	} else {
+		/* forced speed/duplex settings */
+		ct1000 = PHY_M_1000C_MSE;
+
+		/* Disable auto update for duplex flow control and duplex */
+		reg |= GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_SPD_DIS;
+
+		switch (sky2->speed) {
+		case SPEED_1000:
+			ctrl |= PHY_CT_SP1000;
+			reg |= GM_GPCR_SPEED_1000;
+			break;
+		case SPEED_100:
+			ctrl |= PHY_CT_SP100;
+			reg |= GM_GPCR_SPEED_100;
+			break;
+		}
+
+		if (sky2->duplex == DUPLEX_FULL) {
+			reg |= GM_GPCR_DUP_FULL;
+			ctrl |= PHY_CT_DUP_MD;
+		} else if (sky2->speed < SPEED_1000)
+			sky2->flow_mode = FC_NONE;
+	}
+
+	if (sky2->flags & SKY2_FLAG_AUTO_PAUSE) {
+		if (sky2_is_copper(hw))
+			adv |= copper_fc_adv[sky2->flow_mode];
+		else
+			adv |= fiber_fc_adv[sky2->flow_mode];
+	} else {
+		reg |= GM_GPCR_AU_FCT_DIS;
+		reg |= gm_fc_disable[sky2->flow_mode];
+
+		/* Forward pause packets to GMAC? */
+		if (sky2->flow_mode & FC_RX)
+			sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
+		else
+			sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
+	}
+
+	gma_write16(hw, port, GM_GP_CTRL, reg);
+
+	if (hw->flags & SKY2_HW_GIGABIT)
+		gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);
+
+	gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
+	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
+
+	/* Setup Phy LED's */
+	ledctrl = PHY_M_LED_PULS_DUR(PULS_170MS);
+	ledover = 0;
+
+	switch (hw->chip_id) {
+	case CHIP_ID_YUKON_FE:
+		/* on 88E3082 these bits are at 11..9 (shifted left) */
+		ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) << 1;
+
+		ctrl = gm_phy_read(hw, port, PHY_MARV_FE_LED_PAR);
+
+		/* clear ACT LED control bits */
+		ctrl &= ~PHY_M_FELP_LED1_MSK;
+		/* change ACT LED control to blink mode */
+		ctrl |= PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_ACT_BL);
+		gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl);
+		break;
+
+	case CHIP_ID_YUKON_FE_P:
+		/* Enable Link Partner Next Page */
+		ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
+		ctrl |= PHY_M_PC_ENA_LIP_NP;
+
+		/* disable Energy Detect and enable scrambler */
+		ctrl &= ~(PHY_M_PC_ENA_ENE_DT | PHY_M_PC_DIS_SCRAMB);
+		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
+
+		/* set LED2 -> ACT, LED1 -> LINK, LED0 -> SPEED */
+		ctrl = PHY_M_FELP_LED2_CTRL(LED_PAR_CTRL_ACT_BL) |
+			PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_LINK) |
+			PHY_M_FELP_LED0_CTRL(LED_PAR_CTRL_SPEED);
+
+		gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl);
+		break;
+
+	case CHIP_ID_YUKON_XL:
+		pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
+
+		/* select page 3 to access LED control register */
+		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
+
+		/* set LED Function Control register */
+		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
+			     (PHY_M_LEDC_LOS_CTRL(1) |	/* LINK/ACT */
+			      PHY_M_LEDC_INIT_CTRL(7) |	/* 10 Mbps */
+			      PHY_M_LEDC_STA1_CTRL(7) |	/* 100 Mbps */
+			      PHY_M_LEDC_STA0_CTRL(7)));	/* 1000 Mbps */
+
+		/* set Polarity Control register */
+		gm_phy_write(hw, port, PHY_MARV_PHY_STAT,
+			     (PHY_M_POLC_LS1_P_MIX(4) |
+			      PHY_M_POLC_IS0_P_MIX(4) |
+			      PHY_M_POLC_LOS_CTRL(2) |
+			      PHY_M_POLC_INIT_CTRL(2) |
+			      PHY_M_POLC_STA1_CTRL(2) |
+			      PHY_M_POLC_STA0_CTRL(2)));
+
+		/* restore page register */
+		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
+		break;
+
+	case CHIP_ID_YUKON_EC_U:
+	case CHIP_ID_YUKON_EX:
+	case CHIP_ID_YUKON_SUPR:
+		pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
+
+		/* select page 3 to access LED control register */
+		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
+
+		/* set LED Function Control register */
+		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
+			     (PHY_M_LEDC_LOS_CTRL(1) |	/* LINK/ACT */
+			      PHY_M_LEDC_INIT_CTRL(8) |	/* 10 Mbps */
+			      PHY_M_LEDC_STA1_CTRL(7) |	/* 100 Mbps */
+			      PHY_M_LEDC_STA0_CTRL(7)));/* 1000 Mbps */
+
+		/* set Blink Rate in LED Timer Control Register */
+		gm_phy_write(hw, port, PHY_MARV_INT_MASK,
+			     ledctrl | PHY_M_LED_BLINK_RT(BLINK_84MS));
+		/* restore page register */
+		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
+		break;
+
+	default:
+		/* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */
+		ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL;
+
+		/* turn off the Rx LED (LED_RX) */
+		ledover |= PHY_M_LED_MO_RX(MO_LED_OFF);
+	}
+
+	if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_UL_2) {
+		/* apply fixes in PHY AFE */
+		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 255);
+
+		/* increase differential signal amplitude in 10BASE-T */
+		gm_phy_write(hw, port, 0x18, 0xaa99);
+		gm_phy_write(hw, port, 0x17, 0x2011);
+
+		if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
+			/* fix for IEEE A/B Symmetry failure in 1000BASE-T */
+			gm_phy_write(hw, port, 0x18, 0xa204);
+			gm_phy_write(hw, port, 0x17, 0x2002);
+		}
+
+		/* set page register to 0 */
+		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
+	} else if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
+		   hw->chip_rev == CHIP_REV_YU_FE2_A0) {
+		/* apply workaround for integrated resistors calibration */
+		gm_phy_write(hw, port, PHY_MARV_PAGE_ADDR, 17);
+		gm_phy_write(hw, port, PHY_MARV_PAGE_DATA, 0x3f60);
+	} else if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) {
+		/* apply fixes in PHY AFE */
+		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00ff);
+
+		/* apply RDAC termination workaround */
+		gm_phy_write(hw, port, 24, 0x2800);
+		gm_phy_write(hw, port, 23, 0x2001);
+
+		/* set page register back to 0 */
+		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
+	} else if (hw->chip_id != CHIP_ID_YUKON_EX &&
+		   hw->chip_id < CHIP_ID_YUKON_SUPR) {
+		/* no effect on Yukon-XL */
+		gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
+
+		if (!(sky2->flags & SKY2_FLAG_AUTO_SPEED) ||
+		    sky2->speed == SPEED_100) {
+			/* turn on 100 Mbps LED (LED_LINK100) */
+			ledover |= PHY_M_LED_MO_100(MO_LED_ON);
+		}
+
+		if (ledover)
+			gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
+
+	} else if (hw->chip_id == CHIP_ID_YUKON_PRM &&
+		   (sky2_read8(hw, B2_MAC_CFG) & 0xf) == 0x7) {
+		int i;
+		/* This is a PHY register setup workaround copied from the vendor driver. */
+		static const struct {
+			u16 reg, val;
+		} eee_afe[] = {
+			{ 0x156, 0x58ce },
+			{ 0x153, 0x99eb },
+			{ 0x141, 0x8064 },
+			/* { 0x155, 0x130b },*/
+			{ 0x000, 0x0000 },
+			{ 0x151, 0x8433 },
+			{ 0x14b, 0x8c44 },
+			{ 0x14c, 0x0f90 },
+			{ 0x14f, 0x39aa },
+			/* { 0x154, 0x2f39 },*/
+			{ 0x14d, 0xba33 },
+			{ 0x144, 0x0048 },
+			{ 0x152, 0x2010 },
+			/* { 0x158, 0x1223 },*/
+			{ 0x140, 0x4444 },
+			{ 0x154, 0x2f3b },
+			{ 0x158, 0xb203 },
+			{ 0x157, 0x2029 },
+		};
+
+		/* Start Workaround for OptimaEEE Rev.Z0 */
+		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00fb);
+
+		gm_phy_write(hw, port,  1, 0x4099);
+		gm_phy_write(hw, port,  3, 0x1120);
+		gm_phy_write(hw, port, 11, 0x113c);
+		gm_phy_write(hw, port, 14, 0x8100);
+		gm_phy_write(hw, port, 15, 0x112a);
+		gm_phy_write(hw, port, 17, 0x1008);
+
+		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00fc);
+		gm_phy_write(hw, port,  1, 0x20b0);
+
+		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00ff);
+
+		for (i = 0; i < ARRAY_SIZE(eee_afe); i++) {
+			/* apply AFE settings */
+			gm_phy_write(hw, port, 17, eee_afe[i].val);
+			gm_phy_write(hw, port, 16, eee_afe[i].reg | 1u<<13);
+		}
+
+		/* End Workaround for OptimaEEE */
+		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
+
+		/* Enable 10Base-Te (EEE) */
+		if (hw->chip_id >= CHIP_ID_YUKON_PRM) {
+			reg = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
+			gm_phy_write(hw, port, PHY_MARV_EXT_CTRL,
+				     reg | PHY_M_10B_TE_ENABLE);
+		}
+	}
+
+	/* Enable phy interrupt on auto-negotiation complete (or link up) */
+	if (sky2->flags & SKY2_FLAG_AUTO_SPEED)
+		gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL);
+	else
+		gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
+}
+
+static const u32 phy_power[] = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD };
+static const u32 coma_mode[] = { PCI_Y2_PHY1_COMA, PCI_Y2_PHY2_COMA };
+
+static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port)
+{
+	u32 reg1;
+
+	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+	reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
+	reg1 &= ~phy_power[port];
+
+	if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1)
+		reg1 |= coma_mode[port];
+
+	sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
+	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+	sky2_pci_read32(hw, PCI_DEV_REG1);
+
+	if (hw->chip_id == CHIP_ID_YUKON_FE)
+		gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_ANE);
+	else if (hw->flags & SKY2_HW_ADV_POWER_CTL)
+		sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
+}
+
+static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port)
+{
+	u32 reg1;
+	u16 ctrl;
+
+	/* release GPHY Control reset */
+	sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
+
+	/* release GMAC reset */
+	sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
+
+	if (hw->flags & SKY2_HW_NEWER_PHY) {
+		/* select page 2 to access MAC control register */
+		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
+
+		ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
+		/* allow GMII Power Down */
+		ctrl &= ~PHY_M_MAC_GMIF_PUP;
+		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
+
+		/* set page register back to 0 */
+		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
+	}
+
+	/* setup General Purpose Control Register */
+	gma_write16(hw, port, GM_GP_CTRL,
+		    GM_GPCR_FL_PASS | GM_GPCR_SPEED_100 |
+		    GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS |
+		    GM_GPCR_AU_SPD_DIS);
+
+	if (hw->chip_id != CHIP_ID_YUKON_EC) {
+		if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
+			/* select page 2 to access MAC control register */
+			gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
+
+			ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
+			/* enable Power Down */
+			ctrl |= PHY_M_PC_POW_D_ENA;
+			gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
+
+			/* set page register back to 0 */
+			gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
+		}
+
+		/* set IEEE compatible Power Down Mode (dev. #4.99) */
+		gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_PDOWN);
+	}
+
+	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+	reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
+	reg1 |= phy_power[port];		/* set PHY to PowerDown/COMA Mode */
+	sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
+	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+}
+
+/* configure IPG according to used link speed */
+static void sky2_set_ipg(struct sky2_port *sky2)
+{
+	u16 reg;
+
+	reg = gma_read16(sky2->hw, sky2->port, GM_SERIAL_MODE);
+	reg &= ~GM_SMOD_IPG_MSK;
+	if (sky2->speed > SPEED_100)
+		reg |= IPG_DATA_VAL(IPG_DATA_DEF_1000);
+	else
+		reg |= IPG_DATA_VAL(IPG_DATA_DEF_10_100);
+	gma_write16(sky2->hw, sky2->port, GM_SERIAL_MODE, reg);
+}
+
+/* Enable Rx/Tx */
+static void sky2_enable_rx_tx(struct sky2_port *sky2)
+{
+	struct sky2_hw *hw = sky2->hw;
+	unsigned port = sky2->port;
+	u16 reg;
+
+	reg = gma_read16(hw, port, GM_GP_CTRL);
+	reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
+	gma_write16(hw, port, GM_GP_CTRL, reg);
+}
+
+/* Force a renegotiation */
+static void sky2_phy_reinit(struct sky2_port *sky2)
+{
+	spin_lock_bh(&sky2->phy_lock);
+	sky2_phy_init(sky2->hw, sky2->port);
+	sky2_enable_rx_tx(sky2);
+	spin_unlock_bh(&sky2->phy_lock);
+}
+
+/* Put device in state to listen for Wake On Lan */
+static void sky2_wol_init(struct sky2_port *sky2)
+{
+	struct sky2_hw *hw = sky2->hw;
+	unsigned port = sky2->port;
+	enum flow_control save_mode;
+	u16 ctrl;
+
+	/* Bring hardware out of reset */
+	sky2_write16(hw, B0_CTST, CS_RST_CLR);
+	sky2_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR);
+
+	sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
+	sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
+
+	/* Force to 10/100
+	 * sky2_reset will re-enable on resume
+	 */
+	save_mode = sky2->flow_mode;
+	ctrl = sky2->advertising;
+
+	sky2->advertising &= ~(ADVERTISED_1000baseT_Half|ADVERTISED_1000baseT_Full);
+	sky2->flow_mode = FC_NONE;
+
+	spin_lock_bh(&sky2->phy_lock);
+	sky2_phy_power_up(hw, port);
+	sky2_phy_init(hw, port);
+	spin_unlock_bh(&sky2->phy_lock);
+
+	sky2->flow_mode = save_mode;
+	sky2->advertising = ctrl;
+
+	/* Set GMAC to no flow control and auto update for speed/duplex */
+	gma_write16(hw, port, GM_GP_CTRL,
+		    GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA|
+		    GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS);
+
+	/* Set WOL address */
+	memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR),
+		    sky2->netdev->dev_addr, ETH_ALEN);
+
+	/* Turn on appropriate WOL control bits */
+	sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT);
+	ctrl = 0;
+	if (sky2->wol & WAKE_PHY)
+		ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT;
+	else
+		ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT;
+
+	if (sky2->wol & WAKE_MAGIC)
+		ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT;
+	else
+		ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT;
+
+	ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT;
+	sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl);
+
+	/* Disable PiG firmware */
+	sky2_write16(hw, B0_CTST, Y2_HW_WOL_OFF);
+
+	/* Needed by some broken BIOSes, use PCI rather than PCI-e for WOL */
+	if (legacy_pme) {
+		u32 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
+		reg1 |= PCI_Y2_PME_LEGACY;
+		sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
+	}
+
+	/* block receiver */
+	sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
+	sky2_read32(hw, B0_CTST);
+}
+
+static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port)
+{
+	struct net_device *dev = hw->dev[port];
+
+	if ( (hw->chip_id == CHIP_ID_YUKON_EX &&
+	      hw->chip_rev != CHIP_REV_YU_EX_A0) ||
+	     hw->chip_id >= CHIP_ID_YUKON_FE_P) {
+		/* Yukon-Extreme B0 and later Extreme devices */
+		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA);
+	} else if (dev->mtu > ETH_DATA_LEN) {
+		/* set Tx GMAC FIFO Almost Empty Threshold */
+		sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR),
+			     (ECU_JUMBO_WM << 16) | ECU_AE_THR);
+
+		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_DIS);
+	} else
+		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA);
+}
+
+static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
+{
+	struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
+	u16 reg;
+	u32 rx_reg;
+	int i;
+	const u8 *addr = hw->dev[port]->dev_addr;
+
+	sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
+	sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
+
+	sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
+
+	if (hw->chip_id == CHIP_ID_YUKON_XL &&
+	    hw->chip_rev == CHIP_REV_YU_XL_A0 &&
+	    port == 1) {
+		/* WA DEV_472 -- looks like crossed wires on port 2 */
+		/* clear GMAC 1 Control reset */
+		sky2_write8(hw, SK_REG(0, GMAC_CTRL), GMC_RST_CLR);
+		do {
+			sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_SET);
+			sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_CLR);
+		} while (gm_phy_read(hw, 1, PHY_MARV_ID0) != PHY_MARV_ID0_VAL ||
+			 gm_phy_read(hw, 1, PHY_MARV_ID1) != PHY_MARV_ID1_Y2 ||
+			 gm_phy_read(hw, 1, PHY_MARV_INT_MASK) != 0);
+	}
+
+	sky2_read16(hw, SK_REG(port, GMAC_IRQ_SRC));
+
+	/* Enable Transmit FIFO Underrun */
+	sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);
+
+	spin_lock_bh(&sky2->phy_lock);
+	sky2_phy_power_up(hw, port);
+	sky2_phy_init(hw, port);
+	spin_unlock_bh(&sky2->phy_lock);
+
+	/* MIB clear */
+	reg = gma_read16(hw, port, GM_PHY_ADDR);
+	gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);
+
+	for (i = GM_MIB_CNT_BASE; i <= GM_MIB_CNT_END; i += 4)
+		gma_read16(hw, port, i);
+	gma_write16(hw, port, GM_PHY_ADDR, reg);
+
+	/* transmit control */
+	gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
+
+	/* receive control reg: unicast + multicast + no FCS  */
+	gma_write16(hw, port, GM_RX_CTRL,
+		    GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);
+
+	/* transmit flow control */
+	gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);
+
+	/* transmit parameter */
+	gma_write16(hw, port, GM_TX_PARAM,
+		    TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
+		    TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
+		    TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) |
+		    TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
+
+	/* serial mode register */
+	reg = DATA_BLIND_VAL(DATA_BLIND_DEF) |
+		GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF_1000);
+
+	if (hw->dev[port]->mtu > ETH_DATA_LEN)
+		reg |= GM_SMOD_JUMBO_ENA;
+
+	if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
+	    hw->chip_rev == CHIP_REV_YU_EC_U_B1)
+		reg |= GM_NEW_FLOW_CTRL;
+
+	gma_write16(hw, port, GM_SERIAL_MODE, reg);
+
+	/* virtual address for data */
+	gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);
+
+	/* physical address: used for pause frames */
+	gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);
+
+	/* ignore counter overflows */
+	gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
+	gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
+	gma_write16(hw, port, GM_TR_IRQ_MSK, 0);
+
+	/* Configure Rx MAC FIFO */
+	sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
+	rx_reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
+	if (hw->chip_id == CHIP_ID_YUKON_EX ||
+	    hw->chip_id == CHIP_ID_YUKON_FE_P)
+		rx_reg |= GMF_RX_OVER_ON;
+
+	sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), rx_reg);
+
+	if (hw->chip_id == CHIP_ID_YUKON_XL) {
+		/* Hardware errata - clear flush mask */
+		sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), 0);
+	} else {
+		/* Flush Rx MAC FIFO on any flow control or error */
+		sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR);
+	}
+
+	/* Set threshold to 0xa (64 bytes) + 1 to work around the pause bug */
+	reg = RX_GMF_FL_THR_DEF + 1;
+	/* Another magic mystery workaround from sk98lin */
+	if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
+	    hw->chip_rev == CHIP_REV_YU_FE2_A0)
+		reg = 0x178;
+	sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), reg);
+
+	/* Configure Tx MAC FIFO */
+	sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
+	sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
+
+	/* On chips without a RAM buffer, pause is controlled at the MAC level */
+	if (!(hw->flags & SKY2_HW_RAM_BUFFER)) {
+		/* Pause threshold is scaled by 8 in bytes */
+		if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
+		    hw->chip_rev == CHIP_REV_YU_FE2_A0)
+			reg = 1568 / 8;
+		else
+			reg = 1024 / 8;
+		sky2_write16(hw, SK_REG(port, RX_GMF_UP_THR), reg);
+		sky2_write16(hw, SK_REG(port, RX_GMF_LP_THR), 768 / 8);
+
+		sky2_set_tx_stfwd(hw, port);
+	}
+
+	if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
+	    hw->chip_rev == CHIP_REV_YU_FE2_A0) {
+		/* disable dynamic watermark */
+		reg = sky2_read16(hw, SK_REG(port, TX_GMF_EA));
+		reg &= ~TX_DYN_WM_ENA;
+		sky2_write16(hw, SK_REG(port, TX_GMF_EA), reg);
+	}
+}
+
+/* Assign RAM buffer allocation to a queue */
+static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space)
+{
+	u32 end;
+
+	/* convert from K bytes to qwords used for hw register */
+	start *= 1024/8;
+	space *= 1024/8;
+	end = start + space - 1;
+
+	sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
+	sky2_write32(hw, RB_ADDR(q, RB_START), start);
+	sky2_write32(hw, RB_ADDR(q, RB_END), end);
+	sky2_write32(hw, RB_ADDR(q, RB_WP), start);
+	sky2_write32(hw, RB_ADDR(q, RB_RP), start);
+
+	if (q == Q_R1 || q == Q_R2) {
+		u32 tp = space - space/4;
+
+		/* On receive queues, set the thresholds:
+		 * give receiver priority when > 3/4 full,
+		 * send pause when down to 2K.
+		 */
+		sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);
+		sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2);
+
+		tp = space - 8192/8;
+		sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
+		sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
+	} else {
+		/* Enable store & forward on Tx queues because
+		 * the Tx FIFO is only 1K on Yukon
+		 */
+		sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
+	}
+
+	sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
+	sky2_read8(hw, RB_ADDR(q, RB_CTRL));
+}
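+
+/* A worked example of the arithmetic above, with a hypothetical 48K RAM
+ * buffer split as in sky2_hw_up() below (rxspace = 8 + (2*(48-16))/3 = 29K
+ * for the receive queue, start = 0):
+ *
+ *   start = 0  * 1024/8        = 0 qwords
+ *   space = 29 * 1024/8        = 3712 qwords
+ *   end   = start + space - 1  = 3711
+ *   UTHP  = space - space/4    = 2784 qwords (priority above 3/4 full)
+ *   UTPP  = space - 8192/8     = 2688 qwords (upper pause threshold)
+ */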
+
+/* Setup Bus Memory Interface */
+static void sky2_qset(struct sky2_hw *hw, u16 q)
+{
+	sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_RESET);
+	sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_OPER_INIT);
+	sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_FIFO_OP_ON);
+	sky2_write32(hw, Q_ADDR(q, Q_WM),  BMU_WM_DEFAULT);
+}
+
+/* Setup prefetch unit registers. This is the interface between
+ * hardware and driver list elements
+ */
+static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr,
+			       dma_addr_t addr, u32 last)
+{
+	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
+	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_CLR);
+	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_HI), upper_32_bits(addr));
+	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_LO), lower_32_bits(addr));
+	sky2_write16(hw, Y2_QADDR(qaddr, PREF_UNIT_LAST_IDX), last);
+	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_OP_ON);
+
+	sky2_read32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL));
+}
+
+static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2, u16 *slot)
+{
+	struct sky2_tx_le *le = sky2->tx_le + *slot;
+
+	*slot = RING_NEXT(*slot, sky2->tx_ring_size);
+	le->ctrl = 0;
+	return le;
+}
+
+static void tx_init(struct sky2_port *sky2)
+{
+	struct sky2_tx_le *le;
+
+	sky2->tx_prod = sky2->tx_cons = 0;
+	sky2->tx_tcpsum = 0;
+	sky2->tx_last_mss = 0;
+	netdev_reset_queue(sky2->netdev);
+
+	le = get_tx_le(sky2, &sky2->tx_prod);
+	le->addr = 0;
+	le->opcode = OP_ADDR64 | HW_OWNER;
+	sky2->tx_last_upper = 0;
+}
+
+/* Update chip's next pointer */
+static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
+{
+	/* Make sure writes to descriptors are complete before we tell hardware */
+	wmb();
+	sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
+
+	/* Synchronize I/O since another processor may write to the tail */
+	mmiowb();
+}
+
+
+static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
+{
+	struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put;
+	sky2->rx_put = RING_NEXT(sky2->rx_put, RX_LE_SIZE);
+	le->ctrl = 0;
+	return le;
+}
+
+static unsigned sky2_get_rx_threshold(struct sky2_port *sky2)
+{
+	unsigned size;
+
+	/* Space needed for frame data + headers rounded up */
+	size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8);
+
+	/* Stopping point for hardware truncation */
+	return (size - 8) / sizeof(u32);
+}
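+
+/* A worked instance of the threshold math above (assuming the default
+ * 1500 byte MTU):
+ *   size   = roundup(1500 + ETH_HLEN + VLAN_HLEN, 8) = 1520
+ *   thresh = (1520 - 8) / sizeof(u32)                = 378
+ * which fits in the 9-bit truncation register checked in sky2_rx_start().
+ */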
+
+static unsigned sky2_get_rx_data_size(struct sky2_port *sky2)
+{
+	struct rx_ring_info *re;
+	unsigned size;
+
+	/* Space needed for frame data + headers rounded up */
+	size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8);
+
+	sky2->rx_nfrags = size >> PAGE_SHIFT;
+	BUG_ON(sky2->rx_nfrags > ARRAY_SIZE(re->frag_addr));
+
+	/* Compute residue after pages */
+	size -= sky2->rx_nfrags << PAGE_SHIFT;
+
+	/* Optimize to handle small packets and headers */
+	if (size < copybreak)
+		size = copybreak;
+	if (size < ETH_HLEN)
+		size = ETH_HLEN;
+
+	return size;
+}
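+
+/* A sketch of the split above for a hypothetical jumbo MTU of 9000
+ * (assuming 4K pages):
+ *   size      = roundup(9000 + 14 + 4, 8) = 9024
+ *   rx_nfrags = 9024 >> PAGE_SHIFT        = 2 pages
+ *   residue   = 9024 - 2 * 4096           = 832 bytes
+ * so each rx skb gets an 832 byte linear area plus two page fragments.
+ */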
+
+/* Build a list element for the hardware for one receive segment */
+static void sky2_rx_add(struct sky2_port *sky2, u8 op,
+			dma_addr_t map, unsigned len)
+{
+	struct sky2_rx_le *le;
+
+	if (sizeof(dma_addr_t) > sizeof(u32)) {
+		le = sky2_next_rx(sky2);
+		le->addr = cpu_to_le32(upper_32_bits(map));
+		le->opcode = OP_ADDR64 | HW_OWNER;
+	}
+
+	le = sky2_next_rx(sky2);
+	le->addr = cpu_to_le32(lower_32_bits(map));
+	le->length = cpu_to_le16(len);
+	le->opcode = op | HW_OWNER;
+}
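+
+/* With a 64-bit dma_addr_t one buffer can therefore consume two list
+ * elements. A hypothetical mapping at 0x123456780 would emit:
+ *   le[0].addr = 0x00000001, opcode = OP_ADDR64 | HW_OWNER
+ *   le[1].addr = 0x23456780, opcode = op | HW_OWNER  (with length)
+ */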
+
+/* Build list elements for the hardware for one possibly fragmented skb */
+static void sky2_rx_submit(struct sky2_port *sky2,
+			   const struct rx_ring_info *re)
+{
+	int i;
+
+	sky2_rx_add(sky2, OP_PACKET, re->data_addr, sky2->rx_data_size);
+
+	for (i = 0; i < skb_shinfo(re->skb)->nr_frags; i++)
+		sky2_rx_add(sky2, OP_BUFFER, re->frag_addr[i], PAGE_SIZE);
+}
+
+
+static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re,
+			    unsigned size)
+{
+	struct sk_buff *skb = re->skb;
+	int i;
+
+	re->data_addr = pci_map_single(pdev, skb->data, size, PCI_DMA_FROMDEVICE);
+	if (pci_dma_mapping_error(pdev, re->data_addr))
+		goto mapping_error;
+
+	dma_unmap_len_set(re, data_size, size);
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		re->frag_addr[i] = skb_frag_dma_map(&pdev->dev, frag, 0,
+						    skb_frag_size(frag),
+						    DMA_FROM_DEVICE);
+
+		if (dma_mapping_error(&pdev->dev, re->frag_addr[i]))
+			goto map_page_error;
+	}
+	return 0;
+
+map_page_error:
+	while (--i >= 0) {
+		pci_unmap_page(pdev, re->frag_addr[i],
+			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
+			       PCI_DMA_FROMDEVICE);
+	}
+
+	pci_unmap_single(pdev, re->data_addr, dma_unmap_len(re, data_size),
+			 PCI_DMA_FROMDEVICE);
+
+mapping_error:
+	if (net_ratelimit())
+		dev_warn(&pdev->dev, "%s: rx mapping error\n",
+			 skb->dev->name);
+	return -EIO;
+}
+
+static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re)
+{
+	struct sk_buff *skb = re->skb;
+	int i;
+
+	pci_unmap_single(pdev, re->data_addr, dma_unmap_len(re, data_size),
+			 PCI_DMA_FROMDEVICE);
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+		pci_unmap_page(pdev, re->frag_addr[i],
+			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
+			       PCI_DMA_FROMDEVICE);
+}
+
+/* Tell the chip where to start the receive checksum.
+ * The hardware actually has two checksums, but we set both the same
+ * to avoid possible byte order problems.
+ */
+static void rx_set_checksum(struct sky2_port *sky2)
+{
+	struct sky2_rx_le *le = sky2_next_rx(sky2);
+
+	le->addr = cpu_to_le32((ETH_HLEN << 16) | ETH_HLEN);
+	le->ctrl = 0;
+	le->opcode = OP_TCPSTART | HW_OWNER;
+
+	sky2_write32(sky2->hw,
+		     Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+		     (sky2->netdev->features & NETIF_F_RXCSUM)
+		     ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
+}
+
+/* Enable/disable receive hash calculation (RSS) */
+static void rx_set_rss(struct net_device *dev, netdev_features_t features)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+	struct sky2_hw *hw = sky2->hw;
+	int i, nkeys = 4;
+
+	/* Supports IPv6 and other modes */
+	if (hw->flags & SKY2_HW_NEW_LE) {
+		nkeys = 10;
+		sky2_write32(hw, SK_REG(sky2->port, RSS_CFG), HASH_ALL);
+	}
+
+	/* Program RSS initial values */
+	if (features & NETIF_F_RXHASH) {
+		u32 rss_key[10];
+
+		netdev_rss_key_fill(rss_key, sizeof(rss_key));
+		for (i = 0; i < nkeys; i++)
+			sky2_write32(hw, SK_REG(sky2->port, RSS_KEY + i * 4),
+				     rss_key[i]);
+
+		/* Need to turn on (undocumented) flag to make hashing work  */
+		sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T),
+			     RX_STFW_ENA);
+
+		sky2_write32(hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+			     BMU_ENA_RX_RSS_HASH);
+	} else
+		sky2_write32(hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+			     BMU_DIS_RX_RSS_HASH);
+}
+
+/*
+ * The RX Stop command will not work for Yukon-2 if the BMU does not
+ * reach the end of packet and since we can't make sure that we have
+ * incoming data, we must reset the BMU while it is not doing a DMA
+ * transfer. Since it is possible that the RX path is still active,
+ * the RX RAM buffer will be stopped first, so any possible incoming
+ * data will not trigger a DMA. After the RAM buffer is stopped, the
+ * BMU is polled until any DMA in progress has ended, and only then
+ * is it reset.
+ */
+static void sky2_rx_stop(struct sky2_port *sky2)
+{
+	struct sky2_hw *hw = sky2->hw;
+	unsigned rxq = rxqaddr[sky2->port];
+	int i;
+
+	/* disable the RAM Buffer receive queue */
+	sky2_write8(hw, RB_ADDR(rxq, RB_CTRL), RB_DIS_OP_MD);
+
+	for (i = 0; i < 0xffff; i++)
+		if (sky2_read8(hw, RB_ADDR(rxq, Q_RSL))
+		    == sky2_read8(hw, RB_ADDR(rxq, Q_RL)))
+			goto stopped;
+
+	netdev_warn(sky2->netdev, "receiver stop failed\n");
+stopped:
+	sky2_write32(hw, Q_ADDR(rxq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST);
+
+	/* reset the Rx prefetch unit */
+	sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
+	mmiowb();
+}
+
+/* Clean out receive buffer area, assumes receiver hardware stopped */
+static void sky2_rx_clean(struct sky2_port *sky2)
+{
+	unsigned i;
+
+	if (sky2->rx_le)
+		memset(sky2->rx_le, 0, RX_LE_BYTES);
+
+	for (i = 0; i < sky2->rx_pending; i++) {
+		struct rx_ring_info *re = sky2->rx_ring + i;
+
+		if (re->skb) {
+			sky2_rx_unmap_skb(sky2->hw->pdev, re);
+			kfree_skb(re->skb);
+			re->skb = NULL;
+		}
+	}
+}
+
+/* Basic MII support */
+static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct mii_ioctl_data *data = if_mii(ifr);
+	struct sky2_port *sky2 = netdev_priv(dev);
+	struct sky2_hw *hw = sky2->hw;
+	int err = -EOPNOTSUPP;
+
+	if (!netif_running(dev))
+		return -ENODEV;	/* Phy still in reset */
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+		data->phy_id = PHY_ADDR_MARV;
+
+		/* fallthru */
+	case SIOCGMIIREG: {
+		u16 val = 0;
+
+		spin_lock_bh(&sky2->phy_lock);
+		err = __gm_phy_read(hw, sky2->port, data->reg_num & 0x1f, &val);
+		spin_unlock_bh(&sky2->phy_lock);
+
+		data->val_out = val;
+		break;
+	}
+
+	case SIOCSMIIREG:
+		spin_lock_bh(&sky2->phy_lock);
+		err = gm_phy_write(hw, sky2->port, data->reg_num & 0x1f,
+				   data->val_in);
+		spin_unlock_bh(&sky2->phy_lock);
+		break;
+	}
+	return err;
+}
+
+#define SKY2_VLAN_OFFLOADS (NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO)
+
+static void sky2_vlan_mode(struct net_device *dev, netdev_features_t features)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+	struct sky2_hw *hw = sky2->hw;
+	u16 port = sky2->port;
+
+	if (features & NETIF_F_HW_VLAN_CTAG_RX)
+		sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
+			     RX_VLAN_STRIP_ON);
+	else
+		sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
+			     RX_VLAN_STRIP_OFF);
+
+	if (features & NETIF_F_HW_VLAN_CTAG_TX) {
+		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
+			     TX_VLAN_TAG_ON);
+
+		dev->vlan_features |= SKY2_VLAN_OFFLOADS;
+	} else {
+		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
+			     TX_VLAN_TAG_OFF);
+
+		/* Can't do transmit offload of vlan without hw vlan */
+		dev->vlan_features &= ~SKY2_VLAN_OFFLOADS;
+	}
+}
+
+/* Amount of required worst case padding in rx buffer */
+static inline unsigned sky2_rx_pad(const struct sky2_hw *hw)
+{
+	return (hw->flags & SKY2_HW_RAM_BUFFER) ? 8 : 2;
+}
+
+/*
+ * Allocate an skb for receiving. If the MTU is large enough
+ * make the skb non-linear with a fragment list of pages.
+ */
+static struct sk_buff *sky2_rx_alloc(struct sky2_port *sky2, gfp_t gfp)
+{
+	struct sk_buff *skb;
+	int i;
+
+	skb = __netdev_alloc_skb(sky2->netdev,
+				 sky2->rx_data_size + sky2_rx_pad(sky2->hw),
+				 gfp);
+	if (!skb)
+		goto nomem;
+
+	if (sky2->hw->flags & SKY2_HW_RAM_BUFFER) {
+		unsigned char *start;
+		/*
+		 * Workaround for a bug in the FIFO that causes a hang
+		 * if the receive buffer is not 64 byte aligned.
+		 * The buffer returned from netdev_alloc_skb is
+		 * aligned except if slab debugging is enabled.
+		 */
+		start = PTR_ALIGN(skb->data, 8);
+		skb_reserve(skb, start - skb->data);
+	} else
+		skb_reserve(skb, NET_IP_ALIGN);
+
+	for (i = 0; i < sky2->rx_nfrags; i++) {
+		struct page *page = alloc_page(gfp);
+
+		if (!page)
+			goto free_partial;
+		skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);
+	}
+
+	return skb;
+free_partial:
+	kfree_skb(skb);
+nomem:
+	return NULL;
+}
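+
+/* A hypothetical illustration of the alignment fix above: if
+ * __netdev_alloc_skb() returned data at ...0x1004, PTR_ALIGN(data, 8)
+ * yields ...0x1008 and skb_reserve() advances by 4 bytes. The worst
+ * case advance is 7 bytes, which is why sky2_rx_pad() reserves 8 extra
+ * bytes on RAM buffer chips.
+ */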
+
+static inline void sky2_rx_update(struct sky2_port *sky2, unsigned rxq)
+{
+	sky2_put_idx(sky2->hw, rxq, sky2->rx_put);
+}
+
+static int sky2_alloc_rx_skbs(struct sky2_port *sky2)
+{
+	struct sky2_hw *hw = sky2->hw;
+	unsigned i;
+
+	sky2->rx_data_size = sky2_get_rx_data_size(sky2);
+
+	/* Fill Rx ring */
+	for (i = 0; i < sky2->rx_pending; i++) {
+		struct rx_ring_info *re = sky2->rx_ring + i;
+
+		re->skb = sky2_rx_alloc(sky2, GFP_KERNEL);
+		if (!re->skb)
+			return -ENOMEM;
+
+		if (sky2_rx_map_skb(hw->pdev, re, sky2->rx_data_size)) {
+			dev_kfree_skb(re->skb);
+			re->skb = NULL;
+			return -ENOMEM;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Setup receiver buffer pool.
+ * In the normal case this ends up creating one list element per skb
+ * in the receive ring. In the worst case, with a large MTU where each
+ * allocation falls in a different 64 bit region, that results
+ * in 6 list elements per ring entry.
+ * One element is used for checksum enable/disable, and one
+ * extra to avoid wrap.
+ */
+static void sky2_rx_start(struct sky2_port *sky2)
+{
+	struct sky2_hw *hw = sky2->hw;
+	struct rx_ring_info *re;
+	unsigned rxq = rxqaddr[sky2->port];
+	unsigned i, thresh;
+
+	sky2->rx_put = sky2->rx_next = 0;
+	sky2_qset(hw, rxq);
+
+	/* On PCI express lowering the watermark gives better performance */
+	if (pci_is_pcie(hw->pdev))
+		sky2_write32(hw, Q_ADDR(rxq, Q_WM), BMU_WM_PEX);
+
+	/* These chips have no RAM buffer;
+	 * MAC Rx RAM read is controlled by hardware */
+	if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
+	    hw->chip_rev > CHIP_REV_YU_EC_U_A0)
+		sky2_write32(hw, Q_ADDR(rxq, Q_TEST), F_M_RX_RAM_DIS);
+
+	sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);
+
+	if (!(hw->flags & SKY2_HW_NEW_LE))
+		rx_set_checksum(sky2);
+
+	if (!(hw->flags & SKY2_HW_RSS_BROKEN))
+		rx_set_rss(sky2->netdev, sky2->netdev->features);
+
+	/* submit Rx ring */
+	for (i = 0; i < sky2->rx_pending; i++) {
+		re = sky2->rx_ring + i;
+		sky2_rx_submit(sky2, re);
+	}
+
+	/*
+	 * The receiver hangs if it receives frames larger than the
+	 * packet buffer. As a workaround, truncate oversize frames, but
+	 * the register is limited to 9 bits, so if you do frames > 2052
+	 * you better get the MTU right!
+	 */
+	thresh = sky2_get_rx_threshold(sky2);
+	if (thresh > 0x1ff)
+		sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_OFF);
+	else {
+		sky2_write16(hw, SK_REG(sky2->port, RX_GMF_TR_THR), thresh);
+		sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_ON);
+	}
+
+	/* Tell chip about available buffers */
+	sky2_rx_update(sky2, rxq);
+
+	if (hw->chip_id == CHIP_ID_YUKON_EX ||
+	    hw->chip_id == CHIP_ID_YUKON_SUPR) {
+		/*
+		 * Disable flushing of non ASF packets;
+		 * must be done after initializing the BMUs;
+		 * drivers without ASF support should do this too, otherwise
+		 * it may happen that they cannot run on ASF devices;
+		 * remember that the MAC FIFO isn't reset during initialization.
+		 */
+		sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_MACSEC_FLUSH_OFF);
+	}
+
+	if (hw->chip_id >= CHIP_ID_YUKON_SUPR) {
+		/* Enable RX Home Address & Routing Header checksum fix */
+		sky2_write16(hw, SK_REG(sky2->port, RX_GMF_FL_CTRL),
+			     RX_IPV6_SA_MOB_ENA | RX_IPV6_DA_MOB_ENA);
+
+		/* Enable TX Home Address & Routing Header checksum fix */
+		sky2_write32(hw, Q_ADDR(txqaddr[sky2->port], Q_TEST),
+			     TBMU_TEST_HOME_ADD_FIX_EN | TBMU_TEST_ROUTING_ADD_FIX_EN);
+	}
+}
+
+static int sky2_alloc_buffers(struct sky2_port *sky2)
+{
+	struct sky2_hw *hw = sky2->hw;
+
+	/* must be power of 2 */
+	sky2->tx_le = pci_alloc_consistent(hw->pdev,
+					   sky2->tx_ring_size *
+					   sizeof(struct sky2_tx_le),
+					   &sky2->tx_le_map);
+	if (!sky2->tx_le)
+		goto nomem;
+
+	sky2->tx_ring = kcalloc(sky2->tx_ring_size, sizeof(struct tx_ring_info),
+				GFP_KERNEL);
+	if (!sky2->tx_ring)
+		goto nomem;
+
+	sky2->rx_le = pci_zalloc_consistent(hw->pdev, RX_LE_BYTES,
+					    &sky2->rx_le_map);
+	if (!sky2->rx_le)
+		goto nomem;
+
+	sky2->rx_ring = kcalloc(sky2->rx_pending, sizeof(struct rx_ring_info),
+				GFP_KERNEL);
+	if (!sky2->rx_ring)
+		goto nomem;
+
+	return sky2_alloc_rx_skbs(sky2);
+nomem:
+	return -ENOMEM;
+}
+
+static void sky2_free_buffers(struct sky2_port *sky2)
+{
+	struct sky2_hw *hw = sky2->hw;
+
+	sky2_rx_clean(sky2);
+
+	if (sky2->rx_le) {
+		pci_free_consistent(hw->pdev, RX_LE_BYTES,
+				    sky2->rx_le, sky2->rx_le_map);
+		sky2->rx_le = NULL;
+	}
+	if (sky2->tx_le) {
+		pci_free_consistent(hw->pdev,
+				    sky2->tx_ring_size * sizeof(struct sky2_tx_le),
+				    sky2->tx_le, sky2->tx_le_map);
+		sky2->tx_le = NULL;
+	}
+	kfree(sky2->tx_ring);
+	kfree(sky2->rx_ring);
+
+	sky2->tx_ring = NULL;
+	sky2->rx_ring = NULL;
+}
+
+static void sky2_hw_up(struct sky2_port *sky2)
+{
+	struct sky2_hw *hw = sky2->hw;
+	unsigned port = sky2->port;
+	u32 ramsize;
+	int cap;
+	struct net_device *otherdev = hw->dev[sky2->port^1];
+
+	tx_init(sky2);
+
+	/*
+	 * On a dual port PCI-X card, there is a problem where status
+	 * can be received out of order due to split transactions.
+	 */
+	if (otherdev && netif_running(otherdev) &&
+	    (cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PCIX))) {
+		u16 cmd;
+
+		cmd = sky2_pci_read16(hw, cap + PCI_X_CMD);
+		cmd &= ~PCI_X_CMD_MAX_SPLIT;
+		sky2_pci_write16(hw, cap + PCI_X_CMD, cmd);
+	}
+
+	sky2_mac_init(hw, port);
+
+	/* Register is number of 4K blocks on internal RAM buffer. */
+	ramsize = sky2_read8(hw, B2_E_0) * 4;
+	if (ramsize > 0) {
+		u32 rxspace;
+
+		netdev_dbg(sky2->netdev, "ram buffer %dK\n", ramsize);
+		if (ramsize < 16)
+			rxspace = ramsize / 2;
+		else
+			rxspace = 8 + (2*(ramsize - 16))/3;
+
+		sky2_ramset(hw, rxqaddr[port], 0, rxspace);
+		sky2_ramset(hw, txqaddr[port], rxspace, ramsize - rxspace);
+
+		/* Make sure SyncQ is disabled */
+		sky2_write8(hw, RB_ADDR(port == 0 ? Q_XS1 : Q_XS2, RB_CTRL),
+			    RB_RST_SET);
+	}
+
+	sky2_qset(hw, txqaddr[port]);
+
+	/* This is copied from sk98lin 10.0.5.3; no one tells me about errata */
+	if (hw->chip_id == CHIP_ID_YUKON_EX && hw->chip_rev == CHIP_REV_YU_EX_B0)
+		sky2_write32(hw, Q_ADDR(txqaddr[port], Q_TEST), F_TX_CHK_AUTO_OFF);
+
+	/* Set almost empty threshold */
+	if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
+	    hw->chip_rev == CHIP_REV_YU_EC_U_A0)
+		sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), ECU_TXFF_LEV);
+
+	sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
+			   sky2->tx_ring_size - 1);
+
+	sky2_vlan_mode(sky2->netdev, sky2->netdev->features);
+	netdev_update_features(sky2->netdev);
+
+	sky2_rx_start(sky2);
+}
+
+/* Setup device IRQ and enable napi to process */
+static int sky2_setup_irq(struct sky2_hw *hw, const char *name)
+{
+	struct pci_dev *pdev = hw->pdev;
+	int err;
+
+	err = request_irq(pdev->irq, sky2_intr,
+			  (hw->flags & SKY2_HW_USE_MSI) ? 0 : IRQF_SHARED,
+			  name, hw);
+	if (err)
+		dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
+	else {
+		hw->flags |= SKY2_HW_IRQ_SETUP;
+
+		napi_enable(&hw->napi);
+		sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
+		sky2_read32(hw, B0_IMSK);
+	}
+
+	return err;
+}
+
+
+/* Bring up network interface. */
+static int sky2_open(struct net_device *dev)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+	struct sky2_hw *hw = sky2->hw;
+	unsigned port = sky2->port;
+	u32 imask;
+	int err;
+
+	netif_carrier_off(dev);
+
+	err = sky2_alloc_buffers(sky2);
+	if (err)
+		goto err_out;
+
+	/* With single port, IRQ is setup when device is brought up */
+	if (hw->ports == 1 && (err = sky2_setup_irq(hw, dev->name)))
+		goto err_out;
+
+	sky2_hw_up(sky2);
+
+	/* Enable interrupts from phy/mac for port */
+	imask = sky2_read32(hw, B0_IMSK);
+
+	if (hw->chip_id == CHIP_ID_YUKON_OPT ||
+	    hw->chip_id == CHIP_ID_YUKON_PRM ||
+	    hw->chip_id == CHIP_ID_YUKON_OP_2)
+		imask |= Y2_IS_PHY_QLNK;	/* enable PHY Quick Link */
+
+	imask |= portirq_msk[port];
+	sky2_write32(hw, B0_IMSK, imask);
+	sky2_read32(hw, B0_IMSK);
+
+	netif_info(sky2, ifup, dev, "enabling interface\n");
+
+	return 0;
+
+err_out:
+	sky2_free_buffers(sky2);
+	return err;
+}
+
+/* Modular subtraction in ring */
+static inline int tx_inuse(const struct sky2_port *sky2)
+{
+	return (sky2->tx_prod - sky2->tx_cons) & (sky2->tx_ring_size - 1);
+}
+
+/* Number of list elements available for next tx */
+static inline int tx_avail(const struct sky2_port *sky2)
+{
+	return sky2->tx_pending - tx_inuse(sky2);
+}
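+
+/* The masking above relies on tx_ring_size being a power of two, so the
+ * subtraction wraps correctly. A made-up example with a 512 entry ring
+ * after the producer has wrapped:
+ *   tx_prod = 5, tx_cons = 500
+ *   tx_inuse = (5 - 500) & 511 = 17
+ *   tx_avail = tx_pending - 17
+ */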
+
+/* Estimate of number of transmit list elements required */
+static unsigned tx_le_req(const struct sk_buff *skb)
+{
+	unsigned count;
+
+	count = (skb_shinfo(skb)->nr_frags + 1)
+		* (sizeof(dma_addr_t) / sizeof(u32));
+
+	if (skb_is_gso(skb))
+		++count;
+	else if (sizeof(dma_addr_t) == sizeof(u32))
+		++count;	/* possible vlan */
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		++count;
+
+	return count;
+}
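+
+/* A worked instance of the estimate above, for a hypothetical GSO skb
+ * with 3 page frags on a 64-bit dma_addr_t build using checksum offload:
+ *   (3 + 1) * (8 / 4) = 8 elements for buffer addresses
+ *   + 1 for the MSS element (skb_is_gso)
+ *   + 1 for the checksum element (CHECKSUM_PARTIAL)
+ *   = 10 list elements worst case
+ */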
+
+static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re)
+{
+	if (re->flags & TX_MAP_SINGLE)
+		pci_unmap_single(pdev, dma_unmap_addr(re, mapaddr),
+				 dma_unmap_len(re, maplen),
+				 PCI_DMA_TODEVICE);
+	else if (re->flags & TX_MAP_PAGE)
+		pci_unmap_page(pdev, dma_unmap_addr(re, mapaddr),
+			       dma_unmap_len(re, maplen),
+			       PCI_DMA_TODEVICE);
+	re->flags = 0;
+}
+
+/*
+ * Put one packet in ring for transmit.
+ * A single packet can generate multiple list elements, and
+ * the number of list elements actually used will probably be less
+ * than the worst-case estimate from tx_le_req().
+ */
+static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
+				   struct net_device *dev)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+	struct sky2_hw *hw = sky2->hw;
+	struct sky2_tx_le *le = NULL;
+	struct tx_ring_info *re;
+	unsigned i, len;
+	dma_addr_t mapping;
+	u32 upper;
+	u16 slot;
+	u16 mss;
+	u8 ctrl;
+
+	if (unlikely(tx_avail(sky2) < tx_le_req(skb)))
+		return NETDEV_TX_BUSY;
+
+	len = skb_headlen(skb);
+	mapping = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
+
+	if (pci_dma_mapping_error(hw->pdev, mapping))
+		goto mapping_error;
+
+	slot = sky2->tx_prod;
+	netif_printk(sky2, tx_queued, KERN_DEBUG, dev,
+		     "tx queued, slot %u, len %d\n", slot, skb->len);
+
+	/* Send high bits if needed */
+	upper = upper_32_bits(mapping);
+	if (upper != sky2->tx_last_upper) {
+		le = get_tx_le(sky2, &slot);
+		le->addr = cpu_to_le32(upper);
+		sky2->tx_last_upper = upper;
+		le->opcode = OP_ADDR64 | HW_OWNER;
+	}
+
+	/* Check for TCP Segmentation Offload */
+	mss = skb_shinfo(skb)->gso_size;
+	if (mss != 0) {
+
+		if (!(hw->flags & SKY2_HW_NEW_LE))
+			mss += ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
+
+		if (mss != sky2->tx_last_mss) {
+			le = get_tx_le(sky2, &slot);
+			le->addr = cpu_to_le32(mss);
+
+			if (hw->flags & SKY2_HW_NEW_LE)
+				le->opcode = OP_MSS | HW_OWNER;
+			else
+				le->opcode = OP_LRGLEN | HW_OWNER;
+			sky2->tx_last_mss = mss;
+		}
+	}
+
+	ctrl = 0;
+
+	/* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */
+	if (skb_vlan_tag_present(skb)) {
+		if (!le) {
+			le = get_tx_le(sky2, &slot);
+			le->addr = 0;
+			le->opcode = OP_VLAN|HW_OWNER;
+		} else
+			le->opcode |= OP_VLAN;
+		le->length = cpu_to_be16(skb_vlan_tag_get(skb));
+		ctrl |= INS_VLAN;
+	}
+
+	/* Handle TCP checksum offload */
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		/* On Yukon EX (some versions) the checksum encoding changed. */
+		if (hw->flags & SKY2_HW_AUTO_TX_SUM)
+			ctrl |= CALSUM;	/* auto checksum */
+		else {
+			const unsigned offset = skb_transport_offset(skb);
+			u32 tcpsum;
+
+			tcpsum = offset << 16;			/* sum start */
+			tcpsum |= offset + skb->csum_offset;	/* sum write */
+
+			ctrl |= CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
+			if (ip_hdr(skb)->protocol == IPPROTO_UDP)
+				ctrl |= UDPTCP;
+
+			if (tcpsum != sky2->tx_tcpsum) {
+				sky2->tx_tcpsum = tcpsum;
+
+				le = get_tx_le(sky2, &slot);
+				le->addr = cpu_to_le32(tcpsum);
+				le->length = 0;	/* initial checksum value */
+				le->ctrl = 1;	/* one packet */
+				le->opcode = OP_TCPLISW | HW_OWNER;
+			}
+		}
+	}
+
+	re = sky2->tx_ring + slot;
+	re->flags = TX_MAP_SINGLE;
+	dma_unmap_addr_set(re, mapaddr, mapping);
+	dma_unmap_len_set(re, maplen, len);
+
+	le = get_tx_le(sky2, &slot);
+	le->addr = cpu_to_le32(lower_32_bits(mapping));
+	le->length = cpu_to_le16(len);
+	le->ctrl = ctrl;
+	le->opcode = mss ? (OP_LARGESEND | HW_OWNER) : (OP_PACKET | HW_OWNER);
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		mapping = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
+					   skb_frag_size(frag), DMA_TO_DEVICE);
+
+		if (dma_mapping_error(&hw->pdev->dev, mapping))
+			goto mapping_unwind;
+
+		upper = upper_32_bits(mapping);
+		if (upper != sky2->tx_last_upper) {
+			le = get_tx_le(sky2, &slot);
+			le->addr = cpu_to_le32(upper);
+			sky2->tx_last_upper = upper;
+			le->opcode = OP_ADDR64 | HW_OWNER;
+		}
+
+		re = sky2->tx_ring + slot;
+		re->flags = TX_MAP_PAGE;
+		dma_unmap_addr_set(re, mapaddr, mapping);
+		dma_unmap_len_set(re, maplen, skb_frag_size(frag));
+
+		le = get_tx_le(sky2, &slot);
+		le->addr = cpu_to_le32(lower_32_bits(mapping));
+		le->length = cpu_to_le16(skb_frag_size(frag));
+		le->ctrl = ctrl;
+		le->opcode = OP_BUFFER | HW_OWNER;
+	}
+
+	re->skb = skb;
+	le->ctrl |= EOP;
+
+	sky2->tx_prod = slot;
+
+	if (tx_avail(sky2) <= MAX_SKB_TX_LE)
+		netif_stop_queue(dev);
+
+	netdev_sent_queue(dev, skb->len);
+	sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod);
+
+	return NETDEV_TX_OK;
+
+mapping_unwind:
+	for (i = sky2->tx_prod; i != slot; i = RING_NEXT(i, sky2->tx_ring_size)) {
+		re = sky2->tx_ring + i;
+
+		sky2_tx_unmap(hw->pdev, re);
+	}
+
+mapping_error:
+	if (net_ratelimit())
+		dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
+}
+
+/*
+ * Free ring elements starting at tx_cons until "done"
+ *
+ * NB:
+ *  1. The hardware will tell us about partial completion of multi-part
+ *     buffers so make sure not to free the skb too early.
+ *  2. This may run in parallel with start_xmit because it only
+ *     looks at the tail of the FIFO queue (tx_cons), not
+ *     the head (tx_prod).
+ */
+static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
+{
+	struct net_device *dev = sky2->netdev;
+	u16 idx;
+	unsigned int bytes_compl = 0, pkts_compl = 0;
+
+	BUG_ON(done >= sky2->tx_ring_size);
+
+	for (idx = sky2->tx_cons; idx != done;
+	     idx = RING_NEXT(idx, sky2->tx_ring_size)) {
+		struct tx_ring_info *re = sky2->tx_ring + idx;
+		struct sk_buff *skb = re->skb;
+
+		sky2_tx_unmap(sky2->hw->pdev, re);
+
+		if (skb) {
+			netif_printk(sky2, tx_done, KERN_DEBUG, dev,
+				     "tx done %u\n", idx);
+
+			pkts_compl++;
+			bytes_compl += skb->len;
+
+			re->skb = NULL;
+			dev_kfree_skb_any(skb);
+
+			sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size);
+		}
+	}
+
+	sky2->tx_cons = idx;
+	smp_mb();
+
+	netdev_completed_queue(dev, pkts_compl, bytes_compl);
+
+	u64_stats_update_begin(&sky2->tx_stats.syncp);
+	sky2->tx_stats.packets += pkts_compl;
+	sky2->tx_stats.bytes += bytes_compl;
+	u64_stats_update_end(&sky2->tx_stats.syncp);
+}
+
+static void sky2_tx_reset(struct sky2_hw *hw, unsigned port)
+{
+	/* Disable Force Sync bit and Enable Alloc bit */
+	sky2_write8(hw, SK_REG(port, TXA_CTRL),
+		    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
+
+	/* Stop Interval Timer and Limit Counter of Tx Arbiter */
+	sky2_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
+	sky2_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);
+
+	/* Reset the PCI FIFO of the async Tx queue */
+	sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR),
+		     BMU_RST_SET | BMU_FIFO_RST);
+
+	/* Reset the Tx prefetch units */
+	sky2_write32(hw, Y2_QADDR(txqaddr[port], PREF_UNIT_CTRL),
+		     PREF_UNIT_RST_SET);
+
+	sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);
+	sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
+
+	sky2_read32(hw, B0_CTST);
+}
+
+static void sky2_hw_down(struct sky2_port *sky2)
+{
+	struct sky2_hw *hw = sky2->hw;
+	unsigned port = sky2->port;
+	u16 ctrl;
+
+	/* Force flow control off */
+	sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
+
+	/* Stop transmitter */
+	sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_STOP);
+	sky2_read32(hw, Q_ADDR(txqaddr[port], Q_CSR));
+
+	sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
+		     RB_RST_SET | RB_DIS_OP_MD);
+
+	ctrl = gma_read16(hw, port, GM_GP_CTRL);
+	ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA);
+	gma_write16(hw, port, GM_GP_CTRL, ctrl);
+
+	sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
+
+	/* Workaround shared GMAC reset */
+	if (!(hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0 &&
+	      port == 0 && hw->dev[1] && netif_running(hw->dev[1])))
+		sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
+
+	sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
+
+	/* Force any delayed status interrupt and NAPI */
+	sky2_write32(hw, STAT_LEV_TIMER_CNT, 0);
+	sky2_write32(hw, STAT_TX_TIMER_CNT, 0);
+	sky2_write32(hw, STAT_ISR_TIMER_CNT, 0);
+	sky2_read8(hw, STAT_ISR_TIMER_CTRL);
+
+	sky2_rx_stop(sky2);
+
+	spin_lock_bh(&sky2->phy_lock);
+	sky2_phy_power_down(hw, port);
+	spin_unlock_bh(&sky2->phy_lock);
+
+	sky2_tx_reset(hw, port);
+
+	/* Free any pending frames stuck in HW queue */
+	sky2_tx_complete(sky2, sky2->tx_prod);
+}
+
+/* Network shutdown */
+static int sky2_close(struct net_device *dev)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+	struct sky2_hw *hw = sky2->hw;
+
+	/* Never really got started! */
+	if (!sky2->tx_le)
+		return 0;
+
+	netif_info(sky2, ifdown, dev, "disabling interface\n");
+
+	if (hw->ports == 1) {
+		sky2_write32(hw, B0_IMSK, 0);
+		sky2_read32(hw, B0_IMSK);
+
+		napi_disable(&hw->napi);
+		free_irq(hw->pdev->irq, hw);
+		hw->flags &= ~SKY2_HW_IRQ_SETUP;
+	} else {
+		u32 imask;
+
+		/* Disable port IRQ */
+		imask  = sky2_read32(hw, B0_IMSK);
+		imask &= ~portirq_msk[sky2->port];
+		sky2_write32(hw, B0_IMSK, imask);
+		sky2_read32(hw, B0_IMSK);
+
+		synchronize_irq(hw->pdev->irq);
+		napi_synchronize(&hw->napi);
+	}
+
+	sky2_hw_down(sky2);
+
+	sky2_free_buffers(sky2);
+
+	return 0;
+}
+
+static u16 sky2_phy_speed(const struct sky2_hw *hw, u16 aux)
+{
+	if (hw->flags & SKY2_HW_FIBRE_PHY)
+		return SPEED_1000;
+
+	if (!(hw->flags & SKY2_HW_GIGABIT)) {
+		if (aux & PHY_M_PS_SPEED_100)
+			return SPEED_100;
+		else
+			return SPEED_10;
+	}
+
+	switch (aux & PHY_M_PS_SPEED_MSK) {
+	case PHY_M_PS_SPEED_1000:
+		return SPEED_1000;
+	case PHY_M_PS_SPEED_100:
+		return SPEED_100;
+	default:
+		return SPEED_10;
+	}
+}
+
+static void sky2_link_up(struct sky2_port *sky2)
+{
+	struct sky2_hw *hw = sky2->hw;
+	unsigned port = sky2->port;
+	static const char *fc_name[] = {
+		[FC_NONE]	= "none",
+		[FC_TX]		= "tx",
+		[FC_RX]		= "rx",
+		[FC_BOTH]	= "both",
+	};
+
+	sky2_set_ipg(sky2);
+
+	sky2_enable_rx_tx(sky2);
+
+	gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
+
+	netif_carrier_on(sky2->netdev);
+
+	mod_timer(&hw->watchdog_timer, jiffies + 1);
+
+	/* Turn on link LED */
+	sky2_write8(hw, SK_REG(port, LNK_LED_REG),
+		    LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF);
+
+	netif_info(sky2, link, sky2->netdev,
+		   "Link is up at %d Mbps, %s duplex, flow control %s\n",
+		   sky2->speed,
+		   sky2->duplex == DUPLEX_FULL ? "full" : "half",
+		   fc_name[sky2->flow_status]);
+}
+
+static void sky2_link_down(struct sky2_port *sky2)
+{
+	struct sky2_hw *hw = sky2->hw;
+	unsigned port = sky2->port;
+	u16 reg;
+
+	gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);
+
+	reg = gma_read16(hw, port, GM_GP_CTRL);
+	reg &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
+	gma_write16(hw, port, GM_GP_CTRL, reg);
+
+	netif_carrier_off(sky2->netdev);
+
+	/* Turn off link LED */
+	sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
+
+	netif_info(sky2, link, sky2->netdev, "Link is down\n");
+
+	sky2_phy_init(hw, port);
+}
+
+static enum flow_control sky2_flow(int rx, int tx)
+{
+	if (rx)
+		return tx ? FC_BOTH : FC_RX;
+	else
+		return tx ? FC_TX : FC_NONE;
+}
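+
+/* The mapping above, tabulated:
+ *   rx tx -> flow_control
+ *    0  0    FC_NONE
+ *    0  1    FC_TX
+ *    1  0    FC_RX
+ *    1  1    FC_BOTH
+ */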
+
+static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
+{
+	struct sky2_hw *hw = sky2->hw;
+	unsigned port = sky2->port;
+	u16 advert, lpa;
+
+	advert = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV);
+	lpa = gm_phy_read(hw, port, PHY_MARV_AUNE_LP);
+	if (lpa & PHY_M_AN_RF) {
+		netdev_err(sky2->netdev, "remote fault\n");
+		return -1;
+	}
+
+	if (!(aux & PHY_M_PS_SPDUP_RES)) {
+		netdev_err(sky2->netdev, "speed/duplex mismatch\n");
+		return -1;
+	}
+
+	sky2->speed = sky2_phy_speed(hw, aux);
+	sky2->duplex = (aux & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
+
+	/* Since the pause result bits seem to be in different positions on
+	 * different chips, look at the registers.
+	 */
+	if (hw->flags & SKY2_HW_FIBRE_PHY) {
+		/* Shift for bits in fiber PHY */
+		advert &= ~(ADVERTISE_PAUSE_CAP|ADVERTISE_PAUSE_ASYM);
+		lpa &= ~(LPA_PAUSE_CAP|LPA_PAUSE_ASYM);
+
+		if (advert & ADVERTISE_1000XPAUSE)
+			advert |= ADVERTISE_PAUSE_CAP;
+		if (advert & ADVERTISE_1000XPSE_ASYM)
+			advert |= ADVERTISE_PAUSE_ASYM;
+		if (lpa & LPA_1000XPAUSE)
+			lpa |= LPA_PAUSE_CAP;
+		if (lpa & LPA_1000XPAUSE_ASYM)
+			lpa |= LPA_PAUSE_ASYM;
+	}
+
+	sky2->flow_status = FC_NONE;
+	if (advert & ADVERTISE_PAUSE_CAP) {
+		if (lpa & LPA_PAUSE_CAP)
+			sky2->flow_status = FC_BOTH;
+		else if (advert & ADVERTISE_PAUSE_ASYM)
+			sky2->flow_status = FC_RX;
+	} else if (advert & ADVERTISE_PAUSE_ASYM) {
+		if ((lpa & LPA_PAUSE_CAP) && (lpa & LPA_PAUSE_ASYM))
+			sky2->flow_status = FC_TX;
+	}
+
+	if (sky2->duplex == DUPLEX_HALF && sky2->speed < SPEED_1000 &&
+	    !(hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX))
+		sky2->flow_status = FC_NONE;
+
+	if (sky2->flow_status & FC_TX)
+		sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
+	else
+		sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
+
+	return 0;
+}
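+
+/* The advert/lpa resolution above follows standard 802.3 pause
+ * negotiation. Two hypothetical outcomes: if both sides advertise the
+ * symmetric pause bit (ADVERTISE_PAUSE_CAP / LPA_PAUSE_CAP) the result
+ * is FC_BOTH; if we advertise only ADVERTISE_PAUSE_ASYM while the
+ * partner sets both LPA_PAUSE_CAP and LPA_PAUSE_ASYM, the result is
+ * transmit-only (FC_TX).
+ */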
+
+/* Interrupt from PHY */
+static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
+{
+	struct net_device *dev = hw->dev[port];
+	struct sky2_port *sky2 = netdev_priv(dev);
+	u16 istatus, phystat;
+
+	if (!netif_running(dev))
+		return;
+
+	spin_lock(&sky2->phy_lock);
+	istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
+	phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
+
+	netif_info(sky2, intr, sky2->netdev, "phy interrupt status 0x%x 0x%x\n",
+		   istatus, phystat);
+
+	if (istatus & PHY_M_IS_AN_COMPL) {
+		if (sky2_autoneg_done(sky2, phystat) == 0 &&
+		    !netif_carrier_ok(dev))
+			sky2_link_up(sky2);
+		goto out;
+	}
+
+	if (istatus & PHY_M_IS_LSP_CHANGE)
+		sky2->speed = sky2_phy_speed(hw, phystat);
+
+	if (istatus & PHY_M_IS_DUP_CHANGE)
+		sky2->duplex =
+		    (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
+
+	if (istatus & PHY_M_IS_LST_CHANGE) {
+		if (phystat & PHY_M_PS_LINK_UP)
+			sky2_link_up(sky2);
+		else
+			sky2_link_down(sky2);
+	}
+out:
+	spin_unlock(&sky2->phy_lock);
+}
+
+/* Special quick link interrupt (Yukon-2 Optima only) */
+static void sky2_qlink_intr(struct sky2_hw *hw)
+{
+	struct sky2_port *sky2 = netdev_priv(hw->dev[0]);
+	u32 imask;
+	u16 phy;
+
+	/* disable irq */
+	imask = sky2_read32(hw, B0_IMSK);
+	imask &= ~Y2_IS_PHY_QLNK;
+	sky2_write32(hw, B0_IMSK, imask);
+
+	/* reset PHY Link Detect */
+	phy = sky2_pci_read16(hw, PSM_CONFIG_REG4);
+	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+	sky2_pci_write16(hw, PSM_CONFIG_REG4, phy | 1);
+	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+
+	sky2_link_up(sky2);
+}
+
+/* Transmit timeout is only called if we are running, carrier is up
+ * and tx queue is full (stopped).
+ */
+static void sky2_tx_timeout(struct net_device *dev)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+	struct sky2_hw *hw = sky2->hw;
+
+	netif_err(sky2, timer, dev, "tx timeout\n");
+
+	netdev_printk(KERN_DEBUG, dev, "transmit ring %u .. %u report=%u done=%u\n",
+		      sky2->tx_cons, sky2->tx_prod,
+		      sky2_read16(hw, sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX),
+		      sky2_read16(hw, Q_ADDR(txqaddr[sky2->port], Q_DONE)));
+
+	/* can't restart safely under softirq */
+	schedule_work(&hw->restart_work);
+}
+
+static int sky2_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+	struct sky2_hw *hw = sky2->hw;
+	unsigned port = sky2->port;
+	int err;
+	u16 ctl, mode;
+	u32 imask;
+
+	/* MTU size outside the spec */
+	if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
+		return -EINVAL;
+
+	/* MTU > 1500 on yukon FE and FE+ not allowed */
+	if (new_mtu > ETH_DATA_LEN &&
+	    (hw->chip_id == CHIP_ID_YUKON_FE ||
+	     hw->chip_id == CHIP_ID_YUKON_FE_P))
+		return -EINVAL;
+
+	if (!netif_running(dev)) {
+		dev->mtu = new_mtu;
+		netdev_update_features(dev);
+		return 0;
+	}
+
+	imask = sky2_read32(hw, B0_IMSK);
+	sky2_write32(hw, B0_IMSK, 0);
+	sky2_read32(hw, B0_IMSK);
+
+	dev->trans_start = jiffies;	/* prevent tx timeout */
+	napi_disable(&hw->napi);
+	netif_tx_disable(dev);
+
+	synchronize_irq(hw->pdev->irq);
+
+	if (!(hw->flags & SKY2_HW_RAM_BUFFER))
+		sky2_set_tx_stfwd(hw, port);
+
+	ctl = gma_read16(hw, port, GM_GP_CTRL);
+	gma_write16(hw, port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA);
+	sky2_rx_stop(sky2);
+	sky2_rx_clean(sky2);
+
+	dev->mtu = new_mtu;
+	netdev_update_features(dev);
+
+	mode = DATA_BLIND_VAL(DATA_BLIND_DEF) |	GM_SMOD_VLAN_ENA;
+	if (sky2->speed > SPEED_100)
+		mode |= IPG_DATA_VAL(IPG_DATA_DEF_1000);
+	else
+		mode |= IPG_DATA_VAL(IPG_DATA_DEF_10_100);
+
+	if (dev->mtu > ETH_DATA_LEN)
+		mode |= GM_SMOD_JUMBO_ENA;
+
+	gma_write16(hw, port, GM_SERIAL_MODE, mode);
+
+	sky2_write8(hw, RB_ADDR(rxqaddr[port], RB_CTRL), RB_ENA_OP_MD);
+
+	err = sky2_alloc_rx_skbs(sky2);
+	if (!err)
+		sky2_rx_start(sky2);
+	else
+		sky2_rx_clean(sky2);
+	sky2_write32(hw, B0_IMSK, imask);
+
+	sky2_read32(hw, B0_Y2_SP_LISR);
+	napi_enable(&hw->napi);
+
+	if (err)
+		dev_close(dev);
+	else {
+		gma_write16(hw, port, GM_GP_CTRL, ctl);
+
+		netif_wake_queue(dev);
+	}
+
+	return err;
+}
+
+static inline bool needs_copy(const struct rx_ring_info *re,
+			      unsigned length)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+	/* Some architectures need the IP header to be aligned */
+	if (!IS_ALIGNED(re->data_addr + ETH_HLEN, sizeof(u32)))
+		return true;
+#endif
+	return length < copybreak;
+}
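+
+/* With the driver's copybreak parameter (assumed default of 128 bytes),
+ * a 60 byte ARP reply, for example, is copied into a fresh small skb by
+ * receive_copy() below, while the large DMA-mapped buffer stays on the
+ * ring.
+ */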
+
+/* For small packets, copy the data into a new skb and reuse the
+ * existing skb for the next receive
+ */
+static struct sk_buff *receive_copy(struct sky2_port *sky2,
+				    const struct rx_ring_info *re,
+				    unsigned length)
+{
+	struct sk_buff *skb;
+
+	skb = netdev_alloc_skb_ip_align(sky2->netdev, length);
+	if (likely(skb)) {
+		pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->data_addr,
+					    length, PCI_DMA_FROMDEVICE);
+		skb_copy_from_linear_data(re->skb, skb->data, length);
+		skb->ip_summed = re->skb->ip_summed;
+		skb->csum = re->skb->csum;
+		skb_copy_hash(skb, re->skb);
+		skb->vlan_proto = re->skb->vlan_proto;
+		skb->vlan_tci = re->skb->vlan_tci;
+
+		pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
+					       length, PCI_DMA_FROMDEVICE);
+		re->skb->vlan_proto = 0;
+		re->skb->vlan_tci = 0;
+		skb_clear_hash(re->skb);
+		re->skb->ip_summed = CHECKSUM_NONE;
+		skb_put(skb, length);
+	}
+	return skb;
+}
+
+/* Adjust length of skb with fragments to match received data */
+static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
+			  unsigned int length)
+{
+	int i, num_frags;
+	unsigned int size;
+
+	/* put header into skb */
+	size = min(length, hdr_space);
+	skb->tail += size;
+	skb->len += size;
+	length -= size;
+
+	num_frags = skb_shinfo(skb)->nr_frags;
+	for (i = 0; i < num_frags; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		if (length == 0) {
+			/* don't need this page */
+			__skb_frag_unref(frag);
+			--skb_shinfo(skb)->nr_frags;
+		} else {
+			size = min(length, (unsigned) PAGE_SIZE);
+
+			skb_frag_size_set(frag, size);
+			skb->data_len += size;
+			skb->truesize += PAGE_SIZE;
+			skb->len += size;
+			length -= size;
+		}
+	}
+}
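+
+/* A worked pass through the trimming above, reusing the hypothetical
+ * jumbo setup from sky2_get_rx_data_size() (hdr_space = 832, 2 frags,
+ * 4K pages) with a received length of 3000 bytes:
+ *   header: size = min(3000, 832)  = 832,  remaining length = 2168
+ *   frag 0: size = min(2168, 4096) = 2168, remaining length = 0
+ *   frag 1: unused, so its page is unreferenced and nr_frags drops to 1
+ */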
+
+/* Normal packet - take skb from ring element and put in a new one  */
+static struct sk_buff *receive_new(struct sky2_port *sky2,
+				   struct rx_ring_info *re,
+				   unsigned int length)
+{
+	struct sk_buff *skb;
+	struct rx_ring_info nre;
+	unsigned hdr_space = sky2->rx_data_size;
+
+	nre.skb = sky2_rx_alloc(sky2, GFP_ATOMIC);
+	if (unlikely(!nre.skb))
+		goto nobuf;
+
+	if (sky2_rx_map_skb(sky2->hw->pdev, &nre, hdr_space))
+		goto nomap;
+
+	skb = re->skb;
+	sky2_rx_unmap_skb(sky2->hw->pdev, re);
+	prefetch(skb->data);
+	*re = nre;
+
+	if (skb_shinfo(skb)->nr_frags)
+		skb_put_frags(skb, hdr_space, length);
+	else
+		skb_put(skb, length);
+	return skb;
+
+nomap:
+	dev_kfree_skb(nre.skb);
+nobuf:
+	return NULL;
+}
+
+/*
+ * Receive one packet.
+ * For larger packets, get new buffer.
+ */
+static struct sk_buff *sky2_receive(struct net_device *dev,
+				    u16 length, u32 status)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+	struct rx_ring_info *re = sky2->rx_ring + sky2->rx_next;
+	struct sk_buff *skb = NULL;
+	u16 count = (status & GMR_FS_LEN) >> 16;
+
+	netif_printk(sky2, rx_status, KERN_DEBUG, dev,
+		     "rx slot %u status 0x%x len %d\n",
+		     sky2->rx_next, status, length);
+
+	sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
+	prefetch(sky2->rx_ring + sky2->rx_next);
+
+	if (skb_vlan_tag_present(re->skb))
+		count -= VLAN_HLEN;	/* Account for vlan tag */
+
+	/* This chip has hardware problems that generate bogus status.
+	 * So do only marginal checking and expect higher level protocols
+	 * to handle crap frames.
+	 */
+	if (sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
+	    sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0 &&
+	    length != count)
+		goto okay;
+
+	if (status & GMR_FS_ANY_ERR)
+		goto error;
+
+	if (!(status & GMR_FS_RX_OK))
+		goto resubmit;
+
+	/* if length reported by DMA does not match PHY, packet was truncated */
+	if (length != count)
+		goto error;
+
+okay:
+	if (needs_copy(re, length))
+		skb = receive_copy(sky2, re, length);
+	else
+		skb = receive_new(sky2, re, length);
+
+	dev->stats.rx_dropped += (skb == NULL);
+
+resubmit:
+	sky2_rx_submit(sky2, re);
+
+	return skb;
+
+error:
+	++dev->stats.rx_errors;
+
+	if (net_ratelimit())
+		netif_info(sky2, rx_err, dev,
+			   "rx error, status 0x%x length %d\n", status, length);
+
+	goto resubmit;
+}
+
+/* Transmit complete */
+static inline void sky2_tx_done(struct net_device *dev, u16 last)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+
+	if (netif_running(dev)) {
+		sky2_tx_complete(sky2, last);
+
+		/* Wake unless it's detached, and called e.g. from sky2_close() */
+		if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
+			netif_wake_queue(dev);
+	}
+}
+
+static inline void sky2_skb_rx(const struct sky2_port *sky2,
+			       struct sk_buff *skb)
+{
+	if (skb->ip_summed == CHECKSUM_NONE)
+		netif_receive_skb(skb);
+	else
+		napi_gro_receive(&sky2->hw->napi, skb);
+}
+
+static inline void sky2_rx_done(struct sky2_hw *hw, unsigned port,
+				unsigned packets, unsigned bytes)
+{
+	struct net_device *dev = hw->dev[port];
+	struct sky2_port *sky2 = netdev_priv(dev);
+
+	if (packets == 0)
+		return;
+
+	u64_stats_update_begin(&sky2->rx_stats.syncp);
+	sky2->rx_stats.packets += packets;
+	sky2->rx_stats.bytes += bytes;
+	u64_stats_update_end(&sky2->rx_stats.syncp);
+
+	dev->last_rx = jiffies;
+	sky2_rx_update(netdev_priv(dev), rxqaddr[port]);
+}
+
+static void sky2_rx_checksum(struct sky2_port *sky2, u32 status)
+{
+	/* If this happens, the driver is assuming the wrong format for the chip */
+	BUG_ON(sky2->hw->flags & SKY2_HW_NEW_LE);
+
+	/* Both checksum counters are programmed to start at
+	 * the same offset, so unless there is a problem they
+	 * should match. This failure is an early indication that
+	 * hardware receive checksumming won't work.
+	 */
+	if (likely((u16)(status >> 16) == (u16)status)) {
+		struct sk_buff *skb = sky2->rx_ring[sky2->rx_next].skb;
+		skb->ip_summed = CHECKSUM_COMPLETE;
+		skb->csum = le16_to_cpu(status);
+	} else {
+		dev_notice(&sky2->hw->pdev->dev,
+			   "%s: receive checksum problem (status = %#x)\n",
+			   sky2->netdev->name, status);
+
+		/* Disable checksum offload.
+		 * It will be re-enabled on the next ndo_set_features, but if
+		 * it's really broken, it will just get disabled again.
+		 */
+		sky2->netdev->features &= ~NETIF_F_RXCSUM;
+		sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+			     BMU_DIS_RX_CHKSUM);
+	}
+}
+
+static void sky2_rx_tag(struct sky2_port *sky2, u16 length)
+{
+	struct sk_buff *skb;
+
+	skb = sky2->rx_ring[sky2->rx_next].skb;
+	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(length));
+}
+
+static void sky2_rx_hash(struct sky2_port *sky2, u32 status)
+{
+	struct sk_buff *skb;
+
+	skb = sky2->rx_ring[sky2->rx_next].skb;
+	skb_set_hash(skb, le32_to_cpu(status), PKT_HASH_TYPE_L3);
+}
+
+/* Process status response ring */
+static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
+{
+	int work_done = 0;
+	unsigned int total_bytes[2] = { 0 };
+	unsigned int total_packets[2] = { 0 };
+
+	if (to_do <= 0)
+		return work_done;
+
+	rmb();
+	do {
+		struct sky2_port *sky2;
+		struct sky2_status_le *le  = hw->st_le + hw->st_idx;
+		unsigned port;
+		struct net_device *dev;
+		struct sk_buff *skb;
+		u32 status;
+		u16 length;
+		u8 opcode = le->opcode;
+
+		if (!(opcode & HW_OWNER))
+			break;
+
+		hw->st_idx = RING_NEXT(hw->st_idx, hw->st_size);
+
+		port = le->css & CSS_LINK_BIT;
+		dev = hw->dev[port];
+		sky2 = netdev_priv(dev);
+		length = le16_to_cpu(le->length);
+		status = le32_to_cpu(le->status);
+
+		le->opcode = 0;
+		switch (opcode & ~HW_OWNER) {
+		case OP_RXSTAT:
+			total_packets[port]++;
+			total_bytes[port] += length;
+
+			skb = sky2_receive(dev, length, status);
+			if (!skb)
+				break;
+
+			/* This chip reports checksum status differently */
+			if (hw->flags & SKY2_HW_NEW_LE) {
+				if ((dev->features & NETIF_F_RXCSUM) &&
+				    (le->css & (CSS_ISIPV4 | CSS_ISIPV6)) &&
+				    (le->css & CSS_TCPUDPCSOK))
+					skb->ip_summed = CHECKSUM_UNNECESSARY;
+				else
+					skb->ip_summed = CHECKSUM_NONE;
+			}
+
+			skb->protocol = eth_type_trans(skb, dev);
+			sky2_skb_rx(sky2, skb);
+
+			/* Stop after net poll weight */
+			if (++work_done >= to_do)
+				goto exit_loop;
+			break;
+
+		case OP_RXVLAN:
+			sky2_rx_tag(sky2, length);
+			break;
+
+		case OP_RXCHKSVLAN:
+			sky2_rx_tag(sky2, length);
+			/* fall through */
+		case OP_RXCHKS:
+			if (likely(dev->features & NETIF_F_RXCSUM))
+				sky2_rx_checksum(sky2, status);
+			break;
+
+		case OP_RSS_HASH:
+			sky2_rx_hash(sky2, status);
+			break;
+
+		case OP_TXINDEXLE:
+			/* TX index reports status for both ports */
+			sky2_tx_done(hw->dev[0], status & 0xfff);
+			if (hw->dev[1])
+				sky2_tx_done(hw->dev[1],
+				     ((status >> 24) & 0xff)
+					     | (u16)(length & 0xf) << 8);
+			break;
+
+		default:
+			if (net_ratelimit())
+				pr_warn("unknown status opcode 0x%x\n", opcode);
+		}
+	} while (hw->st_idx != idx);
+
+	/* Fully processed status ring so clear irq */
+	sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
+
+exit_loop:
+	sky2_rx_done(hw, 0, total_packets[0], total_bytes[0]);
+	sky2_rx_done(hw, 1, total_packets[1], total_bytes[1]);
+
+	return work_done;
+}
+
+static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status)
+{
+	struct net_device *dev = hw->dev[port];
+
+	if (net_ratelimit())
+		netdev_info(dev, "hw error interrupt status 0x%x\n", status);
+
+	if (status & Y2_IS_PAR_RD1) {
+		if (net_ratelimit())
+			netdev_err(dev, "ram data read parity error\n");
+		/* Clear IRQ */
+		sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR);
+	}
+
+	if (status & Y2_IS_PAR_WR1) {
+		if (net_ratelimit())
+			netdev_err(dev, "ram data write parity error\n");
+
+		sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR);
+	}
+
+	if (status & Y2_IS_PAR_MAC1) {
+		if (net_ratelimit())
+			netdev_err(dev, "MAC parity error\n");
+		sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE);
+	}
+
+	if (status & Y2_IS_PAR_RX1) {
+		if (net_ratelimit())
+			netdev_err(dev, "RX parity error\n");
+		sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR);
+	}
+
+	if (status & Y2_IS_TCP_TXA1) {
+		if (net_ratelimit())
+			netdev_err(dev, "TCP segmentation error\n");
+		sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP);
+	}
+}
+
+static void sky2_hw_intr(struct sky2_hw *hw)
+{
+	struct pci_dev *pdev = hw->pdev;
+	u32 status = sky2_read32(hw, B0_HWE_ISRC);
+	u32 hwmsk = sky2_read32(hw, B0_HWE_IMSK);
+
+	status &= hwmsk;
+
+	if (status & Y2_IS_TIST_OV)
+		sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
+
+	if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
+		u16 pci_err;
+
+		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+		pci_err = sky2_pci_read16(hw, PCI_STATUS);
+		if (net_ratelimit())
+			dev_err(&pdev->dev, "PCI hardware error (0x%x)\n",
+			        pci_err);
+
+		sky2_pci_write16(hw, PCI_STATUS,
+				      pci_err | PCI_STATUS_ERROR_BITS);
+		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+	}
+
+	if (status & Y2_IS_PCI_EXP) {
+		/* PCI-Express uncorrectable Error occurred */
+		u32 err;
+
+		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+		err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
+		sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
+			     0xfffffffful);
+		if (net_ratelimit())
+			dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err);
+
+		sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
+		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+	}
+
+	if (status & Y2_HWE_L1_MASK)
+		sky2_hw_error(hw, 0, status);
+	status >>= 8;
+	if (status & Y2_HWE_L1_MASK)
+		sky2_hw_error(hw, 1, status);
+}
+
+static void sky2_mac_intr(struct sky2_hw *hw, unsigned port)
+{
+	struct net_device *dev = hw->dev[port];
+	struct sky2_port *sky2 = netdev_priv(dev);
+	u8 status = sky2_read8(hw, SK_REG(port, GMAC_IRQ_SRC));
+
+	netif_info(sky2, intr, dev, "mac interrupt status 0x%x\n", status);
+
+	if (status & GM_IS_RX_CO_OV)
+		gma_read16(hw, port, GM_RX_IRQ_SRC);
+
+	if (status & GM_IS_TX_CO_OV)
+		gma_read16(hw, port, GM_TX_IRQ_SRC);
+
+	if (status & GM_IS_RX_FF_OR) {
+		++dev->stats.rx_fifo_errors;
+		sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
+	}
+
+	if (status & GM_IS_TX_FF_UR) {
+		++dev->stats.tx_fifo_errors;
+		sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
+	}
+}
+
+/* This should never happen; it is a bug. */
+static void sky2_le_error(struct sky2_hw *hw, unsigned port, u16 q)
+{
+	struct net_device *dev = hw->dev[port];
+	u16 idx = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));
+
+	dev_err(&hw->pdev->dev, "%s: descriptor error q=%#x get=%u put=%u\n",
+		dev->name, (unsigned) q, (unsigned) idx,
+		(unsigned) sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX)));
+
+	sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_IRQ_CHK);
+}
+
+static int sky2_rx_hung(struct net_device *dev)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+	struct sky2_hw *hw = sky2->hw;
+	unsigned port = sky2->port;
+	unsigned rxq = rxqaddr[port];
+	u32 mac_rp = sky2_read32(hw, SK_REG(port, RX_GMF_RP));
+	u8 mac_lev = sky2_read8(hw, SK_REG(port, RX_GMF_RLEV));
+	u8 fifo_rp = sky2_read8(hw, Q_ADDR(rxq, Q_RP));
+	u8 fifo_lev = sky2_read8(hw, Q_ADDR(rxq, Q_RL));
+
+	/* If idle and MAC or PCI is stuck */
+	if (sky2->check.last == dev->last_rx &&
+	    ((mac_rp == sky2->check.mac_rp &&
+	      mac_lev != 0 && mac_lev >= sky2->check.mac_lev) ||
+	     /* Check if the PCI RX path hung */
+	     (fifo_rp == sky2->check.fifo_rp &&
+	      fifo_lev != 0 && fifo_lev >= sky2->check.fifo_lev))) {
+		netdev_printk(KERN_DEBUG, dev,
+			      "hung mac %d:%d fifo %d (%d:%d)\n",
+			      mac_lev, mac_rp, fifo_lev,
+			      fifo_rp, sky2_read8(hw, Q_ADDR(rxq, Q_WP)));
+		return 1;
+	} else {
+		sky2->check.last = dev->last_rx;
+		sky2->check.mac_rp = mac_rp;
+		sky2->check.mac_lev = mac_lev;
+		sky2->check.fifo_rp = fifo_rp;
+		sky2->check.fifo_lev = fifo_lev;
+		return 0;
+	}
+}
+
+static void sky2_watchdog(unsigned long arg)
+{
+	struct sky2_hw *hw = (struct sky2_hw *) arg;
+
+	/* Check for lost IRQ once a second */
+	if (sky2_read32(hw, B0_ISRC)) {
+		napi_schedule(&hw->napi);
+	} else {
+		int i, active = 0;
+
+		for (i = 0; i < hw->ports; i++) {
+			struct net_device *dev = hw->dev[i];
+			if (!netif_running(dev))
+				continue;
+			++active;
+
+			/* For chips with Rx FIFO, check if stuck */
+			if ((hw->flags & SKY2_HW_RAM_BUFFER) &&
+			     sky2_rx_hung(dev)) {
+				netdev_info(dev, "receiver hang detected\n");
+				schedule_work(&hw->restart_work);
+				return;
+			}
+		}
+
+		if (active == 0)
+			return;
+	}
+
+	mod_timer(&hw->watchdog_timer, round_jiffies(jiffies + HZ));
+}
+
+/* Hardware/software error handling */
+static void sky2_err_intr(struct sky2_hw *hw, u32 status)
+{
+	if (net_ratelimit())
+		dev_warn(&hw->pdev->dev, "error interrupt status=%#x\n", status);
+
+	if (status & Y2_IS_HW_ERR)
+		sky2_hw_intr(hw);
+
+	if (status & Y2_IS_IRQ_MAC1)
+		sky2_mac_intr(hw, 0);
+
+	if (status & Y2_IS_IRQ_MAC2)
+		sky2_mac_intr(hw, 1);
+
+	if (status & Y2_IS_CHK_RX1)
+		sky2_le_error(hw, 0, Q_R1);
+
+	if (status & Y2_IS_CHK_RX2)
+		sky2_le_error(hw, 1, Q_R2);
+
+	if (status & Y2_IS_CHK_TXA1)
+		sky2_le_error(hw, 0, Q_XA1);
+
+	if (status & Y2_IS_CHK_TXA2)
+		sky2_le_error(hw, 1, Q_XA2);
+}
+
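+/* NAPI poll: handle error and PHY events first, then drain status ring
+ * entries until the hardware put index catches up or the budget is
+ * exhausted.  The read of B0_Y2_SP_LISR after napi_complete() re-enables
+ * the interrupt that was masked by the ISRC2 read in sky2_intr().
+ */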
+static int sky2_poll(struct napi_struct *napi, int work_limit)
+{
+	struct sky2_hw *hw = container_of(napi, struct sky2_hw, napi);
+	u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
+	int work_done = 0;
+	u16 idx;
+
+	if (unlikely(status & Y2_IS_ERROR))
+		sky2_err_intr(hw, status);
+
+	if (status & Y2_IS_IRQ_PHY1)
+		sky2_phy_intr(hw, 0);
+
+	if (status & Y2_IS_IRQ_PHY2)
+		sky2_phy_intr(hw, 1);
+
+	if (status & Y2_IS_PHY_QLNK)
+		sky2_qlink_intr(hw);
+
+	while ((idx = sky2_read16(hw, STAT_PUT_IDX)) != hw->st_idx) {
+		work_done += sky2_status_intr(hw, work_limit - work_done, idx);
+
+		if (work_done >= work_limit)
+			goto done;
+	}
+
+	napi_complete(napi);
+	sky2_read32(hw, B0_Y2_SP_LISR);
+done:
+
+	return work_done;
+}
+
+static irqreturn_t sky2_intr(int irq, void *dev_id)
+{
+	struct sky2_hw *hw = dev_id;
+	u32 status;
+
+	/* Reading this register masks interrupts as a side effect */
+	status = sky2_read32(hw, B0_Y2_SP_ISRC2);
+	if (status == 0 || status == ~0) {
+		sky2_write32(hw, B0_Y2_SP_ICR, 2);
+		return IRQ_NONE;
+	}
+
+	prefetch(&hw->st_le[hw->st_idx]);
+
+	napi_schedule(&hw->napi);
+
+	return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void sky2_netpoll(struct net_device *dev)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+
+	napi_schedule(&sky2->hw->napi);
+}
+#endif
+
+/* Chip internal frequency for clock calculations */
+static u32 sky2_mhz(const struct sky2_hw *hw)
+{
+	switch (hw->chip_id) {
+	case CHIP_ID_YUKON_EC:
+	case CHIP_ID_YUKON_EC_U:
+	case CHIP_ID_YUKON_EX:
+	case CHIP_ID_YUKON_SUPR:
+	case CHIP_ID_YUKON_UL_2:
+	case CHIP_ID_YUKON_OPT:
+	case CHIP_ID_YUKON_PRM:
+	case CHIP_ID_YUKON_OP_2:
+		return 125;
+
+	case CHIP_ID_YUKON_FE:
+		return 100;
+
+	case CHIP_ID_YUKON_FE_P:
+		return 50;
+
+	case CHIP_ID_YUKON_XL:
+		return 156;
+
+	default:
+		BUG();
+	}
+}
+
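+/* Convert between microseconds and core clock ticks, e.g. at 125 MHz
+ * an interval of 20 us corresponds to 2500 ticks.
+ */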
+static inline u32 sky2_us2clk(const struct sky2_hw *hw, u32 us)
+{
+	return sky2_mhz(hw) * us;
+}
+
+static inline u32 sky2_clk2us(const struct sky2_hw *hw, u32 clk)
+{
+	return clk / sky2_mhz(hw);
+}
+
+
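+/* Identify the chip and revision, derive the capability flags used
+ * throughout the driver, and count the number of ports.
+ */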
+static int sky2_init(struct sky2_hw *hw)
+{
+	u8 t8;
+
+	/* Enable all clocks and check for bad PCI access */
+	sky2_pci_write32(hw, PCI_DEV_REG3, 0);
+
+	sky2_write8(hw, B0_CTST, CS_RST_CLR);
+
+	hw->chip_id = sky2_read8(hw, B2_CHIP_ID);
+	hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;
+
+	switch (hw->chip_id) {
+	case CHIP_ID_YUKON_XL:
+		hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY;
+		if (hw->chip_rev < CHIP_REV_YU_XL_A2)
+			hw->flags |= SKY2_HW_RSS_BROKEN;
+		break;
+
+	case CHIP_ID_YUKON_EC_U:
+		hw->flags = SKY2_HW_GIGABIT
+			| SKY2_HW_NEWER_PHY
+			| SKY2_HW_ADV_POWER_CTL;
+		break;
+
+	case CHIP_ID_YUKON_EX:
+		hw->flags = SKY2_HW_GIGABIT
+			| SKY2_HW_NEWER_PHY
+			| SKY2_HW_NEW_LE
+			| SKY2_HW_ADV_POWER_CTL
+			| SKY2_HW_RSS_CHKSUM;
+
+		/* New transmit checksum */
+		if (hw->chip_rev != CHIP_REV_YU_EX_B0)
+			hw->flags |= SKY2_HW_AUTO_TX_SUM;
+		break;
+
+	case CHIP_ID_YUKON_EC:
+		/* This rev is really old, and requires untested workarounds */
+		if (hw->chip_rev == CHIP_REV_YU_EC_A1) {
+			dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n");
+			return -EOPNOTSUPP;
+		}
+		hw->flags = SKY2_HW_GIGABIT | SKY2_HW_RSS_BROKEN;
+		break;
+
+	case CHIP_ID_YUKON_FE:
+		hw->flags = SKY2_HW_RSS_BROKEN;
+		break;
+
+	case CHIP_ID_YUKON_FE_P:
+		hw->flags = SKY2_HW_NEWER_PHY
+			| SKY2_HW_NEW_LE
+			| SKY2_HW_AUTO_TX_SUM
+			| SKY2_HW_ADV_POWER_CTL;
+
+		/* The workaround for status conflicts with VLAN tag detection. */
+		if (hw->chip_rev == CHIP_REV_YU_FE2_A0)
+			hw->flags |= SKY2_HW_VLAN_BROKEN | SKY2_HW_RSS_CHKSUM;
+		break;
+
+	case CHIP_ID_YUKON_SUPR:
+		hw->flags = SKY2_HW_GIGABIT
+			| SKY2_HW_NEWER_PHY
+			| SKY2_HW_NEW_LE
+			| SKY2_HW_AUTO_TX_SUM
+			| SKY2_HW_ADV_POWER_CTL;
+
+		if (hw->chip_rev == CHIP_REV_YU_SU_A0)
+			hw->flags |= SKY2_HW_RSS_CHKSUM;
+		break;
+
+	case CHIP_ID_YUKON_UL_2:
+		hw->flags = SKY2_HW_GIGABIT
+			| SKY2_HW_ADV_POWER_CTL;
+		break;
+
+	case CHIP_ID_YUKON_OPT:
+	case CHIP_ID_YUKON_PRM:
+	case CHIP_ID_YUKON_OP_2:
+		hw->flags = SKY2_HW_GIGABIT
+			| SKY2_HW_NEW_LE
+			| SKY2_HW_ADV_POWER_CTL;
+		break;
+
+	default:
+		dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n",
+			hw->chip_id);
+		return -EOPNOTSUPP;
+	}
+
+	hw->pmd_type = sky2_read8(hw, B2_PMD_TYP);
+	if (hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P')
+		hw->flags |= SKY2_HW_FIBRE_PHY;
+
+	hw->ports = 1;
+	t8 = sky2_read8(hw, B2_Y2_HW_RES);
+	if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) {
+		if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
+			++hw->ports;
+	}
+
+	if (sky2_read8(hw, B2_E_0))
+		hw->flags |= SKY2_HW_RAM_BUFFER;
+
+	return 0;
+}
+
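+/* Full chip (re)initialization: stop the ASF firmware unit, clear PCI
+ * error state, then set up the RAM interface, GMACs and the status
+ * ring/status unit from scratch.
+ */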
+static void sky2_reset(struct sky2_hw *hw)
+{
+	struct pci_dev *pdev = hw->pdev;
+	u16 status;
+	int i;
+	u32 hwe_mask = Y2_HWE_ALL_MASK;
+
+	/* disable ASF */
+	if (hw->chip_id == CHIP_ID_YUKON_EX
+	    || hw->chip_id == CHIP_ID_YUKON_SUPR) {
+		sky2_write32(hw, CPU_WDOG, 0);
+		status = sky2_read16(hw, HCU_CCSR);
+		status &= ~(HCU_CCSR_AHB_RST | HCU_CCSR_CPU_RST_MODE |
+			    HCU_CCSR_UC_STATE_MSK);
+		/*
+		 * CPU clock divider shouldn't be used because
+		 * - ASF firmware may malfunction
+		 * - Yukon-Supreme: Parallel FLASH doesn't support divided clocks
+		 */
+		status &= ~HCU_CCSR_CPU_CLK_DIVIDE_MSK;
+		sky2_write16(hw, HCU_CCSR, status);
+		sky2_write32(hw, CPU_WDOG, 0);
+	} else
+		sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
+	sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE);
+
+	/* do a SW reset */
+	sky2_write8(hw, B0_CTST, CS_RST_SET);
+	sky2_write8(hw, B0_CTST, CS_RST_CLR);
+
+	/* allow writes to PCI config */
+	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+
+	/* clear PCI errors, if any */
+	status = sky2_pci_read16(hw, PCI_STATUS);
+	status |= PCI_STATUS_ERROR_BITS;
+	sky2_pci_write16(hw, PCI_STATUS, status);
+
+	sky2_write8(hw, B0_CTST, CS_MRST_CLR);
+
+	if (pci_is_pcie(pdev)) {
+		sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
+			     0xfffffffful);
+
+		/* If the error bit is stuck on, ignore it */
+		if (sky2_read32(hw, B0_HWE_ISRC) & Y2_IS_PCI_EXP)
+			dev_info(&pdev->dev, "ignoring stuck error report bit\n");
+		else
+			hwe_mask |= Y2_IS_PCI_EXP;
+	}
+
+	sky2_power_on(hw);
+	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+
+	for (i = 0; i < hw->ports; i++) {
+		sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
+		sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
+
+		if (hw->chip_id == CHIP_ID_YUKON_EX ||
+		    hw->chip_id == CHIP_ID_YUKON_SUPR)
+			sky2_write16(hw, SK_REG(i, GMAC_CTRL),
+				     GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON
+				     | GMC_BYP_RETR_ON);
+
+	}
+
+	if (hw->chip_id == CHIP_ID_YUKON_SUPR && hw->chip_rev > CHIP_REV_YU_SU_B0) {
+		/* enable MACSec clock gating */
+		sky2_pci_write32(hw, PCI_DEV_REG3, P_CLK_MACSEC_DIS);
+	}
+
+	if (hw->chip_id == CHIP_ID_YUKON_OPT ||
+	    hw->chip_id == CHIP_ID_YUKON_PRM ||
+	    hw->chip_id == CHIP_ID_YUKON_OP_2) {
+		u16 reg;
+
+		if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) {
+			/* disable PCI-E PHY power down (set PHY reg 0x80, bit 7) */
+			sky2_write32(hw, Y2_PEX_PHY_DATA, (0x80UL << 16) | (1 << 7));
+
+			/* set PHY Link Detect Timer to 1.1 second (11x 100ms) */
+			reg = 10;
+
+			/* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */
+			sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16));
+		} else {
+			/* set PHY Link Detect Timer to 0.4 second (4x 100ms) */
+			reg = 3;
+		}
+
+		reg <<= PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE;
+		reg |= PSM_CONFIG_REG4_RST_PHY_LINK_DETECT;
+
+		/* reset PHY Link Detect */
+		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+		sky2_pci_write16(hw, PSM_CONFIG_REG4, reg);
+
+		/* check if PSMv2 was running before */
+		reg = sky2_pci_read16(hw, PSM_CONFIG_REG3);
+		if (reg & PCI_EXP_LNKCTL_ASPMC)
+			/* restore the PCIe Link Control register */
+			sky2_pci_write16(hw, pdev->pcie_cap + PCI_EXP_LNKCTL,
+					 reg);
+
+		if (hw->chip_id == CHIP_ID_YUKON_PRM &&
+			hw->chip_rev == CHIP_REV_YU_PRM_A0) {
+			/* change PHY Interrupt polarity to low active */
+			reg = sky2_read16(hw, GPHY_CTRL);
+			sky2_write16(hw, GPHY_CTRL, reg | GPC_INTPOL);
+
+			/* adapt HW for low active PHY Interrupt */
+			reg = sky2_read16(hw, Y2_CFG_SPC + PCI_LDO_CTRL);
+			sky2_write16(hw, Y2_CFG_SPC + PCI_LDO_CTRL, reg | PHY_M_UNDOC1);
+		}
+
+		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+
+		/* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */
+		sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16));
+	}
+
+	/* Clear I2C IRQ noise */
+	sky2_write32(hw, B2_I2C_IRQ, 1);
+
+	/* turn off hardware timer (unused) */
+	sky2_write8(hw, B2_TI_CTRL, TIM_STOP);
+	sky2_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);
+
+	/* Turn off descriptor polling */
+	sky2_write32(hw, B28_DPT_CTRL, DPT_STOP);
+
+	/* Turn off receive timestamp */
+	sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_STOP);
+	sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
+
+	/* enable the Tx Arbiters */
+	for (i = 0; i < hw->ports; i++)
+		sky2_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);
+
+	/* Initialize ram interface */
+	for (i = 0; i < hw->ports; i++) {
+		sky2_write8(hw, RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
+
+		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R1), SK_RI_TO_53);
+		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA1), SK_RI_TO_53);
+		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS1), SK_RI_TO_53);
+		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R1), SK_RI_TO_53);
+		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA1), SK_RI_TO_53);
+		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS1), SK_RI_TO_53);
+		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R2), SK_RI_TO_53);
+		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA2), SK_RI_TO_53);
+		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS2), SK_RI_TO_53);
+		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R2), SK_RI_TO_53);
+		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA2), SK_RI_TO_53);
+		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS2), SK_RI_TO_53);
+	}
+
+	sky2_write32(hw, B0_HWE_IMSK, hwe_mask);
+
+	for (i = 0; i < hw->ports; i++)
+		sky2_gmac_reset(hw, i);
+
+	memset(hw->st_le, 0, hw->st_size * sizeof(struct sky2_status_le));
+	hw->st_idx = 0;
+
+	sky2_write32(hw, STAT_CTRL, SC_STAT_RST_SET);
+	sky2_write32(hw, STAT_CTRL, SC_STAT_RST_CLR);
+
+	sky2_write32(hw, STAT_LIST_ADDR_LO, hw->st_dma);
+	sky2_write32(hw, STAT_LIST_ADDR_HI, (u64) hw->st_dma >> 32);
+
+	/* Set the list last index */
+	sky2_write16(hw, STAT_LAST_IDX, hw->st_size - 1);
+
+	sky2_write16(hw, STAT_TX_IDX_TH, 10);
+	sky2_write8(hw, STAT_FIFO_WM, 16);
+
+	/* set Status-FIFO ISR watermark */
+	if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0)
+		sky2_write8(hw, STAT_FIFO_ISR_WM, 4);
+	else
+		sky2_write8(hw, STAT_FIFO_ISR_WM, 16);
+
+	sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000));
+	sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 20));
+	sky2_write32(hw, STAT_LEV_TIMER_INI, sky2_us2clk(hw, 100));
+
+	/* enable status unit */
+	sky2_write32(hw, STAT_CTRL, SC_STAT_OP_ON);
+
+	sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
+	sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
+	sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
+}
+
+/* Take device down (offline).
+ * Equivalent to doing dev_stop() but this does not
+ * inform upper layers of the transition.
+ */
+static void sky2_detach(struct net_device *dev)
+{
+	if (netif_running(dev)) {
+		netif_tx_lock(dev);
+		netif_device_detach(dev);	/* stop txq */
+		netif_tx_unlock(dev);
+		sky2_close(dev);
+	}
+}
+
+/* Bring device back after doing sky2_detach */
+static int sky2_reattach(struct net_device *dev)
+{
+	int err = 0;
+
+	if (netif_running(dev)) {
+		err = sky2_open(dev);
+		if (err) {
+			netdev_info(dev, "could not restart %d\n", err);
+			dev_close(dev);
+		} else {
+			netif_device_attach(dev);
+			sky2_set_multicast(dev);
+		}
+	}
+
+	return err;
+}
+
+static void sky2_all_down(struct sky2_hw *hw)
+{
+	int i;
+
+	if (hw->flags & SKY2_HW_IRQ_SETUP) {
+		sky2_write32(hw, B0_IMSK, 0);
+		sky2_read32(hw, B0_IMSK);
+
+		synchronize_irq(hw->pdev->irq);
+		napi_disable(&hw->napi);
+	}
+
+	for (i = 0; i < hw->ports; i++) {
+		struct net_device *dev = hw->dev[i];
+		struct sky2_port *sky2 = netdev_priv(dev);
+
+		if (!netif_running(dev))
+			continue;
+
+		netif_carrier_off(dev);
+		netif_tx_disable(dev);
+		sky2_hw_down(sky2);
+	}
+}
+
+static void sky2_all_up(struct sky2_hw *hw)
+{
+	u32 imask = Y2_IS_BASE;
+	int i;
+
+	for (i = 0; i < hw->ports; i++) {
+		struct net_device *dev = hw->dev[i];
+		struct sky2_port *sky2 = netdev_priv(dev);
+
+		if (!netif_running(dev))
+			continue;
+
+		sky2_hw_up(sky2);
+		sky2_set_multicast(dev);
+		imask |= portirq_msk[i];
+		netif_wake_queue(dev);
+	}
+
+	if (hw->flags & SKY2_HW_IRQ_SETUP) {
+		sky2_write32(hw, B0_IMSK, imask);
+		sky2_read32(hw, B0_IMSK);
+		sky2_read32(hw, B0_Y2_SP_LISR);
+		napi_enable(&hw->napi);
+	}
+}
+
+static void sky2_restart(struct work_struct *work)
+{
+	struct sky2_hw *hw = container_of(work, struct sky2_hw, restart_work);
+
+	rtnl_lock();
+
+	sky2_all_down(hw);
+	sky2_reset(hw);
+	sky2_all_up(hw);
+
+	rtnl_unlock();
+}
+
+static inline u8 sky2_wol_supported(const struct sky2_hw *hw)
+{
+	return sky2_is_copper(hw) ? (WAKE_PHY | WAKE_MAGIC) : 0;
+}
+
+static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	const struct sky2_port *sky2 = netdev_priv(dev);
+
+	wol->supported = sky2_wol_supported(sky2->hw);
+	wol->wolopts = sky2->wol;
+}
+
+static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+	struct sky2_hw *hw = sky2->hw;
+	bool enable_wakeup = false;
+	int i;
+
+	if ((wol->wolopts & ~sky2_wol_supported(sky2->hw)) ||
+	    !device_can_wakeup(&hw->pdev->dev))
+		return -EOPNOTSUPP;
+
+	sky2->wol = wol->wolopts;
+
+	for (i = 0; i < hw->ports; i++) {
+		struct net_device *dev = hw->dev[i];
+		struct sky2_port *sky2 = netdev_priv(dev);
+
+		if (sky2->wol)
+			enable_wakeup = true;
+	}
+	device_set_wakeup_enable(&hw->pdev->dev, enable_wakeup);
+
+	return 0;
+}
+
+static u32 sky2_supported_modes(const struct sky2_hw *hw)
+{
+	if (sky2_is_copper(hw)) {
+		u32 modes = SUPPORTED_10baseT_Half
+			| SUPPORTED_10baseT_Full
+			| SUPPORTED_100baseT_Half
+			| SUPPORTED_100baseT_Full;
+
+		if (hw->flags & SKY2_HW_GIGABIT)
+			modes |= SUPPORTED_1000baseT_Half
+				| SUPPORTED_1000baseT_Full;
+		return modes;
+	} else
+		return SUPPORTED_1000baseT_Half
+			| SUPPORTED_1000baseT_Full;
+}
+
+static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+	struct sky2_hw *hw = sky2->hw;
+
+	ecmd->transceiver = XCVR_INTERNAL;
+	ecmd->supported = sky2_supported_modes(hw);
+	ecmd->phy_address = PHY_ADDR_MARV;
+	if (sky2_is_copper(hw)) {
+		ecmd->port = PORT_TP;
+		ethtool_cmd_speed_set(ecmd, sky2->speed);
+		ecmd->supported |=  SUPPORTED_Autoneg | SUPPORTED_TP;
+	} else {
+		ethtool_cmd_speed_set(ecmd, SPEED_1000);
+		ecmd->port = PORT_FIBRE;
+		ecmd->supported |=  SUPPORTED_Autoneg | SUPPORTED_FIBRE;
+	}
+
+	ecmd->advertising = sky2->advertising;
+	ecmd->autoneg = (sky2->flags & SKY2_FLAG_AUTO_SPEED)
+		? AUTONEG_ENABLE : AUTONEG_DISABLE;
+	ecmd->duplex = sky2->duplex;
+	return 0;
+}
+
+static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+	const struct sky2_hw *hw = sky2->hw;
+	u32 supported = sky2_supported_modes(hw);
+
+	if (ecmd->autoneg == AUTONEG_ENABLE) {
+		if (ecmd->advertising & ~supported)
+			return -EINVAL;
+
+		if (sky2_is_copper(hw))
+			sky2->advertising = ecmd->advertising |
+					    ADVERTISED_TP |
+					    ADVERTISED_Autoneg;
+		else
+			sky2->advertising = ecmd->advertising |
+					    ADVERTISED_FIBRE |
+					    ADVERTISED_Autoneg;
+
+		sky2->flags |= SKY2_FLAG_AUTO_SPEED;
+		sky2->duplex = -1;
+		sky2->speed = -1;
+	} else {
+		u32 setting;
+		u32 speed = ethtool_cmd_speed(ecmd);
+
+		switch (speed) {
+		case SPEED_1000:
+			if (ecmd->duplex == DUPLEX_FULL)
+				setting = SUPPORTED_1000baseT_Full;
+			else if (ecmd->duplex == DUPLEX_HALF)
+				setting = SUPPORTED_1000baseT_Half;
+			else
+				return -EINVAL;
+			break;
+		case SPEED_100:
+			if (ecmd->duplex == DUPLEX_FULL)
+				setting = SUPPORTED_100baseT_Full;
+			else if (ecmd->duplex == DUPLEX_HALF)
+				setting = SUPPORTED_100baseT_Half;
+			else
+				return -EINVAL;
+			break;
+
+		case SPEED_10:
+			if (ecmd->duplex == DUPLEX_FULL)
+				setting = SUPPORTED_10baseT_Full;
+			else if (ecmd->duplex == DUPLEX_HALF)
+				setting = SUPPORTED_10baseT_Half;
+			else
+				return -EINVAL;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		if ((setting & supported) == 0)
+			return -EINVAL;
+
+		sky2->speed = speed;
+		sky2->duplex = ecmd->duplex;
+		sky2->flags &= ~SKY2_FLAG_AUTO_SPEED;
+	}
+
+	if (netif_running(dev)) {
+		sky2_phy_reinit(sky2);
+		sky2_set_multicast(dev);
+	}
+
+	return 0;
+}
+
+static void sky2_get_drvinfo(struct net_device *dev,
+			     struct ethtool_drvinfo *info)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(sky2->hw->pdev),
+		sizeof(info->bus_info));
+}
+
+static const struct sky2_stat {
+	char name[ETH_GSTRING_LEN];
+	u16 offset;
+} sky2_stats[] = {
+	{ "tx_bytes",	   GM_TXO_OK_HI },
+	{ "rx_bytes",	   GM_RXO_OK_HI },
+	{ "tx_broadcast",  GM_TXF_BC_OK },
+	{ "rx_broadcast",  GM_RXF_BC_OK },
+	{ "tx_multicast",  GM_TXF_MC_OK },
+	{ "rx_multicast",  GM_RXF_MC_OK },
+	{ "tx_unicast",    GM_TXF_UC_OK },
+	{ "rx_unicast",    GM_RXF_UC_OK },
+	{ "tx_mac_pause",  GM_TXF_MPAUSE },
+	{ "rx_mac_pause",  GM_RXF_MPAUSE },
+	{ "collisions",    GM_TXF_COL },
+	{ "late_collision",GM_TXF_LAT_COL },
+	{ "aborted", 	   GM_TXF_ABO_COL },
+	{ "single_collisions", GM_TXF_SNG_COL },
+	{ "multi_collisions", GM_TXF_MUL_COL },
+
+	{ "rx_short",      GM_RXF_SHT },
+	{ "rx_runt", 	   GM_RXE_FRAG },
+	{ "rx_64_byte_packets", GM_RXF_64B },
+	{ "rx_65_to_127_byte_packets", GM_RXF_127B },
+	{ "rx_128_to_255_byte_packets", GM_RXF_255B },
+	{ "rx_256_to_511_byte_packets", GM_RXF_511B },
+	{ "rx_512_to_1023_byte_packets", GM_RXF_1023B },
+	{ "rx_1024_to_1518_byte_packets", GM_RXF_1518B },
+	{ "rx_1518_to_max_byte_packets", GM_RXF_MAX_SZ },
+	{ "rx_too_long",   GM_RXF_LNG_ERR },
+	{ "rx_fifo_overflow", GM_RXE_FIFO_OV },
+	{ "rx_jabber",     GM_RXF_JAB_PKT },
+	{ "rx_fcs_error",   GM_RXF_FCS_ERR },
+
+	{ "tx_64_byte_packets", GM_TXF_64B },
+	{ "tx_65_to_127_byte_packets", GM_TXF_127B },
+	{ "tx_128_to_255_byte_packets", GM_TXF_255B },
+	{ "tx_256_to_511_byte_packets", GM_TXF_511B },
+	{ "tx_512_to_1023_byte_packets", GM_TXF_1023B },
+	{ "tx_1024_to_1518_byte_packets", GM_TXF_1518B },
+	{ "tx_1519_to_max_byte_packets", GM_TXF_MAX_SZ },
+	{ "tx_fifo_underrun", GM_TXE_FIFO_UR },
+};
+
+static u32 sky2_get_msglevel(struct net_device *netdev)
+{
+	struct sky2_port *sky2 = netdev_priv(netdev);
+	return sky2->msg_enable;
+}
+
+static int sky2_nway_reset(struct net_device *dev)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+
+	if (!netif_running(dev) || !(sky2->flags & SKY2_FLAG_AUTO_SPEED))
+		return -EINVAL;
+
+	sky2_phy_reinit(sky2);
+	sky2_set_multicast(dev);
+
+	return 0;
+}
+
+static void sky2_phy_stats(struct sky2_port *sky2, u64 *data, unsigned count)
+{
+	struct sky2_hw *hw = sky2->hw;
+	unsigned port = sky2->port;
+	int i;
+
+	data[0] = get_stats64(hw, port, GM_TXO_OK_LO);
+	data[1] = get_stats64(hw, port, GM_RXO_OK_LO);
+
+	for (i = 2; i < count; i++)
+		data[i] = get_stats32(hw, port, sky2_stats[i].offset);
+}
+
+static void sky2_set_msglevel(struct net_device *netdev, u32 value)
+{
+	struct sky2_port *sky2 = netdev_priv(netdev);
+	sky2->msg_enable = value;
+}
+
+static int sky2_get_sset_count(struct net_device *dev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return ARRAY_SIZE(sky2_stats);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void sky2_get_ethtool_stats(struct net_device *dev,
+				   struct ethtool_stats *stats, u64 *data)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+
+	sky2_phy_stats(sky2, data, ARRAY_SIZE(sky2_stats));
+}
+
+static void sky2_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < ARRAY_SIZE(sky2_stats); i++)
+			memcpy(data + i * ETH_GSTRING_LEN,
+			       sky2_stats[i].name, ETH_GSTRING_LEN);
+		break;
+	}
+}
+
+static int sky2_set_mac_address(struct net_device *dev, void *p)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+	struct sky2_hw *hw = sky2->hw;
+	unsigned port = sky2->port;
+	const struct sockaddr *addr = p;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+	memcpy_toio(hw->regs + B2_MAC_1 + port * 8,
+		    dev->dev_addr, ETH_ALEN);
+	memcpy_toio(hw->regs + B2_MAC_2 + port * 8,
+		    dev->dev_addr, ETH_ALEN);
+
+	/* virtual address for data */
+	gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);
+
+	/* physical address: used for pause frames */
+	gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
+
+	return 0;
+}
+
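+/* Set one bit of the 64-bit multicast hash filter: the low six bits of
+ * the Ethernet CRC of the address select the bit.  For example, a hash
+ * value of 35 sets bit 3 of filter[4].
+ */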
+static inline void sky2_add_filter(u8 filter[8], const u8 *addr)
+{
+	u32 bit;
+
+	bit = ether_crc(ETH_ALEN, addr) & 63;
+	filter[bit >> 3] |= 1 << (bit & 7);
+}
+
+static void sky2_set_multicast(struct net_device *dev)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+	struct sky2_hw *hw = sky2->hw;
+	unsigned port = sky2->port;
+	struct netdev_hw_addr *ha;
+	u16 reg;
+	u8 filter[8];
+	int rx_pause;
+	static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 };
+
+	rx_pause = (sky2->flow_status == FC_RX || sky2->flow_status == FC_BOTH);
+	memset(filter, 0, sizeof(filter));
+
+	reg = gma_read16(hw, port, GM_RX_CTRL);
+	reg |= GM_RXCR_UCF_ENA;
+
+	if (dev->flags & IFF_PROMISC)	/* promiscuous */
+		reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
+	else if (dev->flags & IFF_ALLMULTI)
+		memset(filter, 0xff, sizeof(filter));
+	else if (netdev_mc_empty(dev) && !rx_pause)
+		reg &= ~GM_RXCR_MCF_ENA;
+	else {
+		reg |= GM_RXCR_MCF_ENA;
+
+		if (rx_pause)
+			sky2_add_filter(filter, pause_mc_addr);
+
+		netdev_for_each_mc_addr(ha, dev)
+			sky2_add_filter(filter, ha->addr);
+	}
+
+	gma_write16(hw, port, GM_MC_ADDR_H1,
+		    (u16) filter[0] | ((u16) filter[1] << 8));
+	gma_write16(hw, port, GM_MC_ADDR_H2,
+		    (u16) filter[2] | ((u16) filter[3] << 8));
+	gma_write16(hw, port, GM_MC_ADDR_H3,
+		    (u16) filter[4] | ((u16) filter[5] << 8));
+	gma_write16(hw, port, GM_MC_ADDR_H4,
+		    (u16) filter[6] | ((u16) filter[7] << 8));
+
+	gma_write16(hw, port, GM_RX_CTRL, reg);
+}
+
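+/* The 64-bit byte/packet counters are read under the u64_stats seqcount
+ * so that a 32-bit reader cannot observe a torn update; the remaining
+ * counters come straight from the GMAC hardware counters.
+ */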
+static struct rtnl_link_stats64 *sky2_get_stats(struct net_device *dev,
+						struct rtnl_link_stats64 *stats)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+	struct sky2_hw *hw = sky2->hw;
+	unsigned port = sky2->port;
+	unsigned int start;
+	u64 _bytes, _packets;
+
+	do {
+		start = u64_stats_fetch_begin_irq(&sky2->rx_stats.syncp);
+		_bytes = sky2->rx_stats.bytes;
+		_packets = sky2->rx_stats.packets;
+	} while (u64_stats_fetch_retry_irq(&sky2->rx_stats.syncp, start));
+
+	stats->rx_packets = _packets;
+	stats->rx_bytes = _bytes;
+
+	do {
+		start = u64_stats_fetch_begin_irq(&sky2->tx_stats.syncp);
+		_bytes = sky2->tx_stats.bytes;
+		_packets = sky2->tx_stats.packets;
+	} while (u64_stats_fetch_retry_irq(&sky2->tx_stats.syncp, start));
+
+	stats->tx_packets = _packets;
+	stats->tx_bytes = _bytes;
+
+	stats->multicast = get_stats32(hw, port, GM_RXF_MC_OK)
+		+ get_stats32(hw, port, GM_RXF_BC_OK);
+
+	stats->collisions = get_stats32(hw, port, GM_TXF_COL);
+
+	stats->rx_length_errors = get_stats32(hw, port, GM_RXF_LNG_ERR);
+	stats->rx_crc_errors = get_stats32(hw, port, GM_RXF_FCS_ERR);
+	stats->rx_frame_errors = get_stats32(hw, port, GM_RXF_SHT)
+		+ get_stats32(hw, port, GM_RXE_FRAG);
+	stats->rx_over_errors = get_stats32(hw, port, GM_RXE_FIFO_OV);
+
+	stats->rx_dropped = dev->stats.rx_dropped;
+	stats->rx_fifo_errors = dev->stats.rx_fifo_errors;
+	stats->tx_fifo_errors = dev->stats.tx_fifo_errors;
+
+	return stats;
+}
+
+/* Can have one global because blinking is controlled by
+ * ethtool and that is always under the RTNL mutex
+ */
+static void sky2_led(struct sky2_port *sky2, enum led_mode mode)
+{
+	struct sky2_hw *hw = sky2->hw;
+	unsigned port = sky2->port;
+
+	spin_lock_bh(&sky2->phy_lock);
+	if (hw->chip_id == CHIP_ID_YUKON_EC_U ||
+	    hw->chip_id == CHIP_ID_YUKON_EX ||
+	    hw->chip_id == CHIP_ID_YUKON_SUPR) {
+		u16 pg;
+		pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
+		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
+
+		switch (mode) {
+		case MO_LED_OFF:
+			gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
+				     PHY_M_LEDC_LOS_CTRL(8) |
+				     PHY_M_LEDC_INIT_CTRL(8) |
+				     PHY_M_LEDC_STA1_CTRL(8) |
+				     PHY_M_LEDC_STA0_CTRL(8));
+			break;
+		case MO_LED_ON:
+			gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
+				     PHY_M_LEDC_LOS_CTRL(9) |
+				     PHY_M_LEDC_INIT_CTRL(9) |
+				     PHY_M_LEDC_STA1_CTRL(9) |
+				     PHY_M_LEDC_STA0_CTRL(9));
+			break;
+		case MO_LED_BLINK:
+			gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
+				     PHY_M_LEDC_LOS_CTRL(0xa) |
+				     PHY_M_LEDC_INIT_CTRL(0xa) |
+				     PHY_M_LEDC_STA1_CTRL(0xa) |
+				     PHY_M_LEDC_STA0_CTRL(0xa));
+			break;
+		case MO_LED_NORM:
+			gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
+				     PHY_M_LEDC_LOS_CTRL(1) |
+				     PHY_M_LEDC_INIT_CTRL(8) |
+				     PHY_M_LEDC_STA1_CTRL(7) |
+				     PHY_M_LEDC_STA0_CTRL(7));
+		}
+
+		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
+	} else
+		gm_phy_write(hw, port, PHY_MARV_LED_OVER,
+				     PHY_M_LED_MO_DUP(mode) |
+				     PHY_M_LED_MO_10(mode) |
+				     PHY_M_LED_MO_100(mode) |
+				     PHY_M_LED_MO_1000(mode) |
+				     PHY_M_LED_MO_RX(mode) |
+				     PHY_M_LED_MO_TX(mode));
+
+	spin_unlock_bh(&sky2->phy_lock);
+}
+
+/* blink LEDs for finding the board */
+static int sky2_set_phys_id(struct net_device *dev,
+			    enum ethtool_phys_id_state state)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+
+	switch (state) {
+	case ETHTOOL_ID_ACTIVE:
+		return 1;	/* cycle on/off once per second */
+	case ETHTOOL_ID_INACTIVE:
+		sky2_led(sky2, MO_LED_NORM);
+		break;
+	case ETHTOOL_ID_ON:
+		sky2_led(sky2, MO_LED_ON);
+		break;
+	case ETHTOOL_ID_OFF:
+		sky2_led(sky2, MO_LED_OFF);
+		break;
+	}
+
+	return 0;
+}
+
+static void sky2_get_pauseparam(struct net_device *dev,
+				struct ethtool_pauseparam *ecmd)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+
+	switch (sky2->flow_mode) {
+	case FC_NONE:
+		ecmd->tx_pause = ecmd->rx_pause = 0;
+		break;
+	case FC_TX:
+		ecmd->tx_pause = 1, ecmd->rx_pause = 0;
+		break;
+	case FC_RX:
+		ecmd->tx_pause = 0, ecmd->rx_pause = 1;
+		break;
+	case FC_BOTH:
+		ecmd->tx_pause = ecmd->rx_pause = 1;
+	}
+
+	ecmd->autoneg = (sky2->flags & SKY2_FLAG_AUTO_PAUSE)
+		? AUTONEG_ENABLE : AUTONEG_DISABLE;
+}
+
+static int sky2_set_pauseparam(struct net_device *dev,
+			       struct ethtool_pauseparam *ecmd)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+
+	if (ecmd->autoneg == AUTONEG_ENABLE)
+		sky2->flags |= SKY2_FLAG_AUTO_PAUSE;
+	else
+		sky2->flags &= ~SKY2_FLAG_AUTO_PAUSE;
+
+	sky2->flow_mode = sky2_flow(ecmd->rx_pause, ecmd->tx_pause);
+
+	if (netif_running(dev))
+		sky2_phy_reinit(sky2);
+
+	return 0;
+}
+
+static int sky2_get_coalesce(struct net_device *dev,
+			     struct ethtool_coalesce *ecmd)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+	struct sky2_hw *hw = sky2->hw;
+
+	if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_STOP)
+		ecmd->tx_coalesce_usecs = 0;
+	else {
+		u32 clks = sky2_read32(hw, STAT_TX_TIMER_INI);
+		ecmd->tx_coalesce_usecs = sky2_clk2us(hw, clks);
+	}
+	ecmd->tx_max_coalesced_frames = sky2_read16(hw, STAT_TX_IDX_TH);
+
+	if (sky2_read8(hw, STAT_LEV_TIMER_CTRL) == TIM_STOP)
+		ecmd->rx_coalesce_usecs = 0;
+	else {
+		u32 clks = sky2_read32(hw, STAT_LEV_TIMER_INI);
+		ecmd->rx_coalesce_usecs = sky2_clk2us(hw, clks);
+	}
+	ecmd->rx_max_coalesced_frames = sky2_read8(hw, STAT_FIFO_WM);
+
+	if (sky2_read8(hw, STAT_ISR_TIMER_CTRL) == TIM_STOP)
+		ecmd->rx_coalesce_usecs_irq = 0;
+	else {
+		u32 clks = sky2_read32(hw, STAT_ISR_TIMER_INI);
+		ecmd->rx_coalesce_usecs_irq = sky2_clk2us(hw, clks);
+	}
+
+	ecmd->rx_max_coalesced_frames_irq = sky2_read8(hw, STAT_FIFO_ISR_WM);
+
+	return 0;
+}
+
+/* Note: this affects both ports */
+static int sky2_set_coalesce(struct net_device *dev,
+			     struct ethtool_coalesce *ecmd)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+	struct sky2_hw *hw = sky2->hw;
+	const u32 tmax = sky2_clk2us(hw, 0x0ffffff);
+
+	if (ecmd->tx_coalesce_usecs > tmax ||
+	    ecmd->rx_coalesce_usecs > tmax ||
+	    ecmd->rx_coalesce_usecs_irq > tmax)
+		return -EINVAL;
+
+	if (ecmd->tx_max_coalesced_frames >= sky2->tx_ring_size-1)
+		return -EINVAL;
+	if (ecmd->rx_max_coalesced_frames > RX_MAX_PENDING)
+		return -EINVAL;
+	if (ecmd->rx_max_coalesced_frames_irq > RX_MAX_PENDING)
+		return -EINVAL;
+
+	if (ecmd->tx_coalesce_usecs == 0)
+		sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
+	else {
+		sky2_write32(hw, STAT_TX_TIMER_INI,
+			     sky2_us2clk(hw, ecmd->tx_coalesce_usecs));
+		sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
+	}
+	sky2_write16(hw, STAT_TX_IDX_TH, ecmd->tx_max_coalesced_frames);
+
+	if (ecmd->rx_coalesce_usecs == 0)
+		sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_STOP);
+	else {
+		sky2_write32(hw, STAT_LEV_TIMER_INI,
+			     sky2_us2clk(hw, ecmd->rx_coalesce_usecs));
+		sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
+	}
+	sky2_write8(hw, STAT_FIFO_WM, ecmd->rx_max_coalesced_frames);
+
+	if (ecmd->rx_coalesce_usecs_irq == 0)
+		sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_STOP);
+	else {
+		sky2_write32(hw, STAT_ISR_TIMER_INI,
+			     sky2_us2clk(hw, ecmd->rx_coalesce_usecs_irq));
+		sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
+	}
+	sky2_write8(hw, STAT_FIFO_ISR_WM, ecmd->rx_max_coalesced_frames_irq);
+	return 0;
+}
+
+/*
+ * Hardware is limited to a minimum of 128 and a maximum of 2048 entries
+ * for the ring size, rounded up to the next power of two to avoid a
+ * division in the modulus calculation.  For example, a requested pending
+ * count of 511 rounds up to a ring of 512 entries.
+ */
+static unsigned long roundup_ring_size(unsigned long pending)
+{
+	return max(128ul, roundup_pow_of_two(pending+1));
+}
+
+static void sky2_get_ringparam(struct net_device *dev,
+			       struct ethtool_ringparam *ering)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+
+	ering->rx_max_pending = RX_MAX_PENDING;
+	ering->tx_max_pending = TX_MAX_PENDING;
+
+	ering->rx_pending = sky2->rx_pending;
+	ering->tx_pending = sky2->tx_pending;
+}
+
+static int sky2_set_ringparam(struct net_device *dev,
+			      struct ethtool_ringparam *ering)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+
+	if (ering->rx_pending > RX_MAX_PENDING ||
+	    ering->rx_pending < 8 ||
+	    ering->tx_pending < TX_MIN_PENDING ||
+	    ering->tx_pending > TX_MAX_PENDING)
+		return -EINVAL;
+
+	sky2_detach(dev);
+
+	sky2->rx_pending = ering->rx_pending;
+	sky2->tx_pending = ering->tx_pending;
+	sky2->tx_ring_size = roundup_ring_size(sky2->tx_pending);
+
+	return sky2_reattach(dev);
+}
+
+static int sky2_get_regs_len(struct net_device *dev)
+{
+	return 0x4000;
+}
+
+static int sky2_reg_access_ok(struct sky2_hw *hw, unsigned int b)
+{
+	/* This complicated switch statement is to make sure we
+	 * only access regions that are unreserved.
+	 * Some blocks are only valid on dual port cards.
+	 */
+	switch (b) {
+	/* second port */
+	case 5:		/* Tx Arbiter 2 */
+	case 9:		/* RX2 */
+	case 14 ... 15:	/* TX2 */
+	case 17: case 19: /* Ram Buffer 2 */
+	case 22 ... 23: /* Tx Ram Buffer 2 */
+	case 25:	/* Rx MAC Fifo 2 */
+	case 27:	/* Tx MAC Fifo 2 */
+	case 31:	/* GPHY 2 */
+	case 40 ... 47: /* Pattern Ram 2 */
+	case 52: case 54: /* TCP Segmentation 2 */
+	case 112 ... 116: /* GMAC 2 */
+		return hw->ports > 1;
+
+	case 0:		/* Control */
+	case 2:		/* Mac address */
+	case 4:		/* Tx Arbiter 1 */
+	case 7:		/* PCI express reg */
+	case 8:		/* RX1 */
+	case 12 ... 13: /* TX1 */
+	case 16: case 18:/* Rx Ram Buffer 1 */
+	case 20 ... 21: /* Tx Ram Buffer 1 */
+	case 24:	/* Rx MAC Fifo 1 */
+	case 26:	/* Tx MAC Fifo 1 */
+	case 28 ... 29: /* Descriptor and status unit */
+	case 30:	/* GPHY 1*/
+	case 32 ... 39: /* Pattern Ram 1 */
+	case 48: case 50: /* TCP Segmentation 1 */
+	case 56 ... 60:	/* PCI space */
+	case 80 ... 84:	/* GMAC 1 */
+		return 1;
+
+	default:
+		return 0;
+	}
+}
+
+/*
+ * Returns a copy of the control register region
+ * Note: ethtool_get_regs always provides a full-size (16k) buffer
+ */
+static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+			  void *p)
+{
+	const struct sky2_port *sky2 = netdev_priv(dev);
+	const void __iomem *io = sky2->hw->regs;
+	unsigned int b;
+
+	regs->version = 1;
+
+	for (b = 0; b < 128; b++) {
+		/* skip poisonous diagnostic ram region in block 3 */
+		if (b == 3)
+			memcpy_fromio(p + 0x10, io + 0x10, 128 - 0x10);
+		else if (sky2_reg_access_ok(sky2->hw, b))
+			memcpy_fromio(p, io, 128);
+		else
+			memset(p, 0, 128);
+
+		p += 128;
+		io += 128;
+	}
+}
+
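+/* The VPD ROM size is encoded in bits 16..14 of PCI_DEV_REG2 as a power
+ * of two: size = 1 << (field + 8) bytes, i.e. 256 bytes up to 32 KB.
+ */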
+static int sky2_get_eeprom_len(struct net_device *dev)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+	struct sky2_hw *hw = sky2->hw;
+	u16 reg2;
+
+	reg2 = sky2_pci_read16(hw, PCI_DEV_REG2);
+	return 1 << (((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
+}
+
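+/* VPD handshake: a read starts by writing the offset with PCI_VPD_ADDR_F
+ * clear and is done when hardware sets the flag; a write sets the flag
+ * and is done when hardware clears it.  Poll for up to a quarter second.
+ */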
+static int sky2_vpd_wait(const struct sky2_hw *hw, int cap, u16 busy)
+{
+	unsigned long start = jiffies;
+
+	while ((sky2_pci_read16(hw, cap + PCI_VPD_ADDR) & PCI_VPD_ADDR_F) == busy) {
+		/* Can take up to 10.6 ms for write */
+		if (time_after(jiffies, start + HZ/4)) {
+			dev_err(&hw->pdev->dev, "VPD cycle timed out\n");
+			return -ETIMEDOUT;
+		}
+		mdelay(1);
+	}
+
+	return 0;
+}
+
+static int sky2_vpd_read(struct sky2_hw *hw, int cap, void *data,
+			 u16 offset, size_t length)
+{
+	int rc = 0;
+
+	while (length > 0) {
+		u32 val;
+
+		sky2_pci_write16(hw, cap + PCI_VPD_ADDR, offset);
+		rc = sky2_vpd_wait(hw, cap, 0);
+		if (rc)
+			break;
+
+		val = sky2_pci_read32(hw, cap + PCI_VPD_DATA);
+
+		memcpy(data, &val, min(sizeof(val), length));
+		offset += sizeof(u32);
+		data += sizeof(u32);
+		length -= sizeof(u32);
+	}
+
+	return rc;
+}
+
+static int sky2_vpd_write(struct sky2_hw *hw, int cap, const void *data,
+			  u16 offset, unsigned int length)
+{
+	unsigned int i;
+	int rc = 0;
+
+	for (i = 0; i < length; i += sizeof(u32)) {
+		u32 val = *(u32 *)(data + i);
+
+		sky2_pci_write32(hw, cap + PCI_VPD_DATA, val);
+		sky2_pci_write32(hw, cap + PCI_VPD_ADDR, offset | PCI_VPD_ADDR_F);
+
+		rc = sky2_vpd_wait(hw, cap, PCI_VPD_ADDR_F);
+		if (rc)
+			break;
+	}
+	return rc;
+}
+
+static int sky2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+			   u8 *data)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+	int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD);
+
+	if (!cap)
+		return -EINVAL;
+
+	eeprom->magic = SKY2_EEPROM_MAGIC;
+
+	return sky2_vpd_read(sky2->hw, cap, data, eeprom->offset, eeprom->len);
+}
+
+static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+			   u8 *data)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+	int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD);
+
+	if (!cap)
+		return -EINVAL;
+
+	if (eeprom->magic != SKY2_EEPROM_MAGIC)
+		return -EINVAL;
+
+	/* Partial writes not supported */
+	if ((eeprom->offset & 3) || (eeprom->len & 3))
+		return -EINVAL;
+
+	return sky2_vpd_write(sky2->hw, cap, data, eeprom->offset, eeprom->len);
+}
+
+static netdev_features_t sky2_fix_features(struct net_device *dev,
+	netdev_features_t features)
+{
+	const struct sky2_port *sky2 = netdev_priv(dev);
+	const struct sky2_hw *hw = sky2->hw;
+
+	/* In order to do Jumbo packets on these chips, we need to turn off
+	 * transmit store/forward. Therefore checksum offload won't work.
+	 */
+	if (dev->mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_EC_U) {
+		netdev_info(dev, "checksum offload not possible with jumbo frames\n");
+		features &= ~(NETIF_F_TSO|NETIF_F_SG|NETIF_F_ALL_CSUM);
+	}
+
+	/* Some hardware requires receive checksum for RSS to work. */
+	if ((features & NETIF_F_RXHASH) &&
+	     !(features & NETIF_F_RXCSUM) &&
+	     (sky2->hw->flags & SKY2_HW_RSS_CHKSUM)) {
+		netdev_info(dev, "receive hashing forces receive checksum\n");
+		features |= NETIF_F_RXCSUM;
+	}
+
+	return features;
+}
+
+static int sky2_set_features(struct net_device *dev, netdev_features_t features)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+	netdev_features_t changed = dev->features ^ features;
+
+	if ((changed & NETIF_F_RXCSUM) &&
+	    !(sky2->hw->flags & SKY2_HW_NEW_LE)) {
+		sky2_write32(sky2->hw,
+			     Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+			     (features & NETIF_F_RXCSUM)
+			     ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
+	}
+
+	if (changed & NETIF_F_RXHASH)
+		rx_set_rss(dev, features);
+
+	if (changed & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX))
+		sky2_vlan_mode(dev, features);
+
+	return 0;
+}
+
+static const struct ethtool_ops sky2_ethtool_ops = {
+	.get_settings	= sky2_get_settings,
+	.set_settings	= sky2_set_settings,
+	.get_drvinfo	= sky2_get_drvinfo,
+	.get_wol	= sky2_get_wol,
+	.set_wol	= sky2_set_wol,
+	.get_msglevel	= sky2_get_msglevel,
+	.set_msglevel	= sky2_set_msglevel,
+	.nway_reset	= sky2_nway_reset,
+	.get_regs_len	= sky2_get_regs_len,
+	.get_regs	= sky2_get_regs,
+	.get_link	= ethtool_op_get_link,
+	.get_eeprom_len	= sky2_get_eeprom_len,
+	.get_eeprom	= sky2_get_eeprom,
+	.set_eeprom	= sky2_set_eeprom,
+	.get_strings	= sky2_get_strings,
+	.get_coalesce	= sky2_get_coalesce,
+	.set_coalesce	= sky2_set_coalesce,
+	.get_ringparam	= sky2_get_ringparam,
+	.set_ringparam	= sky2_set_ringparam,
+	.get_pauseparam = sky2_get_pauseparam,
+	.set_pauseparam = sky2_set_pauseparam,
+	.set_phys_id	= sky2_set_phys_id,
+	.get_sset_count = sky2_get_sset_count,
+	.get_ethtool_stats = sky2_get_ethtool_stats,
+};
+
+#ifdef CONFIG_SKY2_DEBUG
+
+static struct dentry *sky2_debug;
+
+
+/*
+ * Read and parse the first part of Vital Product Data
+ */
+#define VPD_SIZE	128
+#define VPD_MAGIC	0x82
+
+static const struct vpd_tag {
+	char tag[2];
+	char *label;
+} vpd_tags[] = {
+	{ "PN",	"Part Number" },
+	{ "EC", "Engineering Level" },
+	{ "MN", "Manufacturer" },
+	{ "SN", "Serial Number" },
+	{ "YA", "Asset Tag" },
+	{ "VL", "First Error Log Message" },
+	{ "VF", "Second Error Log Message" },
+	{ "VB", "Boot Agent ROM Configuration" },
+	{ "VE", "EFI UNDI Configuration" },
+};
+
+static void sky2_show_vpd(struct seq_file *seq, struct sky2_hw *hw)
+{
+	size_t vpd_size;
+	loff_t offs;
+	u8 len;
+	unsigned char *buf;
+	u16 reg2;
+
+	reg2 = sky2_pci_read16(hw, PCI_DEV_REG2);
+	vpd_size = 1 << (((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
+
+	seq_printf(seq, "%s Product Data\n", pci_name(hw->pdev));
+	buf = kmalloc(vpd_size, GFP_KERNEL);
+	if (!buf) {
+		seq_puts(seq, "no memory!\n");
+		return;
+	}
+
+	if (pci_read_vpd(hw->pdev, 0, vpd_size, buf) < 0) {
+		seq_puts(seq, "VPD read failed\n");
+		goto out;
+	}
+
+	if (buf[0] != VPD_MAGIC) {
+		seq_printf(seq, "VPD tag mismatch: %#x\n", buf[0]);
+		goto out;
+	}
+	len = buf[1];
+	if (len == 0 || len > vpd_size - 4) {
+		seq_printf(seq, "Invalid id length: %d\n", len);
+		goto out;
+	}
+
+	seq_printf(seq, "%.*s\n", len, buf + 3);
+	offs = len + 3;
+
+	while (offs < vpd_size - 4) {
+		int i;
+
+		if (!memcmp("RW", buf + offs, 2))	/* end marker */
+			break;
+		len = buf[offs + 2];
+		if (offs + len + 3 >= vpd_size)
+			break;
+
+		for (i = 0; i < ARRAY_SIZE(vpd_tags); i++) {
+			if (!memcmp(vpd_tags[i].tag, buf + offs, 2)) {
+				seq_printf(seq, " %s: %.*s\n",
+					   vpd_tags[i].label, len, buf + offs + 3);
+				break;
+			}
+		}
+		offs += len + 3;
+	}
+out:
+	kfree(buf);
+}
+
+static int sky2_debug_show(struct seq_file *seq, void *v)
+{
+	struct net_device *dev = seq->private;
+	const struct sky2_port *sky2 = netdev_priv(dev);
+	struct sky2_hw *hw = sky2->hw;
+	unsigned port = sky2->port;
+	unsigned idx, last;
+	int sop;
+
+	sky2_show_vpd(seq, hw);
+
+	seq_printf(seq, "\nIRQ src=%x mask=%x control=%x\n",
+		   sky2_read32(hw, B0_ISRC),
+		   sky2_read32(hw, B0_IMSK),
+		   sky2_read32(hw, B0_Y2_SP_ICR));
+
+	if (!netif_running(dev)) {
+		seq_printf(seq, "network not running\n");
+		return 0;
+	}
+
+	napi_disable(&hw->napi);
+	last = sky2_read16(hw, STAT_PUT_IDX);
+
+	seq_printf(seq, "Status ring %u\n", hw->st_size);
+	if (hw->st_idx == last)
+		seq_puts(seq, "Status ring (empty)\n");
+	else {
+		seq_puts(seq, "Status ring\n");
+		for (idx = hw->st_idx; idx != last && idx < hw->st_size;
+		     idx = RING_NEXT(idx, hw->st_size)) {
+			const struct sky2_status_le *le = hw->st_le + idx;
+			seq_printf(seq, "[%d] %#x %d %#x\n",
+				   idx, le->opcode, le->length, le->status);
+		}
+		seq_puts(seq, "\n");
+	}
+
+	seq_printf(seq, "Tx ring pending=%u...%u report=%d done=%d\n",
+		   sky2->tx_cons, sky2->tx_prod,
+		   sky2_read16(hw, port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX),
+		   sky2_read16(hw, Q_ADDR(txqaddr[port], Q_DONE)));
+
+	/* Dump contents of tx ring */
+	sop = 1;
+	for (idx = sky2->tx_next; idx != sky2->tx_prod && idx < sky2->tx_ring_size;
+	     idx = RING_NEXT(idx, sky2->tx_ring_size)) {
+		const struct sky2_tx_le *le = sky2->tx_le + idx;
+		u32 a = le32_to_cpu(le->addr);
+
+		if (sop)
+			seq_printf(seq, "%u:", idx);
+		sop = 0;
+
+		switch (le->opcode & ~HW_OWNER) {
+		case OP_ADDR64:
+			seq_printf(seq, " %#x:", a);
+			break;
+		case OP_LRGLEN:
+			seq_printf(seq, " mtu=%d", a);
+			break;
+		case OP_VLAN:
+			seq_printf(seq, " vlan=%d", be16_to_cpu(le->length));
+			break;
+		case OP_TCPLISW:
+			seq_printf(seq, " csum=%#x", a);
+			break;
+		case OP_LARGESEND:
+			seq_printf(seq, " tso=%#x(%d)", a, le16_to_cpu(le->length));
+			break;
+		case OP_PACKET:
+			seq_printf(seq, " %#x(%d)", a, le16_to_cpu(le->length));
+			break;
+		case OP_BUFFER:
+			seq_printf(seq, " frag=%#x(%d)", a, le16_to_cpu(le->length));
+			break;
+		default:
+			seq_printf(seq, " op=%#x,%#x(%d)", le->opcode,
+				   a, le16_to_cpu(le->length));
+		}
+
+		if (le->ctrl & EOP) {
+			seq_putc(seq, '\n');
+			sop = 1;
+		}
+	}
+
+	seq_printf(seq, "\nRx ring hw get=%d put=%d last=%d\n",
+		   sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_GET_IDX)),
+		   sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_PUT_IDX)),
+		   sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_LAST_IDX)));
+
+	sky2_read32(hw, B0_Y2_SP_LISR);
+	napi_enable(&hw->napi);
+	return 0;
+}
+
+static int sky2_debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, sky2_debug_show, inode->i_private);
+}
+
+static const struct file_operations sky2_debug_fops = {
+	.owner		= THIS_MODULE,
+	.open		= sky2_debug_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+/*
+ * Use network device events to create/remove/rename
+ * debugfs file entries
+ */
+static int sky2_device_event(struct notifier_block *unused,
+			     unsigned long event, void *ptr)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct sky2_port *sky2 = netdev_priv(dev);
+
+	if (dev->netdev_ops->ndo_open != sky2_open || !sky2_debug)
+		return NOTIFY_DONE;
+
+	switch (event) {
+	case NETDEV_CHANGENAME:
+		if (sky2->debugfs) {
+			sky2->debugfs = debugfs_rename(sky2_debug, sky2->debugfs,
+						       sky2_debug, dev->name);
+		}
+		break;
+
+	case NETDEV_GOING_DOWN:
+		if (sky2->debugfs) {
+			netdev_printk(KERN_DEBUG, dev, "remove debugfs\n");
+			debugfs_remove(sky2->debugfs);
+			sky2->debugfs = NULL;
+		}
+		break;
+
+	case NETDEV_UP:
+		sky2->debugfs = debugfs_create_file(dev->name, S_IRUGO,
+						    sky2_debug, dev,
+						    &sky2_debug_fops);
+		if (IS_ERR(sky2->debugfs))
+			sky2->debugfs = NULL;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block sky2_notifier = {
+	.notifier_call = sky2_device_event,
+};
+
+
+static __init void sky2_debug_init(void)
+{
+	struct dentry *ent;
+
+	ent = debugfs_create_dir("sky2", NULL);
+	if (!ent || IS_ERR(ent))
+		return;
+
+	sky2_debug = ent;
+	register_netdevice_notifier(&sky2_notifier);
+}
+
+static __exit void sky2_debug_cleanup(void)
+{
+	if (sky2_debug) {
+		unregister_netdevice_notifier(&sky2_notifier);
+		debugfs_remove(sky2_debug);
+		sky2_debug = NULL;
+	}
+}
+
+#else
+#define sky2_debug_init()
+#define sky2_debug_cleanup()
+#endif
+
+/* Two copies of network device operations to handle the special case of
+   not allowing netpoll on the second port */
+static const struct net_device_ops sky2_netdev_ops[2] = {
+  {
+	.ndo_open		= sky2_open,
+	.ndo_stop		= sky2_close,
+	.ndo_start_xmit		= sky2_xmit_frame,
+	.ndo_do_ioctl		= sky2_ioctl,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address	= sky2_set_mac_address,
+	.ndo_set_rx_mode	= sky2_set_multicast,
+	.ndo_change_mtu		= sky2_change_mtu,
+	.ndo_fix_features	= sky2_fix_features,
+	.ndo_set_features	= sky2_set_features,
+	.ndo_tx_timeout		= sky2_tx_timeout,
+	.ndo_get_stats64	= sky2_get_stats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= sky2_netpoll,
+#endif
+  },
+  {
+	.ndo_open		= sky2_open,
+	.ndo_stop		= sky2_close,
+	.ndo_start_xmit		= sky2_xmit_frame,
+	.ndo_do_ioctl		= sky2_ioctl,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address	= sky2_set_mac_address,
+	.ndo_set_rx_mode	= sky2_set_multicast,
+	.ndo_change_mtu		= sky2_change_mtu,
+	.ndo_fix_features	= sky2_fix_features,
+	.ndo_set_features	= sky2_set_features,
+	.ndo_tx_timeout		= sky2_tx_timeout,
+	.ndo_get_stats64	= sky2_get_stats,
+  },
+};
+
+/* Initialize network device */
+static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
+					   int highmem, int wol)
+{
+	struct sky2_port *sky2;
+	struct net_device *dev = alloc_etherdev(sizeof(*sky2));
+	const void *iap;
+
+	if (!dev)
+		return NULL;
+
+	SET_NETDEV_DEV(dev, &hw->pdev->dev);
+	dev->irq = hw->pdev->irq;
+	dev->ethtool_ops = &sky2_ethtool_ops;
+	dev->watchdog_timeo = TX_WATCHDOG;
+	dev->netdev_ops = &sky2_netdev_ops[port];
+
+	sky2 = netdev_priv(dev);
+	sky2->netdev = dev;
+	sky2->hw = hw;
+	sky2->msg_enable = netif_msg_init(debug, default_msg);
+
+	u64_stats_init(&sky2->tx_stats.syncp);
+	u64_stats_init(&sky2->rx_stats.syncp);
+
+	/* Auto speed and flow control */
+	sky2->flags = SKY2_FLAG_AUTO_SPEED | SKY2_FLAG_AUTO_PAUSE;
+	if (hw->chip_id != CHIP_ID_YUKON_XL)
+		dev->hw_features |= NETIF_F_RXCSUM;
+
+	sky2->flow_mode = FC_BOTH;
+
+	sky2->duplex = -1;
+	sky2->speed = -1;
+	sky2->advertising = sky2_supported_modes(hw);
+	sky2->wol = wol;
+
+	spin_lock_init(&sky2->phy_lock);
+
+	sky2->tx_pending = TX_DEF_PENDING;
+	sky2->tx_ring_size = roundup_ring_size(TX_DEF_PENDING);
+	sky2->rx_pending = RX_DEF_PENDING;
+
+	hw->dev[port] = dev;
+
+	sky2->port = port;
+
+	dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO;
+
+	if (highmem)
+		dev->features |= NETIF_F_HIGHDMA;
+
+	/* Enable receive hashing unless hardware is known broken */
+	if (!(hw->flags & SKY2_HW_RSS_BROKEN))
+		dev->hw_features |= NETIF_F_RXHASH;
+
+	if (!(hw->flags & SKY2_HW_VLAN_BROKEN)) {
+		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
+				    NETIF_F_HW_VLAN_CTAG_RX;
+		dev->vlan_features |= SKY2_VLAN_OFFLOADS;
+	}
+
+	dev->features |= dev->hw_features;
+
+	/* try to get mac address in the following order:
+	 * 1) from device tree data
+	 * 2) from internal registers set by bootloader
+	 */
+	iap = of_get_mac_address(hw->pdev->dev.of_node);
+	if (iap)
+		memcpy(dev->dev_addr, iap, ETH_ALEN);
+	else
+		memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8,
+			      ETH_ALEN);
+
+	/* if the address is invalid, use a random value */
+	if (!is_valid_ether_addr(dev->dev_addr)) {
+		struct sockaddr sa = { AF_UNSPEC };
+
+		netdev_warn(dev,
+			    "Invalid MAC address, defaulting to random\n");
+		eth_hw_addr_random(dev);
+		memcpy(sa.sa_data, dev->dev_addr, ETH_ALEN);
+		if (sky2_set_mac_address(dev, &sa))
+			netdev_warn(dev, "Failed to set MAC address.\n");
+	}
+
+	return dev;
+}
+
+static void sky2_show_addr(struct net_device *dev)
+{
+	const struct sky2_port *sky2 = netdev_priv(dev);
+
+	netif_info(sky2, probe, dev, "addr %pM\n", dev->dev_addr);
+}
+
+/* Handle software interrupt used during MSI test */
+static irqreturn_t sky2_test_intr(int irq, void *dev_id)
+{
+	struct sky2_hw *hw = dev_id;
+	u32 status = sky2_read32(hw, B0_Y2_SP_ISRC2);
+
+	if (status == 0)
+		return IRQ_NONE;
+
+	if (status & Y2_IS_IRQ_SW) {
+		hw->flags |= SKY2_HW_USE_MSI;
+		wake_up(&hw->msi_wait);
+		sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
+	}
+	sky2_write32(hw, B0_Y2_SP_ICR, 2);
+
+	return IRQ_HANDLED;
+}
+
+/* Test the interrupt path by forcing a software IRQ */
+static int sky2_test_msi(struct sky2_hw *hw)
+{
+	struct pci_dev *pdev = hw->pdev;
+	int err;
+
+	init_waitqueue_head(&hw->msi_wait);
+
+	err = request_irq(pdev->irq, sky2_test_intr, 0, DRV_NAME, hw);
+	if (err) {
+		dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
+		return err;
+	}
+
+	sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);
+
+	sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ);
+	sky2_read8(hw, B0_CTST);
+
+	wait_event_timeout(hw->msi_wait, (hw->flags & SKY2_HW_USE_MSI), HZ/10);
+
+	if (!(hw->flags & SKY2_HW_USE_MSI)) {
+		/* MSI test failed, go back to INTx mode */
+		dev_info(&pdev->dev, "No interrupt generated using MSI, "
+			 "switching to INTx mode.\n");
+
+		err = -EOPNOTSUPP;
+		sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
+	}
+
+	sky2_write32(hw, B0_IMSK, 0);
+	sky2_read32(hw, B0_IMSK);
+
+	free_irq(pdev->irq, hw);
+
+	return err;
+}
+
+/* This driver supports the Yukon-2 chipset only */
+static const char *sky2_name(u8 chipid, char *buf, int sz)
+{
+	const char *name[] = {
+		"XL",		/* 0xb3 */
+		"EC Ultra", 	/* 0xb4 */
+		"Extreme",	/* 0xb5 */
+		"EC",		/* 0xb6 */
+		"FE",		/* 0xb7 */
+		"FE+",		/* 0xb8 */
+		"Supreme",	/* 0xb9 */
+		"UL 2",		/* 0xba */
+		"Unknown",	/* 0xbb */
+		"Optima",	/* 0xbc */
+		"OptimaEEE",    /* 0xbd */
+		"Optima 2",	/* 0xbe */
+	};
+
+	if (chipid >= CHIP_ID_YUKON_XL && chipid <= CHIP_ID_YUKON_OP_2)
+		strncpy(buf, name[chipid - CHIP_ID_YUKON_XL], sz);
+	else
+		snprintf(buf, sz, "(chip %#x)", chipid);
+	return buf;
+}
+
+static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct net_device *dev, *dev1;
+	struct sky2_hw *hw;
+	int err, using_dac = 0, wol_default;
+	u32 reg;
+	char buf1[16];
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "cannot enable PCI device\n");
+		goto err_out;
+	}
+
+	/* Get configuration information.
+	 * Note: use regular PCI config access only once, to test for HW
+	 *	 issues; all other PCI access goes through shared memory,
+	 *	 for speed and to avoid MMCONFIG problems.
+	 */
+	err = pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
+	if (err) {
+		dev_err(&pdev->dev, "PCI read config failed\n");
+		goto err_out_disable;
+	}
+
+	if (~reg == 0) {
+		dev_err(&pdev->dev, "PCI configuration read error\n");
+		err = -EIO;
+		goto err_out_disable;
+	}
+
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err) {
+		dev_err(&pdev->dev, "cannot obtain PCI resources\n");
+		goto err_out_disable;
+	}
+
+	pci_set_master(pdev);
+
+	if (sizeof(dma_addr_t) > sizeof(u32) &&
+	    !(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))) {
+		using_dac = 1;
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+		if (err < 0) {
+			dev_err(&pdev->dev, "unable to obtain 64 bit DMA "
+				"for consistent allocations\n");
+			goto err_out_free_regions;
+		}
+	} else {
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err) {
+			dev_err(&pdev->dev, "no usable DMA configuration\n");
+			goto err_out_free_regions;
+		}
+	}
+
+
+#ifdef __BIG_ENDIAN
+	/* The sk98lin vendor driver uses hardware byte swapping but
+	 * this driver uses software swapping.
+	 */
+	reg &= ~PCI_REV_DESC;
+	err = pci_write_config_dword(pdev, PCI_DEV_REG2, reg);
+	if (err) {
+		dev_err(&pdev->dev, "PCI write config failed\n");
+		goto err_out_free_regions;
+	}
+#endif
+
+	wol_default = device_may_wakeup(&pdev->dev) ? WAKE_MAGIC : 0;
+
+	err = -ENOMEM;
+
+	hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:")
+		     + strlen(pci_name(pdev)) + 1, GFP_KERNEL);
+	if (!hw)
+		goto err_out_free_regions;
+
+	hw->pdev = pdev;
+	sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));
+
+	hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
+	if (!hw->regs) {
+		dev_err(&pdev->dev, "cannot map device registers\n");
+		goto err_out_free_hw;
+	}
+
+	err = sky2_init(hw);
+	if (err)
+		goto err_out_iounmap;
+
+	/* ring for status responses */
+	hw->st_size = hw->ports * roundup_pow_of_two(3*RX_MAX_PENDING + TX_MAX_PENDING);
+	hw->st_le = pci_alloc_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
+					 &hw->st_dma);
+	if (!hw->st_le) {
+		err = -ENOMEM;
+		goto err_out_reset;
+	}
+
+	dev_info(&pdev->dev, "Yukon-2 %s chip revision %d\n",
+		 sky2_name(hw->chip_id, buf1, sizeof(buf1)), hw->chip_rev);
+
+	sky2_reset(hw);
+
+	dev = sky2_init_netdev(hw, 0, using_dac, wol_default);
+	if (!dev) {
+		err = -ENOMEM;
+		goto err_out_free_pci;
+	}
+
+	if (!disable_msi && pci_enable_msi(pdev) == 0) {
+		err = sky2_test_msi(hw);
+		if (err) {
+			pci_disable_msi(pdev);
+			if (err != -EOPNOTSUPP)
+				goto err_out_free_netdev;
+		}
+	}
+
+	netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT);
+
+	err = register_netdev(dev);
+	if (err) {
+		dev_err(&pdev->dev, "cannot register net device\n");
+		goto err_out_free_netdev;
+	}
+
+	netif_carrier_off(dev);
+
+	sky2_show_addr(dev);
+
+	if (hw->ports > 1) {
+		dev1 = sky2_init_netdev(hw, 1, using_dac, wol_default);
+		if (!dev1) {
+			err = -ENOMEM;
+			goto err_out_unregister;
+		}
+
+		err = register_netdev(dev1);
+		if (err) {
+			dev_err(&pdev->dev, "cannot register second net device\n");
+			goto err_out_free_dev1;
+		}
+
+		err = sky2_setup_irq(hw, hw->irq_name);
+		if (err)
+			goto err_out_unregister_dev1;
+
+		sky2_show_addr(dev1);
+	}
+
+	setup_timer(&hw->watchdog_timer, sky2_watchdog, (unsigned long) hw);
+	INIT_WORK(&hw->restart_work, sky2_restart);
+
+	pci_set_drvdata(pdev, hw);
+	pdev->d3_delay = 150;
+
+	return 0;
+
+err_out_unregister_dev1:
+	unregister_netdev(dev1);
+err_out_free_dev1:
+	free_netdev(dev1);
+err_out_unregister:
+	unregister_netdev(dev);
+err_out_free_netdev:
+	if (hw->flags & SKY2_HW_USE_MSI)
+		pci_disable_msi(pdev);
+	free_netdev(dev);
+err_out_free_pci:
+	pci_free_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
+			    hw->st_le, hw->st_dma);
+err_out_reset:
+	sky2_write8(hw, B0_CTST, CS_RST_SET);
+err_out_iounmap:
+	iounmap(hw->regs);
+err_out_free_hw:
+	kfree(hw);
+err_out_free_regions:
+	pci_release_regions(pdev);
+err_out_disable:
+	pci_disable_device(pdev);
+err_out:
+	return err;
+}
+
+static void sky2_remove(struct pci_dev *pdev)
+{
+	struct sky2_hw *hw = pci_get_drvdata(pdev);
+	int i;
+
+	if (!hw)
+		return;
+
+	del_timer_sync(&hw->watchdog_timer);
+	cancel_work_sync(&hw->restart_work);
+
+	for (i = hw->ports-1; i >= 0; --i)
+		unregister_netdev(hw->dev[i]);
+
+	sky2_write32(hw, B0_IMSK, 0);
+	sky2_read32(hw, B0_IMSK);
+
+	sky2_power_aux(hw);
+
+	sky2_write8(hw, B0_CTST, CS_RST_SET);
+	sky2_read8(hw, B0_CTST);
+
+	if (hw->ports > 1) {
+		napi_disable(&hw->napi);
+		free_irq(pdev->irq, hw);
+	}
+
+	if (hw->flags & SKY2_HW_USE_MSI)
+		pci_disable_msi(pdev);
+	pci_free_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
+			    hw->st_le, hw->st_dma);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+
+	for (i = hw->ports-1; i >= 0; --i)
+		free_netdev(hw->dev[i]);
+
+	iounmap(hw->regs);
+	kfree(hw);
+}
+
+static int sky2_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct sky2_hw *hw = pci_get_drvdata(pdev);
+	int i;
+
+	if (!hw)
+		return 0;
+
+	del_timer_sync(&hw->watchdog_timer);
+	cancel_work_sync(&hw->restart_work);
+
+	rtnl_lock();
+
+	sky2_all_down(hw);
+	for (i = 0; i < hw->ports; i++) {
+		struct net_device *dev = hw->dev[i];
+		struct sky2_port *sky2 = netdev_priv(dev);
+
+		if (sky2->wol)
+			sky2_wol_init(sky2);
+	}
+
+	sky2_power_aux(hw);
+	rtnl_unlock();
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int sky2_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct sky2_hw *hw = pci_get_drvdata(pdev);
+	int err;
+
+	if (!hw)
+		return 0;
+
+	/* Re-enable all clocks */
+	err = pci_write_config_dword(pdev, PCI_DEV_REG3, 0);
+	if (err) {
+		dev_err(&pdev->dev, "PCI write config failed\n");
+		goto out;
+	}
+
+	rtnl_lock();
+	sky2_reset(hw);
+	sky2_all_up(hw);
+	rtnl_unlock();
+
+	return 0;
+out:
+	dev_err(&pdev->dev, "resume failed (%d)\n", err);
+	pci_disable_device(pdev);
+	return err;
+}
+
+static SIMPLE_DEV_PM_OPS(sky2_pm_ops, sky2_suspend, sky2_resume);
+#define SKY2_PM_OPS (&sky2_pm_ops)
+
+#else
+
+#define SKY2_PM_OPS NULL
+#endif
+
+static void sky2_shutdown(struct pci_dev *pdev)
+{
+	struct sky2_hw *hw = pci_get_drvdata(pdev);
+	int port;
+
+	for (port = 0; port < hw->ports; port++) {
+		struct net_device *ndev = hw->dev[port];
+
+		rtnl_lock();
+		if (netif_running(ndev)) {
+			dev_close(ndev);
+			netif_device_detach(ndev);
+		}
+		rtnl_unlock();
+	}
+	sky2_suspend(&pdev->dev);
+	pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
+	pci_set_power_state(pdev, PCI_D3hot);
+}
+
+static struct pci_driver sky2_driver = {
+	.name = DRV_NAME,
+	.id_table = sky2_id_table,
+	.probe = sky2_probe,
+	.remove = sky2_remove,
+	.shutdown = sky2_shutdown,
+	.driver.pm = SKY2_PM_OPS,
+};
+
+static int __init sky2_init_module(void)
+{
+	pr_info("driver version " DRV_VERSION "\n");
+
+	sky2_debug_init();
+	return pci_register_driver(&sky2_driver);
+}
+
+static void __exit sky2_cleanup_module(void)
+{
+	pci_unregister_driver(&sky2_driver);
+	sky2_debug_cleanup();
+}
+
+module_init(sky2_init_module);
+module_exit(sky2_cleanup_module);
+
+MODULE_DESCRIPTION("Marvell Yukon 2 Gigabit Ethernet driver");
+MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h
new file mode 100644
index 0000000..ec6dcd8
--- /dev/null
+++ b/drivers/net/ethernet/marvell/sky2.h
@@ -0,0 +1,2432 @@
+/*
+ * Definitions for the new Marvell Yukon 2 driver.
+ */
+#ifndef _SKY2_H
+#define _SKY2_H
+
+#define ETH_JUMBO_MTU		9000	/* Maximum MTU supported */
+
+/* PCI config registers */
+enum {
+	PCI_DEV_REG1	= 0x40,
+	PCI_DEV_REG2	= 0x44,
+	PCI_DEV_STATUS  = 0x7c,
+	PCI_DEV_REG3	= 0x80,
+	PCI_DEV_REG4	= 0x84,
+	PCI_DEV_REG5    = 0x88,
+	PCI_CFG_REG_0	= 0x90,
+	PCI_CFG_REG_1	= 0x94,
+
+	PSM_CONFIG_REG0  = 0x98,
+	PSM_CONFIG_REG1	 = 0x9C,
+	PSM_CONFIG_REG2  = 0x160,
+	PSM_CONFIG_REG3  = 0x164,
+	PSM_CONFIG_REG4  = 0x168,
+
+	PCI_LDO_CTRL    = 0xbc,
+};
+
+/* Yukon-2 */
+enum pci_dev_reg_1 {
+	PCI_Y2_PIG_ENA	 = 1<<31, /* Enable Plug-in-Go (YUKON-2) */
+	PCI_Y2_DLL_DIS	 = 1<<30, /* Disable PCI DLL (YUKON-2) */
+	PCI_SW_PWR_ON_RST= 1<<30, /* SW Power on Reset (Yukon-EX) */
+	PCI_Y2_PHY2_COMA = 1<<29, /* Set PHY 2 to Coma Mode (YUKON-2) */
+	PCI_Y2_PHY1_COMA = 1<<28, /* Set PHY 1 to Coma Mode (YUKON-2) */
+	PCI_Y2_PHY2_POWD = 1<<27, /* Set PHY 2 to Power Down (YUKON-2) */
+	PCI_Y2_PHY1_POWD = 1<<26, /* Set PHY 1 to Power Down (YUKON-2) */
+	PCI_Y2_PME_LEGACY= 1<<15, /* PCI Express legacy power management mode */
+
+	PCI_PHY_LNK_TIM_MSK= 3L<<8,/* Bit  9.. 8:	GPHY Link Trigger Timer */
+	PCI_ENA_L1_EVENT = 1<<7, /* Enable PEX L1 Event */
+	PCI_ENA_GPHY_LNK = 1<<6, /* Enable PEX L1 on GPHY Link down */
+	PCI_FORCE_PEX_L1 = 1<<5, /* Force to PEX L1 */
+};
+
+enum pci_dev_reg_2 {
+	PCI_VPD_WR_THR	= 0xffL<<24,	/* Bit 31..24:	VPD Write Threshold */
+	PCI_DEV_SEL	= 0x7fL<<17,	/* Bit 23..17:	EEPROM Device Select */
+	PCI_VPD_ROM_SZ	= 7L<<14,	/* Bit 16..14:	VPD ROM Size	*/
+
+	PCI_PATCH_DIR	= 0xfL<<8,	/* Bit 11.. 8:	Ext Patches dir 3..0 */
+	PCI_EXT_PATCHS	= 0xfL<<4,	/* Bit	7.. 4:	Extended Patches 3..0 */
+	PCI_EN_DUMMY_RD	= 1<<3,		/* Enable Dummy Read */
+	PCI_REV_DESC	= 1<<2,		/* Reverse Desc. Bytes */
+
+	PCI_USEDATA64	= 1<<0,		/* Use 64Bit Data bus ext */
+};
+
+/*	PCI_OUR_REG_3		32 bit	Our Register 3 (Yukon-ECU only) */
+enum pci_dev_reg_3 {
+	P_CLK_ASF_REGS_DIS	= 1<<18,/* Disable Clock ASF (Yukon-Ext.) */
+	P_CLK_COR_REGS_D0_DIS	= 1<<17,/* Disable Clock Core Regs D0 */
+	P_CLK_MACSEC_DIS	= 1<<17,/* Disable Clock MACSec (Yukon-Ext.) */
+	P_CLK_PCI_REGS_D0_DIS	= 1<<16,/* Disable Clock PCI  Regs D0 */
+	P_CLK_COR_YTB_ARB_DIS	= 1<<15,/* Disable Clock YTB  Arbiter */
+	P_CLK_MAC_LNK1_D3_DIS	= 1<<14,/* Disable Clock MAC  Link1 D3 */
+	P_CLK_COR_LNK1_D0_DIS	= 1<<13,/* Disable Clock Core Link1 D0 */
+	P_CLK_MAC_LNK1_D0_DIS	= 1<<12,/* Disable Clock MAC  Link1 D0 */
+	P_CLK_COR_LNK1_D3_DIS	= 1<<11,/* Disable Clock Core Link1 D3 */
+	P_CLK_PCI_MST_ARB_DIS	= 1<<10,/* Disable Clock PCI  Master Arb. */
+	P_CLK_COR_REGS_D3_DIS	= 1<<9,	/* Disable Clock Core Regs D3 */
+	P_CLK_PCI_REGS_D3_DIS	= 1<<8,	/* Disable Clock PCI  Regs D3 */
+	P_CLK_REF_LNK1_GM_DIS	= 1<<7,	/* Disable Clock Ref. Link1 GMAC */
+	P_CLK_COR_LNK1_GM_DIS	= 1<<6,	/* Disable Clock Core Link1 GMAC */
+	P_CLK_PCI_COMMON_DIS	= 1<<5,	/* Disable Clock PCI  Common */
+	P_CLK_COR_COMMON_DIS	= 1<<4,	/* Disable Clock Core Common */
+	P_CLK_PCI_LNK1_BMU_DIS	= 1<<3,	/* Disable Clock PCI  Link1 BMU */
+	P_CLK_COR_LNK1_BMU_DIS	= 1<<2,	/* Disable Clock Core Link1 BMU */
+	P_CLK_PCI_LNK1_BIU_DIS	= 1<<1,	/* Disable Clock PCI  Link1 BIU */
+	P_CLK_COR_LNK1_BIU_DIS	= 1<<0,	/* Disable Clock Core Link1 BIU */
+	PCIE_OUR3_WOL_D3_COLD_SET = P_CLK_ASF_REGS_DIS |
+				    P_CLK_COR_REGS_D0_DIS |
+				    P_CLK_COR_LNK1_D0_DIS |
+				    P_CLK_MAC_LNK1_D0_DIS |
+				    P_CLK_PCI_MST_ARB_DIS |
+				    P_CLK_COR_COMMON_DIS |
+				    P_CLK_COR_LNK1_BMU_DIS,
+};
+
+/*	PCI_OUR_REG_4		32 bit	Our Register 4 (Yukon-ECU only) */
+enum pci_dev_reg_4 {
+				/* (Link Training & Status State Machine) */
+	P_PEX_LTSSM_STAT_MSK	= 0x7fL<<25,	/* Bit 31..25:	PEX LTSSM Mask */
+#define P_PEX_LTSSM_STAT(x)	(((x) << 25) & P_PEX_LTSSM_STAT_MSK)
+	P_PEX_LTSSM_L1_STAT	= 0x34,
+	P_PEX_LTSSM_DET_STAT	= 0x01,
+	P_TIMER_VALUE_MSK	= 0xffL<<16,	/* Bit 23..16:	Timer Value Mask */
+					/* (Active State Power Management) */
+	P_FORCE_ASPM_REQUEST	= 1<<15, /* Force ASPM Request (A1 only) */
+	P_ASPM_GPHY_LINK_DOWN	= 1<<14, /* GPHY Link Down (A1 only) */
+	P_ASPM_INT_FIFO_EMPTY	= 1<<13, /* Internal FIFO Empty (A1 only) */
+	P_ASPM_CLKRUN_REQUEST	= 1<<12, /* CLKRUN Request (A1 only) */
+
+	P_ASPM_FORCE_CLKREQ_ENA	= 1<<4,	/* Force CLKREQ Enable (A1b only) */
+	P_ASPM_CLKREQ_PAD_CTL	= 1<<3,	/* CLKREQ PAD Control (A1 only) */
+	P_ASPM_A1_MODE_SELECT	= 1<<2,	/* A1 Mode Select (A1 only) */
+	P_CLK_GATE_PEX_UNIT_ENA	= 1<<1,	/* Enable Gate PEX Unit Clock */
+	P_CLK_GATE_ROOT_COR_ENA	= 1<<0,	/* Enable Gate Root Core Clock */
+	P_ASPM_CONTROL_MSK	= P_FORCE_ASPM_REQUEST | P_ASPM_GPHY_LINK_DOWN
+				  | P_ASPM_CLKRUN_REQUEST | P_ASPM_INT_FIFO_EMPTY,
+};
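+/*
+ * Illustration: P_PEX_LTSSM_STAT() places the LTSSM state into bits
+ * 31..25; e.g. P_PEX_LTSSM_STAT(P_PEX_LTSSM_L1_STAT) evaluates to
+ * 0x34 << 25 = 0x68000000.
+ */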
+
+/*	PCI_OUR_REG_5		32 bit	Our Register 5 (Yukon-ECU only) */
+enum pci_dev_reg_5 {
+					/* Bit 31..27:	for A3 & later */
+	P_CTL_DIV_CORE_CLK_ENA	= 1<<31, /* Divide Core Clock Enable */
+	P_CTL_SRESET_VMAIN_AV	= 1<<30, /* Soft Reset for Vmain_av De-Glitch */
+	P_CTL_BYPASS_VMAIN_AV	= 1<<29, /* Bypass En. for Vmain_av De-Glitch */
+	P_CTL_TIM_VMAIN_AV_MSK	= 3<<27, /* Bit 28..27: Timer Vmain_av Mask */
+					 /* Bit 26..16: Release Clock on Event */
+	P_REL_PCIE_RST_DE_ASS	= 1<<26, /* PCIe Reset De-Asserted */
+	P_REL_GPHY_REC_PACKET	= 1<<25, /* GPHY Received Packet */
+	P_REL_INT_FIFO_N_EMPTY	= 1<<24, /* Internal FIFO Not Empty */
+	P_REL_MAIN_PWR_AVAIL	= 1<<23, /* Main Power Available */
+	P_REL_CLKRUN_REQ_REL	= 1<<22, /* CLKRUN Request Release */
+	P_REL_PCIE_RESET_ASS	= 1<<21, /* PCIe Reset Asserted */
+	P_REL_PME_ASSERTED	= 1<<20, /* PME Asserted */
+	P_REL_PCIE_EXIT_L1_ST	= 1<<19, /* PCIe Exit L1 State */
+	P_REL_LOADER_NOT_FIN	= 1<<18, /* EPROM Loader Not Finished */
+	P_REL_PCIE_RX_EX_IDLE	= 1<<17, /* PCIe Rx Exit Electrical Idle State */
+	P_REL_GPHY_LINK_UP	= 1<<16, /* GPHY Link Up */
+
+					/* Bit 10.. 0: Mask for Gate Clock */
+	P_GAT_PCIE_RST_ASSERTED	= 1<<10,/* PCIe Reset Asserted */
+	P_GAT_GPHY_N_REC_PACKET	= 1<<9, /* GPHY Not Received Packet */
+	P_GAT_INT_FIFO_EMPTY	= 1<<8, /* Internal FIFO Empty */
+	P_GAT_MAIN_PWR_N_AVAIL	= 1<<7, /* Main Power Not Available */
+	P_GAT_CLKRUN_REQ_REL	= 1<<6, /* CLKRUN Not Requested */
+	P_GAT_PCIE_RESET_ASS	= 1<<5, /* PCIe Reset Asserted */
+	P_GAT_PME_DE_ASSERTED	= 1<<4, /* PME De-Asserted */
+	P_GAT_PCIE_ENTER_L1_ST	= 1<<3, /* PCIe Enter L1 State */
+	P_GAT_LOADER_FINISHED	= 1<<2, /* EPROM Loader Finished */
+	P_GAT_PCIE_RX_EL_IDLE	= 1<<1, /* PCIe Rx Electrical Idle State */
+	P_GAT_GPHY_LINK_DOWN	= 1<<0,	/* GPHY Link Down */
+
+	PCIE_OUR5_EVENT_CLK_D3_SET = P_REL_GPHY_REC_PACKET |
+				     P_REL_INT_FIFO_N_EMPTY |
+				     P_REL_PCIE_EXIT_L1_ST |
+				     P_REL_PCIE_RX_EX_IDLE |
+				     P_GAT_GPHY_N_REC_PACKET |
+				     P_GAT_INT_FIFO_EMPTY |
+				     P_GAT_PCIE_ENTER_L1_ST |
+				     P_GAT_PCIE_RX_EL_IDLE,
+};
+
+/*	PCI_CFG_REG_1			32 bit	Config Register 1 (Yukon-Ext only) */
+enum pci_cfg_reg1 {
+	P_CF1_DIS_REL_EVT_RST	= 1<<24, /* Dis. Rel. Event during PCIE reset */
+										/* Bit 23..21: Release Clock on Event */
+	P_CF1_REL_LDR_NOT_FIN	= 1<<23, /* EEPROM Loader Not Finished */
+	P_CF1_REL_VMAIN_AVLBL	= 1<<22, /* Vmain available */
+	P_CF1_REL_PCIE_RESET	= 1<<21, /* PCI-E reset */
+										/* Bit 20..18: Gate Clock on Event */
+	P_CF1_GAT_LDR_NOT_FIN	= 1<<20, /* EEPROM Loader Finished */
+	P_CF1_GAT_PCIE_RX_IDLE	= 1<<19, /* PCI-E Rx Electrical idle */
+	P_CF1_GAT_PCIE_RESET	= 1<<18, /* PCI-E Reset */
+	P_CF1_PRST_PHY_CLKREQ	= 1<<17, /* Enable PCI-E rst & PM2PHY gen. CLKREQ */
+	P_CF1_PCIE_RST_CLKREQ	= 1<<16, /* Enable PCI-E rst generate CLKREQ */
+
+	P_CF1_ENA_CFG_LDR_DONE	= 1<<8, /* Enable core level Config loader done */
+
+	P_CF1_ENA_TXBMU_RD_IDLE	= 1<<1, /* Enable TX BMU Read  IDLE for ASPM */
+	P_CF1_ENA_TXBMU_WR_IDLE	= 1<<0, /* Enable TX BMU Write IDLE for ASPM */
+
+	PCIE_CFG1_EVENT_CLK_D3_SET = P_CF1_DIS_REL_EVT_RST |
+					P_CF1_REL_LDR_NOT_FIN |
+					P_CF1_REL_VMAIN_AVLBL |
+					P_CF1_REL_PCIE_RESET |
+					P_CF1_GAT_LDR_NOT_FIN |
+					P_CF1_GAT_PCIE_RESET |
+					P_CF1_PRST_PHY_CLKREQ |
+					P_CF1_ENA_CFG_LDR_DONE |
+					P_CF1_ENA_TXBMU_RD_IDLE |
+					P_CF1_ENA_TXBMU_WR_IDLE,
+};
+
+/* Yukon-Optima */
+enum {
+	PSM_CONFIG_REG1_AC_PRESENT_STATUS = 1<<31,   /* AC Present Status */
+
+	PSM_CONFIG_REG1_PTP_CLK_SEL	  = 1<<29,   /* PTP Clock Select */
+	PSM_CONFIG_REG1_PTP_MODE	  = 1<<28,   /* PTP Mode */
+
+	PSM_CONFIG_REG1_MUX_PHY_LINK	  = 1<<27,   /* PHY Energy Detect Event */
+
+	PSM_CONFIG_REG1_EN_PIN63_AC_PRESENT = 1<<26,  /* Enable LED_DUPLEX for ac_present */
+	PSM_CONFIG_REG1_EN_PCIE_TIMER	  = 1<<25,    /* Enable PCIe Timer */
+	PSM_CONFIG_REG1_EN_SPU_TIMER	  = 1<<24,    /* Enable SPU Timer */
+	PSM_CONFIG_REG1_POLARITY_AC_PRESENT = 1<<23,  /* AC Present Polarity */
+
+	PSM_CONFIG_REG1_EN_AC_PRESENT	  = 1<<21,    /* Enable AC Present */
+
+	PSM_CONFIG_REG1_EN_GPHY_INT_PSM	= 1<<20,      /* Enable GPHY INT for PSM */
+	PSM_CONFIG_REG1_DIS_PSM_TIMER	= 1<<19,      /* Disable PSM Timer */
+};
+
+/* Yukon-Supreme */
+enum {
+	PSM_CONFIG_REG1_GPHY_ENERGY_STS	= 1<<31, /* GPHY Energy Detect Status */
+
+	PSM_CONFIG_REG1_UART_MODE_MSK	= 3<<29, /* UART_Mode */
+	PSM_CONFIG_REG1_CLK_RUN_ASF	= 1<<28, /* Enable Clock Free Running for ASF Subsystem */
+	PSM_CONFIG_REG1_UART_CLK_DISABLE= 1<<27, /* Disable UART clock */
+	PSM_CONFIG_REG1_VAUX_ONE	= 1<<26, /* Tie internal Vaux to 1'b1 */
+	PSM_CONFIG_REG1_UART_FC_RI_VAL	= 1<<25, /* Default value for UART_RI_n */
+	PSM_CONFIG_REG1_UART_FC_DCD_VAL	= 1<<24, /* Default value for UART_DCD_n */
+	PSM_CONFIG_REG1_UART_FC_DSR_VAL	= 1<<23, /* Default value for UART_DSR_n */
+	PSM_CONFIG_REG1_UART_FC_CTS_VAL	= 1<<22, /* Default value for UART_CTS_n */
+	PSM_CONFIG_REG1_LATCH_VAUX	= 1<<21, /* Enable Latch current Vaux_avlbl */
+	PSM_CONFIG_REG1_FORCE_TESTMODE_INPUT= 1<<20, /* Force Testmode pin as input PAD */
+	PSM_CONFIG_REG1_UART_RST	= 1<<19, /* UART_RST */
+	PSM_CONFIG_REG1_PSM_PCIE_L1_POL	= 1<<18, /* PCIE L1 Event Polarity for PSM */
+	PSM_CONFIG_REG1_TIMER_STAT	= 1<<17, /* PSM Timer Status */
+	PSM_CONFIG_REG1_GPHY_INT	= 1<<16, /* GPHY INT Status */
+	PSM_CONFIG_REG1_FORCE_TESTMODE_ZERO= 1<<15, /* Force internal Testmode as 1'b0 */
+	PSM_CONFIG_REG1_EN_INT_ASPM_CLKREQ = 1<<14, /* ENABLE INT for CLKRUN on ASPM and CLKREQ */
+	PSM_CONFIG_REG1_EN_SND_TASK_ASPM_CLKREQ	= 1<<13, /* ENABLE Snd_task for CLKRUN on ASPM and CLKREQ */
+	PSM_CONFIG_REG1_DIS_CLK_GATE_SND_TASK	= 1<<12, /* Disable CLK_GATE control snd_task */
+	PSM_CONFIG_REG1_DIS_FF_CHIAN_SND_INTA	= 1<<11, /* Disable flip-flop chain for sndmsg_inta */
+
+	PSM_CONFIG_REG1_DIS_LOADER	= 1<<9, /* Disable Loader SM after PSM Goes back to IDLE */
+	PSM_CONFIG_REG1_DO_PWDN		= 1<<8, /* Do Power Down, Start PSM Scheme */
+	PSM_CONFIG_REG1_DIS_PIG		= 1<<7, /* Disable Plug-in-Go SM after PSM Goes back to IDLE */
+	PSM_CONFIG_REG1_DIS_PERST	= 1<<6, /* Disable Internal PCIe Reset after PSM Goes back to IDLE */
+	PSM_CONFIG_REG1_EN_REG18_PD	= 1<<5, /* Enable REG18 Power Down for PSM */
+	PSM_CONFIG_REG1_EN_PSM_LOAD	= 1<<4, /* Disable EEPROM Loader after PSM Goes back to IDLE */
+	PSM_CONFIG_REG1_EN_PSM_HOT_RST	= 1<<3, /* Enable PCIe Hot Reset for PSM */
+	PSM_CONFIG_REG1_EN_PSM_PERST	= 1<<2, /* Enable PCIe Reset Event for PSM */
+	PSM_CONFIG_REG1_EN_PSM_PCIE_L1	= 1<<1, /* Enable PCIe L1 Event for PSM */
+	PSM_CONFIG_REG1_EN_PSM		= 1<<0, /* Enable PSM Scheme */
+};
+
+/*	PSM_CONFIG_REG4				0x0168	PSM Config Register 4 */
+enum {
+						/* PHY Link Detect Timer */
+	PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_MSK = 0xf<<4,
+	PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE = 4,
+
+	PSM_CONFIG_REG4_DEBUG_TIMER	    = 1<<1, /* Debug Timer */
+	PSM_CONFIG_REG4_RST_PHY_LINK_DETECT = 1<<0, /* Reset GPHY Link Detect */
+};
+
+
+#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
+			       PCI_STATUS_SIG_SYSTEM_ERROR | \
+			       PCI_STATUS_REC_MASTER_ABORT | \
+			       PCI_STATUS_REC_TARGET_ABORT | \
+			       PCI_STATUS_PARITY)
+
+enum csr_regs {
+	B0_RAP		= 0x0000,
+	B0_CTST		= 0x0004,
+
+	B0_POWER_CTRL	= 0x0007,
+	B0_ISRC		= 0x0008,
+	B0_IMSK		= 0x000c,
+	B0_HWE_ISRC	= 0x0010,
+	B0_HWE_IMSK	= 0x0014,
+
+	/* Special ISR registers (Yukon-2 only) */
+	B0_Y2_SP_ISRC2	= 0x001c,
+	B0_Y2_SP_ISRC3	= 0x0020,
+	B0_Y2_SP_EISR	= 0x0024,
+	B0_Y2_SP_LISR	= 0x0028,
+	B0_Y2_SP_ICR	= 0x002c,
+
+	B2_MAC_1	= 0x0100,
+	B2_MAC_2	= 0x0108,
+	B2_MAC_3	= 0x0110,
+	B2_CONN_TYP	= 0x0118,
+	B2_PMD_TYP	= 0x0119,
+	B2_MAC_CFG	= 0x011a,
+	B2_CHIP_ID	= 0x011b,
+	B2_E_0		= 0x011c,
+
+	B2_Y2_CLK_GATE  = 0x011d,
+	B2_Y2_HW_RES	= 0x011e,
+	B2_E_3		= 0x011f,
+	B2_Y2_CLK_CTRL	= 0x0120,
+
+	B2_TI_INI	= 0x0130,
+	B2_TI_VAL	= 0x0134,
+	B2_TI_CTRL	= 0x0138,
+	B2_TI_TEST	= 0x0139,
+
+	B2_TST_CTRL1	= 0x0158,
+	B2_TST_CTRL2	= 0x0159,
+	B2_GP_IO	= 0x015c,
+
+	B2_I2C_CTRL	= 0x0160,
+	B2_I2C_DATA	= 0x0164,
+	B2_I2C_IRQ	= 0x0168,
+	B2_I2C_SW	= 0x016c,
+
+	Y2_PEX_PHY_DATA = 0x0170,
+	Y2_PEX_PHY_ADDR = 0x0172,
+
+	B3_RAM_ADDR	= 0x0180,
+	B3_RAM_DATA_LO	= 0x0184,
+	B3_RAM_DATA_HI	= 0x0188,
+
+/* RAM Interface Registers */
+/* Yukon-2: use RAM_BUFFER() to access the RAM buffer */
+/*
+ * The HW spec calls these registers Timeout Value 0..11, but those names
+ * are not usable in SW. Note that these are NOT real timeouts; they are
+ * the number of qWords transferred continuously.
+ */
+#define RAM_BUFFER(port, reg)	((reg) | ((port) << 6))
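+/*
+ * Illustration: on Yukon-2 the second port's copy of a RAM interface
+ * register sits 0x40 above the first; e.g. RAM_BUFFER(1, B3_RI_WTO_R1)
+ * evaluates to 0x0190 | (1 << 6) = 0x01d0.
+ */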
+
+	B3_RI_WTO_R1	= 0x0190,
+	B3_RI_WTO_XA1	= 0x0191,
+	B3_RI_WTO_XS1	= 0x0192,
+	B3_RI_RTO_R1	= 0x0193,
+	B3_RI_RTO_XA1	= 0x0194,
+	B3_RI_RTO_XS1	= 0x0195,
+	B3_RI_WTO_R2	= 0x0196,
+	B3_RI_WTO_XA2	= 0x0197,
+	B3_RI_WTO_XS2	= 0x0198,
+	B3_RI_RTO_R2	= 0x0199,
+	B3_RI_RTO_XA2	= 0x019a,
+	B3_RI_RTO_XS2	= 0x019b,
+	B3_RI_TO_VAL	= 0x019c,
+	B3_RI_CTRL	= 0x01a0,
+	B3_RI_TEST	= 0x01a2,
+	B3_MA_TOINI_RX1	= 0x01b0,
+	B3_MA_TOINI_RX2	= 0x01b1,
+	B3_MA_TOINI_TX1	= 0x01b2,
+	B3_MA_TOINI_TX2	= 0x01b3,
+	B3_MA_TOVAL_RX1	= 0x01b4,
+	B3_MA_TOVAL_RX2	= 0x01b5,
+	B3_MA_TOVAL_TX1	= 0x01b6,
+	B3_MA_TOVAL_TX2	= 0x01b7,
+	B3_MA_TO_CTRL	= 0x01b8,
+	B3_MA_TO_TEST	= 0x01ba,
+	B3_MA_RCINI_RX1	= 0x01c0,
+	B3_MA_RCINI_RX2	= 0x01c1,
+	B3_MA_RCINI_TX1	= 0x01c2,
+	B3_MA_RCINI_TX2	= 0x01c3,
+	B3_MA_RCVAL_RX1	= 0x01c4,
+	B3_MA_RCVAL_RX2	= 0x01c5,
+	B3_MA_RCVAL_TX1	= 0x01c6,
+	B3_MA_RCVAL_TX2	= 0x01c7,
+	B3_MA_RC_CTRL	= 0x01c8,
+	B3_MA_RC_TEST	= 0x01ca,
+	B3_PA_TOINI_RX1	= 0x01d0,
+	B3_PA_TOINI_RX2	= 0x01d4,
+	B3_PA_TOINI_TX1	= 0x01d8,
+	B3_PA_TOINI_TX2	= 0x01dc,
+	B3_PA_TOVAL_RX1	= 0x01e0,
+	B3_PA_TOVAL_RX2	= 0x01e4,
+	B3_PA_TOVAL_TX1	= 0x01e8,
+	B3_PA_TOVAL_TX2	= 0x01ec,
+	B3_PA_CTRL	= 0x01f0,
+	B3_PA_TEST	= 0x01f2,
+
+	Y2_CFG_SPC	= 0x1c00,	/* PCI config space region */
+	Y2_CFG_AER      = 0x1d00,	/* PCI Advanced Error Report region */
+};
+
+/*	B0_CTST			24 bit	Control/Status register */
+enum {
+	Y2_VMAIN_AVAIL	= 1<<17,/* VMAIN available (YUKON-2 only) */
+	Y2_VAUX_AVAIL	= 1<<16,/* VAUX available (YUKON-2 only) */
+	Y2_HW_WOL_ON	= 1<<15,/* HW WOL On  (Yukon-EC Ultra A1 only) */
+	Y2_HW_WOL_OFF	= 1<<14,/* HW WOL Off (Yukon-EC Ultra A1 only) */
+	Y2_ASF_ENABLE	= 1<<13,/* ASF Unit Enable (YUKON-2 only) */
+	Y2_ASF_DISABLE	= 1<<12,/* ASF Unit Disable (YUKON-2 only) */
+	Y2_CLK_RUN_ENA	= 1<<11,/* CLK_RUN Enable  (YUKON-2 only) */
+	Y2_CLK_RUN_DIS	= 1<<10,/* CLK_RUN Disable (YUKON-2 only) */
+	Y2_LED_STAT_ON	= 1<<9, /* Status LED On  (YUKON-2 only) */
+	Y2_LED_STAT_OFF	= 1<<8, /* Status LED Off (YUKON-2 only) */
+
+	CS_ST_SW_IRQ	= 1<<7,	/* Set IRQ SW Request */
+	CS_CL_SW_IRQ	= 1<<6,	/* Clear IRQ SW Request */
+	CS_STOP_DONE	= 1<<5,	/* Stop Master is finished */
+	CS_STOP_MAST	= 1<<4,	/* Command Bit to stop the master */
+	CS_MRST_CLR	= 1<<3,	/* Clear Master reset	*/
+	CS_MRST_SET	= 1<<2,	/* Set Master reset	*/
+	CS_RST_CLR	= 1<<1,	/* Clear Software reset	*/
+	CS_RST_SET	= 1,	/* Set   Software reset	*/
+};
+
+/*	B0_POWER_CTRL	 8 Bit	Power Control reg (YUKON only) */
+enum {
+	PC_VAUX_ENA	= 1<<7,	/* Switch VAUX Enable  */
+	PC_VAUX_DIS	= 1<<6,	/* Switch VAUX Disable */
+	PC_VCC_ENA	= 1<<5,	/* Switch VCC Enable  */
+	PC_VCC_DIS	= 1<<4,	/* Switch VCC Disable */
+	PC_VAUX_ON	= 1<<3,	/* Switch VAUX On  */
+	PC_VAUX_OFF	= 1<<2,	/* Switch VAUX Off */
+	PC_VCC_ON	= 1<<1,	/* Switch VCC On  */
+	PC_VCC_OFF	= 1<<0,	/* Switch VCC Off */
+};
+
+/*	B2_IRQM_MSK 	32 bit	IRQ Moderation Mask */
+
+/*	B0_Y2_SP_ISRC2	32 bit	Special Interrupt Source Reg 2 */
+/*	B0_Y2_SP_ISRC3	32 bit	Special Interrupt Source Reg 3 */
+/*	B0_Y2_SP_EISR	32 bit	Enter ISR Reg */
+/*	B0_Y2_SP_LISR	32 bit	Leave ISR Reg */
+enum {
+	Y2_IS_HW_ERR	= 1<<31,	/* Interrupt HW Error */
+	Y2_IS_STAT_BMU	= 1<<30,	/* Status BMU Interrupt */
+	Y2_IS_ASF	= 1<<29,	/* ASF subsystem Interrupt */
+	Y2_IS_CPU_TO	= 1<<28,	/* CPU Timeout */
+	Y2_IS_POLL_CHK	= 1<<27,	/* Check IRQ from polling unit */
+	Y2_IS_TWSI_RDY	= 1<<26,	/* IRQ on end of TWSI Tx */
+	Y2_IS_IRQ_SW	= 1<<25,	/* SW forced IRQ	*/
+	Y2_IS_TIMINT	= 1<<24,	/* IRQ from Timer	*/
+
+	Y2_IS_IRQ_PHY2	= 1<<12,	/* Interrupt from PHY 2 */
+	Y2_IS_IRQ_MAC2	= 1<<11,	/* Interrupt from MAC 2 */
+	Y2_IS_CHK_RX2	= 1<<10,	/* Descriptor error Rx 2 */
+	Y2_IS_CHK_TXS2	= 1<<9,		/* Descriptor error TXS 2 */
+	Y2_IS_CHK_TXA2	= 1<<8,		/* Descriptor error TXA 2 */
+
+	Y2_IS_PSM_ACK	= 1<<7,		/* PSM Acknowledge (Yukon-Optima only) */
+	Y2_IS_PTP_TIST	= 1<<6,		/* PTP Time Stamp (Yukon-Optima only) */
+	Y2_IS_PHY_QLNK	= 1<<5,		/* PHY Quick Link (Yukon-Optima only) */
+
+	Y2_IS_IRQ_PHY1	= 1<<4,		/* Interrupt from PHY 1 */
+	Y2_IS_IRQ_MAC1	= 1<<3,		/* Interrupt from MAC 1 */
+	Y2_IS_CHK_RX1	= 1<<2,		/* Descriptor error Rx 1 */
+	Y2_IS_CHK_TXS1	= 1<<1,		/* Descriptor error TXS 1 */
+	Y2_IS_CHK_TXA1	= 1<<0,		/* Descriptor error TXA 1 */
+
+	Y2_IS_BASE	= Y2_IS_HW_ERR | Y2_IS_STAT_BMU,
+	Y2_IS_PORT_1	= Y2_IS_IRQ_PHY1 | Y2_IS_IRQ_MAC1
+		          | Y2_IS_CHK_TXA1 | Y2_IS_CHK_RX1,
+	Y2_IS_PORT_2	= Y2_IS_IRQ_PHY2 | Y2_IS_IRQ_MAC2
+			  | Y2_IS_CHK_TXA2 | Y2_IS_CHK_RX2,
+	Y2_IS_ERROR     = Y2_IS_HW_ERR |
+			  Y2_IS_IRQ_MAC1 | Y2_IS_CHK_TXA1 | Y2_IS_CHK_RX1 |
+			  Y2_IS_IRQ_MAC2 | Y2_IS_CHK_TXA2 | Y2_IS_CHK_RX2,
+};
+
+/*	B2_IRQM_HWE_MSK	32 bit	IRQ Moderation HW Error Mask */
+enum {
+	IS_ERR_MSK	= 0x00003fff,/* 		All Error bits */
+
+	IS_IRQ_TIST_OV	= 1<<13, /* Time Stamp Timer Overflow (YUKON only) */
+	IS_IRQ_SENSOR	= 1<<12, /* IRQ from Sensor (YUKON only) */
+	IS_IRQ_MST_ERR	= 1<<11, /* IRQ master error detected */
+	IS_IRQ_STAT	= 1<<10, /* IRQ status exception */
+	IS_NO_STAT_M1	= 1<<9,	/* No Rx Status from MAC 1 */
+	IS_NO_STAT_M2	= 1<<8,	/* No Rx Status from MAC 2 */
+	IS_NO_TIST_M1	= 1<<7,	/* No Time Stamp from MAC 1 */
+	IS_NO_TIST_M2	= 1<<6,	/* No Time Stamp from MAC 2 */
+	IS_RAM_RD_PAR	= 1<<5,	/* RAM Read  Parity Error */
+	IS_RAM_WR_PAR	= 1<<4,	/* RAM Write Parity Error */
+	IS_M1_PAR_ERR	= 1<<3,	/* MAC 1 Parity Error */
+	IS_M2_PAR_ERR	= 1<<2,	/* MAC 2 Parity Error */
+	IS_R1_PAR_ERR	= 1<<1,	/* Queue R1 Parity Error */
+	IS_R2_PAR_ERR	= 1<<0,	/* Queue R2 Parity Error */
+};
+
+/* Hardware error interrupt mask for Yukon 2 */
+enum {
+	Y2_IS_TIST_OV	= 1<<29,/* Time Stamp Timer overflow interrupt */
+	Y2_IS_SENSOR	= 1<<28, /* Sensor interrupt */
+	Y2_IS_MST_ERR	= 1<<27, /* Master error interrupt */
+	Y2_IS_IRQ_STAT	= 1<<26, /* Status exception interrupt */
+	Y2_IS_PCI_EXP	= 1<<25, /* PCI-Express interrupt */
+	Y2_IS_PCI_NEXP	= 1<<24, /* PCI-Express error similar to PCI error */
+						/* Link 2 */
+	Y2_IS_PAR_RD2	= 1<<13, /* Read RAM parity error interrupt */
+	Y2_IS_PAR_WR2	= 1<<12, /* Write RAM parity error interrupt */
+	Y2_IS_PAR_MAC2	= 1<<11, /* MAC hardware fault interrupt */
+	Y2_IS_PAR_RX2	= 1<<10, /* Parity Error Rx Queue 2 */
+	Y2_IS_TCP_TXS2	= 1<<9, /* TCP length mismatch sync Tx queue IRQ */
+	Y2_IS_TCP_TXA2	= 1<<8, /* TCP length mismatch async Tx queue IRQ */
+						/* Link 1 */
+	Y2_IS_PAR_RD1	= 1<<5, /* Read RAM parity error interrupt */
+	Y2_IS_PAR_WR1	= 1<<4, /* Write RAM parity error interrupt */
+	Y2_IS_PAR_MAC1	= 1<<3, /* MAC hardware fault interrupt */
+	Y2_IS_PAR_RX1	= 1<<2, /* Parity Error Rx Queue 1 */
+	Y2_IS_TCP_TXS1	= 1<<1, /* TCP length mismatch sync Tx queue IRQ */
+	Y2_IS_TCP_TXA1	= 1<<0, /* TCP length mismatch async Tx queue IRQ */
+
+	Y2_HWE_L1_MASK	= Y2_IS_PAR_RD1 | Y2_IS_PAR_WR1 | Y2_IS_PAR_MAC1 |
+			  Y2_IS_PAR_RX1 | Y2_IS_TCP_TXS1| Y2_IS_TCP_TXA1,
+	Y2_HWE_L2_MASK	= Y2_IS_PAR_RD2 | Y2_IS_PAR_WR2 | Y2_IS_PAR_MAC2 |
+			  Y2_IS_PAR_RX2 | Y2_IS_TCP_TXS2| Y2_IS_TCP_TXA2,
+
+	Y2_HWE_ALL_MASK	= Y2_IS_TIST_OV | Y2_IS_MST_ERR | Y2_IS_IRQ_STAT |
+			  Y2_HWE_L1_MASK | Y2_HWE_L2_MASK,
+};
+
+/*	B28_DPT_CTRL	 8 bit	Descriptor Poll Timer Ctrl Reg */
+enum {
+	DPT_START	= 1<<1,
+	DPT_STOP	= 1<<0,
+};
+
+/*	B2_TST_CTRL1	 8 bit	Test Control Register 1 */
+enum {
+	TST_FRC_DPERR_MR = 1<<7, /* force DATAPERR on MST RD */
+	TST_FRC_DPERR_MW = 1<<6, /* force DATAPERR on MST WR */
+	TST_FRC_DPERR_TR = 1<<5, /* force DATAPERR on TRG RD */
+	TST_FRC_DPERR_TW = 1<<4, /* force DATAPERR on TRG WR */
+	TST_FRC_APERR_M	 = 1<<3, /* force ADDRPERR on MST */
+	TST_FRC_APERR_T	 = 1<<2, /* force ADDRPERR on TRG */
+	TST_CFG_WRITE_ON = 1<<1, /* Enable  Config Reg WR */
+	TST_CFG_WRITE_OFF= 1<<0, /* Disable Config Reg WR */
+};
+
+/* 	B2_GP_IO */
+enum {
+	GLB_GPIO_CLK_DEB_ENA = 1<<31,	/* Clock Debug Enable */
+	GLB_GPIO_CLK_DBG_MSK = 0xf<<26, /* Clock Debug */
+
+	GLB_GPIO_INT_RST_D3_DIS = 1<<15, /* Disable Internal Reset After D3 to D0 */
+	GLB_GPIO_LED_PAD_SPEED_UP = 1<<14, /* LED PAD Speed Up */
+	GLB_GPIO_STAT_RACE_DIS	= 1<<13, /* Status Race Disable */
+	GLB_GPIO_TEST_SEL_MSK	= 3<<11, /* Testmode Select */
+	GLB_GPIO_TEST_SEL_BASE	= 1<<11,
+	GLB_GPIO_RAND_ENA	= 1<<10, /* Random Enable */
+	GLB_GPIO_RAND_BIT_1	= 1<<9,  /* Random Bit 1 */
+};
+
+/*	B2_MAC_CFG		 8 bit	MAC Configuration / Chip Revision */
+enum {
+	CFG_CHIP_R_MSK	  = 0xf<<4,	/* Bit 7.. 4: Chip Revision */
+					/* Bit 3.. 2:	reserved */
+	CFG_DIS_M2_CLK	  = 1<<1,	/* Disable Clock for 2nd MAC */
+	CFG_SNG_MAC	  = 1<<0,	/* MAC Config: 0=2 MACs / 1=1 MAC */
+};
+
+/*	B2_CHIP_ID		 8 bit 	Chip Identification Number */
+enum {
+	CHIP_ID_YUKON_XL   = 0xb3, /* YUKON-2 XL */
+	CHIP_ID_YUKON_EC_U = 0xb4, /* YUKON-2 EC Ultra */
+	CHIP_ID_YUKON_EX   = 0xb5, /* YUKON-2 Extreme */
+	CHIP_ID_YUKON_EC   = 0xb6, /* YUKON-2 EC */
+	CHIP_ID_YUKON_FE   = 0xb7, /* YUKON-2 FE */
+	CHIP_ID_YUKON_FE_P = 0xb8, /* YUKON-2 FE+ */
+	CHIP_ID_YUKON_SUPR = 0xb9, /* YUKON-2 Supreme */
+	CHIP_ID_YUKON_UL_2 = 0xba, /* YUKON-2 Ultra 2 */
+	CHIP_ID_YUKON_OPT  = 0xbc, /* YUKON-2 Optima */
+	CHIP_ID_YUKON_PRM  = 0xbd, /* YUKON-2 Optima Prime */
+	CHIP_ID_YUKON_OP_2 = 0xbe, /* YUKON-2 Optima 2 */
+};
+
+enum yukon_xl_rev {
+	CHIP_REV_YU_XL_A0  = 0,
+	CHIP_REV_YU_XL_A1  = 1,
+	CHIP_REV_YU_XL_A2  = 2,
+	CHIP_REV_YU_XL_A3  = 3,
+};
+
+enum yukon_ec_rev {
+	CHIP_REV_YU_EC_A1    = 0,  /* Chip Rev. for Yukon-EC A1/A0 */
+	CHIP_REV_YU_EC_A2    = 1,  /* Chip Rev. for Yukon-EC A2 */
+	CHIP_REV_YU_EC_A3    = 2,  /* Chip Rev. for Yukon-EC A3 */
+};
+enum yukon_ec_u_rev {
+	CHIP_REV_YU_EC_U_A0  = 1,
+	CHIP_REV_YU_EC_U_A1  = 2,
+	CHIP_REV_YU_EC_U_B0  = 3,
+	CHIP_REV_YU_EC_U_B1  = 5,
+};
+enum yukon_fe_rev {
+	CHIP_REV_YU_FE_A1    = 1,
+	CHIP_REV_YU_FE_A2    = 2,
+};
+enum yukon_fe_p_rev {
+	CHIP_REV_YU_FE2_A0   = 0,
+};
+enum yukon_ex_rev {
+	CHIP_REV_YU_EX_A0    = 1,
+	CHIP_REV_YU_EX_B0    = 2,
+};
+enum yukon_supr_rev {
+	CHIP_REV_YU_SU_A0    = 0,
+	CHIP_REV_YU_SU_B0    = 1,
+	CHIP_REV_YU_SU_B1    = 3,
+};
+
+enum yukon_prm_rev {
+	CHIP_REV_YU_PRM_Z1   = 1,
+	CHIP_REV_YU_PRM_A0   = 2,
+};
+
+/*	B2_Y2_CLK_GATE	 8 bit	Clock Gating (Yukon-2 only) */
+enum {
+	Y2_STATUS_LNK2_INAC	= 1<<7, /* Status Link 2 inactive (0 = active) */
+	Y2_CLK_GAT_LNK2_DIS	= 1<<6, /* Disable clock gating Link 2 */
+	Y2_COR_CLK_LNK2_DIS	= 1<<5, /* Disable Core clock Link 2 */
+	Y2_PCI_CLK_LNK2_DIS	= 1<<4, /* Disable PCI clock Link 2 */
+	Y2_STATUS_LNK1_INAC	= 1<<3, /* Status Link 1 inactive (0 = active) */
+	Y2_CLK_GAT_LNK1_DIS	= 1<<2, /* Disable clock gating Link 1 */
+	Y2_COR_CLK_LNK1_DIS	= 1<<1, /* Disable Core clock Link 1 */
+	Y2_PCI_CLK_LNK1_DIS	= 1<<0, /* Disable PCI clock Link 1 */
+};
+
+/*	B2_Y2_HW_RES	8 bit	HW Resources (Yukon-2 only) */
+enum {
+	CFG_LED_MODE_MSK	= 7<<2,	/* Bit  4.. 2:	LED Mode Mask */
+	CFG_LINK_2_AVAIL	= 1<<1,	/* Link 2 available */
+	CFG_LINK_1_AVAIL	= 1<<0,	/* Link 1 available */
+};
+#define CFG_LED_MODE(x)		(((x) & CFG_LED_MODE_MSK) >> 2)
+#define CFG_DUAL_MAC_MSK	(CFG_LINK_2_AVAIL | CFG_LINK_1_AVAIL)
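+/*
+ * Illustration: CFG_LED_MODE() extracts bits 4..2 of B2_Y2_HW_RES,
+ * e.g. CFG_LED_MODE(0x1d) evaluates to (0x1d & 0x1c) >> 2 = 7; a
+ * dual-MAC chip has (hwres & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK.
+ */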
+
+
+/* B2_Y2_CLK_CTRL	32 bit	Clock Frequency Control Register (Yukon-2/EC) */
+enum {
+	Y2_CLK_DIV_VAL_MSK	= 0xff<<16,/* Bit 23..16: Clock Divisor Value */
+#define	Y2_CLK_DIV_VAL(x)	(((x)<<16) & Y2_CLK_DIV_VAL_MSK)
+	Y2_CLK_DIV_VAL2_MSK	= 7<<21,   /* Bit 23..21: Clock Divisor Value */
+	Y2_CLK_SELECT2_MSK	= 0x1f<<16,/* Bit 20..16: Clock Select */
+#define Y2_CLK_DIV_VAL_2(x)	(((x)<<21) & Y2_CLK_DIV_VAL2_MSK)
+#define Y2_CLK_SEL_VAL_2(x)	(((x)<<16) & Y2_CLK_SELECT2_MSK)
+	Y2_CLK_DIV_ENA		= 1<<1, /* Enable  Core Clock Division */
+	Y2_CLK_DIV_DIS		= 1<<0,	/* Disable Core Clock Division */
+};
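+/*
+ * Illustration: Y2_CLK_DIV_VAL() places the divisor into bits 23..16,
+ * e.g. Y2_CLK_DIV_VAL(4) evaluates to 0x00040000.
+ */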
+
+/*	B2_TI_CTRL		 8 bit	Timer control */
+/*	B2_IRQM_CTRL	 8 bit	IRQ Moderation Timer Control */
+enum {
+	TIM_START	= 1<<2,	/* Start Timer */
+	TIM_STOP	= 1<<1,	/* Stop  Timer */
+	TIM_CLR_IRQ	= 1<<0,	/* Clear Timer IRQ (!IRQM) */
+};
+
+/*	B2_TI_TEST		 8 Bit	Timer Test */
+/*	B2_IRQM_TEST	 8 bit	IRQ Moderation Timer Test */
+/*	B28_DPT_TST		 8 bit	Descriptor Poll Timer Test Reg */
+enum {
+	TIM_T_ON	= 1<<2,	/* Test mode on */
+	TIM_T_OFF	= 1<<1,	/* Test mode off */
+	TIM_T_STEP	= 1<<0,	/* Test step */
+};
+
+/*	Y2_PEX_PHY_ADDR/DATA		PEX PHY address and data reg  (Yukon-2 only) */
+enum {
+	PEX_RD_ACCESS	= 1<<31, /* Access Mode Read = 1, Write = 0 */
+	PEX_DB_ACCESS	= 1<<30, /* Access to debug register */
+};
+
+/*	B3_RAM_ADDR		32 bit	RAM Address, to read or write */
+					/* Bit 31..19:	reserved */
+#define RAM_ADR_RAN	0x0007ffffL	/* Bit 18.. 0:	RAM Address Range */
+/* RAM Interface Registers */
+
+/*	B3_RI_CTRL		16 bit	RAM Interface Control Register */
+enum {
+	RI_CLR_RD_PERR	= 1<<9,	/* Clear IRQ RAM Read Parity Err */
+	RI_CLR_WR_PERR	= 1<<8,	/* Clear IRQ RAM Write Parity Err*/
+
+	RI_RST_CLR	= 1<<1,	/* Clear RAM Interface Reset */
+	RI_RST_SET	= 1<<0,	/* Set   RAM Interface Reset */
+};
+
+#define SK_RI_TO_53	36		/* RAM interface timeout */
+
+
+/* Port related registers FIFO, and Arbiter */
+#define SK_REG(port,reg)	(((port)<<7)+(reg))
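+/*
+ * Illustration: the per-port register blocks are 0x80 apart, e.g.
+ * SK_REG(1, TXA_CTRL) evaluates to (1 << 7) + 0x0210 = 0x0290, the
+ * Tx Arbiter Control register of the second MAC.
+ */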
+
+/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */
+/*	TXA_ITI_INI		32 bit	Tx Arb Interval Timer Init Val */
+/*	TXA_ITI_VAL		32 bit	Tx Arb Interval Timer Value */
+/*	TXA_LIM_INI		32 bit	Tx Arb Limit Counter Init Val */
+/*	TXA_LIM_VAL		32 bit	Tx Arb Limit Counter Value */
+
+#define TXA_MAX_VAL	0x00ffffffUL	/* Bit 23.. 0:	Max TXA Timer/Cnt Val */
+
+/*	TXA_CTRL		 8 bit	Tx Arbiter Control Register */
+enum {
+	TXA_ENA_FSYNC	= 1<<7,	/* Enable  force of sync Tx queue */
+	TXA_DIS_FSYNC	= 1<<6,	/* Disable force of sync Tx queue */
+	TXA_ENA_ALLOC	= 1<<5,	/* Enable  alloc of free bandwidth */
+	TXA_DIS_ALLOC	= 1<<4,	/* Disable alloc of free bandwidth */
+	TXA_START_RC	= 1<<3,	/* Start sync Rate Control */
+	TXA_STOP_RC	= 1<<2,	/* Stop  sync Rate Control */
+	TXA_ENA_ARB	= 1<<1,	/* Enable  Tx Arbiter */
+	TXA_DIS_ARB	= 1<<0,	/* Disable Tx Arbiter */
+};
+
+/*
+ *	Bank 4 - 5
+ */
+/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */
+enum {
+	TXA_ITI_INI	= 0x0200,/* 32 bit	Tx Arb Interval Timer Init Val*/
+	TXA_ITI_VAL	= 0x0204,/* 32 bit	Tx Arb Interval Timer Value */
+	TXA_LIM_INI	= 0x0208,/* 32 bit	Tx Arb Limit Counter Init Val */
+	TXA_LIM_VAL	= 0x020c,/* 32 bit	Tx Arb Limit Counter Value */
+	TXA_CTRL	= 0x0210,/*  8 bit	Tx Arbiter Control Register */
+	TXA_TEST	= 0x0211,/*  8 bit	Tx Arbiter Test Register */
+	TXA_STAT	= 0x0212,/*  8 bit	Tx Arbiter Status Register */
+
+	RSS_KEY		= 0x0220, /* RSS Key setup */
+	RSS_CFG		= 0x0248, /* RSS Configuration */
+};
+
+enum {
+	HASH_TCP_IPV6_EX_CTRL	= 1<<5,
+	HASH_IPV6_EX_CTRL	= 1<<4,
+	HASH_TCP_IPV6_CTRL	= 1<<3,
+	HASH_IPV6_CTRL		= 1<<2,
+	HASH_TCP_IPV4_CTRL	= 1<<1,
+	HASH_IPV4_CTRL		= 1<<0,
+
+	HASH_ALL		= 0x3f,
+};
+
+enum {
+	B6_EXT_REG	= 0x0300,/* External registers (GENESIS only) */
+	B7_CFG_SPC	= 0x0380,/* copy of the Configuration register */
+	B8_RQ1_REGS	= 0x0400,/* Receive Queue 1 */
+	B8_RQ2_REGS	= 0x0480,/* Receive Queue 2 */
+	B8_TS1_REGS	= 0x0600,/* Transmit sync queue 1 */
+	B8_TA1_REGS	= 0x0680,/* Transmit async queue 1 */
+	B8_TS2_REGS	= 0x0700,/* Transmit sync queue 2 */
+	B8_TA2_REGS	= 0x0780,/* Transmit async queue 2 */
+	B16_RAM_REGS	= 0x0800,/* RAM Buffer Registers */
+};
+
+/* Queue Register Offsets, use Q_ADDR() to access */
+enum {
+	B8_Q_REGS = 0x0400, /* base of Queue registers */
+	Q_D	= 0x00,	/* 8*32	bit	Current Descriptor */
+	Q_VLAN  = 0x20, /* 16 bit	Current VLAN Tag */
+	Q_DONE	= 0x24,	/* 16 bit	Done Index */
+	Q_AC_L	= 0x28,	/* 32 bit	Current Address Counter Low dWord */
+	Q_AC_H	= 0x2c,	/* 32 bit	Current Address Counter High dWord */
+	Q_BC	= 0x30,	/* 32 bit	Current Byte Counter */
+	Q_CSR	= 0x34,	/* 32 bit	BMU Control/Status Register */
+	Q_TEST	= 0x38,	/* 32 bit	Test/Control Register */
+
+/* Yukon-2 */
+	Q_WM	= 0x40,	/* 16 bit	FIFO Watermark */
+	Q_AL	= 0x42,	/*  8 bit	FIFO Alignment */
+	Q_RSP	= 0x44,	/* 16 bit	FIFO Read Shadow Pointer */
+	Q_RSL	= 0x46,	/*  8 bit	FIFO Read Shadow Level */
+	Q_RP	= 0x48,	/*  8 bit	FIFO Read Pointer */
+	Q_RL	= 0x4a,	/*  8 bit	FIFO Read Level */
+	Q_WP	= 0x4c,	/*  8 bit	FIFO Write Pointer */
+	Q_WSP	= 0x4d,	/*  8 bit	FIFO Write Shadow Pointer */
+	Q_WL	= 0x4e,	/*  8 bit	FIFO Write Level */
+	Q_WSL	= 0x4f,	/*  8 bit	FIFO Write Shadow Level */
+};
+#define Q_ADDR(reg, offs) (B8_Q_REGS + (reg) + (offs))
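+/*
+ * Illustration: Q_ADDR(Q_R2, Q_CSR) evaluates to
+ * 0x0400 + 0x0080 + 0x34 = 0x04b4, the BMU Control/Status register
+ * of receive queue 2.
+ */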
+
+/*	Q_TEST				32 bit	Test Register */
+enum {
+	/* Transmit */
+	F_TX_CHK_AUTO_OFF = 1<<31, /* Tx checksum auto calc off (Yukon EX) */
+	F_TX_CHK_AUTO_ON  = 1<<30, /* Tx checksum auto calc on (Yukon EX) */
+
+	/* Receive */
+	F_M_RX_RAM_DIS	= 1<<24, /* MAC Rx RAM Read Port disable */
+
+	/* Hardware testbits not used */
+};
+
+/* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/
+enum {
+	Y2_B8_PREF_REGS		= 0x0450,
+
+	PREF_UNIT_CTRL		= 0x00,	/* 32 bit	Control register */
+	PREF_UNIT_LAST_IDX	= 0x04,	/* 16 bit	Last Index */
+	PREF_UNIT_ADDR_LO	= 0x08,	/* 32 bit	List start addr, low part */
+	PREF_UNIT_ADDR_HI	= 0x0c,	/* 32 bit	List start addr, high part*/
+	PREF_UNIT_GET_IDX	= 0x10,	/* 16 bit	Get Index */
+	PREF_UNIT_PUT_IDX	= 0x14,	/* 16 bit	Put Index */
+	PREF_UNIT_FIFO_WP	= 0x20,	/*  8 bit	FIFO write pointer */
+	PREF_UNIT_FIFO_RP	= 0x24,	/*  8 bit	FIFO read pointer */
+	PREF_UNIT_FIFO_WM	= 0x28,	/*  8 bit	FIFO watermark */
+	PREF_UNIT_FIFO_LEV	= 0x2c,	/*  8 bit	FIFO level */
+
+	PREF_UNIT_MASK_IDX	= 0x0fff,
+};
+#define Y2_QADDR(q,reg)		(Y2_B8_PREF_REGS + (q) + (reg))
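+/*
+ * Illustration: Y2_QADDR(Q_XA1, PREF_UNIT_PUT_IDX) evaluates to
+ * 0x0450 + 0x0280 + 0x14 = 0x06e4, the prefetch unit Put Index of
+ * async Tx queue 1.
+ */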
+
+/* RAM Buffer Register Offsets */
+enum {
+
+	RB_START	= 0x00,/* 32 bit	RAM Buffer Start Address */
+	RB_END	= 0x04,/* 32 bit	RAM Buffer End Address */
+	RB_WP	= 0x08,/* 32 bit	RAM Buffer Write Pointer */
+	RB_RP	= 0x0c,/* 32 bit	RAM Buffer Read Pointer */
+	RB_RX_UTPP	= 0x10,/* 32 bit	Rx Upper Threshold, Pause Packet */
+	RB_RX_LTPP	= 0x14,/* 32 bit	Rx Lower Threshold, Pause Packet */
+	RB_RX_UTHP	= 0x18,/* 32 bit	Rx Upper Threshold, High Prio */
+	RB_RX_LTHP	= 0x1c,/* 32 bit	Rx Lower Threshold, High Prio */
+	/* 0x10 - 0x1f:	reserved at Tx RAM Buffer Registers */
+	RB_PC	= 0x20,/* 32 bit	RAM Buffer Packet Counter */
+	RB_LEV	= 0x24,/* 32 bit	RAM Buffer Level Register */
+	RB_CTRL	= 0x28,/* 32 bit	RAM Buffer Control Register */
+	RB_TST1	= 0x29,/*  8 bit	RAM Buffer Test Register 1 */
+	RB_TST2	= 0x2a,/*  8 bit	RAM Buffer Test Register 2 */
+};
+
+/* Receive and Transmit Queues */
+enum {
+	Q_R1	= 0x0000,	/* Receive Queue 1 */
+	Q_R2	= 0x0080,	/* Receive Queue 2 */
+	Q_XS1	= 0x0200,	/* Synchronous Transmit Queue 1 */
+	Q_XA1	= 0x0280,	/* Asynchronous Transmit Queue 1 */
+	Q_XS2	= 0x0300,	/* Synchronous Transmit Queue 2 */
+	Q_XA2	= 0x0380,	/* Asynchronous Transmit Queue 2 */
+};
+
+/* Different PHY Types */
+enum {
+	PHY_ADDR_MARV	= 0,
+};
+
+#define RB_ADDR(offs, queue) ((u16) B16_RAM_REGS + (queue) + (offs))
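+/*
+ * Illustration: RB_ADDR(RB_START, Q_R2) evaluates to
+ * 0x0800 + 0x0080 + 0x00 = 0x0880, the RAM buffer start address of
+ * receive queue 2.
+ */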
+
+
+enum {
+	LNK_SYNC_INI	= 0x0c30,/* 32 bit	Link Sync Cnt Init Value */
+	LNK_SYNC_VAL	= 0x0c34,/* 32 bit	Link Sync Cnt Current Value */
+	LNK_SYNC_CTRL	= 0x0c38,/*  8 bit	Link Sync Cnt Control Register */
+	LNK_SYNC_TST	= 0x0c39,/*  8 bit	Link Sync Cnt Test Register */
+
+	LNK_LED_REG	= 0x0c3c,/*  8 bit	Link LED Register */
+
+/* Receive GMAC FIFO (YUKON and Yukon-2) */
+
+	RX_GMF_EA	= 0x0c40,/* 32 bit	Rx GMAC FIFO End Address */
+	RX_GMF_AF_THR	= 0x0c44,/* 32 bit	Rx GMAC FIFO Almost Full Thresh. */
+	RX_GMF_CTRL_T	= 0x0c48,/* 32 bit	Rx GMAC FIFO Control/Test */
+	RX_GMF_FL_MSK	= 0x0c4c,/* 32 bit	Rx GMAC FIFO Flush Mask */
+	RX_GMF_FL_THR	= 0x0c50,/* 16 bit	Rx GMAC FIFO Flush Threshold */
+	RX_GMF_FL_CTRL	= 0x0c52,/* 16 bit	Rx GMAC FIFO Flush Control */
+	RX_GMF_TR_THR	= 0x0c54,/* 32 bit	Rx Truncation Threshold (Yukon-2) */
+	RX_GMF_UP_THR	= 0x0c58,/* 16 bit	Rx Upper Pause Thr (Yukon-EC_U) */
+	RX_GMF_LP_THR	= 0x0c5a,/* 16 bit	Rx Lower Pause Thr (Yukon-EC_U) */
+	RX_GMF_VLAN	= 0x0c5c,/* 32 bit	Rx VLAN Type Register (Yukon-2) */
+	RX_GMF_WP	= 0x0c60,/* 32 bit	Rx GMAC FIFO Write Pointer */
+
+	RX_GMF_WLEV	= 0x0c68,/* 32 bit	Rx GMAC FIFO Write Level */
+
+	RX_GMF_RP	= 0x0c70,/* 32 bit	Rx GMAC FIFO Read Pointer */
+
+	RX_GMF_RLEV	= 0x0c78,/* 32 bit	Rx GMAC FIFO Read Level */
+};
+
+
+/*	Q_BC			32 bit	Current Byte Counter */
+
+/* BMU Control Status Registers */
+/*	B0_R1_CSR		32 bit	BMU Ctrl/Stat Rx Queue 1 */
+/*	B0_R2_CSR		32 bit	BMU Ctrl/Stat Rx Queue 2 */
+/*	B0_XA1_CSR		32 bit	BMU Ctrl/Stat Sync Tx Queue 1 */
+/*	B0_XS1_CSR		32 bit	BMU Ctrl/Stat Async Tx Queue 1 */
+/*	B0_XA2_CSR		32 bit	BMU Ctrl/Stat Sync Tx Queue 2 */
+/*	B0_XS2_CSR		32 bit	BMU Ctrl/Stat Async Tx Queue 2 */
+/*	Q_CSR			32 bit	BMU Control/Status Register */
+
+/* Rx BMU Control / Status Registers (Yukon-2) */
+enum {
+	BMU_IDLE	= 1<<31, /* BMU Idle State */
+	BMU_RX_TCP_PKT	= 1<<30, /* Rx TCP Packet (when RSS Hash enabled) */
+	BMU_RX_IP_PKT	= 1<<29, /* Rx IP  Packet (when RSS Hash enabled) */
+
+	BMU_ENA_RX_RSS_HASH = 1<<15, /* Enable  Rx RSS Hash */
+	BMU_DIS_RX_RSS_HASH = 1<<14, /* Disable Rx RSS Hash */
+	BMU_ENA_RX_CHKSUM = 1<<13, /* Enable  Rx TCP/IP Checksum Check */
+	BMU_DIS_RX_CHKSUM = 1<<12, /* Disable Rx TCP/IP Checksum Check */
+	BMU_CLR_IRQ_PAR	= 1<<11, /* Clear IRQ on Parity errors (Rx) */
+	BMU_CLR_IRQ_TCP	= 1<<11, /* Clear IRQ on TCP segmentation error (Tx) */
+	BMU_CLR_IRQ_CHK	= 1<<10, /* Clear IRQ Check */
+	BMU_STOP	= 1<<9, /* Stop  Rx/Tx Queue */
+	BMU_START	= 1<<8, /* Start Rx/Tx Queue */
+	BMU_FIFO_OP_ON	= 1<<7, /* FIFO Operational On */
+	BMU_FIFO_OP_OFF	= 1<<6, /* FIFO Operational Off */
+	BMU_FIFO_ENA	= 1<<5, /* Enable FIFO */
+	BMU_FIFO_RST	= 1<<4, /* Reset  FIFO */
+	BMU_OP_ON	= 1<<3, /* BMU Operational On */
+	BMU_OP_OFF	= 1<<2, /* BMU Operational Off */
+	BMU_RST_CLR	= 1<<1, /* Clear BMU Reset (Enable) */
+	BMU_RST_SET	= 1<<0, /* Set   BMU Reset */
+
+	BMU_CLR_RESET	= BMU_FIFO_RST | BMU_OP_OFF | BMU_RST_CLR,
+	BMU_OPER_INIT	= BMU_CLR_IRQ_PAR | BMU_CLR_IRQ_CHK | BMU_START |
+			  BMU_FIFO_ENA | BMU_OP_ON,
+
+	BMU_WM_DEFAULT = 0x600,
+	BMU_WM_PEX     = 0x80,
+};
+
+/* Tx BMU Control / Status Registers (Yukon-2) */
+								/* Bit 31: same as for Rx */
+enum {
+	BMU_TX_IPIDINCR_ON	= 1<<13, /* Enable  IP ID Increment */
+	BMU_TX_IPIDINCR_OFF	= 1<<12, /* Disable IP ID Increment */
+	BMU_TX_CLR_IRQ_TCP	= 1<<11, /* Clear IRQ on TCP segment length mismatch */
+};
+
+/*	TBMU_TEST			0x06B8	Transmit BMU Test Register */
+enum {
+	TBMU_TEST_BMU_TX_CHK_AUTO_OFF		= 1<<31, /* BMU Tx Checksum Auto Calculation Disable */
+	TBMU_TEST_BMU_TX_CHK_AUTO_ON		= 1<<30, /* BMU Tx Checksum Auto Calculation Enable */
+	TBMU_TEST_HOME_ADD_PAD_FIX1_EN		= 1<<29, /* Home Address Padding FIX1 Enable */
+	TBMU_TEST_HOME_ADD_PAD_FIX1_DIS		= 1<<28, /* Home Address Padding FIX1 Disable */
+	TBMU_TEST_ROUTING_ADD_FIX_EN		= 1<<27, /* Routing Address Fix Enable */
+	TBMU_TEST_ROUTING_ADD_FIX_DIS		= 1<<26, /* Routing Address Fix Disable */
+	TBMU_TEST_HOME_ADD_FIX_EN		= 1<<25, /* Home address checksum fix enable */
+	TBMU_TEST_HOME_ADD_FIX_DIS		= 1<<24, /* Home address checksum fix disable */
+
+	TBMU_TEST_TEST_RSPTR_ON			= 1<<22, /* Testmode Shadow Read Ptr On */
+	TBMU_TEST_TEST_RSPTR_OFF		= 1<<21, /* Testmode Shadow Read Ptr Off */
+	TBMU_TEST_TESTSTEP_RSPTR		= 1<<20, /* Teststep Shadow Read Ptr */
+
+	TBMU_TEST_TEST_RPTR_ON			= 1<<18, /* Testmode Read Ptr On */
+	TBMU_TEST_TEST_RPTR_OFF			= 1<<17, /* Testmode Read Ptr Off */
+	TBMU_TEST_TESTSTEP_RPTR			= 1<<16, /* Teststep Read Ptr */
+
+	TBMU_TEST_TEST_WSPTR_ON			= 1<<14, /* Testmode Shadow Write Ptr On */
+	TBMU_TEST_TEST_WSPTR_OFF		= 1<<13, /* Testmode Shadow Write Ptr Off */
+	TBMU_TEST_TESTSTEP_WSPTR		= 1<<12, /* Teststep Shadow Write Ptr */
+
+	TBMU_TEST_TEST_WPTR_ON			= 1<<10, /* Testmode Write Ptr On */
+	TBMU_TEST_TEST_WPTR_OFF			= 1<<9, /* Testmode Write Ptr Off */
+	TBMU_TEST_TESTSTEP_WPTR			= 1<<8, /* Teststep Write Ptr */
+
+	TBMU_TEST_TEST_REQ_NB_ON		= 1<<6, /* Testmode Req Nbytes/Addr On */
+	TBMU_TEST_TEST_REQ_NB_OFF		= 1<<5, /* Testmode Req Nbytes/Addr Off */
+	TBMU_TEST_TESTSTEP_REQ_NB		= 1<<4, /* Teststep Req Nbytes/Addr */
+
+	TBMU_TEST_TEST_DONE_IDX_ON		= 1<<2, /* Testmode Done Index On */
+	TBMU_TEST_TEST_DONE_IDX_OFF		= 1<<1, /* Testmode Done Index Off */
+	TBMU_TEST_TESTSTEP_DONE_IDX		= 1<<0,	/* Teststep Done Index */
+};
+
+/* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/
+/* PREF_UNIT_CTRL	32 bit	Prefetch Control register */
+enum {
+	PREF_UNIT_OP_ON		= 1<<3,	/* prefetch unit operational */
+	PREF_UNIT_OP_OFF	= 1<<2,	/* prefetch unit not operational */
+	PREF_UNIT_RST_CLR	= 1<<1,	/* Clear Prefetch Unit Reset */
+	PREF_UNIT_RST_SET	= 1<<0,	/* Set   Prefetch Unit Reset */
+};
+
+/* RAM Buffer Register Offsets, use RB_ADDR(Queue, Offs) to access */
+/*	RB_START		32 bit	RAM Buffer Start Address */
+/*	RB_END			32 bit	RAM Buffer End Address */
+/*	RB_WP			32 bit	RAM Buffer Write Pointer */
+/*	RB_RP			32 bit	RAM Buffer Read Pointer */
+/*	RB_RX_UTPP		32 bit	Rx Upper Threshold, Pause Pack */
+/*	RB_RX_LTPP		32 bit	Rx Lower Threshold, Pause Pack */
+/*	RB_RX_UTHP		32 bit	Rx Upper Threshold, High Prio */
+/*	RB_RX_LTHP		32 bit	Rx Lower Threshold, High Prio */
+/*	RB_PC			32 bit	RAM Buffer Packet Counter */
+/*	RB_LEV			32 bit	RAM Buffer Level Register */
+
+#define RB_MSK	0x0007ffff	/* Bit 18.. 0:	RAM Buffer Pointer Bits */
+/*	RB_TST2			 8 bit	RAM Buffer Test Register 2 */
+/*	RB_TST1			 8 bit	RAM Buffer Test Register 1 */
+
+/*	RB_CTRL			 8 bit	RAM Buffer Control Register */
+enum {
+	RB_ENA_STFWD	= 1<<5,	/* Enable  Store & Forward */
+	RB_DIS_STFWD	= 1<<4,	/* Disable Store & Forward */
+	RB_ENA_OP_MD	= 1<<3,	/* Enable  Operation Mode */
+	RB_DIS_OP_MD	= 1<<2,	/* Disable Operation Mode */
+	RB_RST_CLR	= 1<<1,	/* Clear RAM Buf STM Reset */
+	RB_RST_SET	= 1<<0,	/* Set   RAM Buf STM Reset */
+};
+
+
+/* Transmit GMAC FIFO (YUKON only) */
+enum {
+	TX_GMF_EA	= 0x0d40,/* 32 bit	Tx GMAC FIFO End Address */
+	TX_GMF_AE_THR	= 0x0d44,/* 32 bit	Tx GMAC FIFO Almost Empty Thresh.*/
+	TX_GMF_CTRL_T	= 0x0d48,/* 32 bit	Tx GMAC FIFO Control/Test */
+
+	TX_GMF_WP	= 0x0d60,/* 32 bit 	Tx GMAC FIFO Write Pointer */
+	TX_GMF_WSP	= 0x0d64,/* 32 bit 	Tx GMAC FIFO Write Shadow Ptr. */
+	TX_GMF_WLEV	= 0x0d68,/* 32 bit 	Tx GMAC FIFO Write Level */
+
+	TX_GMF_RP	= 0x0d70,/* 32 bit 	Tx GMAC FIFO Read Pointer */
+	TX_GMF_RSTP	= 0x0d74,/* 32 bit 	Tx GMAC FIFO Restart Pointer */
+	TX_GMF_RLEV	= 0x0d78,/* 32 bit 	Tx GMAC FIFO Read Level */
+
+	/* Threshold values for Yukon-EC Ultra and Extreme */
+	ECU_AE_THR	= 0x0070, /* Almost Empty Threshold */
+	ECU_TXFF_LEV	= 0x01a0, /* Tx BMU FIFO Level */
+	ECU_JUMBO_WM	= 0x0080, /* Jumbo Mode Watermark */
+};
+
+/* Descriptor Poll Timer Registers */
+enum {
+	B28_DPT_INI	= 0x0e00,/* 24 bit	Descriptor Poll Timer Init Val */
+	B28_DPT_VAL	= 0x0e04,/* 24 bit	Descriptor Poll Timer Curr Val */
+	B28_DPT_CTRL	= 0x0e08,/*  8 bit	Descriptor Poll Timer Ctrl Reg */
+
+	B28_DPT_TST	= 0x0e0a,/*  8 bit	Descriptor Poll Timer Test Reg */
+};
+
+/* Time Stamp Timer Registers (YUKON only) */
+enum {
+	GMAC_TI_ST_VAL	= 0x0e14,/* 32 bit	Time Stamp Timer Curr Val */
+	GMAC_TI_ST_CTRL	= 0x0e18,/*  8 bit	Time Stamp Timer Ctrl Reg */
+	GMAC_TI_ST_TST	= 0x0e1a,/*  8 bit	Time Stamp Timer Test Reg */
+};
+
+/* Polling Unit Registers (Yukon-2 only) */
+enum {
+	POLL_CTRL	= 0x0e20, /* 32 bit	Polling Unit Control Reg */
+	POLL_LAST_IDX	= 0x0e24,/* 16 bit	Polling Unit List Last Index */
+
+	POLL_LIST_ADDR_LO= 0x0e28,/* 32 bit	Poll. List Start Addr (low) */
+	POLL_LIST_ADDR_HI= 0x0e2c,/* 32 bit	Poll. List Start Addr (high) */
+};
+
+enum {
+	SMB_CFG		 = 0x0e40, /* 32 bit	SMBus Config Register */
+	SMB_CSR		 = 0x0e44, /* 32 bit	SMBus Control/Status Register */
+};
+
+enum {
+	CPU_WDOG	 = 0x0e48, /* 32 bit	Watchdog Register  */
+	CPU_CNTR	 = 0x0e4C, /* 32 bit	Counter Register  */
+	CPU_TIM		 = 0x0e50,/* 32 bit	Timer Compare Register  */
+	CPU_AHB_ADDR	 = 0x0e54, /* 32 bit	CPU AHB Debug  Register  */
+	CPU_AHB_WDATA	 = 0x0e58, /* 32 bit	CPU AHB Debug  Register  */
+	CPU_AHB_RDATA	 = 0x0e5C, /* 32 bit	CPU AHB Debug  Register  */
+	HCU_MAP_BASE	 = 0x0e60, /* 32 bit	Reset Mapping Base */
+	CPU_AHB_CTRL	 = 0x0e64, /* 32 bit	CPU AHB Debug  Register  */
+	HCU_CCSR	 = 0x0e68, /* 32 bit	CPU Control and Status Register */
+	HCU_HCSR	 = 0x0e6C, /* 32 bit	Host Control and Status Register */
+};
+
+/* ASF Subsystem Registers (Yukon-2 only) */
+enum {
+	B28_Y2_SMB_CONFIG  = 0x0e40,/* 32 bit	ASF SMBus Config Register */
+	B28_Y2_SMB_CSD_REG = 0x0e44,/* 32 bit	ASF SMB Control/Status/Data */
+	B28_Y2_ASF_IRQ_V_BASE=0x0e60,/* 32 bit	ASF IRQ Vector Base */
+
+	B28_Y2_ASF_STAT_CMD= 0x0e68,/* 32 bit	ASF Status and Command Reg */
+	B28_Y2_ASF_HOST_COM= 0x0e6c,/* 32 bit	ASF Host Communication Reg */
+	B28_Y2_DATA_REG_1  = 0x0e70,/* 32 bit	ASF/Host Data Register 1 */
+	B28_Y2_DATA_REG_2  = 0x0e74,/* 32 bit	ASF/Host Data Register 2 */
+	B28_Y2_DATA_REG_3  = 0x0e78,/* 32 bit	ASF/Host Data Register 3 */
+	B28_Y2_DATA_REG_4  = 0x0e7c,/* 32 bit	ASF/Host Data Register 4 */
+};
+
+/* Status BMU Registers (Yukon-2 only)*/
+enum {
+	STAT_CTRL	= 0x0e80,/* 32 bit	Status BMU Control Reg */
+	STAT_LAST_IDX	= 0x0e84,/* 16 bit	Status BMU Last Index */
+
+	STAT_LIST_ADDR_LO= 0x0e88,/* 32 bit	Status List Start Addr (low) */
+	STAT_LIST_ADDR_HI= 0x0e8c,/* 32 bit	Status List Start Addr (high) */
+	STAT_TXA1_RIDX	= 0x0e90,/* 16 bit	Status TxA1 Report Index Reg */
+	STAT_TXS1_RIDX	= 0x0e92,/* 16 bit	Status TxS1 Report Index Reg */
+	STAT_TXA2_RIDX	= 0x0e94,/* 16 bit	Status TxA2 Report Index Reg */
+	STAT_TXS2_RIDX	= 0x0e96,/* 16 bit	Status TxS2 Report Index Reg */
+	STAT_TX_IDX_TH	= 0x0e98,/* 16 bit	Status Tx Index Threshold Reg */
+	STAT_PUT_IDX	= 0x0e9c,/* 16 bit	Status Put Index Reg */
+
+/* FIFO Control/Status Registers (Yukon-2 only)*/
+	STAT_FIFO_WP	= 0x0ea0,/*  8 bit	Status FIFO Write Pointer Reg */
+	STAT_FIFO_RP	= 0x0ea4,/*  8 bit	Status FIFO Read Pointer Reg */
+	STAT_FIFO_RSP	= 0x0ea6,/*  8 bit	Status FIFO Read Shadow Ptr */
+	STAT_FIFO_LEVEL	= 0x0ea8,/*  8 bit	Status FIFO Level Reg */
+	STAT_FIFO_SHLVL	= 0x0eaa,/*  8 bit	Status FIFO Shadow Level Reg */
+	STAT_FIFO_WM	= 0x0eac,/*  8 bit	Status FIFO Watermark Reg */
+	STAT_FIFO_ISR_WM= 0x0ead,/*  8 bit	Status FIFO ISR Watermark Reg */
+
+/* Level and ISR Timer Registers (Yukon-2 only)*/
+	STAT_LEV_TIMER_INI= 0x0eb0,/* 32 bit	Level Timer Init. Value Reg */
+	STAT_LEV_TIMER_CNT= 0x0eb4,/* 32 bit	Level Timer Counter Reg */
+	STAT_LEV_TIMER_CTRL= 0x0eb8,/*  8 bit	Level Timer Control Reg */
+	STAT_LEV_TIMER_TEST= 0x0eb9,/*  8 bit	Level Timer Test Reg */
+	STAT_TX_TIMER_INI  = 0x0ec0,/* 32 bit	Tx Timer Init. Value Reg */
+	STAT_TX_TIMER_CNT  = 0x0ec4,/* 32 bit	Tx Timer Counter Reg */
+	STAT_TX_TIMER_CTRL = 0x0ec8,/*  8 bit	Tx Timer Control Reg */
+	STAT_TX_TIMER_TEST = 0x0ec9,/*  8 bit	Tx Timer Test Reg */
+	STAT_ISR_TIMER_INI = 0x0ed0,/* 32 bit	ISR Timer Init. Value Reg */
+	STAT_ISR_TIMER_CNT = 0x0ed4,/* 32 bit	ISR Timer Counter Reg */
+	STAT_ISR_TIMER_CTRL= 0x0ed8,/*  8 bit	ISR Timer Control Reg */
+	STAT_ISR_TIMER_TEST= 0x0ed9,/*  8 bit	ISR Timer Test Reg */
+};
+
+enum {
+	LINKLED_OFF 	     = 0x01,
+	LINKLED_ON  	     = 0x02,
+	LINKLED_LINKSYNC_OFF = 0x04,
+	LINKLED_LINKSYNC_ON  = 0x08,
+	LINKLED_BLINK_OFF    = 0x10,
+	LINKLED_BLINK_ON     = 0x20,
+};
+
+/* GMAC and GPHY Control Registers (YUKON only) */
+enum {
+	GMAC_CTRL	= 0x0f00,/* 32 bit	GMAC Control Reg */
+	GPHY_CTRL	= 0x0f04,/* 32 bit	GPHY Control Reg */
+	GMAC_IRQ_SRC	= 0x0f08,/*  8 bit	GMAC Interrupt Source Reg */
+	GMAC_IRQ_MSK	= 0x0f0c,/*  8 bit	GMAC Interrupt Mask Reg */
+	GMAC_LINK_CTRL	= 0x0f10,/* 16 bit	Link Control Reg */
+
+/* Wake-up Frame Pattern Match Control Registers (YUKON only) */
+	WOL_CTRL_STAT	= 0x0f20,/* 16 bit	WOL Control/Status Reg */
+	WOL_MATCH_CTL	= 0x0f22,/*  8 bit	WOL Match Control Reg */
+	WOL_MATCH_RES	= 0x0f23,/*  8 bit	WOL Match Result Reg */
+	WOL_MAC_ADDR	= 0x0f24,/* 32 bit	WOL MAC Address */
+	WOL_PATT_RPTR	= 0x0f2c,/*  8 bit	WOL Pattern Read Pointer */
+
+/* WOL Pattern Length Registers (YUKON only) */
+	WOL_PATT_LEN_LO	= 0x0f30,/* 32 bit	WOL Pattern Length 3..0 */
+	WOL_PATT_LEN_HI	= 0x0f34,/* 24 bit	WOL Pattern Length 6..4 */
+
+/* WOL Pattern Counter Registers (YUKON only) */
+	WOL_PATT_CNT_0	= 0x0f38,/* 32 bit	WOL Pattern Counter 3..0 */
+	WOL_PATT_CNT_4	= 0x0f3c,/* 24 bit	WOL Pattern Counter 6..4 */
+};
+#define WOL_REGS(port, x)	((x) + (port)*0x80)
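+/*
+ * Illustration: the WOL register blocks are 0x80 apart per port, e.g.
+ * WOL_REGS(1, WOL_CTRL_STAT) evaluates to 0x0f20 + 0x80 = 0x0fa0.
+ */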
+
+enum {
+	WOL_PATT_RAM_1	= 0x1000,/*  WOL Pattern RAM Link 1 */
+	WOL_PATT_RAM_2	= 0x1400,/*  WOL Pattern RAM Link 2 */
+};
+#define WOL_PATT_RAM_BASE(port)	(WOL_PATT_RAM_1 + (port)*0x400)
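+/*
+ * Illustration: WOL_PATT_RAM_BASE(1) evaluates to
+ * 0x1000 + 0x400 = 0x1400, i.e. WOL_PATT_RAM_2.
+ */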
+
+enum {
+	BASE_GMAC_1	= 0x2800,/* GMAC 1 registers */
+	BASE_GMAC_2	= 0x3800,/* GMAC 2 registers */
+};
+
+/*
+ * Marvell PHY registers, indirectly addressed over GMAC
+ */
+enum {
+	PHY_MARV_CTRL		= 0x00,/* 16 bit r/w	PHY Control Register */
+	PHY_MARV_STAT		= 0x01,/* 16 bit r/o	PHY Status Register */
+	PHY_MARV_ID0		= 0x02,/* 16 bit r/o	PHY ID0 Register */
+	PHY_MARV_ID1		= 0x03,/* 16 bit r/o	PHY ID1 Register */
+	PHY_MARV_AUNE_ADV	= 0x04,/* 16 bit r/w	Auto-Neg. Advertisement */
+	PHY_MARV_AUNE_LP	= 0x05,/* 16 bit r/o	Link Partner Ability Reg */
+	PHY_MARV_AUNE_EXP	= 0x06,/* 16 bit r/o	Auto-Neg. Expansion Reg */
+	PHY_MARV_NEPG		= 0x07,/* 16 bit r/w	Next Page Register */
+	PHY_MARV_NEPG_LP	= 0x08,/* 16 bit r/o	Next Page Link Partner */
+	/* Marvell-specific registers */
+	PHY_MARV_1000T_CTRL	= 0x09,/* 16 bit r/w	1000Base-T Control Reg */
+	PHY_MARV_1000T_STAT	= 0x0a,/* 16 bit r/o	1000Base-T Status Reg */
+	PHY_MARV_EXT_STAT	= 0x0f,/* 16 bit r/o	Extended Status Reg */
+	PHY_MARV_PHY_CTRL	= 0x10,/* 16 bit r/w	PHY Specific Ctrl Reg */
+	PHY_MARV_PHY_STAT	= 0x11,/* 16 bit r/o	PHY Specific Stat Reg */
+	PHY_MARV_INT_MASK	= 0x12,/* 16 bit r/w	Interrupt Mask Reg */
+	PHY_MARV_INT_STAT	= 0x13,/* 16 bit r/o	Interrupt Status Reg */
+	PHY_MARV_EXT_CTRL	= 0x14,/* 16 bit r/w	Ext. PHY Specific Ctrl */
+	PHY_MARV_RXE_CNT	= 0x15,/* 16 bit r/w	Receive Error Counter */
+	PHY_MARV_EXT_ADR	= 0x16,/* 16 bit r/w	Ext. Ad. for Cable Diag. */
+	PHY_MARV_PORT_IRQ	= 0x17,/* 16 bit r/o	Port 0 IRQ (88E1111 only) */
+	PHY_MARV_LED_CTRL	= 0x18,/* 16 bit r/w	LED Control Reg */
+	PHY_MARV_LED_OVER	= 0x19,/* 16 bit r/w	Manual LED Override Reg */
+	PHY_MARV_EXT_CTRL_2	= 0x1a,/* 16 bit r/w	Ext. PHY Specific Ctrl 2 */
+	PHY_MARV_EXT_P_STAT	= 0x1b,/* 16 bit r/w	Ext. PHY Spec. Stat Reg */
+	PHY_MARV_CABLE_DIAG	= 0x1c,/* 16 bit r/o	Cable Diagnostic Reg */
+	PHY_MARV_PAGE_ADDR	= 0x1d,/* 16 bit r/w	Extended Page Address Reg */
+	PHY_MARV_PAGE_DATA	= 0x1e,/* 16 bit r/w	Extended Page Data Reg */
+
+/* for 10/100 Fast Ethernet PHY (88E3082 only) */
+	PHY_MARV_FE_LED_PAR	= 0x16,/* 16 bit r/w	LED Parallel Select Reg. */
+	PHY_MARV_FE_LED_SER	= 0x17,/* 16 bit r/w	LED Stream Select S. LED */
+	PHY_MARV_FE_VCT_TX	= 0x1a,/* 16 bit r/w	VCT Reg. for TXP/N Pins */
+	PHY_MARV_FE_VCT_RX	= 0x1b,/* 16 bit r/o	VCT Reg. for RXP/N Pins */
+	PHY_MARV_FE_SPEC_2	= 0x1c,/* 16 bit r/w	Specific Control Reg. 2 */
+};
+
+enum {
+	PHY_CT_RESET	= 1<<15, /* Bit 15: (sc)	clear all PHY related regs */
+	PHY_CT_LOOP	= 1<<14, /* Bit 14:	enable Loopback over PHY */
+	PHY_CT_SPS_LSB	= 1<<13, /* Bit 13:	Speed select, lower bit */
+	PHY_CT_ANE	= 1<<12, /* Bit 12:	Auto-Negotiation Enabled */
+	PHY_CT_PDOWN	= 1<<11, /* Bit 11:	Power Down Mode */
+	PHY_CT_ISOL	= 1<<10, /* Bit 10:	Isolate Mode */
+	PHY_CT_RE_CFG	= 1<<9, /* Bit  9:	(sc) Restart Auto-Negotiation */
+	PHY_CT_DUP_MD	= 1<<8, /* Bit  8:	Duplex Mode */
+	PHY_CT_COL_TST	= 1<<7, /* Bit  7:	Collision Test enabled */
+	PHY_CT_SPS_MSB	= 1<<6, /* Bit  6:	Speed select, upper bit */
+};
+
+enum {
+	PHY_CT_SP1000	= PHY_CT_SPS_MSB, /* enable speed of 1000 Mbps */
+	PHY_CT_SP100	= PHY_CT_SPS_LSB, /* enable speed of  100 Mbps */
+	PHY_CT_SP10	= 0,		  /* enable speed of   10 Mbps */
+};
+
+enum {
+	PHY_ST_EXT_ST	= 1<<8, /* Bit  8:	Extended Status Present */
+
+	PHY_ST_PRE_SUP	= 1<<6, /* Bit  6:	Preamble Suppression */
+	PHY_ST_AN_OVER	= 1<<5, /* Bit  5:	Auto-Negotiation Over */
+	PHY_ST_REM_FLT	= 1<<4, /* Bit  4:	Remote Fault Condition Occurred */
+	PHY_ST_AN_CAP	= 1<<3, /* Bit  3:	Auto-Negotiation Capability */
+	PHY_ST_LSYNC	= 1<<2, /* Bit  2:	Link Synchronized */
+	PHY_ST_JAB_DET	= 1<<1, /* Bit  1:	Jabber Detected */
+	PHY_ST_EXT_REG	= 1<<0, /* Bit  0:	Extended Register available */
+};
+
+enum {
+	PHY_I1_OUI_MSK	= 0x3f<<10, /* Bit 15..10:	Organization Unique ID */
+	PHY_I1_MOD_NUM	= 0x3f<<4, /* Bit  9.. 4:	Model Number */
+	PHY_I1_REV_MSK	= 0xf, /* Bit  3.. 0:	Revision Number */
+};
+
+/* different Marvell PHY Ids */
+enum {
+	PHY_MARV_ID0_VAL= 0x0141, /* Marvell Unique Identifier */
+
+	PHY_BCOM_ID1_A1	= 0x6041,
+	PHY_BCOM_ID1_B2	= 0x6043,
+	PHY_BCOM_ID1_C0	= 0x6044,
+	PHY_BCOM_ID1_C5	= 0x6047,
+
+	PHY_MARV_ID1_B0	= 0x0C23, /* Yukon 	(PHY 88E1011) */
+	PHY_MARV_ID1_B2	= 0x0C25, /* Yukon-Plus (PHY 88E1011) */
+	PHY_MARV_ID1_C2	= 0x0CC2, /* Yukon-EC	(PHY 88E1111) */
+	PHY_MARV_ID1_Y2	= 0x0C91, /* Yukon-2	(PHY 88E1112) */
+	PHY_MARV_ID1_FE = 0x0C83, /* Yukon-FE   (PHY 88E3082 Rev.A1) */
+	PHY_MARV_ID1_ECU= 0x0CB0, /* Yukon-ECU  (PHY 88E1149 Rev.B2?) */
+};
+
+/* Advertisement register bits */
+enum {
+	PHY_AN_NXT_PG	= 1<<15, /* Bit 15:	Request Next Page */
+	PHY_AN_ACK	= 1<<14, /* Bit 14:	(ro) Acknowledge Received */
+	PHY_AN_RF	= 1<<13, /* Bit 13:	Remote Fault Bits */
+
+	PHY_AN_PAUSE_ASYM = 1<<11,/* Bit 11:	Try for asymmetric */
+	PHY_AN_PAUSE_CAP = 1<<10, /* Bit 10:	Try for pause */
+	PHY_AN_100BASE4	= 1<<9, /* Bit 9:	Try for 100mbps 4k packets */
+	PHY_AN_100FULL	= 1<<8, /* Bit 8:	Try for 100mbps full-duplex */
+	PHY_AN_100HALF	= 1<<7, /* Bit 7:	Try for 100mbps half-duplex */
+	PHY_AN_10FULL	= 1<<6, /* Bit 6:	Try for 10mbps full-duplex */
+	PHY_AN_10HALF	= 1<<5, /* Bit 5:	Try for 10mbps half-duplex */
+	PHY_AN_CSMA	= 1<<0, /* Bit 0:	Only selector supported */
+	PHY_AN_SEL	= 0x1f, /* Bit 4..0:	Selector Field, 00001=Ethernet*/
+	PHY_AN_FULL	= PHY_AN_100FULL | PHY_AN_10FULL | PHY_AN_CSMA,
+	PHY_AN_ALL	= PHY_AN_10HALF | PHY_AN_10FULL |
+		  	  PHY_AN_100HALF | PHY_AN_100FULL,
+};
+
+/*****  PHY_BCOM_1000T_STAT	16 bit r/o	1000Base-T Status Reg *****/
+/*****  PHY_MARV_1000T_STAT	16 bit r/o	1000Base-T Status Reg *****/
+enum {
+	PHY_B_1000S_MSF	= 1<<15, /* Bit 15:	Master/Slave Fault */
+	PHY_B_1000S_MSR	= 1<<14, /* Bit 14:	Master/Slave Result */
+	PHY_B_1000S_LRS	= 1<<13, /* Bit 13:	Local Receiver Status */
+	PHY_B_1000S_RRS	= 1<<12, /* Bit 12:	Remote Receiver Status */
+	PHY_B_1000S_LP_FD	= 1<<11, /* Bit 11:	Link Partner can FD */
+	PHY_B_1000S_LP_HD	= 1<<10, /* Bit 10:	Link Partner can HD */
+									/* Bit  9..8:	reserved */
+	PHY_B_1000S_IEC	= 0xff, /* Bit  7..0:	Idle Error Count */
+};
+
+/** Marvell-Specific */
+enum {
+	PHY_M_AN_NXT_PG	= 1<<15, /* Request Next Page */
+	PHY_M_AN_ACK	= 1<<14, /* (ro)	Acknowledge Received */
+	PHY_M_AN_RF	= 1<<13, /* Remote Fault */
+
+	PHY_M_AN_ASP	= 1<<11, /* Asymmetric Pause */
+	PHY_M_AN_PC	= 1<<10, /* MAC Pause implemented */
+	PHY_M_AN_100_T4	= 1<<9, /* Not cap. 100Base-T4 (always 0) */
+	PHY_M_AN_100_FD	= 1<<8, /* Advertise 100Base-TX Full Duplex */
+	PHY_M_AN_100_HD	= 1<<7, /* Advertise 100Base-TX Half Duplex */
+	PHY_M_AN_10_FD	= 1<<6, /* Advertise 10Base-T Full Duplex */
+	PHY_M_AN_10_HD	= 1<<5, /* Advertise 10Base-T Half Duplex */
+	PHY_M_AN_SEL_MSK = 0x1f<<4,	/* Bit  4.. 0: Selector Field Mask */
+};
+
+/* special defines for FIBER (88E1011S only) */
+enum {
+	PHY_M_AN_ASP_X	= 1<<8, /* Asymmetric Pause */
+	PHY_M_AN_PC_X	= 1<<7, /* MAC Pause implemented */
+	PHY_M_AN_1000X_AHD	= 1<<6, /* Advertise 1000Base-X Half Duplex */
+	PHY_M_AN_1000X_AFD	= 1<<5, /* Advertise 1000Base-X Full Duplex */
+};
+
+/* Pause Bits (PHY_M_AN_ASP_X and PHY_M_AN_PC_X) encoding */
+enum {
+	PHY_M_P_NO_PAUSE_X	= 0<<7,/* Bit  8.. 7:	no Pause Mode */
+	PHY_M_P_SYM_MD_X	= 1<<7, /* Bit  8.. 7:	symmetric Pause Mode */
+	PHY_M_P_ASYM_MD_X	= 2<<7,/* Bit  8.. 7:	asymmetric Pause Mode */
+	PHY_M_P_BOTH_MD_X	= 3<<7,/* Bit  8.. 7:	both Pause Mode */
+};
+
+/*****  PHY_MARV_1000T_CTRL	16 bit r/w	1000Base-T Control Reg *****/
+enum {
+	PHY_M_1000C_TEST	= 7<<13,/* Bit 15..13:	Test Modes */
+	PHY_M_1000C_MSE	= 1<<12, /* Manual Master/Slave Enable */
+	PHY_M_1000C_MSC	= 1<<11, /* M/S Configuration (1=Master) */
+	PHY_M_1000C_MPD	= 1<<10, /* Multi-Port Device */
+	PHY_M_1000C_AFD	= 1<<9, /* Advertise Full Duplex */
+	PHY_M_1000C_AHD	= 1<<8, /* Advertise Half Duplex */
+};
+
+/*****  PHY_MARV_PHY_CTRL	16 bit r/w	PHY Specific Ctrl Reg *****/
+enum {
+	PHY_M_PC_TX_FFD_MSK	= 3<<14,/* Bit 15..14: Tx FIFO Depth Mask */
+	PHY_M_PC_RX_FFD_MSK	= 3<<12,/* Bit 13..12: Rx FIFO Depth Mask */
+	PHY_M_PC_ASS_CRS_TX	= 1<<11, /* Assert CRS on Transmit */
+	PHY_M_PC_FL_GOOD	= 1<<10, /* Force Link Good */
+	PHY_M_PC_EN_DET_MSK	= 3<<8,/* Bit  9.. 8: Energy Detect Mask */
+	PHY_M_PC_ENA_EXT_D	= 1<<7, /* Enable Ext. Distance (10BT) */
+	PHY_M_PC_MDIX_MSK	= 3<<5,/* Bit  6.. 5: MDI/MDIX Config. Mask */
+	PHY_M_PC_DIS_125CLK	= 1<<4, /* Disable 125 CLK */
+	PHY_M_PC_MAC_POW_UP	= 1<<3, /* MAC Power up */
+	PHY_M_PC_SQE_T_ENA	= 1<<2, /* SQE Test Enabled */
+	PHY_M_PC_POL_R_DIS	= 1<<1, /* Polarity Reversal Disabled */
+	PHY_M_PC_DIS_JABBER	= 1<<0, /* Disable Jabber */
+};
+
+enum {
+	PHY_M_PC_EN_DET		= 2<<8,	/* Energy Detect (Mode 1) */
+	PHY_M_PC_EN_DET_PLUS	= 3<<8, /* Energy Detect Plus (Mode 2) */
+};
+
+#define PHY_M_PC_MDI_XMODE(x)	(((u16)(x)<<5) & PHY_M_PC_MDIX_MSK)
+
+enum {
+	PHY_M_PC_MAN_MDI	= 0, /* 00 = Manual MDI configuration */
+	PHY_M_PC_MAN_MDIX	= 1, /* 01 = Manual MDIX configuration */
+	PHY_M_PC_ENA_AUTO	= 3, /* 11 = Enable Automatic Crossover */
+};
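+/*
+ * Illustration: PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) evaluates to
+ * 0x0060, i.e. automatic crossover selected in bits 6..5 of the PHY
+ * Specific Control register.
+ */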
+
+/* for Yukon-EC Ultra Gigabit Ethernet PHY (88E1149 only) */
+enum {
+	PHY_M_PC_COP_TX_DIS	= 1<<3, /* Copper Transmitter Disable */
+	PHY_M_PC_POW_D_ENA	= 1<<2,	/* Power Down Enable */
+};
+
+/* for 10/100 Fast Ethernet PHY (88E3082 only) */
+enum {
+	PHY_M_PC_ENA_DTE_DT	= 1<<15, /* Enable Data Terminal Equ. (DTE) Detect */
+	PHY_M_PC_ENA_ENE_DT	= 1<<14, /* Enable Energy Detect (sense & pulse) */
+	PHY_M_PC_DIS_NLP_CK	= 1<<13, /* Disable Normal Link Pulse (NLP) Check */
+	PHY_M_PC_ENA_LIP_NP	= 1<<12, /* Enable Link Partner Next Page Reg. */
+	PHY_M_PC_DIS_NLP_GN	= 1<<11, /* Disable Normal Link Pulse Generation */
+
+	PHY_M_PC_DIS_SCRAMB	= 1<<9, /* Disable Scrambler */
+	PHY_M_PC_DIS_FEFI	= 1<<8, /* Disable Far End Fault Indic. (FEFI) */
+
+	PHY_M_PC_SH_TP_SEL	= 1<<6, /* Shielded Twisted Pair Select */
+	PHY_M_PC_RX_FD_MSK	= 3<<2,/* Bit  3.. 2: Rx FIFO Depth Mask */
+};
+
+/*****  PHY_MARV_PHY_STAT	16 bit r/o	PHY Specific Status Reg *****/
+enum {
+	PHY_M_PS_SPEED_MSK	= 3<<14, /* Bit 15..14: Speed Mask */
+	PHY_M_PS_SPEED_1000	= 1<<15, /*		10 = 1000 Mbps */
+	PHY_M_PS_SPEED_100	= 1<<14, /*		01 =  100 Mbps */
+	PHY_M_PS_SPEED_10	= 0,	 /*		00 =   10 Mbps */
+	PHY_M_PS_FULL_DUP	= 1<<13, /* Full Duplex */
+	PHY_M_PS_PAGE_REC	= 1<<12, /* Page Received */
+	PHY_M_PS_SPDUP_RES	= 1<<11, /* Speed & Duplex Resolved */
+	PHY_M_PS_LINK_UP	= 1<<10, /* Link Up */
+	PHY_M_PS_CABLE_MSK	= 7<<7,  /* Bit  9.. 7: Cable Length Mask */
+	PHY_M_PS_MDI_X_STAT	= 1<<6,  /* MDI Crossover Stat (1=MDIX) */
+	PHY_M_PS_DOWNS_STAT	= 1<<5,  /* Downshift Status (1=downsh.) */
+	PHY_M_PS_ENDET_STAT	= 1<<4,  /* Energy Detect Status (1=act) */
+	PHY_M_PS_TX_P_EN	= 1<<3,  /* Tx Pause Enabled */
+	PHY_M_PS_RX_P_EN	= 1<<2,  /* Rx Pause Enabled */
+	PHY_M_PS_POL_REV	= 1<<1,  /* Polarity Reversed */
+	PHY_M_PS_JABBER		= 1<<0,  /* Jabber */
+};
+
+#define PHY_M_PS_PAUSE_MSK	(PHY_M_PS_TX_P_EN | PHY_M_PS_RX_P_EN)
+
+/* for 10/100 Fast Ethernet PHY (88E3082 only) */
+enum {
+	PHY_M_PS_DTE_DETECT	= 1<<15, /* Data Terminal Equipment (DTE) Detected */
+	PHY_M_PS_RES_SPEED	= 1<<14, /* Resolved Speed (1=100 Mbps, 0=10 Mbps) */
+};
+
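+/*****  PHY_MARV_INT_MASK	16 bit r/w	Interrupt Mask Reg *****/
+/*****  PHY_MARV_INT_STAT	16 bit r/o	Interrupt Status Reg *****/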
+enum {
+	PHY_M_IS_AN_ERROR	= 1<<15, /* Auto-Negotiation Error */
+	PHY_M_IS_LSP_CHANGE	= 1<<14, /* Link Speed Changed */
+	PHY_M_IS_DUP_CHANGE	= 1<<13, /* Duplex Mode Changed */
+	PHY_M_IS_AN_PR		= 1<<12, /* Page Received */
+	PHY_M_IS_AN_COMPL	= 1<<11, /* Auto-Negotiation Completed */
+	PHY_M_IS_LST_CHANGE	= 1<<10, /* Link Status Changed */
+	PHY_M_IS_SYMB_ERROR	= 1<<9, /* Symbol Error */
+	PHY_M_IS_FALSE_CARR	= 1<<8, /* False Carrier */
+	PHY_M_IS_FIFO_ERROR	= 1<<7, /* FIFO Overflow/Underrun Error */
+	PHY_M_IS_MDI_CHANGE	= 1<<6, /* MDI Crossover Changed */
+	PHY_M_IS_DOWNSH_DET	= 1<<5, /* Downshift Detected */
+	PHY_M_IS_END_CHANGE	= 1<<4, /* Energy Detect Changed */
+
+	PHY_M_IS_DTE_CHANGE	= 1<<2, /* DTE Power Det. Status Changed */
+	PHY_M_IS_POL_CHANGE	= 1<<1, /* Polarity Changed */
+	PHY_M_IS_JABBER		= 1<<0, /* Jabber */
+
+	PHY_M_DEF_MSK		= PHY_M_IS_LSP_CHANGE | PHY_M_IS_LST_CHANGE
+				 | PHY_M_IS_DUP_CHANGE,
+	PHY_M_AN_MSK	       = PHY_M_IS_AN_ERROR | PHY_M_IS_AN_COMPL,
+};
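+/*
+ * Example: the driver unmasks the auto-negotiation events while autoneg
+ * is running and only the default link-change events otherwise (a
+ * sketch; PHY_MARV_INT_MASK is defined earlier in this header):
+ *
+ *	gm_phy_write(hw, port, PHY_MARV_INT_MASK,
+ *		     autoneg ? PHY_M_AN_MSK : PHY_M_DEF_MSK);
+ */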
+
+
+/*****  PHY_MARV_EXT_CTRL	16 bit r/w	Ext. PHY Specific Ctrl *****/
+enum {
+	PHY_M_EC_ENA_BC_EXT = 1<<15, /* Enable Block Carr. Ext. (88E1111 only) */
+	PHY_M_EC_ENA_LIN_LB = 1<<14, /* Enable Line Loopback (88E1111 only) */
+
+	PHY_M_EC_DIS_LINK_P = 1<<12, /* Disable Link Pulses (88E1111 only) */
+	PHY_M_EC_M_DSC_MSK  = 3<<10, /* Bit 11..10:	Master Downshift Counter */
+					/* (88E1011 only) */
+	PHY_M_EC_S_DSC_MSK  = 3<<8,/* Bit  9.. 8:	Slave  Downshift Counter */
+				       /* (88E1011 only) */
+	PHY_M_EC_M_DSC_MSK2 = 7<<9,/* Bit 11.. 9:	Master Downshift Counter */
+					/* (88E1111 only) */
+	PHY_M_EC_DOWN_S_ENA = 1<<8, /* Downshift Enable (88E1111 only) */
+					/* !!! Errata in spec. (1 = disable) */
+	PHY_M_EC_RX_TIM_CT  = 1<<7, /* RGMII Rx Timing Control*/
+	PHY_M_EC_MAC_S_MSK  = 7<<4,/* Bit  6.. 4:	Def. MAC interface speed */
+	PHY_M_EC_FIB_AN_ENA = 1<<3, /* Fiber Auto-Neg. Enable (88E1011S only) */
+	PHY_M_EC_DTE_D_ENA  = 1<<2, /* DTE Detect Enable (88E1111 only) */
+	PHY_M_EC_TX_TIM_CT  = 1<<1, /* RGMII Tx Timing Control */
+	PHY_M_EC_TRANS_DIS  = 1<<0, /* Transmitter Disable (88E1111 only) */
+
+	PHY_M_10B_TE_ENABLE = 1<<7, /* 10Base-Te Enable (88E8079 and above) */
+};
+#define PHY_M_EC_M_DSC(x)	((u16)(x)<<10 & PHY_M_EC_M_DSC_MSK)
+					/* 00=1x; 01=2x; 10=3x; 11=4x */
+#define PHY_M_EC_S_DSC(x)	((u16)(x)<<8 & PHY_M_EC_S_DSC_MSK)
+					/* 00=dis; 01=1x; 10=2x; 11=3x */
+#define PHY_M_EC_DSC_2(x)	((u16)(x)<<9 & PHY_M_EC_M_DSC_MSK2)
+					/* 000=1x; 001=2x; 010=3x; 011=4x */
+#define PHY_M_EC_MAC_S(x)	((u16)(x)<<4 & PHY_M_EC_MAC_S_MSK)
+					/* 01X=0; 110=2.5; 111=25 (MHz) */
+
+/* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */
+enum {
+	PHY_M_PC_DIS_LINK_Pa	= 1<<15,/* Disable Link Pulses */
+	PHY_M_PC_DSC_MSK	= 7<<12,/* Bit 14..12:	Downshift Counter */
+	PHY_M_PC_DOWN_S_ENA	= 1<<11,/* Downshift Enable */
+					/* !!! Errata in spec. (1 = disable) */
+};
+
+#define PHY_M_PC_DSC(x)		(((u16)(x)<<12) & PHY_M_PC_DSC_MSK)
+					/* 100=5x; 101=6x; 110=7x; 111=8x */
+enum {
+	MAC_TX_CLK_0_MHZ	= 2,
+	MAC_TX_CLK_2_5_MHZ	= 6,
+	MAC_TX_CLK_25_MHZ 	= 7,
+};
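+/*
+ * Example (a sketch of the driver's PHY init; exact settings vary by
+ * chip): select the 25 MHz MAC interface speed and, on newer PHYs,
+ * a 3x downshift:
+ *
+ *	ectrl &= ~(PHY_M_EC_M_DSC_MSK2 | PHY_M_EC_MAC_S_MSK);
+ *	ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ) |
+ *		 PHY_M_EC_DSC_2(2) | PHY_M_EC_DOWN_S_ENA;
+ */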
+
+/*****  PHY_MARV_LED_CTRL	16 bit r/w	LED Control Reg *****/
+enum {
+	PHY_M_LEDC_DIS_LED	= 1<<15, /* Disable LED */
+	PHY_M_LEDC_PULS_MSK	= 7<<12,/* Bit 14..12: Pulse Stretch Mask */
+	PHY_M_LEDC_F_INT	= 1<<11, /* Force Interrupt */
+	PHY_M_LEDC_BL_R_MSK	= 7<<8,/* Bit 10.. 8: Blink Rate Mask */
+	PHY_M_LEDC_DP_C_LSB	= 1<<7, /* Duplex Control (LSB, 88E1111 only) */
+	PHY_M_LEDC_TX_C_LSB	= 1<<6, /* Tx Control (LSB, 88E1111 only) */
+	PHY_M_LEDC_LK_C_MSK	= 7<<3,/* Bit  5.. 3: Link Control Mask */
+					/* (88E1111 only) */
+};
+
+enum {
+	PHY_M_LEDC_LINK_MSK	= 3<<3,/* Bit  4.. 3: Link Control Mask */
+									/* (88E1011 only) */
+	PHY_M_LEDC_DP_CTRL	= 1<<2, /* Duplex Control */
+	PHY_M_LEDC_DP_C_MSB	= 1<<2, /* Duplex Control (MSB, 88E1111 only) */
+	PHY_M_LEDC_RX_CTRL	= 1<<1, /* Rx Activity / Link */
+	PHY_M_LEDC_TX_CTRL	= 1<<0, /* Tx Activity / Link */
+	PHY_M_LEDC_TX_C_MSB	= 1<<0, /* Tx Control (MSB, 88E1111 only) */
+};
+
+#define PHY_M_LED_PULS_DUR(x)	(((u16)(x)<<12) & PHY_M_LEDC_PULS_MSK)
+
+/*****  PHY_MARV_PHY_STAT (page 3)	16 bit r/w	Polarity Control Reg. *****/
+enum {
+	PHY_M_POLC_LS1M_MSK	= 0xf<<12, /* Bit 15..12: LOS,STAT1 Mix % Mask */
+	PHY_M_POLC_IS0M_MSK	= 0xf<<8,  /* Bit 11.. 8: INIT,STAT0 Mix % Mask */
+	PHY_M_POLC_LOS_MSK	= 0x3<<6,  /* Bit  7.. 6: LOS Pol. Ctrl. Mask */
+	PHY_M_POLC_INIT_MSK	= 0x3<<4,  /* Bit  5.. 4: INIT Pol. Ctrl. Mask */
+	PHY_M_POLC_STA1_MSK	= 0x3<<2,  /* Bit  3.. 2: STAT1 Pol. Ctrl. Mask */
+	PHY_M_POLC_STA0_MSK	= 0x3,     /* Bit  1.. 0: STAT0 Pol. Ctrl. Mask */
+};
+
+#define PHY_M_POLC_LS1_P_MIX(x)	(((x)<<12) & PHY_M_POLC_LS1M_MSK)
+#define PHY_M_POLC_IS0_P_MIX(x)	(((x)<<8) & PHY_M_POLC_IS0M_MSK)
+#define PHY_M_POLC_LOS_CTRL(x)	(((x)<<6) & PHY_M_POLC_LOS_MSK)
+#define PHY_M_POLC_INIT_CTRL(x)	(((x)<<4) & PHY_M_POLC_INIT_MSK)
+#define PHY_M_POLC_STA1_CTRL(x)	(((x)<<2) & PHY_M_POLC_STA1_MSK)
+#define PHY_M_POLC_STA0_CTRL(x)	(((x)<<0) & PHY_M_POLC_STA0_MSK)
+
+enum {
+	PULS_NO_STR	= 0,/* no pulse stretching */
+	PULS_21MS	= 1,/* 21 ms to 42 ms */
+	PULS_42MS	= 2,/* 42 ms to 84 ms */
+	PULS_84MS	= 3,/* 84 ms to 170 ms */
+	PULS_170MS	= 4,/* 170 ms to 340 ms */
+	PULS_340MS	= 5,/* 340 ms to 670 ms */
+	PULS_670MS	= 6,/* 670 ms to 1.3 s */
+	PULS_1300MS	= 7,/* 1.3 s to 2.7 s */
+};
+
+#define PHY_M_LED_BLINK_RT(x)	(((u16)(x)<<8) & PHY_M_LEDC_BL_R_MSK)
+
+enum {
+	BLINK_42MS	= 0,/* 42 ms */
+	BLINK_84MS	= 1,/* 84 ms */
+	BLINK_170MS	= 2,/* 170 ms */
+	BLINK_340MS	= 3,/* 340 ms */
+	BLINK_670MS	= 4,/* 670 ms */
+};
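+/*
+ * Example: a 170 ms pulse stretch with an 84 ms blink rate, written to
+ * PHY_MARV_LED_CTRL (a sketch of the driver's LED setup):
+ *
+ *	gm_phy_write(hw, port, PHY_MARV_LED_CTRL,
+ *		     PHY_M_LED_PULS_DUR(PULS_170MS) |
+ *		     PHY_M_LED_BLINK_RT(BLINK_84MS));
+ */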
+
+/*****  PHY_MARV_LED_OVER	16 bit r/w	Manual LED Override Reg *****/
+#define PHY_M_LED_MO_SGMII(x)	((x)<<14)	/* Bit 15..14:  SGMII AN Timer */
+
+#define PHY_M_LED_MO_DUP(x)	((x)<<10)	/* Bit 11..10:  Duplex */
+#define PHY_M_LED_MO_10(x)	((x)<<8)	/* Bit  9.. 8:  Link 10 */
+#define PHY_M_LED_MO_100(x)	((x)<<6)	/* Bit  7.. 6:  Link 100 */
+#define PHY_M_LED_MO_1000(x)	((x)<<4)	/* Bit  5.. 4:  Link 1000 */
+#define PHY_M_LED_MO_RX(x)	((x)<<2)	/* Bit  3.. 2:  Rx */
+#define PHY_M_LED_MO_TX(x)	((x)<<0)	/* Bit  1.. 0:  Tx */
+
+enum led_mode {
+	MO_LED_NORM  = 0,
+	MO_LED_BLINK = 1,
+	MO_LED_OFF   = 2,
+	MO_LED_ON    = 3,
+};
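+/*
+ * Example: force all LEDs into one mode through the override register
+ * (a sketch modelled on the driver's ethtool LED blink helper):
+ *
+ *	gm_phy_write(hw, port, PHY_MARV_LED_OVER,
+ *		     PHY_M_LED_MO_DUP(mode)  | PHY_M_LED_MO_10(mode) |
+ *		     PHY_M_LED_MO_100(mode) | PHY_M_LED_MO_1000(mode) |
+ *		     PHY_M_LED_MO_RX(mode)  | PHY_M_LED_MO_TX(mode));
+ */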
+
+/*****  PHY_MARV_EXT_CTRL_2	16 bit r/w	Ext. PHY Specific Ctrl 2 *****/
+enum {
+	PHY_M_EC2_FI_IMPED	= 1<<6, /* Fiber Input  Impedance */
+	PHY_M_EC2_FO_IMPED	= 1<<5, /* Fiber Output Impedance */
+	PHY_M_EC2_FO_M_CLK	= 1<<4, /* Fiber Mode Clock Enable */
+	PHY_M_EC2_FO_BOOST	= 1<<3, /* Fiber Output Boost */
+	PHY_M_EC2_FO_AM_MSK	= 7,/* Bit  2.. 0:	Fiber Output Amplitude */
+};
+
+/*****  PHY_MARV_EXT_P_STAT 16 bit r/w	Ext. PHY Specific Status *****/
+enum {
+	PHY_M_FC_AUTO_SEL	= 1<<15, /* Fiber/Copper Auto Sel. Dis. */
+	PHY_M_FC_AN_REG_ACC	= 1<<14, /* Fiber/Copper AN Reg. Access */
+	PHY_M_FC_RESOLUTION	= 1<<13, /* Fiber/Copper Resolution */
+	PHY_M_SER_IF_AN_BP	= 1<<12, /* Ser. IF AN Bypass Enable */
+	PHY_M_SER_IF_BP_ST	= 1<<11, /* Ser. IF AN Bypass Status */
+	PHY_M_IRQ_POLARITY	= 1<<10, /* IRQ polarity */
+	PHY_M_DIS_AUT_MED	= 1<<9, /* Disable Aut. Medium Reg. Selection */
+					/* (88E1111 only) */
+
+	PHY_M_UNDOC1		= 1<<7, /* undocumented bit !! */
+	PHY_M_DTE_POW_STAT	= 1<<4, /* DTE Power Status (88E1111 only) */
+	PHY_M_MODE_MASK	= 0xf, /* Bit  3.. 0: copy of HWCFG MODE[3:0] */
+};
+
+/* for 10/100 Fast Ethernet PHY (88E3082 only) */
+/*****  PHY_MARV_FE_LED_PAR		16 bit r/w	LED Parallel Select Reg. *****/
+									/* Bit 15..12: reserved (used internally) */
+enum {
+	PHY_M_FELP_LED2_MSK = 0xf<<8,	/* Bit 11.. 8: LED2 Mask (LINK) */
+	PHY_M_FELP_LED1_MSK = 0xf<<4,	/* Bit  7.. 4: LED1 Mask (ACT) */
+	PHY_M_FELP_LED0_MSK = 0xf, /* Bit  3.. 0: LED0 Mask (SPEED) */
+};
+
+#define PHY_M_FELP_LED2_CTRL(x)	(((u16)(x)<<8) & PHY_M_FELP_LED2_MSK)
+#define PHY_M_FELP_LED1_CTRL(x)	(((u16)(x)<<4) & PHY_M_FELP_LED1_MSK)
+#define PHY_M_FELP_LED0_CTRL(x)	(((u16)(x)<<0) & PHY_M_FELP_LED0_MSK)
+
+enum {
+	LED_PAR_CTRL_COLX	= 0x00,
+	LED_PAR_CTRL_ERROR	= 0x01,
+	LED_PAR_CTRL_DUPLEX	= 0x02,
+	LED_PAR_CTRL_DP_COL	= 0x03,
+	LED_PAR_CTRL_SPEED	= 0x04,
+	LED_PAR_CTRL_LINK	= 0x05,
+	LED_PAR_CTRL_TX		= 0x06,
+	LED_PAR_CTRL_RX		= 0x07,
+	LED_PAR_CTRL_ACT	= 0x08,
+	LED_PAR_CTRL_LNK_RX	= 0x09,
+	LED_PAR_CTRL_LNK_AC	= 0x0a,
+	LED_PAR_CTRL_ACT_BL	= 0x0b,
+	LED_PAR_CTRL_TX_BL	= 0x0c,
+	LED_PAR_CTRL_RX_BL	= 0x0d,
+	LED_PAR_CTRL_COL_BL	= 0x0e,
+	LED_PAR_CTRL_INACT	= 0x0f
+};
+
+/*****  PHY_MARV_FE_SPEC_2		16 bit r/w	Specific Control Reg. 2 *****/
+enum {
+	PHY_M_FESC_DIS_WAIT	= 1<<2, /* Disable TDR Waiting Period */
+	PHY_M_FESC_ENA_MCLK	= 1<<1, /* Enable MAC Rx Clock in sleep mode */
+	PHY_M_FESC_SEL_CL_A	= 1<<0, /* Select Class A driver (100B-TX) */
+};
+
+/* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */
+/*****  PHY_MARV_PHY_CTRL (page 1)		16 bit r/w	Fiber Specific Ctrl *****/
+enum {
+	PHY_M_FIB_FORCE_LNK	= 1<<10,/* Force Link Good */
+	PHY_M_FIB_SIGD_POL	= 1<<9,	/* SIGDET Polarity */
+	PHY_M_FIB_TX_DIS	= 1<<3,	/* Transmitter Disable */
+};
+
+/* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */
+/*****  PHY_MARV_PHY_CTRL (page 2)		16 bit r/w	MAC Specific Ctrl *****/
+enum {
+	PHY_M_MAC_MD_MSK	= 7<<7, /* Bit  9.. 7: Mode Select Mask */
+	PHY_M_MAC_GMIF_PUP	= 1<<3,	/* GMII Power Up (88E1149 only) */
+	PHY_M_MAC_MD_AUTO	= 3,/* Auto Copper/1000Base-X */
+	PHY_M_MAC_MD_COPPER	= 5,/* Copper only */
+	PHY_M_MAC_MD_1000BX	= 7,/* 1000Base-X only */
+};
+#define PHY_M_MAC_MODE_SEL(x)	(((x)<<7) & PHY_M_MAC_MD_MSK)
+
+/*****  PHY_MARV_PHY_CTRL (page 3)		16 bit r/w	LED Control Reg. *****/
+enum {
+	PHY_M_LEDC_LOS_MSK	= 0xf<<12,/* Bit 15..12: LOS LED Ctrl. Mask */
+	PHY_M_LEDC_INIT_MSK	= 0xf<<8, /* Bit 11.. 8: INIT LED Ctrl. Mask */
+	PHY_M_LEDC_STA1_MSK	= 0xf<<4,/* Bit  7.. 4: STAT1 LED Ctrl. Mask */
+	PHY_M_LEDC_STA0_MSK	= 0xf, /* Bit  3.. 0: STAT0 LED Ctrl. Mask */
+};
+
+#define PHY_M_LEDC_LOS_CTRL(x)	(((x)<<12) & PHY_M_LEDC_LOS_MSK)
+#define PHY_M_LEDC_INIT_CTRL(x)	(((x)<<8) & PHY_M_LEDC_INIT_MSK)
+#define PHY_M_LEDC_STA1_CTRL(x)	(((x)<<4) & PHY_M_LEDC_STA1_MSK)
+#define PHY_M_LEDC_STA0_CTRL(x)	(((x)<<0) & PHY_M_LEDC_STA0_MSK)
+
+/* GMAC registers  */
+/* Port Registers */
+enum {
+	GM_GP_STAT	= 0x0000,	/* 16 bit r/o	General Purpose Status */
+	GM_GP_CTRL	= 0x0004,	/* 16 bit r/w	General Purpose Control */
+	GM_TX_CTRL	= 0x0008,	/* 16 bit r/w	Transmit Control Reg. */
+	GM_RX_CTRL	= 0x000c,	/* 16 bit r/w	Receive Control Reg. */
+	GM_TX_FLOW_CTRL	= 0x0010,	/* 16 bit r/w	Transmit Flow-Control */
+	GM_TX_PARAM	= 0x0014,	/* 16 bit r/w	Transmit Parameter Reg. */
+	GM_SERIAL_MODE	= 0x0018,	/* 16 bit r/w	Serial Mode Register */
+/* Source Address Registers */
+	GM_SRC_ADDR_1L	= 0x001c,	/* 16 bit r/w	Source Address 1 (low) */
+	GM_SRC_ADDR_1M	= 0x0020,	/* 16 bit r/w	Source Address 1 (middle) */
+	GM_SRC_ADDR_1H	= 0x0024,	/* 16 bit r/w	Source Address 1 (high) */
+	GM_SRC_ADDR_2L	= 0x0028,	/* 16 bit r/w	Source Address 2 (low) */
+	GM_SRC_ADDR_2M	= 0x002c,	/* 16 bit r/w	Source Address 2 (middle) */
+	GM_SRC_ADDR_2H	= 0x0030,	/* 16 bit r/w	Source Address 2 (high) */
+
+/* Multicast Address Hash Registers */
+	GM_MC_ADDR_H1	= 0x0034,	/* 16 bit r/w	Multicast Address Hash 1 */
+	GM_MC_ADDR_H2	= 0x0038,	/* 16 bit r/w	Multicast Address Hash 2 */
+	GM_MC_ADDR_H3	= 0x003c,	/* 16 bit r/w	Multicast Address Hash 3 */
+	GM_MC_ADDR_H4	= 0x0040,	/* 16 bit r/w	Multicast Address Hash 4 */
+
+/* Interrupt Source Registers */
+	GM_TX_IRQ_SRC	= 0x0044,	/* 16 bit r/o	Tx Overflow IRQ Source */
+	GM_RX_IRQ_SRC	= 0x0048,	/* 16 bit r/o	Rx Overflow IRQ Source */
+	GM_TR_IRQ_SRC	= 0x004c,	/* 16 bit r/o	Tx/Rx Over. IRQ Source */
+
+/* Interrupt Mask Registers */
+	GM_TX_IRQ_MSK	= 0x0050,	/* 16 bit r/w	Tx Overflow IRQ Mask */
+	GM_RX_IRQ_MSK	= 0x0054,	/* 16 bit r/w	Rx Overflow IRQ Mask */
+	GM_TR_IRQ_MSK	= 0x0058,	/* 16 bit r/w	Tx/Rx Over. IRQ Mask */
+
+/* Serial Management Interface (SMI) Registers */
+	GM_SMI_CTRL	= 0x0080,	/* 16 bit r/w	SMI Control Register */
+	GM_SMI_DATA	= 0x0084,	/* 16 bit r/w	SMI Data Register */
+	GM_PHY_ADDR	= 0x0088,	/* 16 bit r/w	GPHY Address Register */
+/* MIB Counters */
+	GM_MIB_CNT_BASE	= 0x0100,	/* Base Address of MIB Counters */
+	GM_MIB_CNT_END	= 0x025C,	/* Last MIB counter */
+};
+
+
+/*
+ * MIB Counters base address definitions (low word) -
+ * use offset 4 for access to high word	(32 bit r/o)
+ */
+enum {
+	GM_RXF_UC_OK    = GM_MIB_CNT_BASE + 0,	/* Unicast Frames Received OK */
+	GM_RXF_BC_OK	= GM_MIB_CNT_BASE + 8,	/* Broadcast Frames Received OK */
+	GM_RXF_MPAUSE	= GM_MIB_CNT_BASE + 16,	/* Pause MAC Ctrl Frames Received */
+	GM_RXF_MC_OK	= GM_MIB_CNT_BASE + 24,	/* Multicast Frames Received OK */
+	GM_RXF_FCS_ERR	= GM_MIB_CNT_BASE + 32,	/* Rx Frame Check Seq. Error */
+
+	GM_RXO_OK_LO	= GM_MIB_CNT_BASE + 48,	/* Octets Received OK Low */
+	GM_RXO_OK_HI	= GM_MIB_CNT_BASE + 56,	/* Octets Received OK High */
+	GM_RXO_ERR_LO	= GM_MIB_CNT_BASE + 64,	/* Octets Received Invalid Low */
+	GM_RXO_ERR_HI	= GM_MIB_CNT_BASE + 72,	/* Octets Received Invalid High */
+	GM_RXF_SHT	= GM_MIB_CNT_BASE + 80,	/* Frames <64 Byte Received OK */
+	GM_RXE_FRAG	= GM_MIB_CNT_BASE + 88,	/* Frames <64 Byte Received with FCS Err */
+	GM_RXF_64B	= GM_MIB_CNT_BASE + 96,	/* 64 Byte Rx Frame */
+	GM_RXF_127B	= GM_MIB_CNT_BASE + 104,/* 65-127 Byte Rx Frame */
+	GM_RXF_255B	= GM_MIB_CNT_BASE + 112,/* 128-255 Byte Rx Frame */
+	GM_RXF_511B	= GM_MIB_CNT_BASE + 120,/* 256-511 Byte Rx Frame */
+	GM_RXF_1023B	= GM_MIB_CNT_BASE + 128,/* 512-1023 Byte Rx Frame */
+	GM_RXF_1518B	= GM_MIB_CNT_BASE + 136,/* 1024-1518 Byte Rx Frame */
+	GM_RXF_MAX_SZ	= GM_MIB_CNT_BASE + 144,/* 1519-MaxSize Byte Rx Frame */
+	GM_RXF_LNG_ERR	= GM_MIB_CNT_BASE + 152,/* Rx Frame too Long Error */
+	GM_RXF_JAB_PKT	= GM_MIB_CNT_BASE + 160,/* Rx Jabber Packet Frame */
+
+	GM_RXE_FIFO_OV	= GM_MIB_CNT_BASE + 176,/* Rx FIFO overflow Event */
+	GM_TXF_UC_OK	= GM_MIB_CNT_BASE + 192,/* Unicast Frames Xmitted OK */
+	GM_TXF_BC_OK	= GM_MIB_CNT_BASE + 200,/* Broadcast Frames Xmitted OK */
+	GM_TXF_MPAUSE	= GM_MIB_CNT_BASE + 208,/* Pause MAC Ctrl Frames Xmitted */
+	GM_TXF_MC_OK	= GM_MIB_CNT_BASE + 216,/* Multicast Frames Xmitted OK */
+	GM_TXO_OK_LO	= GM_MIB_CNT_BASE + 224,/* Octets Transmitted OK Low */
+	GM_TXO_OK_HI	= GM_MIB_CNT_BASE + 232,/* Octets Transmitted OK High */
+	GM_TXF_64B	= GM_MIB_CNT_BASE + 240,/* 64 Byte Tx Frame */
+	GM_TXF_127B	= GM_MIB_CNT_BASE + 248,/* 65-127 Byte Tx Frame */
+	GM_TXF_255B	= GM_MIB_CNT_BASE + 256,/* 128-255 Byte Tx Frame */
+	GM_TXF_511B	= GM_MIB_CNT_BASE + 264,/* 256-511 Byte Tx Frame */
+	GM_TXF_1023B	= GM_MIB_CNT_BASE + 272,/* 512-1023 Byte Tx Frame */
+	GM_TXF_1518B	= GM_MIB_CNT_BASE + 280,/* 1024-1518 Byte Tx Frame */
+	GM_TXF_MAX_SZ	= GM_MIB_CNT_BASE + 288,/* 1519-MaxSize Byte Tx Frame */
+
+	GM_TXF_COL	= GM_MIB_CNT_BASE + 304,/* Tx Collision */
+	GM_TXF_LAT_COL	= GM_MIB_CNT_BASE + 312,/* Tx Late Collision */
+	GM_TXF_ABO_COL	= GM_MIB_CNT_BASE + 320,/* Tx aborted due to Exces. Col. */
+	GM_TXF_MUL_COL	= GM_MIB_CNT_BASE + 328,/* Tx Multiple Collision */
+	GM_TXF_SNG_COL	= GM_MIB_CNT_BASE + 336,/* Tx Single Collision */
+	GM_TXE_FIFO_UR	= GM_MIB_CNT_BASE + 344,/* Tx FIFO Underrun Event */
+};
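+/*
+ * Example: the octet counters are 64 bits wide (the _LO/_HI pair, four
+ * 16 bit words); get_stats64()/get_stats32() below assemble and
+ * stabilize the reads (a sketch of the driver's stats path):
+ *
+ *	u64 rx_bytes   = get_stats64(hw, port, GM_RXO_OK_LO);
+ *	u32 rx_unicast = get_stats32(hw, port, GM_RXF_UC_OK);
+ */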
+
+/* GMAC Bit Definitions */
+/*	GM_GP_STAT	16 bit r/o	General Purpose Status Register */
+enum {
+	GM_GPSR_SPEED		= 1<<15, /* Bit 15:	Port Speed (1 = 100 Mbps) */
+	GM_GPSR_DUPLEX		= 1<<14, /* Bit 14:	Duplex Mode (1 = Full) */
+	GM_GPSR_FC_TX_DIS	= 1<<13, /* Bit 13:	Tx Flow-Control Mode Disabled */
+	GM_GPSR_LINK_UP		= 1<<12, /* Bit 12:	Link Up Status */
+	GM_GPSR_PAUSE		= 1<<11, /* Bit 11:	Pause State */
+	GM_GPSR_TX_ACTIVE	= 1<<10, /* Bit 10:	Tx in Progress */
+	GM_GPSR_EXC_COL		= 1<<9,	/* Bit  9:	Excessive Collisions Occurred */
+	GM_GPSR_LAT_COL		= 1<<8,	/* Bit  8:	Late Collisions Occurred */
+
+	GM_GPSR_PHY_ST_CH	= 1<<5,	/* Bit  5:	PHY Status Change */
+	GM_GPSR_GIG_SPEED	= 1<<4,	/* Bit  4:	Gigabit Speed (1 = 1000 Mbps) */
+	GM_GPSR_PART_MODE	= 1<<3,	/* Bit  3:	Partition mode */
+	GM_GPSR_FC_RX_DIS	= 1<<2,	/* Bit  2:	Rx Flow-Control Mode Disabled */
+	GM_GPSR_PROM_EN		= 1<<1,	/* Bit  1:	Promiscuous Mode Enabled */
+};
+
+/*	GM_GP_CTRL	16 bit r/w	General Purpose Control Register */
+enum {
+	GM_GPCR_PROM_ENA	= 1<<14,	/* Bit 14:	Enable Promiscuous Mode */
+	GM_GPCR_FC_TX_DIS	= 1<<13, /* Bit 13:	Disable Tx Flow-Control Mode */
+	GM_GPCR_TX_ENA		= 1<<12, /* Bit 12:	Enable Transmit */
+	GM_GPCR_RX_ENA		= 1<<11, /* Bit 11:	Enable Receive */
+	GM_GPCR_BURST_ENA	= 1<<10, /* Bit 10:	Enable Burst Mode */
+	GM_GPCR_LOOP_ENA	= 1<<9,	/* Bit  9:	Enable MAC Loopback Mode */
+	GM_GPCR_PART_ENA	= 1<<8,	/* Bit  8:	Enable Partition Mode */
+	GM_GPCR_GIGS_ENA	= 1<<7,	/* Bit  7:	Gigabit Speed (1000 Mbps) */
+	GM_GPCR_FL_PASS		= 1<<6,	/* Bit  6:	Force Link Pass */
+	GM_GPCR_DUP_FULL	= 1<<5,	/* Bit  5:	Full Duplex Mode */
+	GM_GPCR_FC_RX_DIS	= 1<<4,	/* Bit  4:	Disable Rx Flow-Control Mode */
+	GM_GPCR_SPEED_100	= 1<<3,   /* Bit  3:	Port Speed 100 Mbps */
+	GM_GPCR_AU_DUP_DIS	= 1<<2,	/* Bit  2:	Disable Auto-Update Duplex */
+	GM_GPCR_AU_FCT_DIS	= 1<<1,	/* Bit  1:	Disable Auto-Update Flow-C. */
+	GM_GPCR_AU_SPD_DIS	= 1<<0,	/* Bit  0:	Disable Auto-Update Speed */
+};
+
+#define GM_GPCR_SPEED_1000	(GM_GPCR_GIGS_ENA | GM_GPCR_SPEED_100)
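+/*
+ * Example: with auto-negotiation disabled, speed and duplex are forced
+ * through GM_GP_CTRL while the auto-update bits are masked (a sketch of
+ * the driver's MAC init):
+ *
+ *	u16 reg = gma_read16(hw, port, GM_GP_CTRL);
+ *	reg |= GM_GPCR_SPEED_1000 | GM_GPCR_DUP_FULL |
+ *	       GM_GPCR_AU_SPD_DIS | GM_GPCR_AU_DUP_DIS;
+ *	gma_write16(hw, port, GM_GP_CTRL, reg);
+ */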
+
+/*	GM_TX_CTRL			16 bit r/w	Transmit Control Register */
+enum {
+	GM_TXCR_FORCE_JAM	= 1<<15, /* Bit 15:	Force Jam / Flow-Control */
+	GM_TXCR_CRC_DIS		= 1<<14, /* Bit 14:	Disable insertion of CRC */
+	GM_TXCR_PAD_DIS		= 1<<13, /* Bit 13:	Disable padding of packets */
+	GM_TXCR_COL_THR_MSK	= 7<<10, /* Bit 12..10:	Collision Threshold */
+};
+
+#define TX_COL_THR(x)		(((x)<<10) & GM_TXCR_COL_THR_MSK)
+#define TX_COL_DEF		0x04
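+/*
+ * Example: program the default collision threshold (as done during
+ * MAC init):
+ *
+ *	gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
+ */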
+
+/*	GM_RX_CTRL			16 bit r/w	Receive Control Register */
+enum {
+	GM_RXCR_UCF_ENA	= 1<<15, /* Bit 15:	Enable Unicast filtering */
+	GM_RXCR_MCF_ENA	= 1<<14, /* Bit 14:	Enable Multicast filtering */
+	GM_RXCR_CRC_DIS	= 1<<13, /* Bit 13:	Remove 4-byte CRC */
+	GM_RXCR_PASS_FC	= 1<<12, /* Bit 12:	Pass FC packets to FIFO */
+};
+
+/*	GM_TX_PARAM		16 bit r/w	Transmit Parameter Register */
+enum {
+	GM_TXPA_JAMLEN_MSK	= 0x03<<14,	/* Bit 15..14:	Jam Length */
+	GM_TXPA_JAMIPG_MSK	= 0x1f<<9,	/* Bit 13..9:	Jam IPG */
+	GM_TXPA_JAMDAT_MSK	= 0x1f<<4,	/* Bit  8..4:	IPG Jam to Data */
+	GM_TXPA_BO_LIM_MSK	= 0x0f,		/* Bit  3.. 0: Backoff Limit Mask */
+
+	TX_JAM_LEN_DEF		= 0x03,
+	TX_JAM_IPG_DEF		= 0x0b,
+	TX_IPG_JAM_DEF		= 0x1c,
+	TX_BOF_LIM_DEF		= 0x04,
+};
+
+#define TX_JAM_LEN_VAL(x)	(((x)<<14) & GM_TXPA_JAMLEN_MSK)
+#define TX_JAM_IPG_VAL(x)	(((x)<<9)  & GM_TXPA_JAMIPG_MSK)
+#define TX_IPG_JAM_DATA(x)	(((x)<<4)  & GM_TXPA_JAMDAT_MSK)
+#define TX_BACK_OFF_LIM(x)	((x) & GM_TXPA_BO_LIM_MSK)
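+/*
+ * Example: load the default jam/backoff parameters into GM_TX_PARAM
+ * (a sketch of the driver's MAC init):
+ *
+ *	gma_write16(hw, port, GM_TX_PARAM,
+ *		    TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
+ *		    TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
+ *		    TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) |
+ *		    TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
+ */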
+
+
+/*	GM_SERIAL_MODE			16 bit r/w	Serial Mode Register */
+enum {
+	GM_SMOD_DATABL_MSK	= 0x1f<<11, /* Bit 15..11:	Data Blinder (r/o) */
+	GM_SMOD_LIMIT_4		= 1<<10, /* 4 consecutive Tx trials */
+	GM_SMOD_VLAN_ENA	= 1<<9,	 /* Enable VLAN  (Max. Frame Len) */
+	GM_SMOD_JUMBO_ENA	= 1<<8,	 /* Enable Jumbo (Max. Frame Len) */
+
+	GM_NEW_FLOW_CTRL	= 1<<6,	 /* Enable New Flow-Control */
+
+	GM_SMOD_IPG_MSK		= 0x1f	 /* Bit 4..0:	Inter-Packet Gap (IPG) */
+};
+
+#define DATA_BLIND_VAL(x)	(((x)<<11) & GM_SMOD_DATABL_MSK)
+#define IPG_DATA_VAL(x)		((x) & GM_SMOD_IPG_MSK)
+
+#define DATA_BLIND_DEF		0x04
+#define IPG_DATA_DEF_1000	0x1e
+#define IPG_DATA_DEF_10_100	0x18
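+/*
+ * Example: serial mode setup with the default data blinder and an IPG
+ * matching the link speed (a sketch; the driver also enables VLAN-sized
+ * frames here):
+ *
+ *	gma_write16(hw, port, GM_SERIAL_MODE,
+ *		    DATA_BLIND_VAL(DATA_BLIND_DEF) |
+ *		    GM_SMOD_VLAN_ENA |
+ *		    IPG_DATA_VAL(IPG_DATA_DEF_1000));
+ */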
+
+/*	GM_SMI_CTRL			16 bit r/w	SMI Control Register */
+enum {
+	GM_SMI_CT_PHY_A_MSK	= 0x1f<<11,/* Bit 15..11:	PHY Device Address */
+	GM_SMI_CT_REG_A_MSK	= 0x1f<<6,/* Bit 10.. 6:	PHY Register Address */
+	GM_SMI_CT_OP_RD		= 1<<5,	/* Bit  5:	OpCode Read (0=Write)*/
+	GM_SMI_CT_RD_VAL	= 1<<4,	/* Bit  4:	Read Valid (Read completed) */
+	GM_SMI_CT_BUSY		= 1<<3,	/* Bit  3:	Busy (Operation in progress) */
+};
+
+#define GM_SMI_CT_PHY_AD(x)	(((u16)(x)<<11) & GM_SMI_CT_PHY_A_MSK)
+#define GM_SMI_CT_REG_AD(x)	(((u16)(x)<<6) & GM_SMI_CT_REG_A_MSK)
+
+/*	GM_PHY_ADDR				16 bit r/w	GPHY Address Register */
+enum {
+	GM_PAR_MIB_CLR	= 1<<5,	/* Bit  5:	Set MIB Clear Counter Mode */
+	GM_PAR_MIB_TST	= 1<<4,	/* Bit  4:	MIB Load Counter (Test Mode) */
+};
+
+/* Receive Frame Status Encoding */
+enum {
+	GMR_FS_LEN	= 0x7fff<<16, /* Bit 30..16:	Rx Frame Length */
+	GMR_FS_VLAN	= 1<<13, /* VLAN Packet */
+	GMR_FS_JABBER	= 1<<12, /* Jabber Packet */
+	GMR_FS_UN_SIZE	= 1<<11, /* Undersize Packet */
+	GMR_FS_MC	= 1<<10, /* Multicast Packet */
+	GMR_FS_BC	= 1<<9,  /* Broadcast Packet */
+	GMR_FS_RX_OK	= 1<<8,  /* Receive OK (Good Packet) */
+	GMR_FS_GOOD_FC	= 1<<7,  /* Good Flow-Control Packet */
+	GMR_FS_BAD_FC	= 1<<6,  /* Bad  Flow-Control Packet */
+	GMR_FS_MII_ERR	= 1<<5,  /* MII Error */
+	GMR_FS_LONG_ERR	= 1<<4,  /* Too Long Packet */
+	GMR_FS_FRAGMENT	= 1<<3,  /* Fragment */
+
+	GMR_FS_CRC_ERR	= 1<<1,  /* CRC Error */
+	GMR_FS_RX_FF_OV	= 1<<0,  /* Rx FIFO Overflow */
+
+	GMR_FS_ANY_ERR	= GMR_FS_RX_FF_OV | GMR_FS_CRC_ERR |
+			  GMR_FS_FRAGMENT | GMR_FS_LONG_ERR |
+			  GMR_FS_MII_ERR | GMR_FS_BAD_FC |
+			  GMR_FS_UN_SIZE | GMR_FS_JABBER,
+};
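+/*
+ * Example: decoding a receive status word (a sketch of the receive path
+ * in sky2.c; the upper half is the GMR_FS_LEN field):
+ *
+ *	unsigned length = status >> 16;
+ *	if (!(status & GMR_FS_RX_OK) || (status & GMR_FS_ANY_ERR))
+ *		goto error;
+ */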
+
+/*	RX_GMF_CTRL_T	32 bit	Rx GMAC FIFO Control/Test */
+enum {
+	RX_GCLKMAC_ENA	= 1<<31,	/* RX MAC Clock Gating Enable */
+	RX_GCLKMAC_OFF	= 1<<30,	/* RX MAC Clock Gating Off */
+
+	RX_STFW_DIS	= 1<<29,	/* RX Store and Forward Disable */
+	RX_STFW_ENA	= 1<<28,	/* RX Store and Forward Enable */
+
+	RX_TRUNC_ON	= 1<<27,  	/* enable  packet truncation */
+	RX_TRUNC_OFF	= 1<<26, 	/* disable packet truncation */
+	RX_VLAN_STRIP_ON = 1<<25,	/* enable  VLAN stripping */
+	RX_VLAN_STRIP_OFF = 1<<24,	/* disable VLAN stripping */
+
+	RX_MACSEC_FLUSH_ON  = 1<<23,
+	RX_MACSEC_FLUSH_OFF = 1<<22,
+	RX_MACSEC_ASF_FLUSH_ON = 1<<21,
+	RX_MACSEC_ASF_FLUSH_OFF = 1<<20,
+
+	GMF_RX_OVER_ON      = 1<<19,	/* enable flushing on receive overrun */
+	GMF_RX_OVER_OFF     = 1<<18,	/* disable flushing on receive overrun */
+	GMF_ASF_RX_OVER_ON  = 1<<17,	/* enable flushing of ASF when overrun */
+	GMF_ASF_RX_OVER_OFF = 1<<16,	/* disable flushing of ASF when overrun */
+
+	GMF_WP_TST_ON	= 1<<14,	/* Write Pointer Test On */
+	GMF_WP_TST_OFF	= 1<<13,	/* Write Pointer Test Off */
+	GMF_WP_STEP	= 1<<12,	/* Write Pointer Step/Increment */
+
+	GMF_RP_TST_ON	= 1<<10,	/* Read Pointer Test On */
+	GMF_RP_TST_OFF	= 1<<9,		/* Read Pointer Test Off */
+	GMF_RP_STEP	= 1<<8,		/* Read Pointer Step/Increment */
+	GMF_RX_F_FL_ON	= 1<<7,		/* Rx FIFO Flush Mode On */
+	GMF_RX_F_FL_OFF	= 1<<6,		/* Rx FIFO Flush Mode Off */
+	GMF_CLI_RX_FO	= 1<<5,		/* Clear IRQ Rx FIFO Overrun */
+	GMF_CLI_RX_C	= 1<<4,		/* Clear IRQ Rx Frame Complete */
+
+	GMF_OPER_ON	= 1<<3,		/* Operational Mode On */
+	GMF_OPER_OFF	= 1<<2,		/* Operational Mode Off */
+	GMF_RST_CLR	= 1<<1,		/* Clear GMAC FIFO Reset */
+	GMF_RST_SET	= 1<<0,		/* Set   GMAC FIFO Reset */
+
+	RX_GMF_FL_THR_DEF = 0xa,	/* flush threshold (default) */
+
+	GMF_RX_CTRL_DEF	= GMF_OPER_ON | GMF_RX_F_FL_ON,
+};
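+/*
+ * Example: bring the Rx GMAC FIFO out of reset and into its default
+ * operating mode (a sketch of the driver's MAC init; SK_REG() maps
+ * per-port registers and is defined elsewhere in this header):
+ *
+ *	sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
+ *	sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RX_CTRL_DEF);
+ */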
+
+/*	RX_GMF_FL_CTRL	16 bit	Rx GMAC FIFO Flush Control (Yukon-Supreme) */
+enum {
+	RX_IPV6_SA_MOB_ENA	= 1<<9,	/* IPv6 SA Mobility Support Enable */
+	RX_IPV6_SA_MOB_DIS	= 1<<8,	/* IPv6 SA Mobility Support Disable */
+	RX_IPV6_DA_MOB_ENA	= 1<<7,	/* IPv6 DA Mobility Support Enable */
+	RX_IPV6_DA_MOB_DIS	= 1<<6,	/* IPv6 DA Mobility Support Disable */
+	RX_PTR_SYNCDLY_ENA	= 1<<5,	/* Pointers Delay Synch Enable */
+	RX_PTR_SYNCDLY_DIS	= 1<<4,	/* Pointers Delay Synch Disable */
+	RX_ASF_NEWFLAG_ENA	= 1<<3,	/* RX ASF Flag New Logic Enable */
+	RX_ASF_NEWFLAG_DIS	= 1<<2,	/* RX ASF Flag New Logic Disable */
+	RX_FLSH_MISSPKT_ENA	= 1<<1,	/* RX Flush Miss-Packet Enable */
+	RX_FLSH_MISSPKT_DIS	= 1<<0,	/* RX Flush Miss-Packet Disable */
+};
+
+/*	TX_GMF_EA		32 bit	Tx GMAC FIFO End Address */
+enum {
+	TX_DYN_WM_ENA	= 3,	/* Yukon-FE+ specific */
+};
+
+/*	TX_GMF_CTRL_T	32 bit	Tx GMAC FIFO Control/Test */
+enum {
+	TX_STFW_DIS	= 1<<31,/* Disable Store & Forward */
+	TX_STFW_ENA	= 1<<30,/* Enable  Store & Forward */
+
+	TX_VLAN_TAG_ON	= 1<<25,/* enable  VLAN tagging */
+	TX_VLAN_TAG_OFF	= 1<<24,/* disable VLAN tagging */
+
+	TX_PCI_JUM_ENA  = 1<<23,/* PCI Jumbo Mode enable */
+	TX_PCI_JUM_DIS  = 1<<22,/* PCI Jumbo Mode disable */
+
+	GMF_WSP_TST_ON	= 1<<18,/* Write Shadow Pointer Test On */
+	GMF_WSP_TST_OFF	= 1<<17,/* Write Shadow Pointer Test Off */
+	GMF_WSP_STEP	= 1<<16,/* Write Shadow Pointer Step/Increment */
+
+	GMF_CLI_TX_FU	= 1<<6,	/* Clear IRQ Tx FIFO Underrun */
+	GMF_CLI_TX_FC	= 1<<5,	/* Clear IRQ Tx Frame Complete */
+	GMF_CLI_TX_PE	= 1<<4,	/* Clear IRQ Tx Parity Error */
+};
+
+/*	GMAC_TI_ST_CTRL	 8 bit	Time Stamp Timer Ctrl Reg (YUKON only) */
+enum {
+	GMT_ST_START	= 1<<2,	/* Start Time Stamp Timer */
+	GMT_ST_STOP	= 1<<1,	/* Stop  Time Stamp Timer */
+	GMT_ST_CLR_IRQ	= 1<<0,	/* Clear Time Stamp Timer IRQ */
+};
+
+/* B28_Y2_ASF_STAT_CMD		32 bit	ASF Status and Command Reg */
+enum {
+	Y2_ASF_OS_PRES	= 1<<4,	/* ASF operating system present */
+	Y2_ASF_RESET	= 1<<3,	/* ASF system in reset state */
+	Y2_ASF_RUNNING	= 1<<2,	/* ASF system operational */
+	Y2_ASF_CLR_HSTI = 1<<1,	/* Clear ASF IRQ */
+	Y2_ASF_IRQ	= 1<<0,	/* Issue an IRQ to ASF system */
+
+	Y2_ASF_UC_STATE = 3<<2,	/* ASF uC State */
+	Y2_ASF_CLK_HALT	= 0,	/* ASF system clock stopped */
+};
+
+/* B28_Y2_ASF_HOST_COM	32 bit	ASF Host Communication Reg */
+enum {
+	Y2_ASF_CLR_ASFI = 1<<1,	/* Clear host IRQ */
+	Y2_ASF_HOST_IRQ = 1<<0,	/* Issue an IRQ to HOST system */
+};
+/*	HCU_CCSR	CPU Control and Status Register */
+enum {
+	HCU_CCSR_SMBALERT_MONITOR= 1<<27, /* SMBALERT pin monitor */
+	HCU_CCSR_CPU_SLEEP	= 1<<26, /* CPU sleep status */
+	/* Clock Stretching Timeout */
+	HCU_CCSR_CS_TO		= 1<<25,
+	HCU_CCSR_WDOG		= 1<<24, /* Watchdog Reset */
+
+	HCU_CCSR_CLR_IRQ_HOST	= 1<<17, /* Clear IRQ_HOST */
+	HCU_CCSR_SET_IRQ_HCU	= 1<<16, /* Set IRQ_HCU */
+
+	HCU_CCSR_AHB_RST	= 1<<9, /* Reset AHB bridge */
+	HCU_CCSR_CPU_RST_MODE	= 1<<8, /* CPU Reset Mode */
+
+	HCU_CCSR_SET_SYNC_CPU	= 1<<5,
+	HCU_CCSR_CPU_CLK_DIVIDE_MSK = 3<<3,/* CPU Clock Divide */
+	HCU_CCSR_CPU_CLK_DIVIDE_BASE= 1<<3,
+	HCU_CCSR_OS_PRSNT	= 1<<2, /* ASF OS Present */
+/* Microcontroller State */
+	HCU_CCSR_UC_STATE_MSK	= 3,
+	HCU_CCSR_UC_STATE_BASE	= 1<<0,
+	HCU_CCSR_ASF_RESET	= 0,
+	HCU_CCSR_ASF_HALTED	= 1<<1,
+	HCU_CCSR_ASF_RUNNING	= 1<<0,
+};
+
+/*	HCU_HCSR	Host Control and Status Register */
+enum {
+	HCU_HCSR_SET_IRQ_CPU	= 1<<16, /* Set IRQ_CPU */
+
+	HCU_HCSR_CLR_IRQ_HCU	= 1<<1, /* Clear IRQ_HCU */
+	HCU_HCSR_SET_IRQ_HOST	= 1<<0,	/* Set IRQ_HOST */
+};
+
+/*	STAT_CTRL		32 bit	Status BMU control register (Yukon-2 only) */
+enum {
+	SC_STAT_CLR_IRQ	= 1<<4,	/* Status Burst IRQ clear */
+	SC_STAT_OP_ON	= 1<<3,	/* Operational Mode On */
+	SC_STAT_OP_OFF	= 1<<2,	/* Operational Mode Off */
+	SC_STAT_RST_CLR	= 1<<1,	/* Clear Status Unit Reset (Enable) */
+	SC_STAT_RST_SET	= 1<<0,	/* Set   Status Unit Reset */
+};
+
+/*	GMAC_CTRL		32 bit	GMAC Control Reg (YUKON only) */
+enum {
+	GMC_SET_RST	    = 1<<15,/* MAC SEC RST */
+	GMC_SEC_RST_OFF     = 1<<14,/* MAC SEC RST OFF */
+	GMC_BYP_MACSECRX_ON = 1<<13,/* Bypass macsec RX */
+	GMC_BYP_MACSECRX_OFF= 1<<12,/* Bypass macsec RX off */
+	GMC_BYP_MACSECTX_ON = 1<<11,/* Bypass macsec TX */
+	GMC_BYP_MACSECTX_OFF= 1<<10,/* Bypass macsec TX  off*/
+	GMC_BYP_RETR_ON	= 1<<9, /* Bypass retransmit FIFO On */
+	GMC_BYP_RETR_OFF= 1<<8, /* Bypass retransmit FIFO Off */
+
+	GMC_H_BURST_ON	= 1<<7,	/* Half Duplex Burst Mode On */
+	GMC_H_BURST_OFF	= 1<<6,	/* Half Duplex Burst Mode Off */
+	GMC_F_LOOPB_ON	= 1<<5,	/* FIFO Loopback On */
+	GMC_F_LOOPB_OFF	= 1<<4,	/* FIFO Loopback Off */
+	GMC_PAUSE_ON	= 1<<3,	/* Pause On */
+	GMC_PAUSE_OFF	= 1<<2,	/* Pause Off */
+	GMC_RST_CLR	= 1<<1,	/* Clear GMAC Reset */
+	GMC_RST_SET	= 1<<0,	/* Set   GMAC Reset */
+};
+
+/*	GPHY_CTRL		32 bit	GPHY Control Reg (YUKON only) */
+enum {
+	GPC_TX_PAUSE	= 1<<30, /* Tx pause enabled (ro) */
+	GPC_RX_PAUSE	= 1<<29, /* Rx pause enabled (ro) */
+	GPC_SPEED	= 3<<27, /* PHY speed (ro) */
+	GPC_LINK	= 1<<26, /* Link up (ro) */
+	GPC_DUPLEX	= 1<<25, /* Duplex (ro) */
+	GPC_CLOCK	= 1<<24, /* 125 MHz clock stable (ro) */
+
+	GPC_PDOWN	= 1<<23, /* Internal regulator 2.5V power down */
+	GPC_TSTMODE	= 1<<22, /* Test mode */
+	GPC_REG18	= 1<<21, /* Reg18 Power down */
+	GPC_REG12SEL	= 3<<19, /* Reg12 power setting */
+	GPC_REG18SEL	= 3<<17, /* Reg18 power setting */
+	GPC_SPILOCK	= 1<<16, /* SPI lock (ASF) */
+
+	GPC_LEDMUX	= 3<<14, /* LED Mux */
+	GPC_INTPOL	= 1<<13, /* Interrupt polarity */
+	GPC_DETECT	= 1<<12, /* Energy detect */
+	GPC_1000HD	= 1<<11, /* Enable 1000Mbit HD */
+	GPC_SLAVE	= 1<<10, /* Slave mode */
+	GPC_PAUSE	= 1<<9, /* Pause enable */
+	GPC_LEDCTL	= 3<<6, /* GPHY Leds */
+
+	GPC_RST_CLR	= 1<<1,	/* Clear GPHY Reset */
+	GPC_RST_SET	= 1<<0,	/* Set   GPHY Reset */
+};
+
+/*	GMAC_IRQ_SRC	 8 bit	GMAC Interrupt Source Reg (YUKON only) */
+/*	GMAC_IRQ_MSK	 8 bit	GMAC Interrupt Mask   Reg (YUKON only) */
+enum {
+	GM_IS_TX_CO_OV	= 1<<5,	/* Transmit Counter Overflow IRQ */
+	GM_IS_RX_CO_OV	= 1<<4,	/* Receive Counter Overflow IRQ */
+	GM_IS_TX_FF_UR	= 1<<3,	/* Transmit FIFO Underrun */
+	GM_IS_TX_COMPL	= 1<<2,	/* Frame Transmission Complete */
+	GM_IS_RX_FF_OR	= 1<<1,	/* Receive FIFO Overrun */
+	GM_IS_RX_COMPL	= 1<<0,	/* Frame Reception Complete */
+
+#define GMAC_DEF_MSK     (GM_IS_TX_FF_UR | GM_IS_RX_FF_OR)
+};
+
+/*	GMAC_LINK_CTRL	16 bit	GMAC Link Control Reg (YUKON only) */
+enum {						/* Bits 15.. 2:	reserved */
+	GMLC_RST_CLR	= 1<<1,	/* Clear GMAC Link Reset */
+	GMLC_RST_SET	= 1<<0,	/* Set   GMAC Link Reset */
+};
+
+
+/*	WOL_CTRL_STAT	16 bit	WOL Control/Status Reg */
+enum {
+	WOL_CTL_LINK_CHG_OCC		= 1<<15,
+	WOL_CTL_MAGIC_PKT_OCC		= 1<<14,
+	WOL_CTL_PATTERN_OCC		= 1<<13,
+	WOL_CTL_CLEAR_RESULT		= 1<<12,
+	WOL_CTL_ENA_PME_ON_LINK_CHG	= 1<<11,
+	WOL_CTL_DIS_PME_ON_LINK_CHG	= 1<<10,
+	WOL_CTL_ENA_PME_ON_MAGIC_PKT	= 1<<9,
+	WOL_CTL_DIS_PME_ON_MAGIC_PKT	= 1<<8,
+	WOL_CTL_ENA_PME_ON_PATTERN	= 1<<7,
+	WOL_CTL_DIS_PME_ON_PATTERN	= 1<<6,
+	WOL_CTL_ENA_LINK_CHG_UNIT	= 1<<5,
+	WOL_CTL_DIS_LINK_CHG_UNIT	= 1<<4,
+	WOL_CTL_ENA_MAGIC_PKT_UNIT	= 1<<3,
+	WOL_CTL_DIS_MAGIC_PKT_UNIT	= 1<<2,
+	WOL_CTL_ENA_PATTERN_UNIT	= 1<<1,
+	WOL_CTL_DIS_PATTERN_UNIT	= 1<<0,
+};
+
+
+/* Control flags */
+enum {
+	UDPTCP	= 1<<0,
+	CALSUM	= 1<<1,
+	WR_SUM	= 1<<2,
+	INIT_SUM= 1<<3,
+	LOCK_SUM= 1<<4,
+	INS_VLAN= 1<<5,
+	EOP	= 1<<7,
+};
+
+enum {
+	HW_OWNER 	= 1<<7,
+	OP_TCPWRITE	= 0x11,
+	OP_TCPSTART	= 0x12,
+	OP_TCPINIT	= 0x14,
+	OP_TCPLCK	= 0x18,
+	OP_TCPCHKSUM	= OP_TCPSTART,
+	OP_TCPIS	= OP_TCPINIT | OP_TCPSTART,
+	OP_TCPLW	= OP_TCPLCK | OP_TCPWRITE,
+	OP_TCPLSW	= OP_TCPLCK | OP_TCPSTART | OP_TCPWRITE,
+	OP_TCPLISW	= OP_TCPLCK | OP_TCPINIT | OP_TCPSTART | OP_TCPWRITE,
+
+	OP_ADDR64	= 0x21,
+	OP_VLAN		= 0x22,
+	OP_ADDR64VLAN	= OP_ADDR64 | OP_VLAN,
+	OP_LRGLEN	= 0x24,
+	OP_LRGLENVLAN	= OP_LRGLEN | OP_VLAN,
+	OP_MSS		= 0x28,
+	OP_MSSVLAN	= OP_MSS | OP_VLAN,
+
+	OP_BUFFER	= 0x40,
+	OP_PACKET	= 0x41,
+	OP_LARGESEND	= 0x43,
+	OP_LSOV2	= 0x45,
+
+/* YUKON-2 STATUS opcodes defines */
+	OP_RXSTAT	= 0x60,
+	OP_RXTIMESTAMP	= 0x61,
+	OP_RXVLAN	= 0x62,
+	OP_RXCHKS	= 0x64,
+	OP_RXCHKSVLAN	= OP_RXCHKS | OP_RXVLAN,
+	OP_RXTIMEVLAN	= OP_RXTIMESTAMP | OP_RXVLAN,
+	OP_RSS_HASH	= 0x65,
+	OP_TXINDEXLE	= 0x68,
+	OP_MACSEC	= 0x6c,
+	OP_PUTIDX	= 0x70,
+};
+
+enum status_css {
+	CSS_TCPUDPCSOK	= 1<<7,	/* TCP / UDP checksum is ok */
+	CSS_ISUDP	= 1<<6, /* packet is a UDP packet */
+	CSS_ISTCP	= 1<<5, /* packet is a TCP packet */
+	CSS_ISIPFRAG	= 1<<4, /* packet is a TCP/UDP frag, CS calc not done */
+	CSS_ISIPV6	= 1<<3, /* packet is an IPv6 packet */
+	CSS_IPV4CSUMOK	= 1<<2, /* IPv4 header checksum is ok */
+	CSS_ISIPV4	= 1<<1, /* packet is an IPv4 packet */
+	CSS_LINK_BIT	= 1<<0, /* port number (legacy) */
+};
+
+/* Yukon 2 hardware interface */
+struct sky2_tx_le {
+	__le32	addr;
+	__le16	length;	/* also vlan tag or checksum start */
+	u8	ctrl;
+	u8	opcode;
+} __packed;
+
+struct sky2_rx_le {
+	__le32	addr;
+	__le16	length;
+	u8	ctrl;
+	u8	opcode;
+} __packed;
+
+struct sky2_status_le {
+	__le32	status;	/* also checksum */
+	__le16	length;	/* also vlan tag */
+	u8	css;
+	u8	opcode;
+} __packed;
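+/*
+ * Example: status ring processing checks HW_OWNER before consuming an
+ * entry, then dispatches on the opcode (a sketch of the status
+ * interrupt path in sky2.c):
+ *
+ *	u8 opcode = le->opcode;
+ *	if (!(opcode & HW_OWNER))
+ *		break;
+ *	le->opcode = 0;
+ *	switch (opcode & ~HW_OWNER) {
+ *	case OP_RXSTAT:
+ *		...
+ *	}
+ */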
+
+struct tx_ring_info {
+	struct sk_buff	*skb;
+	unsigned long flags;
+#define TX_MAP_SINGLE   0x0001
+#define TX_MAP_PAGE     0x0002
+	DEFINE_DMA_UNMAP_ADDR(mapaddr);
+	DEFINE_DMA_UNMAP_LEN(maplen);
+};
+
+struct rx_ring_info {
+	struct sk_buff	*skb;
+	dma_addr_t	data_addr;
+	DEFINE_DMA_UNMAP_LEN(data_size);
+	dma_addr_t	frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT];
+};
+
+enum flow_control {
+	FC_NONE	= 0,
+	FC_TX	= 1,
+	FC_RX	= 2,
+	FC_BOTH	= 3,
+};
+
+struct sky2_stats {
+	struct u64_stats_sync syncp;
+	u64		packets;
+	u64		bytes;
+};
+
+struct sky2_port {
+	struct sky2_hw	     *hw;
+	struct net_device    *netdev;
+	unsigned	     port;
+	u32		     msg_enable;
+	spinlock_t	     phy_lock;
+
+	struct tx_ring_info  *tx_ring;
+	struct sky2_tx_le    *tx_le;
+	struct sky2_stats    tx_stats;
+
+	u16		     tx_ring_size;
+	u16		     tx_cons;		/* next le to check */
+	u16		     tx_prod;		/* next le to use */
+	u16		     tx_next;		/* debug only */
+
+	u16		     tx_pending;
+	u16		     tx_last_mss;
+	u32		     tx_last_upper;
+	u32		     tx_tcpsum;
+
+	struct rx_ring_info  *rx_ring ____cacheline_aligned_in_smp;
+	struct sky2_rx_le    *rx_le;
+	struct sky2_stats    rx_stats;
+
+	u16		     rx_next;		/* next re to check */
+	u16		     rx_put;		/* next le index to use */
+	u16		     rx_pending;
+	u16		     rx_data_size;
+	u16		     rx_nfrags;
+
+	struct {
+		unsigned long last;
+		u32	mac_rp;
+		u8	mac_lev;
+		u8	fifo_rp;
+		u8	fifo_lev;
+	} check;
+
+	dma_addr_t	     rx_le_map;
+	dma_addr_t	     tx_le_map;
+
+	u16		     advertising;	/* ADVERTISED_ bits */
+	u16		     speed;		/* SPEED_1000, SPEED_100, ... */
+	u8		     wol;		/* WAKE_ bits */
+	u8		     duplex;		/* DUPLEX_HALF, DUPLEX_FULL */
+	u16		     flags;
+#define SKY2_FLAG_AUTO_SPEED		0x0002
+#define SKY2_FLAG_AUTO_PAUSE		0x0004
+
+	enum flow_control    flow_mode;
+	enum flow_control    flow_status;
+
+#ifdef CONFIG_SKY2_DEBUG
+	struct dentry	     *debugfs;
+#endif
+};
+
+struct sky2_hw {
+	void __iomem  	     *regs;
+	struct pci_dev	     *pdev;
+	struct napi_struct   napi;
+	struct net_device    *dev[2];
+	unsigned long	     flags;
+#define SKY2_HW_USE_MSI		0x00000001
+#define SKY2_HW_FIBRE_PHY	0x00000002
+#define SKY2_HW_GIGABIT		0x00000004
+#define SKY2_HW_NEWER_PHY	0x00000008
+#define SKY2_HW_RAM_BUFFER	0x00000010
+#define SKY2_HW_NEW_LE		0x00000020	/* new LSOv2 format */
+#define SKY2_HW_AUTO_TX_SUM	0x00000040	/* new IP decode for Tx */
+#define SKY2_HW_ADV_POWER_CTL	0x00000080	/* additional PHY power regs */
+#define SKY2_HW_RSS_BROKEN	0x00000100
+#define SKY2_HW_VLAN_BROKEN     0x00000200
+#define SKY2_HW_RSS_CHKSUM	0x00000400	/* RSS requires chksum */
+#define SKY2_HW_IRQ_SETUP	0x00000800
+
+	u8	     	     chip_id;
+	u8		     chip_rev;
+	u8		     pmd_type;
+	u8		     ports;
+
+	struct sky2_status_le *st_le;
+	u32		     st_size;
+	u32		     st_idx;
+	dma_addr_t   	     st_dma;
+
+	struct timer_list    watchdog_timer;
+	struct work_struct   restart_work;
+	wait_queue_head_t    msi_wait;
+
+	char		     irq_name[0];
+};
+
+static inline int sky2_is_copper(const struct sky2_hw *hw)
+{
+	return !(hw->flags & SKY2_HW_FIBRE_PHY);
+}
+
+/* Register accessor for memory mapped device */
+static inline u32 sky2_read32(const struct sky2_hw *hw, unsigned reg)
+{
+	return readl(hw->regs + reg);
+}
+
+static inline u16 sky2_read16(const struct sky2_hw *hw, unsigned reg)
+{
+	return readw(hw->regs + reg);
+}
+
+static inline u8 sky2_read8(const struct sky2_hw *hw, unsigned reg)
+{
+	return readb(hw->regs + reg);
+}
+
+static inline void sky2_write32(const struct sky2_hw *hw, unsigned reg, u32 val)
+{
+	writel(val, hw->regs + reg);
+}
+
+static inline void sky2_write16(const struct sky2_hw *hw, unsigned reg, u16 val)
+{
+	writew(val, hw->regs + reg);
+}
+
+static inline void sky2_write8(const struct sky2_hw *hw, unsigned reg, u8 val)
+{
+	writeb(val, hw->regs + reg);
+}
+
+/* Yukon PHY related registers */
+#define SK_GMAC_REG(port,reg) \
+	(BASE_GMAC_1 + (port) * (BASE_GMAC_2-BASE_GMAC_1) + (reg))
+#define GM_PHY_RETRIES	100
+
+static inline u16 gma_read16(const struct sky2_hw *hw, unsigned port, unsigned reg)
+{
+	return sky2_read16(hw, SK_GMAC_REG(port,reg));
+}
+
+static inline u32 gma_read32(struct sky2_hw *hw, unsigned port, unsigned reg)
+{
+	unsigned base = SK_GMAC_REG(port, reg);
+	return (u32) sky2_read16(hw, base)
+		| (u32) sky2_read16(hw, base+4) << 16;
+}
+
+static inline u64 gma_read64(struct sky2_hw *hw, unsigned port, unsigned reg)
+{
+	unsigned base = SK_GMAC_REG(port, reg);
+
+	return (u64) sky2_read16(hw, base)
+		| (u64) sky2_read16(hw, base+4) << 16
+		| (u64) sky2_read16(hw, base+8) << 32
+		| (u64) sky2_read16(hw, base+12) << 48;
+}
+
+/* There is no way to read the 32 bit MIB counters atomically, so reread
+ * until two consecutive reads match
+ */
+static inline u32 get_stats32(struct sky2_hw *hw, unsigned port, unsigned reg)
+{
+	u32 val;
+
+	do {
+		val = gma_read32(hw, port, reg);
+	} while (gma_read32(hw, port, reg) != val);
+
+	return val;
+}
+
+static inline u64 get_stats64(struct sky2_hw *hw, unsigned port, unsigned reg)
+{
+	u64 val;
+
+	do {
+		val = gma_read64(hw, port, reg);
+	} while (gma_read64(hw, port, reg) != val);
+
+	return val;
+}
+
+static inline void gma_write16(const struct sky2_hw *hw, unsigned port, int r, u16 v)
+{
+	sky2_write16(hw, SK_GMAC_REG(port,r), v);
+}
+
+static inline void gma_set_addr(struct sky2_hw *hw, unsigned port, unsigned reg,
+				    const u8 *addr)
+{
+	gma_write16(hw, port, reg,  (u16) addr[0] | ((u16) addr[1] << 8));
+	gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8));
+	gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8));
+}
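+/*
+ * Example: load the station address into both source address slots
+ * (a sketch of the driver's MAC init; dev_addr is the net_device MAC
+ * address):
+ *
+ *	gma_set_addr(hw, port, GM_SRC_ADDR_1L, netdev->dev_addr);
+ *	gma_set_addr(hw, port, GM_SRC_ADDR_2L, netdev->dev_addr);
+ */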
+
+/* PCI config space access */
+static inline u32 sky2_pci_read32(const struct sky2_hw *hw, unsigned reg)
+{
+	return sky2_read32(hw, Y2_CFG_SPC + reg);
+}
+
+static inline u16 sky2_pci_read16(const struct sky2_hw *hw, unsigned reg)
+{
+	return sky2_read16(hw, Y2_CFG_SPC + reg);
+}
+
+static inline void sky2_pci_write32(struct sky2_hw *hw, unsigned reg, u32 val)
+{
+	sky2_write32(hw, Y2_CFG_SPC + reg, val);
+}
+
+static inline void sky2_pci_write16(struct sky2_hw *hw, unsigned reg, u16 val)
+{
+	sky2_write16(hw, Y2_CFG_SPC + reg, val);
+}
+#endif