dpdk: remove old patches

Drop the dpdk-16.04 patch set (dpdk/dpdk-16.04_patches/) and the DPDK
16.04 tarball checksum from dpdk/Makefile; 16.04 is no longer among the
DPDK versions built here, so only the 16.07 and 16.11 checksums remain.

Change-Id: I31244207ca5420558c6ff00b2021126ff5628e08
Signed-off-by: Damjan Marion <damarion@cisco.com>
diff --git a/dpdk/Makefile b/dpdk/Makefile
index 5d072e5..2f5037d 100644
--- a/dpdk/Makefile
+++ b/dpdk/Makefile
@@ -28,7 +28,6 @@
DPDK_BASE_URL ?= http://fast.dpdk.org/rel
DPDK_TARBALL := dpdk-$(DPDK_VERSION).tar.xz
DPDK_TAR_URL := $(DPDK_BASE_URL)/$(DPDK_TARBALL)
-DPDK_16.04_TARBALL_MD5_CKSUM := d1f82e7d7589b3b2f623c155442b8306
DPDK_16.07_TARBALL_MD5_CKSUM := 690a2bb570103e58d12f9806e8bf21be
DPDK_16.11_TARBALL_MD5_CKSUM := 06c1c577795360719d0b4fafaeee21e9
DPDK_SOURCE := $(B)/dpdk-$(DPDK_VERSION)
diff --git a/dpdk/dpdk-16.04_patches/0001-e1000-Set-VLAN-Rx-Offload-tag-correctly.patch b/dpdk/dpdk-16.04_patches/0001-e1000-Set-VLAN-Rx-Offload-tag-correctly.patch
deleted file mode 100644
index 044a417..0000000
--- a/dpdk/dpdk-16.04_patches/0001-e1000-Set-VLAN-Rx-Offload-tag-correctly.patch
+++ /dev/null
@@ -1,75 +0,0 @@
-From c085c9f9a7332c63d002169581edc89ef99fdbb1 Mon Sep 17 00:00:00 2001
-From: Damjan Marion <damarion@cisco.com>
-Date: Wed, 16 Dec 2015 03:21:21 +0100
-Subject: [PATCH 1/6] e1000: Set VLAN Rx Offload tag correctly
-
----
- drivers/net/e1000/igb_rxtx.c | 30 ++++++++++++++++++++++++++++++
- lib/librte_ether/rte_ether.h | 3 +++
- 2 files changed, 33 insertions(+)
-
-diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
-index 4a987e3..d6a4ce5 100644
---- a/drivers/net/e1000/igb_rxtx.c
-+++ b/drivers/net/e1000/igb_rxtx.c
-@@ -904,6 +904,21 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
- pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
- pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
-+ {
-+ /*
-+ * Check packet for VLAN ethernet types and set
-+ * RX Offload flag PKT_RX_VLAN_PKT accordingly.
-+ */
-+ struct ether_hdr *eth_hdr =
-+ rte_pktmbuf_mtod(rxm, struct ether_hdr *);
-+ u16 eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
-+
-+ if ((eth_type == ETHER_TYPE_VLAN) ||
-+ (eth_type == ETHER_TYPE_VLAN_AD) ||
-+ (eth_type == ETHER_TYPE_VLAN_9100) ||
-+ (eth_type == ETHER_TYPE_VLAN_9200))
-+ pkt_flags |= PKT_RX_VLAN_PKT;
-+ }
- rxm->ol_flags = pkt_flags;
- rxm->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.lower.
- lo_dword.hs_rss.pkt_info);
-@@ -1140,6 +1155,21 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
- pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
- pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
-+ {
-+ /*
-+ * Check packet for VLAN ethernet types and set
-+ * RX Offload flag PKT_RX_VLAN_PKT accordingly.
-+ */
-+ struct ether_hdr *eth_hdr =
-+ rte_pktmbuf_mtod(rxm, struct ether_hdr *);
-+ u16 eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
-+
-+ if ((eth_type == ETHER_TYPE_VLAN) ||
-+ (eth_type == ETHER_TYPE_VLAN_AD) ||
-+ (eth_type == ETHER_TYPE_VLAN_9100) ||
-+ (eth_type == ETHER_TYPE_VLAN_9200))
-+ pkt_flags |= PKT_RX_VLAN_PKT;
-+ }
- first_seg->ol_flags = pkt_flags;
- first_seg->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.
- lower.lo_dword.hs_rss.pkt_info);
-diff --git a/lib/librte_ether/rte_ether.h b/lib/librte_ether/rte_ether.h
-index 1d62d8e..341121a 100644
---- a/lib/librte_ether/rte_ether.h
-+++ b/lib/librte_ether/rte_ether.h
-@@ -332,6 +332,9 @@ struct vxlan_hdr {
- #define ETHER_TYPE_1588 0x88F7 /**< IEEE 802.1AS 1588 Precise Time Protocol. */
- #define ETHER_TYPE_SLOW 0x8809 /**< Slow protocols (LACP and Marker). */
- #define ETHER_TYPE_TEB 0x6558 /**< Transparent Ethernet Bridging. */
-+#define ETHER_TYPE_VLAN_AD 0x88a8 /**< IEEE 802.1AD VLAN tagging. */
-+#define ETHER_TYPE_VLAN_9100 0x9100 /**< VLAN 0x9100 tagging. */
-+#define ETHER_TYPE_VLAN_9200 0x9200 /**< VLAN 0x9200 tagging. */
-
- #define ETHER_VXLAN_HLEN (sizeof(struct udp_hdr) + sizeof(struct vxlan_hdr))
- /**< VXLAN tunnel header length. */
---
-2.7.4
-
diff --git a/dpdk/dpdk-16.04_patches/0002-ixgbe-Wait-a-bit-longer-for-autonegotiation-to-leave.patch b/dpdk/dpdk-16.04_patches/0002-ixgbe-Wait-a-bit-longer-for-autonegotiation-to-leave.patch
deleted file mode 100644
index 4b38546..0000000
--- a/dpdk/dpdk-16.04_patches/0002-ixgbe-Wait-a-bit-longer-for-autonegotiation-to-leave.patch
+++ /dev/null
@@ -1,25 +0,0 @@
-From 8e1be5044b5ee29c8cb3921051fb6d0722b60651 Mon Sep 17 00:00:00 2001
-From: Damjan Marion <damarion@cisco.com>
-Date: Wed, 16 Dec 2015 03:22:11 +0100
-Subject: [PATCH 2/6] ixgbe: Wait a bit longer for autonegotiation to leave
-
----
- drivers/net/ixgbe/base/ixgbe_82599.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/drivers/net/ixgbe/base/ixgbe_82599.c b/drivers/net/ixgbe/base/ixgbe_82599.c
-index 154c1f1..817a8b5 100644
---- a/drivers/net/ixgbe/base/ixgbe_82599.c
-+++ b/drivers/net/ixgbe/base/ixgbe_82599.c
-@@ -2470,7 +2470,7 @@ s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
- autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT));
- /* Wait for AN to leave state 0 */
-- for (i = 0; i < 10; i++) {
-+ for (i = 0; i < 50; i++) {
- msec_delay(4);
- anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
- if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
---
-2.7.4
-
diff --git a/dpdk/dpdk-16.04_patches/0003-virtio-Cleanup-virtio-pmd-debug-log-output-reset.patch b/dpdk/dpdk-16.04_patches/0003-virtio-Cleanup-virtio-pmd-debug-log-output-reset.patch
deleted file mode 100644
index 8c53d0f..0000000
--- a/dpdk/dpdk-16.04_patches/0003-virtio-Cleanup-virtio-pmd-debug-log-output-reset.patch
+++ /dev/null
@@ -1,65 +0,0 @@
-From 1ee05e874eaa3f03ee7b5fbd6a32dff7304bd620 Mon Sep 17 00:00:00 2001
-From: Damjan Marion <damarion@cisco.com>
-Date: Wed, 16 Dec 2015 03:29:22 +0100
-Subject: [PATCH 3/6] virtio: Cleanup virtio pmd debug log output, reset
-
----
- drivers/net/virtio/virtio_ethdev.c | 5 -----
- drivers/net/virtio/virtio_rxtx.c | 4 +++-
- 2 files changed, 3 insertions(+), 6 deletions(-)
-
-diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
-index 63a368a..ed4e757 100644
---- a/drivers/net/virtio/virtio_ethdev.c
-+++ b/drivers/net/virtio/virtio_ethdev.c
-@@ -1405,18 +1405,13 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet
- link.link_speed = SPEED_10G;
-
- if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
-- PMD_INIT_LOG(DEBUG, "Get link status from hw");
- vtpci_read_dev_config(hw,
- offsetof(struct virtio_net_config, status),
- &status, sizeof(status));
- if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
- link.link_status = ETH_LINK_DOWN;
-- PMD_INIT_LOG(DEBUG, "Port %d is down",
-- dev->data->port_id);
- } else {
- link.link_status = ETH_LINK_UP;
-- PMD_INIT_LOG(DEBUG, "Port %d is up",
-- dev->data->port_id);
- }
- } else {
- link.link_status = ETH_LINK_UP;
-diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
-index ef21d8e..7fe14ad 100644
---- a/drivers/net/virtio/virtio_rxtx.c
-+++ b/drivers/net/virtio/virtio_rxtx.c
-@@ -643,6 +643,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
- rxm->next = NULL;
- rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
- rxm->data_len = (uint16_t)(len[i] - hdr_size);
-+ rxm->ol_flags = 0;
-
- if (hw->vlan_strip)
- rte_vlan_strip(rxm);
-@@ -760,6 +761,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
- rxm->vlan_tci = 0;
- rxm->pkt_len = (uint32_t)(len[0] - hdr_size);
- rxm->data_len = (uint16_t)(len[0] - hdr_size);
-+ rxm->ol_flags = 0;
-
- rxm->port = rxvq->port_id;
- rx_pkts[nb_rx] = rxm;
-@@ -863,7 +865,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
- if (unlikely(nb_pkts < 1))
- return nb_pkts;
-
-- PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
-+ PMD_TX_LOG(DEBUG, "%d packets to xmit\n", nb_pkts);
- nb_used = VIRTQUEUE_NUSED(txvq);
-
- virtio_rmb();
---
-2.7.4
-
diff --git a/dpdk/dpdk-16.04_patches/0004-mbuf-rearrange-rte_mbuf-metadata-to-suit-vpp.patch b/dpdk/dpdk-16.04_patches/0004-mbuf-rearrange-rte_mbuf-metadata-to-suit-vpp.patch
deleted file mode 100644
index 78d0c63..0000000
--- a/dpdk/dpdk-16.04_patches/0004-mbuf-rearrange-rte_mbuf-metadata-to-suit-vpp.patch
+++ /dev/null
@@ -1,83 +0,0 @@
-From eed80f56477e26a5711ea3749d1881797b3c82a5 Mon Sep 17 00:00:00 2001
-From: Damjan Marion <damarion@cisco.com>
-Date: Wed, 16 Dec 2015 04:25:23 +0100
-Subject: [PATCH 4/6] mbuf: rearrange rte_mbuf metadata to suit vpp
-
----
- .../linuxapp/eal/include/exec-env/rte_kni_common.h | 5 +++--
- lib/librte_mbuf/rte_mbuf.h | 20 ++++++++++++--------
- 2 files changed, 15 insertions(+), 10 deletions(-)
-
-diff --git a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
-index 7e5e598..fdbeb4a 100644
---- a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
-+++ b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
-@@ -118,11 +118,12 @@ struct rte_kni_mbuf {
- char pad2[4];
- uint32_t pkt_len; /**< Total pkt len: sum of all segment data_len. */
- uint16_t data_len; /**< Amount of data in segment buffer. */
-+ char pad3[8];
-+ void *next;
-
- /* fields on second cache line */
-- char pad3[8] __attribute__((__aligned__(RTE_CACHE_LINE_MIN_SIZE)));
-+ char pad4[16] __attribute__((__aligned__(RTE_CACHE_LINE_MIN_SIZE)));
- void *pool;
-- void *next;
- };
-
- /*
-diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
-index 75a227d..ca4d0fb 100644
---- a/lib/librte_mbuf/rte_mbuf.h
-+++ b/lib/librte_mbuf/rte_mbuf.h
-@@ -731,6 +731,12 @@ typedef uint64_t MARKER64[0]; /**< marker that allows us to overwrite 8 bytes
- /**
- * The generic rte_mbuf, containing a packet mbuf.
- */
-+/*
-+ * offload in the second cache line, next in the first. Better for vpp
-+ * at least as of right now.
-+ * If you change this structure, you must change the user-mode
-+ * version in rte_mbuf.h
-+ */
- struct rte_mbuf {
- MARKER cacheline0;
-
-@@ -783,6 +789,12 @@ struct rte_mbuf {
- uint32_t pkt_len; /**< Total pkt len: sum of all segments. */
- uint16_t data_len; /**< Amount of data in segment buffer. */
- uint16_t vlan_tci; /**< VLAN Tag Control Identifier (CPU order) */
-+ uint32_t seqn; /**< Sequence number. See also rte_reorder_insert() */
-+ uint16_t vlan_tci_outer; /**< Outer VLAN Tag Control Identifier (CPU order) */
-+ struct rte_mbuf *next; /**< Next segment of scattered packet. */
-+
-+ /* second cache line - fields only used in slow path or on TX */
-+ MARKER cacheline1 __rte_cache_min_aligned;
-
- union {
- uint32_t rss; /**< RSS hash result if RSS enabled */
-@@ -806,20 +818,12 @@ struct rte_mbuf {
- uint32_t usr; /**< User defined tags. See rte_distributor_process() */
- } hash; /**< hash information */
-
-- uint32_t seqn; /**< Sequence number. See also rte_reorder_insert() */
--
-- uint16_t vlan_tci_outer; /**< Outer VLAN Tag Control Identifier (CPU order) */
--
-- /* second cache line - fields only used in slow path or on TX */
-- MARKER cacheline1 __rte_cache_min_aligned;
--
- union {
- void *userdata; /**< Can be used for external metadata */
- uint64_t udata64; /**< Allow 8-byte userdata on 32-bit */
- };
-
- struct rte_mempool *pool; /**< Pool from which mbuf was allocated. */
-- struct rte_mbuf *next; /**< Next segment of scattered packet. */
-
- /* fields to support TX offloads */
- union {
---
-2.7.4
-
diff --git a/dpdk/dpdk-16.04_patches/0005-Allow-applications-to-override-rte_delay_us.patch b/dpdk/dpdk-16.04_patches/0005-Allow-applications-to-override-rte_delay_us.patch
deleted file mode 100644
index 8a32f60..0000000
--- a/dpdk/dpdk-16.04_patches/0005-Allow-applications-to-override-rte_delay_us.patch
+++ /dev/null
@@ -1,43 +0,0 @@
-From 3432c140c9c51e671a4d58bb428d5852426add1f Mon Sep 17 00:00:00 2001
-From: "Todd Foggoa (tfoggoa)" <tfoggoa@cisco.com>
-Date: Wed, 3 Feb 2016 08:35:27 -0800
-Subject: [PATCH 5/6] Allow applications to override rte_delay_us()
-
-Some applications may wish to define their own implentation of
-usec delay other than the existing blocking one. The default
-behavior remains unchanged.
-
-Signed-off-by: Todd Foggoa (tfoggoa) <tfoggoa@cisco.com>
----
- lib/librte_eal/common/eal_common_timer.c | 12 ++++++++++++
- 1 file changed, 12 insertions(+)
-
-diff --git a/lib/librte_eal/common/eal_common_timer.c b/lib/librte_eal/common/eal_common_timer.c
-index c4227cd..cc26b91 100644
---- a/lib/librte_eal/common/eal_common_timer.c
-+++ b/lib/librte_eal/common/eal_common_timer.c
-@@ -47,9 +47,21 @@
- /* The frequency of the RDTSC timer resolution */
- static uint64_t eal_tsc_resolution_hz;
-
-+/* Allow an override of the rte_delay_us function */
-+int rte_delay_us_override (unsigned us) __attribute__((weak));
-+
-+int
-+rte_delay_us_override(__attribute__((unused)) unsigned us)
-+{
-+ return 0;
-+}
-+
- void
- rte_delay_us(unsigned us)
- {
-+ if (rte_delay_us_override(us))
-+ return;
-+
- const uint64_t start = rte_get_timer_cycles();
- const uint64_t ticks = (uint64_t)us * rte_get_timer_hz() / 1E6;
- while ((rte_get_timer_cycles() - start) < ticks)
---
-2.7.4
-
diff --git a/dpdk/dpdk-16.04_patches/0006-Temporarily-disable-unthrottled-log-message.patch b/dpdk/dpdk-16.04_patches/0006-Temporarily-disable-unthrottled-log-message.patch
deleted file mode 100644
index 2241522..0000000
--- a/dpdk/dpdk-16.04_patches/0006-Temporarily-disable-unthrottled-log-message.patch
+++ /dev/null
@@ -1,26 +0,0 @@
-From 454e25ed57c17ec18ee76ead4a75f9abdf579608 Mon Sep 17 00:00:00 2001
-From: Dave Barach <dave@barachs.net>
-Date: Tue, 9 Feb 2016 10:22:39 -0500
-Subject: [PATCH 6/6] Temporarily disable unthrottled log message.
-
-Signed-off-by: Dave Barach <dave@barachs.net>
----
- lib/librte_eal/linuxapp/eal/eal_interrupts.c | 2 ++
- 1 file changed, 2 insertions(+)
-
-diff --git a/lib/librte_eal/linuxapp/eal/eal_interrupts.c b/lib/librte_eal/linuxapp/eal/eal_interrupts.c
-index 06b26a9..8d918a4 100644
---- a/lib/librte_eal/linuxapp/eal/eal_interrupts.c
-+++ b/lib/librte_eal/linuxapp/eal/eal_interrupts.c
-@@ -711,6 +711,8 @@ eal_intr_process_interrupts(struct epoll_event *events, int nfds)
- if (errno == EINTR || errno == EWOULDBLOCK)
- continue;
-
-+ /* $$$ disable to avoid filling /var/log */
-+ if (0)
- RTE_LOG(ERR, EAL, "Error reading from file "
- "descriptor %d: %s\n",
- events[n].data.fd,
---
-2.7.4
-
diff --git a/dpdk/dpdk-16.04_patches/0007-Fix-a-crash-in-igb_uio-driver-when-the-device-is-rem.patch b/dpdk/dpdk-16.04_patches/0007-Fix-a-crash-in-igb_uio-driver-when-the-device-is-rem.patch
deleted file mode 100644
index e938c7c..0000000
--- a/dpdk/dpdk-16.04_patches/0007-Fix-a-crash-in-igb_uio-driver-when-the-device-is-rem.patch
+++ /dev/null
@@ -1,36 +0,0 @@
-From a1020e16640e3b5d8cf32ef7d2914b788a1e06f6 Mon Sep 17 00:00:00 2001
-From: Bud Grise <griseb@cisco.com>
-Date: Tue, 2 Feb 2016 12:45:44 -0800
-Subject: [PATCH 7/8] Fix a crash in igb_uio driver when the device is removed.
-
-This crash happens because the device still has MSI configured,
-the fix is to free the IRQ.
-
-Signed-off-by: Todd Foggoa (tfoggoa) <tfoggoa@cisco.com>
----
- lib/librte_eal/linuxapp/igb_uio/igb_uio.c | 7 +++++++
- 1 file changed, 7 insertions(+)
-
-diff --git a/lib/librte_eal/linuxapp/igb_uio/igb_uio.c b/lib/librte_eal/linuxapp/igb_uio/igb_uio.c
-index 72b2692..bf12391 100644
---- a/lib/librte_eal/linuxapp/igb_uio/igb_uio.c
-+++ b/lib/librte_eal/linuxapp/igb_uio/igb_uio.c
-@@ -506,8 +506,15 @@ static void
- igbuio_pci_remove(struct pci_dev *dev)
- {
- struct rte_uio_pci_dev *udev = pci_get_drvdata(dev);
-+ struct uio_info *info = pci_get_drvdata(dev);
-
- sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
-+
-+ if (info->irq && (info->irq != UIO_IRQ_CUSTOM)){
-+ free_irq(info->irq, info->uio_dev);
-+ info->irq = UIO_IRQ_NONE;
-+ }
-+
- uio_unregister_device(&udev->info);
- igbuio_pci_release_iomem(&udev->info);
- if (udev->mode == RTE_INTR_MODE_MSIX)
---
-2.5.4 (Apple Git-61)
-
diff --git a/dpdk/dpdk-16.04_patches/0008-Add-missing-init-of-packet_type-field.patch b/dpdk/dpdk-16.04_patches/0008-Add-missing-init-of-packet_type-field.patch
deleted file mode 100644
index f2ded92..0000000
--- a/dpdk/dpdk-16.04_patches/0008-Add-missing-init-of-packet_type-field.patch
+++ /dev/null
@@ -1,70 +0,0 @@
-From dfb597dfb4c8e36edb4f1db0162a12f9e0d9e695 Mon Sep 17 00:00:00 2001
-From: Bud Grise <griseb@cisco.com>
-Date: Mon, 1 Feb 2016 14:28:01 -0500
-Subject: [PATCH 8/8] Add missing init of packet_type field.
-
-This can cause packets to be mishandled in systems with more than
-one type of driver in use.
-
-Signed-off-by: Todd Foggoa (tfoggoa) <tfoggoa@cisco.com>
----
- drivers/net/e1000/em_rxtx.c | 2 ++
- drivers/net/virtio/virtio_rxtx.c | 2 ++
- drivers/net/vmxnet3/vmxnet3_rxtx.c | 1 +
- 3 files changed, 5 insertions(+)
-
-diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
-index 441ccad..3bac431 100644
---- a/drivers/net/e1000/em_rxtx.c
-+++ b/drivers/net/e1000/em_rxtx.c
-@@ -793,6 +793,7 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- rxm->ol_flags = rx_desc_status_to_pkt_flags(status);
- rxm->ol_flags = rxm->ol_flags |
- rx_desc_error_to_pkt_flags(rxd.errors);
-+ rxm->packet_type = RTE_PTYPE_UNKNOWN;
-
- /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
- rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
-@@ -1019,6 +1020,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- first_seg->ol_flags = rx_desc_status_to_pkt_flags(status);
- first_seg->ol_flags = first_seg->ol_flags |
- rx_desc_error_to_pkt_flags(rxd.errors);
-+ first_seg->packet_type = RTE_PTYPE_UNKNOWN;
-
- /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
- rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
-diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
-index 7fe14ad..4959d8f 100644
---- a/drivers/net/virtio/virtio_rxtx.c
-+++ b/drivers/net/virtio/virtio_rxtx.c
-@@ -644,6 +644,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
- rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
- rxm->data_len = (uint16_t)(len[i] - hdr_size);
- rxm->ol_flags = 0;
-+ rxm->packet_type = RTE_PTYPE_UNKNOWN;
-
- if (hw->vlan_strip)
- rte_vlan_strip(rxm);
-@@ -762,6 +763,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
- rxm->pkt_len = (uint32_t)(len[0] - hdr_size);
- rxm->data_len = (uint16_t)(len[0] - hdr_size);
- rxm->ol_flags = 0;
-+ rxm->packet_type = RTE_PTYPE_UNKNOWN;
-
- rxm->port = rxvq->port_id;
- rx_pkts[nb_rx] = rxm;
-diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
-index 4ac0456..d26d2a0 100644
---- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
-+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
-@@ -701,6 +701,7 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
- rxm->data_off = RTE_PKTMBUF_HEADROOM;
- rxm->ol_flags = 0;
- rxm->vlan_tci = 0;
-+ rxm->packet_type = RTE_PTYPE_UNKNOWN;
-
- /*
- * If this is the first buffer of the received packet,
---
-2.5.4 (Apple Git-61)
-
diff --git a/dpdk/dpdk-16.04_patches/0009-enic-fix-imissed-to-count-drops-due-to-lack-of-RX-bu.patch b/dpdk/dpdk-16.04_patches/0009-enic-fix-imissed-to-count-drops-due-to-lack-of-RX-bu.patch
deleted file mode 100644
index b7a5e57..0000000
--- a/dpdk/dpdk-16.04_patches/0009-enic-fix-imissed-to-count-drops-due-to-lack-of-RX-bu.patch
+++ /dev/null
@@ -1,32 +0,0 @@
-From b41648c53981a534069a8ce1b75f189ba83e24c8 Mon Sep 17 00:00:00 2001
-From: John Daley <johndale@cisco.com>
-Date: Tue, 26 Apr 2016 13:30:50 -0700
-Subject: [PATCH 09/17] enic: fix 'imissed' to count drops due to lack of RX
- buffers
-
-Fixes: 7182d3e7d177 ("enic: expose Rx missed packets counter")
-Signed-off-by: John Daley <johndale@cisco.com>
----
- drivers/net/enic/enic_main.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
-index e3da51d..06cacd4 100644
---- a/drivers/net/enic/enic_main.c
-+++ b/drivers/net/enic/enic_main.c
-@@ -243,10 +243,10 @@ void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
- r_stats->ibytes = stats->rx.rx_bytes_ok;
- r_stats->obytes = stats->tx.tx_bytes_ok;
-
-- r_stats->ierrors = stats->rx.rx_errors;
-+ r_stats->ierrors = stats->rx.rx_errors + stats->rx.rx_drop;
- r_stats->oerrors = stats->tx.tx_errors;
-
-- r_stats->imissed = stats->rx.rx_drop;
-+ r_stats->imissed = stats->rx.rx_no_bufs;
-
- r_stats->imcasts = stats->rx.rx_multicast_frames_ok;
- r_stats->rx_nombuf = stats->rx.rx_no_bufs;
---
-2.7.4
-
diff --git a/dpdk/dpdk-16.04_patches/0010-Fix-O0-clang-build.patch b/dpdk/dpdk-16.04_patches/0010-Fix-O0-clang-build.patch
deleted file mode 100644
index 2ce0e7c..0000000
--- a/dpdk/dpdk-16.04_patches/0010-Fix-O0-clang-build.patch
+++ /dev/null
@@ -1,32 +0,0 @@
-From 2b82c248638bba6e98ecf388c6e0b1f5f0b44028 Mon Sep 17 00:00:00 2001
-From: Damjan Marion <damarion@cisco.com>
-Date: Tue, 26 Apr 2016 12:36:52 +0200
-Subject: [PATCH] Fix -O0 clang build
-
-Signed-off-by: Damjan Marion <damarion@cisco.com>
----
- lib/librte_eal/common/include/arch/x86/rte_rtm.h | 3 +++
- 1 file changed, 3 insertions(+)
-
-diff --git a/lib/librte_eal/common/include/arch/x86/rte_rtm.h b/lib/librte_eal/common/include/arch/x86/rte_rtm.h
-index d935641..30c1969 100644
---- a/lib/librte_eal/common/include/arch/x86/rte_rtm.h
-+++ b/lib/librte_eal/common/include/arch/x86/rte_rtm.h
-@@ -50,11 +50,14 @@ void rte_xend(void)
- asm volatile(".byte 0x0f,0x01,0xd5" ::: "memory");
- }
-
-+#define rte_xabort(x) asm volatile(".byte 0xc6,0xf8,%P0" :: "i" (x) : "memory")
-+#if 0
- static __attribute__((__always_inline__)) inline
- void rte_xabort(const unsigned int status)
- {
- asm volatile(".byte 0xc6,0xf8,%P0" :: "i" (status) : "memory");
- }
-+#endif
-
- static __attribute__((__always_inline__)) inline
- int rte_xtest(void)
---
-2.7.4
-
diff --git a/dpdk/dpdk-16.04_patches/0011-enic-fix-misalignment-of-Rx-mbuf-data.patch b/dpdk/dpdk-16.04_patches/0011-enic-fix-misalignment-of-Rx-mbuf-data.patch
deleted file mode 100644
index 0d4267b..0000000
--- a/dpdk/dpdk-16.04_patches/0011-enic-fix-misalignment-of-Rx-mbuf-data.patch
+++ /dev/null
@@ -1,55 +0,0 @@
-From d91c4e2de969086ebc8c3a1dfa30913ea3de37b4 Mon Sep 17 00:00:00 2001
-From: John Daley <johndale@cisco.com>
-Date: Mon, 25 Apr 2016 16:24:53 -0700
-Subject: [PATCH 11/17] enic: fix misalignment of Rx mbuf data
-
-Data DMA used m->data_off of uninitialized mbufs instead of
-RTE_PKTMBUF_HEADROOM, potentially causing Rx data to be
-placed at the wrong alignment in the mbuf.
-
-Fixes: 947d860c821f ("enic: improve Rx performance")
-Signed-off-by: John Daley <johndale@cisco.com>
----
- drivers/net/enic/enic_main.c | 5 +++--
- drivers/net/enic/enic_rx.c | 6 ++++--
- 2 files changed, 7 insertions(+), 4 deletions(-)
-
-diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
-index 06cacd4..b164307 100644
---- a/drivers/net/enic/enic_main.c
-+++ b/drivers/net/enic/enic_main.c
-@@ -355,10 +355,11 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
- return -ENOMEM;
- }
-
-- dma_addr = (dma_addr_t)(mb->buf_physaddr + mb->data_off);
-+ dma_addr = (dma_addr_t)(mb->buf_physaddr
-+ + RTE_PKTMBUF_HEADROOM);
-
- rq_enet_desc_enc(rqd, dma_addr, RQ_ENET_TYPE_ONLY_SOP,
-- mb->buf_len);
-+ mb->buf_len - RTE_PKTMBUF_HEADROOM);
- rq->mbuf_ring[i] = mb;
- }
-
-diff --git a/drivers/net/enic/enic_rx.c b/drivers/net/enic/enic_rx.c
-index 232987a..39bb55c 100644
---- a/drivers/net/enic/enic_rx.c
-+++ b/drivers/net/enic/enic_rx.c
-@@ -314,9 +314,11 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- + rx_id);
-
- /* Push descriptor for newly allocated mbuf */
-- dma_addr = (dma_addr_t)(nmb->buf_physaddr + nmb->data_off);
-+ dma_addr = (dma_addr_t)(nmb->buf_physaddr
-+ + RTE_PKTMBUF_HEADROOM);
- rqd_ptr->address = rte_cpu_to_le_64(dma_addr);
-- rqd_ptr->length_type = cpu_to_le16(nmb->buf_len);
-+ rqd_ptr->length_type = cpu_to_le16(nmb->buf_len
-+ - RTE_PKTMBUF_HEADROOM);
-
- /* Fill in the rest of the mbuf */
- rxmb->data_off = RTE_PKTMBUF_HEADROOM;
---
-2.7.4
-
diff --git a/dpdk/dpdk-16.04_patches/0012-enic-Optimization-of-Tx-path-to-reduce-Host-CPU-over.patch b/dpdk/dpdk-16.04_patches/0012-enic-Optimization-of-Tx-path-to-reduce-Host-CPU-over.patch
deleted file mode 100644
index 7acead4..0000000
--- a/dpdk/dpdk-16.04_patches/0012-enic-Optimization-of-Tx-path-to-reduce-Host-CPU-over.patch
+++ /dev/null
@@ -1,1844 +0,0 @@
-From 4e1872a43b3ad824e37f840c9ed1e0c1f1b24a32 Mon Sep 17 00:00:00 2001
-From: John Daley <johndale@cisco.com>
-Date: Tue, 5 Apr 2016 15:19:06 -0700
-Subject: [PATCH 12/17] enic: Optimization of Tx path to reduce Host CPU
- overhead, cleanup
-
-Optimizations and cleanup:
-- flatten packet send path
-- flatten mbuf free path
-- disable CQ entry writing and use CQ messages instead
-- use rte_mempool_put_bulk() to bulk return freed mbufs
-- remove unnecessary fields vnic_bufs struct, use contiguous array of cache
- aligned divisible elements. No next pointers.
-- use local variables inside per packet loop instead of fields in structs.
-- factor book keeping out of the per packet tx loop where possible
- (removed several conditionals)
-- put Tx and Rx code in 1 file (enic_rxtx.c)
-
-Reviewed-by: Nelson Escobar <neescoba@cisco.com>
-Signed-off-by: John Daley <johndale@cisco.com>
----
- drivers/net/enic/Makefile | 2 +-
- drivers/net/enic/base/enic_vnic_wq.h | 79 ------
- drivers/net/enic/base/vnic_cq.h | 37 +--
- drivers/net/enic/base/vnic_rq.h | 2 +-
- drivers/net/enic/base/vnic_wq.c | 89 +++---
- drivers/net/enic/base/vnic_wq.h | 113 +-------
- drivers/net/enic/enic.h | 27 +-
- drivers/net/enic/enic_ethdev.c | 67 +----
- drivers/net/enic/enic_main.c | 132 +++------
- drivers/net/enic/enic_res.h | 81 +-----
- drivers/net/enic/enic_rx.c | 361 -------------------------
- drivers/net/enic/enic_rxtx.c | 505 +++++++++++++++++++++++++++++++++++
- 12 files changed, 635 insertions(+), 860 deletions(-)
- delete mode 100644 drivers/net/enic/base/enic_vnic_wq.h
- delete mode 100644 drivers/net/enic/enic_rx.c
- create mode 100644 drivers/net/enic/enic_rxtx.c
-
-diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile
-index f316274..3926b79 100644
---- a/drivers/net/enic/Makefile
-+++ b/drivers/net/enic/Makefile
-@@ -53,7 +53,7 @@ VPATH += $(SRCDIR)/src
- #
- SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_ethdev.c
- SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_main.c
--SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_rx.c
-+SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_rxtx.c
- SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_clsf.c
- SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_res.c
- SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_cq.c
-diff --git a/drivers/net/enic/base/enic_vnic_wq.h b/drivers/net/enic/base/enic_vnic_wq.h
-deleted file mode 100644
-index b019109..0000000
---- a/drivers/net/enic/base/enic_vnic_wq.h
-+++ /dev/null
-@@ -1,79 +0,0 @@
--/*
-- * Copyright 2008-2015 Cisco Systems, Inc. All rights reserved.
-- * Copyright 2007 Nuova Systems, Inc. All rights reserved.
-- *
-- * Copyright (c) 2015, Cisco Systems, Inc.
-- * All rights reserved.
-- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions
-- * are met:
-- *
-- * 1. Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- *
-- * 2. Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in
-- * the documentation and/or other materials provided with the
-- * distribution.
-- *
-- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-- * POSSIBILITY OF SUCH DAMAGE.
-- *
-- */
--
--#ifndef _ENIC_VNIC_WQ_H_
--#define _ENIC_VNIC_WQ_H_
--
--#include "vnic_dev.h"
--#include "vnic_cq.h"
--
--static inline void enic_vnic_post_wq_index(struct vnic_wq *wq)
--{
-- struct vnic_wq_buf *buf = wq->to_use;
--
-- /* Adding write memory barrier prevents compiler and/or CPU
-- * reordering, thus avoiding descriptor posting before
-- * descriptor is initialized. Otherwise, hardware can read
-- * stale descriptor fields.
-- */
-- wmb();
-- iowrite32(buf->index, &wq->ctrl->posted_index);
--}
--
--static inline void enic_vnic_post_wq(struct vnic_wq *wq,
-- void *os_buf, dma_addr_t dma_addr,
-- unsigned int len, int sop,
-- uint8_t desc_skip_cnt, uint8_t cq_entry,
-- uint8_t compressed_send, uint64_t wrid)
--{
-- struct vnic_wq_buf *buf = wq->to_use;
--
-- buf->sop = sop;
-- buf->cq_entry = cq_entry;
-- buf->compressed_send = compressed_send;
-- buf->desc_skip_cnt = desc_skip_cnt;
-- buf->os_buf = os_buf;
-- buf->dma_addr = dma_addr;
-- buf->len = len;
-- buf->wr_id = wrid;
--
-- buf = buf->next;
-- wq->ring.desc_avail -= desc_skip_cnt;
-- wq->to_use = buf;
--
-- if (cq_entry)
-- enic_vnic_post_wq_index(wq);
--}
--
--#endif /* _ENIC_VNIC_WQ_H_ */
-diff --git a/drivers/net/enic/base/vnic_cq.h b/drivers/net/enic/base/vnic_cq.h
-index 922391b..ffc1aaa 100644
---- a/drivers/net/enic/base/vnic_cq.h
-+++ b/drivers/net/enic/base/vnic_cq.h
-@@ -96,41 +96,46 @@ static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
- u8 type, u16 q_number, u16 completed_index, void *opaque),
- void *opaque)
- {
-- struct cq_desc *cq_desc;
-+ struct cq_desc *cq_desc, *cq_desc_last;
- unsigned int work_done = 0;
- u16 q_number, completed_index;
-- u8 type, color;
-- struct rte_mbuf **rx_pkts = opaque;
-- unsigned int ret;
-+ u8 type, color, type_color;
-
- cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
- cq->ring.desc_size * cq->to_clean);
-- cq_desc_dec(cq_desc, &type, &color,
-- &q_number, &completed_index);
-+
-+ type_color = cq_desc->type_color;
-+ color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
-+ if (color == cq->last_color)
-+ return 0;
-
- while (color != cq->last_color) {
-- if (opaque)
-- opaque = (void *)&(rx_pkts[work_done]);
-+ cq_desc_last = cq_desc;
-
-- ret = (*q_service)(cq->vdev, cq_desc, type,
-- q_number, completed_index, opaque);
- cq->to_clean++;
- if (cq->to_clean == cq->ring.desc_count) {
- cq->to_clean = 0;
- cq->last_color = cq->last_color ? 0 : 1;
- }
-
-+ work_done++;
-+ if (work_done >= work_to_do)
-+ break;
-+
- cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
- cq->ring.desc_size * cq->to_clean);
-- cq_desc_dec(cq_desc, &type, &color,
-- &q_number, &completed_index);
-
-- if (ret)
-- work_done++;
-- if (work_done >= work_to_do)
-- break;
-+ type_color = cq_desc->type_color;
-+ color = (type_color >> CQ_DESC_COLOR_SHIFT)
-+ & CQ_DESC_COLOR_MASK;
-+
- }
-
-+ cq_desc_dec(cq_desc_last, &type, &color,
-+ &q_number, &completed_index);
-+
-+ (*q_service)(cq->vdev, cq_desc, type,
-+ q_number, completed_index, opaque);
- return work_done;
- }
-
-diff --git a/drivers/net/enic/base/vnic_rq.h b/drivers/net/enic/base/vnic_rq.h
-index e083ccc..424415c 100644
---- a/drivers/net/enic/base/vnic_rq.h
-+++ b/drivers/net/enic/base/vnic_rq.h
-@@ -74,7 +74,7 @@ struct vnic_rq {
- struct vnic_dev_ring ring;
- struct rte_mbuf **mbuf_ring; /* array of allocated mbufs */
- unsigned int mbuf_next_idx; /* next mb to consume */
-- void *os_buf_head;
-+ void *mb_head;
- unsigned int pkts_outstanding;
- uint16_t rx_nb_hold;
- uint16_t rx_free_thresh;
-diff --git a/drivers/net/enic/base/vnic_wq.c b/drivers/net/enic/base/vnic_wq.c
-index a3ef417..ccbbd61 100644
---- a/drivers/net/enic/base/vnic_wq.c
-+++ b/drivers/net/enic/base/vnic_wq.c
-@@ -59,71 +59,30 @@ int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq,
-
- static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
- {
-- struct vnic_wq_buf *buf;
-- unsigned int i, j, count = wq->ring.desc_count;
-- unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
--
-- for (i = 0; i < blks; i++) {
-- wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC);
-- if (!wq->bufs[i])
-- return -ENOMEM;
-- }
--
-- for (i = 0; i < blks; i++) {
-- buf = wq->bufs[i];
-- for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES(count); j++) {
-- buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES(count) + j;
-- buf->desc = (u8 *)wq->ring.descs +
-- wq->ring.desc_size * buf->index;
-- if (buf->index + 1 == count) {
-- buf->next = wq->bufs[0];
-- break;
-- } else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES(count)) {
-- buf->next = wq->bufs[i + 1];
-- } else {
-- buf->next = buf + 1;
-- buf++;
-- }
-- }
-- }
--
-- wq->to_use = wq->to_clean = wq->bufs[0];
--
-+ unsigned int count = wq->ring.desc_count;
-+ /* Allocate the mbuf ring */
-+ wq->bufs = (struct vnic_wq_buf *)rte_zmalloc_socket("wq->bufs",
-+ sizeof(struct vnic_wq_buf) * count,
-+ RTE_CACHE_LINE_SIZE, wq->socket_id);
-+ wq->head_idx = 0;
-+ wq->tail_idx = 0;
-+ if (wq->bufs == NULL)
-+ return -ENOMEM;
- return 0;
- }
-
- void vnic_wq_free(struct vnic_wq *wq)
- {
- struct vnic_dev *vdev;
-- unsigned int i;
-
- vdev = wq->vdev;
-
- vnic_dev_free_desc_ring(vdev, &wq->ring);
-
-- for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) {
-- if (wq->bufs[i]) {
-- kfree(wq->bufs[i]);
-- wq->bufs[i] = NULL;
-- }
-- }
--
-+ rte_free(wq->bufs);
- wq->ctrl = NULL;
- }
-
--int vnic_wq_mem_size(struct vnic_wq *wq, unsigned int desc_count,
-- unsigned int desc_size)
--{
-- int mem_size = 0;
--
-- mem_size += vnic_dev_desc_ring_size(&wq->ring, desc_count, desc_size);
--
-- mem_size += VNIC_WQ_BUF_BLKS_NEEDED(wq->ring.desc_count) *
-- VNIC_WQ_BUF_BLK_SZ(wq->ring.desc_count);
--
-- return mem_size;
--}
--
-
- int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
- unsigned int desc_count, unsigned int desc_size)
-@@ -172,9 +131,8 @@ void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
- iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
- iowrite32(0, &wq->ctrl->error_status);
-
-- wq->to_use = wq->to_clean =
-- &wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES(count)]
-- [fetch_index % VNIC_WQ_BUF_BLK_ENTRIES(count)];
-+ wq->head_idx = fetch_index;
-+ wq->tail_idx = wq->head_idx;
- }
-
- void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
-@@ -184,6 +142,7 @@ void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
- vnic_wq_init_start(wq, cq_index, 0, 0,
- error_interrupt_enable,
- error_interrupt_offset);
-+ wq->last_completed_index = 0;
- }
-
- void vnic_wq_error_out(struct vnic_wq *wq, unsigned int error)
-@@ -219,22 +178,34 @@ int vnic_wq_disable(struct vnic_wq *wq)
- return -ETIMEDOUT;
- }
-
-+static inline uint32_t
-+buf_idx_incr(uint32_t n_descriptors, uint32_t idx)
-+{
-+ idx++;
-+ if (unlikely(idx == n_descriptors))
-+ idx = 0;
-+ return idx;
-+}
-+
- void vnic_wq_clean(struct vnic_wq *wq,
-- void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
-+ void (*buf_clean)(struct vnic_wq_buf *buf))
- {
- struct vnic_wq_buf *buf;
-+ unsigned int to_clean = wq->tail_idx;
-
-- buf = wq->to_clean;
-+ buf = &wq->bufs[to_clean];
-
- while (vnic_wq_desc_used(wq) > 0) {
-
-- (*buf_clean)(wq, buf);
-+ (*buf_clean)(buf);
-+ to_clean = buf_idx_incr(wq->ring.desc_count, to_clean);
-
-- buf = wq->to_clean = buf->next;
-+ buf = &wq->bufs[to_clean];
- wq->ring.desc_avail++;
- }
-
-- wq->to_use = wq->to_clean = wq->bufs[0];
-+ wq->head_idx = 0;
-+ wq->tail_idx = 0;
-
- iowrite32(0, &wq->ctrl->fetch_index);
- iowrite32(0, &wq->ctrl->posted_index);
-diff --git a/drivers/net/enic/base/vnic_wq.h b/drivers/net/enic/base/vnic_wq.h
-index c23de62..37c3ff9 100644
---- a/drivers/net/enic/base/vnic_wq.h
-+++ b/drivers/net/enic/base/vnic_wq.h
-@@ -64,42 +64,23 @@ struct vnic_wq_ctrl {
- u32 pad9;
- };
-
-+/* 16 bytes */
- struct vnic_wq_buf {
-- struct vnic_wq_buf *next;
-- dma_addr_t dma_addr;
-- void *os_buf;
-- unsigned int len;
-- unsigned int index;
-- int sop;
-- void *desc;
-- uint64_t wr_id; /* Cookie */
-- uint8_t cq_entry; /* Gets completion event from hw */
-- uint8_t desc_skip_cnt; /* Num descs to occupy */
-- uint8_t compressed_send; /* Both hdr and payload in one desc */
-+ struct rte_mempool *pool;
-+ void *mb;
- };
-
--/* Break the vnic_wq_buf allocations into blocks of 32/64 entries */
--#define VNIC_WQ_BUF_MIN_BLK_ENTRIES 32
--#define VNIC_WQ_BUF_DFLT_BLK_ENTRIES 64
--#define VNIC_WQ_BUF_BLK_ENTRIES(entries) \
-- ((unsigned int)((entries < VNIC_WQ_BUF_DFLT_BLK_ENTRIES) ? \
-- VNIC_WQ_BUF_MIN_BLK_ENTRIES : VNIC_WQ_BUF_DFLT_BLK_ENTRIES))
--#define VNIC_WQ_BUF_BLK_SZ(entries) \
-- (VNIC_WQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_wq_buf))
--#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
-- DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES(entries))
--#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
--
- struct vnic_wq {
- unsigned int index;
- struct vnic_dev *vdev;
- struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */
- struct vnic_dev_ring ring;
-- struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX];
-- struct vnic_wq_buf *to_use;
-- struct vnic_wq_buf *to_clean;
-- unsigned int pkts_outstanding;
-+ struct vnic_wq_buf *bufs;
-+ unsigned int head_idx;
-+ unsigned int tail_idx;
- unsigned int socket_id;
-+ const struct rte_memzone *cqmsg_rz;
-+ uint16_t last_completed_index;
- };
-
- static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
-@@ -114,11 +95,6 @@ static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq)
- return wq->ring.desc_count - wq->ring.desc_avail - 1;
- }
-
--static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
--{
-- return wq->to_use->desc;
--}
--
- #define PI_LOG2_CACHE_LINE_SIZE 5
- #define PI_INDEX_BITS 12
- #define PI_INDEX_MASK ((1U << PI_INDEX_BITS) - 1)
-@@ -191,75 +167,6 @@ static inline u64 vnic_cached_posted_index(dma_addr_t addr, unsigned int len,
- PI_PREFETCH_ADDR_MASK) << PI_PREFETCH_ADDR_OFF);
- }
-
--static inline void vnic_wq_post(struct vnic_wq *wq,
-- void *os_buf, dma_addr_t dma_addr,
-- unsigned int len, int sop, int eop,
-- uint8_t desc_skip_cnt, uint8_t cq_entry,
-- uint8_t compressed_send, uint64_t wrid)
--{
-- struct vnic_wq_buf *buf = wq->to_use;
--
-- buf->sop = sop;
-- buf->cq_entry = cq_entry;
-- buf->compressed_send = compressed_send;
-- buf->desc_skip_cnt = desc_skip_cnt;
-- buf->os_buf = os_buf;
-- buf->dma_addr = dma_addr;
-- buf->len = len;
-- buf->wr_id = wrid;
--
-- buf = buf->next;
-- if (eop) {
--#ifdef DO_PREFETCH
-- uint64_t wr = vnic_cached_posted_index(dma_addr, len,
-- buf->index);
--#endif
-- /* Adding write memory barrier prevents compiler and/or CPU
-- * reordering, thus avoiding descriptor posting before
-- * descriptor is initialized. Otherwise, hardware can read
-- * stale descriptor fields.
-- */
-- wmb();
--#ifdef DO_PREFETCH
-- /* Intel chipsets seem to limit the rate of PIOs that we can
-- * push on the bus. Thus, it is very important to do a single
-- * 64 bit write here. With two 32-bit writes, my maximum
-- * pkt/sec rate was cut almost in half. -AJF
-- */
-- iowrite64((uint64_t)wr, &wq->ctrl->posted_index);
--#else
-- iowrite32(buf->index, &wq->ctrl->posted_index);
--#endif
-- }
-- wq->to_use = buf;
--
-- wq->ring.desc_avail -= desc_skip_cnt;
--}
--
--static inline void vnic_wq_service(struct vnic_wq *wq,
-- struct cq_desc *cq_desc, u16 completed_index,
-- void (*buf_service)(struct vnic_wq *wq,
-- struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque),
-- void *opaque)
--{
-- struct vnic_wq_buf *buf;
--
-- buf = wq->to_clean;
-- while (1) {
--
-- (*buf_service)(wq, cq_desc, buf, opaque);
--
-- wq->ring.desc_avail++;
--
-- wq->to_clean = buf->next;
--
-- if (buf->index == completed_index)
-- break;
--
-- buf = wq->to_clean;
-- }
--}
--
- void vnic_wq_free(struct vnic_wq *wq);
- int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
- unsigned int desc_count, unsigned int desc_size);
-@@ -275,8 +182,6 @@ unsigned int vnic_wq_error_status(struct vnic_wq *wq);
- void vnic_wq_enable(struct vnic_wq *wq);
- int vnic_wq_disable(struct vnic_wq *wq);
- void vnic_wq_clean(struct vnic_wq *wq,
-- void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
--int vnic_wq_mem_size(struct vnic_wq *wq, unsigned int desc_count,
-- unsigned int desc_size);
-+ void (*buf_clean)(struct vnic_wq_buf *buf));
-
- #endif /* _VNIC_WQ_H_ */
-diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
-index 8c914f5..43b82a6 100644
---- a/drivers/net/enic/enic.h
-+++ b/drivers/net/enic/enic.h
-@@ -155,6 +155,30 @@ static inline struct enic *pmd_priv(struct rte_eth_dev *eth_dev)
- return (struct enic *)eth_dev->data->dev_private;
- }
-
-+static inline uint32_t
-+enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
-+{
-+ uint32_t d = i0 + i1;
-+ d -= (d >= n_descriptors) ? n_descriptors : 0;
-+ return d;
-+}
-+
-+static inline uint32_t
-+enic_ring_sub(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
-+{
-+ int32_t d = i1 - i0;
-+ return (uint32_t)((d < 0) ? ((int32_t)n_descriptors + d) : d);
-+}
-+
-+static inline uint32_t
-+enic_ring_incr(uint32_t n_descriptors, uint32_t idx)
-+{
-+ idx++;
-+ if (unlikely(idx == n_descriptors))
-+ idx = 0;
-+ return idx;
-+}
-+
- #define RTE_LIBRTE_ENIC_ASSERT_ENABLE
- #ifdef RTE_LIBRTE_ENIC_ASSERT_ENABLE
- #define ASSERT(x) do { \
-@@ -209,5 +233,6 @@ extern int enic_clsf_init(struct enic *enic);
- extern void enic_clsf_destroy(struct enic *enic);
- uint16_t enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts);
--
-+uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
-+ uint16_t nb_pkts);
- #endif /* _ENIC_H_ */
-diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
-index 6bea940..697ff82 100644
---- a/drivers/net/enic/enic_ethdev.c
-+++ b/drivers/net/enic/enic_ethdev.c
-@@ -519,71 +519,6 @@ static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, __rte_unused ui
- enic_del_mac_address(enic);
- }
-
--
--static uint16_t enicpmd_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
-- uint16_t nb_pkts)
--{
-- uint16_t index;
-- unsigned int frags;
-- unsigned int pkt_len;
-- unsigned int seg_len;
-- unsigned int inc_len;
-- unsigned int nb_segs;
-- struct rte_mbuf *tx_pkt, *next_tx_pkt;
-- struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
-- struct enic *enic = vnic_dev_priv(wq->vdev);
-- unsigned short vlan_id;
-- unsigned short ol_flags;
-- uint8_t last_seg, eop;
-- unsigned int host_tx_descs = 0;
--
-- for (index = 0; index < nb_pkts; index++) {
-- tx_pkt = *tx_pkts++;
-- inc_len = 0;
-- nb_segs = tx_pkt->nb_segs;
-- if (nb_segs > vnic_wq_desc_avail(wq)) {
-- if (index > 0)
-- enic_post_wq_index(wq);
--
-- /* wq cleanup and try again */
-- if (!enic_cleanup_wq(enic, wq) ||
-- (nb_segs > vnic_wq_desc_avail(wq))) {
-- return index;
-- }
-- }
--
-- pkt_len = tx_pkt->pkt_len;
-- vlan_id = tx_pkt->vlan_tci;
-- ol_flags = tx_pkt->ol_flags;
-- for (frags = 0; inc_len < pkt_len; frags++) {
-- if (!tx_pkt)
-- break;
-- next_tx_pkt = tx_pkt->next;
-- seg_len = tx_pkt->data_len;
-- inc_len += seg_len;
--
-- host_tx_descs++;
-- last_seg = 0;
-- eop = 0;
-- if ((pkt_len == inc_len) || !next_tx_pkt) {
-- eop = 1;
-- /* post if last packet in batch or > thresh */
-- if ((index == (nb_pkts - 1)) ||
-- (host_tx_descs > ENIC_TX_POST_THRESH)) {
-- last_seg = 1;
-- host_tx_descs = 0;
-- }
-- }
-- enic_send_pkt(enic, wq, tx_pkt, (unsigned short)seg_len,
-- !frags, eop, last_seg, ol_flags, vlan_id);
-- tx_pkt = next_tx_pkt;
-- }
-- }
--
-- enic_cleanup_wq(enic, wq);
-- return index;
--}
--
- static const struct eth_dev_ops enicpmd_eth_dev_ops = {
- .dev_configure = enicpmd_dev_configure,
- .dev_start = enicpmd_dev_start,
-@@ -642,7 +577,7 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
- enic->rte_dev = eth_dev;
- eth_dev->dev_ops = &enicpmd_eth_dev_ops;
- eth_dev->rx_pkt_burst = &enic_recv_pkts;
-- eth_dev->tx_pkt_burst = &enicpmd_xmit_pkts;
-+ eth_dev->tx_pkt_burst = &enic_xmit_pkts;
-
- pdev = eth_dev->pci_dev;
- rte_eth_copy_pci_info(eth_dev, pdev);
-diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
-index b164307..9bfdec1 100644
---- a/drivers/net/enic/enic_main.c
-+++ b/drivers/net/enic/enic_main.c
-@@ -40,11 +40,11 @@
- #include <libgen.h>
-
- #include <rte_pci.h>
--#include <rte_memzone.h>
- #include <rte_malloc.h>
- #include <rte_mbuf.h>
- #include <rte_string_fns.h>
- #include <rte_ethdev.h>
-+#include <rte_memzone.h>
-
- #include "enic_compat.h"
- #include "enic.h"
-@@ -58,7 +58,6 @@
- #include "vnic_cq.h"
- #include "vnic_intr.h"
- #include "vnic_nic.h"
--#include "enic_vnic_wq.h"
-
- static inline struct rte_mbuf *
- rte_rxmbuf_alloc(struct rte_mempool *mp)
-@@ -109,38 +108,17 @@ enic_rxmbuf_queue_release(struct enic *enic, struct vnic_rq *rq)
- }
- }
-
--
- void enic_set_hdr_split_size(struct enic *enic, u16 split_hdr_size)
- {
- vnic_set_hdr_split_size(enic->vdev, split_hdr_size);
- }
-
--static void enic_free_wq_buf(__rte_unused struct vnic_wq *wq, struct vnic_wq_buf *buf)
-+static void enic_free_wq_buf(struct vnic_wq_buf *buf)
- {
-- struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->os_buf;
-+ struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->mb;
-
- rte_mempool_put(mbuf->pool, mbuf);
-- buf->os_buf = NULL;
--}
--
--static void enic_wq_free_buf(struct vnic_wq *wq,
-- __rte_unused struct cq_desc *cq_desc,
-- struct vnic_wq_buf *buf,
-- __rte_unused void *opaque)
--{
-- enic_free_wq_buf(wq, buf);
--}
--
--static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
-- __rte_unused u8 type, u16 q_number, u16 completed_index, void *opaque)
--{
-- struct enic *enic = vnic_dev_priv(vdev);
--
-- vnic_wq_service(&enic->wq[q_number], cq_desc,
-- completed_index, enic_wq_free_buf,
-- opaque);
--
-- return 0;
-+ buf->mb = NULL;
- }
-
- static void enic_log_q_error(struct enic *enic)
-@@ -163,64 +141,6 @@ static void enic_log_q_error(struct enic *enic)
- }
- }
-
--unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq)
--{
-- unsigned int cq = enic_cq_wq(enic, wq->index);
--
-- /* Return the work done */
-- return vnic_cq_service(&enic->cq[cq],
-- -1 /*wq_work_to_do*/, enic_wq_service, NULL);
--}
--
--void enic_post_wq_index(struct vnic_wq *wq)
--{
-- enic_vnic_post_wq_index(wq);
--}
--
--void enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
-- struct rte_mbuf *tx_pkt, unsigned short len,
-- uint8_t sop, uint8_t eop, uint8_t cq_entry,
-- uint16_t ol_flags, uint16_t vlan_tag)
--{
-- struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
-- uint16_t mss = 0;
-- uint8_t vlan_tag_insert = 0;
-- uint64_t bus_addr = (dma_addr_t)
-- (tx_pkt->buf_physaddr + tx_pkt->data_off);
--
-- if (sop) {
-- if (ol_flags & PKT_TX_VLAN_PKT)
-- vlan_tag_insert = 1;
--
-- if (enic->hw_ip_checksum) {
-- if (ol_flags & PKT_TX_IP_CKSUM)
-- mss |= ENIC_CALC_IP_CKSUM;
--
-- if (ol_flags & PKT_TX_TCP_UDP_CKSUM)
-- mss |= ENIC_CALC_TCP_UDP_CKSUM;
-- }
-- }
--
-- wq_enet_desc_enc(desc,
-- bus_addr,
-- len,
-- mss,
-- 0 /* header_length */,
-- 0 /* offload_mode WQ_ENET_OFFLOAD_MODE_CSUM */,
-- eop,
-- cq_entry,
-- 0 /* fcoe_encap */,
-- vlan_tag_insert,
-- vlan_tag,
-- 0 /* loopback */);
--
-- enic_vnic_post_wq(wq, (void *)tx_pkt, bus_addr, len,
-- sop,
-- 1 /*desc_skip_cnt*/,
-- cq_entry,
-- 0 /*compressed send*/,
-- 0 /*wrid*/);
--}
-
- void enic_dev_stats_clear(struct enic *enic)
- {
-@@ -298,12 +218,28 @@ void enic_init_vnic_resources(struct enic *enic)
- unsigned int error_interrupt_enable = 1;
- unsigned int error_interrupt_offset = 0;
- unsigned int index = 0;
-+ unsigned int cq_idx;
-+
-+ vnic_dev_stats_clear(enic->vdev);
-
- for (index = 0; index < enic->rq_count; index++) {
- vnic_rq_init(&enic->rq[index],
- enic_cq_rq(enic, index),
- error_interrupt_enable,
- error_interrupt_offset);
-+
-+ cq_idx = enic_cq_rq(enic, index);
-+ vnic_cq_init(&enic->cq[cq_idx],
-+ 0 /* flow_control_enable */,
-+ 1 /* color_enable */,
-+ 0 /* cq_head */,
-+ 0 /* cq_tail */,
-+ 1 /* cq_tail_color */,
-+ 0 /* interrupt_enable */,
-+ 1 /* cq_entry_enable */,
-+ 0 /* cq_message_enable */,
-+ 0 /* interrupt offset */,
-+ 0 /* cq_message_addr */);
- }
-
- for (index = 0; index < enic->wq_count; index++) {
-@@ -311,22 +247,19 @@ void enic_init_vnic_resources(struct enic *enic)
- enic_cq_wq(enic, index),
- error_interrupt_enable,
- error_interrupt_offset);
-- }
--
-- vnic_dev_stats_clear(enic->vdev);
-
-- for (index = 0; index < enic->cq_count; index++) {
-- vnic_cq_init(&enic->cq[index],
-+ cq_idx = enic_cq_wq(enic, index);
-+ vnic_cq_init(&enic->cq[cq_idx],
- 0 /* flow_control_enable */,
- 1 /* color_enable */,
- 0 /* cq_head */,
- 0 /* cq_tail */,
- 1 /* cq_tail_color */,
- 0 /* interrupt_enable */,
-- 1 /* cq_entry_enable */,
-- 0 /* cq_message_enable */,
-+ 0 /* cq_entry_enable */,
-+ 1 /* cq_message_enable */,
- 0 /* interrupt offset */,
-- 0 /* cq_message_addr */);
-+ (u64)enic->wq[index].cqmsg_rz->phys_addr);
- }
-
- vnic_intr_init(&enic->intr,
-@@ -570,6 +503,7 @@ void enic_free_wq(void *txq)
- struct vnic_wq *wq = (struct vnic_wq *)txq;
- struct enic *enic = vnic_dev_priv(wq->vdev);
-
-+ rte_memzone_free(wq->cqmsg_rz);
- vnic_wq_free(wq);
- vnic_cq_free(&enic->cq[enic->rq_count + wq->index]);
- }
-@@ -580,6 +514,8 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
- int err;
- struct vnic_wq *wq = &enic->wq[queue_idx];
- unsigned int cq_index = enic_cq_wq(enic, queue_idx);
-+ char name[NAME_MAX];
-+ static int instance;
-
- wq->socket_id = socket_id;
- if (nb_desc) {
-@@ -615,6 +551,18 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
- dev_err(enic, "error in allocation of cq for wq\n");
- }
-
-+ /* setup up CQ message */
-+ snprintf((char *)name, sizeof(name),
-+ "vnic_cqmsg-%s-%d-%d", enic->bdf_name, queue_idx,
-+ instance++);
-+
-+ wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name,
-+ sizeof(uint32_t),
-+ SOCKET_ID_ANY, 0,
-+ ENIC_ALIGN);
-+ if (!wq->cqmsg_rz)
-+ return -ENOMEM;
-+
- return err;
- }
-
-diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h
-index 00fa71d..3e1bdf5 100644
---- a/drivers/net/enic/enic_res.h
-+++ b/drivers/net/enic/enic_res.h
-@@ -53,89 +53,10 @@
-
- #define ENIC_NON_TSO_MAX_DESC 16
- #define ENIC_DEFAULT_RX_FREE_THRESH 32
--#define ENIC_TX_POST_THRESH (ENIC_MIN_WQ_DESCS / 2)
-+#define ENIC_TX_XMIT_MAX 64
-
- #define ENIC_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0)
-
--static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq,
-- void *os_buf, dma_addr_t dma_addr, unsigned int len,
-- unsigned int mss_or_csum_offset, unsigned int hdr_len,
-- int vlan_tag_insert, unsigned int vlan_tag,
-- int offload_mode, int cq_entry, int sop, int eop, int loopback)
--{
-- struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
-- u8 desc_skip_cnt = 1;
-- u8 compressed_send = 0;
-- u64 wrid = 0;
--
-- wq_enet_desc_enc(desc,
-- (u64)dma_addr | VNIC_PADDR_TARGET,
-- (u16)len,
-- (u16)mss_or_csum_offset,
-- (u16)hdr_len, (u8)offload_mode,
-- (u8)eop, (u8)cq_entry,
-- 0, /* fcoe_encap */
-- (u8)vlan_tag_insert,
-- (u16)vlan_tag,
-- (u8)loopback);
--
-- vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop, desc_skip_cnt,
-- (u8)cq_entry, compressed_send, wrid);
--}
--
--static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq,
-- void *os_buf, dma_addr_t dma_addr, unsigned int len,
-- int eop, int loopback)
--{
-- enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
-- 0, 0, 0, 0, 0,
-- eop, 0 /* !SOP */, eop, loopback);
--}
--
--static inline void enic_queue_wq_desc(struct vnic_wq *wq, void *os_buf,
-- dma_addr_t dma_addr, unsigned int len, int vlan_tag_insert,
-- unsigned int vlan_tag, int eop, int loopback)
--{
-- enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
-- 0, 0, vlan_tag_insert, vlan_tag,
-- WQ_ENET_OFFLOAD_MODE_CSUM,
-- eop, 1 /* SOP */, eop, loopback);
--}
--
--static inline void enic_queue_wq_desc_csum(struct vnic_wq *wq,
-- void *os_buf, dma_addr_t dma_addr, unsigned int len,
-- int ip_csum, int tcpudp_csum, int vlan_tag_insert,
-- unsigned int vlan_tag, int eop, int loopback)
--{
-- enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
-- (ip_csum ? 1 : 0) + (tcpudp_csum ? 2 : 0),
-- 0, vlan_tag_insert, vlan_tag,
-- WQ_ENET_OFFLOAD_MODE_CSUM,
-- eop, 1 /* SOP */, eop, loopback);
--}
--
--static inline void enic_queue_wq_desc_csum_l4(struct vnic_wq *wq,
-- void *os_buf, dma_addr_t dma_addr, unsigned int len,
-- unsigned int csum_offset, unsigned int hdr_len,
-- int vlan_tag_insert, unsigned int vlan_tag, int eop, int loopback)
--{
-- enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
-- csum_offset, hdr_len, vlan_tag_insert, vlan_tag,
-- WQ_ENET_OFFLOAD_MODE_CSUM_L4,
-- eop, 1 /* SOP */, eop, loopback);
--}
--
--static inline void enic_queue_wq_desc_tso(struct vnic_wq *wq,
-- void *os_buf, dma_addr_t dma_addr, unsigned int len,
-- unsigned int mss, unsigned int hdr_len, int vlan_tag_insert,
-- unsigned int vlan_tag, int eop, int loopback)
--{
-- enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
-- mss, hdr_len, vlan_tag_insert, vlan_tag,
-- WQ_ENET_OFFLOAD_MODE_TSO,
-- eop, 1 /* SOP */, eop, loopback);
--}
--
- struct enic;
-
- int enic_get_vnic_config(struct enic *);
-diff --git a/drivers/net/enic/enic_rx.c b/drivers/net/enic/enic_rx.c
-deleted file mode 100644
-index 39bb55c..0000000
---- a/drivers/net/enic/enic_rx.c
-+++ /dev/null
-@@ -1,361 +0,0 @@
--/*
-- * Copyright 2008-2014 Cisco Systems, Inc. All rights reserved.
-- * Copyright 2007 Nuova Systems, Inc. All rights reserved.
-- *
-- * Copyright (c) 2014, Cisco Systems, Inc.
-- * All rights reserved.
-- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions
-- * are met:
-- *
-- * 1. Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- *
-- * 2. Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in
-- * the documentation and/or other materials provided with the
-- * distribution.
-- *
-- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-- * POSSIBILITY OF SUCH DAMAGE.
-- *
-- */
--
--#include <rte_mbuf.h>
--#include <rte_ethdev.h>
--#include <rte_prefetch.h>
--
--#include "enic_compat.h"
--#include "rq_enet_desc.h"
--#include "enic.h"
--
--#define RTE_PMD_USE_PREFETCH
--
--#ifdef RTE_PMD_USE_PREFETCH
--/*
-- * Prefetch a cache line into all cache levels.
-- */
--#define rte_enic_prefetch(p) rte_prefetch0(p)
--#else
--#define rte_enic_prefetch(p) do {} while (0)
--#endif
--
--#ifdef RTE_PMD_PACKET_PREFETCH
--#define rte_packet_prefetch(p) rte_prefetch1(p)
--#else
--#define rte_packet_prefetch(p) do {} while (0)
--#endif
--
--static inline struct rte_mbuf *
--rte_rxmbuf_alloc(struct rte_mempool *mp)
--{
-- struct rte_mbuf *m;
--
-- m = __rte_mbuf_raw_alloc(mp);
-- __rte_mbuf_sanity_check_raw(m, 0);
-- return m;
--}
--
--static inline uint16_t
--enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
--{
-- return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;
--}
--
--static inline uint16_t
--enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
--{
-- return(le16_to_cpu(crd->bytes_written_flags) &
-- ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK);
--}
--
--static inline uint8_t
--enic_cq_rx_desc_packet_error(uint16_t bwflags)
--{
-- return((bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
-- CQ_ENET_RQ_DESC_FLAGS_TRUNCATED);
--}
--
--static inline uint8_t
--enic_cq_rx_desc_eop(uint16_t ciflags)
--{
-- return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
-- == CQ_ENET_RQ_DESC_FLAGS_EOP;
--}
--
--static inline uint8_t
--enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
--{
-- return ((le16_to_cpu(cqrd->q_number_rss_type_flags) &
-- CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
-- CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC);
--}
--
--static inline uint8_t
--enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
--{
-- return ((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
-- CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK);
--}
--
--static inline uint8_t
--enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
--{
-- return((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
-- CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK);
--}
--
--static inline uint8_t
--enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
--{
-- return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
-- CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
--}
--
--static inline uint32_t
--enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
--{
-- return le32_to_cpu(cqrd->rss_hash);
--}
--
--static inline uint16_t
--enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
--{
-- return le16_to_cpu(cqrd->vlan);
--}
--
--static inline uint16_t
--enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
--{
-- struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
-- return le16_to_cpu(cqrd->bytes_written_flags) &
-- CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
--}
--
--static inline uint8_t
--enic_cq_rx_to_pkt_err_flags(struct cq_desc *cqd, uint64_t *pkt_err_flags_out)
--{
-- struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
-- uint16_t bwflags;
-- int ret = 0;
-- uint64_t pkt_err_flags = 0;
--
-- bwflags = enic_cq_rx_desc_bwflags(cqrd);
-- if (unlikely(enic_cq_rx_desc_packet_error(bwflags))) {
-- pkt_err_flags = PKT_RX_MAC_ERR;
-- ret = 1;
-- }
-- *pkt_err_flags_out = pkt_err_flags;
-- return ret;
--}
--
--/*
-- * Lookup table to translate RX CQ flags to mbuf flags.
-- */
--static inline uint32_t
--enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd)
--{
-- struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
-- uint8_t cqrd_flags = cqrd->flags;
-- static const uint32_t cq_type_table[128] __rte_cache_aligned = {
-- [32] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
-- [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
-- | RTE_PTYPE_L4_UDP,
-- [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
-- | RTE_PTYPE_L4_TCP,
-- [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
-- | RTE_PTYPE_L4_FRAG,
-- [16] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
-- [18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
-- | RTE_PTYPE_L4_UDP,
-- [20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
-- | RTE_PTYPE_L4_TCP,
-- [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
-- | RTE_PTYPE_L4_FRAG,
-- /* All others reserved */
-- };
-- cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
-- | CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
-- | CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
-- return cq_type_table[cqrd_flags];
--}
--
--static inline void
--enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
--{
-- struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
-- uint16_t ciflags, bwflags, pkt_flags = 0;
-- ciflags = enic_cq_rx_desc_ciflags(cqrd);
-- bwflags = enic_cq_rx_desc_bwflags(cqrd);
--
-- mbuf->ol_flags = 0;
--
-- /* flags are meaningless if !EOP */
-- if (unlikely(!enic_cq_rx_desc_eop(ciflags)))
-- goto mbuf_flags_done;
--
-- /* VLAN stripping */
-- if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
-- pkt_flags |= PKT_RX_VLAN_PKT;
-- mbuf->vlan_tci = enic_cq_rx_desc_vlan(cqrd);
-- } else {
-- mbuf->vlan_tci = 0;
-- }
--
-- /* RSS flag */
-- if (enic_cq_rx_desc_rss_type(cqrd)) {
-- pkt_flags |= PKT_RX_RSS_HASH;
-- mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
-- }
--
-- /* checksum flags */
-- if (!enic_cq_rx_desc_csum_not_calc(cqrd) &&
-- (mbuf->packet_type & RTE_PTYPE_L3_IPV4)) {
-- if (unlikely(!enic_cq_rx_desc_ipv4_csum_ok(cqrd)))
-- pkt_flags |= PKT_RX_IP_CKSUM_BAD;
-- if (mbuf->packet_type & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)) {
-- if (unlikely(!enic_cq_rx_desc_tcp_udp_csum_ok(cqrd)))
-- pkt_flags |= PKT_RX_L4_CKSUM_BAD;
-- }
-- }
--
-- mbuf_flags_done:
-- mbuf->ol_flags = pkt_flags;
--}
--
--static inline uint32_t
--enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
--{
-- uint32_t d = i0 + i1;
-- ASSERT(i0 < n_descriptors);
-- ASSERT(i1 < n_descriptors);
-- d -= (d >= n_descriptors) ? n_descriptors : 0;
-- return d;
--}
--
--
--uint16_t
--enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
-- uint16_t nb_pkts)
--{
-- struct vnic_rq *rq = rx_queue;
-- struct enic *enic = vnic_dev_priv(rq->vdev);
-- unsigned int rx_id;
-- struct rte_mbuf *nmb, *rxmb;
-- uint16_t nb_rx = 0;
-- uint16_t nb_hold;
-- struct vnic_cq *cq;
-- volatile struct cq_desc *cqd_ptr;
-- uint8_t color;
--
-- cq = &enic->cq[enic_cq_rq(enic, rq->index)];
-- rx_id = cq->to_clean; /* index of cqd, rqd, mbuf_table */
-- cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;
--
-- nb_hold = rq->rx_nb_hold; /* mbufs held by software */
--
-- while (nb_rx < nb_pkts) {
-- volatile struct rq_enet_desc *rqd_ptr;
-- dma_addr_t dma_addr;
-- struct cq_desc cqd;
-- uint64_t ol_err_flags;
-- uint8_t packet_error;
--
-- /* Check for pkts available */
-- color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)
-- & CQ_DESC_COLOR_MASK;
-- if (color == cq->last_color)
-- break;
--
-- /* Get the cq descriptor and rq pointer */
-- cqd = *cqd_ptr;
-- rqd_ptr = (struct rq_enet_desc *)(rq->ring.descs) + rx_id;
--
-- /* allocate a new mbuf */
-- nmb = rte_rxmbuf_alloc(rq->mp);
-- if (nmb == NULL) {
-- dev_err(enic, "RX mbuf alloc failed port=%u qid=%u",
-- enic->port_id, (unsigned)rq->index);
-- rte_eth_devices[enic->port_id].
-- data->rx_mbuf_alloc_failed++;
-- break;
-- }
--
-- /* A packet error means descriptor and data are untrusted */
-- packet_error = enic_cq_rx_to_pkt_err_flags(&cqd, &ol_err_flags);
--
-- /* Get the mbuf to return and replace with one just allocated */
-- rxmb = rq->mbuf_ring[rx_id];
-- rq->mbuf_ring[rx_id] = nmb;
--
-- /* Increment cqd, rqd, mbuf_table index */
-- rx_id++;
-- if (unlikely(rx_id == rq->ring.desc_count)) {
-- rx_id = 0;
-- cq->last_color = cq->last_color ? 0 : 1;
-- }
--
-- /* Prefetch next mbuf & desc while processing current one */
-- cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;
-- rte_enic_prefetch(cqd_ptr);
-- rte_enic_prefetch(rq->mbuf_ring[rx_id]);
-- rte_enic_prefetch((struct rq_enet_desc *)(rq->ring.descs)
-- + rx_id);
--
-- /* Push descriptor for newly allocated mbuf */
-- dma_addr = (dma_addr_t)(nmb->buf_physaddr
-- + RTE_PKTMBUF_HEADROOM);
-- rqd_ptr->address = rte_cpu_to_le_64(dma_addr);
-- rqd_ptr->length_type = cpu_to_le16(nmb->buf_len
-- - RTE_PKTMBUF_HEADROOM);
--
-- /* Fill in the rest of the mbuf */
-- rxmb->data_off = RTE_PKTMBUF_HEADROOM;
-- rxmb->nb_segs = 1;
-- rxmb->next = NULL;
-- rxmb->port = enic->port_id;
-- if (!packet_error) {
-- rxmb->pkt_len = enic_cq_rx_desc_n_bytes(&cqd);
-- rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
-- enic_cq_rx_to_pkt_flags(&cqd, rxmb);
-- } else {
-- rxmb->pkt_len = 0;
-- rxmb->packet_type = 0;
-- rxmb->ol_flags = 0;
-- }
-- rxmb->data_len = rxmb->pkt_len;
--
-- /* prefetch mbuf data for caller */
-- rte_packet_prefetch(RTE_PTR_ADD(rxmb->buf_addr,
-- RTE_PKTMBUF_HEADROOM));
--
-- /* store the mbuf address into the next entry of the array */
-- rx_pkts[nb_rx++] = rxmb;
-- }
--
-- nb_hold += nb_rx;
-- cq->to_clean = rx_id;
--
-- if (nb_hold > rq->rx_free_thresh) {
-- rq->posted_index = enic_ring_add(rq->ring.desc_count,
-- rq->posted_index, nb_hold);
-- nb_hold = 0;
-- rte_mb();
-- iowrite32(rq->posted_index, &rq->ctrl->posted_index);
-- }
--
-- rq->rx_nb_hold = nb_hold;
--
-- return nb_rx;
--}
-diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
-new file mode 100644
-index 0000000..71ca34e
---- /dev/null
-+++ b/drivers/net/enic/enic_rxtx.c
-@@ -0,0 +1,505 @@
-+/*
-+ * Copyright 2008-2016 Cisco Systems, Inc. All rights reserved.
-+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
-+ *
-+ * Copyright (c) 2016, Cisco Systems, Inc.
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions
-+ * are met:
-+ *
-+ * 1. Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ *
-+ * 2. Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in
-+ * the documentation and/or other materials provided with the
-+ * distribution.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ *
-+ */
-+
-+#include <rte_mbuf.h>
-+#include <rte_ethdev.h>
-+#include <rte_prefetch.h>
-+#include <rte_memzone.h>
-+
-+#include "enic_compat.h"
-+#include "rq_enet_desc.h"
-+#include "enic.h"
-+
-+#define RTE_PMD_USE_PREFETCH
-+
-+#ifdef RTE_PMD_USE_PREFETCH
-+/*
-+ * Prefetch a cache line into all cache levels.
-+ */
-+#define rte_enic_prefetch(p) rte_prefetch0(p)
-+#else
-+#define rte_enic_prefetch(p) do {} while (0)
-+#endif
-+
-+#ifdef RTE_PMD_PACKET_PREFETCH
-+#define rte_packet_prefetch(p) rte_prefetch1(p)
-+#else
-+#define rte_packet_prefetch(p) do {} while (0)
-+#endif
-+
-+static inline struct rte_mbuf *
-+rte_rxmbuf_alloc(struct rte_mempool *mp)
-+{
-+ struct rte_mbuf *m;
-+
-+ m = __rte_mbuf_raw_alloc(mp);
-+ __rte_mbuf_sanity_check_raw(m, 0);
-+ return m;
-+}
-+
-+static inline uint16_t
-+enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
-+{
-+ return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;
-+}
-+
-+static inline uint16_t
-+enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
-+{
-+ return(le16_to_cpu(crd->bytes_written_flags) &
-+ ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK);
-+}
-+
-+static inline uint8_t
-+enic_cq_rx_desc_packet_error(uint16_t bwflags)
-+{
-+ return((bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
-+ CQ_ENET_RQ_DESC_FLAGS_TRUNCATED);
-+}
-+
-+static inline uint8_t
-+enic_cq_rx_desc_eop(uint16_t ciflags)
-+{
-+ return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
-+ == CQ_ENET_RQ_DESC_FLAGS_EOP;
-+}
-+
-+static inline uint8_t
-+enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
-+{
-+ return ((le16_to_cpu(cqrd->q_number_rss_type_flags) &
-+ CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
-+ CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC);
-+}
-+
-+static inline uint8_t
-+enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
-+{
-+ return ((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
-+ CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK);
-+}
-+
-+static inline uint8_t
-+enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
-+{
-+ return((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
-+ CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK);
-+}
-+
-+static inline uint8_t
-+enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
-+{
-+ return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
-+ CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
-+}
-+
-+static inline uint32_t
-+enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
-+{
-+ return le32_to_cpu(cqrd->rss_hash);
-+}
-+
-+static inline uint16_t
-+enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
-+{
-+ return le16_to_cpu(cqrd->vlan);
-+}
-+
-+static inline uint16_t
-+enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
-+{
-+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
-+ return le16_to_cpu(cqrd->bytes_written_flags) &
-+ CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
-+}
-+
-+static inline uint8_t
-+enic_cq_rx_to_pkt_err_flags(struct cq_desc *cqd, uint64_t *pkt_err_flags_out)
-+{
-+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
-+ uint16_t bwflags;
-+ int ret = 0;
-+ uint64_t pkt_err_flags = 0;
-+
-+ bwflags = enic_cq_rx_desc_bwflags(cqrd);
-+ if (unlikely(enic_cq_rx_desc_packet_error(bwflags))) {
-+ pkt_err_flags = PKT_RX_MAC_ERR;
-+ ret = 1;
-+ }
-+ *pkt_err_flags_out = pkt_err_flags;
-+ return ret;
-+}
-+
-+/*
-+ * Lookup table to translate RX CQ flags to mbuf flags.
-+ */
-+static inline uint32_t
-+enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd)
-+{
-+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
-+ uint8_t cqrd_flags = cqrd->flags;
-+ static const uint32_t cq_type_table[128] __rte_cache_aligned = {
-+ [32] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
-+ [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
-+ | RTE_PTYPE_L4_UDP,
-+ [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
-+ | RTE_PTYPE_L4_TCP,
-+ [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
-+ | RTE_PTYPE_L4_FRAG,
-+ [16] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
-+ [18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
-+ | RTE_PTYPE_L4_UDP,
-+ [20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
-+ | RTE_PTYPE_L4_TCP,
-+ [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
-+ | RTE_PTYPE_L4_FRAG,
-+ /* All others reserved */
-+ };
-+ cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
-+ | CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
-+ | CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
-+ return cq_type_table[cqrd_flags];
-+}
-+
-+static inline void
-+enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
-+{
-+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
-+ uint16_t ciflags, bwflags, pkt_flags = 0;
-+ ciflags = enic_cq_rx_desc_ciflags(cqrd);
-+ bwflags = enic_cq_rx_desc_bwflags(cqrd);
-+
-+ mbuf->ol_flags = 0;
-+
-+ /* flags are meaningless if !EOP */
-+ if (unlikely(!enic_cq_rx_desc_eop(ciflags)))
-+ goto mbuf_flags_done;
-+
-+ /* VLAN stripping */
-+ if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
-+ pkt_flags |= PKT_RX_VLAN_PKT;
-+ mbuf->vlan_tci = enic_cq_rx_desc_vlan(cqrd);
-+ } else {
-+ mbuf->vlan_tci = 0;
-+ }
-+
-+ /* RSS flag */
-+ if (enic_cq_rx_desc_rss_type(cqrd)) {
-+ pkt_flags |= PKT_RX_RSS_HASH;
-+ mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
-+ }
-+
-+ /* checksum flags */
-+ if (!enic_cq_rx_desc_csum_not_calc(cqrd) &&
-+ (mbuf->packet_type & RTE_PTYPE_L3_IPV4)) {
-+ if (unlikely(!enic_cq_rx_desc_ipv4_csum_ok(cqrd)))
-+ pkt_flags |= PKT_RX_IP_CKSUM_BAD;
-+ if (mbuf->packet_type & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)) {
-+ if (unlikely(!enic_cq_rx_desc_tcp_udp_csum_ok(cqrd)))
-+ pkt_flags |= PKT_RX_L4_CKSUM_BAD;
-+ }
-+ }
-+
-+ mbuf_flags_done:
-+ mbuf->ol_flags = pkt_flags;
-+}
-+
-+uint16_t
-+enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
-+ uint16_t nb_pkts)
-+{
-+ struct vnic_rq *rq = rx_queue;
-+ struct enic *enic = vnic_dev_priv(rq->vdev);
-+ unsigned int rx_id;
-+ struct rte_mbuf *nmb, *rxmb;
-+ uint16_t nb_rx = 0;
-+ uint16_t nb_hold;
-+ struct vnic_cq *cq;
-+ volatile struct cq_desc *cqd_ptr;
-+ uint8_t color;
-+
-+ cq = &enic->cq[enic_cq_rq(enic, rq->index)];
-+ rx_id = cq->to_clean; /* index of cqd, rqd, mbuf_table */
-+ cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;
-+
-+ nb_hold = rq->rx_nb_hold; /* mbufs held by software */
-+
-+ while (nb_rx < nb_pkts) {
-+ volatile struct rq_enet_desc *rqd_ptr;
-+ dma_addr_t dma_addr;
-+ struct cq_desc cqd;
-+ uint64_t ol_err_flags;
-+ uint8_t packet_error;
-+
-+ /* Check for pkts available */
-+ color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)
-+ & CQ_DESC_COLOR_MASK;
-+ if (color == cq->last_color)
-+ break;
-+
-+ /* Get the cq descriptor and rq pointer */
-+ cqd = *cqd_ptr;
-+ rqd_ptr = (struct rq_enet_desc *)(rq->ring.descs) + rx_id;
-+
-+ /* allocate a new mbuf */
-+ nmb = rte_rxmbuf_alloc(rq->mp);
-+ if (nmb == NULL) {
-+ dev_err(enic, "RX mbuf alloc failed port=%u qid=%u",
-+ enic->port_id, (unsigned)rq->index);
-+ rte_eth_devices[enic->port_id].
-+ data->rx_mbuf_alloc_failed++;
-+ break;
-+ }
-+
-+ /* A packet error means descriptor and data are untrusted */
-+ packet_error = enic_cq_rx_to_pkt_err_flags(&cqd, &ol_err_flags);
-+
-+ /* Get the mbuf to return and replace with one just allocated */
-+ rxmb = rq->mbuf_ring[rx_id];
-+ rq->mbuf_ring[rx_id] = nmb;
-+
-+ /* Increment cqd, rqd, mbuf_table index */
-+ rx_id++;
-+ if (unlikely(rx_id == rq->ring.desc_count)) {
-+ rx_id = 0;
-+ cq->last_color = cq->last_color ? 0 : 1;
-+ }
-+
-+ /* Prefetch next mbuf & desc while processing current one */
-+ cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;
-+ rte_enic_prefetch(cqd_ptr);
-+ rte_enic_prefetch(rq->mbuf_ring[rx_id]);
-+ rte_enic_prefetch((struct rq_enet_desc *)(rq->ring.descs)
-+ + rx_id);
-+
-+ /* Push descriptor for newly allocated mbuf */
-+ dma_addr = (dma_addr_t)(nmb->buf_physaddr
-+ + RTE_PKTMBUF_HEADROOM);
-+ rqd_ptr->address = rte_cpu_to_le_64(dma_addr);
-+ rqd_ptr->length_type = cpu_to_le16(nmb->buf_len
-+ - RTE_PKTMBUF_HEADROOM);
-+
-+ /* Fill in the rest of the mbuf */
-+ rxmb->data_off = RTE_PKTMBUF_HEADROOM;
-+ rxmb->nb_segs = 1;
-+ rxmb->next = NULL;
-+ rxmb->port = enic->port_id;
-+ if (!packet_error) {
-+ rxmb->pkt_len = enic_cq_rx_desc_n_bytes(&cqd);
-+ rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
-+ enic_cq_rx_to_pkt_flags(&cqd, rxmb);
-+ } else {
-+ rxmb->pkt_len = 0;
-+ rxmb->packet_type = 0;
-+ rxmb->ol_flags = 0;
-+ }
-+ rxmb->data_len = rxmb->pkt_len;
-+
-+ /* prefetch mbuf data for caller */
-+ rte_packet_prefetch(RTE_PTR_ADD(rxmb->buf_addr,
-+ RTE_PKTMBUF_HEADROOM));
-+
-+ /* store the mbuf address into the next entry of the array */
-+ rx_pkts[nb_rx++] = rxmb;
-+ }
-+
-+ nb_hold += nb_rx;
-+ cq->to_clean = rx_id;
-+
-+ if (nb_hold > rq->rx_free_thresh) {
-+ rq->posted_index = enic_ring_add(rq->ring.desc_count,
-+ rq->posted_index, nb_hold);
-+ nb_hold = 0;
-+ rte_mb();
-+ iowrite32(rq->posted_index, &rq->ctrl->posted_index);
-+ }
-+
-+ rq->rx_nb_hold = nb_hold;
-+
-+ return nb_rx;
-+}
-+
-+static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
-+{
-+ struct vnic_wq_buf *buf;
-+ struct rte_mbuf *m, *free[ENIC_MAX_WQ_DESCS];
-+ unsigned int nb_to_free, nb_free = 0, i;
-+ struct rte_mempool *pool;
-+ unsigned int tail_idx;
-+ unsigned int desc_count = wq->ring.desc_count;
-+
-+ nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index)
-+ + 1;
-+ tail_idx = wq->tail_idx;
-+ buf = &wq->bufs[tail_idx];
-+ pool = ((struct rte_mbuf *)buf->mb)->pool;
-+ for (i = 0; i < nb_to_free; i++) {
-+ buf = &wq->bufs[tail_idx];
-+ m = (struct rte_mbuf *)(buf->mb);
-+ if (likely(m->pool == pool)) {
-+ ASSERT(nb_free < ENIC_MAX_WQ_DESCS);
-+ free[nb_free++] = m;
-+ } else {
-+ rte_mempool_put_bulk(pool, (void *)free, nb_free);
-+ free[0] = m;
-+ nb_free = 1;
-+ pool = m->pool;
-+ }
-+ tail_idx = enic_ring_incr(desc_count, tail_idx);
-+ buf->mb = NULL;
-+ }
-+
-+ rte_mempool_put_bulk(pool, (void **)free, nb_free);
-+
-+ wq->tail_idx = tail_idx;
-+ wq->ring.desc_avail += nb_to_free;
-+}
-+
-+unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
-+{
-+ u16 completed_index;
-+
-+ completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff;
-+
-+ if (wq->last_completed_index != completed_index) {
-+ enic_free_wq_bufs(wq, completed_index);
-+ wq->last_completed_index = completed_index;
-+ }
-+ return 0;
-+}
-+
-+uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
-+ uint16_t nb_pkts)
-+{
-+ uint16_t index;
-+ unsigned int pkt_len, data_len;
-+ unsigned int nb_segs;
-+ struct rte_mbuf *tx_pkt;
-+ struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
-+ struct enic *enic = vnic_dev_priv(wq->vdev);
-+ unsigned short vlan_id;
-+ unsigned short ol_flags;
-+ unsigned int wq_desc_avail;
-+ int head_idx;
-+ struct vnic_wq_buf *buf;
-+ unsigned int hw_ip_cksum_enabled;
-+ unsigned int desc_count;
-+ struct wq_enet_desc *descs, *desc_p, desc_tmp;
-+ uint16_t mss;
-+ uint8_t vlan_tag_insert;
-+ uint8_t eop;
-+ uint64_t bus_addr;
-+
-+ enic_cleanup_wq(enic, wq);
-+ wq_desc_avail = vnic_wq_desc_avail(wq);
-+ head_idx = wq->head_idx;
-+ desc_count = wq->ring.desc_count;
-+
-+ nb_pkts = RTE_MIN(nb_pkts, ENIC_TX_XMIT_MAX);
-+
-+ hw_ip_cksum_enabled = enic->hw_ip_checksum;
-+ for (index = 0; index < nb_pkts; index++) {
-+ tx_pkt = *tx_pkts++;
-+ nb_segs = tx_pkt->nb_segs;
-+ if (nb_segs > wq_desc_avail) {
-+ if (index > 0)
-+ goto post;
-+ goto done;
-+ }
-+
-+ pkt_len = tx_pkt->pkt_len;
-+ data_len = tx_pkt->data_len;
-+ vlan_id = tx_pkt->vlan_tci;
-+ ol_flags = tx_pkt->ol_flags;
-+
-+ mss = 0;
-+ vlan_tag_insert = 0;
-+ bus_addr = (dma_addr_t)
-+ (tx_pkt->buf_physaddr + tx_pkt->data_off);
-+
-+ descs = (struct wq_enet_desc *)wq->ring.descs;
-+ desc_p = descs + head_idx;
-+
-+ eop = (data_len == pkt_len);
-+
-+ if (ol_flags & PKT_TX_VLAN_PKT)
-+ vlan_tag_insert = 1;
-+
-+ if (hw_ip_cksum_enabled && (ol_flags & PKT_TX_IP_CKSUM))
-+ mss |= ENIC_CALC_IP_CKSUM;
-+
-+ if (hw_ip_cksum_enabled && (ol_flags & PKT_TX_TCP_UDP_CKSUM))
-+ mss |= ENIC_CALC_TCP_UDP_CKSUM;
-+
-+ wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, 0, 0, eop,
-+ eop, 0, vlan_tag_insert, vlan_id, 0);
-+
-+ *desc_p = desc_tmp;
-+ buf = &wq->bufs[head_idx];
-+ buf->mb = (void *)tx_pkt;
-+ head_idx = enic_ring_incr(desc_count, head_idx);
-+ wq_desc_avail--;
-+
-+ if (!eop) {
-+ for (tx_pkt = tx_pkt->next; tx_pkt; tx_pkt =
-+ tx_pkt->next) {
-+ data_len = tx_pkt->data_len;
-+
-+ if (tx_pkt->next == NULL)
-+ eop = 1;
-+ desc_p = descs + head_idx;
-+ bus_addr = (dma_addr_t)(tx_pkt->buf_physaddr
-+ + tx_pkt->data_off);
-+ wq_enet_desc_enc((struct wq_enet_desc *)
-+ &desc_tmp, bus_addr, data_len,
-+ mss, 0, 0, eop, eop, 0,
-+ vlan_tag_insert, vlan_id, 0);
-+
-+ *desc_p = desc_tmp;
-+ buf = &wq->bufs[head_idx];
-+ buf->mb = (void *)tx_pkt;
-+ head_idx = enic_ring_incr(desc_count, head_idx);
-+ wq_desc_avail--;
-+ }
-+ }
-+ }
-+ post:
-+ rte_wmb();
-+ iowrite32(head_idx, &wq->ctrl->posted_index);
-+ done:
-+ wq->ring.desc_avail = wq_desc_avail;
-+ wq->head_idx = head_idx;
-+
-+ return index;
-+}
---
-2.7.4
-
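The enic_free_wq_bufs() routine added by the patch above returns completed TX buffers to their mempools in bulk, starting a new batch whenever the pool changes. A minimal standalone sketch of that pattern follows; the types and pool_put_bulk() helper are simplified stand-ins (the real driver calls rte_mempool_put_bulk()), and unlike the driver the sketch also flushes when the batch array fills instead of asserting.

    #include <stdio.h>

    struct pool { const char *name; };            /* opaque stand-in for rte_mempool */
    struct buf  { struct pool *pool; };           /* stand-in for the rte_mbuf fields used */

    #define MAX_BATCH 64

    static void pool_put_bulk(struct pool *p, struct buf **objs, unsigned n)
    {
        (void)objs;
        printf("returning %u buffers to pool %s\n", n, p->name);
    }

    /* Return 'count' completed TX buffers starting at 'tail' to their pools,
     * batching runs of buffers that share a pool into one bulk call. */
    static unsigned free_completed(struct buf **ring, unsigned tail,
                                   unsigned ring_size, unsigned count)
    {
        struct buf *batch[MAX_BATCH];
        unsigned nb = 0;
        struct pool *cur = ring[tail]->pool;

        for (unsigned i = 0; i < count; i++) {
            struct buf *b = ring[tail];
            if (b->pool == cur && nb < MAX_BATCH) {
                batch[nb++] = b;
            } else {
                pool_put_bulk(cur, batch, nb);   /* flush the finished run */
                batch[0] = b;
                nb = 1;
                cur = b->pool;
            }
            tail = (tail + 1 == ring_size) ? 0 : tail + 1;
        }
        pool_put_bulk(cur, batch, nb);           /* flush the last run */
        return tail;                             /* new tail index */
    }

    int main(void)
    {
        struct pool a = { "a" }, b = { "b" };
        struct buf bufs[4] = { { &a }, { &a }, { &b }, { &b } };
        struct buf *ring[4] = { &bufs[0], &bufs[1], &bufs[2], &bufs[3] };

        free_completed(ring, 0, 4, 4);
        return 0;
    }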
diff --git a/dpdk/dpdk-16.04_patches/0013-Revert-ixgbe-fix-packet-type-from-vector-Rx.patch b/dpdk/dpdk-16.04_patches/0013-Revert-ixgbe-fix-packet-type-from-vector-Rx.patch
deleted file mode 100644
index e64ed59..0000000
--- a/dpdk/dpdk-16.04_patches/0013-Revert-ixgbe-fix-packet-type-from-vector-Rx.patch
+++ /dev/null
@@ -1,128 +0,0 @@
-From 33f94cb41621f2816db702b6b104f4642eefa857 Mon Sep 17 00:00:00 2001
-From: Damjan Marion <damarion@cisco.com>
-Date: Fri, 29 Apr 2016 19:51:35 +0200
-Subject: [PATCH 13/17] Revert "ixgbe: fix packet type from vector Rx"
-
-This reverts commit d9a2009a81089093645fea2e04b51dd37edf3e6f.
----
- drivers/net/ixgbe/ixgbe_ethdev.c | 4 +++-
- drivers/net/ixgbe/ixgbe_rxtx_vec.c | 34 +++++++++++++++++++++++-----------
- 2 files changed, 26 insertions(+), 12 deletions(-)
-
-diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
-index 3f1ebc1..c48cb52 100644
---- a/drivers/net/ixgbe/ixgbe_ethdev.c
-+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
-@@ -3000,7 +3000,9 @@ ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
- if (dev->rx_pkt_burst == ixgbe_recv_pkts ||
- dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc ||
- dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc ||
-- dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc)
-+ dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc ||
-+ dev->rx_pkt_burst == ixgbe_recv_pkts_vec ||
-+ dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec)
- return ptypes;
- return NULL;
- }
-diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec.c b/drivers/net/ixgbe/ixgbe_rxtx_vec.c
-index 5040704..ccd93c7 100644
---- a/drivers/net/ixgbe/ixgbe_rxtx_vec.c
-+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec.c
-@@ -220,6 +220,8 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
- 0, 0 /* ignore pkt_type field */
- );
- __m128i dd_check, eop_check;
-+ __m128i desc_mask = _mm_set_epi32(0xFFFFFFFF, 0xFFFFFFFF,
-+ 0xFFFFFFFF, 0xFFFF07F0);
-
- /* nb_pkts shall be less equal than RTE_IXGBE_MAX_RX_BURST */
- nb_pkts = RTE_MIN(nb_pkts, RTE_IXGBE_MAX_RX_BURST);
-@@ -257,8 +259,9 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
- 13, 12, /* octet 12~13, 16 bits data_len */
- 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
- 13, 12, /* octet 12~13, low 16 bits pkt_len */
-- 0xFF, 0xFF, /* skip 32 bit pkt_type */
-- 0xFF, 0xFF
-+ 0xFF, 0xFF, /* skip high 16 bits pkt_type */
-+ 1, /* octet 1, 8 bits pkt_type field */
-+ 0 /* octet 0, 4 bits offset 4 pkt_type field */
- );
-
- /* Cache is empty -> need to scan the buffer rings, but first move
-@@ -275,6 +278,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
- for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
- pos += RTE_IXGBE_DESCS_PER_LOOP,
- rxdp += RTE_IXGBE_DESCS_PER_LOOP) {
-+ __m128i descs0[RTE_IXGBE_DESCS_PER_LOOP];
- __m128i descs[RTE_IXGBE_DESCS_PER_LOOP];
- __m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
- __m128i zero, staterr, sterr_tmp1, sterr_tmp2;
-@@ -285,7 +289,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
-
- /* Read desc statuses backwards to avoid race condition */
- /* A.1 load 4 pkts desc */
-- descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
-+ descs0[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
-
- /* B.2 copy 2 mbuf point into rx_pkts */
- _mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
-@@ -293,10 +297,10 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
- /* B.1 load 1 mbuf point */
- mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);
-
-- descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
-+ descs0[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
- /* B.1 load 2 mbuf point */
-- descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
-- descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
-+ descs0[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
-+ descs0[0] = _mm_loadu_si128((__m128i *)(rxdp));
-
- /* B.2 copy 2 mbuf point into rx_pkts */
- _mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
-@@ -308,6 +312,14 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
- rte_prefetch0(&rx_pkts[pos + 3]->cacheline1);
- }
-
-+ /* A* mask out 0~3 bits RSS type */
-+ descs[3] = _mm_and_si128(descs0[3], desc_mask);
-+ descs[2] = _mm_and_si128(descs0[2], desc_mask);
-+
-+ /* A* mask out 0~3 bits RSS type */
-+ descs[1] = _mm_and_si128(descs0[1], desc_mask);
-+ descs[0] = _mm_and_si128(descs0[0], desc_mask);
-+
- /* avoid compiler reorder optimization */
- rte_compiler_barrier();
-
-@@ -315,22 +327,22 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
- pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
- pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);
-
-- /* D.1 pkt 1,2 convert format from desc to pktmbuf */
-- pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
-- pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);
--
- /* C.1 4=>2 filter staterr info only */
- sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
- /* C.1 4=>2 filter staterr info only */
- sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);
-
- /* set ol_flags with vlan packet type */
-- desc_to_olflags_v(descs, &rx_pkts[pos]);
-+ desc_to_olflags_v(descs0, &rx_pkts[pos]);
-
- /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
- pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
- pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);
-
-+ /* D.1 pkt 1,2 convert format from desc to pktmbuf */
-+ pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
-+ pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);
-+
- /* C.2 get 4 pkts staterr value */
- zero = _mm_xor_si128(dd_check, dd_check);
- staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);
---
-2.7.4
-
diff --git a/dpdk/dpdk-16.04_patches/0014-enic-Set-PKT_RX_VLAN_PKT-iff-returned-packet-has-VLA.patch b/dpdk/dpdk-16.04_patches/0014-enic-Set-PKT_RX_VLAN_PKT-iff-returned-packet-has-VLA.patch
deleted file mode 100644
index e510446..0000000
--- a/dpdk/dpdk-16.04_patches/0014-enic-Set-PKT_RX_VLAN_PKT-iff-returned-packet-has-VLA.patch
+++ /dev/null
@@ -1,42 +0,0 @@
-From 6a7a9e52ed2ccfa86c2def3a66a368a5577f2fc2 Mon Sep 17 00:00:00 2001
-From: John Daley <johndale@cisco.com>
-Date: Tue, 3 May 2016 13:56:05 -0700
-Subject: [PATCH] enic: Set PKT_RX_VLAN_PKT iff returned packet has VLAN tag
-
-Only set the ol_flags PKT_RX_VLAN_PKT bit if the packet being passed
-to the application contains a VLAN tag. This is true whether
-stripping is enabled or disabled.
-
-This area of the API is in flux, so behaviour may change in the
-future.
-
-Signed-off-by: John Daley <johndale@cisco.com>
----
- drivers/net/enic/enic_rxtx.c | 7 +++++--
- 1 file changed, 5 insertions(+), 2 deletions(-)
-
-diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
-index 02b54dd..6a95389 100644
---- a/drivers/net/enic/enic_rxtx.c
-+++ b/drivers/net/enic/enic_rxtx.c
-@@ -206,12 +206,15 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
- if (unlikely(!enic_cq_rx_desc_eop(ciflags)))
- goto mbuf_flags_done;
-
-- /* VLAN stripping */
-+ /* VLAN stripping. Set PKT_RX_VLAN_PKT only if there is a vlan tag
-+ * in the packet passed up
-+ */
- if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
-- pkt_flags |= PKT_RX_VLAN_PKT;
- mbuf->vlan_tci = enic_cq_rx_desc_vlan(cqrd);
- } else {
- mbuf->vlan_tci = 0;
-+ if (enic_cq_rx_desc_vlan(cqrd))
-+ pkt_flags |= PKT_RX_VLAN_PKT;
- }
-
- /* RSS flag */
---
-2.7.0
-
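The 0014 patch above changes enic_cq_rx_to_pkt_flags() so that PKT_RX_VLAN_PKT is reported only when the frame handed to the application still carries a VLAN tag: a stripped tag is stored in vlan_tci without the flag, and an unstripped tag sets the flag. A standalone sketch of that rule, assuming simplified stand-in types and flag values rather than the real DPDK definitions:

    #include <stdint.h>
    #include <stdio.h>

    #define PKT_RX_VLAN_PKT        (1ULL << 0)   /* stand-in flag value */
    #define CQ_FLAGS_VLAN_STRIPPED (1u << 2)     /* stand-in flag value */

    struct rx_completion {      /* stand-in for the enic CQ descriptor fields used */
        uint16_t bwflags;
        uint16_t vlan;
    };

    struct pkt {                /* stand-in for the rte_mbuf fields used */
        uint64_t ol_flags;
        uint16_t vlan_tci;
    };

    /* Report PKT_RX_VLAN_PKT only when the packet passed up still carries
     * a VLAN tag; a stripped tag goes into vlan_tci without the flag,
     * mirroring the removed 0014 patch. */
    static void set_vlan_flags(const struct rx_completion *cq, struct pkt *m)
    {
        if (cq->bwflags & CQ_FLAGS_VLAN_STRIPPED) {
            m->vlan_tci = cq->vlan;     /* tag was stripped by the NIC */
        } else {
            m->vlan_tci = 0;
            if (cq->vlan)               /* tag is still in the packet */
                m->ol_flags |= PKT_RX_VLAN_PKT;
        }
    }

    int main(void)
    {
        struct rx_completion cq = { .bwflags = 0, .vlan = 100 };
        struct pkt m = { 0, 0 };

        set_vlan_flags(&cq, &m);
        printf("ol_flags=0x%llx vlan_tci=%u\n",
               (unsigned long long)m.ol_flags, m.vlan_tci);
        return 0;
    }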
diff --git a/dpdk/dpdk-16.04_patches/0015-ENIC-counter-improvement.patch b/dpdk/dpdk-16.04_patches/0015-ENIC-counter-improvement.patch
deleted file mode 100644
index 721fd10..0000000
--- a/dpdk/dpdk-16.04_patches/0015-ENIC-counter-improvement.patch
+++ /dev/null
@@ -1,165 +0,0 @@
-From 30a3d6e23880094edfc51b49b11099c8b8bfa8cd Mon Sep 17 00:00:00 2001
-From: John Lo <loj@cisco.com>
-Date: Tue, 7 Jun 2016 12:36:23 +0200
-Subject: [PATCH 15/17] ENIC counter improvement
-
----
- drivers/net/enic/enic.h | 7 +++++++
- drivers/net/enic/enic_main.c | 38 ++++++++++++++++++++++++++++++++++----
- drivers/net/enic/enic_rxtx.c | 15 +++++++--------
- 3 files changed, 48 insertions(+), 12 deletions(-)
-
-diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
-index 43b82a6..7c1b5c9 100644
---- a/drivers/net/enic/enic.h
-+++ b/drivers/net/enic/enic.h
-@@ -91,6 +91,11 @@ struct enic_fdir {
- struct enic_fdir_node *nodes[ENICPMD_FDIR_MAX];
- };
-
-+struct enic_soft_stats {
-+ rte_atomic64_t rx_nombuf;
-+ rte_atomic64_t rx_packet_errors;
-+};
-+
- /* Per-instance private data structure */
- struct enic {
- struct enic *next;
-@@ -133,6 +138,8 @@ struct enic {
- /* interrupt resource */
- struct vnic_intr intr;
- unsigned int intr_count;
-+
-+ struct enic_soft_stats soft_stats;
- };
-
- static inline unsigned int enic_cq_rq(__rte_unused struct enic *enic, unsigned int rq)
-diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
-index 9bfdec1..a00565a 100644
---- a/drivers/net/enic/enic_main.c
-+++ b/drivers/net/enic/enic_main.c
-@@ -142,22 +142,51 @@ static void enic_log_q_error(struct enic *enic)
- }
-
-
-+static void enic_clear_soft_stats(struct enic *enic)
-+{
-+ struct enic_soft_stats *soft_stats = &enic->soft_stats;
-+ rte_atomic64_clear(&soft_stats->rx_nombuf);
-+ rte_atomic64_clear(&soft_stats->rx_packet_errors);
-+}
-+
-+static void enic_init_soft_stats(struct enic *enic)
-+{
-+ struct enic_soft_stats *soft_stats = &enic->soft_stats;
-+ rte_atomic64_init(&soft_stats->rx_nombuf);
-+ rte_atomic64_init(&soft_stats->rx_packet_errors);
-+ enic_clear_soft_stats(enic);
-+}
-+
- void enic_dev_stats_clear(struct enic *enic)
- {
- if (vnic_dev_stats_clear(enic->vdev))
- dev_err(enic, "Error in clearing stats\n");
-+ enic_clear_soft_stats(enic);
- }
-
- void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
- {
- struct vnic_stats *stats;
-+ struct enic_soft_stats *soft_stats = &enic->soft_stats;
-+ int64_t rx_truncated;
-+ uint64_t rx_packet_errors;
-
- if (vnic_dev_stats_dump(enic->vdev, &stats)) {
- dev_err(enic, "Error in getting stats\n");
- return;
- }
-
-- r_stats->ipackets = stats->rx.rx_frames_ok;
-+ /* The number of truncated packets can only be calculated by
-+ * subtracting a hardware counter from error packets received by
-+ * the driver. Note: this causes transient inaccuracies in the
-+	 * ipackets count. Also, the lengths of truncated packets are
-+	 * counted in ibytes even though truncated packets are dropped,
-+	 * which can make ibytes slightly higher than it should be.
-+ */
-+ rx_packet_errors = rte_atomic64_read(&soft_stats->rx_packet_errors);
-+ rx_truncated = rx_packet_errors - stats->rx.rx_errors;
-+
-+ r_stats->ipackets = stats->rx.rx_frames_ok - rx_truncated;
- r_stats->opackets = stats->tx.tx_frames_ok;
-
- r_stats->ibytes = stats->rx.rx_bytes_ok;
-@@ -166,10 +195,9 @@ void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
- r_stats->ierrors = stats->rx.rx_errors + stats->rx.rx_drop;
- r_stats->oerrors = stats->tx.tx_errors;
-
-- r_stats->imissed = stats->rx.rx_no_bufs;
-+ r_stats->imissed = stats->rx.rx_no_bufs + rx_truncated;
-
-- r_stats->imcasts = stats->rx.rx_multicast_frames_ok;
-- r_stats->rx_nombuf = stats->rx.rx_no_bufs;
-+ r_stats->rx_nombuf = rte_atomic64_read(&soft_stats->rx_nombuf);
- }
-
- void enic_del_mac_address(struct enic *enic)
-@@ -755,6 +783,8 @@ int enic_setup_finish(struct enic *enic)
- {
- int ret;
-
-+ enic_init_soft_stats(enic);
-+
- ret = enic_set_rss_nic_cfg(enic);
- if (ret) {
- dev_err(enic, "Failed to config nic, aborting.\n");
-diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
-index 138dfb8..174486b 100644
---- a/drivers/net/enic/enic_rxtx.c
-+++ b/drivers/net/enic/enic_rxtx.c
-@@ -251,6 +251,7 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- struct vnic_cq *cq;
- volatile struct cq_desc *cqd_ptr;
- uint8_t color;
-+ uint16_t nb_err = 0;
-
- cq = &enic->cq[enic_cq_rq(enic, rq->index)];
- rx_id = cq->to_clean; /* index of cqd, rqd, mbuf_table */
-@@ -278,10 +279,7 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- /* allocate a new mbuf */
- nmb = rte_rxmbuf_alloc(rq->mp);
- if (nmb == NULL) {
-- dev_err(enic, "RX mbuf alloc failed port=%u qid=%u",
-- enic->port_id, (unsigned)rq->index);
-- rte_eth_devices[enic->port_id].
-- data->rx_mbuf_alloc_failed++;
-+ rte_atomic64_inc(&enic->soft_stats.rx_nombuf);
- break;
- }
-
-@@ -323,9 +321,10 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
- enic_cq_rx_to_pkt_flags(&cqd, rxmb);
- } else {
-- rxmb->pkt_len = 0;
-- rxmb->packet_type = 0;
-- rxmb->ol_flags = 0;
-+ rte_pktmbuf_free(rxmb);
-+ rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
-+ nb_err++;
-+ continue;
- }
- rxmb->data_len = rxmb->pkt_len;
-
-@@ -337,7 +336,7 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- rx_pkts[nb_rx++] = rxmb;
- }
-
-- nb_hold += nb_rx;
-+ nb_hold += nb_rx + nb_err;
- cq->to_clean = rx_id;
-
- if (nb_hold > rq->rx_free_thresh) {
---
-2.7.4
-
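The 0015 patch above derives the truncated-packet count by subtracting the hardware rx_errors counter from the driver's soft rx_packet_errors counter, then folds it into ipackets and imissed. A standalone sketch of that arithmetic, with simplified stand-in structures in place of the vnic and rte_eth_stats types:

    #include <stdint.h>
    #include <stdio.h>

    struct hw_rx_stats {            /* stand-in for the vnic hardware RX counters used */
        uint64_t rx_frames_ok;
        uint64_t rx_errors;
        uint64_t rx_no_bufs;
    };

    struct soft_rx_stats {          /* stand-in for the driver-side soft stats */
        uint64_t rx_nombuf;         /* mbuf allocation failures */
        uint64_t rx_packet_errors;  /* errored packets seen in the RX path */
    };

    struct eth_stats {              /* stand-in for the rte_eth_stats fields used */
        uint64_t ipackets;
        uint64_t imissed;
        uint64_t rx_nombuf;
    };

    /* Truncated packets have no dedicated hardware counter: they are the
     * driver-observed packet errors minus the hardware rx_errors count. */
    static void fill_rx_stats(const struct hw_rx_stats *hw,
                              const struct soft_rx_stats *sw,
                              struct eth_stats *out)
    {
        uint64_t rx_truncated = sw->rx_packet_errors - hw->rx_errors;

        out->ipackets  = hw->rx_frames_ok - rx_truncated;
        out->imissed   = hw->rx_no_bufs + rx_truncated;
        out->rx_nombuf = sw->rx_nombuf;
    }

    int main(void)
    {
        struct hw_rx_stats hw = { 1000, 3, 2 };
        struct soft_rx_stats sw = { 1, 5 };     /* 5 - 3 = 2 truncated */
        struct eth_stats out;

        fill_rx_stats(&hw, &sw, &out);
        printf("ipackets=%llu imissed=%llu rx_nombuf=%llu\n",
               (unsigned long long)out.ipackets,
               (unsigned long long)out.imissed,
               (unsigned long long)out.rx_nombuf);
        return 0;
    }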
diff --git a/dpdk/dpdk-16.04_patches/0016-ENIC-scatter-RX.patch b/dpdk/dpdk-16.04_patches/0016-ENIC-scatter-RX.patch
deleted file mode 100644
index e0daab0..0000000
--- a/dpdk/dpdk-16.04_patches/0016-ENIC-scatter-RX.patch
+++ /dev/null
@@ -1,672 +0,0 @@
-From f03d5a02fc2b3cc24bf059a273ea1473cdb9993b Mon Sep 17 00:00:00 2001
-From: John Lo <loj@cisco.com>
-Date: Tue, 7 Jun 2016 12:40:07 +0200
-Subject: [PATCH 16/17] ENIC scatter RX
-
----
- drivers/net/enic/base/rq_enet_desc.h | 2 +-
- drivers/net/enic/base/vnic_rq.c | 12 +-
- drivers/net/enic/base/vnic_rq.h | 18 ++-
- drivers/net/enic/enic.h | 10 ++
- drivers/net/enic/enic_main.c | 236 +++++++++++++++++++++++++++--------
- drivers/net/enic/enic_rxtx.c | 139 ++++++++++++++-------
- 6 files changed, 313 insertions(+), 104 deletions(-)
-
-diff --git a/drivers/net/enic/base/rq_enet_desc.h b/drivers/net/enic/base/rq_enet_desc.h
-index 7292d9d..13e24b4 100644
---- a/drivers/net/enic/base/rq_enet_desc.h
-+++ b/drivers/net/enic/base/rq_enet_desc.h
-@@ -55,7 +55,7 @@ enum rq_enet_type_types {
- #define RQ_ENET_TYPE_BITS 2
- #define RQ_ENET_TYPE_MASK ((1 << RQ_ENET_TYPE_BITS) - 1)
-
--static inline void rq_enet_desc_enc(struct rq_enet_desc *desc,
-+static inline void rq_enet_desc_enc(volatile struct rq_enet_desc *desc,
- u64 address, u8 type, u16 length)
- {
- desc->address = cpu_to_le64(address);
-diff --git a/drivers/net/enic/base/vnic_rq.c b/drivers/net/enic/base/vnic_rq.c
-index cb62c5e..d97f93e 100644
---- a/drivers/net/enic/base/vnic_rq.c
-+++ b/drivers/net/enic/base/vnic_rq.c
-@@ -84,11 +84,16 @@ void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
- iowrite32(cq_index, &rq->ctrl->cq_index);
- iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
- iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
-- iowrite32(0, &rq->ctrl->dropped_packet_count);
- iowrite32(0, &rq->ctrl->error_status);
- iowrite32(fetch_index, &rq->ctrl->fetch_index);
- iowrite32(posted_index, &rq->ctrl->posted_index);
--
-+ if (rq->is_sop) {
-+// printf("Writing 0x%x to %s rq\n",
-+// ((rq->is_sop << 10) | rq->data_queue_idx),
-+// rq->is_sop ? "sop":"data");
-+ iowrite32(((rq->is_sop << 10) | rq->data_queue_idx),
-+ &rq->ctrl->data_ring);
-+ }
- }
-
- void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
-@@ -96,6 +101,7 @@ void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
- unsigned int error_interrupt_offset)
- {
- u32 fetch_index = 0;
-+
- /* Use current fetch_index as the ring starting point */
- fetch_index = ioread32(&rq->ctrl->fetch_index);
-
-@@ -110,6 +116,8 @@ void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
- error_interrupt_offset);
- rq->rxst_idx = 0;
- rq->tot_pkts = 0;
-+ rq->pkt_first_seg = NULL;
-+ rq->pkt_last_seg = NULL;
- }
-
- void vnic_rq_error_out(struct vnic_rq *rq, unsigned int error)
-diff --git a/drivers/net/enic/base/vnic_rq.h b/drivers/net/enic/base/vnic_rq.h
-index 424415c..d1e2f52 100644
---- a/drivers/net/enic/base/vnic_rq.h
-+++ b/drivers/net/enic/base/vnic_rq.h
-@@ -60,10 +60,18 @@ struct vnic_rq_ctrl {
- u32 pad7;
- u32 error_status; /* 0x48 */
- u32 pad8;
-- u32 dropped_packet_count; /* 0x50 */
-+ u32 tcp_sn; /* 0x50 */
- u32 pad9;
-- u32 dropped_packet_count_rc; /* 0x58 */
-+ u32 unused; /* 0x58 */
- u32 pad10;
-+ u32 dca_select; /* 0x60 */
-+ u32 pad11;
-+ u32 dca_value; /* 0x68 */
-+ u32 pad12;
-+ u32 data_ring; /* 0x70 */
-+ u32 pad13;
-+ u32 header_split; /* 0x78 */
-+ u32 pad14;
- };
-
- struct vnic_rq {
-@@ -82,6 +90,12 @@ struct vnic_rq {
- struct rte_mempool *mp;
- uint16_t rxst_idx;
- uint32_t tot_pkts;
-+ uint16_t data_queue_idx;
-+ uint8_t is_sop;
-+ uint8_t in_use;
-+ struct rte_mbuf *pkt_first_seg;
-+ struct rte_mbuf *pkt_last_seg;
-+ unsigned int max_mbufs_per_pkt;
- };
-
- static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
-diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
-index 7c1b5c9..d2de6ee 100644
---- a/drivers/net/enic/enic.h
-+++ b/drivers/net/enic/enic.h
-@@ -142,6 +142,16 @@ struct enic {
- struct enic_soft_stats soft_stats;
- };
-
-+static inline unsigned int enic_sop_rq(__rte_unused struct enic *enic, unsigned int rq)
-+{
-+ return rq * 2;
-+}
-+
-+static inline unsigned int enic_data_rq(__rte_unused struct enic *enic, unsigned int rq)
-+{
-+ return rq * 2 + 1;
-+}
-+
- static inline unsigned int enic_cq_rq(__rte_unused struct enic *enic, unsigned int rq)
- {
- return rq;
-diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
-index a00565a..be17707 100644
---- a/drivers/net/enic/enic_main.c
-+++ b/drivers/net/enic/enic_main.c
-@@ -247,15 +247,23 @@ void enic_init_vnic_resources(struct enic *enic)
- unsigned int error_interrupt_offset = 0;
- unsigned int index = 0;
- unsigned int cq_idx;
-+ struct vnic_rq *data_rq;
-
- vnic_dev_stats_clear(enic->vdev);
-
- for (index = 0; index < enic->rq_count; index++) {
-- vnic_rq_init(&enic->rq[index],
-+ vnic_rq_init(&enic->rq[enic_sop_rq(enic, index)],
- enic_cq_rq(enic, index),
- error_interrupt_enable,
- error_interrupt_offset);
-
-+ data_rq = &enic->rq[enic_data_rq(enic, index)];
-+ if (data_rq->in_use)
-+ vnic_rq_init(data_rq,
-+ enic_cq_rq(enic, index),
-+ error_interrupt_enable,
-+ error_interrupt_offset);
-+
- cq_idx = enic_cq_rq(enic, index);
- vnic_cq_init(&enic->cq[cq_idx],
- 0 /* flow_control_enable */,
-@@ -305,6 +313,9 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
- unsigned i;
- dma_addr_t dma_addr;
-
-+ if (!rq->in_use)
-+ return 0;
-+
- dev_debug(enic, "queue %u, allocating %u rx queue mbufs\n", rq->index,
- rq->ring.desc_count);
-
-@@ -316,20 +327,20 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
- return -ENOMEM;
- }
-
-- dma_addr = (dma_addr_t)(mb->buf_physaddr
-- + RTE_PKTMBUF_HEADROOM);
--
-- rq_enet_desc_enc(rqd, dma_addr, RQ_ENET_TYPE_ONLY_SOP,
-- mb->buf_len - RTE_PKTMBUF_HEADROOM);
-+ dma_addr = (dma_addr_t)(mb->buf_physaddr + RTE_PKTMBUF_HEADROOM);
-+ rq_enet_desc_enc(rqd, dma_addr,
-+ (rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
-+ : RQ_ENET_TYPE_NOT_SOP),
-+ mb->buf_len - RTE_PKTMBUF_HEADROOM);
- rq->mbuf_ring[i] = mb;
- }
-
- /* make sure all prior writes are complete before doing the PIO write */
- rte_rmb();
-
-- /* Post all but the last 2 cache lines' worth of descriptors */
-- rq->posted_index = rq->ring.desc_count - (2 * RTE_CACHE_LINE_SIZE
-- / sizeof(struct rq_enet_desc));
-+ /* Post all but the last buffer to VIC. */
-+ rq->posted_index = rq->ring.desc_count - 1;
-+
- rq->rx_nb_hold = 0;
-
- dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n",
-@@ -337,6 +348,8 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
- iowrite32(rq->posted_index, &rq->ctrl->posted_index);
- rte_rmb();
-
-+// printf("posted %d buffers to %s rq\n", rq->ring.desc_count,
-+// rq->is_sop ? "sop" : "data");
- return 0;
-
- }
-@@ -398,17 +411,25 @@ int enic_enable(struct enic *enic)
- "Flow director feature will not work\n");
-
- for (index = 0; index < enic->rq_count; index++) {
-- err = enic_alloc_rx_queue_mbufs(enic, &enic->rq[index]);
-+ err = enic_alloc_rx_queue_mbufs(enic, &enic->rq[enic_sop_rq(enic, index)]);
- if (err) {
-- dev_err(enic, "Failed to alloc RX queue mbufs\n");
-+ dev_err(enic, "Failed to alloc sop RX queue mbufs\n");
-+ return err;
-+ }
-+ err = enic_alloc_rx_queue_mbufs(enic, &enic->rq[enic_data_rq(enic, index)]);
-+ if (err) {
-+ /* release the previously allocated mbufs for the sop rq */
-+ enic_rxmbuf_queue_release(enic, &enic->rq[enic_sop_rq(enic, index)]);
-+
-+ dev_err(enic, "Failed to alloc data RX queue mbufs\n");
- return err;
- }
- }
-
- for (index = 0; index < enic->wq_count; index++)
-- vnic_wq_enable(&enic->wq[index]);
-+ enic_start_wq(enic, index);
- for (index = 0; index < enic->rq_count; index++)
-- vnic_rq_enable(&enic->rq[index]);
-+ enic_start_rq(enic, index);
-
- vnic_dev_enable_wait(enic->vdev);
-
-@@ -440,14 +461,26 @@ int enic_alloc_intr_resources(struct enic *enic)
-
- void enic_free_rq(void *rxq)
- {
-- struct vnic_rq *rq = (struct vnic_rq *)rxq;
-- struct enic *enic = vnic_dev_priv(rq->vdev);
-+ struct vnic_rq *rq_sop = (struct vnic_rq *)rxq;
-+ struct enic *enic = vnic_dev_priv(rq_sop->vdev);
-+ struct vnic_rq *rq_data = &enic->rq[rq_sop->data_queue_idx];
-
-- enic_rxmbuf_queue_release(enic, rq);
-- rte_free(rq->mbuf_ring);
-- rq->mbuf_ring = NULL;
-- vnic_rq_free(rq);
-- vnic_cq_free(&enic->cq[rq->index]);
-+ enic_rxmbuf_queue_release(enic, rq_sop);
-+ if (rq_data->in_use)
-+ enic_rxmbuf_queue_release(enic, rq_data);
-+
-+ rte_free(rq_sop->mbuf_ring);
-+ if (rq_data->in_use)
-+ rte_free(rq_data->mbuf_ring);
-+
-+ rq_sop->mbuf_ring = NULL;
-+ rq_data->mbuf_ring = NULL;
-+
-+ vnic_rq_free(rq_sop);
-+ if (rq_data->in_use)
-+ vnic_rq_free(rq_data);
-+
-+ vnic_cq_free(&enic->cq[rq_sop->index]);
- }
-
- void enic_start_wq(struct enic *enic, uint16_t queue_idx)
-@@ -462,12 +495,32 @@ int enic_stop_wq(struct enic *enic, uint16_t queue_idx)
-
- void enic_start_rq(struct enic *enic, uint16_t queue_idx)
- {
-- vnic_rq_enable(&enic->rq[queue_idx]);
-+ struct vnic_rq *rq_sop = &enic->rq[enic_sop_rq(enic, queue_idx)];
-+ struct vnic_rq *rq_data = &enic->rq[rq_sop->data_queue_idx];
-+
-+ if (rq_data->in_use)
-+ vnic_rq_enable(rq_data);
-+ rte_mb();
-+ vnic_rq_enable(rq_sop);
-+
- }
-
- int enic_stop_rq(struct enic *enic, uint16_t queue_idx)
- {
-- return vnic_rq_disable(&enic->rq[queue_idx]);
-+ int ret1 = 0, ret2 = 0;
-+
-+ struct vnic_rq *rq_sop = &enic->rq[enic_sop_rq(enic, queue_idx)];
-+ struct vnic_rq *rq_data = &enic->rq[rq_sop->data_queue_idx];
-+
-+ ret2 = vnic_rq_disable(rq_sop);
-+ rte_mb();
-+ if (rq_data->in_use)
-+ ret1 = vnic_rq_disable(rq_data);
-+
-+ if (ret2)
-+ return ret2;
-+ else
-+ return ret1;
- }
-
- int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
-@@ -475,53 +528,128 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
- uint16_t nb_desc)
- {
- int rc;
-- struct vnic_rq *rq = &enic->rq[queue_idx];
--
-- rq->socket_id = socket_id;
-- rq->mp = mp;
-+ uint16_t sop_queue_idx = enic_sop_rq(enic, queue_idx);
-+ uint16_t data_queue_idx = enic_data_rq(enic, queue_idx);
-+ struct vnic_rq *rq_sop = &enic->rq[sop_queue_idx];
-+ struct vnic_rq *rq_data = &enic->rq[data_queue_idx];
-+ unsigned int mbuf_size, mbufs_per_pkt;
-+ unsigned int nb_sop_desc, nb_data_desc;
-+ uint16_t min_sop, max_sop, min_data, max_data;
-+
-+ rq_sop->is_sop = 1;
-+ rq_sop->data_queue_idx = data_queue_idx;
-+ rq_data->is_sop = 0;
-+ rq_data->data_queue_idx = 0;
-+ rq_sop->socket_id = socket_id;
-+ rq_sop->mp = mp;
-+ rq_data->socket_id = socket_id;
-+ rq_data->mp = mp;
-+ rq_sop->in_use = 1;
-+
-+ mbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM);
-+
-+ /* ceil(mtu/mbuf_size) */
-+ mbufs_per_pkt = (enic->config.mtu + (mbuf_size - 1)) / mbuf_size;
-+
-+ if (mbufs_per_pkt > 1)
-+ rq_data->in_use = 1;
-+ else
-+ rq_data->in_use = 0;
-+
-+ /* number of descriptors have to be a multiple of 32 */
-+ nb_sop_desc = (nb_desc / mbufs_per_pkt) & ~0x1F;
-+ nb_data_desc = (nb_desc - nb_sop_desc) & ~0x1F;
-+
-+ rq_sop->max_mbufs_per_pkt = mbufs_per_pkt;
-+ rq_data->max_mbufs_per_pkt = mbufs_per_pkt;
-+
-+ //printf("mtu = %u, mbuf_size = %u, mbuf_per_pkt = %u\n",
-+ // enic->config.mtu, mbuf_size, mbufs_per_pkt);
-+
-+ if (mbufs_per_pkt > 1) {
-+ min_sop = 64;
-+ max_sop = ((enic->config.rq_desc_count / (mbufs_per_pkt - 1)) & ~0x1F);
-+ min_data = min_sop * (mbufs_per_pkt - 1);
-+ max_data = enic->config.rq_desc_count;
-+ } else {
-+ min_sop = 64;
-+ max_sop = enic->config.rq_desc_count;
-+ min_data = 0;
-+ max_data = 0;
-+ }
-
-- if (nb_desc) {
-- if (nb_desc > enic->config.rq_desc_count) {
-- dev_warning(enic,
-- "RQ %d - number of rx desc in cmd line (%d)"\
-- "is greater than that in the UCSM/CIMC adapter"\
-- "policy. Applying the value in the adapter "\
-- "policy (%d).\n",
-- queue_idx, nb_desc, enic->config.rq_desc_count);
-- nb_desc = enic->config.rq_desc_count;
-- }
-- dev_info(enic, "RX Queues - effective number of descs:%d\n",
-- nb_desc);
-+ if (nb_desc < (min_sop + min_data)) {
-+ dev_warning(enic,
-+ "Number of rx descs too low, adjusting to minimum\n");
-+ nb_sop_desc = min_sop;
-+ nb_data_desc = min_data;
-+ } else if (nb_desc > (max_sop + max_data)){
-+ dev_warning(enic,
-+ "Number of rx_descs too high, adjusting to maximum\n");
-+ nb_sop_desc = max_sop;
-+ nb_data_desc = max_data;
- }
-+ dev_info(enic, "For mtu %d and mbuf size %d valid rx descriptor range is %d to %d\n",
-+ enic->config.mtu, mbuf_size, min_sop + min_data, max_sop + max_data);
-
-- /* Allocate queue resources */
-- rc = vnic_rq_alloc(enic->vdev, rq, queue_idx,
-- nb_desc, sizeof(struct rq_enet_desc));
-+ dev_info(enic, "Using %d rx descriptors (sop %d, data %d)\n",
-+ nb_sop_desc + nb_data_desc, nb_sop_desc, nb_data_desc);
-+
-+ /* Allocate sop queue resources */
-+ rc = vnic_rq_alloc(enic->vdev, rq_sop, sop_queue_idx,
-+ nb_sop_desc, sizeof(struct rq_enet_desc));
- if (rc) {
-- dev_err(enic, "error in allocation of rq\n");
-+ dev_err(enic, "error in allocation of sop rq\n");
- goto err_exit;
- }
--
-+ nb_sop_desc = rq_sop->ring.desc_count;
-+
-+ if (rq_data->in_use) {
-+ /* Allocate data queue resources */
-+ rc = vnic_rq_alloc(enic->vdev, rq_data, data_queue_idx,
-+ nb_data_desc,
-+ sizeof(struct rq_enet_desc));
-+ if (rc) {
-+ dev_err(enic, "error in allocation of data rq\n");
-+ goto err_free_rq_sop;
-+ }
-+ nb_data_desc = rq_data->ring.desc_count;
-+ }
- rc = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx,
-- socket_id, nb_desc,
-- sizeof(struct cq_enet_rq_desc));
-+ socket_id, nb_sop_desc + nb_data_desc,
-+ sizeof(struct cq_enet_rq_desc));
- if (rc) {
- dev_err(enic, "error in allocation of cq for rq\n");
-- goto err_free_rq_exit;
-+ goto err_free_rq_data;
- }
-
-- /* Allocate the mbuf ring */
-- rq->mbuf_ring = (struct rte_mbuf **)rte_zmalloc_socket("rq->mbuf_ring",
-- sizeof(struct rte_mbuf *) * nb_desc,
-- RTE_CACHE_LINE_SIZE, rq->socket_id);
-+ /* Allocate the mbuf rings */
-+ rq_sop->mbuf_ring = (struct rte_mbuf **)rte_zmalloc_socket("rq->mbuf_ring",
-+ sizeof(struct rte_mbuf *) * nb_sop_desc,
-+ RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
-+ if (rq_sop->mbuf_ring == NULL)
-+ goto err_free_cq;
-+
-+ if (rq_data->in_use) {
-+ rq_data->mbuf_ring = (struct rte_mbuf **)rte_zmalloc_socket("rq->mbuf_ring",
-+ sizeof(struct rte_mbuf *) * nb_data_desc,
-+ RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
-+ if (rq_data->mbuf_ring == NULL)
-+ goto err_free_sop_mbuf;
-+ }
-
-- if (rq->mbuf_ring != NULL)
-- return 0;
-+ return 0;
-
-+err_free_sop_mbuf:
-+ rte_free(rq_sop->mbuf_ring);
-+err_free_cq:
- /* cleanup on error */
- vnic_cq_free(&enic->cq[queue_idx]);
--err_free_rq_exit:
-- vnic_rq_free(rq);
-+err_free_rq_data:
-+ if (rq_data->in_use)
-+ vnic_rq_free(rq_data);
-+err_free_rq_sop:
-+ vnic_rq_free(rq_sop);
- err_exit:
- return -ENOMEM;
- }
-diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
-index 174486b..463b954 100644
---- a/drivers/net/enic/enic_rxtx.c
-+++ b/drivers/net/enic/enic_rxtx.c
-@@ -242,22 +242,27 @@ uint16_t
- enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts)
- {
-- struct vnic_rq *rq = rx_queue;
-- struct enic *enic = vnic_dev_priv(rq->vdev);
-- unsigned int rx_id;
-+ struct vnic_rq *sop_rq = rx_queue;
-+ struct vnic_rq *data_rq;
-+ struct vnic_rq *rq;
-+ struct enic *enic = vnic_dev_priv(sop_rq->vdev);
-+ uint16_t cq_idx;
-+ uint16_t rq_idx;
-+ uint16_t rq_num;
- struct rte_mbuf *nmb, *rxmb;
- uint16_t nb_rx = 0;
-- uint16_t nb_hold;
- struct vnic_cq *cq;
- volatile struct cq_desc *cqd_ptr;
- uint8_t color;
-- uint16_t nb_err = 0;
-+ uint16_t seg_length;
-+ struct rte_mbuf *first_seg = sop_rq->pkt_first_seg;
-+ struct rte_mbuf *last_seg = sop_rq->pkt_last_seg;
-
-- cq = &enic->cq[enic_cq_rq(enic, rq->index)];
-- rx_id = cq->to_clean; /* index of cqd, rqd, mbuf_table */
-- cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;
-+ cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)];
-+ cq_idx = cq->to_clean; /* index of cqd, rqd, mbuf_table */
-+ cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
-
-- nb_hold = rq->rx_nb_hold; /* mbufs held by software */
-+ data_rq = &enic->rq[sop_rq->data_queue_idx];
-
- while (nb_rx < nb_pkts) {
- volatile struct rq_enet_desc *rqd_ptr;
-@@ -265,6 +270,7 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- struct cq_desc cqd;
- uint64_t ol_err_flags;
- uint8_t packet_error;
-+ uint16_t ciflags;
-
- /* Check for pkts available */
- color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)
-@@ -272,9 +278,13 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- if (color == cq->last_color)
- break;
-
-- /* Get the cq descriptor and rq pointer */
-+ /* Get the cq descriptor and extract rq info from it */
- cqd = *cqd_ptr;
-- rqd_ptr = (struct rq_enet_desc *)(rq->ring.descs) + rx_id;
-+ rq_num = cqd.q_number & CQ_DESC_Q_NUM_MASK;
-+ rq_idx = cqd.completed_index & CQ_DESC_COMP_NDX_MASK;
-+
-+ rq = &enic->rq[rq_num];
-+ rqd_ptr = ((struct rq_enet_desc *)rq->ring.descs) + rq_idx;
-
- /* allocate a new mbuf */
- nmb = rte_rxmbuf_alloc(rq->mp);
-@@ -287,67 +297,106 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- packet_error = enic_cq_rx_to_pkt_err_flags(&cqd, &ol_err_flags);
-
- /* Get the mbuf to return and replace with one just allocated */
-- rxmb = rq->mbuf_ring[rx_id];
-- rq->mbuf_ring[rx_id] = nmb;
-+ rxmb = rq->mbuf_ring[rq_idx];
-+ rq->mbuf_ring[rq_idx] = nmb;
-
- /* Increment cqd, rqd, mbuf_table index */
-- rx_id++;
-- if (unlikely(rx_id == rq->ring.desc_count)) {
-- rx_id = 0;
-+ cq_idx++;
-+ if (unlikely(cq_idx == cq->ring.desc_count)) {
-+ cq_idx = 0;
- cq->last_color = cq->last_color ? 0 : 1;
- }
-
- /* Prefetch next mbuf & desc while processing current one */
-- cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;
-+ cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
- rte_enic_prefetch(cqd_ptr);
-- rte_enic_prefetch(rq->mbuf_ring[rx_id]);
-- rte_enic_prefetch((struct rq_enet_desc *)(rq->ring.descs)
-- + rx_id);
-+// rte_enic_prefetch(rq->mbuf_ring[rx_id]);
-+// rte_enic_prefetch((struct rq_enet_desc *)(rq->ring.descs)
-+// + rx_id);
-+
-+ ciflags = enic_cq_rx_desc_ciflags((struct cq_enet_rq_desc *) &cqd);
-
- /* Push descriptor for newly allocated mbuf */
-- dma_addr = (dma_addr_t)(nmb->buf_physaddr
-- + RTE_PKTMBUF_HEADROOM);
-- rqd_ptr->address = rte_cpu_to_le_64(dma_addr);
-- rqd_ptr->length_type = cpu_to_le16(nmb->buf_len
-- - RTE_PKTMBUF_HEADROOM);
-+
-+ dma_addr = (dma_addr_t)(nmb->buf_physaddr + RTE_PKTMBUF_HEADROOM);
-+ rq_enet_desc_enc(rqd_ptr, dma_addr,
-+ (rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
-+ : RQ_ENET_TYPE_NOT_SOP),
-+ nmb->buf_len - RTE_PKTMBUF_HEADROOM);
-
- /* Fill in the rest of the mbuf */
-- rxmb->data_off = RTE_PKTMBUF_HEADROOM;
-- rxmb->nb_segs = 1;
-+ seg_length = enic_cq_rx_desc_n_bytes(&cqd);
-+ rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
-+ enic_cq_rx_to_pkt_flags(&cqd, rxmb);
-+ if (rq->is_sop) {
-+ first_seg = rxmb;
-+ first_seg->nb_segs = 1;
-+ first_seg->pkt_len = seg_length;
-+ } else {
-+ first_seg->pkt_len = (uint16_t)(first_seg->pkt_len
-+ + seg_length);
-+ first_seg->nb_segs++;
-+ last_seg->next = rxmb;
-+ }
-+
- rxmb->next = NULL;
- rxmb->port = enic->port_id;
-- if (!packet_error) {
-- rxmb->pkt_len = enic_cq_rx_desc_n_bytes(&cqd);
-- rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
-- enic_cq_rx_to_pkt_flags(&cqd, rxmb);
-- } else {
-- rte_pktmbuf_free(rxmb);
-+ rxmb->data_len = seg_length;
-+
-+ rq->rx_nb_hold++;
-+
-+ if (!(enic_cq_rx_desc_eop(ciflags))) {
-+ last_seg = rxmb;
-+ continue;
-+ }
-+
-+ if (unlikely(packet_error)) {
-+ rte_pktmbuf_free(first_seg);
- rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
-- nb_err++;
-+
- continue;
- }
-- rxmb->data_len = rxmb->pkt_len;
-+
-+
-+// printf("EOP: final packet length is %d\n", first_seg->pkt_len);
-+// rte_pktmbuf_dump(stdout, first_seg, 64);
-
- /* prefetch mbuf data for caller */
-- rte_packet_prefetch(RTE_PTR_ADD(rxmb->buf_addr,
-+ rte_packet_prefetch(RTE_PTR_ADD(first_seg->buf_addr,
- RTE_PKTMBUF_HEADROOM));
-
- /* store the mbuf address into the next entry of the array */
-- rx_pkts[nb_rx++] = rxmb;
-+ rx_pkts[nb_rx++] = first_seg;
- }
-
-- nb_hold += nb_rx + nb_err;
-- cq->to_clean = rx_id;
-+ sop_rq->pkt_first_seg = first_seg;
-+ sop_rq->pkt_last_seg = last_seg;
-+
-+ cq->to_clean = cq_idx;
-+
-+ if ((sop_rq->rx_nb_hold + data_rq->rx_nb_hold) > sop_rq->rx_free_thresh) {
-+ if (data_rq->in_use) {
-+ data_rq->posted_index = enic_ring_add(data_rq->ring.desc_count,
-+ data_rq->posted_index,
-+ data_rq->rx_nb_hold);
-+ //printf("Processed %d data descs. Posted index now %d\n",
-+ // data_rq->rx_nb_hold, data_rq->posted_index);
-+ data_rq->rx_nb_hold = 0;
-+ }
-+ sop_rq->posted_index = enic_ring_add(sop_rq->ring.desc_count,
-+ sop_rq->posted_index,
-+ sop_rq->rx_nb_hold);
-+ //printf("Processed %d sop descs. Posted index now %d\n",
-+ // sop_rq->rx_nb_hold, sop_rq->posted_index);
-+ sop_rq->rx_nb_hold = 0;
-
-- if (nb_hold > rq->rx_free_thresh) {
-- rq->posted_index = enic_ring_add(rq->ring.desc_count,
-- rq->posted_index, nb_hold);
-- nb_hold = 0;
- rte_mb();
-- iowrite32(rq->posted_index, &rq->ctrl->posted_index);
-+ if (data_rq->in_use)
-+ iowrite32(data_rq->posted_index, &data_rq->ctrl->posted_index);
-+ rte_compiler_barrier();
-+ iowrite32(sop_rq->posted_index, &sop_rq->ctrl->posted_index);
- }
-
-- rq->rx_nb_hold = nb_hold;
-
- return nb_rx;
- }
---
-2.7.4
-
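The 0016 patch above splits each RX queue into a start-of-packet (SOP) ring and a data ring, sizing them from mbufs_per_pkt = ceil(mtu / mbuf_size) and rounding both counts down to multiples of 32. A standalone sketch of that sizing arithmetic, omitting the min/max clamping the driver also applies; the example MTU and buffer size are illustrative only:

    #include <stdio.h>

    /* Split 'nb_desc' RX descriptors between the SOP ring and the data ring.
     * 'mbuf_size' is the usable data room per mbuf (room minus headroom). */
    static void split_rx_descs(unsigned mtu, unsigned mbuf_size, unsigned nb_desc,
                               unsigned *nb_sop, unsigned *nb_data)
    {
        /* ceil(mtu / mbuf_size): how many buffers one packet can span */
        unsigned mbufs_per_pkt = (mtu + mbuf_size - 1) / mbuf_size;

        /* descriptor counts must stay multiples of 32 */
        *nb_sop  = (nb_desc / mbufs_per_pkt) & ~0x1Fu;
        *nb_data = (mbufs_per_pkt > 1) ? ((nb_desc - *nb_sop) & ~0x1Fu) : 0;
    }

    int main(void)
    {
        unsigned sop, data;

        /* 9000 / 2048 -> 5 mbufs per packet -> sop=192 data=832 */
        split_rx_descs(9000, 2048, 1024, &sop, &data);
        printf("sop=%u data=%u\n", sop, data);
        return 0;
    }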
diff --git a/dpdk/dpdk-16.04_patches/0017-NXP-DPAA2-Poll-Mode-Driver-Support.patch b/dpdk/dpdk-16.04_patches/0017-NXP-DPAA2-Poll-Mode-Driver-Support.patch
deleted file mode 100644
index 2553997..0000000
--- a/dpdk/dpdk-16.04_patches/0017-NXP-DPAA2-Poll-Mode-Driver-Support.patch
+++ /dev/null
@@ -1,40404 +0,0 @@
-From b8d83a0825f2d7d0d626c00f79de7b415f8dc344 Mon Sep 17 00:00:00 2001
-From: Sachin Saxena <sachin.saxena@nxp.com>
-Date: Fri, 17 Jun 2016 12:32:28 +0530
-Subject: [PATCH 17/17] NXP DPAA2 Poll Mode Driver Support
-
- Upstreaming of DPAA2 driver changes is in progress.This patch will
- temporary add the support in VPP in built DPDK.
-
- Two types of changes:
- 1. Driver specfic independent files. No impact on any other functionality.
- 2. Changes in common EAL framework. These changes are done in compile time DPAA2
- specific flag, so no impact is expected on other existing features if not
- compiling for DPAA2.
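
 As an illustration of the compile-time gating described in item 2 above
 (not part of the original patch), DPAA2-only EAL code can be wrapped in the
 RTE_LIBRTE_DPAA2_PMD macro that CONFIG_RTE_LIBRTE_DPAA2_PMD (added in the
 defconfig below) is expected to define; the init hook name here is
 hypothetical:

 #include <stdio.h>

 #ifdef RTE_LIBRTE_DPAA2_PMD
 /* DPAA2 build: this is where the FSL-MC bus / VFIO container would be
  * scanned and initialized. */
 static int eal_dpaa2_init(void)
 {
         printf("DPAA2 support compiled in\n");
         return 0;
 }
 #else
 /* Non-DPAA2 build: stub, so no DPAA2 code is pulled in and other targets
  * compile exactly as before. */
 static int eal_dpaa2_init(void)
 {
         return 0;
 }
 #endif

 int main(void)
 {
         return eal_dpaa2_init();
 }

 Building with the flag unset compiles only the stub, which is why no
 behavioural change is expected on non-DPAA2 targets.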
-
-Signed-off-by: Sachin Saxena <sachin.saxena@nxp.com>
----
- config/defconfig_arm64-dpaa2-linuxapp-gcc | 61 +
- drivers/net/Makefile | 1 +
- drivers/net/dpaa2/Makefile | 102 +
- drivers/net/dpaa2/dpaa2_logs.h | 77 +
- drivers/net/dpaa2/mc/dpaiop.c | 457 ++++
- drivers/net/dpaa2/mc/dpbp.c | 432 ++++
- drivers/net/dpaa2/mc/dpci.c | 501 ++++
- drivers/net/dpaa2/mc/dpcon.c | 401 +++
- drivers/net/dpaa2/mc/dpdbg.c | 547 +++++
- drivers/net/dpaa2/mc/dpdcei.c | 449 ++++
- drivers/net/dpaa2/mc/dpdmai.c | 452 ++++
- drivers/net/dpaa2/mc/dpdmux.c | 567 +++++
- drivers/net/dpaa2/mc/dpio.c | 468 ++++
- drivers/net/dpaa2/mc/dpmac.c | 422 ++++
- drivers/net/dpaa2/mc/dpmcp.c | 312 +++
- drivers/net/dpaa2/mc/dpmng.c | 58 +
- drivers/net/dpaa2/mc/dpni.c | 1907 +++++++++++++++
- drivers/net/dpaa2/mc/dprc.c | 786 ++++++
- drivers/net/dpaa2/mc/dprtc.c | 509 ++++
- drivers/net/dpaa2/mc/dpseci.c | 502 ++++
- drivers/net/dpaa2/mc/dpsw.c | 1639 +++++++++++++
- drivers/net/dpaa2/mc/fsl_dpaiop.h | 494 ++++
- drivers/net/dpaa2/mc/fsl_dpaiop_cmd.h | 190 ++
- drivers/net/dpaa2/mc/fsl_dpbp.h | 438 ++++
- drivers/net/dpaa2/mc/fsl_dpbp_cmd.h | 172 ++
- drivers/net/dpaa2/mc/fsl_dpci.h | 594 +++++
- drivers/net/dpaa2/mc/fsl_dpci_cmd.h | 200 ++
- drivers/net/dpaa2/mc/fsl_dpcon.h | 407 +++
- drivers/net/dpaa2/mc/fsl_dpcon_cmd.h | 162 ++
- drivers/net/dpaa2/mc/fsl_dpdbg.h | 635 +++++
- drivers/net/dpaa2/mc/fsl_dpdbg_cmd.h | 249 ++
- drivers/net/dpaa2/mc/fsl_dpdcei.h | 515 ++++
- drivers/net/dpaa2/mc/fsl_dpdcei_cmd.h | 182 ++
- drivers/net/dpaa2/mc/fsl_dpdmai.h | 521 ++++
- drivers/net/dpaa2/mc/fsl_dpdmai_cmd.h | 191 ++
- drivers/net/dpaa2/mc/fsl_dpdmux.h | 724 ++++++
- drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h | 256 ++
- drivers/net/dpaa2/mc/fsl_dpio.h | 460 ++++
- drivers/net/dpaa2/mc/fsl_dpio_cmd.h | 184 ++
- drivers/net/dpaa2/mc/fsl_dpkg.h | 174 ++
- drivers/net/dpaa2/mc/fsl_dpmac.h | 593 +++++
- drivers/net/dpaa2/mc/fsl_dpmac_cmd.h | 195 ++
- drivers/net/dpaa2/mc/fsl_dpmcp.h | 332 +++
- drivers/net/dpaa2/mc/fsl_dpmcp_cmd.h | 135 +
- drivers/net/dpaa2/mc/fsl_dpmng.h | 74 +
- drivers/net/dpaa2/mc/fsl_dpmng_cmd.h | 46 +
- drivers/net/dpaa2/mc/fsl_dpni.h | 2581 ++++++++++++++++++++
- drivers/net/dpaa2/mc/fsl_dpni_cmd.h | 1058 ++++++++
- drivers/net/dpaa2/mc/fsl_dprc.h | 1032 ++++++++
- drivers/net/dpaa2/mc/fsl_dprc_cmd.h | 755 ++++++
- drivers/net/dpaa2/mc/fsl_dprtc.h | 434 ++++
- drivers/net/dpaa2/mc/fsl_dprtc_cmd.h | 181 ++
- drivers/net/dpaa2/mc/fsl_dpseci.h | 647 +++++
- drivers/net/dpaa2/mc/fsl_dpseci_cmd.h | 241 ++
- drivers/net/dpaa2/mc/fsl_dpsw.h | 2164 ++++++++++++++++
- drivers/net/dpaa2/mc/fsl_dpsw_cmd.h | 916 +++++++
- drivers/net/dpaa2/mc/fsl_mc_cmd.h | 221 ++
- drivers/net/dpaa2/mc/fsl_mc_sys.h | 95 +
- drivers/net/dpaa2/mc/fsl_net.h | 480 ++++
- drivers/net/dpaa2/mc/mc_sys.c | 129 +
- drivers/net/dpaa2/qbman/driver/qbman_debug.c | 926 +++++++
- drivers/net/dpaa2/qbman/driver/qbman_debug.h | 140 ++
- drivers/net/dpaa2/qbman/driver/qbman_portal.c | 1407 +++++++++++
- drivers/net/dpaa2/qbman/driver/qbman_portal.h | 266 ++
- drivers/net/dpaa2/qbman/driver/qbman_private.h | 165 ++
- drivers/net/dpaa2/qbman/driver/qbman_sys.h | 367 +++
- drivers/net/dpaa2/qbman/driver/qbman_sys_decl.h | 68 +
- drivers/net/dpaa2/qbman/include/compat.h | 597 +++++
- .../dpaa2/qbman/include/drivers/fsl_qbman_base.h | 151 ++
- .../dpaa2/qbman/include/drivers/fsl_qbman_portal.h | 1089 +++++++++
- drivers/net/dpaa2/rte_eth_dpaa2_pvt.h | 313 +++
- drivers/net/dpaa2/rte_eth_dpbp.c | 430 ++++
- drivers/net/dpaa2/rte_eth_dpio.c | 339 +++
- drivers/net/dpaa2/rte_eth_dpni.c | 2230 +++++++++++++++++
- drivers/net/dpaa2/rte_eth_dpni_annot.h | 311 +++
- drivers/net/dpaa2/rte_pmd_dpaa2_version.map | 4 +
- lib/librte_eal/common/eal_private.h | 12 +
- lib/librte_eal/linuxapp/eal/Makefile | 11 +
- lib/librte_eal/linuxapp/eal/eal.c | 10 +
- lib/librte_eal/linuxapp/eal/eal_soc.c | 84 +
- lib/librte_eal/linuxapp/eal/eal_vfio_fsl_mc.c | 653 +++++
- lib/librte_eal/linuxapp/eal/eal_vfio_fsl_mc.h | 102 +
- lib/librte_mbuf/Makefile | 4 +
- lib/librte_mbuf/rte_mbuf.c | 67 +
- lib/librte_mempool/Makefile | 4 +
- lib/librte_mempool/rte_mempool.c | 13 +
- lib/librte_mempool/rte_mempool.h | 30 +-
- mk/machine/dpaa2/rte.vars.mk | 60 +
- mk/rte.app.mk | 1 +
- 89 files changed, 39560 insertions(+), 1 deletion(-)
- create mode 100644 config/defconfig_arm64-dpaa2-linuxapp-gcc
- create mode 100644 drivers/net/dpaa2/Makefile
- create mode 100644 drivers/net/dpaa2/dpaa2_logs.h
- create mode 100644 drivers/net/dpaa2/mc/dpaiop.c
- create mode 100644 drivers/net/dpaa2/mc/dpbp.c
- create mode 100644 drivers/net/dpaa2/mc/dpci.c
- create mode 100644 drivers/net/dpaa2/mc/dpcon.c
- create mode 100644 drivers/net/dpaa2/mc/dpdbg.c
- create mode 100644 drivers/net/dpaa2/mc/dpdcei.c
- create mode 100644 drivers/net/dpaa2/mc/dpdmai.c
- create mode 100644 drivers/net/dpaa2/mc/dpdmux.c
- create mode 100644 drivers/net/dpaa2/mc/dpio.c
- create mode 100644 drivers/net/dpaa2/mc/dpmac.c
- create mode 100644 drivers/net/dpaa2/mc/dpmcp.c
- create mode 100644 drivers/net/dpaa2/mc/dpmng.c
- create mode 100644 drivers/net/dpaa2/mc/dpni.c
- create mode 100644 drivers/net/dpaa2/mc/dprc.c
- create mode 100644 drivers/net/dpaa2/mc/dprtc.c
- create mode 100644 drivers/net/dpaa2/mc/dpseci.c
- create mode 100644 drivers/net/dpaa2/mc/dpsw.c
- create mode 100644 drivers/net/dpaa2/mc/fsl_dpaiop.h
- create mode 100644 drivers/net/dpaa2/mc/fsl_dpaiop_cmd.h
- create mode 100644 drivers/net/dpaa2/mc/fsl_dpbp.h
- create mode 100644 drivers/net/dpaa2/mc/fsl_dpbp_cmd.h
- create mode 100644 drivers/net/dpaa2/mc/fsl_dpci.h
- create mode 100644 drivers/net/dpaa2/mc/fsl_dpci_cmd.h
- create mode 100644 drivers/net/dpaa2/mc/fsl_dpcon.h
- create mode 100644 drivers/net/dpaa2/mc/fsl_dpcon_cmd.h
- create mode 100644 drivers/net/dpaa2/mc/fsl_dpdbg.h
- create mode 100644 drivers/net/dpaa2/mc/fsl_dpdbg_cmd.h
- create mode 100644 drivers/net/dpaa2/mc/fsl_dpdcei.h
- create mode 100644 drivers/net/dpaa2/mc/fsl_dpdcei_cmd.h
- create mode 100644 drivers/net/dpaa2/mc/fsl_dpdmai.h
- create mode 100644 drivers/net/dpaa2/mc/fsl_dpdmai_cmd.h
- create mode 100644 drivers/net/dpaa2/mc/fsl_dpdmux.h
- create mode 100644 drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h
- create mode 100644 drivers/net/dpaa2/mc/fsl_dpio.h
- create mode 100644 drivers/net/dpaa2/mc/fsl_dpio_cmd.h
- create mode 100644 drivers/net/dpaa2/mc/fsl_dpkg.h
- create mode 100644 drivers/net/dpaa2/mc/fsl_dpmac.h
- create mode 100644 drivers/net/dpaa2/mc/fsl_dpmac_cmd.h
- create mode 100644 drivers/net/dpaa2/mc/fsl_dpmcp.h
- create mode 100644 drivers/net/dpaa2/mc/fsl_dpmcp_cmd.h
- create mode 100644 drivers/net/dpaa2/mc/fsl_dpmng.h
- create mode 100644 drivers/net/dpaa2/mc/fsl_dpmng_cmd.h
- create mode 100644 drivers/net/dpaa2/mc/fsl_dpni.h
- create mode 100644 drivers/net/dpaa2/mc/fsl_dpni_cmd.h
- create mode 100644 drivers/net/dpaa2/mc/fsl_dprc.h
- create mode 100644 drivers/net/dpaa2/mc/fsl_dprc_cmd.h
- create mode 100644 drivers/net/dpaa2/mc/fsl_dprtc.h
- create mode 100644 drivers/net/dpaa2/mc/fsl_dprtc_cmd.h
- create mode 100644 drivers/net/dpaa2/mc/fsl_dpseci.h
- create mode 100644 drivers/net/dpaa2/mc/fsl_dpseci_cmd.h
- create mode 100644 drivers/net/dpaa2/mc/fsl_dpsw.h
- create mode 100644 drivers/net/dpaa2/mc/fsl_dpsw_cmd.h
- create mode 100644 drivers/net/dpaa2/mc/fsl_mc_cmd.h
- create mode 100644 drivers/net/dpaa2/mc/fsl_mc_sys.h
- create mode 100644 drivers/net/dpaa2/mc/fsl_net.h
- create mode 100644 drivers/net/dpaa2/mc/mc_sys.c
- create mode 100644 drivers/net/dpaa2/qbman/driver/qbman_debug.c
- create mode 100644 drivers/net/dpaa2/qbman/driver/qbman_debug.h
- create mode 100644 drivers/net/dpaa2/qbman/driver/qbman_portal.c
- create mode 100644 drivers/net/dpaa2/qbman/driver/qbman_portal.h
- create mode 100644 drivers/net/dpaa2/qbman/driver/qbman_private.h
- create mode 100644 drivers/net/dpaa2/qbman/driver/qbman_sys.h
- create mode 100644 drivers/net/dpaa2/qbman/driver/qbman_sys_decl.h
- create mode 100644 drivers/net/dpaa2/qbman/include/compat.h
- create mode 100644 drivers/net/dpaa2/qbman/include/drivers/fsl_qbman_base.h
- create mode 100644 drivers/net/dpaa2/qbman/include/drivers/fsl_qbman_portal.h
- create mode 100644 drivers/net/dpaa2/rte_eth_dpaa2_pvt.h
- create mode 100644 drivers/net/dpaa2/rte_eth_dpbp.c
- create mode 100644 drivers/net/dpaa2/rte_eth_dpio.c
- create mode 100644 drivers/net/dpaa2/rte_eth_dpni.c
- create mode 100644 drivers/net/dpaa2/rte_eth_dpni_annot.h
- create mode 100644 drivers/net/dpaa2/rte_pmd_dpaa2_version.map
- create mode 100644 lib/librte_eal/linuxapp/eal/eal_soc.c
- create mode 100644 lib/librte_eal/linuxapp/eal/eal_vfio_fsl_mc.c
- create mode 100644 lib/librte_eal/linuxapp/eal/eal_vfio_fsl_mc.h
- create mode 100644 mk/machine/dpaa2/rte.vars.mk
-
-diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc
-new file mode 100644
-index 0000000..fafbef4
---- /dev/null
-+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
-@@ -0,0 +1,61 @@
-+# BSD LICENSE
-+#
-+# Copyright(c) 2016 Freescale Semiconductor, Inc. All rights reserved.
-+#
-+# Redistribution and use in source and binary forms, with or without
-+# modification, are permitted provided that the following conditions
-+# are met:
-+#
-+# * Redistributions of source code must retain the above copyright
-+# notice, this list of conditions and the following disclaimer.
-+# * Redistributions in binary form must reproduce the above copyright
-+# notice, this list of conditions and the following disclaimer in
-+# the documentation and/or other materials provided with the
-+# distribution.
-+# * Neither the name of Freescale Semiconductor nor the names of its
-+# contributors may be used to endorse or promote products derived
-+# from this software without specific prior written permission.
-+#
-+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+#
-+
-+#include "defconfig_arm64-armv8a-linuxapp-gcc"
-+
-+# NXP (Freescale) - SoC architecture with WRIOP and QBMAN support
-+CONFIG_RTE_MACHINE="dpaa2"
-+CONFIG_RTE_ARCH_ARM_TUNE="cortex-a57+fp+simd"
-+
-+#
-+# Compile Environment Abstraction Layer
-+#
-+CONFIG_RTE_MAX_LCORE=8
-+CONFIG_RTE_MAX_NUMA_NODES=1
-+
-+# Compile software PMD backed by FSL DPAA2 files
-+#
-+CONFIG_RTE_LIBRTE_DPAA2_PMD=y
-+CONFIG_RTE_LIBRTE_DPAA2_USE_PHYS_IOVA=n
-+CONFIG_RTE_LIBRTE_DPAA2_DEBUG_INIT=n
-+CONFIG_RTE_LIBRTE_DPAA2_DEBUG_DRIVER=n
-+CONFIG_RTE_LIBRTE_ETHDEV_DEBUG=n
-+
-+CONFIG_RTE_LIBRTE_PMD_BOND=y
-+CONFIG_RTE_CACHE_LINE_SIZE=128
-+CONFIG_RTE_EAL_IGB_UIO=n
-+CONFIG_RTE_LIBRTE_KNI=n
-+
-+#FSL DPAA2 caam driver
-+CONFIG_RTE_LIBRTE_PMD_DPAA2_CAAM=n
-+CONFIG_RTE_LIBRTE_DPAA2_CAAM_DEBUG_INIT=n
-+CONFIG_RTE_LIBRTE_DPAA2_CAAM_DEBUG_DRIVER=n
-+CONFIG_RTE_LIBRTE_DPAA2_CAAM_DEBUG_RX=n
-diff --git a/drivers/net/Makefile b/drivers/net/Makefile
-index 3386a67..ed10351 100644
---- a/drivers/net/Makefile
-+++ b/drivers/net/Makefile
-@@ -52,6 +52,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += szedata2
- DIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio
- DIRS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += vmxnet3
- DIRS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += xenvirt
-+DIRS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2
-
- ifeq ($(CONFIG_RTE_LIBRTE_VHOST),y)
- DIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += vhost
-diff --git a/drivers/net/dpaa2/Makefile b/drivers/net/dpaa2/Makefile
-new file mode 100644
-index 0000000..3cf1782
---- /dev/null
-+++ b/drivers/net/dpaa2/Makefile
-@@ -0,0 +1,102 @@
-+# BSD LICENSE
-+#
-+# Copyright (c) 2014 Freescale Semiconductor, Inc. All rights reserved.
-+#
-+# Redistribution and use in source and binary forms, with or without
-+# modification, are permitted provided that the following conditions
-+# are met:
-+#
-+# * Redistributions of source code must retain the above copyright
-+# notice, this list of conditions and the following disclaimer.
-+# * Redistributions in binary form must reproduce the above copyright
-+# notice, this list of conditions and the following disclaimer in
-+# the documentation and/or other materials provided with the
-+# distribution.
-+# * Neither the name of Freescale Semiconductor nor the names of its
-+# contributors may be used to endorse or promote products derived
-+# from this software without specific prior written permission.
-+#
-+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+
-+include $(RTE_SDK)/mk/rte.vars.mk
-+
-+#
-+# library name
-+#
-+LIB = librte_pmd_dpaa2.a
-+
-+ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_DEBUG_INIT),y)
-+CFLAGS += -O0 -g
-+CFLAGS += "-Wno-error"
-+else
-+CFLAGS += -O3 -g
-+CFLAGS += $(WERROR_FLAGS)
-+endif
-+CFLAGS +=-Wno-strict-aliasing
-+CFLAGS +=-Wno-missing-prototypes
-+CFLAGS +=-Wno-missing-declarations
-+CFLAGS +=-Wno-unused-function
-+
-+CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/mc
-+CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/qbman/include
-+CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/qbman/include/drivers
-+CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/driver/
-+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include
-+CFLAGS += -I$(RTE_SDK)/lib/librte_ether
-+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
-+
-+EXPORT_MAP := rte_pmd_dpaa2_version.map
-+
-+LIBABIVER := 1
-+#
-+# all source are stored in SRCS-y
-+#
-+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += \
-+ mc/dprc.c \
-+	mc/dprtc.c \
-+ mc/dpaiop.c \
-+	mc/dpdbg.c \
-+ mc/dpdcei.c \
-+ mc/dpdmai.c \
-+ mc/dpmac.c \
-+ mc/dpmcp.c \
-+ mc/dpbp.c \
-+ mc/dpio.c \
-+ mc/dpni.c \
-+ mc/dpsw.c \
-+ mc/dpci.c \
-+ mc/dpcon.c \
-+ mc/dpseci.c \
-+ mc/dpmng.c \
-+ mc/dpdmux.c \
-+ mc/mc_sys.c
-+
-+#
-+# all source are stored in SRCS-y
-+#
-+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += \
-+ qbman/driver/qbman_portal.c \
-+ qbman/driver/qbman_debug.c
-+
-+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += rte_eth_dpni.c
-+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += rte_eth_dpio.c
-+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += rte_eth_dpbp.c
-+
-+#
-+# Export include files
-+#
-+SYMLINK-y-include +=
-+
-+# this lib depends upon:
-+DEPDIRS-y += lib/librte_eal
-+include $(RTE_SDK)/mk/rte.lib.mk
-diff --git a/drivers/net/dpaa2/dpaa2_logs.h b/drivers/net/dpaa2/dpaa2_logs.h
-new file mode 100644
-index 0000000..319786a
---- /dev/null
-+++ b/drivers/net/dpaa2/dpaa2_logs.h
-@@ -0,0 +1,77 @@
-+/*-
-+ * BSD LICENSE
-+ *
-+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions
-+ * are met:
-+ *
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in
-+ * the documentation and/or other materials provided with the
-+ * distribution.
-+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
-+ * contributors may be used to endorse or promote products derived
-+ * from this software without specific prior written permission.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+
-+#ifndef _DPAA2_LOGS_H_
-+#define _DPAA2_LOGS_H_
-+
-+#define PMD_INIT_LOG(level, fmt, args...) \
-+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args)
-+
-+#ifdef RTE_LIBRTE_DPAA2_DEBUG_INIT
-+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
-+#else
-+#define PMD_INIT_FUNC_TRACE() do { } while (0)
-+#endif
-+
-+#ifdef RTE_LIBRTE_DPAA2_DEBUG_RX
-+#define PMD_RX_LOG(level, fmt, args...) \
-+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-+#else
-+#define PMD_RX_LOG(level, fmt, args...) do { } while(0)
-+#endif
-+
-+#ifdef RTE_LIBRTE_DPAA2_DEBUG_TX
-+#define PMD_TX_LOG(level, fmt, args...) \
-+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-+#else
-+#define PMD_TX_LOG(level, fmt, args...) do { } while(0)
-+#endif
-+
-+#ifdef RTE_LIBRTE_DPAA2_DEBUG_TX_FREE
-+#define PMD_TX_FREE_LOG(level, fmt, args...) \
-+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-+#else
-+#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while(0)
-+#endif
-+
-+#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
-+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
-+ RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
-+#else
-+#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
-+#endif
-+
-+#define PMD_DRV_LOG(level, fmt, args...) \
-+ PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
-+
-+#endif /* _DPAA2_LOGS_H_ */
-diff --git a/drivers/net/dpaa2/mc/dpaiop.c b/drivers/net/dpaa2/mc/dpaiop.c
-new file mode 100644
-index 0000000..7c1ecff
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/dpaiop.c
-@@ -0,0 +1,457 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#include <fsl_mc_sys.h>
-+#include <fsl_mc_cmd.h>
-+#include <fsl_dpaiop.h>
-+#include <fsl_dpaiop_cmd.h>
-+
-+int dpaiop_open(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ int dpaiop_id,
-+ uint16_t *token)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_OPEN,
-+ cmd_flags,
-+ 0);
-+ DPAIOP_CMD_OPEN(cmd, dpaiop_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
-+
-+ return err;
-+}
-+
-+int dpaiop_close(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_CLOSE, cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpaiop_create(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ const struct dpaiop_cfg *cfg,
-+ uint16_t *token)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ (void)(cfg); /* unused */
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_CREATE,
-+ cmd_flags,
-+ 0);
-+ DPAIOP_CMD_CREATE(cmd, cfg);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
-+
-+ return 0;
-+}
-+
-+int dpaiop_destroy(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_DESTROY,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpaiop_reset(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_RESET,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpaiop_set_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ struct dpaiop_irq_cfg *irq_cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_SET_IRQ,
-+ cmd_flags,
-+ token);
-+
-+ DPAIOP_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpaiop_get_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ int *type,
-+ struct dpaiop_irq_cfg *irq_cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_GET_IRQ,
-+ cmd_flags,
-+ token);
-+
-+ DPAIOP_CMD_GET_IRQ(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPAIOP_RSP_GET_IRQ(cmd, *type, irq_cfg);
-+
-+ return 0;
-+}
-+
-+int dpaiop_set_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t en)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_SET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+
-+ DPAIOP_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpaiop_get_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t *en)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_GET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+
-+ DPAIOP_CMD_GET_IRQ_ENABLE(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPAIOP_RSP_GET_IRQ_ENABLE(cmd, *en);
-+
-+ return 0;
-+}
-+
-+int dpaiop_set_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t mask)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_SET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+
-+ DPAIOP_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpaiop_get_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *mask)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_GET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+
-+ DPAIOP_CMD_GET_IRQ_MASK(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPAIOP_RSP_GET_IRQ_MASK(cmd, *mask);
-+
-+ return 0;
-+}
-+
-+int dpaiop_get_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *status)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_GET_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ DPAIOP_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPAIOP_RSP_GET_IRQ_STATUS(cmd, *status);
-+
-+ return 0;
-+}
-+
-+int dpaiop_clear_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t status)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_CLEAR_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ DPAIOP_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpaiop_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpaiop_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_GET_ATTR,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPAIOP_RSP_GET_ATTRIBUTES(cmd, attr);
-+
-+ return 0;
-+}
-+
-+int dpaiop_load(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpaiop_load_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_LOAD,
-+ cmd_flags,
-+ token);
-+ DPAIOP_CMD_LOAD(cmd, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpaiop_run(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const struct dpaiop_run_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_RUN,
-+ cmd_flags,
-+ token);
-+ DPAIOP_CMD_RUN(cmd, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpaiop_get_sl_version(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpaiop_sl_version *version)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_GET_SL_VERSION,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPAIOP_RSP_GET_SL_VERSION(cmd, version);
-+
-+ return 0;
-+}
-+
-+int dpaiop_get_state(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint32_t *state)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_GET_STATE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPAIOP_RSP_GET_STATE(cmd, *state);
-+
-+ return 0;
-+}
-+
-+int dpaiop_set_time_of_day(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint64_t time_of_day)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_SET_TIME_OF_DAY,
-+ cmd_flags,
-+ token);
-+
-+ DPAIOP_CMD_SET_TIME_OF_DAY(cmd, time_of_day);
-+
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpaiop_get_time_of_day(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint64_t *time_of_day)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ cmd.header = mc_encode_cmd_header(DPAIOP_CMDID_GET_TIME_OF_DAY,
-+ cmd_flags,
-+ token);
-+
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ DPAIOP_RSP_GET_TIME_OF_DAY(cmd, *time_of_day);
-+
-+ return 0;
-+}
-diff --git a/drivers/net/dpaa2/mc/dpbp.c b/drivers/net/dpaa2/mc/dpbp.c
-new file mode 100644
-index 0000000..87899b8
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/dpbp.c
-@@ -0,0 +1,432 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#include <fsl_mc_sys.h>
-+#include <fsl_mc_cmd.h>
-+#include <fsl_dpbp.h>
-+#include <fsl_dpbp_cmd.h>
-+
-+int dpbp_open(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ int dpbp_id,
-+ uint16_t *token)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_OPEN,
-+ cmd_flags,
-+ 0);
-+ DPBP_CMD_OPEN(cmd, dpbp_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
-+
-+ return err;
-+}
-+
-+int dpbp_close(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_CLOSE, cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpbp_create(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ const struct dpbp_cfg *cfg,
-+ uint16_t *token)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ (void)(cfg); /* unused */
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_CREATE,
-+ cmd_flags,
-+ 0);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
-+
-+ return 0;
-+}
-+
-+int dpbp_destroy(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_DESTROY,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpbp_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_ENABLE, cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpbp_disable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_DISABLE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpbp_is_enabled(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int *en)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_IS_ENABLED, cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPBP_RSP_IS_ENABLED(cmd, *en);
-+
-+ return 0;
-+}
-+
-+int dpbp_reset(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_RESET,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpbp_set_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ struct dpbp_irq_cfg *irq_cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ,
-+ cmd_flags,
-+ token);
-+
-+ DPBP_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpbp_get_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ int *type,
-+ struct dpbp_irq_cfg *irq_cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ,
-+ cmd_flags,
-+ token);
-+
-+ DPBP_CMD_GET_IRQ(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPBP_RSP_GET_IRQ(cmd, *type, irq_cfg);
-+
-+ return 0;
-+}
-+
-+int dpbp_set_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t en)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+
-+ DPBP_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpbp_get_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t *en)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+
-+ DPBP_CMD_GET_IRQ_ENABLE(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPBP_RSP_GET_IRQ_ENABLE(cmd, *en);
-+
-+ return 0;
-+}
-+
-+int dpbp_set_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t mask)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+
-+ DPBP_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpbp_get_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *mask)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+
-+ DPBP_CMD_GET_IRQ_MASK(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPBP_RSP_GET_IRQ_MASK(cmd, *mask);
-+
-+ return 0;
-+}
-+
-+int dpbp_get_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *status)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+
-+ DPBP_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPBP_RSP_GET_IRQ_STATUS(cmd, *status);
-+
-+ return 0;
-+}
-+
-+int dpbp_clear_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t status)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_CLEAR_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+
-+ DPBP_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpbp_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpbp_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_ATTR,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPBP_RSP_GET_ATTRIBUTES(cmd, attr);
-+
-+ return 0;
-+}
-+
-+int dpbp_set_notifications(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpbp_notification_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_NOTIFICATIONS,
-+ cmd_flags,
-+ token);
-+
-+ DPBP_CMD_SET_NOTIFICATIONS(cmd, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpbp_get_notifications(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpbp_notification_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_NOTIFICATIONS,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPBP_CMD_GET_NOTIFICATIONS(cmd, cfg);
-+
-+ return 0;
-+}
-diff --git a/drivers/net/dpaa2/mc/dpci.c b/drivers/net/dpaa2/mc/dpci.c
-new file mode 100644
-index 0000000..2ec02a1
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/dpci.c
-@@ -0,0 +1,501 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#include <fsl_mc_sys.h>
-+#include <fsl_mc_cmd.h>
-+#include <fsl_dpci.h>
-+#include <fsl_dpci_cmd.h>
-+
-+int dpci_open(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ int dpci_id,
-+ uint16_t *token)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_OPEN,
-+ cmd_flags,
-+ 0);
-+ DPCI_CMD_OPEN(cmd, dpci_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
-+
-+ return 0;
-+}
-+
-+int dpci_close(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_CLOSE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpci_create(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ const struct dpci_cfg *cfg,
-+ uint16_t *token)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_CREATE,
-+ cmd_flags,
-+ 0);
-+ DPCI_CMD_CREATE(cmd, cfg);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
-+
-+ return 0;
-+}
-+
-+int dpci_destroy(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_DESTROY,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpci_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_ENABLE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpci_disable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_DISABLE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpci_is_enabled(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int *en)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_IS_ENABLED, cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPCI_RSP_IS_ENABLED(cmd, *en);
-+
-+ return 0;
-+}
-+
-+int dpci_reset(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_RESET,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpci_set_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ struct dpci_irq_cfg *irq_cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_SET_IRQ,
-+ cmd_flags,
-+ token);
-+ DPCI_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpci_get_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ int *type,
-+ struct dpci_irq_cfg *irq_cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_IRQ,
-+ cmd_flags,
-+ token);
-+ DPCI_CMD_GET_IRQ(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPCI_RSP_GET_IRQ(cmd, *type, irq_cfg);
-+
-+ return 0;
-+}
-+
-+int dpci_set_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t en)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_SET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+ DPCI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpci_get_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t *en)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+ DPCI_CMD_GET_IRQ_ENABLE(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPCI_RSP_GET_IRQ_ENABLE(cmd, *en);
-+
-+ return 0;
-+}
-+
-+int dpci_set_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t mask)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_SET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+ DPCI_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpci_get_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *mask)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+ DPCI_CMD_GET_IRQ_MASK(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPCI_RSP_GET_IRQ_MASK(cmd, *mask);
-+
-+ return 0;
-+}
-+
-+int dpci_get_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *status)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ DPCI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPCI_RSP_GET_IRQ_STATUS(cmd, *status);
-+
-+ return 0;
-+}
-+
-+int dpci_clear_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t status)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_CLEAR_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ DPCI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpci_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpci_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_ATTR,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPCI_RSP_GET_ATTR(cmd, attr);
-+
-+ return 0;
-+}
-+
-+int dpci_get_peer_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpci_peer_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_PEER_ATTR,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPCI_RSP_GET_PEER_ATTR(cmd, attr);
-+
-+ return 0;
-+}
-+
-+int dpci_get_link_state(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int *up)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_LINK_STATE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPCI_RSP_GET_LINK_STATE(cmd, *up);
-+
-+ return 0;
-+}
-+
-+int dpci_set_rx_queue(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t priority,
-+ const struct dpci_rx_queue_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_SET_RX_QUEUE,
-+ cmd_flags,
-+ token);
-+ DPCI_CMD_SET_RX_QUEUE(cmd, priority, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpci_get_rx_queue(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t priority,
-+ struct dpci_rx_queue_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_RX_QUEUE,
-+ cmd_flags,
-+ token);
-+ DPCI_CMD_GET_RX_QUEUE(cmd, priority);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPCI_RSP_GET_RX_QUEUE(cmd, attr);
-+
-+ return 0;
-+}
-+
-+int dpci_get_tx_queue(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t priority,
-+ struct dpci_tx_queue_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_TX_QUEUE,
-+ cmd_flags,
-+ token);
-+ DPCI_CMD_GET_TX_QUEUE(cmd, priority);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPCI_RSP_GET_TX_QUEUE(cmd, attr);
-+
-+ return 0;
-+}
-diff --git a/drivers/net/dpaa2/mc/dpcon.c b/drivers/net/dpaa2/mc/dpcon.c
-new file mode 100644
-index 0000000..396303d
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/dpcon.c
-@@ -0,0 +1,401 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#include <fsl_mc_sys.h>
-+#include <fsl_mc_cmd.h>
-+#include <fsl_dpcon.h>
-+#include <fsl_dpcon_cmd.h>
-+
-+int dpcon_open(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ int dpcon_id,
-+ uint16_t *token)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_OPEN,
-+ cmd_flags,
-+ 0);
-+ DPCON_CMD_OPEN(cmd, dpcon_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
-+
-+ return 0;
-+}
-+
-+int dpcon_close(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CLOSE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpcon_create(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ const struct dpcon_cfg *cfg,
-+ uint16_t *token)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CREATE,
-+ cmd_flags,
-+ 0);
-+ DPCON_CMD_CREATE(cmd, cfg);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
-+
-+ return 0;
-+}
-+
-+int dpcon_destroy(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_DESTROY,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpcon_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_ENABLE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpcon_disable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_DISABLE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpcon_is_enabled(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int *en)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_IS_ENABLED,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPCON_RSP_IS_ENABLED(cmd, *en);
-+
-+ return 0;
-+}
-+
-+int dpcon_reset(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_RESET,
-+ cmd_flags, token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpcon_set_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ struct dpcon_irq_cfg *irq_cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_IRQ,
-+ cmd_flags,
-+ token);
-+ DPCON_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpcon_get_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ int *type,
-+ struct dpcon_irq_cfg *irq_cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_IRQ,
-+ cmd_flags,
-+ token);
-+ DPCON_CMD_GET_IRQ(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPCON_RSP_GET_IRQ(cmd, *type, irq_cfg);
-+
-+ return 0;
-+}
-+
-+int dpcon_set_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t en)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+ DPCON_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpcon_get_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t *en)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+ DPCON_CMD_GET_IRQ_ENABLE(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPCON_RSP_GET_IRQ_ENABLE(cmd, *en);
-+
-+ return 0;
-+}
-+
-+int dpcon_set_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t mask)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+ DPCON_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpcon_get_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *mask)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+ DPCON_CMD_GET_IRQ_MASK(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPCON_RSP_GET_IRQ_MASK(cmd, *mask);
-+
-+ return 0;
-+}
-+
-+int dpcon_get_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *status)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ DPCON_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPCON_RSP_GET_IRQ_STATUS(cmd, *status);
-+
-+ return 0;
-+}
-+
-+int dpcon_clear_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t status)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CLEAR_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ DPCON_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpcon_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpcon_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_ATTR,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPCON_RSP_GET_ATTR(cmd, attr);
-+
-+ return 0;
-+}
-+
-+int dpcon_set_notification(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpcon_notification_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_NOTIFICATION,
-+ cmd_flags,
-+ token);
-+ DPCON_CMD_SET_NOTIFICATION(cmd, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
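Editorial note: the removed dpcon flib follows the usual MC object pattern -- open the object by id to obtain an authentication token, issue commands against that token, and close it to release the token. A minimal usage sketch (illustrative only, not part of the removed patch; assumes an initialised struct fsl_mc_io portal and passes 0 for cmd_flags):

#include <fsl_mc_sys.h>
#include <fsl_dpcon.h>

static int dpcon_example_bring_up(struct fsl_mc_io *mc_io, int dpcon_id)
{
	struct dpcon_attr attr;
	uint16_t token;
	int err;

	/* open by object id to obtain the token used by later commands */
	err = dpcon_open(mc_io, 0, dpcon_id, &token);
	if (err)
		return err;

	/* query attributes, then enable the channel */
	err = dpcon_get_attributes(mc_io, 0, token, &attr);
	if (!err)
		err = dpcon_enable(mc_io, 0, token);

	/* always release the token */
	dpcon_close(mc_io, 0, token);
	return err;
}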
-diff --git a/drivers/net/dpaa2/mc/dpdbg.c b/drivers/net/dpaa2/mc/dpdbg.c
-new file mode 100644
-index 0000000..6f2a08d
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/dpdbg.c
-@@ -0,0 +1,547 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#include <fsl_mc_sys.h>
-+#include <fsl_mc_cmd.h>
-+#include <fsl_dpdbg.h>
-+#include <fsl_dpdbg_cmd.h>
-+
-+int dpdbg_open(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ int dpdbg_id,
-+ uint16_t *token)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_OPEN,
-+ cmd_flags,
-+ 0);
-+ DPDBG_CMD_OPEN(cmd, dpdbg_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
-+
-+ return err;
-+}
-+
-+int dpdbg_close(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_CLOSE, cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdbg_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpdbg_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_GET_ATTR,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPDBG_RSP_GET_ATTRIBUTES(cmd, attr);
-+
-+ return 0;
-+}
-+
-+int dpdbg_get_dpni_info(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int dpni_id,
-+ struct dpdbg_dpni_info *info)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_GET_DPNI_INFO,
-+ cmd_flags,
-+ token);
-+ DPDBG_CMD_GET_DPNI_INFO(cmd, dpni_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPDBG_RSP_GET_DPNI_INFO(cmd, info);
-+
-+ return 0;
-+}
-+
-+int dpdbg_get_dpni_priv_tx_conf_fqid(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int dpni_id,
-+ uint8_t sender_id,
-+ uint32_t *fqid)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(
-+ DPDBG_CMDID_GET_DPNI_PRIV_TX_CONF_FQID,
-+ cmd_flags,
-+ token);
-+ DPDBG_CMD_GET_DPNI_PRIV_TX_CONF_FQID(cmd, dpni_id, sender_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPDBG_RSP_GET_DPNI_PRIV_TX_CONF_FQID(cmd, *fqid);
-+
-+ return 0;
-+}
-+
-+int dpdbg_get_dpcon_info(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int dpcon_id,
-+ struct dpdbg_dpcon_info *info)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_GET_DPCON_INFO,
-+ cmd_flags,
-+ token);
-+ DPDBG_CMD_GET_DPCON_INFO(cmd, dpcon_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPDBG_RSP_GET_DPCON_INFO(cmd, info);
-+
-+ return 0;
-+}
-+
-+int dpdbg_get_dpbp_info(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int dpbp_id,
-+ struct dpdbg_dpbp_info *info)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_GET_DPBP_INFO,
-+ cmd_flags,
-+ token);
-+ DPDBG_CMD_GET_DPBP_INFO(cmd, dpbp_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPDBG_RSP_GET_DPBP_INFO(cmd, info);
-+
-+ return 0;
-+}
-+
-+int dpdbg_get_dpci_fqid(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int dpci_id,
-+ uint8_t priority,
-+ uint32_t *fqid)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+	/* prepare command; note: the original patch passed
-+	 * DPDBG_CMDID_GET_DPBP_INFO here, an apparent copy/paste slip --
-+	 * the DPCI FQID query is assumed to have its own command id */
-+	cmd.header = mc_encode_cmd_header(DPDBG_CMDID_GET_DPCI_FQID,
-+ cmd_flags,
-+ token);
-+ DPDBG_CMD_GET_DPCI_FQID(cmd, dpci_id, priority);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPDBG_RSP_GET_DPCI_FQID(cmd, *fqid);
-+
-+ return 0;
-+}
-+
-+int dpdbg_prepare_ctlu_global_rule(struct dpkg_profile_cfg *dpkg_rule,
-+ uint8_t *rule_buf)
-+{
-+ int i, j;
-+ int offset = 0;
-+ int param = 1;
-+ uint64_t *params = (uint64_t *)rule_buf;
-+
-+ if (!rule_buf || !dpkg_rule)
-+ return -EINVAL;
-+
-+ params[0] |= mc_enc(0, 8, dpkg_rule->num_extracts);
-+ params[0] = cpu_to_le64(params[0]);
-+
-+ if (dpkg_rule->num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS)
-+ return -EINVAL;
-+
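-+	/*
-+	 * Each extract descriptor is serialized into the 64-bit command
-+	 * words below: mc_enc(offset, width, value) packs a field at the
-+	 * given bit offset, and each completed word is converted to
-+	 * little endian with cpu_to_le64() before moving to the next one.
-+	 */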
-+ for (i = 0; i < dpkg_rule->num_extracts; i++) {
-+ switch (dpkg_rule->extracts[i].type) {
-+ case DPKG_EXTRACT_FROM_HDR:
-+ params[param] |= mc_enc(0, 8,
-+ dpkg_rule->extracts[i].extract.from_hdr.prot);
-+ params[param] |= mc_enc(8, 4,
-+ dpkg_rule->extracts[i].extract.from_hdr.type);
-+ params[param] |= mc_enc(16, 8,
-+ dpkg_rule->extracts[i].extract.from_hdr.size);
-+ params[param] |= mc_enc(24, 8,
-+ dpkg_rule->extracts[i].extract.from_hdr.offset);
-+ params[param] |= mc_enc(32, 32,
-+ dpkg_rule->extracts[i].extract.from_hdr.field);
-+ params[param] = cpu_to_le64(params[param]);
-+ param++;
-+ params[param] |= mc_enc(0, 8,
-+ dpkg_rule->extracts[i].extract.
-+ from_hdr.hdr_index);
-+ break;
-+ case DPKG_EXTRACT_FROM_DATA:
-+ params[param] |= mc_enc(16, 8,
-+ dpkg_rule->extracts[i].extract.from_data.size);
-+ params[param] |= mc_enc(24, 8,
-+ dpkg_rule->extracts[i].extract.
-+ from_data.offset);
-+ params[param] = cpu_to_le64(params[param]);
-+ param++;
-+ break;
-+ case DPKG_EXTRACT_FROM_PARSE:
-+ params[param] |= mc_enc(16, 8,
-+ dpkg_rule->extracts[i].extract.from_parse.size);
-+ params[param] |= mc_enc(24, 8,
-+ dpkg_rule->extracts[i].extract.
-+ from_parse.offset);
-+ params[param] = cpu_to_le64(params[param]);
-+ param++;
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+ params[param] |= mc_enc(
-+ 24, 8, dpkg_rule->extracts[i].num_of_byte_masks);
-+ params[param] |= mc_enc(32, 4, dpkg_rule->extracts[i].type);
-+ params[param] = cpu_to_le64(params[param]);
-+ param++;
-+ for (offset = 0, j = 0;
-+ j < DPKG_NUM_OF_MASKS;
-+ offset += 16, j++) {
-+ params[param] |= mc_enc(
-+ (offset), 8,
-+ dpkg_rule->extracts[i].masks[j].mask);
-+ params[param] |= mc_enc(
-+ (offset + 8), 8,
-+ dpkg_rule->extracts[i].masks[j].offset);
-+ }
-+ params[param] = cpu_to_le64(params[param]);
-+ param++;
-+ }
-+ return 0;
-+}
-+
-+int dpdbg_set_ctlu_global_marking(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t marking,
-+ struct dpdbg_rule_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_SET_CTLU_GLOBAL_MARKING,
-+ cmd_flags,
-+ token);
-+ DPDBG_CMD_SET_CTLU_GLOBAL_MARKING(cmd, marking, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdbg_set_dpni_rx_marking(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int dpni_id,
-+ struct dpdbg_dpni_rx_marking_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_SET_DPNI_RX_MARKING,
-+ cmd_flags,
-+ token);
-+ DPDBG_CMD_SET_DPNI_RX_MARKING(cmd, dpni_id, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdbg_set_dpni_tx_conf_marking(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int dpni_id,
-+ uint16_t sender_id,
-+ uint8_t marking)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_SET_DPNI_TX_CONF_MARKING,
-+ cmd_flags,
-+ token);
-+ DPDBG_CMD_SET_DPNI_TX_CONF_MARKING(cmd, dpni_id, sender_id, marking);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdbg_set_dpio_marking(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int dpio_id,
-+ uint8_t marking)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_SET_DPIO_MARKING,
-+ cmd_flags,
-+ token);
-+ DPDBG_CMD_SET_DPIO_MARKING(cmd, dpio_id, marking);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdbg_set_ctlu_global_trace(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpdbg_rule_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_SET_CTLU_GLOBAL_TRACE,
-+ cmd_flags,
-+ token);
-+ DPDBG_CMD_SET_CTLU_GLOBAL_TRACE(cmd, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdbg_set_dpio_trace(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int dpio_id,
-+ struct dpdbg_dpio_trace_cfg
-+ trace_point[DPDBG_NUM_OF_DPIO_TRACE_POINTS])
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_SET_DPIO_TRACE,
-+ cmd_flags,
-+ token);
-+ DPDBG_CMD_SET_DPIO_TRACE(cmd, dpio_id, trace_point);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdbg_set_dpni_rx_trace(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int dpni_id,
-+ struct dpdbg_dpni_rx_trace_cfg *trace_cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_SET_DPNI_RX_TRACE,
-+ cmd_flags,
-+ token);
-+ DPDBG_CMD_SET_DPNI_RX_TRACE(cmd, dpni_id, trace_cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdbg_set_dpni_tx_trace(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int dpni_id,
-+ uint16_t sender_id,
-+ struct dpdbg_dpni_tx_trace_cfg *trace_cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_SET_DPNI_TX_TRACE,
-+ cmd_flags,
-+ token);
-+ DPDBG_CMD_SET_DPNI_TX_TRACE(cmd, dpni_id, sender_id, trace_cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdbg_set_dpcon_trace(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int dpcon_id,
-+ struct dpdbg_dpcon_trace_cfg
-+ trace_point[DPDBG_NUM_OF_DPCON_TRACE_POINTS])
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_SET_DPCON_TRACE,
-+ cmd_flags,
-+ token);
-+ DPDBG_CMD_SET_DPCON_TRACE(cmd, dpcon_id, trace_point);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdbg_set_dpseci_trace(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int dpseci_id,
-+ struct dpdbg_dpseci_trace_cfg
-+ trace_point[DPDBG_NUM_OF_DPSECI_TRACE_POINTS])
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_SET_DPSECI_TRACE,
-+ cmd_flags,
-+ token);
-+ DPDBG_CMD_SET_DPSECI_TRACE(cmd, dpseci_id, trace_point);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdbg_get_dpmac_counter(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int dpmac_id,
-+ enum dpmac_counter counter_type,
-+ uint64_t *counter)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_GET_DPMAC_COUNTER,
-+ cmd_flags,
-+ token);
-+ DPDBG_CMD_GET_DPMAC_COUNTER(cmd, dpmac_id, counter_type);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPDBG_RSP_GET_DPMAC_COUNTER(cmd, *counter);
-+
-+ return 0;
-+}
-+
-+int dpdbg_get_dpni_counter(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int dpni_id,
-+ enum dpni_counter counter_type,
-+ uint64_t *counter)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDBG_CMDID_GET_DPNI_COUNTER,
-+ cmd_flags,
-+ token);
-+	/* the original patch reused DPDBG_CMD_GET_DPMAC_COUNTER here; the
-+	 * DPNI variant of the encode macro is assumed for a DPNI query */
-+	DPDBG_CMD_GET_DPNI_COUNTER(cmd, dpni_id, counter_type);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPDBG_RSP_GET_DPNI_COUNTER(cmd, *counter);
-+
-+ return 0;
-+}
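Editorial note: dpdbg is a read-mostly debug object -- nearly every call above encodes one GET command against the open token and decodes the matching response macro. A hedged sketch of a DPNI counter read via dpdbg (the enum dpni_counter values live in headers not shown in this hunk):

#include <fsl_mc_sys.h>
#include <fsl_dpdbg.h>

static int dpdbg_example_read_dpni_counter(struct fsl_mc_io *mc_io,
					   int dpdbg_id, int dpni_id,
					   enum dpni_counter type,
					   uint64_t *value)
{
	uint16_t token;
	int err;

	err = dpdbg_open(mc_io, 0, dpdbg_id, &token);
	if (err)
		return err;

	/* one query against the token, then release it */
	err = dpdbg_get_dpni_counter(mc_io, 0, token, dpni_id, type, value);
	dpdbg_close(mc_io, 0, token);
	return err;
}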
-diff --git a/drivers/net/dpaa2/mc/dpdcei.c b/drivers/net/dpaa2/mc/dpdcei.c
-new file mode 100644
-index 0000000..a5c4c47
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/dpdcei.c
-@@ -0,0 +1,449 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#include <fsl_mc_sys.h>
-+#include <fsl_mc_cmd.h>
-+#include <fsl_dpdcei.h>
-+#include <fsl_dpdcei_cmd.h>
-+
-+int dpdcei_open(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ int dpdcei_id,
-+ uint16_t *token)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_OPEN,
-+ cmd_flags,
-+ 0);
-+ DPDCEI_CMD_OPEN(cmd, dpdcei_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
-+
-+ return 0;
-+}
-+
-+int dpdcei_close(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_CLOSE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdcei_create(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ const struct dpdcei_cfg *cfg,
-+ uint16_t *token)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_CREATE,
-+ cmd_flags,
-+ 0);
-+ DPDCEI_CMD_CREATE(cmd, cfg);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
-+
-+ return 0;
-+}
-+
-+int dpdcei_destroy(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_DESTROY,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdcei_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_ENABLE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdcei_disable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_DISABLE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdcei_is_enabled(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int *en)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_IS_ENABLED,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPDCEI_RSP_IS_ENABLED(cmd, *en);
-+
-+ return 0;
-+}
-+
-+int dpdcei_reset(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_RESET,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdcei_get_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ int *type,
-+ struct dpdcei_irq_cfg *irq_cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_GET_IRQ,
-+ cmd_flags,
-+ token);
-+ DPDCEI_CMD_GET_IRQ(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPDCEI_RSP_GET_IRQ(cmd, *type, irq_cfg);
-+
-+ return 0;
-+}
-+
-+int dpdcei_set_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ struct dpdcei_irq_cfg *irq_cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_SET_IRQ,
-+ cmd_flags,
-+ token);
-+ DPDCEI_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdcei_get_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t *en)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_GET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+ DPDCEI_CMD_GET_IRQ_ENABLE(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPDCEI_RSP_GET_IRQ_ENABLE(cmd, *en);
-+
-+ return 0;
-+}
-+
-+int dpdcei_set_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t en)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_SET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+ DPDCEI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdcei_get_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *mask)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_GET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+ DPDCEI_CMD_GET_IRQ_MASK(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPDCEI_RSP_GET_IRQ_MASK(cmd, *mask);
-+
-+ return 0;
-+}
-+
-+int dpdcei_set_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t mask)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_SET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+ DPDCEI_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdcei_get_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *status)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_GET_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ DPDCEI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPDCEI_RSP_GET_IRQ_STATUS(cmd, *status);
-+
-+ return 0;
-+}
-+
-+int dpdcei_clear_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t status)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_CLEAR_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ DPDCEI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdcei_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpdcei_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_GET_ATTR,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPDCEI_RSP_GET_ATTR(cmd, attr);
-+
-+ return 0;
-+}
-+
-+int dpdcei_set_rx_queue(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const struct dpdcei_rx_queue_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_SET_RX_QUEUE,
-+ cmd_flags,
-+ token);
-+ DPDCEI_CMD_SET_RX_QUEUE(cmd, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdcei_get_rx_queue(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpdcei_rx_queue_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_GET_RX_QUEUE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPDCEI_RSP_GET_RX_QUEUE(cmd, attr);
-+
-+ return 0;
-+}
-+
-+int dpdcei_get_tx_queue(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpdcei_tx_queue_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_GET_TX_QUEUE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPDCEI_RSP_GET_TX_QUEUE(cmd, attr);
-+
-+ return 0;
-+}
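Editorial note: dpdcei layers Rx/Tx queue plumbing on top of the standard open/enable/close lifecycle. A short sketch, assuming the caller has already filled a struct dpdcei_rx_queue_cfg (its fields are defined in headers outside this hunk):

#include <fsl_mc_sys.h>
#include <fsl_dpdcei.h>

static int dpdcei_example_start(struct fsl_mc_io *mc_io, int dpdcei_id,
				const struct dpdcei_rx_queue_cfg *rx_cfg)
{
	struct dpdcei_rx_queue_attr rx_attr;
	uint16_t token;
	int err;

	err = dpdcei_open(mc_io, 0, dpdcei_id, &token);
	if (err)
		return err;

	/* program the Rx queue, read back its attributes, then enable */
	err = dpdcei_set_rx_queue(mc_io, 0, token, rx_cfg);
	if (!err)
		err = dpdcei_get_rx_queue(mc_io, 0, token, &rx_attr);
	if (!err)
		err = dpdcei_enable(mc_io, 0, token);

	dpdcei_close(mc_io, 0, token);
	return err;
}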
-diff --git a/drivers/net/dpaa2/mc/dpdmai.c b/drivers/net/dpaa2/mc/dpdmai.c
-new file mode 100644
-index 0000000..154d2c6
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/dpdmai.c
-@@ -0,0 +1,452 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#include <fsl_mc_sys.h>
-+#include <fsl_mc_cmd.h>
-+#include <fsl_dpdmai.h>
-+#include <fsl_dpdmai_cmd.h>
-+
-+int dpdmai_open(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ int dpdmai_id,
-+ uint16_t *token)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_OPEN,
-+ cmd_flags,
-+ 0);
-+ DPDMAI_CMD_OPEN(cmd, dpdmai_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
-+
-+ return 0;
-+}
-+
-+int dpdmai_close(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLOSE,
-+ cmd_flags, token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdmai_create(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ const struct dpdmai_cfg *cfg,
-+ uint16_t *token)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CREATE,
-+ cmd_flags,
-+ 0);
-+ DPDMAI_CMD_CREATE(cmd, cfg);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
-+
-+ return 0;
-+}
-+
-+int dpdmai_destroy(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DESTROY,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdmai_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_ENABLE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdmai_disable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DISABLE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int *en)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_IS_ENABLED,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPDMAI_RSP_IS_ENABLED(cmd, *en);
-+
-+ return 0;
-+}
-+
-+int dpdmai_reset(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_RESET,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdmai_get_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ int *type,
-+ struct dpdmai_irq_cfg *irq_cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ,
-+ cmd_flags,
-+ token);
-+ DPDMAI_CMD_GET_IRQ(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPDMAI_RSP_GET_IRQ(cmd, *type, irq_cfg);
-+
-+ return 0;
-+}
-+
-+int dpdmai_set_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ struct dpdmai_irq_cfg *irq_cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ,
-+ cmd_flags,
-+ token);
-+ DPDMAI_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdmai_get_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t *en)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+ DPDMAI_CMD_GET_IRQ_ENABLE(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPDMAI_RSP_GET_IRQ_ENABLE(cmd, *en);
-+
-+ return 0;
-+}
-+
-+int dpdmai_set_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t en)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+ DPDMAI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdmai_get_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *mask)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+ DPDMAI_CMD_GET_IRQ_MASK(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPDMAI_RSP_GET_IRQ_MASK(cmd, *mask);
-+
-+ return 0;
-+}
-+
-+int dpdmai_set_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t mask)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+ DPDMAI_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdmai_get_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *status)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ DPDMAI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPDMAI_RSP_GET_IRQ_STATUS(cmd, *status);
-+
-+ return 0;
-+}
-+
-+int dpdmai_clear_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t status)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLEAR_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ DPDMAI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpdmai_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_ATTR,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPDMAI_RSP_GET_ATTR(cmd, attr);
-+
-+ return 0;
-+}
-+
-+int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t priority,
-+ const struct dpdmai_rx_queue_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_RX_QUEUE,
-+ cmd_flags,
-+ token);
-+ DPDMAI_CMD_SET_RX_QUEUE(cmd, priority, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t priority, struct dpdmai_rx_queue_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_RX_QUEUE,
-+ cmd_flags,
-+ token);
-+ DPDMAI_CMD_GET_RX_QUEUE(cmd, priority);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPDMAI_RSP_GET_RX_QUEUE(cmd, attr);
-+
-+ return 0;
-+}
-+
-+int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t priority,
-+ struct dpdmai_tx_queue_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_TX_QUEUE,
-+ cmd_flags,
-+ token);
-+ DPDMAI_CMD_GET_TX_QUEUE(cmd, priority);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPDMAI_RSP_GET_TX_QUEUE(cmd, attr);
-+
-+ return 0;
-+}
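Editorial note: dpdmai mirrors dpdcei except that its Rx/Tx queues are selected by a priority index. Sketch under the same assumptions (cmd_flags of 0, rx_cfg prepared by the caller):

#include <fsl_mc_sys.h>
#include <fsl_dpdmai.h>

static int dpdmai_example_start(struct fsl_mc_io *mc_io, int dpdmai_id,
				uint8_t priority,
				const struct dpdmai_rx_queue_cfg *rx_cfg)
{
	struct dpdmai_tx_queue_attr tx_attr;
	uint16_t token;
	int err;

	err = dpdmai_open(mc_io, 0, dpdmai_id, &token);
	if (err)
		return err;

	/* bind the Rx queue for this priority, check Tx, then enable */
	err = dpdmai_set_rx_queue(mc_io, 0, token, priority, rx_cfg);
	if (!err)
		err = dpdmai_get_tx_queue(mc_io, 0, token, priority, &tx_attr);
	if (!err)
		err = dpdmai_enable(mc_io, 0, token);

	dpdmai_close(mc_io, 0, token);
	return err;
}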
-diff --git a/drivers/net/dpaa2/mc/dpdmux.c b/drivers/net/dpaa2/mc/dpdmux.c
-new file mode 100644
-index 0000000..dc07608
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/dpdmux.c
-@@ -0,0 +1,567 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#include <fsl_mc_sys.h>
-+#include <fsl_mc_cmd.h>
-+#include <fsl_dpdmux.h>
-+#include <fsl_dpdmux_cmd.h>
-+
-+int dpdmux_open(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ int dpdmux_id,
-+ uint16_t *token)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_OPEN,
-+ cmd_flags,
-+ 0);
-+ DPDMUX_CMD_OPEN(cmd, dpdmux_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
-+
-+ return 0;
-+}
-+
-+int dpdmux_close(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CLOSE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdmux_create(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ const struct dpdmux_cfg *cfg,
-+ uint16_t *token)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CREATE,
-+ cmd_flags,
-+ 0);
-+ DPDMUX_CMD_CREATE(cmd, cfg);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
-+
-+ return 0;
-+}
-+
-+int dpdmux_destroy(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DESTROY,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdmux_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_ENABLE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdmux_disable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DISABLE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdmux_is_enabled(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int *en)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IS_ENABLED,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPDMUX_RSP_IS_ENABLED(cmd, *en);
-+
-+ return 0;
-+}
-+
-+int dpdmux_reset(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_RESET,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdmux_set_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ struct dpdmux_irq_cfg *irq_cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_IRQ,
-+ cmd_flags,
-+ token);
-+ DPDMUX_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdmux_get_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ int *type,
-+ struct dpdmux_irq_cfg *irq_cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ,
-+ cmd_flags,
-+ token);
-+ DPDMUX_CMD_GET_IRQ(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPDMUX_RSP_GET_IRQ(cmd, *type, irq_cfg);
-+
-+ return 0;
-+}
-+
-+int dpdmux_set_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t en)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+ DPDMUX_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdmux_get_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t *en)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+ DPDMUX_CMD_GET_IRQ_ENABLE(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPDMUX_RSP_GET_IRQ_ENABLE(cmd, *en);
-+
-+ return 0;
-+}
-+
-+int dpdmux_set_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t mask)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+ DPDMUX_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdmux_get_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *mask)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+ DPDMUX_CMD_GET_IRQ_MASK(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPDMUX_RSP_GET_IRQ_MASK(cmd, *mask);
-+
-+ return 0;
-+}
-+
-+int dpdmux_get_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *status)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ DPDMUX_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPDMUX_RSP_GET_IRQ_STATUS(cmd, *status);
-+
-+ return 0;
-+}
-+
-+int dpdmux_clear_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t status)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CLEAR_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ DPDMUX_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdmux_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpdmux_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_ATTR,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPDMUX_RSP_GET_ATTR(cmd, attr);
-+
-+ return 0;
-+}
-+
-+int dpdmux_ul_set_max_frame_length(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t max_frame_length)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_UL_SET_MAX_FRAME_LENGTH,
-+ cmd_flags,
-+ token);
-+ DPDMUX_CMD_UL_SET_MAX_FRAME_LENGTH(cmd, max_frame_length);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdmux_ul_reset_counters(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_UL_RESET_COUNTERS,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdmux_if_set_accepted_frames(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ const struct dpdmux_accepted_frames *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_ACCEPTED_FRAMES,
-+ cmd_flags,
-+ token);
-+ DPDMUX_CMD_IF_SET_ACCEPTED_FRAMES(cmd, if_id, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdmux_if_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ struct dpdmux_if_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_ATTR,
-+ cmd_flags,
-+ token);
-+ DPDMUX_CMD_IF_GET_ATTR(cmd, if_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPDMUX_RSP_IF_GET_ATTR(cmd, attr);
-+
-+ return 0;
-+}
-+
-+int dpdmux_if_remove_l2_rule(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ const struct dpdmux_l2_rule *rule)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_REMOVE_L2_RULE,
-+ cmd_flags,
-+ token);
-+ DPDMUX_CMD_IF_REMOVE_L2_RULE(cmd, if_id, rule);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdmux_if_add_l2_rule(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ const struct dpdmux_l2_rule *rule)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_ADD_L2_RULE,
-+ cmd_flags,
-+ token);
-+ DPDMUX_CMD_IF_ADD_L2_RULE(cmd, if_id, rule);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdmux_if_get_counter(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ enum dpdmux_counter_type counter_type,
-+ uint64_t *counter)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_COUNTER,
-+ cmd_flags,
-+ token);
-+ DPDMUX_CMD_IF_GET_COUNTER(cmd, if_id, counter_type);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPDMUX_RSP_IF_GET_COUNTER(cmd, *counter);
-+
-+ return 0;
-+}
-+
-+int dpdmux_if_set_link_cfg(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ struct dpdmux_link_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_LINK_CFG,
-+ cmd_flags,
-+ token);
-+ DPDMUX_CMD_IF_SET_LINK_CFG(cmd, if_id, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpdmux_if_get_link_state(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ struct dpdmux_link_state *state)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_LINK_STATE,
-+ cmd_flags,
-+ token);
-+ DPDMUX_CMD_IF_GET_LINK_STATE(cmd, if_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPDMUX_RSP_IF_GET_LINK_STATE(cmd, state);
-+
-+ return 0;
-+}
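Editorial note: once a dpdmux token is held, the interesting calls above are per interface (if_id). Illustrative query sketch, again not part of the removed patch:

#include <fsl_mc_sys.h>
#include <fsl_dpdmux.h>

static int dpdmux_example_query_if(struct fsl_mc_io *mc_io, int dpdmux_id,
				   uint16_t if_id,
				   enum dpdmux_counter_type cnt,
				   struct dpdmux_link_state *state,
				   uint64_t *value)
{
	uint16_t token;
	int err;

	err = dpdmux_open(mc_io, 0, dpdmux_id, &token);
	if (err)
		return err;

	/* per-interface link state and one counter, then drop the token */
	err = dpdmux_if_get_link_state(mc_io, 0, token, if_id, state);
	if (!err)
		err = dpdmux_if_get_counter(mc_io, 0, token, if_id, cnt, value);

	dpdmux_close(mc_io, 0, token);
	return err;
}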
-diff --git a/drivers/net/dpaa2/mc/dpio.c b/drivers/net/dpaa2/mc/dpio.c
-new file mode 100644
-index 0000000..f511e29
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/dpio.c
-@@ -0,0 +1,468 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#include <fsl_mc_sys.h>
-+#include <fsl_mc_cmd.h>
-+#include <fsl_dpio.h>
-+#include <fsl_dpio_cmd.h>
-+
-+int dpio_open(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ int dpio_id,
-+ uint16_t *token)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_OPEN,
-+ cmd_flags,
-+ 0);
-+ DPIO_CMD_OPEN(cmd, dpio_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
-+
-+ return 0;
-+}
-+
-+int dpio_close(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CLOSE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpio_create(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ const struct dpio_cfg *cfg,
-+ uint16_t *token)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CREATE,
-+ cmd_flags,
-+ 0);
-+ DPIO_CMD_CREATE(cmd, cfg);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
-+
-+ return 0;
-+}
-+
-+int dpio_destroy(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_DESTROY,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpio_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_ENABLE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpio_disable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_DISABLE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpio_is_enabled(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int *en)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_IS_ENABLED, cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPIO_RSP_IS_ENABLED(cmd, *en);
-+
-+ return 0;
-+}
-+
-+int dpio_reset(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_RESET,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpio_set_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ struct dpio_irq_cfg *irq_cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_IRQ,
-+ cmd_flags,
-+ token);
-+ DPIO_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpio_get_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ int *type,
-+ struct dpio_irq_cfg *irq_cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_IRQ,
-+ cmd_flags,
-+ token);
-+ DPIO_CMD_GET_IRQ(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPIO_RSP_GET_IRQ(cmd, *type, irq_cfg);
-+
-+ return 0;
-+}
-+
-+int dpio_set_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t en)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+ DPIO_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpio_get_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t *en)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+ DPIO_CMD_GET_IRQ_ENABLE(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPIO_RSP_GET_IRQ_ENABLE(cmd, *en);
-+
-+ return 0;
-+}
-+
-+int dpio_set_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t mask)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+ DPIO_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpio_get_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *mask)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+ DPIO_CMD_GET_IRQ_MASK(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPIO_RSP_GET_IRQ_MASK(cmd, *mask);
-+
-+ return 0;
-+}
-+
-+int dpio_get_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *status)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ DPIO_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPIO_RSP_GET_IRQ_STATUS(cmd, *status);
-+
-+ return 0;
-+}
-+
-+int dpio_clear_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t status)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CLEAR_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ DPIO_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpio_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpio_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_ATTR,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPIO_RSP_GET_ATTR(cmd, attr);
-+
-+ return 0;
-+}
-+
-+int dpio_set_stashing_destination(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t sdest)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_STASHING_DEST,
-+ cmd_flags,
-+ token);
-+ DPIO_CMD_SET_STASHING_DEST(cmd, sdest);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpio_get_stashing_destination(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t *sdest)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_STASHING_DEST,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPIO_RSP_GET_STASHING_DEST(cmd, *sdest);
-+
-+ return 0;
-+}
-+
-+int dpio_add_static_dequeue_channel(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int dpcon_id,
-+ uint8_t *channel_index)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_ADD_STATIC_DEQUEUE_CHANNEL,
-+ cmd_flags,
-+ token);
-+ DPIO_CMD_ADD_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPIO_RSP_ADD_STATIC_DEQUEUE_CHANNEL(cmd, *channel_index);
-+
-+ return 0;
-+}
-+
-+int dpio_remove_static_dequeue_channel(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int dpcon_id)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(
-+ DPIO_CMDID_REMOVE_STATIC_DEQUEUE_CHANNEL,
-+ cmd_flags,
-+ token);
-+ DPIO_CMD_REMOVE_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-diff --git a/drivers/net/dpaa2/mc/dpmac.c b/drivers/net/dpaa2/mc/dpmac.c
-new file mode 100644
-index 0000000..f31d949
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/dpmac.c
-@@ -0,0 +1,422 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#include <fsl_mc_sys.h>
-+#include <fsl_mc_cmd.h>
-+#include <fsl_dpmac.h>
-+#include <fsl_dpmac_cmd.h>
-+
-+int dpmac_open(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ int dpmac_id,
-+ uint16_t *token)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_OPEN,
-+ cmd_flags,
-+ 0);
-+ DPMAC_CMD_OPEN(cmd, dpmac_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
-+
-+ return err;
-+}
-+
-+int dpmac_close(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLOSE, cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpmac_create(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ const struct dpmac_cfg *cfg,
-+ uint16_t *token)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CREATE,
-+ cmd_flags,
-+ 0);
-+ DPMAC_CMD_CREATE(cmd, cfg);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
-+
-+ return 0;
-+}
-+
-+int dpmac_destroy(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_DESTROY,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpmac_set_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ struct dpmac_irq_cfg *irq_cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ,
-+ cmd_flags,
-+ token);
-+ DPMAC_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpmac_get_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ int *type,
-+ struct dpmac_irq_cfg *irq_cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ,
-+ cmd_flags,
-+ token);
-+ DPMAC_CMD_GET_IRQ(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPMAC_RSP_GET_IRQ(cmd, *type, irq_cfg);
-+
-+ return 0;
-+}
-+
-+int dpmac_set_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t en)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+ DPMAC_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpmac_get_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t *en)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+ DPMAC_CMD_GET_IRQ_ENABLE(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPMAC_RSP_GET_IRQ_ENABLE(cmd, *en);
-+
-+ return 0;
-+}
-+
-+int dpmac_set_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t mask)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+ DPMAC_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpmac_get_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *mask)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+ DPMAC_CMD_GET_IRQ_MASK(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPMAC_RSP_GET_IRQ_MASK(cmd, *mask);
-+
-+ return 0;
-+}
-+
-+int dpmac_get_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *status)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ DPMAC_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPMAC_RSP_GET_IRQ_STATUS(cmd, *status);
-+
-+ return 0;
-+}
-+
-+int dpmac_clear_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t status)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLEAR_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ DPMAC_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpmac_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpmac_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_ATTR,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPMAC_RSP_GET_ATTRIBUTES(cmd, attr);
-+
-+ return 0;
-+}
-+
-+int dpmac_mdio_read(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpmac_mdio_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_MDIO_READ,
-+ cmd_flags,
-+ token);
-+ DPMAC_CMD_MDIO_READ(cmd, cfg);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPMAC_RSP_MDIO_READ(cmd, cfg->data);
-+
-+ return 0;
-+}
-+
-+int dpmac_mdio_write(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpmac_mdio_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_MDIO_WRITE,
-+ cmd_flags,
-+ token);
-+ DPMAC_CMD_MDIO_WRITE(cmd, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpmac_get_link_cfg(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpmac_link_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err = 0;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_LINK_CFG,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ DPMAC_RSP_GET_LINK_CFG(cmd, cfg);
-+
-+ return 0;
-+}
-+
-+int dpmac_set_link_state(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpmac_link_state *link_state)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_LINK_STATE,
-+ cmd_flags,
-+ token);
-+ DPMAC_CMD_SET_LINK_STATE(cmd, link_state);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpmac_get_counter(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ enum dpmac_counter type,
-+ uint64_t *counter)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err = 0;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_COUNTER,
-+ cmd_flags,
-+ token);
-+ DPMAC_CMD_GET_COUNTER(cmd, type);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ DPMAC_RSP_GET_COUNTER(cmd, *counter);
-+
-+ return 0;
-+}
-diff --git a/drivers/net/dpaa2/mc/dpmcp.c b/drivers/net/dpaa2/mc/dpmcp.c
-new file mode 100644
-index 0000000..dfd84b8
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/dpmcp.c
-@@ -0,0 +1,312 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#include <fsl_mc_sys.h>
-+#include <fsl_mc_cmd.h>
-+#include <fsl_dpmcp.h>
-+#include <fsl_dpmcp_cmd.h>
-+
-+int dpmcp_open(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ int dpmcp_id,
-+ uint16_t *token)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_OPEN,
-+ cmd_flags,
-+ 0);
-+ DPMCP_CMD_OPEN(cmd, dpmcp_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
-+
-+ return err;
-+}
-+
-+int dpmcp_close(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CLOSE, cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpmcp_create(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ const struct dpmcp_cfg *cfg,
-+ uint16_t *token)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CREATE,
-+ cmd_flags,
-+ 0);
-+ DPMCP_CMD_CREATE(cmd, cfg);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
-+
-+ return 0;
-+}
-+
-+int dpmcp_destroy(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_DESTROY,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpmcp_reset(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_RESET,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpmcp_set_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ struct dpmcp_irq_cfg *irq_cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ,
-+ cmd_flags,
-+ token);
-+ DPMCP_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpmcp_get_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ int *type,
-+ struct dpmcp_irq_cfg *irq_cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ,
-+ cmd_flags,
-+ token);
-+ DPMCP_CMD_GET_IRQ(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPMCP_RSP_GET_IRQ(cmd, *type, irq_cfg);
-+
-+ return 0;
-+}
-+
-+int dpmcp_set_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t en)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+ DPMCP_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t *en)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+ DPMCP_CMD_GET_IRQ_ENABLE(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPMCP_RSP_GET_IRQ_ENABLE(cmd, *en);
-+
-+ return 0;
-+}
-+
-+int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t mask)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+ DPMCP_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *mask)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+ DPMCP_CMD_GET_IRQ_MASK(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPMCP_RSP_GET_IRQ_MASK(cmd, *mask);
-+
-+ return 0;
-+}
-+
-+int dpmcp_get_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *status)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ DPMCP_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPMCP_RSP_GET_IRQ_STATUS(cmd, *status);
-+
-+ return 0;
-+}
-+
-+int dpmcp_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpmcp_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_ATTR,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPMCP_RSP_GET_ATTRIBUTES(cmd, attr);
-+
-+ return 0;
-+}
-diff --git a/drivers/net/dpaa2/mc/dpmng.c b/drivers/net/dpaa2/mc/dpmng.c
-new file mode 100644
-index 0000000..cac5ba5
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/dpmng.c
-@@ -0,0 +1,58 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#include <fsl_mc_sys.h>
-+#include <fsl_mc_cmd.h>
-+#include <fsl_dpmng.h>
-+#include <fsl_dpmng_cmd.h>
-+
-+int mc_get_version(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ struct mc_version *mc_ver_info)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_VERSION,
-+ cmd_flags,
-+ 0);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPMNG_RSP_GET_VERSION(cmd, mc_ver_info);
-+
-+ return 0;
-+}
-diff --git a/drivers/net/dpaa2/mc/dpni.c b/drivers/net/dpaa2/mc/dpni.c
-new file mode 100644
-index 0000000..cdd2f37
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/dpni.c
-@@ -0,0 +1,1907 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#include <fsl_mc_sys.h>
-+#include <fsl_mc_cmd.h>
-+#include <fsl_dpni.h>
-+#include <fsl_dpni_cmd.h>
-+
-+int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg,
-+ uint8_t *key_cfg_buf)
-+{
-+ int i, j;
-+ int offset = 0;
-+ int param = 1;
-+ uint64_t *params = (uint64_t *)key_cfg_buf;
-+
-+ if (!key_cfg_buf || !cfg)
-+ return -EINVAL;
-+
-+ params[0] |= mc_enc(0, 8, cfg->num_extracts);
-+ params[0] = cpu_to_le64(params[0]);
-+
-+ if (cfg->num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS)
-+ return -EINVAL;
-+
-+ for (i = 0; i < cfg->num_extracts; i++) {
-+ switch (cfg->extracts[i].type) {
-+ case DPKG_EXTRACT_FROM_HDR:
-+ params[param] |= mc_enc(0, 8,
-+ cfg->extracts[i].extract.from_hdr.prot);
-+ params[param] |= mc_enc(8, 4,
-+ cfg->extracts[i].extract.from_hdr.type);
-+ params[param] |= mc_enc(16, 8,
-+ cfg->extracts[i].extract.from_hdr.size);
-+ params[param] |= mc_enc(24, 8,
-+ cfg->extracts[i].extract.
-+ from_hdr.offset);
-+ params[param] |= mc_enc(32, 32,
-+ cfg->extracts[i].extract.
-+ from_hdr.field);
-+ params[param] = cpu_to_le64(params[param]);
-+ param++;
-+ params[param] |= mc_enc(0, 8,
-+ cfg->extracts[i].extract.
-+ from_hdr.hdr_index);
-+ break;
-+ case DPKG_EXTRACT_FROM_DATA:
-+ params[param] |= mc_enc(16, 8,
-+ cfg->extracts[i].extract.
-+ from_data.size);
-+ params[param] |= mc_enc(24, 8,
-+ cfg->extracts[i].extract.
-+ from_data.offset);
-+ params[param] = cpu_to_le64(params[param]);
-+ param++;
-+ break;
-+ case DPKG_EXTRACT_FROM_PARSE:
-+ params[param] |= mc_enc(16, 8,
-+ cfg->extracts[i].extract.
-+ from_parse.size);
-+ params[param] |= mc_enc(24, 8,
-+ cfg->extracts[i].extract.
-+ from_parse.offset);
-+ params[param] = cpu_to_le64(params[param]);
-+ param++;
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+ params[param] |= mc_enc(
-+ 24, 8, cfg->extracts[i].num_of_byte_masks);
-+ params[param] |= mc_enc(32, 4, cfg->extracts[i].type);
-+ params[param] = cpu_to_le64(params[param]);
-+ param++;
-+ for (offset = 0, j = 0;
-+ j < DPKG_NUM_OF_MASKS;
-+ offset += 16, j++) {
-+ params[param] |= mc_enc(
-+ (offset), 8, cfg->extracts[i].masks[j].mask);
-+ params[param] |= mc_enc(
-+ (offset + 8), 8,
-+ cfg->extracts[i].masks[j].offset);
-+ }
-+ params[param] = cpu_to_le64(params[param]);
-+ param++;
-+ }
-+ return 0;
-+}
-+
-+int dpni_prepare_extended_cfg(const struct dpni_extended_cfg *cfg,
-+ uint8_t *ext_cfg_buf)
-+{
-+ uint64_t *ext_params = (uint64_t *)ext_cfg_buf;
-+
-+ DPNI_PREP_EXTENDED_CFG(ext_params, cfg);
-+
-+ return 0;
-+}
-+
-+int dpni_extract_extended_cfg(struct dpni_extended_cfg *cfg,
-+ const uint8_t *ext_cfg_buf)
-+{
-+ const uint64_t *ext_params = (const uint64_t *)ext_cfg_buf;
-+
-+ DPNI_EXT_EXTENDED_CFG(ext_params, cfg);
-+
-+ return 0;
-+}
-+
-+int dpni_open(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ int dpni_id,
-+ uint16_t *token)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_OPEN,
-+ cmd_flags,
-+ 0);
-+ DPNI_CMD_OPEN(cmd, dpni_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
-+
-+ return 0;
-+}
-+
-+int dpni_close(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_create(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ const struct dpni_cfg *cfg,
-+ uint16_t *token)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CREATE,
-+ cmd_flags,
-+ 0);
-+ DPNI_CMD_CREATE(cmd, cfg);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
-+
-+ return 0;
-+}
-+
-+int dpni_destroy(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_DESTROY,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_set_pools(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const struct dpni_pools_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_POOLS,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_SET_POOLS(cmd, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_disable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_is_enabled(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int *en)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_IS_ENABLED, cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPNI_RSP_IS_ENABLED(cmd, *en);
-+
-+ return 0;
-+}
-+
-+int dpni_reset(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_set_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ struct dpni_irq_cfg *irq_cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_get_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ int *type,
-+ struct dpni_irq_cfg *irq_cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_GET_IRQ(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPNI_RSP_GET_IRQ(cmd, *type, irq_cfg);
-+
-+ return 0;
-+}
-+
-+int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t en)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t *en)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_GET_IRQ_ENABLE(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPNI_RSP_GET_IRQ_ENABLE(cmd, *en);
-+
-+ return 0;
-+}
-+
-+int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t mask)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *mask)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_GET_IRQ_MASK(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPNI_RSP_GET_IRQ_MASK(cmd, *mask);
-+
-+ return 0;
-+}
-+
-+int dpni_get_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *status)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPNI_RSP_GET_IRQ_STATUS(cmd, *status);
-+
-+ return 0;
-+}
-+
-+int dpni_clear_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t status)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLEAR_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpni_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_ATTR,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_GET_ATTR(cmd, attr);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPNI_RSP_GET_ATTR(cmd, attr);
-+
-+ return 0;
-+}
-+
-+int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpni_error_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_ERRORS_BEHAVIOR,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_SET_ERRORS_BEHAVIOR(cmd, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_get_rx_buffer_layout(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpni_buffer_layout *layout)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_BUFFER_LAYOUT,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPNI_RSP_GET_RX_BUFFER_LAYOUT(cmd, layout);
-+
-+ return 0;
-+}
-+
-+int dpni_set_rx_buffer_layout(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const struct dpni_buffer_layout *layout)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_BUFFER_LAYOUT,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_SET_RX_BUFFER_LAYOUT(cmd, layout);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_get_tx_buffer_layout(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpni_buffer_layout *layout)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_BUFFER_LAYOUT,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPNI_RSP_GET_TX_BUFFER_LAYOUT(cmd, layout);
-+
-+ return 0;
-+}
-+
-+int dpni_set_tx_buffer_layout(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const struct dpni_buffer_layout *layout)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_BUFFER_LAYOUT,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_SET_TX_BUFFER_LAYOUT(cmd, layout);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_get_tx_conf_buffer_layout(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpni_buffer_layout *layout)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_CONF_BUFFER_LAYOUT,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPNI_RSP_GET_TX_CONF_BUFFER_LAYOUT(cmd, layout);
-+
-+ return 0;
-+}
-+
-+int dpni_set_tx_conf_buffer_layout(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const struct dpni_buffer_layout *layout)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_CONF_BUFFER_LAYOUT,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_SET_TX_CONF_BUFFER_LAYOUT(cmd, layout);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_get_l3_chksum_validation(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int *en)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_L3_CHKSUM_VALIDATION,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPNI_RSP_GET_L3_CHKSUM_VALIDATION(cmd, *en);
-+
-+ return 0;
-+}
-+
-+int dpni_set_l3_chksum_validation(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int en)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_L3_CHKSUM_VALIDATION,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_SET_L3_CHKSUM_VALIDATION(cmd, en);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_get_l4_chksum_validation(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int *en)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_L4_CHKSUM_VALIDATION,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPNI_RSP_GET_L4_CHKSUM_VALIDATION(cmd, *en);
-+
-+ return 0;
-+}
-+
-+int dpni_set_l4_chksum_validation(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int en)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_L4_CHKSUM_VALIDATION,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_SET_L4_CHKSUM_VALIDATION(cmd, en);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_get_qdid(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t *qdid)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QDID,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPNI_RSP_GET_QDID(cmd, *qdid);
-+
-+ return 0;
-+}
-+
-+int dpni_get_sp_info(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpni_sp_info *sp_info)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_SP_INFO,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPNI_RSP_GET_SP_INFO(cmd, sp_info);
-+
-+ return 0;
-+}
-+
-+int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t *data_offset)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_DATA_OFFSET,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPNI_RSP_GET_TX_DATA_OFFSET(cmd, *data_offset);
-+
-+ return 0;
-+}
-+
-+int dpni_get_counter(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ enum dpni_counter counter,
-+ uint64_t *value)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_COUNTER,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_GET_COUNTER(cmd, counter);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPNI_RSP_GET_COUNTER(cmd, *value);
-+
-+ return 0;
-+}
-+
-+int dpni_set_counter(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ enum dpni_counter counter,
-+ uint64_t value)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_COUNTER,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_SET_COUNTER(cmd, counter, value);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const struct dpni_link_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_SET_LINK_CFG(cmd, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_get_link_state(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpni_link_state *state)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPNI_RSP_GET_LINK_STATE(cmd, state);
-+
-+ return 0;
-+}
-+
-+int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const struct dpni_tx_shaping_cfg *tx_shaper)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SHAPING,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_SET_TX_SHAPING(cmd, tx_shaper);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t max_frame_length)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MAX_FRAME_LENGTH,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_SET_MAX_FRAME_LENGTH(cmd, max_frame_length);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t *max_frame_length)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MAX_FRAME_LENGTH,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPNI_RSP_GET_MAX_FRAME_LENGTH(cmd, *max_frame_length);
-+
-+ return 0;
-+}
-+
-+int dpni_set_mtu(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t mtu)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MTU,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_SET_MTU(cmd, mtu);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_get_mtu(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t *mtu)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MTU,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPNI_RSP_GET_MTU(cmd, *mtu);
-+
-+ return 0;
-+}
-+
-+int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int en)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MCAST_PROMISC,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_SET_MULTICAST_PROMISC(cmd, en);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int *en)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MCAST_PROMISC,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPNI_RSP_GET_MULTICAST_PROMISC(cmd, *en);
-+
-+ return 0;
-+}
-+
-+int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int en)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_UNICAST_PROMISC,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_SET_UNICAST_PROMISC(cmd, en);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int *en)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_UNICAST_PROMISC,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPNI_RSP_GET_UNICAST_PROMISC(cmd, *en);
-+
-+ return 0;
-+}
-+
-+int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const uint8_t mac_addr[6])
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_PRIM_MAC,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_SET_PRIMARY_MAC_ADDR(cmd, mac_addr);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t mac_addr[6])
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PRIM_MAC,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPNI_RSP_GET_PRIMARY_MAC_ADDR(cmd, mac_addr);
-+
-+ return 0;
-+}
-+
-+int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const uint8_t mac_addr[6])
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_MAC_ADDR,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_ADD_MAC_ADDR(cmd, mac_addr);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const uint8_t mac_addr[6])
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_MAC_ADDR,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_REMOVE_MAC_ADDR(cmd, mac_addr);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int unicast,
-+ int multicast)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_MAC_FILTERS,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_CLEAR_MAC_FILTERS(cmd, unicast, multicast);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_set_vlan_filters(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int en)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_VLAN_FILTERS,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_SET_VLAN_FILTERS(cmd, en);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_add_vlan_id(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t vlan_id)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_VLAN_ID,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_ADD_VLAN_ID(cmd, vlan_id);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_remove_vlan_id(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t vlan_id)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_VLAN_ID,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_REMOVE_VLAN_ID(cmd, vlan_id);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_clear_vlan_filters(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_VLAN_FILTERS,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_set_tx_selection(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const struct dpni_tx_selection_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SELECTION,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_SET_TX_SELECTION(cmd, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t tc_id,
-+ const struct dpni_rx_tc_dist_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_DIST,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_SET_RX_TC_DIST(cmd, tc_id, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_set_tx_flow(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t *flow_id,
-+ const struct dpni_tx_flow_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_FLOW,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_SET_TX_FLOW(cmd, *flow_id, cfg);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPNI_RSP_SET_TX_FLOW(cmd, *flow_id);
-+
-+ return 0;
-+}
-+
-+int dpni_get_tx_flow(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t flow_id,
-+ struct dpni_tx_flow_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_FLOW,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_GET_TX_FLOW(cmd, flow_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPNI_RSP_GET_TX_FLOW(cmd, attr);
-+
-+ return 0;
-+}
-+
-+int dpni_set_rx_flow(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t tc_id,
-+ uint16_t flow_id,
-+ const struct dpni_queue_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_FLOW,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_SET_RX_FLOW(cmd, tc_id, flow_id, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_get_rx_flow(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t tc_id,
-+ uint16_t flow_id,
-+ struct dpni_queue_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_FLOW,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_GET_RX_FLOW(cmd, tc_id, flow_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPNI_RSP_GET_RX_FLOW(cmd, attr);
-+
-+ return 0;
-+}
-+
-+int dpni_set_rx_err_queue(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const struct dpni_queue_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_ERR_QUEUE,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_SET_RX_ERR_QUEUE(cmd, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_get_rx_err_queue(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpni_queue_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_ERR_QUEUE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPNI_RSP_GET_RX_ERR_QUEUE(cmd, attr);
-+
-+ return 0;
-+}
-+
-+int dpni_set_tx_conf_revoke(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int revoke)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_CONF_REVOKE,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_SET_TX_CONF_REVOKE(cmd, revoke);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_set_qos_table(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const struct dpni_qos_tbl_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QOS_TBL,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_SET_QOS_TABLE(cmd, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const struct dpni_rule_cfg *cfg,
-+ uint8_t tc_id)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_QOS_ENT,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_ADD_QOS_ENTRY(cmd, cfg, tc_id);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const struct dpni_rule_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_QOS_ENT,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_REMOVE_QOS_ENTRY(cmd, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_QOS_TBL,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t tc_id,
-+ const struct dpni_rule_cfg *cfg,
-+ uint16_t flow_id)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_ADD_FS_ENTRY(cmd, tc_id, cfg, flow_id);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t tc_id,
-+ const struct dpni_rule_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_REMOVE_FS_ENTRY(cmd, tc_id, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_clear_fs_entries(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t tc_id)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_FS_ENT,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_CLEAR_FS_ENTRIES(cmd, tc_id);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_set_vlan_insertion(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int en)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_VLAN_INSERTION,
-+ cmd_flags, token);
-+ DPNI_CMD_SET_VLAN_INSERTION(cmd, en);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_set_vlan_removal(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int en)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_VLAN_REMOVAL,
-+ cmd_flags, token);
-+ DPNI_CMD_SET_VLAN_REMOVAL(cmd, en);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_set_ipr(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int en)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IPR,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_SET_IPR(cmd, en);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_set_ipf(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int en)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IPF,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_SET_IPF(cmd, en);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_set_rx_tc_policing(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t tc_id,
-+ const struct dpni_rx_tc_policing_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_POLICING,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_SET_RX_TC_POLICING(cmd, tc_id, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_get_rx_tc_policing(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t tc_id,
-+ struct dpni_rx_tc_policing_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_TC_POLICING,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_GET_RX_TC_POLICING(cmd, tc_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ DPNI_RSP_GET_RX_TC_POLICING(cmd, cfg);
-+
-+ return 0;
-+}
-+
-+void dpni_prepare_early_drop(const struct dpni_early_drop_cfg *cfg,
-+ uint8_t *early_drop_buf)
-+{
-+ uint64_t *ext_params = (uint64_t *)early_drop_buf;
-+
-+ DPNI_PREP_EARLY_DROP(ext_params, cfg);
-+}
-+
-+void dpni_extract_early_drop(struct dpni_early_drop_cfg *cfg,
-+ const uint8_t *early_drop_buf)
-+{
-+ const uint64_t *ext_params = (const uint64_t *)early_drop_buf;
-+
-+ DPNI_EXT_EARLY_DROP(ext_params, cfg);
-+}
-+
-+int dpni_set_rx_tc_early_drop(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t tc_id,
-+ uint64_t early_drop_iova)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_EARLY_DROP,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_SET_RX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_get_rx_tc_early_drop(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t tc_id,
-+ uint64_t early_drop_iova)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_TC_EARLY_DROP,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_GET_RX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_set_tx_tc_early_drop(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t tc_id,
-+ uint64_t early_drop_iova)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_TC_EARLY_DROP,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_SET_TX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_get_tx_tc_early_drop(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t tc_id,
-+ uint64_t early_drop_iova)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_TC_EARLY_DROP,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_GET_TX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_set_rx_tc_congestion_notification(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t tc_id,
-+ const struct dpni_congestion_notification_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(
-+ DPNI_CMDID_SET_RX_TC_CONGESTION_NOTIFICATION,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_SET_RX_TC_CONGESTION_NOTIFICATION(cmd, tc_id, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_get_rx_tc_congestion_notification(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t tc_id,
-+ struct dpni_congestion_notification_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(
-+ DPNI_CMDID_GET_RX_TC_CONGESTION_NOTIFICATION,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_GET_RX_TC_CONGESTION_NOTIFICATION(cmd, tc_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ DPNI_RSP_GET_RX_TC_CONGESTION_NOTIFICATION(cmd, cfg);
-+
-+ return 0;
-+}
-+
-+int dpni_set_tx_tc_congestion_notification(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t tc_id,
-+ const struct dpni_congestion_notification_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(
-+ DPNI_CMDID_SET_TX_TC_CONGESTION_NOTIFICATION,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_SET_TX_TC_CONGESTION_NOTIFICATION(cmd, tc_id, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_get_tx_tc_congestion_notification(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t tc_id,
-+ struct dpni_congestion_notification_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(
-+ DPNI_CMDID_GET_TX_TC_CONGESTION_NOTIFICATION,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_GET_TX_TC_CONGESTION_NOTIFICATION(cmd, tc_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ DPNI_RSP_GET_TX_TC_CONGESTION_NOTIFICATION(cmd, cfg);
-+
-+ return 0;
-+}
-+
-+int dpni_set_tx_conf(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t flow_id,
-+ const struct dpni_tx_conf_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_CONF,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_SET_TX_CONF(cmd, flow_id, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_get_tx_conf(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t flow_id,
-+ struct dpni_tx_conf_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_CONF,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_GET_TX_CONF(cmd, flow_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ DPNI_RSP_GET_TX_CONF(cmd, attr);
-+
-+ return 0;
-+}
-+
-+int dpni_set_tx_conf_congestion_notification(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t flow_id,
-+ const struct dpni_congestion_notification_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(
-+ DPNI_CMDID_SET_TX_CONF_CONGESTION_NOTIFICATION,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_SET_TX_CONF_CONGESTION_NOTIFICATION(cmd, flow_id, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpni_get_tx_conf_congestion_notification(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t flow_id,
-+ struct dpni_congestion_notification_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(
-+ DPNI_CMDID_GET_TX_CONF_CONGESTION_NOTIFICATION,
-+ cmd_flags,
-+ token);
-+ DPNI_CMD_GET_TX_CONF_CONGESTION_NOTIFICATION(cmd, flow_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ DPNI_RSP_GET_TX_CONF_CONGESTION_NOTIFICATION(cmd, cfg);
-+
-+ return 0;
-+}
-diff --git a/drivers/net/dpaa2/mc/dprc.c b/drivers/net/dpaa2/mc/dprc.c
-new file mode 100644
-index 0000000..75c6a68
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/dprc.c
-@@ -0,0 +1,786 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#include <fsl_mc_sys.h>
-+#include <fsl_mc_cmd.h>
-+#include <fsl_dprc.h>
-+#include <fsl_dprc_cmd.h>
-+
-+int dprc_get_container_id(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ int *container_id)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONT_ID,
-+ cmd_flags,
-+ 0);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPRC_RSP_GET_CONTAINER_ID(cmd, *container_id);
-+
-+ return 0;
-+}
-+
-+int dprc_open(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ int container_id,
-+ uint16_t *token)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_OPEN, cmd_flags,
-+ 0);
-+ DPRC_CMD_OPEN(cmd, container_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
-+
-+ return 0;
-+}
-+
-+int dprc_close(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLOSE, cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dprc_create_container(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dprc_cfg *cfg,
-+ int *child_container_id,
-+ uint64_t *child_portal_paddr)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ DPRC_CMD_CREATE_CONTAINER(cmd, cfg);
-+
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CREATE_CONT,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPRC_RSP_CREATE_CONTAINER(cmd, *child_container_id,
-+ *child_portal_paddr);
-+
-+ return 0;
-+}
-+
-+int dprc_destroy_container(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int child_container_id)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_DESTROY_CONT,
-+ cmd_flags,
-+ token);
-+ DPRC_CMD_DESTROY_CONTAINER(cmd, child_container_id);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dprc_reset_container(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int child_container_id)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_RESET_CONT,
-+ cmd_flags,
-+ token);
-+ DPRC_CMD_RESET_CONTAINER(cmd, child_container_id);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dprc_get_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ int *type,
-+ struct dprc_irq_cfg *irq_cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ,
-+ cmd_flags,
-+ token);
-+ DPRC_CMD_GET_IRQ(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPRC_RSP_GET_IRQ(cmd, *type, irq_cfg);
-+
-+ return 0;
-+}
-+
-+int dprc_set_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ struct dprc_irq_cfg *irq_cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ,
-+ cmd_flags,
-+ token);
-+ DPRC_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dprc_get_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t *en)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+ DPRC_CMD_GET_IRQ_ENABLE(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPRC_RSP_GET_IRQ_ENABLE(cmd, *en);
-+
-+ return 0;
-+}
-+
-+int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t en)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+ DPRC_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dprc_get_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *mask)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+ DPRC_CMD_GET_IRQ_MASK(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPRC_RSP_GET_IRQ_MASK(cmd, *mask);
-+
-+ return 0;
-+}
-+
-+int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t mask)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+ DPRC_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dprc_get_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *status)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ DPRC_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPRC_RSP_GET_IRQ_STATUS(cmd, *status);
-+
-+ return 0;
-+}
-+
-+int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t status)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLEAR_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ DPRC_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dprc_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dprc_attributes *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_ATTR,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPRC_RSP_GET_ATTRIBUTES(cmd, attr);
-+
-+ return 0;
-+}
-+
-+int dprc_set_res_quota(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int child_container_id,
-+ char *type,
-+ uint16_t quota)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_RES_QUOTA,
-+ cmd_flags,
-+ token);
-+ DPRC_CMD_SET_RES_QUOTA(cmd, child_container_id, type, quota);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dprc_get_res_quota(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int child_container_id,
-+ char *type,
-+ uint16_t *quota)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_QUOTA,
-+ cmd_flags,
-+ token);
-+ DPRC_CMD_GET_RES_QUOTA(cmd, child_container_id, type);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPRC_RSP_GET_RES_QUOTA(cmd, *quota);
-+
-+ return 0;
-+}
-+
-+int dprc_assign(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int container_id,
-+ struct dprc_res_req *res_req)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_ASSIGN,
-+ cmd_flags,
-+ token);
-+ DPRC_CMD_ASSIGN(cmd, container_id, res_req);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dprc_unassign(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int child_container_id,
-+ struct dprc_res_req *res_req)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_UNASSIGN,
-+ cmd_flags,
-+ token);
-+ DPRC_CMD_UNASSIGN(cmd, child_container_id, res_req);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dprc_get_pool_count(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int *pool_count)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_POOL_COUNT,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPRC_RSP_GET_POOL_COUNT(cmd, *pool_count);
-+
-+ return 0;
-+}
-+
-+int dprc_get_pool(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int pool_index,
-+ char *type)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_POOL,
-+ cmd_flags,
-+ token);
-+ DPRC_CMD_GET_POOL(cmd, pool_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPRC_RSP_GET_POOL(cmd, type);
-+
-+ return 0;
-+}
-+
-+int dprc_get_obj_count(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int *obj_count)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_COUNT,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPRC_RSP_GET_OBJ_COUNT(cmd, *obj_count);
-+
-+ return 0;
-+}
-+
-+int dprc_get_obj(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int obj_index,
-+ struct dprc_obj_desc *obj_desc)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ,
-+ cmd_flags,
-+ token);
-+ DPRC_CMD_GET_OBJ(cmd, obj_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPRC_RSP_GET_OBJ(cmd, obj_desc);
-+
-+ return 0;
-+}
-+
-+int dprc_get_obj_desc(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ char *obj_type,
-+ int obj_id,
-+ struct dprc_obj_desc *obj_desc)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_DESC,
-+ cmd_flags,
-+ token);
-+ DPRC_CMD_GET_OBJ_DESC(cmd, obj_type, obj_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPRC_RSP_GET_OBJ_DESC(cmd, obj_desc);
-+
-+ return 0;
-+}
-+
-+int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ char *obj_type,
-+ int obj_id,
-+ uint8_t irq_index,
-+ struct dprc_irq_cfg *irq_cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_OBJ_IRQ,
-+ cmd_flags,
-+ token);
-+ DPRC_CMD_SET_OBJ_IRQ(cmd,
-+ obj_type,
-+ obj_id,
-+ irq_index,
-+ irq_cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dprc_get_obj_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ char *obj_type,
-+ int obj_id,
-+ uint8_t irq_index,
-+ int *type,
-+ struct dprc_irq_cfg *irq_cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_IRQ,
-+ cmd_flags,
-+ token);
-+ DPRC_CMD_GET_OBJ_IRQ(cmd, obj_type, obj_id, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPRC_RSP_GET_OBJ_IRQ(cmd, *type, irq_cfg);
-+
-+ return 0;
-+}
-+
-+int dprc_get_res_count(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ char *type,
-+ int *res_count)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ *res_count = 0;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_COUNT,
-+ cmd_flags,
-+ token);
-+ DPRC_CMD_GET_RES_COUNT(cmd, type);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPRC_RSP_GET_RES_COUNT(cmd, *res_count);
-+
-+ return 0;
-+}
-+
-+int dprc_get_res_ids(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ char *type,
-+ struct dprc_res_ids_range_desc *range_desc)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_IDS,
-+ cmd_flags,
-+ token);
-+ DPRC_CMD_GET_RES_IDS(cmd, range_desc, type);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPRC_RSP_GET_RES_IDS(cmd, range_desc);
-+
-+ return 0;
-+}
-+
-+int dprc_get_obj_region(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ char *obj_type,
-+ int obj_id,
-+ uint8_t region_index,
-+ struct dprc_region_desc *region_desc)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG,
-+ cmd_flags,
-+ token);
-+ DPRC_CMD_GET_OBJ_REGION(cmd, obj_type, obj_id, region_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPRC_RSP_GET_OBJ_REGION(cmd, region_desc);
-+
-+ return 0;
-+}
-+
-+int dprc_set_obj_label(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ char *obj_type,
-+ int obj_id,
-+ char *label)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_OBJ_LABEL,
-+ cmd_flags,
-+ token);
-+ DPRC_CMD_SET_OBJ_LABEL(cmd, obj_type, obj_id, label);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dprc_connect(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const struct dprc_endpoint *endpoint1,
-+ const struct dprc_endpoint *endpoint2,
-+ const struct dprc_connection_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CONNECT,
-+ cmd_flags,
-+ token);
-+ DPRC_CMD_CONNECT(cmd, endpoint1, endpoint2, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dprc_disconnect(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const struct dprc_endpoint *endpoint)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_DISCONNECT,
-+ cmd_flags,
-+ token);
-+ DPRC_CMD_DISCONNECT(cmd, endpoint);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dprc_get_connection(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const struct dprc_endpoint *endpoint1,
-+ struct dprc_endpoint *endpoint2,
-+ int *state)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONNECTION,
-+ cmd_flags,
-+ token);
-+ DPRC_CMD_GET_CONNECTION(cmd, endpoint1);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPRC_RSP_GET_CONNECTION(cmd, endpoint2, *state);
-+
-+ return 0;
-+}
-diff --git a/drivers/net/dpaa2/mc/dprtc.c b/drivers/net/dpaa2/mc/dprtc.c
-new file mode 100644
-index 0000000..73667af
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/dprtc.c
-@@ -0,0 +1,509 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#include <fsl_mc_sys.h>
-+#include <fsl_mc_cmd.h>
-+#include <fsl_dprtc.h>
-+#include <fsl_dprtc_cmd.h>
-+
-+int dprtc_open(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ int dprtc_id,
-+ uint16_t *token)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_OPEN,
-+ cmd_flags,
-+ 0);
-+ DPRTC_CMD_OPEN(cmd, dprtc_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
-+
-+ return err;
-+}
-+
-+int dprtc_close(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLOSE, cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dprtc_create(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ const struct dprtc_cfg *cfg,
-+ uint16_t *token)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ (void)(cfg); /* unused */
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CREATE,
-+ cmd_flags,
-+ 0);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
-+
-+ return 0;
-+}
-+
-+int dprtc_destroy(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DESTROY,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dprtc_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_ENABLE, cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dprtc_disable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DISABLE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dprtc_is_enabled(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int *en)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_IS_ENABLED, cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPRTC_RSP_IS_ENABLED(cmd, *en);
-+
-+ return 0;
-+}
-+
-+int dprtc_reset(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_RESET,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dprtc_set_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ struct dprtc_irq_cfg *irq_cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ,
-+ cmd_flags,
-+ token);
-+
-+ DPRTC_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dprtc_get_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ int *type,
-+ struct dprtc_irq_cfg *irq_cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ,
-+ cmd_flags,
-+ token);
-+
-+ DPRTC_CMD_GET_IRQ(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPRTC_RSP_GET_IRQ(cmd, *type, irq_cfg);
-+
-+ return 0;
-+}
-+
-+int dprtc_set_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t en)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+
-+ DPRTC_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dprtc_get_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t *en)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+
-+ DPRTC_CMD_GET_IRQ_ENABLE(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPRTC_RSP_GET_IRQ_ENABLE(cmd, *en);
-+
-+ return 0;
-+}
-+
-+int dprtc_set_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t mask)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+
-+ DPRTC_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dprtc_get_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *mask)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+
-+ DPRTC_CMD_GET_IRQ_MASK(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPRTC_RSP_GET_IRQ_MASK(cmd, *mask);
-+
-+ return 0;
-+}
-+
-+int dprtc_get_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *status)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+
-+ DPRTC_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPRTC_RSP_GET_IRQ_STATUS(cmd, *status);
-+
-+ return 0;
-+}
-+
-+int dprtc_clear_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t status)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLEAR_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+
-+ DPRTC_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dprtc_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dprtc_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_ATTR,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPRTC_RSP_GET_ATTRIBUTES(cmd, attr);
-+
-+ return 0;
-+}
-+
-+int dprtc_set_clock_offset(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int64_t offset)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_CLOCK_OFFSET,
-+ cmd_flags,
-+ token);
-+
-+ DPRTC_CMD_SET_CLOCK_OFFSET(cmd, offset);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint32_t freq_compensation)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_FREQ_COMPENSATION,
-+ cmd_flags,
-+ token);
-+
-+ DPRTC_CMD_SET_FREQ_COMPENSATION(cmd, freq_compensation);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint32_t *freq_compensation)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_FREQ_COMPENSATION,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPRTC_RSP_GET_FREQ_COMPENSATION(cmd, *freq_compensation);
-+
-+ return 0;
-+}
-+
-+int dprtc_get_time(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint64_t *time)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_TIME,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPRTC_RSP_GET_TIME(cmd, *time);
-+
-+ return 0;
-+}
-+
-+int dprtc_set_time(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint64_t time)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_TIME,
-+ cmd_flags,
-+ token);
-+
-+ DPRTC_CMD_SET_TIME(cmd, time);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dprtc_set_alarm(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token, uint64_t time)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_ALARM,
-+ cmd_flags,
-+ token);
-+
-+ DPRTC_CMD_SET_ALARM(cmd, time);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-diff --git a/drivers/net/dpaa2/mc/dpseci.c b/drivers/net/dpaa2/mc/dpseci.c
-new file mode 100644
-index 0000000..a4b932a
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/dpseci.c
-@@ -0,0 +1,502 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#include <fsl_mc_sys.h>
-+#include <fsl_mc_cmd.h>
-+#include <fsl_dpseci.h>
-+#include <fsl_dpseci_cmd.h>
-+
-+int dpseci_open(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ int dpseci_id,
-+ uint16_t *token)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_OPEN,
-+ cmd_flags,
-+ 0);
-+ DPSECI_CMD_OPEN(cmd, dpseci_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
-+
-+ return 0;
-+}
-+
-+int dpseci_close(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpseci_create(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ const struct dpseci_cfg *cfg,
-+ uint16_t *token)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CREATE,
-+ cmd_flags,
-+ 0);
-+ DPSECI_CMD_CREATE(cmd, cfg);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
-+
-+ return 0;
-+}
-+
-+int dpseci_destroy(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DESTROY,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpseci_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpseci_disable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpseci_is_enabled(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int *en)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPSECI_RSP_IS_ENABLED(cmd, *en);
-+
-+ return 0;
-+}
-+
-+int dpseci_reset(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpseci_get_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ int *type,
-+ struct dpseci_irq_cfg *irq_cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ,
-+ cmd_flags,
-+ token);
-+ DPSECI_CMD_GET_IRQ(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPSECI_RSP_GET_IRQ(cmd, *type, irq_cfg);
-+
-+ return 0;
-+}
-+
-+int dpseci_set_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ struct dpseci_irq_cfg *irq_cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ,
-+ cmd_flags,
-+ token);
-+ DPSECI_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpseci_get_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t *en)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+ DPSECI_CMD_GET_IRQ_ENABLE(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPSECI_RSP_GET_IRQ_ENABLE(cmd, *en);
-+
-+ return 0;
-+}
-+
-+int dpseci_set_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t en)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+ DPSECI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpseci_get_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *mask)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+ DPSECI_CMD_GET_IRQ_MASK(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPSECI_RSP_GET_IRQ_MASK(cmd, *mask);
-+
-+ return 0;
-+}
-+
-+int dpseci_set_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t mask)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+ DPSECI_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpseci_get_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *status)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ DPSECI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPSECI_RSP_GET_IRQ_STATUS(cmd, *status);
-+
-+ return 0;
-+}
-+
-+int dpseci_clear_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t status)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLEAR_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ DPSECI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
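
The IRQ helpers above follow the common MC pattern: unmask the causes of interest, enable the index, and later read-and-clear the status word. A sketch under the same assumptions as the previous one (fsl_dpseci.h, cmd_flags of 0):

    #include <stdint.h>
    #include <fsl_mc_sys.h>
    #include <fsl_dpseci.h>            /* assumed */

    static int dpseci_arm_irq(struct fsl_mc_io *mc_io, uint16_t token,
                              uint8_t irq_index, uint32_t causes)
    {
            int err;

            /* unmask the requested causes, then enable the interrupt index */
            err = dpseci_set_irq_mask(mc_io, 0, token, irq_index, causes);
            if (err)
                    return err;
            return dpseci_set_irq_enable(mc_io, 0, token, irq_index, 1);
    }

    static int dpseci_ack_irq(struct fsl_mc_io *mc_io, uint16_t token,
                              uint8_t irq_index)
    {
            uint32_t status = 0;  /* GET_IRQ_STATUS also encodes this value into the request */
            int err;

            err = dpseci_get_irq_status(mc_io, 0, token, irq_index, &status);
            if (err)
                    return err;
            /* write-1-to-clear whatever causes were reported */
            return dpseci_clear_irq_status(mc_io, 0, token, irq_index, status);
    }
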
-+
-+int dpseci_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpseci_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPSECI_RSP_GET_ATTR(cmd, attr);
-+
-+ return 0;
-+}
-+
-+int dpseci_set_rx_queue(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t queue,
-+ const struct dpseci_rx_queue_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE,
-+ cmd_flags,
-+ token);
-+ DPSECI_CMD_SET_RX_QUEUE(cmd, queue, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpseci_get_rx_queue(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t queue,
-+ struct dpseci_rx_queue_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE,
-+ cmd_flags,
-+ token);
-+ DPSECI_CMD_GET_RX_QUEUE(cmd, queue);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPSECI_RSP_GET_RX_QUEUE(cmd, attr);
-+
-+ return 0;
-+}
-+
-+int dpseci_get_tx_queue(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t queue,
-+ struct dpseci_tx_queue_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE,
-+ cmd_flags,
-+ token);
-+ DPSECI_CMD_GET_TX_QUEUE(cmd, queue);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPSECI_RSP_GET_TX_QUEUE(cmd, attr);
-+
-+ return 0;
-+}
-+
-+int dpseci_get_sec_attr(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpseci_sec_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPSECI_RSP_GET_SEC_ATTR(cmd, attr);
-+
-+ return 0;
-+}
-+
-+int dpseci_get_sec_counters(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpseci_sec_counters *counters)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_COUNTERS,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPSECI_RSP_GET_SEC_COUNTERS(cmd, counters);
-+
-+ return 0;
-+}
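
The remaining getters are plain fill-a-struct queries. The members of dpseci_attr and dpseci_sec_counters are declared in the (assumed) fsl_dpseci.h and are not visible in this patch, so this sketch only threads the structs through and checks return codes:

    #include <stdint.h>
    #include <fsl_mc_sys.h>
    #include <fsl_dpseci.h>            /* assumed */

    static int dpseci_dump_state(struct fsl_mc_io *mc_io, uint16_t token,
                                 struct dpseci_attr *attr,
                                 struct dpseci_sec_counters *counters)
    {
            int err;

            err = dpseci_get_attributes(mc_io, 0, token, attr);
            if (err)
                    return err;
            return dpseci_get_sec_counters(mc_io, 0, token, counters);
    }
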
-diff --git a/drivers/net/dpaa2/mc/dpsw.c b/drivers/net/dpaa2/mc/dpsw.c
-new file mode 100644
-index 0000000..2034b55
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/dpsw.c
-@@ -0,0 +1,1639 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#include <fsl_mc_sys.h>
-+#include <fsl_mc_cmd.h>
-+#include <fsl_dpsw.h>
-+#include <fsl_dpsw_cmd.h>
-+
-+/* internal functions */
-+/* Pack interface IDs into the command's 64-bit parameter words: for each
-+ * interface, bit (if_id % 64) of params[start_param + if_id / 64] is set,
-+ * with at most DPSW_MAX_IF entries consumed.
-+ */
-+static void build_if_id_bitmap(const uint16_t *if_id,
-+ const uint16_t num_ifs,
-+ struct mc_command *cmd,
-+ int start_param)
-+{
-+ int i;
-+
-+ for (i = 0; (i < num_ifs) && (i < DPSW_MAX_IF); i++)
-+ cmd->params[start_param + (if_id[i] / 64)] |= mc_enc(
-+ (if_id[i] % 64), 1, 1);
-+}
-+
-+/* Inverse of build_if_id_bitmap(): expand the bitmap carried in the response
-+ * parameters back into an if_id array, returning the interface count through
-+ * num_ifs.
-+ */
-+static int read_if_id_bitmap(uint16_t *if_id,
-+ uint16_t *num_ifs,
-+ struct mc_command *cmd,
-+ int start_param)
-+{
-+ int bitmap[DPSW_MAX_IF] = { 0 };
-+ int i, j = 0;
-+ int count = 0;
-+
-+ for (i = 0; i < DPSW_MAX_IF; i++) {
-+ bitmap[i] = (int)mc_dec(cmd->params[start_param + i / 64],
-+ i % 64, 1);
-+ count += bitmap[i];
-+ }
-+
-+ *num_ifs = (uint16_t)count;
-+
-+ for (i = 0; (i < DPSW_MAX_IF) && (j < count); i++) {
-+ if (bitmap[i]) {
-+ if_id[j] = (uint16_t)i;
-+ j++;
-+ }
-+ }
-+
-+ return 0;
-+}
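
For reference, the packing done by build_if_id_bitmap() is equivalent to the stand-alone helper below, assuming mc_enc(offset, 1, 1) yields a 64-bit word with only bit 'offset' set (which is how the call is used above):

    #include <stdint.h>

    /* interface N lands in 64-bit word (N / 64), bit (N % 64); words[0]
     * corresponds to cmd->params[start_param] in the real helper */
    static void pack_if_ids(const uint16_t *if_id, uint16_t num_ifs,
                            uint64_t *words, unsigned int max_ifs)
    {
            uint16_t i;

            for (i = 0; i < num_ifs && i < max_ifs; i++)
                    words[if_id[i] / 64] |= 1ULL << (if_id[i] % 64);
    }

So an interface list of {3, 64} sets bit 3 of the first word and bit 0 of the second; read_if_id_bitmap() simply walks those bits back out.
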
-+
-+/* DPSW APIs */
-+int dpsw_open(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ int dpsw_id,
-+ uint16_t *token)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_OPEN,
-+ cmd_flags,
-+ 0);
-+ DPSW_CMD_OPEN(cmd, dpsw_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
-+
-+ return 0;
-+}
-+
-+int dpsw_close(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLOSE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_create(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ const struct dpsw_cfg *cfg,
-+ uint16_t *token)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CREATE,
-+ cmd_flags,
-+ 0);
-+ DPSW_CMD_CREATE(cmd, cfg);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
-+
-+ return 0;
-+}
-+
-+int dpsw_destroy(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_DESTROY,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ENABLE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_disable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_DISABLE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_is_enabled(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int *en)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IS_ENABLED, cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPSW_RSP_IS_ENABLED(cmd, *en);
-+
-+ return 0;
-+}
-+
-+int dpsw_reset(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_RESET,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_set_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ struct dpsw_irq_cfg *irq_cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_get_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ int *type,
-+ struct dpsw_irq_cfg *irq_cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_GET_IRQ(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPSW_RSP_GET_IRQ(cmd, *type, irq_cfg);
-+
-+ return 0;
-+}
-+
-+int dpsw_set_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t en)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_get_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t *en)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_GET_IRQ_ENABLE(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPSW_RSP_GET_IRQ_ENABLE(cmd, *en);
-+
-+ return 0;
-+}
-+
-+int dpsw_set_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t mask)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_get_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *mask)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_GET_IRQ_MASK(cmd, irq_index);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPSW_RSP_GET_IRQ_MASK(cmd, *mask);
-+
-+ return 0;
-+}
-+
-+int dpsw_get_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *status)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPSW_RSP_GET_IRQ_STATUS(cmd, *status);
-+
-+ return 0;
-+}
-+
-+int dpsw_clear_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t status)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLEAR_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpsw_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_ATTR,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPSW_RSP_GET_ATTR(cmd, attr);
-+
-+ return 0;
-+}
-+
-+int dpsw_set_reflection_if(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_REFLECTION_IF,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_SET_REFLECTION_IF(cmd, if_id);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ struct dpsw_link_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_LINK_CFG,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_IF_SET_LINK_CFG(cmd, if_id, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_if_get_link_state(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ struct dpsw_link_state *state)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_LINK_STATE,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_IF_GET_LINK_STATE(cmd, if_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPSW_RSP_IF_GET_LINK_STATE(cmd, state);
-+
-+ return 0;
-+}
-+
-+int dpsw_if_set_flooding(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ int en)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_FLOODING,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_IF_SET_FLOODING(cmd, if_id, en);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ int en)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_BROADCAST,
-+ cmd_flags,
-+ token);
-+ /* note: the payload is encoded with the flooding macro (if_id plus
-+  * enable flag); only the command ID above differs */
-+ DPSW_CMD_IF_SET_FLOODING(cmd, if_id, en);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_if_set_multicast(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ int en)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_MULTICAST,
-+ cmd_flags,
-+ token);
-+ /* note: as with broadcast, the payload is encoded with the flooding
-+  * macro; only the command ID above differs */
-+ DPSW_CMD_IF_SET_FLOODING(cmd, if_id, en);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_if_set_tci(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ const struct dpsw_tci_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_TCI,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_IF_SET_TCI(cmd, if_id, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_if_get_tci(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ struct dpsw_tci_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err = 0;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_TCI,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_IF_GET_TCI(cmd, if_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPSW_RSP_IF_GET_TCI(cmd, cfg);
-+
-+ return 0;
-+}
-+
-+int dpsw_if_set_stp(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ const struct dpsw_stp_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_STP,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_IF_SET_STP(cmd, if_id, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_if_set_accepted_frames(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ const struct dpsw_accepted_frames_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_ACCEPTED_FRAMES,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_IF_SET_ACCEPTED_FRAMES(cmd, if_id, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_if_set_accept_all_vlan(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ int accept_all)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IF_ACCEPT_ALL_VLAN,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_IF_SET_ACCEPT_ALL_VLAN(cmd, if_id, accept_all);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_if_get_counter(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ enum dpsw_counter type,
-+ uint64_t *counter)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_COUNTER,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_IF_GET_COUNTER(cmd, if_id, type);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPSW_RSP_IF_GET_COUNTER(cmd, *counter);
-+
-+ return 0;
-+}
-+
-+int dpsw_if_set_counter(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ enum dpsw_counter type,
-+ uint64_t counter)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_COUNTER,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_IF_SET_COUNTER(cmd, if_id, type, counter);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
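
A small read-and-reset sketch built on the two counter calls above; the dpsw_counter enumerator values are defined in fsl_dpsw.h and not listed in this patch, so the counter type is taken as a parameter, and cmd_flags of 0 is again an assumption:

    #include <stdint.h>
    #include <fsl_mc_sys.h>
    #include <fsl_dpsw.h>

    static int dpsw_read_and_clear_counter(struct fsl_mc_io *mc_io, uint16_t token,
                                           uint16_t if_id, enum dpsw_counter type,
                                           uint64_t *value)
    {
            int err;

            err = dpsw_if_get_counter(mc_io, 0, token, if_id, type, value);
            if (err)
                    return err;

            /* zero the same counter after reading it */
            return dpsw_if_set_counter(mc_io, 0, token, if_id, type, 0);
    }
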
-+
-+int dpsw_if_set_tx_selection(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ const struct dpsw_tx_selection_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_TX_SELECTION,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_IF_SET_TX_SELECTION(cmd, if_id, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_if_add_reflection(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ const struct dpsw_reflection_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ADD_REFLECTION,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_IF_ADD_REFLECTION(cmd, if_id, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ const struct dpsw_reflection_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_REMOVE_REFLECTION,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_IF_REMOVE_REFLECTION(cmd, if_id, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_if_set_flooding_metering(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ const struct dpsw_metering_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_FLOODING_METERING,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_IF_SET_FLOODING_METERING(cmd, if_id, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_if_set_metering(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ uint8_t tc_id,
-+ const struct dpsw_metering_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_METERING,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_IF_SET_METERING(cmd, if_id, tc_id, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+void dpsw_prepare_early_drop(const struct dpsw_early_drop_cfg *cfg,
-+ uint8_t *early_drop_buf)
-+{
-+ uint64_t *ext_params = (uint64_t *)early_drop_buf;
-+
-+ DPSW_PREP_EARLY_DROP(ext_params, cfg);
-+}
-+
-+int dpsw_if_set_early_drop(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ uint8_t tc_id,
-+ uint64_t early_drop_iova)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_EARLY_DROP,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_IF_SET_EARLY_DROP(cmd, if_id, tc_id, early_drop_iova);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_add_custom_tpid(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const struct dpsw_custom_tpid_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ADD_CUSTOM_TPID,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_ADD_CUSTOM_TPID(cmd, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_remove_custom_tpid(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const struct dpsw_custom_tpid_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_REMOVE_CUSTOM_TPID,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_REMOVE_CUSTOM_TPID(cmd, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_if_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ENABLE,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_IF_ENABLE(cmd, if_id);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_if_disable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_DISABLE,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_IF_DISABLE(cmd, if_id);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_if_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ struct dpsw_if_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_ATTR,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_IF_GET_ATTR(cmd, if_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPSW_RSP_IF_GET_ATTR(cmd, attr);
-+
-+ return 0;
-+}
-+
-+int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ uint16_t frame_length)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_IF_SET_MAX_FRAME_LENGTH(cmd, if_id, frame_length);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_if_get_max_frame_length(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ uint16_t *frame_length)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_MAX_FRAME_LENGTH,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_IF_GET_MAX_FRAME_LENGTH(cmd, if_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ DPSW_RSP_IF_GET_MAX_FRAME_LENGTH(cmd, *frame_length);
-+
-+ return 0;
-+}
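
The two frame-length calls invite a simple set-and-verify round trip; a sketch (cmd_flags of 0 assumed):

    #include <stdint.h>
    #include <fsl_mc_sys.h>
    #include <fsl_dpsw.h>

    static int dpsw_if_update_mfl(struct fsl_mc_io *mc_io, uint16_t token,
                                  uint16_t if_id, uint16_t mfl)
    {
            uint16_t readback = 0;
            int err;

            err = dpsw_if_set_max_frame_length(mc_io, 0, token, if_id, mfl);
            if (err)
                    return err;

            /* read back to confirm the MC accepted the value */
            err = dpsw_if_get_max_frame_length(mc_io, 0, token, if_id, &readback);
            if (err)
                    return err;

            return (readback == mfl) ? 0 : -1;
    }
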
-+
-+int dpsw_vlan_add(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t vlan_id,
-+ const struct dpsw_vlan_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_VLAN_ADD(cmd, vlan_id, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_vlan_add_if(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t vlan_id,
-+ const struct dpsw_vlan_if_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1);
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_VLAN_ADD_IF(cmd, vlan_id);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t vlan_id,
-+ const struct dpsw_vlan_if_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1);
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF_UNTAGGED,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_VLAN_ADD_IF_UNTAGGED(cmd, vlan_id);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_vlan_add_if_flooding(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t vlan_id,
-+ const struct dpsw_vlan_if_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1);
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF_FLOODING,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_VLAN_ADD_IF_FLOODING(cmd, vlan_id);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t vlan_id,
-+ const struct dpsw_vlan_if_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1);
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_VLAN_REMOVE_IF(cmd, vlan_id);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t vlan_id,
-+ const struct dpsw_vlan_if_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1);
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_VLAN_REMOVE_IF_UNTAGGED(cmd, vlan_id);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_vlan_remove_if_flooding(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t vlan_id,
-+ const struct dpsw_vlan_if_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1);
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF_FLOODING,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_VLAN_REMOVE_IF_FLOODING(cmd, vlan_id);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_vlan_remove(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t vlan_id)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_VLAN_REMOVE(cmd, vlan_id);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_vlan_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t vlan_id,
-+ struct dpsw_vlan_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_ATTRIBUTES,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_VLAN_GET_ATTR(cmd, vlan_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPSW_RSP_VLAN_GET_ATTR(cmd, attr);
-+
-+ return 0;
-+}
-+
-+int dpsw_vlan_get_if(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t vlan_id,
-+ struct dpsw_vlan_if_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_VLAN_GET_IF(cmd, vlan_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPSW_RSP_VLAN_GET_IF(cmd, cfg);
-+ read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, &cmd, 1);
-+
-+ return 0;
-+}
-+
-+int dpsw_vlan_get_if_flooding(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t vlan_id,
-+ struct dpsw_vlan_if_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF_FLOODING,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_VLAN_GET_IF_FLOODING(cmd, vlan_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPSW_RSP_VLAN_GET_IF_FLOODING(cmd, cfg);
-+ read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, &cmd, 1);
-+
-+ return 0;
-+}
-+
-+int dpsw_vlan_get_if_untagged(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t vlan_id,
-+ struct dpsw_vlan_if_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF_UNTAGGED,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_VLAN_GET_IF_UNTAGGED(cmd, vlan_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPSW_RSP_VLAN_GET_IF(cmd, cfg);
-+ read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, &cmd, 1);
-+
-+ return 0;
-+}
-+
-+int dpsw_fdb_add(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t *fdb_id,
-+ const struct dpsw_fdb_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_FDB_ADD(cmd, cfg);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPSW_RSP_FDB_ADD(cmd, *fdb_id);
-+
-+ return 0;
-+}
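
dpsw_fdb_add() is the one FDB call that hands back an identifier; everything else keys off that fdb_id. The members of dpsw_fdb_cfg and dpsw_fdb_unicast_cfg are defined in fsl_dpsw.h and not shown here, so both are passed in ready-made in this sketch:

    #include <stdint.h>
    #include <fsl_mc_sys.h>
    #include <fsl_dpsw.h>

    static int dpsw_setup_fdb(struct fsl_mc_io *mc_io, uint16_t token,
                              const struct dpsw_fdb_cfg *fdb_cfg,
                              const struct dpsw_fdb_unicast_cfg *uc_cfg,
                              uint16_t *fdb_id)
    {
            int err;

            err = dpsw_fdb_add(mc_io, 0, token, fdb_id, fdb_cfg);
            if (err)
                    return err;

            /* install a unicast entry in the table just created */
            return dpsw_fdb_add_unicast(mc_io, 0, token, *fdb_id, uc_cfg);
    }
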
-+
-+int dpsw_fdb_remove(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t fdb_id)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_FDB_REMOVE(cmd, fdb_id);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t fdb_id,
-+ const struct dpsw_fdb_unicast_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_UNICAST,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_FDB_ADD_UNICAST(cmd, fdb_id, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_fdb_get_unicast(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t fdb_id,
-+ struct dpsw_fdb_unicast_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_UNICAST,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_FDB_GET_UNICAST(cmd, fdb_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPSW_RSP_FDB_GET_UNICAST(cmd, cfg);
-+
-+ return 0;
-+}
-+
-+int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t fdb_id,
-+ const struct dpsw_fdb_unicast_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_UNICAST,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_FDB_REMOVE_UNICAST(cmd, fdb_id, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t fdb_id,
-+ const struct dpsw_fdb_multicast_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 2);
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_MULTICAST,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_FDB_ADD_MULTICAST(cmd, fdb_id, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_fdb_get_multicast(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t fdb_id,
-+ struct dpsw_fdb_multicast_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_MULTICAST,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_FDB_GET_MULTICAST(cmd, fdb_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPSW_RSP_FDB_GET_MULTICAST(cmd, cfg);
-+ read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, &cmd, 2);
-+
-+ return 0;
-+}
-+
-+int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t fdb_id,
-+ const struct dpsw_fdb_multicast_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 2);
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_MULTICAST,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_FDB_REMOVE_MULTICAST(cmd, fdb_id, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t fdb_id,
-+ enum dpsw_fdb_learning_mode mode)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_SET_LEARNING_MODE,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_FDB_SET_LEARNING_MODE(cmd, fdb_id, mode);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_fdb_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t fdb_id,
-+ struct dpsw_fdb_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_ATTR,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_FDB_GET_ATTR(cmd, fdb_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPSW_RSP_FDB_GET_ATTR(cmd, attr);
-+
-+ return 0;
-+}
-+
-+int dpsw_acl_add(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t *acl_id,
-+ const struct dpsw_acl_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_ACL_ADD(cmd, cfg);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPSW_RSP_ACL_ADD(cmd, *acl_id);
-+
-+ return 0;
-+}
-+
-+int dpsw_acl_remove(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t acl_id)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_ACL_REMOVE(cmd, acl_id);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key,
-+ uint8_t *entry_cfg_buf)
-+{
-+ uint64_t *ext_params = (uint64_t *)entry_cfg_buf;
-+
-+ DPSW_PREP_ACL_ENTRY(ext_params, key);
-+}
-+
-+int dpsw_acl_add_entry(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t acl_id,
-+ const struct dpsw_acl_entry_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_ENTRY,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_ACL_ADD_ENTRY(cmd, acl_id, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
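
dpsw_acl_add_entry() relies on the prepare-then-add pattern: dpsw_acl_prepare_entry_cfg() serializes the key into a caller-supplied buffer, and the entry configuration then references that buffer by its I/O address. The buffer size and the exact field of dpsw_acl_entry_cfg that carries the address come from fsl_dpsw.h and are not visible in this patch, so both are left to the caller in this sketch:

    #include <stdint.h>
    #include <fsl_mc_sys.h>
    #include <fsl_dpsw.h>

    static int dpsw_install_acl_entry(struct fsl_mc_io *mc_io, uint16_t token,
                                      uint16_t acl_id,
                                      const struct dpsw_acl_key *key,
                                      uint8_t *entry_buf, /* DMA-able, sized per fsl_dpsw.h */
                                      struct dpsw_acl_entry_cfg *cfg)
    {
            /* serialize the key into the layout expected by the MC */
            dpsw_acl_prepare_entry_cfg(key, entry_buf);

            /* cfg is assumed to already reference entry_buf's I/O address
             * (the field name is not shown in this patch) */
            return dpsw_acl_add_entry(mc_io, 0, token, acl_id, cfg);
    }
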
-+
-+int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t acl_id,
-+ const struct dpsw_acl_entry_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_ENTRY,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_ACL_REMOVE_ENTRY(cmd, acl_id, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_acl_add_if(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t acl_id,
-+ const struct dpsw_acl_if_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1);
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_IF,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_ACL_ADD_IF(cmd, acl_id, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_acl_remove_if(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t acl_id,
-+ const struct dpsw_acl_if_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ build_if_id_bitmap(cfg->if_id, cfg->num_ifs, &cmd, 1);
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_IF,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_ACL_REMOVE_IF(cmd, acl_id, cfg);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_acl_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t acl_id,
-+ struct dpsw_acl_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_GET_ATTR,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_ACL_GET_ATTR(cmd, acl_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPSW_RSP_ACL_GET_ATTR(cmd, attr);
-+
-+ return 0;
-+}
-+
-+int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpsw_ctrl_if_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_GET_ATTR,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ DPSW_RSP_CTRL_IF_GET_ATTR(cmd, attr);
-+
-+ return 0;
-+}
-+
-+int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const struct dpsw_ctrl_if_pools_cfg *pools)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_SET_POOLS,
-+ cmd_flags,
-+ token);
-+ DPSW_CMD_CTRL_IF_SET_POOLS(cmd, pools);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_ENABLE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpsw_ctrl_if_disable() - Disable the control interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_DISABLE,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-diff --git a/drivers/net/dpaa2/mc/fsl_dpaiop.h b/drivers/net/dpaa2/mc/fsl_dpaiop.h
-new file mode 100644
-index 0000000..b039b2a
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/fsl_dpaiop.h
-@@ -0,0 +1,494 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef __FSL_DPAIOP_H
-+#define __FSL_DPAIOP_H
-+
-+struct fsl_mc_io;
-+
-+/* Data Path AIOP API
-+ * Contains initialization APIs and runtime control APIs for DPAIOP
-+ */
-+
-+/**
-+ * dpaiop_open() - Open a control session for the specified object.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @dpaiop_id: DPAIOP unique ID
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * This function can be used to open a control session for an
-+ * already created object; an object may have been declared in
-+ * the DPL or by calling the dpaiop_create function.
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent commands for
-+ * this specific object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpaiop_open(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ int dpaiop_id,
-+ uint16_t *token);
-+
-+/**
-+ * dpaiop_close() - Close the control session of the object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPAIOP object
-+ *
-+ * After this function is called, no further operations are
-+ * allowed on the object without opening a new control session.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpaiop_close(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token);
-+
-+/**
-+ * struct dpaiop_cfg - Structure representing DPAIOP configuration
-+ * @aiop_id: AIOP ID
-+ * @aiop_container_id: AIOP container ID
-+ */
-+struct dpaiop_cfg {
-+ int aiop_id;
-+ int aiop_container_id;
-+};
-+
-+/**
-+ * dpaiop_create() - Create the DPAIOP object.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @cfg: Configuration structure
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * Create the DPAIOP object, allocate required resources and
-+ * perform required initialization.
-+ *
-+ * The object can be created either by declaring it in the
-+ * DPL file, or by calling this function.
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent calls to
-+ * this specific object. For objects that are created using the
-+ * DPL file, call the dpaiop_open() function to get an authentication
-+ * token first.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpaiop_create(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ const struct dpaiop_cfg *cfg,
-+ uint16_t *token);
-+
-+/**
-+ * dpaiop_destroy() - Destroy the DPAIOP object and release all its resources.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPAIOP object
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dpaiop_destroy(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token);
-+
-+/**
-+ * dpaiop_reset() - Reset the DPAIOP, returns the object to initial state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPAIOP object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpaiop_reset(struct fsl_mc_io *mc_io, uint32_t cmd_flags, uint16_t token);
-+
-+/**
-+ * struct dpaiop_irq_cfg - IRQ configuration
-+ * @addr: Address that must be written to signal a message-based interrupt
-+ * @val: Value to write into irq_addr address
-+ * @irq_num: A user defined number associated with this IRQ
-+ */
-+struct dpaiop_irq_cfg {
-+ uint64_t addr;
-+ uint32_t val;
-+ int irq_num;
-+};
-+
-+/**
-+ * dpaiop_set_irq() - Set IRQ information for the DPAIOP to trigger an interrupt.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPAIOP object
-+ * @irq_index: Identifies the interrupt index to configure
-+ * @irq_cfg: IRQ configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpaiop_set_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ struct dpaiop_irq_cfg *irq_cfg);
-+
-+/**
-+ * dpaiop_get_irq() - Get IRQ information from the DPAIOP.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPAIOP object
-+ * @irq_index: The interrupt index to configure
-+ * @type: Interrupt type: 0 represents message interrupt
-+ * type (both irq_addr and irq_val are valid)
-+ * @irq_cfg: IRQ attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpaiop_get_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ int *type,
-+ struct dpaiop_irq_cfg *irq_cfg);
-+
-+/**
-+ * dpaiop_set_irq_enable() - Set overall interrupt state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPAIOP object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Interrupt state - enable = 1, disable = 0
-+ *
-+ * Allows GPP software to control when interrupts are generated.
-+ * Each interrupt can have up to 32 causes. The enable/disable setting
-+ * controls the overall interrupt state: if the interrupt is disabled, none
-+ * of its causes can assert the interrupt.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpaiop_set_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t en);
-+
-+/**
-+ * dpaiop_get_irq_enable() - Get overall interrupt state
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPAIOP object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Returned interrupt state - enable = 1, disable = 0
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpaiop_get_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t *en);
-+
-+/**
-+ * dpaiop_set_irq_mask() - Set interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPAIOP object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: Event mask to trigger interrupt;
-+ * each bit:
-+ * 0 = ignore event
-+ * 1 = consider event for asserting IRQ
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpaiop_set_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t mask);
-+
-+/**
-+ * dpaiop_get_irq_mask() - Get interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPAIOP object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: Returned event mask to trigger interrupt
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpaiop_get_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *mask);
-+
-+/**
-+ * dpaiop_get_irq_status() - Get the current status of any pending interrupts.
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPAIOP object
-+ * @irq_index: The interrupt index to configure
-+ * @status: Returned interrupts status - one bit per cause:
-+ * 0 = no interrupt pending
-+ * 1 = interrupt pending
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpaiop_get_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *status);
-+
-+/**
-+ * dpaiop_clear_irq_status() - Clear a pending interrupt's status
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPAIOP object
-+ * @irq_index: The interrupt index to configure
-+ * @status: Bits to clear (W1C) - one bit per cause:
-+ * 0 = don't change
-+ * 1 = clear status bit
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpaiop_clear_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t status);
-+
-+/**
-+ * struct dpaiop_attr - Structure representing DPAIOP attributes
-+ * @id: AIOP ID
-+ * @version: DPAIOP version
-+ */
-+struct dpaiop_attr {
-+ int id;
-+ /**
-+ * struct version - Structure representing DPAIOP version
-+ * @major: DPAIOP major version
-+ * @minor: DPAIOP minor version
-+ */
-+ struct {
-+ uint16_t major;
-+ uint16_t minor;
-+ } version;
-+};
-+
-+/**
-+ * dpaiop_get_attributes() - Retrieve DPAIOP attributes.
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPAIOP object
-+ * @attr: Returned object's attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpaiop_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpaiop_attr *attr);
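
A caller-side sketch of the open/query/close sequence documented above, using only calls and fields declared in this header (the 0 passed for cmd_flags is an assumption):

    #include <stdio.h>
    #include <stdint.h>
    #include <fsl_dpaiop.h>

    static int dpaiop_print_version(struct fsl_mc_io *mc_io, int dpaiop_id)
    {
            struct dpaiop_attr attr = { 0 };
            uint16_t token = 0;
            int err;

            err = dpaiop_open(mc_io, 0, dpaiop_id, &token);  /* 0: default cmd_flags (assumed) */
            if (err)
                    return err;

            err = dpaiop_get_attributes(mc_io, 0, token, &attr);
            if (!err)
                    printf("dpaiop.%d version %u.%u\n", attr.id,
                           (unsigned int)attr.version.major,
                           (unsigned int)attr.version.minor);

            dpaiop_close(mc_io, 0, token);
            return err;
    }
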
-+
-+/**
-+ * struct dpaiop_load_cfg - AIOP load configuration
-+ * @options: AIOP load options
-+ * @img_iova: I/O virtual address of AIOP ELF image
-+ * @img_size: Size of AIOP ELF image in memory (in bytes)
-+ */
-+struct dpaiop_load_cfg {
-+ uint64_t options;
-+ uint64_t img_iova;
-+ uint32_t img_size;
-+};
-+
-+/**
-+ * dpaiop_load() - Loads an image into the AIOP
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPAIOP object
-+ * @cfg: AIOP load configurations
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpaiop_load(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpaiop_load_cfg *cfg);
-+
-+#define DPAIOP_RUN_OPT_DEBUG 0x0000000000000001ULL
-+
-+/**
-+ * struct dpaiop_run_cfg - AIOP run configuration
-+ * @cores_mask: Mask of AIOP cores to run (core 0 in most significant bit)
-+ * @options: Execution options; see the DPAIOP_RUN_OPT_ flags above
-+ * @args_iova: I/O virtual address of AIOP arguments
-+ * @args_size: Size of AIOP arguments in memory (in bytes)
-+ */
-+struct dpaiop_run_cfg {
-+ uint64_t cores_mask;
-+ uint64_t options;
-+ uint64_t args_iova;
-+ uint32_t args_size;
-+};
-+
-+/**
-+ * dpaiop_run() - Start AIOP execution
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPAIOP object
-+ * @cfg: AIOP run configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpaiop_run(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const struct dpaiop_run_cfg *cfg);
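
Taken together, dpaiop_load() and dpaiop_run() describe a two-step boot flow: place the ELF image in DMA-able memory, load it, then kick the selected cores. A hedged usage sketch (the helper name, token, image IOVA and core mask are assumptions supplied by the caller; cmd_flags is passed as 0):

#include "fsl_dpaiop.h"   /* header above; include path is illustrative */

static int boot_aiop(struct fsl_mc_io *mc_io, uint16_t token,
		     uint64_t img_iova, uint32_t img_size)
{
	struct dpaiop_load_cfg load = {
		.options  = 0,
		.img_iova = img_iova,	/* I/O virtual address of the ELF image */
		.img_size = img_size,
	};
	struct dpaiop_run_cfg run = {
		.cores_mask = 0x8000000000000000ULL, /* core 0 = MSB, per the doc above */
		.options    = 0,		     /* or DPAIOP_RUN_OPT_DEBUG */
		.args_iova  = 0,
		.args_size  = 0,
	};
	int err;

	err = dpaiop_load(mc_io, 0, token, &load);
	if (err)
		return err;
	return dpaiop_run(mc_io, 0, token, &run);
}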
-+
-+/**
-+ * struct dpaiop_sl_version - AIOP SL (Service Layer) version
-+ * @major: AIOP SL major version number
-+ * @minor: AIOP SL minor version number
-+ * @revision: AIOP SL revision number
-+ */
-+struct dpaiop_sl_version {
-+ uint32_t major;
-+ uint32_t minor;
-+ uint32_t revision;
-+};
-+
-+/**
-+ * dpaiop_get_sl_version() - Get AIOP SL (Service Layer) version
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPAIOP object
-+ * @version: AIOP SL version number
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpaiop_get_sl_version(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpaiop_sl_version *version);
-+
-+/**
-+ * AIOP states
-+ *
-+ * AIOP internal states; they can be retrieved by calling the dpaiop_get_state() routine
-+ */
-+
-+/**
-+ * AIOP reset successfully completed.
-+ */
-+#define DPAIOP_STATE_RESET_DONE 0x00000000
-+/**
-+ * AIOP reset is ongoing.
-+ */
-+#define DPAIOP_STATE_RESET_ONGOING 0x00000001
-+
-+/**
-+ * AIOP image loading successfully completed.
-+ */
-+#define DPAIOP_STATE_LOAD_DONE 0x00000002
-+/**
-+ * AIOP image loading is ongoing.
-+ */
-+#define DPAIOP_STATE_LOAD_ONGOING 0x00000004
-+/**
-+ * AIOP image loading completed with error.
-+ */
-+#define DPAIOP_STATE_LOAD_ERROR 0x00000008
-+
-+/**
-+ * Boot process of AIOP cores is ongoing.
-+ */
-+#define DPAIOP_STATE_BOOT_ONGOING 0x00000010
-+/**
-+ * Boot process of AIOP cores completed with an error.
-+ */
-+#define DPAIOP_STATE_BOOT_ERROR 0x00000020
-+/**
-+ * AIOP cores are functional and running
-+ */
-+#define DPAIOP_STATE_RUNNING 0x00000040
-+/** @} */
-+
-+/**
-+ * dpaiop_get_state() - Get AIOP state
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPAIOP object
-+ * @state: AIOP state
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpaiop_get_state(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint32_t *state);
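
The DPAIOP_STATE_* bits above are what dpaiop_get_state() reports back. A small illustrative poll step checking for a running or failed boot might look like the sketch below (helper name is hypothetical; sleep/backoff and timeouts are omitted):

#include "fsl_dpaiop.h"   /* header above; include path is illustrative */

/* Returns 1 when the AIOP cores are running, a negative value on MC or
 * boot/load error, 0 if the boot is still in progress. */
static int aiop_boot_poll(struct fsl_mc_io *mc_io, uint16_t token)
{
	uint32_t state = 0;
	int err = dpaiop_get_state(mc_io, 0, token, &state);

	if (err)
		return err;
	if (state & (DPAIOP_STATE_LOAD_ERROR | DPAIOP_STATE_BOOT_ERROR))
		return -1;			/* boot failed */
	if (state & DPAIOP_STATE_RUNNING)
		return 1;			/* cores are up */
	return 0;				/* still loading/booting */
}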
-+
-+/**
-+ * dpaiop_set_time_of_day() - Set AIOP internal time-of-day
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPAIOP object
-+ * @time_of_day: Current number of milliseconds since the Epoch
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpaiop_set_time_of_day(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint64_t time_of_day);
-+
-+/**
-+ * dpaiop_get_time_of_day() - Get AIOP internal time-of-day
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPAIOP object
-+ * @time_of_day: Current number of milliseconds since the Epoch
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpaiop_get_time_of_day(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint64_t *time_of_day);
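
Since the time-of-day calls above exchange a plain millisecond count since the Epoch, wall-clock synchronisation reduces to a unit conversion. A sketch using clock_gettime() (POSIX assumed; helper name hypothetical):

#include <stdint.h>
#include <time.h>
#include "fsl_dpaiop.h"   /* header above; include path is illustrative */

static int sync_aiop_clock(struct fsl_mc_io *mc_io, uint16_t token)
{
	struct timespec ts;
	uint64_t ms;

	if (clock_gettime(CLOCK_REALTIME, &ts) != 0)
		return -1;

	/* Milliseconds since the Epoch, as the API above expects. */
	ms = (uint64_t)ts.tv_sec * 1000ULL + (uint64_t)ts.tv_nsec / 1000000ULL;

	return dpaiop_set_time_of_day(mc_io, 0, token, ms);
}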
-+
-+#endif /* __FSL_DPAIOP_H */
-diff --git a/drivers/net/dpaa2/mc/fsl_dpaiop_cmd.h b/drivers/net/dpaa2/mc/fsl_dpaiop_cmd.h
-new file mode 100644
-index 0000000..5b77bb8
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/fsl_dpaiop_cmd.h
-@@ -0,0 +1,190 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef _FSL_DPAIOP_CMD_H
-+#define _FSL_DPAIOP_CMD_H
-+
-+/* DPAIOP Version */
-+#define DPAIOP_VER_MAJOR 1
-+#define DPAIOP_VER_MINOR 2
-+
-+/* Command IDs */
-+#define DPAIOP_CMDID_CLOSE 0x800
-+#define DPAIOP_CMDID_OPEN 0x80a
-+#define DPAIOP_CMDID_CREATE 0x90a
-+#define DPAIOP_CMDID_DESTROY 0x900
-+
-+#define DPAIOP_CMDID_GET_ATTR 0x004
-+#define DPAIOP_CMDID_RESET 0x005
-+
-+#define DPAIOP_CMDID_SET_IRQ 0x010
-+#define DPAIOP_CMDID_GET_IRQ 0x011
-+#define DPAIOP_CMDID_SET_IRQ_ENABLE 0x012
-+#define DPAIOP_CMDID_GET_IRQ_ENABLE 0x013
-+#define DPAIOP_CMDID_SET_IRQ_MASK 0x014
-+#define DPAIOP_CMDID_GET_IRQ_MASK 0x015
-+#define DPAIOP_CMDID_GET_IRQ_STATUS 0x016
-+#define DPAIOP_CMDID_CLEAR_IRQ_STATUS 0x017
-+
-+#define DPAIOP_CMDID_LOAD 0x280
-+#define DPAIOP_CMDID_RUN 0x281
-+#define DPAIOP_CMDID_GET_SL_VERSION 0x282
-+#define DPAIOP_CMDID_GET_STATE 0x283
-+#define DPAIOP_CMDID_SET_TIME_OF_DAY 0x284
-+#define DPAIOP_CMDID_GET_TIME_OF_DAY 0x285
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPAIOP_CMD_OPEN(cmd, dpaiop_id) \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, dpaiop_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPAIOP_CMD_CREATE(cmd, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->aiop_id);\
-+ MC_CMD_OP(cmd, 0, 32, 32, int, cfg->aiop_container_id);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPAIOP_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
-+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \
-+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPAIOP_CMD_GET_IRQ(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPAIOP_RSP_GET_IRQ(cmd, type, irq_cfg) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
-+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \
-+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
-+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPAIOP_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPAIOP_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPAIOP_RSP_GET_IRQ_ENABLE(cmd, en) \
-+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPAIOP_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask);\
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPAIOP_CMD_GET_IRQ_MASK(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPAIOP_RSP_GET_IRQ_MASK(cmd, mask) \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPAIOP_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPAIOP_RSP_GET_IRQ_STATUS(cmd, status) \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPAIOP_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPAIOP_RSP_GET_ATTRIBUTES(cmd, attr) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\
-+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\
-+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPAIOP_CMD_LOAD(cmd, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, cfg->img_size); \
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->img_iova); \
-+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->options); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPAIOP_CMD_RUN(cmd, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, cfg->args_size); \
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->cores_mask); \
-+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->options); \
-+ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->args_iova); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPAIOP_RSP_GET_SL_VERSION(cmd, version) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, version->major);\
-+ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, version->minor);\
-+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, version->revision);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPAIOP_RSP_GET_STATE(cmd, state) \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, state)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPAIOP_CMD_SET_TIME_OF_DAY(cmd, time_of_day) \
-+ MC_CMD_OP(cmd, 0, 0, 64, uint64_t, time_of_day)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPAIOP_RSP_GET_TIME_OF_DAY(cmd, time_of_day) \
-+ MC_RSP_OP(cmd, 0, 0, 64, uint64_t, time_of_day)
-+
-+#endif /* _FSL_DPAIOP_CMD_H */
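
The MC_CMD_OP()/MC_RSP_OP() macros used throughout this header address each field by (parameter index, bit offset, bit width). A self-contained sketch of such an offset/width packer, for illustration only (these helper names are hypothetical and this is not the actual fsl_mc_cmd.h implementation):

#include <stdint.h>

/* Place 'val' into the 64-bit 'param' word at 'offset' with 'width' bits. */
static inline uint64_t mc_enc_field(uint64_t param, unsigned int offset,
				    unsigned int width, uint64_t val)
{
	uint64_t mask = (width < 64) ? ((1ULL << width) - 1) : ~0ULL;

	return (param & ~(mask << offset)) | ((val & mask) << offset);
}

/* Extract a field encoded the same way. */
static inline uint64_t mc_dec_field(uint64_t param, unsigned int offset,
				    unsigned int width)
{
	uint64_t mask = (width < 64) ? ((1ULL << width) - 1) : ~0ULL;

	return (param >> offset) & mask;
}

Read this way, DPAIOP_CMD_SET_IRQ_MASK() above places the 32-bit mask at bits 0-31 and the 8-bit irq_index at bits 32-39 of parameter 0.
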
-diff --git a/drivers/net/dpaa2/mc/fsl_dpbp.h b/drivers/net/dpaa2/mc/fsl_dpbp.h
-new file mode 100644
-index 0000000..9856bb8
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/fsl_dpbp.h
-@@ -0,0 +1,438 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef __FSL_DPBP_H
-+#define __FSL_DPBP_H
-+
-+/* Data Path Buffer Pool API
-+ * Contains initialization APIs and runtime control APIs for DPBP
-+ */
-+
-+struct fsl_mc_io;
-+
-+/**
-+ * dpbp_open() - Open a control session for the specified object.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @dpbp_id: DPBP unique ID
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * This function can be used to open a control session for an
-+ * already created object; an object may have been declared in
-+ * the DPL or by calling the dpbp_create() function.
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent commands for
-+ * this specific object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpbp_open(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ int dpbp_id,
-+ uint16_t *token);
-+
-+/**
-+ * dpbp_close() - Close the control session of the object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPBP object
-+ *
-+ * After this function is called, no further operations are
-+ * allowed on the object without opening a new control session.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpbp_close(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * struct dpbp_cfg - Structure representing DPBP configuration
-+ * @options: placeholder
-+ */
-+struct dpbp_cfg {
-+ uint32_t options;
-+};
-+
-+/**
-+ * dpbp_create() - Create the DPBP object.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @cfg: Configuration structure
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * Create the DPBP object, allocate required resources and
-+ * perform required initialization.
-+ *
-+ * The object can be created either by declaring it in the
-+ * DPL file, or by calling this function.
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent calls to
-+ * this specific object. For objects that are created using the
-+ * DPL file, call the dpbp_open() function to get an authentication
-+ * token first.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpbp_create(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ const struct dpbp_cfg *cfg,
-+ uint16_t *token);
-+
-+/**
-+ * dpbp_destroy() - Destroy the DPBP object and release all its resources.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPBP object
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dpbp_destroy(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * dpbp_enable() - Enable the DPBP.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPBP object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpbp_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * dpbp_disable() - Disable the DPBP.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPBP object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpbp_disable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * dpbp_is_enabled() - Check if the DPBP is enabled.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPBP object
-+ * @en: Returns '1' if object is enabled; '0' otherwise
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpbp_is_enabled(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int *en);
-+
-+/**
-+ * dpbp_reset() - Reset the DPBP, returning the object to its initial state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPBP object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpbp_reset(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * struct dpbp_irq_cfg - IRQ configuration
-+ * @addr: Address that must be written to signal a message-based interrupt
-+ * @val: Value to write into irq_addr address
-+ * @irq_num: A user defined number associated with this IRQ
-+ */
-+struct dpbp_irq_cfg {
-+ uint64_t addr;
-+ uint32_t val;
-+ int irq_num;
-+};
-+
-+/**
-+ * dpbp_set_irq() - Set IRQ information for the DPBP to trigger an interrupt.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPBP object
-+ * @irq_index: Identifies the interrupt index to configure
-+ * @irq_cfg: IRQ configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpbp_set_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ struct dpbp_irq_cfg *irq_cfg);
-+
-+/**
-+ * dpbp_get_irq() - Get IRQ information from the DPBP.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPBP object
-+ * @irq_index: The interrupt index to configure
-+ * @type: Interrupt type: 0 represents message interrupt
-+ * type (both irq_addr and irq_val are valid)
-+ * @irq_cfg: IRQ attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpbp_get_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ int *type,
-+ struct dpbp_irq_cfg *irq_cfg);
-+
-+/**
-+ * dpbp_set_irq_enable() - Set overall interrupt state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPBP object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Interrupt state - enable = 1, disable = 0
-+ *
-+ * Allows GPP software to control when interrupts are generated.
-+ * Each interrupt can have up to 32 causes. The enable/disable controls the
-+ * overall interrupt state; if the interrupt is disabled, none of the causes
-+ * can raise an interrupt.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpbp_set_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t en);
-+
-+/**
-+ * dpbp_get_irq_enable() - Get overall interrupt state
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPBP object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Returned interrupt state - enable = 1, disable = 0
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpbp_get_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t *en);
-+
-+/**
-+ * dpbp_set_irq_mask() - Set interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPBP object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: Event mask to trigger interrupt;
-+ * each bit:
-+ * 0 = ignore event
-+ * 1 = consider event for asserting IRQ
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpbp_set_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t mask);
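
A hedged sketch tying together the three IRQ setup calls declared above: program the message-interrupt address/data, unmask the causes of interest, then enable the interrupt as a whole. The helper name is hypothetical, the MSI address/data and irq_num come from whatever interrupt infrastructure is in use, and cmd_flags is passed as 0:

#include "fsl_dpbp.h"   /* header above; include path is illustrative */

static int dpbp_setup_irq(struct fsl_mc_io *mc_io, uint16_t token,
			  uint8_t irq_index, uint64_t msi_addr,
			  uint32_t msi_data, int irq_num)
{
	struct dpbp_irq_cfg cfg = {
		.addr    = msi_addr,	/* written by HW to signal the interrupt */
		.val     = msi_data,
		.irq_num = irq_num,	/* user-defined cookie */
	};
	int err;

	err = dpbp_set_irq(mc_io, 0, token, irq_index, &cfg);
	if (err)
		return err;

	/* Unmask all 32 possible causes, then enable the interrupt. */
	err = dpbp_set_irq_mask(mc_io, 0, token, irq_index, 0xffffffff);
	if (err)
		return err;
	return dpbp_set_irq_enable(mc_io, 0, token, irq_index, 1);
}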
-+
-+/**
-+ * dpbp_get_irq_mask() - Get interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPBP object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: Returned event mask to trigger interrupt
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpbp_get_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *mask);
-+
-+/**
-+ * dpbp_get_irq_status() - Get the current status of any pending interrupts.
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPBP object
-+ * @irq_index: The interrupt index to configure
-+ * @status: Returned interrupts status - one bit per cause:
-+ * 0 = no interrupt pending
-+ * 1 = interrupt pending
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpbp_get_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *status);
-+
-+/**
-+ * dpbp_clear_irq_status() - Clear a pending interrupt's status
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPBP object
-+ * @irq_index: The interrupt index to configure
-+ * @status: Bits to clear (W1C) - one bit per cause:
-+ * 0 = don't change
-+ * 1 = clear status bit
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpbp_clear_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t status);
-+
-+/**
-+ * struct dpbp_attr - Structure representing DPBP attributes
-+ * @id: DPBP object ID
-+ * @version: DPBP version
-+ * @bpid: Hardware buffer pool ID; should be used as an argument in
-+ * acquire/release operations on buffers
-+ */
-+struct dpbp_attr {
-+ int id;
-+ /**
-+ * struct version - Structure representing DPBP version
-+ * @major: DPBP major version
-+ * @minor: DPBP minor version
-+ */
-+ struct {
-+ uint16_t major;
-+ uint16_t minor;
-+ } version;
-+ uint16_t bpid;
-+};
-+
-+/**
-+ * dpbp_get_attributes() - Retrieve DPBP attributes.
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPBP object
-+ * @attr: Returned object's attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpbp_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpbp_attr *attr);
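
Putting the control-path calls above together, a typical bring-up obtains a token, enables the pool and reads back the hardware bpid that the buffer acquire/release path will use. An illustrative sketch (helper name hypothetical, cmd_flags passed as 0):

#include "fsl_dpbp.h"   /* header above; include path is illustrative */

static int dpbp_bring_up(struct fsl_mc_io *mc_io, int dpbp_id,
			 uint16_t *token, uint16_t *bpid)
{
	struct dpbp_attr attr;
	int err;

	err = dpbp_open(mc_io, 0, dpbp_id, token);
	if (err)
		return err;

	err = dpbp_enable(mc_io, 0, *token);
	if (err)
		goto close;

	err = dpbp_get_attributes(mc_io, 0, *token, &attr);
	if (err)
		goto disable;

	*bpid = attr.bpid;	/* used for buffer acquire/release */
	return 0;

disable:
	dpbp_disable(mc_io, 0, *token);
close:
	dpbp_close(mc_io, 0, *token);
	return err;
}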
-+
-+/**
-+ * DPBP notifications options
-+ */
-+
-+/**
-+ * BPSCN write will attempt to allocate into a cache (coherent write)
-+ */
-+#define DPBP_NOTIF_OPT_COHERENT_WRITE 0x00000001
-+
-+/**
-+ * struct dpbp_notification_cfg - Structure representing DPBP notifications
-+ * towards software
-+ * @depletion_entry: below this threshold the pool is "depleted";
-+ * set it to '0' to disable it
-+ * @depletion_exit: at or above this threshold the pool exits its
-+ * "depleted" state
-+ * @surplus_entry: above this threshold the pool is in "surplus" state;
-+ * set it to '0' to disable it
-+ * @surplus_exit: at or below this threshold the pool exits its
-+ * "surplus" state
-+ * @message_iova: MUST be given if either 'depletion_entry' or 'surplus_entry'
-+ * is not '0' (i.e. notifications are enabled); I/O virtual address
-+ * (must be in DMA-able memory and 16B aligned)
-+ * @message_ctx: The context that will be part of the BPSCN message and will
-+ * be written to 'message_iova'
-+ * @options: Mask of available options; use 'DPBP_NOTIF_OPT_<X>' values
-+ */
-+struct dpbp_notification_cfg {
-+ uint32_t depletion_entry;
-+ uint32_t depletion_exit;
-+ uint32_t surplus_entry;
-+ uint32_t surplus_exit;
-+ uint64_t message_iova;
-+ uint64_t message_ctx;
-+ uint16_t options;
-+};
-+
-+/**
-+ * dpbp_set_notifications() - Set notifications towards software
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPBP object
-+ * @cfg: notifications configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpbp_set_notifications(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpbp_notification_cfg *cfg);
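
An illustrative fill of struct dpbp_notification_cfg enabling only the depletion notifications described above. The helper name, threshold values and the 16B-aligned DMA-able address for the BPSCN message are placeholders:

#include "fsl_dpbp.h"   /* header above; include path is illustrative */

static int dpbp_enable_depletion_notif(struct fsl_mc_io *mc_io,
				       uint16_t token, uint64_t msg_iova)
{
	struct dpbp_notification_cfg cfg = {
		.depletion_entry = 32,	/* "depleted" below 32 free buffers */
		.depletion_exit  = 64,	/* leaves "depleted" at 64 or more */
		.surplus_entry   = 0,	/* surplus notifications disabled */
		.surplus_exit    = 0,
		.message_iova    = msg_iova,	/* 16B-aligned, DMA-able */
		.message_ctx     = 0,
		.options         = DPBP_NOTIF_OPT_COHERENT_WRITE,
	};

	return dpbp_set_notifications(mc_io, 0, token, &cfg);
}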
-+
-+/**
-+ * dpbp_get_notifications() - Get the notifications configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPBP object
-+ * @cfg: notifications configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpbp_get_notifications(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpbp_notification_cfg *cfg);
-+
-+#endif /* __FSL_DPBP_H */
-diff --git a/drivers/net/dpaa2/mc/fsl_dpbp_cmd.h b/drivers/net/dpaa2/mc/fsl_dpbp_cmd.h
-new file mode 100644
-index 0000000..71ad96a
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/fsl_dpbp_cmd.h
-@@ -0,0 +1,172 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef _FSL_DPBP_CMD_H
-+#define _FSL_DPBP_CMD_H
-+
-+/* DPBP Version */
-+#define DPBP_VER_MAJOR 2
-+#define DPBP_VER_MINOR 2
-+
-+/* Command IDs */
-+#define DPBP_CMDID_CLOSE 0x800
-+#define DPBP_CMDID_OPEN 0x804
-+#define DPBP_CMDID_CREATE 0x904
-+#define DPBP_CMDID_DESTROY 0x900
-+
-+#define DPBP_CMDID_ENABLE 0x002
-+#define DPBP_CMDID_DISABLE 0x003
-+#define DPBP_CMDID_GET_ATTR 0x004
-+#define DPBP_CMDID_RESET 0x005
-+#define DPBP_CMDID_IS_ENABLED 0x006
-+
-+#define DPBP_CMDID_SET_IRQ 0x010
-+#define DPBP_CMDID_GET_IRQ 0x011
-+#define DPBP_CMDID_SET_IRQ_ENABLE 0x012
-+#define DPBP_CMDID_GET_IRQ_ENABLE 0x013
-+#define DPBP_CMDID_SET_IRQ_MASK 0x014
-+#define DPBP_CMDID_GET_IRQ_MASK 0x015
-+#define DPBP_CMDID_GET_IRQ_STATUS 0x016
-+#define DPBP_CMDID_CLEAR_IRQ_STATUS 0x017
-+
-+#define DPBP_CMDID_SET_NOTIFICATIONS 0x01b0
-+#define DPBP_CMDID_GET_NOTIFICATIONS 0x01b1
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPBP_CMD_OPEN(cmd, dpbp_id) \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, dpbp_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPBP_RSP_IS_ENABLED(cmd, en) \
-+ MC_RSP_OP(cmd, 0, 0, 1, int, en)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPBP_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
-+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \
-+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPBP_CMD_GET_IRQ(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPBP_RSP_GET_IRQ(cmd, type, irq_cfg) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
-+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \
-+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
-+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPBP_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPBP_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPBP_RSP_GET_IRQ_ENABLE(cmd, en) \
-+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPBP_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask);\
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPBP_CMD_GET_IRQ_MASK(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPBP_RSP_GET_IRQ_MASK(cmd, mask) \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPBP_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPBP_RSP_GET_IRQ_STATUS(cmd, status) \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPBP_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPBP_RSP_GET_ATTRIBUTES(cmd, attr) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, attr->bpid); \
-+ MC_RSP_OP(cmd, 0, 32, 32, int, attr->id);\
-+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\
-+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPBP_CMD_SET_NOTIFICATIONS(cmd, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, cfg->depletion_entry); \
-+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, cfg->depletion_exit);\
-+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->surplus_entry);\
-+ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->surplus_exit);\
-+ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->options);\
-+ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx);\
-+ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPBP_CMD_GET_NOTIFICATIONS(cmd, cfg) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, cfg->depletion_entry); \
-+ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, cfg->depletion_exit);\
-+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->surplus_entry);\
-+ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->surplus_exit);\
-+ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, cfg->options);\
-+ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx);\
-+ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova);\
-+} while (0)
-+#endif /* _FSL_DPBP_CMD_H */
-diff --git a/drivers/net/dpaa2/mc/fsl_dpci.h b/drivers/net/dpaa2/mc/fsl_dpci.h
-new file mode 100644
-index 0000000..d885935
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/fsl_dpci.h
-@@ -0,0 +1,594 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef __FSL_DPCI_H
-+#define __FSL_DPCI_H
-+
-+/* Data Path Communication Interface API
-+ * Contains initialization APIs and runtime control APIs for DPCI
-+ */
-+
-+struct fsl_mc_io;
-+
-+/** General DPCI macros */
-+
-+/**
-+ * Maximum number of Tx/Rx priorities per DPCI object
-+ */
-+#define DPCI_PRIO_NUM 2
-+
-+/**
-+ * Indicates an invalid frame queue
-+ */
-+#define DPCI_FQID_NOT_VALID (uint32_t)(-1)
-+
-+/**
-+ * All queues considered; see dpci_set_rx_queue()
-+ */
-+#define DPCI_ALL_QUEUES (uint8_t)(-1)
-+
-+/**
-+ * dpci_open() - Open a control session for the specified object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @dpci_id: DPCI unique ID
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * This function can be used to open a control session for an
-+ * already created object; an object may have been declared in
-+ * the DPL or by calling the dpci_create() function.
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent commands for
-+ * this specific object.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpci_open(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ int dpci_id,
-+ uint16_t *token);
-+
-+/**
-+ * dpci_close() - Close the control session of the object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCI object
-+ *
-+ * After this function is called, no further operations are
-+ * allowed on the object without opening a new control session.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpci_close(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * struct dpci_cfg - Structure representing DPCI configuration
-+ * @num_of_priorities: Number of receive priorities (queues) for the DPCI;
-+ * note that the number of transmit priorities (queues)
-+ * is determined by the number of receive priorities of
-+ * the peer DPCI object
-+ */
-+struct dpci_cfg {
-+ uint8_t num_of_priorities;
-+};
-+
-+/**
-+ * dpci_create() - Create the DPCI object.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @cfg: Configuration structure
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * Create the DPCI object, allocate required resources and perform required
-+ * initialization.
-+ *
-+ * The object can be created either by declaring it in the
-+ * DPL file, or by calling this function.
-+ *
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent calls to
-+ * this specific object. For objects that are created using the
-+ * DPL file, call the dpci_open() function to get an authentication
-+ * token first.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpci_create(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ const struct dpci_cfg *cfg,
-+ uint16_t *token);
-+
-+/**
-+ * dpci_destroy() - Destroy the DPCI object and release all its resources.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCI object
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dpci_destroy(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * dpci_enable() - Enable the DPCI, allow sending and receiving frames.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCI object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpci_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * dpci_disable() - Disable the DPCI, stop sending and receiving frames.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCI object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpci_disable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * dpci_is_enabled() - Check if the DPCI is enabled.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCI object
-+ * @en: Returns '1' if object is enabled; '0' otherwise
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpci_is_enabled(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int *en);
-+
-+/**
-+ * dpci_reset() - Reset the DPCI, returning the object to its initial state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCI object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpci_reset(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/** DPCI IRQ Index and Events */
-+
-+/**
-+ * IRQ index
-+ */
-+#define DPCI_IRQ_INDEX 0
-+
-+/**
-+ * IRQ event - indicates a change in link state
-+ */
-+#define DPCI_IRQ_EVENT_LINK_CHANGED 0x00000001
-+/**
-+ * IRQ event - indicates a connection event
-+ */
-+#define DPCI_IRQ_EVENT_CONNECTED 0x00000002
-+/**
-+ * IRQ event - indicates a disconnection event
-+ */
-+#define DPCI_IRQ_EVENT_DISCONNECTED 0x00000004
-+
-+/**
-+ * struct dpci_irq_cfg - IRQ configuration
-+ * @addr: Address that must be written to signal a message-based interrupt
-+ * @val: Value to write into irq_addr address
-+ * @irq_num: A user defined number associated with this IRQ
-+ */
-+struct dpci_irq_cfg {
-+ uint64_t addr;
-+ uint32_t val;
-+ int irq_num;
-+};
-+
-+/**
-+ * dpci_set_irq() - Set IRQ information for the DPCI to trigger an interrupt.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCI object
-+ * @irq_index: Identifies the interrupt index to configure
-+ * @irq_cfg: IRQ configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpci_set_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ struct dpci_irq_cfg *irq_cfg);
-+
-+/**
-+ * dpci_get_irq() - Get IRQ information from the DPCI.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCI object
-+ * @irq_index: The interrupt index to configure
-+ * @type: Interrupt type: 0 represents message interrupt
-+ * type (both irq_addr and irq_val are valid)
-+ * @irq_cfg: IRQ attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpci_get_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ int *type,
-+ struct dpci_irq_cfg *irq_cfg);
-+
-+/**
-+ * dpci_set_irq_enable() - Set overall interrupt state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCI object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Interrupt state - enable = 1, disable = 0
-+ *
-+ * Allows GPP software to control when interrupts are generated.
-+ * Each interrupt can have up to 32 causes. The enable/disable controls the
-+ * overall interrupt state; if the interrupt is disabled, none of the causes
-+ * can raise an interrupt.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpci_set_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t en);
-+
-+/**
-+ * dpci_get_irq_enable() - Get overall interrupt state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCI object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Returned interrupt state - enable = 1, disable = 0
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpci_get_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t *en);
-+
-+/**
-+ * dpci_set_irq_mask() - Set interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCI object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: event mask to trigger interrupt;
-+ * each bit:
-+ * 0 = ignore event
-+ * 1 = consider event for asserting IRQ
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpci_set_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t mask);
-+
-+/**
-+ * dpci_get_irq_mask() - Get interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCI object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: Returned event mask to trigger interrupt
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpci_get_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *mask);
-+
-+/**
-+ * dpci_get_irq_status() - Get the current status of any pending interrupts.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCI object
-+ * @irq_index: The interrupt index to configure
-+ * @status: Returned interrupts status - one bit per cause:
-+ * 0 = no interrupt pending
-+ * 1 = interrupt pending
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpci_get_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *status);
-+
-+/**
-+ * dpci_clear_irq_status() - Clear a pending interrupt's status
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCI object
-+ * @irq_index: The interrupt index to configure
-+ * @status: bits to clear (W1C) - one bit per cause:
-+ * 0 = don't change
-+ * 1 = clear status bit
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpci_clear_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t status);
-+
-+/**
-+ * struct dpci_attr - Structure representing DPCI attributes
-+ * @id: DPCI object ID
-+ * @version: DPCI version
-+ * @num_of_priorities: Number of receive priorities
-+ */
-+struct dpci_attr {
-+ int id;
-+ /**
-+ * struct version - Structure representing DPCI version
-+ * @major: DPCI major version
-+ * @minor: DPCI minor version
-+ */
-+ struct {
-+ uint16_t major;
-+ uint16_t minor;
-+ } version;
-+ uint8_t num_of_priorities;
-+};
-+
-+/**
-+ * dpci_get_attributes() - Retrieve DPCI attributes.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCI object
-+ * @attr: Returned object's attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpci_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpci_attr *attr);
-+
-+/**
-+ * struct dpci_peer_attr - Structure representing the peer DPCI attributes
-+ * @peer_id: DPCI peer id; if no peer is connected returns (-1)
-+ * @num_of_priorities: The peer's number of receive priorities; determines the
-+ * number of transmit priorities for the local DPCI object
-+ */
-+struct dpci_peer_attr {
-+ int peer_id;
-+ uint8_t num_of_priorities;
-+};
-+
-+/**
-+ * dpci_get_peer_attributes() - Retrieve peer DPCI attributes.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCI object
-+ * @attr: Returned peer attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpci_get_peer_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpci_peer_attr *attr);
-+
-+/**
-+ * dpci_get_link_state() - Retrieve the DPCI link state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCI object
-+ * @up: Returned link state; returns '1' if link is up, '0' otherwise
-+ *
-+ * A DPCI can be connected to another DPCI; together they
-+ * create a 'link'. In order to use the DPCI Tx and Rx queues,
-+ * both objects must be enabled.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpci_get_link_state(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int *up);
-+
-+/**
-+ * enum dpci_dest - DPCI destination types
-+ * @DPCI_DEST_NONE: Unassigned destination; the queue is set in parked mode
-+ * and does not generate FQDAN notifications; user is
-+ * expected to dequeue from the queue based on polling or
-+ * other user-defined method
-+ * @DPCI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
-+ * notifications to the specified DPIO; user is expected
-+ * to dequeue from the queue only after notification is
-+ * received
-+ * @DPCI_DEST_DPCON: The queue is set in schedule mode and does not generate
-+ * FQDAN notifications, but is connected to the specified
-+ * DPCON object;
-+ * user is expected to dequeue from the DPCON channel
-+ */
-+enum dpci_dest {
-+ DPCI_DEST_NONE = 0,
-+ DPCI_DEST_DPIO = 1,
-+ DPCI_DEST_DPCON = 2
-+};
-+
-+/**
-+ * struct dpci_dest_cfg - Structure representing DPCI destination configuration
-+ * @dest_type: Destination type
-+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
-+ * @priority: Priority selection within the DPIO or DPCON channel; valid
-+ * values are 0-1 or 0-7, depending on the number of priorities
-+ * in that channel; not relevant for 'DPCI_DEST_NONE' option
-+ */
-+struct dpci_dest_cfg {
-+ enum dpci_dest dest_type;
-+ int dest_id;
-+ uint8_t priority;
-+};
-+
-+/** DPCI queue modification options */
-+
-+/**
-+ * Select to modify the user's context associated with the queue
-+ */
-+#define DPCI_QUEUE_OPT_USER_CTX 0x00000001
-+
-+/**
-+ * Select to modify the queue's destination
-+ */
-+#define DPCI_QUEUE_OPT_DEST 0x00000002
-+
-+/**
-+ * struct dpci_rx_queue_cfg - Structure representing RX queue configuration
-+ * @options: Flags representing the suggested modifications to the queue;
-+ * Use any combination of 'DPCI_QUEUE_OPT_<X>' flags
-+ * @user_ctx: User context value provided in the frame descriptor of each
-+ * dequeued frame;
-+ * valid only if 'DPCI_QUEUE_OPT_USER_CTX' is contained in
-+ * 'options'
-+ * @dest_cfg: Queue destination parameters;
-+ * valid only if 'DPCI_QUEUE_OPT_DEST' is contained in 'options'
-+ */
-+struct dpci_rx_queue_cfg {
-+ uint32_t options;
-+ uint64_t user_ctx;
-+ struct dpci_dest_cfg dest_cfg;
-+};
-+
-+/**
-+ * dpci_set_rx_queue() - Set Rx queue configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCI object
-+ * @priority: Select the queue relative to number of
-+ * priorities configured at DPCI creation; use
-+ * DPCI_ALL_QUEUES to configure all Rx queues
-+ * identically.
-+ * @cfg: Rx queue configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpci_set_rx_queue(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t priority,
-+ const struct dpci_rx_queue_cfg *cfg);
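
A hedged example of the Rx-queue configuration described above: attach every Rx queue of the DPCI to a DPIO for FQDAN notifications and stamp a user context into dequeued frames. The helper name, DPIO id and context value are placeholders:

#include "fsl_dpci.h"   /* header above; include path is illustrative */

static int dpci_attach_rx_to_dpio(struct fsl_mc_io *mc_io, uint16_t token,
				  int dpio_id, uint64_t user_ctx)
{
	struct dpci_rx_queue_cfg cfg = {
		.options  = DPCI_QUEUE_OPT_USER_CTX | DPCI_QUEUE_OPT_DEST,
		.user_ctx = user_ctx,
		.dest_cfg = {
			.dest_type = DPCI_DEST_DPIO,	/* schedule mode + FQDAN */
			.dest_id   = dpio_id,
			.priority  = 0,
		},
	};

	/* DPCI_ALL_QUEUES configures every Rx priority identically. */
	return dpci_set_rx_queue(mc_io, 0, token, DPCI_ALL_QUEUES, &cfg);
}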
-+
-+/**
-+ * struct dpci_rx_queue_attr - Structure representing Rx queue attributes
-+ * @user_ctx: User context value provided in the frame descriptor of each
-+ * dequeued frame
-+ * @dest_cfg: Queue destination configuration
-+ * @fqid: Virtual FQID value to be used for dequeue operations
-+ */
-+struct dpci_rx_queue_attr {
-+ uint64_t user_ctx;
-+ struct dpci_dest_cfg dest_cfg;
-+ uint32_t fqid;
-+};
-+
-+/**
-+ * dpci_get_rx_queue() - Retrieve Rx queue attributes.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCI object
-+ * @priority: Select the queue relative to number of
-+ * priorities configured at DPCI creation
-+ * @attr: Returned Rx queue attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpci_get_rx_queue(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t priority,
-+ struct dpci_rx_queue_attr *attr);
-+
-+/**
-+ * struct dpci_tx_queue_attr - Structure representing attributes of Tx queues
-+ * @fqid: Virtual FQID to be used for sending frames to peer DPCI;
-+ * returns 'DPCI_FQID_NOT_VALID' if no peer is connected or if
-+ * the selected priority exceeds the number of priorities of the
-+ * peer DPCI object
-+ */
-+struct dpci_tx_queue_attr {
-+ uint32_t fqid;
-+};
-+
-+/**
-+ * dpci_get_tx_queue() - Retrieve Tx queue attributes.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCI object
-+ * @priority: Select the queue relative to number of
-+ * priorities of the peer DPCI object
-+ * @attr: Returned Tx queue attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpci_get_tx_queue(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t priority,
-+ struct dpci_tx_queue_attr *attr);
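
Before transmitting, a caller would typically confirm that the link is up and that the requested priority maps to a valid peer FQID. An illustrative check combining dpci_get_link_state() and dpci_get_tx_queue() (helper name hypothetical, cmd_flags passed as 0):

#include "fsl_dpci.h"   /* header above; include path is illustrative */

/* Returns the Tx FQID for 'priority', or DPCI_FQID_NOT_VALID if the link
 * is down, no peer is connected, or the priority is out of range. */
static uint32_t dpci_tx_fqid(struct fsl_mc_io *mc_io, uint16_t token,
			     uint8_t priority)
{
	struct dpci_tx_queue_attr attr;
	int up = 0;

	if (dpci_get_link_state(mc_io, 0, token, &up) || !up)
		return DPCI_FQID_NOT_VALID;

	if (dpci_get_tx_queue(mc_io, 0, token, priority, &attr))
		return DPCI_FQID_NOT_VALID;

	return attr.fqid;	/* DPCI_FQID_NOT_VALID when no valid peer queue */
}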
-+
-+#endif /* __FSL_DPCI_H */
-diff --git a/drivers/net/dpaa2/mc/fsl_dpci_cmd.h b/drivers/net/dpaa2/mc/fsl_dpci_cmd.h
-new file mode 100644
-index 0000000..f45e435
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/fsl_dpci_cmd.h
-@@ -0,0 +1,200 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef _FSL_DPCI_CMD_H
-+#define _FSL_DPCI_CMD_H
-+
-+/* DPCI Version */
-+#define DPCI_VER_MAJOR 2
-+#define DPCI_VER_MINOR 2
-+
-+/* Command IDs */
-+#define DPCI_CMDID_CLOSE 0x800
-+#define DPCI_CMDID_OPEN 0x807
-+#define DPCI_CMDID_CREATE 0x907
-+#define DPCI_CMDID_DESTROY 0x900
-+
-+#define DPCI_CMDID_ENABLE 0x002
-+#define DPCI_CMDID_DISABLE 0x003
-+#define DPCI_CMDID_GET_ATTR 0x004
-+#define DPCI_CMDID_RESET 0x005
-+#define DPCI_CMDID_IS_ENABLED 0x006
-+
-+#define DPCI_CMDID_SET_IRQ 0x010
-+#define DPCI_CMDID_GET_IRQ 0x011
-+#define DPCI_CMDID_SET_IRQ_ENABLE 0x012
-+#define DPCI_CMDID_GET_IRQ_ENABLE 0x013
-+#define DPCI_CMDID_SET_IRQ_MASK 0x014
-+#define DPCI_CMDID_GET_IRQ_MASK 0x015
-+#define DPCI_CMDID_GET_IRQ_STATUS 0x016
-+#define DPCI_CMDID_CLEAR_IRQ_STATUS 0x017
-+
-+#define DPCI_CMDID_SET_RX_QUEUE 0x0e0
-+#define DPCI_CMDID_GET_LINK_STATE 0x0e1
-+#define DPCI_CMDID_GET_PEER_ATTR 0x0e2
-+#define DPCI_CMDID_GET_RX_QUEUE 0x0e3
-+#define DPCI_CMDID_GET_TX_QUEUE 0x0e4
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPCI_CMD_OPEN(cmd, dpci_id) \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, dpci_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPCI_CMD_CREATE(cmd, cfg) \
-+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->num_of_priorities)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPCI_RSP_IS_ENABLED(cmd, en) \
-+ MC_RSP_OP(cmd, 0, 0, 1, int, en)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPCI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
-+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
-+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPCI_CMD_GET_IRQ(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPCI_RSP_GET_IRQ(cmd, type, irq_cfg) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
-+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
-+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
-+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPCI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPCI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPCI_RSP_GET_IRQ_ENABLE(cmd, en) \
-+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPCI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPCI_CMD_GET_IRQ_MASK(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPCI_RSP_GET_IRQ_MASK(cmd, mask) \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPCI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPCI_RSP_GET_IRQ_STATUS(cmd, status) \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPCI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPCI_RSP_GET_ATTR(cmd, attr) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\
-+ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, attr->num_of_priorities);\
-+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\
-+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPCI_RSP_GET_PEER_ATTR(cmd, attr) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->peer_id);\
-+ MC_RSP_OP(cmd, 1, 0, 8, uint8_t, attr->num_of_priorities);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPCI_RSP_GET_LINK_STATE(cmd, up) \
-+ MC_RSP_OP(cmd, 0, 0, 1, int, up)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPCI_CMD_SET_RX_QUEUE(cmd, priority, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id);\
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority);\
-+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority);\
-+ MC_CMD_OP(cmd, 0, 48, 4, enum dpci_dest, cfg->dest_cfg.dest_type);\
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx);\
-+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPCI_CMD_GET_RX_QUEUE(cmd, priority) \
-+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPCI_RSP_GET_RX_QUEUE(cmd, attr) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id);\
-+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\
-+ MC_RSP_OP(cmd, 0, 48, 4, enum dpci_dest, attr->dest_cfg.dest_type);\
-+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx);\
-+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->fqid);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPCI_CMD_GET_TX_QUEUE(cmd, priority) \
-+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPCI_RSP_GET_TX_QUEUE(cmd, attr) \
-+ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, attr->fqid)
-+
-+#endif /* _FSL_DPCI_CMD_H */
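For reference, the command and response macros in fsl_dpci_cmd.h are consumed by thin wrapper functions in the matching dpci.c flib. A minimal sketch of that pattern for DPCI_CMDID_GET_ATTR / DPCI_RSP_GET_ATTR follows; it assumes the MC driver's struct mc_command, mc_encode_cmd_header() and mc_send_command() helpers, and a struct dpci_attr carrying the fields the response macro extracts (id, num_of_priorities, version.major/minor) — none of which are shown in this hunk.

/* Hypothetical wrapper, shown only to illustrate how the macros above are
 * consumed; mc_command, mc_encode_cmd_header() and mc_send_command() are
 * assumed to come from the MC driver's fsl_mc_cmd.h / fsl_mc_sys.h. */
int dpci_get_attributes(struct fsl_mc_io *mc_io,
			uint32_t cmd_flags,
			uint16_t token,
			struct dpci_attr *attr)
{
	struct mc_command cmd = { 0 };
	int err;

	/* Encode the command header from the command ID, the flags and the
	 * authentication token obtained from dpci_open(). */
	cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_ATTR,
					  cmd_flags, token);

	/* Send the command to the MC firmware and wait for completion. */
	err = mc_send_command(mc_io, &cmd);
	if (err)
		return err;

	/* Unpack the response words; DPCI_RSP_GET_ATTR expands to a series
	 * of MC_RSP_OP() field extractions into 'attr'. */
	DPCI_RSP_GET_ATTR(cmd, attr);

	return 0;
}

Every other GET/SET wrapper in these headers follows the same encode-send-decode shape; only the command ID and the pack/unpack macros change.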
-diff --git a/drivers/net/dpaa2/mc/fsl_dpcon.h b/drivers/net/dpaa2/mc/fsl_dpcon.h
-new file mode 100644
-index 0000000..2555be5
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/fsl_dpcon.h
-@@ -0,0 +1,407 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef __FSL_DPCON_H
-+#define __FSL_DPCON_H
-+
-+/* Data Path Concentrator API
-+ * Contains initialization APIs and runtime control APIs for DPCON
-+ */
-+
-+struct fsl_mc_io;
-+
-+/** General DPCON macros */
-+
-+/**
-+ * Use it to disable notifications; see dpcon_set_notification()
-+ */
-+#define DPCON_INVALID_DPIO_ID (int)(-1)
-+
-+/**
-+ * dpcon_open() - Open a control session for the specified object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @dpcon_id: DPCON unique ID
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * This function can be used to open a control session for an
-+ * already created object; an object may have been declared in
-+ * the DPL or by calling the dpcon_create() function.
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent commands for
-+ * this specific object.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpcon_open(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ int dpcon_id,
-+ uint16_t *token);
-+
-+/**
-+ * dpcon_close() - Close the control session of the object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCON object
-+ *
-+ * After this function is called, no further operations are
-+ * allowed on the object without opening a new control session.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpcon_close(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * struct dpcon_cfg - Structure representing DPCON configuration
-+ * @num_priorities: Number of priorities for the DPCON channel (1-8)
-+ */
-+struct dpcon_cfg {
-+ uint8_t num_priorities;
-+};
-+
-+/**
-+ * dpcon_create() - Create the DPCON object.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @cfg: Configuration structure
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * Create the DPCON object, allocate required resources and
-+ * perform required initialization.
-+ *
-+ * The object can be created either by declaring it in the
-+ * DPL file, or by calling this function.
-+ *
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent calls to
-+ * this specific object. For objects that are created using the
-+ * DPL file, call dpcon_open() function to get an authentication
-+ * token first.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpcon_create(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ const struct dpcon_cfg *cfg,
-+ uint16_t *token);
-+
-+/**
-+ * dpcon_destroy() - Destroy the DPCON object and release all its resources.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCON object
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dpcon_destroy(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * dpcon_enable() - Enable the DPCON
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCON object
-+ *
-+ * Return: '0' on Success; Error code otherwise
-+ */
-+int dpcon_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * dpcon_disable() - Disable the DPCON
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCON object
-+ *
-+ * Return: '0' on Success; Error code otherwise
-+ */
-+int dpcon_disable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * dpcon_is_enabled() - Check if the DPCON is enabled.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCON object
-+ * @en: Returns '1' if object is enabled; '0' otherwise
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpcon_is_enabled(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int *en);
-+
-+/**
-+ * dpcon_reset() - Reset the DPCON, returns the object to initial state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCON object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpcon_reset(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * struct dpcon_irq_cfg - IRQ configuration
-+ * @addr: Address that must be written to signal a message-based interrupt
-+ * @val: Value to write into irq_addr address
-+ * @irq_num: A user defined number associated with this IRQ
-+ */
-+struct dpcon_irq_cfg {
-+ uint64_t addr;
-+ uint32_t val;
-+ int irq_num;
-+};
-+
-+/**
-+ * dpcon_set_irq() - Set IRQ information for the DPCON to trigger an interrupt.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCON object
-+ * @irq_index: Identifies the interrupt index to configure
-+ * @irq_cfg: IRQ configuration
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpcon_set_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ struct dpcon_irq_cfg *irq_cfg);
-+
-+/**
-+ * dpcon_get_irq() - Get IRQ information from the DPCON.
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCON object
-+ * @irq_index: The interrupt index to configure
-+ * @type: Interrupt type: 0 represents message interrupt
-+ * type (both irq_addr and irq_val are valid)
-+ * @irq_cfg: IRQ attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpcon_get_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ int *type,
-+ struct dpcon_irq_cfg *irq_cfg);
-+
-+/**
-+ * dpcon_set_irq_enable() - Set overall interrupt state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCON object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Interrupt state - enable = 1, disable = 0
-+ *
-+ * Allows GPP software to control when interrupts are generated.
-+ * Each interrupt can have up to 32 causes. The enable/disable controls the
-+ * overall interrupt state: if the interrupt is disabled, none of its causes
-+ * can assert an interrupt.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpcon_set_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t en);
-+
-+/**
-+ * dpcon_get_irq_enable() - Get overall interrupt state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCON object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Returned interrupt state - enable = 1, disable = 0
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpcon_get_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t *en);
-+
-+/**
-+ * dpcon_set_irq_mask() - Set interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCON object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: Event mask to trigger interrupt;
-+ * each bit:
-+ * 0 = ignore event
-+ * 1 = consider event for asserting IRQ
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpcon_set_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t mask);
-+
-+/**
-+ * dpcon_get_irq_mask() - Get interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCON object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: Returned event mask to trigger interrupt
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpcon_get_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *mask);
-+
-+/**
-+ * dpcon_get_irq_status() - Get the current status of any pending interrupts.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCON object
-+ * @irq_index: The interrupt index to configure
-+ * @status: Returned interrupt status - one bit per cause:
-+ * 0 = no interrupt pending
-+ * 1 = interrupt pending
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpcon_get_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *status);
-+
-+/**
-+ * dpcon_clear_irq_status() - Clear a pending interrupt's status
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCON object
-+ * @irq_index: The interrupt index to configure
-+ * @status: bits to clear (W1C) - one bit per cause:
-+ * 0 = don't change
-+ * 1 = clear status bit
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpcon_clear_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t status);
-+
-+/**
-+ * struct dpcon_attr - Structure representing DPCON attributes
-+ * @id: DPCON object ID
-+ * @version: DPCON version
-+ * @qbman_ch_id: Channel ID to be used by dequeue operation
-+ * @num_priorities: Number of priorities for the DPCON channel (1-8)
-+ */
-+struct dpcon_attr {
-+ int id;
-+ /**
-+ * struct version - DPCON version
-+ * @major: DPCON major version
-+ * @minor: DPCON minor version
-+ */
-+ struct {
-+ uint16_t major;
-+ uint16_t minor;
-+ } version;
-+ uint16_t qbman_ch_id;
-+ uint8_t num_priorities;
-+};
-+
-+/**
-+ * dpcon_get_attributes() - Retrieve DPCON attributes.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCON object
-+ * @attr: Object's attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpcon_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpcon_attr *attr);
-+
-+/**
-+ * struct dpcon_notification_cfg - Structure representing notification parameters
-+ * @dpio_id: DPIO object ID; must be configured with a notification channel;
-+ * to disable notifications set it to 'DPCON_INVALID_DPIO_ID';
-+ * @priority: Priority selection within the DPIO channel; valid values
-+ * are 0-7, depending on the number of priorities in that channel
-+ * @user_ctx: User context value provided with each CDAN message
-+ */
-+struct dpcon_notification_cfg {
-+ int dpio_id;
-+ uint8_t priority;
-+ uint64_t user_ctx;
-+};
-+
-+/**
-+ * dpcon_set_notification() - Set DPCON notification destination
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCON object
-+ * @cfg: Notification parameters
-+ *
-+ * Return: '0' on Success; Error code otherwise
-+ */
-+int dpcon_set_notification(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpcon_notification_cfg *cfg);
-+
-+#endif /* __FSL_DPCON_H */
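The DPCON calls declared above compose into the usual open/configure/enable flow. A minimal sketch with a hypothetical helper; the cmd_flags value of 0 (no special MC_CMD_FLAG_ bits) and the surrounding DPIO/portal setup are assumptions rather than part of the header.

#include <stdint.h>
#include <fsl_dpcon.h>	/* the header removed above */

/* Hypothetical helper; dpcon_id, dpio_id and user_ctx come from the caller. */
static int setup_dpcon_channel(struct fsl_mc_io *mc_io, int dpcon_id,
			       int dpio_id, uint64_t user_ctx,
			       uint16_t *qbman_ch_id)
{
	struct dpcon_notification_cfg notif;
	struct dpcon_attr attr;
	uint16_t token;
	int err;

	err = dpcon_open(mc_io, 0, dpcon_id, &token);
	if (err)
		return err;

	err = dpcon_get_attributes(mc_io, 0, token, &attr);
	if (err)
		goto out;
	*qbman_ch_id = attr.qbman_ch_id;	/* channel used for dequeues */

	/* Route CDAN notifications for this channel to the given DPIO. */
	notif.dpio_id = dpio_id;		/* or DPCON_INVALID_DPIO_ID */
	notif.priority = 0;			/* 0..num_priorities-1 */
	notif.user_ctx = user_ctx;
	err = dpcon_set_notification(mc_io, 0, token, &notif);
	if (err)
		goto out;

	err = dpcon_enable(mc_io, 0, token);
out:
	/* The control session is only needed for configuration. */
	dpcon_close(mc_io, 0, token);
	return err;
}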
-diff --git a/drivers/net/dpaa2/mc/fsl_dpcon_cmd.h b/drivers/net/dpaa2/mc/fsl_dpcon_cmd.h
-new file mode 100644
-index 0000000..ecb40d0
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/fsl_dpcon_cmd.h
-@@ -0,0 +1,162 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef _FSL_DPCON_CMD_H
-+#define _FSL_DPCON_CMD_H
-+
-+/* DPCON Version */
-+#define DPCON_VER_MAJOR 2
-+#define DPCON_VER_MINOR 2
-+
-+/* Command IDs */
-+#define DPCON_CMDID_CLOSE 0x800
-+#define DPCON_CMDID_OPEN 0x808
-+#define DPCON_CMDID_CREATE 0x908
-+#define DPCON_CMDID_DESTROY 0x900
-+
-+#define DPCON_CMDID_ENABLE 0x002
-+#define DPCON_CMDID_DISABLE 0x003
-+#define DPCON_CMDID_GET_ATTR 0x004
-+#define DPCON_CMDID_RESET 0x005
-+#define DPCON_CMDID_IS_ENABLED 0x006
-+
-+#define DPCON_CMDID_SET_IRQ 0x010
-+#define DPCON_CMDID_GET_IRQ 0x011
-+#define DPCON_CMDID_SET_IRQ_ENABLE 0x012
-+#define DPCON_CMDID_GET_IRQ_ENABLE 0x013
-+#define DPCON_CMDID_SET_IRQ_MASK 0x014
-+#define DPCON_CMDID_GET_IRQ_MASK 0x015
-+#define DPCON_CMDID_GET_IRQ_STATUS 0x016
-+#define DPCON_CMDID_CLEAR_IRQ_STATUS 0x017
-+
-+#define DPCON_CMDID_SET_NOTIFICATION 0x100
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPCON_CMD_OPEN(cmd, dpcon_id) \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPCON_CMD_CREATE(cmd, cfg) \
-+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->num_priorities)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPCON_RSP_IS_ENABLED(cmd, en) \
-+ MC_RSP_OP(cmd, 0, 0, 1, int, en)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPCON_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
-+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
-+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPCON_CMD_GET_IRQ(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPCON_RSP_GET_IRQ(cmd, type, irq_cfg) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val);\
-+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
-+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
-+ MC_RSP_OP(cmd, 2, 32, 32, int, type);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPCON_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPCON_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPCON_RSP_GET_IRQ_ENABLE(cmd, en) \
-+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPCON_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPCON_CMD_GET_IRQ_MASK(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPCON_RSP_GET_IRQ_MASK(cmd, mask) \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPCON_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPCON_RSP_GET_IRQ_STATUS(cmd, status) \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPCON_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPCON_RSP_GET_ATTR(cmd, attr) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\
-+ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->qbman_ch_id);\
-+ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, attr->num_priorities);\
-+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\
-+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPCON_CMD_SET_NOTIFICATION(cmd, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dpio_id);\
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->priority);\
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx);\
-+} while (0)
-+
-+#endif /* _FSL_DPCON_CMD_H */
-diff --git a/drivers/net/dpaa2/mc/fsl_dpdbg.h b/drivers/net/dpaa2/mc/fsl_dpdbg.h
-new file mode 100644
-index 0000000..ead22e8
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/fsl_dpdbg.h
-@@ -0,0 +1,635 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef __FSL_DPDBG_H
-+#define __FSL_DPDBG_H
-+
-+#include <fsl_dpkg.h>
-+#include <fsl_dpmac.h>
-+#include <fsl_dpni.h>
-+
-+/* Data Path Debug API
-+ * Contains initialization APIs and runtime control APIs for DPDBG
-+ */
-+
-+struct fsl_mc_io;
-+
-+/**
-+ * dpdbg_open() - Open a control session for the specified object.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @dpdbg_id: DPDBG unique ID
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * This function can be used to open a control session for an
-+ * already created object.
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent commands for
-+ * this specific object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdbg_open(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ int dpdbg_id,
-+ uint16_t *token);
-+
-+/**
-+ * dpdbg_close() - Close the control session of the object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDBG object
-+ *
-+ * After this function is called, no further operations are
-+ * allowed on the object without opening a new control session.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdbg_close(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * struct dpdbg_attr - Structure representing DPDBG attributes
-+ * @id: DPDBG object ID
-+ * @version: DPDBG version
-+ */
-+struct dpdbg_attr {
-+ int id;
-+ /**
-+ * struct version - Structure representing DPDBG version
-+ * @major: DPDBG major version
-+ * @minor: DPDBG minor version
-+ */
-+ struct {
-+ uint16_t major;
-+ uint16_t minor;
-+ } version;
-+};
-+
-+/**
-+ * dpdbg_get_attributes() - Retrieve DPDBG attributes.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDBG object
-+ * @attr: Returned object's attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdbg_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpdbg_attr *attr);
-+
-+/**
-+ * struct dpdbg_dpni_info - Info of DPNI
-+ * @max_senders: Maximum number of different senders; used as the number
-+ * of dedicated Tx flows; Non-power-of-2 values are rounded
-+ * up to the next power-of-2 value as hardware demands it;
-+ * '0' will be treated as '1'
-+ * @qdid: Virtual QDID.
-+ * @err_fqid: Virtual FQID for error queues
-+ * @tx_conf_fqid: Virtual FQID for global TX confirmation queue
-+ */
-+struct dpdbg_dpni_info {
-+ uint8_t max_senders;
-+ uint32_t qdid;
-+ uint32_t err_fqid;
-+ uint32_t tx_conf_fqid;
-+};
-+
-+/**
-+ * dpdbg_get_dpni_info() - Retrieve info for a specific DPNI
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDBG object
-+ * @dpni_id: The requested DPNI ID
-+ * @info: The returned info
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdbg_get_dpni_info(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int dpni_id,
-+ struct dpdbg_dpni_info *info);
-+
-+/**
-+ * dpdbg_get_dpni_priv_tx_conf_fqid() - Retrieve the virtual TX confirmation
-+ * queue FQID of the required DPNI
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDBG object
-+ * @dpni_id: The requested DPNI ID
-+ * @sender_id: The requested sender ID
-+ * @fqid: The returned virtual private TX confirmation FQID.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdbg_get_dpni_priv_tx_conf_fqid(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int dpni_id,
-+ uint8_t sender_id,
-+ uint32_t *fqid);
-+
-+/**
-+ * struct dpdbg_dpcon_info - Info of DPCON
-+ * @ch_id: Channel ID
-+ */
-+struct dpdbg_dpcon_info {
-+ uint32_t ch_id;
-+};
-+
-+/**
-+ * dpdbg_get_dpcon_info() - Retrieve info of DPCON
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDBG object
-+ * @dpcon_id: The requested DPCON ID
-+ * @info: The returned info.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdbg_get_dpcon_info(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int dpcon_id,
-+ struct dpdbg_dpcon_info *info);
-+
-+/**
-+ * struct dpdbg_dpbp_info - Info of DPBP
-+ * @bpid: Virtual buffer pool ID
-+ */
-+struct dpdbg_dpbp_info {
-+ uint32_t bpid;
-+};
-+
-+/**
-+ * dpdbg_get_dpbp_info() - Retrieve info of DPBP
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDBG object
-+ * @dpbp_id: The requested DPBP ID
-+ * @info: The returned info.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdbg_get_dpbp_info(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int dpbp_id,
-+ struct dpdbg_dpbp_info *info);
-+
-+/**
-+ * dpdbg_get_dpci_fqid() - Retrieve the virtual FQID of the required DPCI
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDBG object
-+ * @dpci_id: The requested DPCI ID
-+ * @priority: Select the queue relative to number of priorities configured at
-+ * DPCI creation
-+ * @fqid: The returned virtual FQID.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdbg_get_dpci_fqid(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int dpci_id,
-+ uint8_t priority,
-+ uint32_t *fqid);
-+
-+/**
-+ * Maximum size for rule match (in bytes)
-+ */
-+#define DPDBG_MAX_RULE_SIZE 56
-+/**
-+ * Disable marking
-+ */
-+#define DPDBG_DISABLE_MARKING 0xFF
-+
-+/**
-+ * dpdbg_prepare_ctlu_global_rule() - Prepare the extract parameters for a rule
-+ * @dpkg_rule: Key Generation profile defining the full rule
-+ * @rule_buf: 256 bytes of zeroed memory, to be mapped to DMA afterwards
-+ *
-+ * This function has to be called before dpdbg_set_ctlu_global_marking()
-+ * and dpdbg_set_ctlu_global_trace()
-+ */
-+int dpdbg_prepare_ctlu_global_rule(struct dpkg_profile_cfg *dpkg_rule,
-+ uint8_t *rule_buf);
-+
-+/**
-+ * struct dpdbg_rule_cfg - Rule configuration for table lookup
-+ * @key_iova: I/O virtual address of the key (must be in DMA-able memory)
-+ * @rule_iova: I/O virtual address of the rule (must be in DMA-able memory)
-+ * @mask_iova: I/O virtual address of the mask (must be in DMA-able memory)
-+ * @key_size: key and mask size (in bytes)
-+ */
-+struct dpdbg_rule_cfg {
-+ uint64_t key_iova;
-+ uint64_t mask_iova;
-+ uint64_t rule_iova;
-+ uint8_t key_size;
-+};
-+
-+/**
-+ * dpdbg_set_ctlu_global_marking() - Set marking for all match rule frames
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDBG object
-+ * @marking: The requested Debug marking
-+ * @cfg: Marking rule to add
-+ *
-+ * Warning: must be called after dpdbg_prepare_ctlu_global_rule()
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdbg_set_ctlu_global_marking(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t marking,
-+ struct dpdbg_rule_cfg *cfg);
-+
-+/**
-+ * All traffic classes considered
-+ */
-+#define DPDBG_DPNI_ALL_TCS (uint8_t)(-1)
-+/**
-+ * All flows within traffic class considered
-+ */
-+#define DPDBG_DPNI_ALL_TC_FLOWS (uint8_t)(-1)
-+/**
-+ * All buffer pools considered
-+ */
-+#define DPDBG_DPNI_ALL_DPBP (uint8_t)(-1)
-+
-+/**
-+ * struct dpdbg_dpni_rx_marking_cfg - Ingress frame configuration
-+ * @tc_id: Traffic class ID (0-7); DPDBG_DPNI_ALL_TCS for all traffic classes.
-+ * @flow_id: Rx flow id within the traffic class; use
-+ * 'DPDBG_DPNI_ALL_TC_FLOWS' to set all flows within this tc_id;
-+ * ignored if tc_id is set to 'DPDBG_DPNI_ALL_TCS';
-+ * @dpbp_id: buffer pool ID; 'DPDBG_DPNI_ALL_DPBP' to set all DPBP
-+ * @marking: Marking for match frames;
-+ * 'DPDBG_DISABLE_MARKING' for disable marking
-+ */
-+struct dpdbg_dpni_rx_marking_cfg {
-+ uint8_t tc_id;
-+ uint16_t flow_id;
-+ uint16_t dpbp_id;
-+ uint8_t marking;
-+};
-+
-+/**
-+ * dpdbg_set_dpni_rx_marking() - Set Rx frame marking for DPNI
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDBG object
-+ * @dpni_id: The requested DPNI ID
-+ * @cfg: RX frame marking configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdbg_set_dpni_rx_marking(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int dpni_id,
-+ struct dpdbg_dpni_rx_marking_cfg *cfg);
-+
-+/* selects global confirmation queues */
-+#define DPDBG_DPNI_GLOBAL_TX_CONF_QUEUE (uint16_t)(-1)
-+
-+/**
-+ * dpdbg_set_dpni_tx_conf_marking() - Set Tx frame marking for DPNI
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDBG object
-+ * @dpni_id: The requested DPNI ID
-+ * @sender_id: Sender Id for the confirmation queue;
-+ * 'DPDBG_DPNI_GLOBAL_TX_CONF_QUEUE' for global confirmation queue
-+ * @marking: The requested marking;
-+ * 'DPDBG_DISABLE_MARKING' for disable marking
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdbg_set_dpni_tx_conf_marking(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int dpni_id,
-+ uint16_t sender_id,
-+ uint8_t marking);
-+
-+/**
-+ * dpdbg_set_dpio_marking() - Set debug frame marking on enqueue
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDBG object
-+ * @dpio_id: The requested DPIO ID
-+ * @marking: The requested marking;
-+ * 'DPDBG_DISABLE_MARKING' for disable marking
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdbg_set_dpio_marking(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int dpio_id,
-+ uint8_t marking);
-+
-+/**
-+ * enum dpdbg_verbosity_level - Trace verbosity level
-+ * @DPDBG_VERBOSITY_LEVEL_DISABLE: Trace disabled
-+ * @DPDBG_VERBOSITY_LEVEL_TERSE: Terse trace
-+ * @DPDBG_VERBOSITY_LEVEL_VERBOSE: Verbose trace
-+ */
-+enum dpdbg_verbosity_level {
-+ DPDBG_VERBOSITY_LEVEL_DISABLE = 0,
-+ DPDBG_VERBOSITY_LEVEL_TERSE,
-+ DPDBG_VERBOSITY_LEVEL_VERBOSE
-+};
-+
-+/**
-+ * dpdbg_set_ctlu_global_trace() - Set global trace configuration for CTLU trace
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDBG object
-+ * @cfg: trace rule to add
-+ *
-+ * Warning: must be called after dpdbg_prepare_ctlu_global_rule()
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdbg_set_ctlu_global_trace(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpdbg_rule_cfg *cfg);
-+
-+/**
-+ * Number of DPIO trace points
-+ */
-+#define DPDBG_NUM_OF_DPIO_TRACE_POINTS 2
-+
-+/**
-+ * enum dpdbg_dpio_trace_type - Define Trace point type
-+ * @DPDBG_DPIO_TRACE_TYPE_ENQUEUE: This trace point triggers when an enqueue
-+ * command, received via this portal,
-+ * and containing a marked frame, is executed
-+ * @DPDBG_DPIO_TRACE_TYPE_DEFERRED: This trace point triggers when the deferred
-+ * enqueue of a marked frame received via this
-+ * portal completes
-+ */
-+enum dpdbg_dpio_trace_type {
-+ DPDBG_DPIO_TRACE_TYPE_ENQUEUE = 0,
-+ DPDBG_DPIO_TRACE_TYPE_DEFERRED = 1
-+};
-+
-+/**
-+ * struct dpdbg_dpio_trace_cfg - Configure the behavior of a trace point
-+ * when a frame marked with the specified DD code point is
-+ * encountered
-+ * @marking: this field will be written into the DD field of every FD
-+ * enqueued in this DPIO.
-+ * 'DPDBG_DISABLE_MARKING' for disable marking
-+ * @verbosity: Verbosity level
-+ * @enqueue_type: Enqueue trace point type
-+ */
-+struct dpdbg_dpio_trace_cfg {
-+ uint8_t marking;
-+ enum dpdbg_verbosity_level verbosity;
-+ enum dpdbg_dpio_trace_type enqueue_type;
-+};
-+
-+/**
-+ * dpdbg_set_dpio_trace() - Set trace for DPIO for every enqueued frame to
-+ * the portal
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDBG object
-+ * @dpio_id: The requested DPIO ID
-+ * @trace_point: Trace points configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdbg_set_dpio_trace(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int dpio_id,
-+ struct dpdbg_dpio_trace_cfg
-+ trace_point[DPDBG_NUM_OF_DPIO_TRACE_POINTS]);
-+
-+/**
-+ * struct dpdbg_dpni_rx_trace_cfg - Configure the behavior of an ingress trace
-+ * point when a frame marked with the specified DD code point is encountered
-+ * @tc_id: Traffic class ID (0-7); DPDBG_DPNI_ALL_TCS for all traffic classes.
-+ * @flow_id: Rx flow id within the traffic class; use
-+ * 'DPDBG_DPNI_ALL_TC_FLOWS' to set all flows within this tc_id;
-+ * ignored if tc_id is set to 'DPDBG_DPNI_ALL_TCS';
-+ * @dpbp_id: buffer pool ID; 'DPDBG_DPNI_ALL_DPBP' to set all DPBP
-+ * @marking: Marking for match frames;
-+ * 'DPDBG_DISABLE_MARKING' for disable marking
-+ */
-+struct dpdbg_dpni_rx_trace_cfg {
-+ uint8_t tc_id;
-+ uint16_t flow_id;
-+ uint16_t dpbp_id;
-+ uint8_t marking;
-+};
-+
-+/**
-+ * dpdbg_set_dpni_rx_trace() - Set trace for DPNI ingress (WRIOP ingress).
-+ * In case of multiple requests for different DPNIs, the trace
-+ * applies to the latest DPNI requested.
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDBG object
-+ * @dpni_id: The requested DPNI ID
-+ * @trace_cfg: Trace configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdbg_set_dpni_rx_trace(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int dpni_id,
-+ struct dpdbg_dpni_rx_trace_cfg *trace_cfg);
-+
-+/**
-+ * All DPNI senders
-+ */
-+#define DPDBG_DPNI_ALL_SENDERS (uint16_t)(-1)
-+
-+/**
-+ * struct dpdbg_dpni_tx_trace_cfg - Configure the behavior of a trace point when a
-+ * frame marked with the specified DD code point is encountered
-+ * @marking: The requested debug marking;
-+ * 'DPDBG_DISABLE_MARKING' for disable marking
-+ */
-+struct dpdbg_dpni_tx_trace_cfg {
-+ uint8_t marking;
-+};
-+
-+/**
-+ * dpdbg_set_dpni_tx_trace() - Set trace for DPNI dequeued frames
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDBG object
-+ * @dpni_id: The requested DPNI ID
-+ * @sender_id: Sender ID; 'DPDBG_DPNI_ALL_SENDERS' for all senders
-+ * @trace_cfg: Trace configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdbg_set_dpni_tx_trace(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int dpni_id,
-+ uint16_t sender_id,
-+ struct dpdbg_dpni_tx_trace_cfg *trace_cfg);
-+
-+/**
-+ * Number of DPCON trace points
-+ */
-+#define DPDBG_NUM_OF_DPCON_TRACE_POINTS 2
-+
-+/**
-+ * struct dpdbg_dpcon_trace_cfg - Configure the behavior of a trace point when a
-+ * frame marked with the specified DD code point is encountered
-+ * @marking: The requested debug marking;
-+ * 'DPDBG_DISABLE_MARKING' for disable marking
-+ * @verbosity: Verbosity level
-+ */
-+struct dpdbg_dpcon_trace_cfg {
-+ uint8_t marking;
-+ enum dpdbg_verbosity_level verbosity;
-+};
-+
-+/**
-+ * dpdbg_set_dpcon_trace() - Set trace for DPCON when a frame marked with a
-+ * specified marking is dequeued from a WQ in the
-+ * channel selected
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDBG object
-+ * @dpcon_id: The requested DPCON ID
-+ * @trace_point: Trace points configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdbg_set_dpcon_trace(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int dpcon_id,
-+ struct dpdbg_dpcon_trace_cfg
-+ trace_point[DPDBG_NUM_OF_DPCON_TRACE_POINTS]);
-+
-+/**
-+ * Number of DPSECI trace points
-+ */
-+#define DPDBG_NUM_OF_DPSECI_TRACE_POINTS 2
-+
-+/**
-+ * struct dpdbg_dpseci_trace_cfg - Configure the behavior of a trace point when
-+ * a frame marked with the specified DD code point is
-+ * encountered
-+ * @marking: The requested debug marking;
-+ * 'DPDBG_DISABLE_MARKING' for disable marking
-+ * @verbosity: Verbosity level
-+ */
-+struct dpdbg_dpseci_trace_cfg {
-+ uint8_t marking;
-+ enum dpdbg_verbosity_level verbosity;
-+};
-+
-+/**
-+ * dpdbg_set_dpseci_trace() - Set trace for DPSECI when a frame marked with the
-+ * specific marking is enqueued via this portal.
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDBG object
-+ * @dpseci_id: The requested DPSECI ID
-+ * @trace_point: Trace points configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdbg_set_dpseci_trace(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int dpseci_id,
-+ struct dpdbg_dpseci_trace_cfg
-+ trace_point[DPDBG_NUM_OF_DPSECI_TRACE_POINTS]);
-+
-+/**
-+ * dpdbg_get_dpmac_counter() - Retrieve a DPMAC packet throughput counter
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDBG object
-+ * @dpmac_id: The requested DPMAC ID
-+ * @counter_type: The requested DPMAC counter
-+ * @counter: Returned counter value
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdbg_get_dpmac_counter(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int dpmac_id,
-+ enum dpmac_counter counter_type,
-+ uint64_t *counter);
-+
-+/**
-+ * dpdbg_get_dpni_counter() - Retrieve a DPNI packet throughput counter
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDBG object
-+ * @dpni_id: The requested DPNI ID
-+ * @counter_type: The requested DPNI counter
-+ * @counter: Returned counter value
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdbg_get_dpni_counter(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int dpni_id,
-+ enum dpni_counter counter_type,
-+ uint64_t *counter);
-+
-+#endif /* __FSL_DPDBG_H */
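All of the DPDBG marking and trace calls above follow the same open/apply/close pattern. A minimal sketch for Rx marking, using only what the header declares; the helper name and the cmd_flags value of 0 are illustrative assumptions.

#include <stdint.h>
#include <fsl_dpdbg.h>	/* the header removed above */

/* Hypothetical helper: mark every ingress frame of one DPNI with a DD code
 * point so it can later be traced. */
static int mark_dpni_rx(struct fsl_mc_io *mc_io, int dpdbg_id,
			int dpni_id, uint8_t marking)
{
	struct dpdbg_dpni_rx_marking_cfg cfg;
	uint16_t token;
	int err;

	err = dpdbg_open(mc_io, 0, dpdbg_id, &token);
	if (err)
		return err;

	cfg.tc_id = DPDBG_DPNI_ALL_TCS;		/* every traffic class */
	cfg.flow_id = DPDBG_DPNI_ALL_TC_FLOWS;	/* ignored with ALL_TCS */
	cfg.dpbp_id = DPDBG_DPNI_ALL_DPBP;	/* every buffer pool */
	cfg.marking = marking;			/* DPDBG_DISABLE_MARKING to stop */

	err = dpdbg_set_dpni_rx_marking(mc_io, 0, token, dpni_id, &cfg);

	dpdbg_close(mc_io, 0, token);
	return err;
}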
-diff --git a/drivers/net/dpaa2/mc/fsl_dpdbg_cmd.h b/drivers/net/dpaa2/mc/fsl_dpdbg_cmd.h
-new file mode 100644
-index 0000000..b672788
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/fsl_dpdbg_cmd.h
-@@ -0,0 +1,249 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef _FSL_DPDBG_CMD_H
-+#define _FSL_DPDBG_CMD_H
-+
-+/* DPDBG Version */
-+#define DPDBG_VER_MAJOR 1
-+#define DPDBG_VER_MINOR 0
-+
-+/* Command IDs */
-+#define DPDBG_CMDID_CLOSE 0x800
-+#define DPDBG_CMDID_OPEN 0x80F
-+
-+#define DPDBG_CMDID_GET_ATTR 0x004
-+
-+#define DPDBG_CMDID_GET_DPNI_INFO 0x130
-+#define DPDBG_CMDID_GET_DPNI_PRIV_TX_CONF_FQID 0x131
-+#define DPDBG_CMDID_GET_DPCON_INFO 0x132
-+#define DPDBG_CMDID_GET_DPBP_INFO 0x133
-+#define DPDBG_CMDID_GET_DPCI_FQID 0x134
-+
-+#define DPDBG_CMDID_SET_CTLU_GLOBAL_MARKING 0x135
-+#define DPDBG_CMDID_SET_DPNI_RX_MARKING 0x136
-+#define DPDBG_CMDID_SET_DPNI_TX_CONF_MARKING 0x137
-+#define DPDBG_CMDID_SET_DPIO_MARKING 0x138
-+
-+#define DPDBG_CMDID_SET_CTLU_GLOBAL_TRACE 0x140
-+#define DPDBG_CMDID_SET_DPIO_TRACE 0x141
-+#define DPDBG_CMDID_SET_DPNI_RX_TRACE 0x142
-+#define DPDBG_CMDID_SET_DPNI_TX_TRACE 0x143
-+#define DPDBG_CMDID_SET_DPCON_TRACE 0x145
-+#define DPDBG_CMDID_SET_DPSECI_TRACE 0x146
-+
-+#define DPDBG_CMDID_GET_DPMAC_COUNTER 0x150
-+#define DPDBG_CMDID_GET_DPNI_COUNTER 0x151
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDBG_CMD_OPEN(cmd, dpdbg_id) \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, dpdbg_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDBG_RSP_GET_ATTRIBUTES(cmd, attr) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 32, 32, int, attr->id);\
-+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\
-+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDBG_CMD_GET_DPNI_INFO(cmd, dpni_id) \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDBG_RSP_GET_DPNI_INFO(cmd, info) \
-+do { \
-+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, info->qdid);\
-+ MC_RSP_OP(cmd, 1, 32, 8, uint8_t, info->max_senders);\
-+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, info->err_fqid);\
-+ MC_RSP_OP(cmd, 2, 32, 32, uint32_t, info->tx_conf_fqid);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDBG_CMD_GET_DPNI_PRIV_TX_CONF_FQID(cmd, dpni_id, sender_id) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id);\
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, sender_id);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDBG_RSP_GET_DPNI_PRIV_TX_CONF_FQID(cmd, fqid) \
-+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, fqid)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDBG_CMD_GET_DPCON_INFO(cmd, dpcon_id) \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDBG_RSP_GET_DPCON_INFO(cmd, info) \
-+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, info->ch_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDBG_CMD_GET_DPBP_INFO(cmd, dpbp_id) \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, dpbp_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDBG_RSP_GET_DPBP_INFO(cmd, info) \
-+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, info->bpid)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDBG_CMD_GET_DPCI_FQID(cmd, dpci_id, priority) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, dpci_id);\
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, priority);\
-+} while (0)
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDBG_RSP_GET_DPCI_FQID(cmd, fqid) \
-+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, fqid)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDBG_CMD_SET_CTLU_GLOBAL_MARKING(cmd, marking, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, marking);\
-+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->key_size); \
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \
-+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \
-+ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->rule_iova); \
-+} while (0)
-+
-+#define DPDBG_CMD_SET_DPNI_RX_MARKING(cmd, dpni_id, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id);\
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->tc_id);\
-+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, cfg->flow_id);\
-+ MC_CMD_OP(cmd, 1, 0, 16, uint16_t, cfg->dpbp_id);\
-+ MC_CMD_OP(cmd, 1, 16, 8, uint8_t, cfg->marking);\
-+} while (0)
-+
-+#define DPDBG_CMD_SET_DPNI_TX_CONF_MARKING(cmd, dpni_id, sender_id, marking) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id);\
-+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, sender_id);\
-+ MC_CMD_OP(cmd, 1, 16, 8, uint8_t, marking);\
-+} while (0)
-+
-+#define DPDBG_CMD_SET_DPIO_MARKING(cmd, dpio_id, marking) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, dpio_id);\
-+ MC_CMD_OP(cmd, 1, 16, 8, uint8_t, marking);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDBG_CMD_SET_CTLU_GLOBAL_TRACE(cmd, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->key_size); \
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \
-+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \
-+ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->rule_iova); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDBG_CMD_SET_DPIO_TRACE(cmd, dpio_id, trace_point) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, dpio_id);\
-+ MC_CMD_OP(cmd, 1, 0, 4, enum dpdbg_verbosity_level, \
-+ trace_point[0].verbosity); \
-+ MC_CMD_OP(cmd, 1, 4, 4, enum dpdbg_dpio_trace_type, \
-+ trace_point[0].enqueue_type); \
-+ MC_CMD_OP(cmd, 1, 8, 8, uint8_t, trace_point[0].marking); \
-+ MC_CMD_OP(cmd, 1, 32, 4, enum dpdbg_verbosity_level, \
-+ trace_point[1].verbosity); \
-+ MC_CMD_OP(cmd, 1, 36, 4, enum dpdbg_dpio_trace_type, \
-+ trace_point[1].enqueue_type); \
-+ MC_CMD_OP(cmd, 1, 40, 8, uint8_t, trace_point[1].marking); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDBG_CMD_SET_DPNI_RX_TRACE(cmd, dpni_id, trace_cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id);\
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, trace_cfg->tc_id);\
-+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, trace_cfg->flow_id);\
-+ MC_CMD_OP(cmd, 1, 0, 16, uint16_t, trace_cfg->dpbp_id);\
-+ MC_CMD_OP(cmd, 1, 16, 8, uint8_t, trace_cfg->marking);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDBG_CMD_SET_DPNI_TX_TRACE(cmd, dpni_id, sender_id, trace_cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id);\
-+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, sender_id);\
-+ MC_CMD_OP(cmd, 1, 16, 8, uint8_t, trace_cfg->marking);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDBG_CMD_SET_DPCON_TRACE(cmd, dpcon_id, trace_point) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id);\
-+ MC_CMD_OP(cmd, 1, 0, 4, enum dpdbg_verbosity_level, \
-+ trace_point[0].verbosity); \
-+ MC_CMD_OP(cmd, 1, 8, 8, uint8_t, trace_point[0].marking); \
-+ MC_CMD_OP(cmd, 1, 32, 4, enum dpdbg_verbosity_level, \
-+ trace_point[1].verbosity); \
-+ MC_CMD_OP(cmd, 1, 40, 8, uint8_t, trace_point[1].marking); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDBG_CMD_SET_DPSECI_TRACE(cmd, dpseci_id, trace_point) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, dpseci_id);\
-+ MC_CMD_OP(cmd, 1, 0, 4, enum dpdbg_verbosity_level, \
-+ trace_point[0].verbosity); \
-+ MC_CMD_OP(cmd, 1, 8, 8, uint8_t, trace_point[0].marking); \
-+ MC_CMD_OP(cmd, 1, 32, 4, enum dpdbg_verbosity_level, \
-+ trace_point[1].verbosity); \
-+ MC_CMD_OP(cmd, 1, 40, 8, uint8_t, trace_point[1].marking); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDBG_CMD_GET_DPMAC_COUNTER(cmd, dpmac_id, counter_type) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, dpmac_id);\
-+ MC_CMD_OP(cmd, 0, 32, 16, enum dpmac_counter, counter_type);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDBG_RSP_GET_DPMAC_COUNTER(cmd, counter) \
-+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, counter)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDBG_CMD_GET_DPNI_COUNTER(cmd, dpni_id, counter_type) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id);\
-+ MC_CMD_OP(cmd, 0, 32, 16, enum dpni_counter, counter_type);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDBG_RSP_GET_DPNI_COUNTER(cmd, counter) \
-+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, counter)
-+
-+#endif /* _FSL_DPDBG_CMD_H */
-diff --git a/drivers/net/dpaa2/mc/fsl_dpdcei.h b/drivers/net/dpaa2/mc/fsl_dpdcei.h
-new file mode 100644
-index 0000000..319795c
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/fsl_dpdcei.h
-@@ -0,0 +1,515 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef __FSL_DPDCEI_H
-+#define __FSL_DPDCEI_H
-+
-+/* Data Path DCE Interface API
-+ * Contains initialization APIs and runtime control APIs for DPDCEI
-+ */
-+
-+struct fsl_mc_io;
-+
-+/** General DPDCEI macros */
-+
-+/**
-+ * Indicates an invalid frame queue
-+ */
-+#define DPDCEI_FQID_NOT_VALID (uint32_t)(-1)
-+
-+/**
-+ * enum dpdcei_engine - DCE engine block
-+ * @DPDCEI_ENGINE_COMPRESSION: Engine compression
-+ * @DPDCEI_ENGINE_DECOMPRESSION: Engine decompression
-+ */
-+enum dpdcei_engine {
-+ DPDCEI_ENGINE_COMPRESSION,
-+ DPDCEI_ENGINE_DECOMPRESSION
-+};
-+
-+/**
-+ * dpdcei_open() - Open a control session for the specified object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @dpdcei_id: DPDCEI unique ID
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * This function can be used to open a control session for an
-+ * already created object; an object may have been declared in
-+ * the DPL or by calling the dpdcei_create() function.
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent commands for
-+ * this specific object.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdcei_open(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ int dpdcei_id,
-+ uint16_t *token);
-+
-+/**
-+ * dpdcei_close() - Close the control session of the object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDCEI object
-+ *
-+ * After this function is called, no further operations are
-+ * allowed on the object without opening a new control session.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdcei_close(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
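/*
 * Illustrative usage sketch, not part of the original header: the comments on
 * dpdcei_open()/dpdcei_close() above describe a token-based control session,
 * so a minimal caller could look like this.  The MC portal 'mc_io' and the
 * object id are assumed to be supplied by the environment; cmd_flags of 0
 * stands in for "no special command flags".
 */
static int dpdcei_example_session(struct fsl_mc_io *mc_io, int dpdcei_id)
{
        uint16_t token;
        int err;

        /* Obtain an authentication token for an already created DPDCEI. */
        err = dpdcei_open(mc_io, 0, dpdcei_id, &token);
        if (err)
                return err;

        /* ... issue further dpdcei_*() commands using 'token' here ... */

        /* End the control session; the token becomes invalid afterwards. */
        return dpdcei_close(mc_io, 0, token);
}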
-+/**
-+ * struct dpdcei_cfg - Structure representing DPDCEI configuration
-+ * @engine: compression or decompression engine to be selected
-+ * @priority: Priority for the DCE hardware processing (valid values 1-8).
-+ */
-+struct dpdcei_cfg {
-+ enum dpdcei_engine engine;
-+ uint8_t priority;
-+};
-+
-+/**
-+ * dpdcei_create() - Create the DPDCEI object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @cfg: Configuration parameters
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * Create the DPDCEI object, allocate required resources and
-+ * perform required initialization.
-+ *
-+ * The object can be created either by declaring it in the
-+ * DPL file, or by calling this function.
-+ *
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent calls to
-+ * this specific object. For objects that are created using the
-+ * DPL file, call dpdcei_open() function to get an authentication
-+ * token first.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdcei_create(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ const struct dpdcei_cfg *cfg,
-+ uint16_t *token);
-+
-+/**
-+ * dpdcei_destroy() - Destroy the DPDCEI object and release all its resources.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDCEI object
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dpdcei_destroy(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * dpdcei_enable() - Enable the DPDCEI, allow sending and receiving frames.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDCEI object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdcei_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * dpdcei_disable() - Disable the DPDCEI, stop sending and receiving frames.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDCEI object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdcei_disable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * dpdcei_is_enabled() - Check if the DPDCEI is enabled.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDCEI object
-+ * @en: Returns '1' if object is enabled; '0' otherwise
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdcei_is_enabled(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int *en);
-+
-+/**
-+ * dpdcei_reset() - Reset the DPDCEI, returns the object to initial state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDCEI object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdcei_reset(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * struct dpdcei_irq_cfg - IRQ configuration
-+ * @addr: Address that must be written to signal a message-based interrupt
-+ * @val: Value to write into irq_addr address
-+ * @irq_num: A user defined number associated with this IRQ
-+ */
-+struct dpdcei_irq_cfg {
-+ uint64_t addr;
-+ uint32_t val;
-+ int irq_num;
-+};
-+
-+/**
-+ * dpdcei_set_irq() - Set IRQ information for the DPDCEI to trigger an interrupt
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDCEI object
-+ * @irq_index: Identifies the interrupt index to configure
-+ * @irq_cfg: IRQ configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdcei_set_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ struct dpdcei_irq_cfg *irq_cfg);
-+
-+/**
-+ * dpdcei_get_irq() - Get IRQ information from the DPDCEI
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDCEI object
-+ * @irq_index: The interrupt index to configure
-+ * @type: Interrupt type: 0 represents message interrupt
-+ * type (both irq_addr and irq_val are valid)
-+ * @irq_cfg: IRQ attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdcei_get_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ int *type,
-+ struct dpdcei_irq_cfg *irq_cfg);
-+
-+/**
-+ * dpdcei_set_irq_enable() - Set overall interrupt state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDCEI object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Interrupt state - enable = 1, disable = 0
-+ *
-+ * Allows GPP software to control when interrupts are generated.
-+ * Each interrupt can have up to 32 causes. The enable/disable controls the
-+ * overall interrupt state. If the interrupt is disabled, no causes will cause
-+ * an interrupt.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdcei_set_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t en);
-+
-+/**
-+ * dpdcei_get_irq_enable() - Get overall interrupt state
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDCEI object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Returned Interrupt state - enable = 1, disable = 0
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdcei_get_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t *en);
-+
-+/**
-+ * dpdcei_set_irq_mask() - Set interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDCEI object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: event mask to trigger interrupt;
-+ * each bit:
-+ * 0 = ignore event
-+ * 1 = consider event for asserting IRQ
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdcei_set_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t mask);
-+
-+/**
-+ * dpdcei_get_irq_mask() - Get interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDCEI object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: Returned event mask to trigger interrupt
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdcei_get_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *mask);
-+
-+/**
-+ * dpdcei_get_irq_status() - Get the current status of any pending interrupts
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDCEI object
-+ * @irq_index: The interrupt index to configure
-+ * @status: Returned interrupts status - one bit per cause:
-+ * 0 = no interrupt pending
-+ * 1 = interrupt pending
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdcei_get_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *status);
-+
-+/**
-+ * dpdcei_clear_irq_status() - Clear a pending interrupt's status
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDCEI object
-+ * @irq_index: The interrupt index to configure
-+ * @status: bits to clear (W1C) - one bit per cause:
-+ * 0 = don't change
-+ * 1 = clear status bit
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdcei_clear_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t status);
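/*
 * Illustrative usage sketch, not part of the original header: the IRQ helpers
 * above combine a per-cause mask, an overall enable, and a W1C status word.
 * A caller enabling all 32 causes of interrupt index 0 and later
 * acknowledging whatever fired might look like this; irq_index 0 and
 * cmd_flags 0 are placeholder values.
 */
static int dpdcei_example_irq(struct fsl_mc_io *mc_io, uint16_t token)
{
        uint32_t status = 0;
        int err;

        /* Consider every cause for assertion, then enable the interrupt. */
        err = dpdcei_set_irq_mask(mc_io, 0, token, 0, 0xFFFFFFFF);
        if (err)
                return err;
        err = dpdcei_set_irq_enable(mc_io, 0, token, 0, 1);
        if (err)
                return err;

        /* Later (e.g. from the interrupt handler): read and clear causes. */
        err = dpdcei_get_irq_status(mc_io, 0, token, 0, &status);
        if (err)
                return err;

        return dpdcei_clear_irq_status(mc_io, 0, token, 0, status);
}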
-+/**
-+ * struct dpdcei_attr - Structure representing DPDCEI attributes
-+ * @id: DPDCEI object ID
-+ * @engine: DCE engine block
-+ * @version: DPDCEI version
-+ */
-+struct dpdcei_attr {
-+ int id;
-+ enum dpdcei_engine engine;
-+ /**
-+ * struct version - DPDCEI version
-+ * @major: DPDCEI major version
-+ * @minor: DPDCEI minor version
-+ */
-+ struct {
-+ uint16_t major;
-+ uint16_t minor;
-+ } version;
-+};
-+
-+/**
-+ * dpdcei_get_attributes() - Retrieve DPDCEI attributes.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDCEI object
-+ * @attr: Returned object's attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdcei_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpdcei_attr *attr);
-+
-+/**
-+ * enum dpdcei_dest - DPDCEI destination types
-+ * @DPDCEI_DEST_NONE: Unassigned destination; The queue is set in parked mode
-+ * and does not generate FQDAN notifications;
-+ * user is expected to dequeue from the queue based on
-+ * polling or other user-defined method
-+ * @DPDCEI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
-+ * notifications to the specified DPIO; user is expected to
-+ * dequeue from the queue only after notification is
-+ * received
-+ * @DPDCEI_DEST_DPCON: The queue is set in schedule mode and does not generate
-+ * FQDAN notifications, but is connected to the specified
-+ * DPCON object;
-+ * user is expected to dequeue from the DPCON channel
-+ */
-+enum dpdcei_dest {
-+ DPDCEI_DEST_NONE = 0,
-+ DPDCEI_DEST_DPIO = 1,
-+ DPDCEI_DEST_DPCON = 2
-+};
-+
-+/**
-+ * struct dpdcei_dest_cfg - Structure representing DPDCEI destination parameters
-+ * @dest_type: Destination type
-+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
-+ * @priority: Priority selection within the DPIO or DPCON channel; valid values
-+ * are 0-1 or 0-7, depending on the number of priorities in that
-+ * channel; not relevant for 'DPDCEI_DEST_NONE' option
-+ */
-+struct dpdcei_dest_cfg {
-+ enum dpdcei_dest dest_type;
-+ int dest_id;
-+ uint8_t priority;
-+};
-+
-+/** DPDCEI queue modification options */
-+
-+/**
-+ * Select to modify the user's context associated with the queue
-+ */
-+#define DPDCEI_QUEUE_OPT_USER_CTX 0x00000001
-+
-+/**
-+ * Select to modify the queue's destination
-+ */
-+#define DPDCEI_QUEUE_OPT_DEST 0x00000002
-+
-+/**
-+ * struct dpdcei_rx_queue_cfg - RX queue configuration
-+ * @options: Flags representing the suggested modifications to the queue;
-+ * Use any combination of 'DPDCEI_QUEUE_OPT_<X>' flags
-+ * @user_ctx: User context value provided in the frame descriptor of each
-+ * dequeued frame;
-+ * valid only if 'DPDCEI_QUEUE_OPT_USER_CTX' is contained in 'options'
-+ * @dest_cfg: Queue destination parameters;
-+ * valid only if 'DPDCEI_QUEUE_OPT_DEST' is contained in 'options'
-+ */
-+struct dpdcei_rx_queue_cfg {
-+ uint32_t options;
-+ uint64_t user_ctx;
-+ struct dpdcei_dest_cfg dest_cfg;
-+};
-+
-+/**
-+ * dpdcei_set_rx_queue() - Set Rx queue configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDCEI object
-+ * @cfg: Rx queue configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdcei_set_rx_queue(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const struct dpdcei_rx_queue_cfg *cfg);
-+
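/*
 * Illustrative usage sketch, not part of the original header: only the fields
 * whose DPDCEI_QUEUE_OPT_<X> bit is set in 'options' are applied, as
 * documented above.  The DPIO id and user context below are placeholders.
 */
static int dpdcei_example_rx_setup(struct fsl_mc_io *mc_io, uint16_t token,
                                   int dpio_id)
{
        struct dpdcei_rx_queue_cfg cfg = { 0 };

        /* Modify both the user context and the destination. */
        cfg.options = DPDCEI_QUEUE_OPT_USER_CTX | DPDCEI_QUEUE_OPT_DEST;
        cfg.user_ctx = 0xdeadbeef;                 /* echoed back in each FD */
        cfg.dest_cfg.dest_type = DPDCEI_DEST_DPIO; /* FQDANs go to a DPIO */
        cfg.dest_cfg.dest_id = dpio_id;
        cfg.dest_cfg.priority = 0;

        return dpdcei_set_rx_queue(mc_io, 0, token, &cfg);
}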
-+/**
-+ * struct dpdcei_rx_queue_attr - Structure representing attributes of Rx queues
-+ * @user_ctx: User context value provided in the frame descriptor of each
-+ * dequeued frame
-+ * @dest_cfg: Queue destination configuration
-+ * @fqid: Virtual FQID value to be used for dequeue operations
-+ */
-+struct dpdcei_rx_queue_attr {
-+ uint64_t user_ctx;
-+ struct dpdcei_dest_cfg dest_cfg;
-+ uint32_t fqid;
-+};
-+
-+/**
-+ * dpdcei_get_rx_queue() - Retrieve Rx queue attributes.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDCEI object
-+ * @attr: Returned Rx queue attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdcei_get_rx_queue(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpdcei_rx_queue_attr *attr);
-+
-+/**
-+ * struct dpdcei_tx_queue_attr - Structure representing attributes of Tx queues
-+ * @fqid: Virtual FQID to be used for sending frames to DCE hardware
-+ */
-+struct dpdcei_tx_queue_attr {
-+ uint32_t fqid;
-+};
-+
-+/**
-+ * dpdcei_get_tx_queue() - Retrieve Tx queue attributes.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDCEI object
-+ * @attr: Returned Tx queue attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdcei_get_tx_queue(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpdcei_tx_queue_attr *attr);
-+
-+#endif /* __FSL_DPDCEI_H */
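/*
 * Illustrative end-to-end sketch, not part of the original patch: assuming
 * the DPDCEI object already exists in the DPL, open it, query its attributes
 * and Tx/Rx FQIDs, and enable it.  Only declarations from fsl_dpdcei.h above
 * are used; error handling is collapsed for brevity.
 */
#include <stdint.h>
#include <fsl_dpdcei.h>

static int dpdcei_example_bringup(struct fsl_mc_io *mc_io, int dpdcei_id,
                                  uint16_t *token, uint32_t *tx_fqid,
                                  uint32_t *rx_fqid)
{
        struct dpdcei_attr attr;
        struct dpdcei_rx_queue_attr rx_attr;
        struct dpdcei_tx_queue_attr tx_attr;
        int err;

        err = dpdcei_open(mc_io, 0, dpdcei_id, token);
        if (err)
                return err;

        /* attr.engine reports whether this instance compresses or
         * decompresses. */
        err = dpdcei_get_attributes(mc_io, 0, *token, &attr);
        if (!err)
                err = dpdcei_get_tx_queue(mc_io, 0, *token, &tx_attr);
        if (!err)
                err = dpdcei_get_rx_queue(mc_io, 0, *token, &rx_attr);
        if (!err)
                err = dpdcei_enable(mc_io, 0, *token);

        if (err) {
                dpdcei_close(mc_io, 0, *token);
                return err;
        }

        *tx_fqid = tx_attr.fqid; /* enqueue frames for the DCE here */
        *rx_fqid = rx_attr.fqid; /* processed frames come back on this FQ */
        /* Keep the session open; dpdcei_disable()/dpdcei_close() at teardown. */
        return 0;
}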
-diff --git a/drivers/net/dpaa2/mc/fsl_dpdcei_cmd.h b/drivers/net/dpaa2/mc/fsl_dpdcei_cmd.h
-new file mode 100644
-index 0000000..8452d88
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/fsl_dpdcei_cmd.h
-@@ -0,0 +1,182 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef _FSL_DPDCEI_CMD_H
-+#define _FSL_DPDCEI_CMD_H
-+
-+/* DPDCEI Version */
-+#define DPDCEI_VER_MAJOR 1
-+#define DPDCEI_VER_MINOR 2
-+
-+/* Command IDs */
-+#define DPDCEI_CMDID_CLOSE 0x800
-+#define DPDCEI_CMDID_OPEN 0x80D
-+#define DPDCEI_CMDID_CREATE 0x90D
-+#define DPDCEI_CMDID_DESTROY 0x900
-+
-+#define DPDCEI_CMDID_ENABLE 0x002
-+#define DPDCEI_CMDID_DISABLE 0x003
-+#define DPDCEI_CMDID_GET_ATTR 0x004
-+#define DPDCEI_CMDID_RESET 0x005
-+#define DPDCEI_CMDID_IS_ENABLED 0x006
-+
-+#define DPDCEI_CMDID_SET_IRQ 0x010
-+#define DPDCEI_CMDID_GET_IRQ 0x011
-+#define DPDCEI_CMDID_SET_IRQ_ENABLE 0x012
-+#define DPDCEI_CMDID_GET_IRQ_ENABLE 0x013
-+#define DPDCEI_CMDID_SET_IRQ_MASK 0x014
-+#define DPDCEI_CMDID_GET_IRQ_MASK 0x015
-+#define DPDCEI_CMDID_GET_IRQ_STATUS 0x016
-+#define DPDCEI_CMDID_CLEAR_IRQ_STATUS 0x017
-+
-+#define DPDCEI_CMDID_SET_RX_QUEUE 0x1B0
-+#define DPDCEI_CMDID_GET_RX_QUEUE 0x1B1
-+#define DPDCEI_CMDID_GET_TX_QUEUE 0x1B2
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDCEI_CMD_OPEN(cmd, dpdcei_id) \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, dpdcei_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDCEI_CMD_CREATE(cmd, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 8, 8, enum dpdcei_engine, cfg->engine);\
-+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->priority);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDCEI_RSP_IS_ENABLED(cmd, en) \
-+ MC_RSP_OP(cmd, 0, 0, 1, int, en)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDCEI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
-+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
-+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDCEI_CMD_GET_IRQ(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDCEI_RSP_GET_IRQ(cmd, type, irq_cfg) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
-+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
-+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
-+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDCEI_CMD_SET_IRQ_ENABLE(cmd, irq_index, enable_state) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, enable_state); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDCEI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDCEI_RSP_GET_IRQ_ENABLE(cmd, enable_state) \
-+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, enable_state)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDCEI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDCEI_CMD_GET_IRQ_MASK(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDCEI_RSP_GET_IRQ_MASK(cmd, mask) \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDCEI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDCEI_RSP_GET_IRQ_STATUS(cmd, status) \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDCEI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDCEI_RSP_GET_ATTR(cmd, attr) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id); \
-+ MC_RSP_OP(cmd, 0, 32, 8, enum dpdcei_engine, attr->engine); \
-+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\
-+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDCEI_CMD_SET_RX_QUEUE(cmd, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority); \
-+ MC_CMD_OP(cmd, 0, 48, 4, enum dpdcei_dest, cfg->dest_cfg.dest_type); \
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \
-+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDCEI_RSP_GET_RX_QUEUE(cmd, attr) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id);\
-+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\
-+ MC_RSP_OP(cmd, 0, 48, 4, enum dpdcei_dest, attr->dest_cfg.dest_type);\
-+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx);\
-+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->fqid);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDCEI_RSP_GET_TX_QUEUE(cmd, attr) \
-+ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, attr->fqid)
-+
-+#endif /* _FSL_DPDCEI_CMD_H */
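/*
 * Illustrative sketch, not part of the original patch: these request/response
 * macros are meant to be consumed by a matching dpdcei.c wrapper, and the
 * open call would roughly look as below.  'struct mc_command',
 * mc_encode_cmd_header(), mc_send_command() and MC_CMD_HDR_READ_TOKEN() are
 * assumed to be provided by the generic MC command layer (fsl_mc_cmd.h /
 * fsl_mc_sys.h); the actual implementation may differ.
 */
int dpdcei_open(struct fsl_mc_io *mc_io,
                uint32_t cmd_flags,
                int dpdcei_id,
                uint16_t *token)
{
        struct mc_command cmd = { 0 };
        int err;

        /* The header carries the command id and flags; DPDCEI_CMD_OPEN
         * packs the object id into parameter 0. */
        cmd.header = mc_encode_cmd_header(DPDCEI_CMDID_OPEN, cmd_flags, 0);
        DPDCEI_CMD_OPEN(cmd, dpdcei_id);

        /* Send the command to the MC and wait for completion. */
        err = mc_send_command(mc_io, &cmd);
        if (err)
                return err;

        /* The authentication token comes back in the response header. */
        *token = MC_CMD_HDR_READ_TOKEN(cmd.header);

        return 0;
}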
-diff --git a/drivers/net/dpaa2/mc/fsl_dpdmai.h b/drivers/net/dpaa2/mc/fsl_dpdmai.h
-new file mode 100644
-index 0000000..e931ce1
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/fsl_dpdmai.h
-@@ -0,0 +1,521 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef __FSL_DPDMAI_H
-+#define __FSL_DPDMAI_H
-+
-+struct fsl_mc_io;
-+
-+/* Data Path DMA Interface API
-+ * Contains initialization APIs and runtime control APIs for DPDMAI
-+ */
-+
-+/* General DPDMAI macros */
-+
-+/**
-+ * Maximum number of Tx/Rx priorities per DPDMAI object
-+ */
-+#define DPDMAI_PRIO_NUM 2
-+
-+/**
-+ * All queues considered; see dpdmai_set_rx_queue()
-+ */
-+#define DPDMAI_ALL_QUEUES (uint8_t)(-1)
-+
-+/**
-+ * dpdmai_open() - Open a control session for the specified object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @dpdmai_id: DPDMAI unique ID
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * This function can be used to open a control session for an
-+ * already created object; an object may have been declared in
-+ * the DPL or by calling the dpdmai_create() function.
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent commands for
-+ * this specific object.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_open(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ int dpdmai_id,
-+ uint16_t *token);
-+
-+/**
-+ * dpdmai_close() - Close the control session of the object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ *
-+ * After this function is called, no further operations are
-+ * allowed on the object without opening a new control session.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_close(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * struct dpdmai_cfg - Structure representing DPDMAI configuration
-+ * @priorities: Priorities for the DMA hardware processing; valid priorities are
-+ * configured with values 1-8; the entry following the last valid entry
-+ * should be configured with 0
-+ */
-+struct dpdmai_cfg {
-+ uint8_t priorities[DPDMAI_PRIO_NUM];
-+};
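/*
 * Illustrative sketch, not part of the original header: per the field comment
 * above, priorities take values 1-8 and the array is 0-terminated when fewer
 * than DPDMAI_PRIO_NUM entries are used.  The chosen values are arbitrary.
 */

/* Both priorities in use: */
static const struct dpdmai_cfg dpdmai_cfg_two_prio = {
        .priorities = { 1, 2 },
};

/* Only one priority in use; the trailing 0 marks the end of the list: */
static const struct dpdmai_cfg dpdmai_cfg_one_prio = {
        .priorities = { 3, 0 },
};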
-+
-+/**
-+ * dpdmai_create() - Create the DPDMAI object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @cfg: Configuration structure
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * Create the DPDMAI object, allocate required resources and
-+ * perform required initialization.
-+ *
-+ * The object can be created either by declaring it in the
-+ * DPL file, or by calling this function.
-+ *
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent calls to
-+ * this specific object. For objects that are created using the
-+ * DPL file, call dpdmai_open() function to get an authentication
-+ * token first.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_create(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ const struct dpdmai_cfg *cfg,
-+ uint16_t *token);
-+
-+/**
-+ * dpdmai_destroy() - Destroy the DPDMAI object and release all its resources.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dpdmai_destroy(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * dpdmai_enable() - Enable the DPDMAI, allow sending and receiving frames.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * dpdmai_disable() - Disable the DPDMAI, stop sending and receiving frames.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_disable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * dpdmai_is_enabled() - Check if the DPDMAI is enabled.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ * @en: Returns '1' if object is enabled; '0' otherwise
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int *en);
-+
-+/**
-+ * dpdmai_reset() - Reset the DPDMAI, returns the object to initial state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_reset(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * struct dpdmai_irq_cfg - IRQ configuration
-+ * @addr: Address that must be written to signal a message-based interrupt
-+ * @val: Value to write into irq_addr address
-+ * @irq_num: A user defined number associated with this IRQ
-+ */
-+struct dpdmai_irq_cfg {
-+ uint64_t addr;
-+ uint32_t val;
-+ int irq_num;
-+};
-+
-+/**
-+ * dpdmai_set_irq() - Set IRQ information for the DPDMAI to trigger an interrupt.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ * @irq_index: Identifies the interrupt index to configure
-+ * @irq_cfg: IRQ configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_set_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ struct dpdmai_irq_cfg *irq_cfg);
-+
-+/**
-+ * dpdmai_get_irq() - Get IRQ information from the DPDMAI
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ * @irq_index: The interrupt index to configure
-+ * @type: Interrupt type: 0 represents message interrupt
-+ * type (both irq_addr and irq_val are valid)
-+ * @irq_cfg: IRQ attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_get_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ int *type,
-+ struct dpdmai_irq_cfg *irq_cfg);
-+
-+/**
-+ * dpdmai_set_irq_enable() - Set overall interrupt state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Interrupt state - enable = 1, disable = 0
-+ *
-+ * Allows GPP software to control when interrupts are generated.
-+ * Each interrupt can have up to 32 causes. The enable/disable controls the
-+ * overall interrupt state. If the interrupt is disabled, no causes will cause
-+ * an interrupt.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_set_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t en);
-+
-+/**
-+ * dpdmai_get_irq_enable() - Get overall interrupt state
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Returned Interrupt state - enable = 1, disable = 0
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_get_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t *en);
-+
-+/**
-+ * dpdmai_set_irq_mask() - Set interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: event mask to trigger interrupt;
-+ * each bit:
-+ * 0 = ignore event
-+ * 1 = consider event for asserting IRQ
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_set_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t mask);
-+
-+/**
-+ * dpdmai_get_irq_mask() - Get interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: Returned event mask to trigger interrupt
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_get_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *mask);
-+
-+/**
-+ * dpdmai_get_irq_status() - Get the current status of any pending interrupts
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ * @irq_index: The interrupt index to configure
-+ * @status: Returned interrupts status - one bit per cause:
-+ * 0 = no interrupt pending
-+ * 1 = interrupt pending
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_get_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *status);
-+
-+/**
-+ * dpdmai_clear_irq_status() - Clear a pending interrupt's status
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ * @irq_index: The interrupt index to configure
-+ * @status: bits to clear (W1C) - one bit per cause:
-+ * 0 = don't change
-+ * 1 = clear status bit
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_clear_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t status);
-+
-+/**
-+ * struct dpdmai_attr - Structure representing DPDMAI attributes
-+ * @id: DPDMAI object ID
-+ * @version: DPDMAI version
-+ * @num_of_priorities: number of priorities
-+ */
-+struct dpdmai_attr {
-+ int id;
-+ /**
-+ * struct version - DPDMAI version
-+ * @major: DPDMAI major version
-+ * @minor: DPDMAI minor version
-+ */
-+ struct {
-+ uint16_t major;
-+ uint16_t minor;
-+ } version;
-+ uint8_t num_of_priorities;
-+};
-+
-+/**
-+ * dpdmai_get_attributes() - Retrieve DPDMAI attributes.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ * @attr: Returned object's attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpdmai_attr *attr);
-+
-+/**
-+ * enum dpdmai_dest - DPDMAI destination types
-+ * @DPDMAI_DEST_NONE: Unassigned destination; The queue is set in parked mode
-+ * and does not generate FQDAN notifications; user is expected to dequeue
-+ * from the queue based on polling or other user-defined method
-+ * @DPDMAI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
-+ * notifications to the specified DPIO; user is expected to dequeue
-+ * from the queue only after notification is received
-+ * @DPDMAI_DEST_DPCON: The queue is set in schedule mode and does not generate
-+ * FQDAN notifications, but is connected to the specified DPCON object;
-+ * user is expected to dequeue from the DPCON channel
-+ */
-+enum dpdmai_dest {
-+ DPDMAI_DEST_NONE = 0,
-+ DPDMAI_DEST_DPIO = 1,
-+ DPDMAI_DEST_DPCON = 2
-+};
-+
-+/**
-+ * struct dpdmai_dest_cfg - Structure representing DPDMAI destination parameters
-+ * @dest_type: Destination type
-+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
-+ * @priority: Priority selection within the DPIO or DPCON channel; valid values
-+ * are 0-1 or 0-7, depending on the number of priorities in that
-+ * channel; not relevant for 'DPDMAI_DEST_NONE' option
-+ */
-+struct dpdmai_dest_cfg {
-+ enum dpdmai_dest dest_type;
-+ int dest_id;
-+ uint8_t priority;
-+};
-+
-+/* DPDMAI queue modification options */
-+
-+/**
-+ * Select to modify the user's context associated with the queue
-+ */
-+#define DPDMAI_QUEUE_OPT_USER_CTX 0x00000001
-+
-+/**
-+ * Select to modify the queue's destination
-+ */
-+#define DPDMAI_QUEUE_OPT_DEST 0x00000002
-+
-+/**
-+ * struct dpdmai_rx_queue_cfg - DPDMAI RX queue configuration
-+ * @options: Flags representing the suggested modifications to the queue;
-+ * Use any combination of 'DPDMAI_QUEUE_OPT_<X>' flags
-+ * @user_ctx: User context value provided in the frame descriptor of each
-+ * dequeued frame;
-+ * valid only if 'DPDMAI_QUEUE_OPT_USER_CTX' is contained in 'options'
-+ * @dest_cfg: Queue destination parameters;
-+ * valid only if 'DPDMAI_QUEUE_OPT_DEST' is contained in 'options'
-+ */
-+struct dpdmai_rx_queue_cfg {
-+ uint32_t options;
-+ uint64_t user_ctx;
-+ struct dpdmai_dest_cfg dest_cfg;
-+};
-+
-+/**
-+ * dpdmai_set_rx_queue() - Set Rx queue configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ * @priority: Select the queue relative to number of
-+ * priorities configured at DPDMAI creation; use
-+ * DPDMAI_ALL_QUEUES to configure all Rx queues
-+ * identically.
-+ * @cfg: Rx queue configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t priority,
-+ const struct dpdmai_rx_queue_cfg *cfg);
-+
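/*
 * Illustrative usage sketch, not part of the original header: configure every
 * Rx queue identically to deliver FQDANs to one DPIO, using the
 * DPDMAI_ALL_QUEUES selector described above.  The DPIO id and user context
 * are placeholders.
 */
static int dpdmai_example_rx_setup(struct fsl_mc_io *mc_io, uint16_t token,
                                   int dpio_id)
{
        struct dpdmai_rx_queue_cfg cfg = { 0 };

        cfg.options = DPDMAI_QUEUE_OPT_USER_CTX | DPDMAI_QUEUE_OPT_DEST;
        cfg.user_ctx = (uint64_t)dpio_id;          /* any caller-chosen value */
        cfg.dest_cfg.dest_type = DPDMAI_DEST_DPIO;
        cfg.dest_cfg.dest_id = dpio_id;
        cfg.dest_cfg.priority = 0;

        /* One call covers all priorities/queues of this DPDMAI. */
        return dpdmai_set_rx_queue(mc_io, 0, token, DPDMAI_ALL_QUEUES, &cfg);
}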
-+/**
-+ * struct dpdmai_rx_queue_attr - Structure representing attributes of Rx queues
-+ * @user_ctx: User context value provided in the frame descriptor of each
-+ * dequeued frame
-+ * @dest_cfg: Queue destination configuration
-+ * @fqid: Virtual FQID value to be used for dequeue operations
-+ */
-+struct dpdmai_rx_queue_attr {
-+ uint64_t user_ctx;
-+ struct dpdmai_dest_cfg dest_cfg;
-+ uint32_t fqid;
-+};
-+
-+/**
-+ * dpdmai_get_rx_queue() - Retrieve Rx queue attributes.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ * @priority: Select the queue relative to number of
-+ * priorities configured at DPDMAI creation
-+ * @attr: Returned Rx queue attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t priority,
-+ struct dpdmai_rx_queue_attr *attr);
-+
-+/**
-+ * struct dpdmai_tx_queue_attr - Structure representing attributes of Tx queues
-+ * @fqid: Virtual FQID to be used for sending frames to DMA hardware
-+ */
-+struct dpdmai_tx_queue_attr {
-+ uint32_t fqid;
-+};
-+
-+/**
-+ * dpdmai_get_tx_queue() - Retrieve Tx queue attributes.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ * @priority: Select the queue relative to number of
-+ * priorities configured at DPDMAI creation
-+ * @attr: Returned Tx queue attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t priority,
-+ struct dpdmai_tx_queue_attr *attr);
-+
-+#endif /* __FSL_DPDMAI_H */
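/*
 * Illustrative sketch, not part of the original patch: a DPDMAI exposes one
 * Tx/Rx queue pair per configured priority, so a driver would typically walk
 * 'num_of_priorities' from dpdmai_get_attributes() to collect the FQIDs.
 * Only declarations from fsl_dpdmai.h above are used; the function name is
 * hypothetical.
 */
#include <stdint.h>
#include <fsl_dpdmai.h>

static int dpdmai_example_collect_fqids(struct fsl_mc_io *mc_io,
                                        uint16_t token,
                                        uint32_t tx_fqid[DPDMAI_PRIO_NUM],
                                        uint32_t rx_fqid[DPDMAI_PRIO_NUM])
{
        struct dpdmai_attr attr;
        struct dpdmai_rx_queue_attr rx_attr;
        struct dpdmai_tx_queue_attr tx_attr;
        uint8_t i;
        int err;

        err = dpdmai_get_attributes(mc_io, 0, token, &attr);
        if (err)
                return err;

        for (i = 0; i < attr.num_of_priorities; i++) {
                err = dpdmai_get_tx_queue(mc_io, 0, token, i, &tx_attr);
                if (err)
                        return err;
                tx_fqid[i] = tx_attr.fqid;  /* enqueue DMA jobs here */

                err = dpdmai_get_rx_queue(mc_io, 0, token, i, &rx_attr);
                if (err)
                        return err;
                rx_fqid[i] = rx_attr.fqid;  /* completions arrive here */
        }

        return 0;
}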
-diff --git a/drivers/net/dpaa2/mc/fsl_dpdmai_cmd.h b/drivers/net/dpaa2/mc/fsl_dpdmai_cmd.h
-new file mode 100644
-index 0000000..7c4a31a
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/fsl_dpdmai_cmd.h
-@@ -0,0 +1,191 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef _FSL_DPDMAI_CMD_H
-+#define _FSL_DPDMAI_CMD_H
-+
-+/* DPDMAI Version */
-+#define DPDMAI_VER_MAJOR 2
-+#define DPDMAI_VER_MINOR 2
-+
-+/* Command IDs */
-+#define DPDMAI_CMDID_CLOSE 0x800
-+#define DPDMAI_CMDID_OPEN 0x80E
-+#define DPDMAI_CMDID_CREATE 0x90E
-+#define DPDMAI_CMDID_DESTROY 0x900
-+
-+#define DPDMAI_CMDID_ENABLE 0x002
-+#define DPDMAI_CMDID_DISABLE 0x003
-+#define DPDMAI_CMDID_GET_ATTR 0x004
-+#define DPDMAI_CMDID_RESET 0x005
-+#define DPDMAI_CMDID_IS_ENABLED 0x006
-+
-+#define DPDMAI_CMDID_SET_IRQ 0x010
-+#define DPDMAI_CMDID_GET_IRQ 0x011
-+#define DPDMAI_CMDID_SET_IRQ_ENABLE 0x012
-+#define DPDMAI_CMDID_GET_IRQ_ENABLE 0x013
-+#define DPDMAI_CMDID_SET_IRQ_MASK 0x014
-+#define DPDMAI_CMDID_GET_IRQ_MASK 0x015
-+#define DPDMAI_CMDID_GET_IRQ_STATUS 0x016
-+#define DPDMAI_CMDID_CLEAR_IRQ_STATUS 0x017
-+
-+#define DPDMAI_CMDID_SET_RX_QUEUE 0x1A0
-+#define DPDMAI_CMDID_GET_RX_QUEUE 0x1A1
-+#define DPDMAI_CMDID_GET_TX_QUEUE 0x1A2
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_CMD_OPEN(cmd, dpdmai_id) \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, dpdmai_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_CMD_CREATE(cmd, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->priorities[0]);\
-+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->priorities[1]);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_RSP_IS_ENABLED(cmd, en) \
-+ MC_RSP_OP(cmd, 0, 0, 1, int, en)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
-+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
-+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_CMD_GET_IRQ(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_RSP_GET_IRQ(cmd, type, irq_cfg) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
-+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
-+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
-+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_CMD_SET_IRQ_ENABLE(cmd, irq_index, enable_state) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, enable_state); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_RSP_GET_IRQ_ENABLE(cmd, enable_state) \
-+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, enable_state)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_CMD_GET_IRQ_MASK(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_RSP_GET_IRQ_MASK(cmd, mask) \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_RSP_GET_IRQ_STATUS(cmd, status) \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_RSP_GET_ATTR(cmd, attr) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id); \
-+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->num_of_priorities); \
-+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\
-+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_CMD_SET_RX_QUEUE(cmd, priority, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority); \
-+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority); \
-+ MC_CMD_OP(cmd, 0, 48, 4, enum dpdmai_dest, cfg->dest_cfg.dest_type); \
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \
-+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_CMD_GET_RX_QUEUE(cmd, priority) \
-+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_RSP_GET_RX_QUEUE(cmd, attr) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id);\
-+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\
-+ MC_RSP_OP(cmd, 0, 48, 4, enum dpdmai_dest, attr->dest_cfg.dest_type);\
-+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx);\
-+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->fqid);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_CMD_GET_TX_QUEUE(cmd, priority) \
-+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMAI_RSP_GET_TX_QUEUE(cmd, attr) \
-+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->fqid)
-+
-+#endif /* _FSL_DPDMAI_CMD_H */
-diff --git a/drivers/net/dpaa2/mc/fsl_dpdmux.h b/drivers/net/dpaa2/mc/fsl_dpdmux.h
-new file mode 100644
-index 0000000..455a042
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/fsl_dpdmux.h
-@@ -0,0 +1,724 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef __FSL_DPDMUX_H
-+#define __FSL_DPDMUX_H
-+
-+#include <fsl_net.h>
-+
-+struct fsl_mc_io;
-+
-+/* Data Path Demux API
-+ * Contains API for handling DPDMUX topology and functionality
-+ */
-+
-+/**
-+ * dpdmux_open() - Open a control session for the specified object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @dpdmux_id: DPDMUX unique ID
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * This function can be used to open a control session for an
-+ * already created object; an object may have been declared in
-+ * the DPL or by calling the dpdmux_create() function.
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent commands for
-+ * this specific object.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_open(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ int dpdmux_id,
-+ uint16_t *token);
-+
-+/**
-+ * dpdmux_close() - Close the control session of the object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ *
-+ * After this function is called, no further operations are
-+ * allowed on the object without opening a new control session.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_close(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * DPDMUX general options
-+ */
-+
-+/**
-+ * Enable bridging between internal interfaces
-+ */
-+#define DPDMUX_OPT_BRIDGE_EN 0x0000000000000002ULL
-+
-+#define DPDMUX_IRQ_INDEX_IF 0x0000
-+#define DPDMUX_IRQ_INDEX 0x0001
-+
-+/**
-+ * IRQ event - Indicates that the link state changed
-+ */
-+#define DPDMUX_IRQ_EVENT_LINK_CHANGED 0x0001
-+
-+/**
-+ * enum dpdmux_manip - DPDMUX manipulation operations
-+ * @DPDMUX_MANIP_NONE: No manipulation on frames
-+ * @DPDMUX_MANIP_ADD_REMOVE_S_VLAN: Add S-VLAN on egress, remove it on ingress
-+ */
-+enum dpdmux_manip {
-+ DPDMUX_MANIP_NONE = 0x0,
-+ DPDMUX_MANIP_ADD_REMOVE_S_VLAN = 0x1
-+};
-+
-+/**
-+ * enum dpdmux_method - DPDMUX method options
-+ * @DPDMUX_METHOD_NONE: no DPDMUX method
-+ * @DPDMUX_METHOD_C_VLAN_MAC: DPDMUX based on C-VLAN and MAC address
-+ * @DPDMUX_METHOD_MAC: DPDMUX based on MAC address
-+ * @DPDMUX_METHOD_C_VLAN: DPDMUX based on C-VLAN
-+ * @DPDMUX_METHOD_S_VLAN: DPDMUX based on S-VLAN
-+ */
-+enum dpdmux_method {
-+ DPDMUX_METHOD_NONE = 0x0,
-+ DPDMUX_METHOD_C_VLAN_MAC = 0x1,
-+ DPDMUX_METHOD_MAC = 0x2,
-+ DPDMUX_METHOD_C_VLAN = 0x3,
-+ DPDMUX_METHOD_S_VLAN = 0x4
-+};
-+
-+/**
-+ * struct dpdmux_cfg - DPDMUX configuration parameters
-+ * @method: Defines the operation method for the DPDMUX address table
-+ * @manip: Required manipulation operation
-+ * @num_ifs: Number of interfaces (excluding the uplink interface)
-+ * @adv: Advanced parameters; default is all zeros;
-+ * use this structure to change default settings
-+ */
-+struct dpdmux_cfg {
-+ enum dpdmux_method method;
-+ enum dpdmux_manip manip;
-+ uint16_t num_ifs;
-+ /**
-+ * struct adv - Advanced parameters
-+ * @options: DPDMUX options - combination of 'DPDMUX_OPT_<X>' flags
-+ * @max_dmat_entries: Maximum entries in DPDMUX address table
-+ * 0 - indicates default: 64 entries per interface.
-+ * @max_mc_groups: Number of multicast groups in DPDMUX table
-+ * 0 - indicates default: 32 multicast groups
-+ * @max_vlan_ids: Maximum number of VLAN IDs allowed in the system -
-+ * relevant only when working in the MAC+VLAN method.
-+ * 0 - indicates default: 16 VLAN IDs.
-+ */
-+ struct {
-+ uint64_t options;
-+ uint16_t max_dmat_entries;
-+ uint16_t max_mc_groups;
-+ uint16_t max_vlan_ids;
-+ } adv;
-+};
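/*
 * Illustrative sketch, not part of the original header: a MAC-based demux
 * with four downlink interfaces, no header manipulation, and default advanced
 * parameters (an all-zero 'adv' keeps the 64-entry/32-group/16-VLAN defaults
 * documented above).  The interface count is a placeholder.
 */
static const struct dpdmux_cfg dpdmux_example_cfg = {
        .method  = DPDMUX_METHOD_MAC,
        .manip   = DPDMUX_MANIP_NONE,
        .num_ifs = 4,               /* the uplink interface is not counted */
        /* .adv left zeroed -> defaults */
};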
-+
-+/**
-+ * dpdmux_create() - Create the DPDMUX object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @cfg: Configuration structure
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * Create the DPDMUX object, allocate required resources and
-+ * perform required initialization.
-+ *
-+ * The object can be created either by declaring it in the
-+ * DPL file, or by calling this function.
-+ *
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent calls to
-+ * this specific object. For objects that are created using the
-+ * DPL file, call dpdmux_open() function to get an authentication
-+ * token first.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_create(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ const struct dpdmux_cfg *cfg,
-+ uint16_t *token);
-+
-+/**
-+ * dpdmux_destroy() - Destroy the DPDMUX object and release all its resources.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dpdmux_destroy(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * dpdmux_enable() - Enable DPDMUX functionality
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * dpdmux_disable() - Disable DPDMUX functionality
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_disable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * dpdmux_is_enabled() - Check if the DPDMUX is enabled.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ * @en: Returns '1' if object is enabled; '0' otherwise
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_is_enabled(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int *en);
-+
-+/**
-+ * dpdmux_reset() - Reset the DPDMUX, returns the object to initial state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_reset(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * struct dpdmux_irq_cfg - IRQ configuration
-+ * @addr: Address that must be written to signal a message-based interrupt
-+ * @val: Value to write into irq_addr address
-+ * @irq_num: A user defined number associated with this IRQ
-+ */
-+struct dpdmux_irq_cfg {
-+ uint64_t addr;
-+ uint32_t val;
-+ int irq_num;
-+};
-+
-+/**
-+ * dpdmux_set_irq() - Set IRQ information for the DPDMUX to trigger an interrupt.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ * @irq_index: Identifies the interrupt index to configure
-+ * @irq_cfg: IRQ configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_set_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ struct dpdmux_irq_cfg *irq_cfg);
-+
-+/**
-+ * dpdmux_get_irq() - Get IRQ information from the DPDMUX.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ * @irq_index: The interrupt index to configure
-+ * @type: Interrupt type: 0 represents message interrupt
-+ * type (both irq_addr and irq_val are valid)
-+ * @irq_cfg: IRQ attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_get_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ int *type,
-+ struct dpdmux_irq_cfg *irq_cfg);
-+
-+/**
-+ * dpdmux_set_irq_enable() - Set overall interrupt state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Interrupt state - enable = 1, disable = 0
-+ *
-+ * Allows GPP software to control when interrupts are generated.
-+ * Each interrupt can have up to 32 causes. The enable/disable controls the
-+ * overall interrupt state. If the interrupt is disabled, no cause will
-+ * raise an interrupt.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_set_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t en);
-+
-+/**
-+ * dpdmux_get_irq_enable() - Get overall interrupt state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Returned interrupt state - enable = 1, disable = 0
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_get_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t *en);
-+
-+/**
-+ * dpdmux_set_irq_mask() - Set interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: event mask to trigger interrupt;
-+ * each bit:
-+ * 0 = ignore event
-+ * 1 = consider event for asserting IRQ
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_set_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t mask);
-+
-+/**
-+ * dpdmux_get_irq_mask() - Get interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: Returned event mask to trigger interrupt
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_get_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *mask);
-+
-+/**
-+ * dpdmux_get_irq_status() - Get the current status of any pending interrupts.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ * @irq_index: The interrupt index to configure
-+ * @status: Returned interrupts status - one bit per cause:
-+ * 0 = no interrupt pending
-+ * 1 = interrupt pending
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_get_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *status);
-+
-+/**
-+ * dpdmux_clear_irq_status() - Clear a pending interrupt's status
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ * @irq_index: The interrupt index to configure
-+ * @status: bits to clear (W1C) - one bit per cause:
-+ * 0 = don't change
-+ * 1 = clear status bit
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_clear_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t status);
-+
-+/**
-+ * struct dpdmux_attr - Structure representing DPDMUX attributes
-+ * @id: DPDMUX object ID
-+ * @version: DPDMUX version
-+ * @options: Configuration options (bitmap)
-+ * @method: DPDMUX address table method
-+ * @manip: DPDMUX manipulation type
-+ * @num_ifs: Number of interfaces (excluding the uplink interface)
-+ * @mem_size: DPDMUX frame storage memory size
-+ */
-+struct dpdmux_attr {
-+ int id;
-+ /**
-+ * struct version - DPDMUX version
-+ * @major: DPDMUX major version
-+ * @minor: DPDMUX minor version
-+ */
-+ struct {
-+ uint16_t major;
-+ uint16_t minor;
-+ } version;
-+ uint64_t options;
-+ enum dpdmux_method method;
-+ enum dpdmux_manip manip;
-+ uint16_t num_ifs;
-+ uint16_t mem_size;
-+};
-+
-+/**
-+ * dpdmux_get_attributes() - Retrieve DPDMUX attributes
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ * @attr: Returned object's attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpdmux_attr *attr);
-+
-+/**
-+ * dpdmux_ul_set_max_frame_length() - Set the maximum frame length in DPDMUX
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ * @max_frame_length: The required maximum frame length
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_ul_set_max_frame_length(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t max_frame_length);
-+
-+/**
-+ * enum dpdmux_counter_type - Counter types
-+ * @DPDMUX_CNT_ING_FRAME: Counts ingress frames
-+ * @DPDMUX_CNT_ING_BYTE: Counts ingress bytes
-+ * @DPDMUX_CNT_ING_FLTR_FRAME: Counts filtered ingress frames
-+ * @DPDMUX_CNT_ING_FRAME_DISCARD: Counts discarded ingress frames
-+ * @DPDMUX_CNT_ING_MCAST_FRAME: Counts ingress multicast frames
-+ * @DPDMUX_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes
-+ * @DPDMUX_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames
-+ * @DPDMUX_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes
-+ * @DPDMUX_CNT_EGR_FRAME: Counts egress frames
-+ * @DPDMUX_CNT_EGR_BYTE: Counts egress bytes
-+ * @DPDMUX_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames
-+ */
-+enum dpdmux_counter_type {
-+ DPDMUX_CNT_ING_FRAME = 0x0,
-+ DPDMUX_CNT_ING_BYTE = 0x1,
-+ DPDMUX_CNT_ING_FLTR_FRAME = 0x2,
-+ DPDMUX_CNT_ING_FRAME_DISCARD = 0x3,
-+ DPDMUX_CNT_ING_MCAST_FRAME = 0x4,
-+ DPDMUX_CNT_ING_MCAST_BYTE = 0x5,
-+ DPDMUX_CNT_ING_BCAST_FRAME = 0x6,
-+ DPDMUX_CNT_ING_BCAST_BYTES = 0x7,
-+ DPDMUX_CNT_EGR_FRAME = 0x8,
-+ DPDMUX_CNT_EGR_BYTE = 0x9,
-+ DPDMUX_CNT_EGR_FRAME_DISCARD = 0xa
-+};
-+
-+/**
-+ * enum dpdmux_accepted_frames_type - DPDMUX frame types
-+ * @DPDMUX_ADMIT_ALL: The device accepts VLAN tagged, untagged and
-+ * priority-tagged frames
-+ * @DPDMUX_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or
-+ * priority-tagged frames that are received on this
-+ * interface
-+ * @DPDMUX_ADMIT_ONLY_UNTAGGED: Untagged frames or priority-tagged frames
-+ * received on this interface are accepted
-+ */
-+enum dpdmux_accepted_frames_type {
-+ DPDMUX_ADMIT_ALL = 0,
-+ DPDMUX_ADMIT_ONLY_VLAN_TAGGED = 1,
-+ DPDMUX_ADMIT_ONLY_UNTAGGED = 2
-+};
-+
-+/**
-+ * enum dpdmux_action - DPDMUX action for un-accepted frames
-+ * @DPDMUX_ACTION_DROP: Drop un-accepted frames
-+ * @DPDMUX_ACTION_REDIRECT_TO_CTRL: Redirect un-accepted frames to the
-+ * control interface
-+ */
-+enum dpdmux_action {
-+ DPDMUX_ACTION_DROP = 0,
-+ DPDMUX_ACTION_REDIRECT_TO_CTRL = 1
-+};
-+
-+/**
-+ * struct dpdmux_accepted_frames - Frame types configuration
-+ * @type: Defines ingress accepted frames
-+ * @unaccept_act: Defines action on frames not accepted
-+ */
-+struct dpdmux_accepted_frames {
-+ enum dpdmux_accepted_frames_type type;
-+ enum dpdmux_action unaccept_act;
-+};
-+
-+/**
-+ * dpdmux_if_set_accepted_frames() - Set the accepted frame types
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ * @if_id: Interface ID (0 for uplink, or 1-num_ifs);
-+ * @cfg: Frame types configuration
-+ *
-+ * If 'DPDMUX_ADMIT_ONLY_VLAN_TAGGED' is set - untagged frames or
-+ * priority-tagged frames are discarded.
-+ * If 'DPDMUX_ADMIT_ONLY_UNTAGGED' is set - untagged frames or
-+ * priority-tagged frames are accepted.
-+ * If 'DPDMUX_ADMIT_ALL' is set (default mode) - all VLAN-tagged,
-+ * untagged and priority-tagged frames are accepted.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_if_set_accepted_frames(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ const struct dpdmux_accepted_frames *cfg);
-+
-+/**
-+ * struct dpdmux_if_attr - Structure representing frame types configuration
-+ * @rate: Configured interface rate (in bits per second)
-+ * @enabled: Indicates if interface is enabled
-+ * @accept_frame_type: Indicates type of accepted frames for the interface
-+ */
-+struct dpdmux_if_attr {
-+ uint32_t rate;
-+ int enabled;
-+ enum dpdmux_accepted_frames_type accept_frame_type;
-+};
-+
-+/**
-+ * dpdmux_if_get_attributes() - Obtain DPDMUX interface attributes
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ * @if_id: Interface ID (0 for uplink, or 1-num_ifs);
-+ * @attr: Interface attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_if_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ struct dpdmux_if_attr *attr);
-+
-+/**
-+ * struct dpdmux_l2_rule - Structure representing L2 rule
-+ * @mac_addr: MAC address
-+ * @vlan_id: VLAN ID
-+ */
-+struct dpdmux_l2_rule {
-+ uint8_t mac_addr[6];
-+ uint16_t vlan_id;
-+};
-+
-+/**
-+ * dpdmux_if_remove_l2_rule() - Remove L2 rule from DPDMUX table
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ * @if_id: Destination interface ID
-+ * @rule: L2 rule
-+ *
-+ * Function removes an L2 rule from the DPDMUX table
-+ * or removes an interface from an existing multicast address
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_if_remove_l2_rule(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ const struct dpdmux_l2_rule *rule);
-+
-+/**
-+ * dpdmux_if_add_l2_rule() - Add L2 rule into DPDMUX table
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ * @if_id: Destination interface ID
-+ * @rule: L2 rule
-+ *
-+ * Function adds an L2 rule into the DPDMUX table
-+ * or adds an interface to an existing multicast address
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_if_add_l2_rule(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ const struct dpdmux_l2_rule *rule);
-+
-+/**
-+* dpdmux_if_get_counter() - Function obtains a specific counter of an interface
-+* @mc_io: Pointer to MC portal's I/O object
-+* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+* @token: Token of DPDMUX object
-+* @if_id: Interface ID
-+* @counter_type: Counter type
-+* @counter: Returned specific counter information
-+*
-+* Return: '0' on Success; Error code otherwise.
-+*/
-+int dpdmux_if_get_counter(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ enum dpdmux_counter_type counter_type,
-+ uint64_t *counter);
-+
-+/**
-+* dpdmux_ul_reset_counters() - Function resets the uplink interface counters
-+* @mc_io: Pointer to MC portal's I/O object
-+* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+* @token: Token of DPDMUX object
-+*
-+* Return: '0' on Success; Error code otherwise.
-+*/
-+int dpdmux_ul_reset_counters(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * Enable auto-negotiation
-+ */
-+#define DPDMUX_LINK_OPT_AUTONEG 0x0000000000000001ULL
-+/**
-+ * Enable half-duplex mode
-+ */
-+#define DPDMUX_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
-+/**
-+ * Enable pause frames
-+ */
-+#define DPDMUX_LINK_OPT_PAUSE 0x0000000000000004ULL
-+/**
-+ * Enable asymmetric pause frames
-+ */
-+#define DPDMUX_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
-+
-+/**
-+ * struct dpdmux_link_cfg - Structure representing DPDMUX link configuration
-+ * @rate: Rate
-+ * @options: Mask of available options; use 'DPDMUX_LINK_OPT_<X>' values
-+ */
-+struct dpdmux_link_cfg {
-+ uint32_t rate;
-+ uint64_t options;
-+};
-+
-+/**
-+ * dpdmux_if_set_link_cfg() - Set the link configuration.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ * @if_id: Interface ID
-+ * @cfg: Link configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_if_set_link_cfg(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ struct dpdmux_link_cfg *cfg);
-+/**
-+ * struct dpdmux_link_state - Structure representing DPDMUX link state
-+ * @rate: Rate
-+ * @options: Mask of available options; use 'DPDMUX_LINK_OPT_<X>' values
-+ * @up: 0 - down, 1 - up
-+ */
-+struct dpdmux_link_state {
-+ uint32_t rate;
-+ uint64_t options;
-+ int up;
-+};
-+
-+/**
-+ * dpdmux_if_get_link_state() - Return the link state
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMUX object
-+ * @if_id: Interface ID
-+ * @state: Returned link state
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmux_if_get_link_state(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ struct dpdmux_link_state *state);
-+
-+#endif /* __FSL_DPDMUX_H */
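The header above describes the full DPDMUX lifecycle: open (or create), query attributes, configure the uplink, enable, and eventually close the control session. The fragment below is a minimal illustrative sketch of that flow, not part of the removed patch; it assumes an already initialised struct fsl_mc_io portal, an object ID taken from the DPL, and a CMD_PRI_LOW priority flag provided by the generic MC command header.

#include <fsl_dpdmux.h>

/* Hypothetical helper: bring up a DPDMUX object that was declared in the
 * DPL. 'mc_io' is an initialised MC portal, 'dpdmux_id' the object ID;
 * CMD_PRI_LOW is assumed to come from the MC command header. */
static int dpdmux_bring_up(struct fsl_mc_io *mc_io, int dpdmux_id)
{
        struct dpdmux_attr attr;
        uint16_t token;
        int err;

        err = dpdmux_open(mc_io, CMD_PRI_LOW, dpdmux_id, &token);
        if (err)
                return err;

        /* Learn how many downlink interfaces (attr.num_ifs) exist;
         * a real driver would iterate over them here. */
        err = dpdmux_get_attributes(mc_io, CMD_PRI_LOW, token, &attr);
        if (err)
                goto out;

        /* Allow standard Ethernet frames plus one VLAN tag on the uplink. */
        err = dpdmux_ul_set_max_frame_length(mc_io, CMD_PRI_LOW, token, 1522);
        if (err)
                goto out;

        err = dpdmux_enable(mc_io, CMD_PRI_LOW, token);
out:
        /* Closing the control session does not disable the object. */
        dpdmux_close(mc_io, CMD_PRI_LOW, token);
        return err;
}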
-diff --git a/drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h b/drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h
-new file mode 100644
-index 0000000..0a5cf17
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h
-@@ -0,0 +1,256 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef _FSL_DPDMUX_CMD_H
-+#define _FSL_DPDMUX_CMD_H
-+
-+/* DPDMUX Version */
-+#define DPDMUX_VER_MAJOR 5
-+#define DPDMUX_VER_MINOR 0
-+
-+/* Command IDs */
-+#define DPDMUX_CMDID_CLOSE 0x800
-+#define DPDMUX_CMDID_OPEN 0x806
-+#define DPDMUX_CMDID_CREATE 0x906
-+#define DPDMUX_CMDID_DESTROY 0x900
-+
-+#define DPDMUX_CMDID_ENABLE 0x002
-+#define DPDMUX_CMDID_DISABLE 0x003
-+#define DPDMUX_CMDID_GET_ATTR 0x004
-+#define DPDMUX_CMDID_RESET 0x005
-+#define DPDMUX_CMDID_IS_ENABLED 0x006
-+
-+#define DPDMUX_CMDID_SET_IRQ 0x010
-+#define DPDMUX_CMDID_GET_IRQ 0x011
-+#define DPDMUX_CMDID_SET_IRQ_ENABLE 0x012
-+#define DPDMUX_CMDID_GET_IRQ_ENABLE 0x013
-+#define DPDMUX_CMDID_SET_IRQ_MASK 0x014
-+#define DPDMUX_CMDID_GET_IRQ_MASK 0x015
-+#define DPDMUX_CMDID_GET_IRQ_STATUS 0x016
-+#define DPDMUX_CMDID_CLEAR_IRQ_STATUS 0x017
-+
-+#define DPDMUX_CMDID_UL_SET_MAX_FRAME_LENGTH 0x0a1
-+
-+#define DPDMUX_CMDID_UL_RESET_COUNTERS 0x0a3
-+
-+#define DPDMUX_CMDID_IF_SET_ACCEPTED_FRAMES 0x0a7
-+#define DPDMUX_CMDID_IF_GET_ATTR 0x0a8
-+
-+#define DPDMUX_CMDID_IF_ADD_L2_RULE 0x0b0
-+#define DPDMUX_CMDID_IF_REMOVE_L2_RULE 0x0b1
-+#define DPDMUX_CMDID_IF_GET_COUNTER 0x0b2
-+#define DPDMUX_CMDID_IF_SET_LINK_CFG 0x0b3
-+#define DPDMUX_CMDID_IF_GET_LINK_STATE 0x0b4
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMUX_CMD_OPEN(cmd, dpdmux_id) \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, dpdmux_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMUX_CMD_CREATE(cmd, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 8, enum dpdmux_method, cfg->method);\
-+ MC_CMD_OP(cmd, 0, 8, 8, enum dpdmux_manip, cfg->manip);\
-+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs);\
-+ MC_CMD_OP(cmd, 1, 0, 16, uint16_t, cfg->adv.max_dmat_entries);\
-+ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, cfg->adv.max_mc_groups);\
-+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, cfg->adv.max_vlan_ids);\
-+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->adv.options);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMUX_RSP_IS_ENABLED(cmd, en) \
-+ MC_RSP_OP(cmd, 0, 0, 1, int, en)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMUX_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
-+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
-+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMUX_CMD_GET_IRQ(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMUX_RSP_GET_IRQ(cmd, type, irq_cfg) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
-+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \
-+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
-+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMUX_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en);\
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMUX_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMUX_RSP_GET_IRQ_ENABLE(cmd, en) \
-+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMUX_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMUX_CMD_GET_IRQ_MASK(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMUX_RSP_GET_IRQ_MASK(cmd, mask) \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMUX_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMUX_RSP_GET_IRQ_STATUS(cmd, status) \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMUX_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
-+} while (0)
-+
-+#define DPDMUX_RSP_GET_ATTR(cmd, attr) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 8, enum dpdmux_method, attr->method);\
-+ MC_RSP_OP(cmd, 0, 8, 8, enum dpdmux_manip, attr->manip);\
-+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, attr->num_ifs);\
-+ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->mem_size);\
-+ MC_RSP_OP(cmd, 2, 0, 32, int, attr->id);\
-+ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, attr->options);\
-+ MC_RSP_OP(cmd, 4, 0, 16, uint16_t, attr->version.major);\
-+ MC_RSP_OP(cmd, 4, 16, 16, uint16_t, attr->version.minor);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMUX_CMD_UL_SET_MAX_FRAME_LENGTH(cmd, max_frame_length) \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, max_frame_length)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMUX_CMD_IF_SET_ACCEPTED_FRAMES(cmd, if_id, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
-+ MC_CMD_OP(cmd, 0, 16, 4, enum dpdmux_accepted_frames_type, cfg->type);\
-+ MC_CMD_OP(cmd, 0, 20, 4, enum dpdmux_action, \
-+ cfg->unaccept_act);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMUX_CMD_IF_GET_ATTR(cmd, if_id) \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMUX_RSP_IF_GET_ATTR(cmd, attr) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 56, 4, enum dpdmux_accepted_frames_type, \
-+ attr->accept_frame_type);\
-+ MC_RSP_OP(cmd, 0, 24, 1, int, attr->enabled);\
-+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->rate);\
-+} while (0)
-+
-+#define DPDMUX_CMD_IF_REMOVE_L2_RULE(cmd, if_id, l2_rule) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
-+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, l2_rule->mac_addr[5]);\
-+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, l2_rule->mac_addr[4]);\
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, l2_rule->mac_addr[3]);\
-+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, l2_rule->mac_addr[2]);\
-+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, l2_rule->mac_addr[1]);\
-+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, l2_rule->mac_addr[0]);\
-+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, l2_rule->vlan_id);\
-+} while (0)
-+
-+#define DPDMUX_CMD_IF_ADD_L2_RULE(cmd, if_id, l2_rule) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
-+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, l2_rule->mac_addr[5]);\
-+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, l2_rule->mac_addr[4]);\
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, l2_rule->mac_addr[3]);\
-+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, l2_rule->mac_addr[2]);\
-+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, l2_rule->mac_addr[1]);\
-+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, l2_rule->mac_addr[0]);\
-+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, l2_rule->vlan_id);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMUX_CMD_IF_GET_COUNTER(cmd, if_id, counter_type) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
-+ MC_CMD_OP(cmd, 0, 16, 8, enum dpdmux_counter_type, counter_type);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMUX_RSP_IF_GET_COUNTER(cmd, counter) \
-+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, counter)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMUX_CMD_IF_SET_LINK_CFG(cmd, if_id, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
-+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->rate);\
-+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->options);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMUX_CMD_IF_GET_LINK_STATE(cmd, if_id) \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPDMUX_RSP_IF_GET_LINK_STATE(cmd, state) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 32, 1, int, state->up);\
-+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, state->rate);\
-+ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, state->options);\
-+} while (0)
-+
-+#endif /* _FSL_DPDMUX_CMD_H */
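Each DPDMUX_CMD_/DPDMUX_RSP_ macro above is only a field map: a parameter index, bit offset and bit width inside the 64-bit command parameter words exchanged with the MC firmware. The real MC_CMD_OP/MC_RSP_OP helpers live in the generic MC command header; the fragment below is a hedged sketch of the packing they are expected to perform, using a hypothetical mc_command_sketch layout rather than the real command structure.

#include <stdint.h>

/* Hypothetical stand-in for the MC command buffer: one 64-bit header
 * word followed by seven 64-bit parameter words. */
struct mc_command_sketch {
        uint64_t header;
        uint64_t params[7];
};

/* Pack 'val' into bits [offset, offset + width) of parameter 'param',
 * mirroring what MC_CMD_OP(cmd, param, offset, width, type, arg) is
 * expected to do. */
static inline void mc_cmd_pack(struct mc_command_sketch *cmd, int param,
                               int offset, int width, uint64_t val)
{
        uint64_t mask = (width == 64) ? ~0ULL : ((1ULL << width) - 1);

        cmd->params[param] &= ~(mask << offset);
        cmd->params[param] |= (val & mask) << offset;
}

/* Example: DPDMUX_CMD_OPEN places the 32-bit object ID at parameter 0,
 * offset 0, width 32. */
static inline void dpdmux_cmd_open_sketch(struct mc_command_sketch *cmd,
                                          int dpdmux_id)
{
        mc_cmd_pack(cmd, 0, 0, 32, (uint64_t)(uint32_t)dpdmux_id);
}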
-diff --git a/drivers/net/dpaa2/mc/fsl_dpio.h b/drivers/net/dpaa2/mc/fsl_dpio.h
-new file mode 100644
-index 0000000..88a492f
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/fsl_dpio.h
-@@ -0,0 +1,460 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef __FSL_DPIO_H
-+#define __FSL_DPIO_H
-+
-+/* Data Path I/O Portal API
-+ * Contains initialization APIs and runtime control APIs for DPIO
-+ */
-+
-+struct fsl_mc_io;
-+
-+/**
-+ * dpio_open() - Open a control session for the specified object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @dpio_id: DPIO unique ID
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * This function can be used to open a control session for an
-+ * already created object; an object may have been declared in
-+ * the DPL or by calling the dpio_create() function.
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent commands for
-+ * this specific object.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpio_open(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ int dpio_id,
-+ uint16_t *token);
-+
-+/**
-+ * dpio_close() - Close the control session of the object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPIO object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpio_close(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * enum dpio_channel_mode - DPIO notification channel mode
-+ * @DPIO_NO_CHANNEL: No support for notification channel
-+ * @DPIO_LOCAL_CHANNEL: Notifications on data availability can be received by a
-+ * dedicated channel in the DPIO; user should point the queue's
-+ * destination in the relevant interface to this DPIO
-+ */
-+enum dpio_channel_mode {
-+ DPIO_NO_CHANNEL = 0,
-+ DPIO_LOCAL_CHANNEL = 1,
-+};
-+
-+/**
-+ * struct dpio_cfg - Structure representing DPIO configuration
-+ * @channel_mode: Notification channel mode
-+ * @num_priorities: Number of priorities for the notification channel (1-8);
-+ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL'
-+ */
-+struct dpio_cfg {
-+ enum dpio_channel_mode channel_mode;
-+ uint8_t num_priorities;
-+};
-+
-+/**
-+ * dpio_create() - Create the DPIO object.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @cfg: Configuration structure
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * Create the DPIO object, allocate required resources and
-+ * perform required initialization.
-+ *
-+ * The object can be created either by declaring it in the
-+ * DPL file, or by calling this function.
-+ *
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent calls to
-+ * this specific object. For objects that are created using the
-+ * DPL file, call dpio_open() function to get an authentication
-+ * token first.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpio_create(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ const struct dpio_cfg *cfg,
-+ uint16_t *token);
-+
-+/**
-+ * dpio_destroy() - Destroy the DPIO object and release all its resources.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPIO object
-+ *
-+ * Return: '0' on Success; Error code otherwise
-+ */
-+int dpio_destroy(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * dpio_enable() - Enable the DPIO, allow I/O portal operations.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPIO object
-+ *
-+ * Return: '0' on Success; Error code otherwise
-+ */
-+int dpio_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * dpio_disable() - Disable the DPIO, stop any I/O portal operation.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPIO object
-+ *
-+ * Return: '0' on Success; Error code otherwise
-+ */
-+int dpio_disable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * dpio_is_enabled() - Check if the DPIO is enabled.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPIO object
-+ * @en: Returns '1' if object is enabled; '0' otherwise
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpio_is_enabled(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int *en);
-+
-+/**
-+ * dpio_reset() - Reset the DPIO, returns the object to initial state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPIO object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpio_reset(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * dpio_set_stashing_destination() - Set the stashing destination.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPIO object
-+ * @sdest: stashing destination value
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpio_set_stashing_destination(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t sdest);
-+
-+/**
-+ * dpio_get_stashing_destination() - Get the stashing destination.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPIO object
-+ * @sdest: Returns the stashing destination value
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpio_get_stashing_destination(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t *sdest);
-+
-+/**
-+ * dpio_add_static_dequeue_channel() - Add a static dequeue channel.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPIO object
-+ * @dpcon_id: DPCON object ID
-+ * @channel_index: Returned channel index to be used in qbman API
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpio_add_static_dequeue_channel(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int dpcon_id,
-+ uint8_t *channel_index);
-+
-+/**
-+ * dpio_remove_static_dequeue_channel() - Remove a static dequeue channel.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPIO object
-+ * @dpcon_id: DPCON object ID
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpio_remove_static_dequeue_channel(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int dpcon_id);
-+
-+/**
-+ * DPIO IRQ Index and Events
-+ */
-+
-+/**
-+ * Irq software-portal index
-+ */
-+#define DPIO_IRQ_SWP_INDEX 0
-+
-+/**
-+ * struct dpio_irq_cfg - IRQ configuration
-+ * @addr: Address that must be written to signal a message-based interrupt
-+ * @val: Value to write into irq_addr address
-+ * @irq_num: A user defined number associated with this IRQ
-+ */
-+struct dpio_irq_cfg {
-+ uint64_t addr;
-+ uint32_t val;
-+ int irq_num;
-+};
-+
-+/**
-+ * dpio_set_irq() - Set IRQ information for the DPIO to trigger an interrupt.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPIO object
-+ * @irq_index: Identifies the interrupt index to configure
-+ * @irq_cfg: IRQ configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpio_set_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ struct dpio_irq_cfg *irq_cfg);
-+
-+/**
-+ * dpio_get_irq() - Get IRQ information from the DPIO.
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPIO object
-+ * @irq_index: The interrupt index to configure
-+ * @type: Interrupt type: 0 represents message interrupt
-+ * type (both irq_addr and irq_val are valid)
-+ * @irq_cfg: IRQ attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpio_get_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ int *type,
-+ struct dpio_irq_cfg *irq_cfg);
-+
-+/**
-+ * dpio_set_irq_enable() - Set overall interrupt state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPIO object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Interrupt state - enable = 1, disable = 0
-+ *
-+ * Allows GPP software to control when interrupts are generated.
-+ * Each interrupt can have up to 32 causes. The enable/disable controls the
-+ * overall interrupt state. If the interrupt is disabled, no cause will
-+ * raise an interrupt.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpio_set_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t en);
-+
-+/**
-+ * dpio_get_irq_enable() - Get overall interrupt state
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPIO object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Returned interrupt state - enable = 1, disable = 0
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpio_get_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t *en);
-+
-+/**
-+ * dpio_set_irq_mask() - Set interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPIO object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: event mask to trigger interrupt;
-+ * each bit:
-+ * 0 = ignore event
-+ * 1 = consider event for asserting IRQ
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpio_set_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t mask);
-+
-+/**
-+ * dpio_get_irq_mask() - Get interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPIO object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: Returned event mask to trigger interrupt
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpio_get_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *mask);
-+
-+/**
-+ * dpio_get_irq_status() - Get the current status of any pending interrupts.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPIO object
-+ * @irq_index: The interrupt index to configure
-+ * @status: Returned interrupts status - one bit per cause:
-+ * 0 = no interrupt pending
-+ * 1 = interrupt pending
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpio_get_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *status);
-+
-+/**
-+ * dpio_clear_irq_status() - Clear a pending interrupt's status
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPIO object
-+ * @irq_index: The interrupt index to configure
-+ * @status: bits to clear (W1C) - one bit per cause:
-+ * 0 = don't change
-+ * 1 = clear status bit
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpio_clear_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t status);
-+
-+/**
-+ * struct dpio_attr - Structure representing DPIO attributes
-+ * @id: DPIO object ID
-+ * @version: DPIO version
-+ * @qbman_portal_ce_offset: offset of the software portal cache-enabled area
-+ * @qbman_portal_ci_offset: offset of the software portal cache-inhibited area
-+ * @qbman_portal_id: Software portal ID
-+ * @channel_mode: Notification channel mode
-+ * @num_priorities: Number of priorities for the notification channel (1-8);
-+ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL'
-+ * @qbman_version: QBMAN version
-+ */
-+struct dpio_attr {
-+ int id;
-+ /**
-+ * struct version - DPIO version
-+ * @major: DPIO major version
-+ * @minor: DPIO minor version
-+ */
-+ struct {
-+ uint16_t major;
-+ uint16_t minor;
-+ } version;
-+ uint64_t qbman_portal_ce_offset;
-+ uint64_t qbman_portal_ci_offset;
-+ uint16_t qbman_portal_id;
-+ enum dpio_channel_mode channel_mode;
-+ uint8_t num_priorities;
-+ uint32_t qbman_version;
-+};
-+
-+/**
-+ * dpio_get_attributes() - Retrieve DPIO attributes
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPIO object
-+ * @attr: Returned object's attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise
-+ */
-+int dpio_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpio_attr *attr);
-+#endif /* __FSL_DPIO_H */
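Taken together, the DPIO calls above describe a per-core software-portal setup: open the object, direct stashing at the core's cache, optionally bind a DPCON channel for notifications, then enable the portal. The sketch below is illustrative only, under the same assumptions as before (an initialised mc_io portal, a CMD_PRI_LOW flag from the MC command header, and DPIO/DPCON object IDs taken from the DPL).

#include <fsl_dpio.h>

/* Hypothetical helper: prepare one DPIO for use by a core. The returned
 * channel index would normally be handed to the QBMAN dequeue code. */
static int dpio_setup_sketch(struct fsl_mc_io *mc_io, int dpio_id,
                             uint8_t sdest, int dpcon_id)
{
        uint8_t channel_index;
        uint16_t token;
        int err;

        err = dpio_open(mc_io, CMD_PRI_LOW, dpio_id, &token);
        if (err)
                return err;

        /* Stash dequeued data close to the core that owns this portal. */
        err = dpio_set_stashing_destination(mc_io, CMD_PRI_LOW, token, sdest);
        if (err)
                goto fail;

        /* Bind a DPCON so its notifications arrive on this portal. */
        err = dpio_add_static_dequeue_channel(mc_io, CMD_PRI_LOW, token,
                                              dpcon_id, &channel_index);
        if (err)
                goto fail;

        err = dpio_enable(mc_io, CMD_PRI_LOW, token);
        if (err)
                goto fail;
        return 0;

fail:
        dpio_close(mc_io, CMD_PRI_LOW, token);
        return err;
}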
-diff --git a/drivers/net/dpaa2/mc/fsl_dpio_cmd.h b/drivers/net/dpaa2/mc/fsl_dpio_cmd.h
-new file mode 100644
-index 0000000..f339cd6
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/fsl_dpio_cmd.h
-@@ -0,0 +1,184 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef _FSL_DPIO_CMD_H
-+#define _FSL_DPIO_CMD_H
-+
-+/* DPIO Version */
-+#define DPIO_VER_MAJOR 3
-+#define DPIO_VER_MINOR 2
-+
-+/* Command IDs */
-+#define DPIO_CMDID_CLOSE 0x800
-+#define DPIO_CMDID_OPEN 0x803
-+#define DPIO_CMDID_CREATE 0x903
-+#define DPIO_CMDID_DESTROY 0x900
-+
-+#define DPIO_CMDID_ENABLE 0x002
-+#define DPIO_CMDID_DISABLE 0x003
-+#define DPIO_CMDID_GET_ATTR 0x004
-+#define DPIO_CMDID_RESET 0x005
-+#define DPIO_CMDID_IS_ENABLED 0x006
-+
-+#define DPIO_CMDID_SET_IRQ 0x010
-+#define DPIO_CMDID_GET_IRQ 0x011
-+#define DPIO_CMDID_SET_IRQ_ENABLE 0x012
-+#define DPIO_CMDID_GET_IRQ_ENABLE 0x013
-+#define DPIO_CMDID_SET_IRQ_MASK 0x014
-+#define DPIO_CMDID_GET_IRQ_MASK 0x015
-+#define DPIO_CMDID_GET_IRQ_STATUS 0x016
-+#define DPIO_CMDID_CLEAR_IRQ_STATUS 0x017
-+
-+#define DPIO_CMDID_SET_STASHING_DEST 0x120
-+#define DPIO_CMDID_GET_STASHING_DEST 0x121
-+#define DPIO_CMDID_ADD_STATIC_DEQUEUE_CHANNEL 0x122
-+#define DPIO_CMDID_REMOVE_STATIC_DEQUEUE_CHANNEL 0x123
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPIO_CMD_OPEN(cmd, dpio_id) \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, dpio_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPIO_CMD_CREATE(cmd, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 16, 2, enum dpio_channel_mode, \
-+ cfg->channel_mode);\
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->num_priorities);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPIO_RSP_IS_ENABLED(cmd, en) \
-+ MC_RSP_OP(cmd, 0, 0, 1, int, en)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPIO_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
-+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
-+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPIO_CMD_GET_IRQ(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPIO_RSP_GET_IRQ(cmd, type, irq_cfg) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
-+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \
-+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
-+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPIO_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPIO_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPIO_RSP_GET_IRQ_ENABLE(cmd, en) \
-+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPIO_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPIO_CMD_GET_IRQ_MASK(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPIO_RSP_GET_IRQ_MASK(cmd, mask) \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPIO_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPIO_RSP_GET_IRQ_STATUS(cmd, status) \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPIO_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPIO_RSP_GET_ATTR(cmd, attr) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\
-+ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->qbman_portal_id);\
-+ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, attr->num_priorities);\
-+ MC_RSP_OP(cmd, 0, 56, 4, enum dpio_channel_mode, attr->channel_mode);\
-+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->qbman_portal_ce_offset);\
-+ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, attr->qbman_portal_ci_offset);\
-+ MC_RSP_OP(cmd, 3, 0, 16, uint16_t, attr->version.major);\
-+ MC_RSP_OP(cmd, 3, 16, 16, uint16_t, attr->version.minor);\
-+ MC_RSP_OP(cmd, 3, 32, 32, uint32_t, attr->qbman_version);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPIO_CMD_SET_STASHING_DEST(cmd, sdest) \
-+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, sdest)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPIO_RSP_GET_STASHING_DEST(cmd, sdest) \
-+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, sdest)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPIO_CMD_ADD_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id) \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPIO_RSP_ADD_STATIC_DEQUEUE_CHANNEL(cmd, channel_index) \
-+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, channel_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPIO_CMD_REMOVE_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id) \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id)
-+#endif /* _FSL_DPIO_CMD_H */
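The DPIO_RSP_ macros are the mirror image of the command encoders: they extract fields from the response parameter words at the documented (parameter, offset, width) positions. The following is a complementary decoding sketch under the same assumptions as the hypothetical packing helper shown earlier.

#include <stdint.h>

/* Extract bits [offset, offset + width) of parameter word 'param',
 * mirroring what MC_RSP_OP(cmd, param, offset, width, type, arg) is
 * expected to do. */
static inline uint64_t mc_rsp_unpack(const uint64_t *params, int param,
                                     int offset, int width)
{
        uint64_t mask = (width == 64) ? ~0ULL : ((1ULL << width) - 1);

        return (params[param] >> offset) & mask;
}

/* Example: DPIO_RSP_GET_ATTR carries the object ID at parameter 0,
 * offset 0, width 32 and the QBMAN portal ID at offset 32, width 16. */
static inline void dpio_rsp_get_attr_sketch(const uint64_t *params,
                                            int *id,
                                            uint16_t *qbman_portal_id)
{
        *id = (int)(int32_t)mc_rsp_unpack(params, 0, 0, 32);
        *qbman_portal_id = (uint16_t)mc_rsp_unpack(params, 0, 32, 16);
}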
-diff --git a/drivers/net/dpaa2/mc/fsl_dpkg.h b/drivers/net/dpaa2/mc/fsl_dpkg.h
-new file mode 100644
-index 0000000..b2bceaf
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/fsl_dpkg.h
-@@ -0,0 +1,174 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef __FSL_DPKG_H_
-+#define __FSL_DPKG_H_
-+
-+#include <fsl_net.h>
-+
-+/* Data Path Key Generator API
-+ * Contains initialization APIs and runtime APIs for the Key Generator
-+ */
-+
-+/** Key Generator properties */
-+
-+/**
-+ * Number of masks per key extraction
-+ */
-+#define DPKG_NUM_OF_MASKS 4
-+/**
-+ * Number of extractions per key profile
-+ */
-+#define DPKG_MAX_NUM_OF_EXTRACTS 10
-+
-+/**
-+ * enum dpkg_extract_from_hdr_type - Selecting extraction by header types
-+ * @DPKG_FROM_HDR: Extract selected bytes from header, by offset
-+ * @DPKG_FROM_FIELD: Extract selected bytes from header, by offset from field
-+ * @DPKG_FULL_FIELD: Extract a full field
-+ */
-+enum dpkg_extract_from_hdr_type {
-+ DPKG_FROM_HDR = 0,
-+ DPKG_FROM_FIELD = 1,
-+ DPKG_FULL_FIELD = 2
-+};
-+
-+/**
-+ * enum dpkg_extract_type - Enumeration for selecting extraction type
-+ * @DPKG_EXTRACT_FROM_HDR: Extract from the header
-+ * @DPKG_EXTRACT_FROM_DATA: Extract from data not in specific header
-+ * @DPKG_EXTRACT_FROM_PARSE: Extract from the parse result;
-+ * e.g. can be used to extract header existence;
-+ * please refer to the 'Parse Result definition' section in the parser BG
-+ */
-+enum dpkg_extract_type {
-+ DPKG_EXTRACT_FROM_HDR = 0,
-+ DPKG_EXTRACT_FROM_DATA = 1,
-+ DPKG_EXTRACT_FROM_PARSE = 3
-+};
-+
-+/**
-+ * struct dpkg_mask - A structure for defining a single extraction mask
-+ * @mask: Byte mask for the extracted content
-+ * @offset: Offset within the extracted content
-+ */
-+struct dpkg_mask {
-+ uint8_t mask;
-+ uint8_t offset;
-+};
-+
-+/**
-+ * struct dpkg_extract - A structure for defining a single extraction
-+ * @type: Determines how the union below is interpreted:
-+ * DPKG_EXTRACT_FROM_HDR: selects 'from_hdr';
-+ * DPKG_EXTRACT_FROM_DATA: selects 'from_data';
-+ * DPKG_EXTRACT_FROM_PARSE: selects 'from_parse'
-+ * @extract: Selects extraction method
-+ * @num_of_byte_masks: Defines the number of valid entries in the array below;
-+ * This is also the number of bytes to be used as masks
-+ * @masks: Masks parameters
-+ */
-+struct dpkg_extract {
-+ enum dpkg_extract_type type;
-+ /**
-+ * union extract - Selects extraction method
-+ * @from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR'
-+ * @from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA'
-+ * @from_parse - Used when 'type = DPKG_EXTRACT_FROM_PARSE'
-+ */
-+ union {
-+ /**
-+ * struct from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR'
-+ * @prot: Any of the supported headers
-+ * @type: Defines the type of header extraction:
-+ * DPKG_FROM_HDR: use size & offset below;
-+ * DPKG_FROM_FIELD: use field, size and offset below;
-+ * DPKG_FULL_FIELD: use field below
-+ * @field: One of the supported fields (NH_FLD_)
-+ *
-+ * @size: Size in bytes
-+ * @offset: Byte offset
-+ * @hdr_index: Clear for cases not listed below;
-+ * Used for protocols that may have more than a single
-+ * header, 0 indicates an outer header;
-+ * Supported protocols (possible values):
-+ * NET_PROT_VLAN (0, HDR_INDEX_LAST);
-+ * NET_PROT_MPLS (0, 1, HDR_INDEX_LAST);
-+ * NET_PROT_IP(0, HDR_INDEX_LAST);
-+ * NET_PROT_IPv4(0, HDR_INDEX_LAST);
-+ * NET_PROT_IPv6(0, HDR_INDEX_LAST);
-+ */
-+
-+ struct {
-+ enum net_prot prot;
-+ enum dpkg_extract_from_hdr_type type;
-+ uint32_t field;
-+ uint8_t size;
-+ uint8_t offset;
-+ uint8_t hdr_index;
-+ } from_hdr;
-+ /**
-+ * struct from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA'
-+ * @size: Size in bytes
-+ * @offset: Byte offset
-+ */
-+ struct {
-+ uint8_t size;
-+ uint8_t offset;
-+ } from_data;
-+
-+ /**
-+ * struct from_parse - Used when 'type = DPKG_EXTRACT_FROM_PARSE'
-+ * @size: Size in bytes
-+ * @offset: Byte offset
-+ */
-+ struct {
-+ uint8_t size;
-+ uint8_t offset;
-+ } from_parse;
-+ } extract;
-+
-+ uint8_t num_of_byte_masks;
-+ struct dpkg_mask masks[DPKG_NUM_OF_MASKS];
-+};
-+
-+/**
-+ * struct dpkg_profile_cfg - A structure for defining a full Key Generation
-+ * profile (rule)
-+ * @num_extracts: Defines the number of valid entries in the array below
-+ * @extracts: Array of required extractions
-+ */
-+struct dpkg_profile_cfg {
-+ uint8_t num_extracts;
-+ struct dpkg_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS];
-+};
-+
-+#endif /* __FSL_DPKG_H_ */
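A minimal usage sketch for the key-extraction structures defined in the removed fsl_dpkg.h above: it fills a one-extraction dpkg_profile_cfg that selects the full IPv4 source address. The NET_PROT_IP and NH_FLD_IP_SRC selectors are assumed to come from the companion fsl_net.h header, which is not part of this hunk.

#include <string.h>
#include <fsl_dpkg.h>
#include <fsl_net.h>   /* assumed source of NET_PROT_ / NH_FLD_ selectors */

/* Sketch only: a single full-field extraction, no byte masks applied. */
static void build_ip_src_key_profile(struct dpkg_profile_cfg *kg_cfg)
{
	memset(kg_cfg, 0, sizeof(*kg_cfg));

	kg_cfg->num_extracts = 1;
	kg_cfg->extracts[0].type = DPKG_EXTRACT_FROM_HDR;
	kg_cfg->extracts[0].extract.from_hdr.prot = NET_PROT_IP;
	kg_cfg->extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
	kg_cfg->extracts[0].extract.from_hdr.field = NH_FLD_IP_SRC;
	kg_cfg->extracts[0].num_of_byte_masks = 0;
}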
-diff --git a/drivers/net/dpaa2/mc/fsl_dpmac.h b/drivers/net/dpaa2/mc/fsl_dpmac.h
-new file mode 100644
-index 0000000..ad27772
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/fsl_dpmac.h
-@@ -0,0 +1,593 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef __FSL_DPMAC_H
-+#define __FSL_DPMAC_H
-+
-+/* Data Path MAC API
-+ * Contains initialization APIs and runtime control APIs for DPMAC
-+ */
-+
-+struct fsl_mc_io;
-+
-+/**
-+ * dpmac_open() - Open a control session for the specified object.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @dpmac_id: DPMAC unique ID
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * This function can be used to open a control session for an
-+ * already created object; an object may have been declared in
-+ * the DPL or by calling the dpmac_create function.
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent commands for
-+ * this specific object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmac_open(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ int dpmac_id,
-+ uint16_t *token);
-+
-+/**
-+ * dpmac_close() - Close the control session of the object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMAC object
-+ *
-+ * After this function is called, no further operations are
-+ * allowed on the object without opening a new control session.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmac_close(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * enum dpmac_link_type - DPMAC link type
-+ * @DPMAC_LINK_TYPE_NONE: No link
-+ * @DPMAC_LINK_TYPE_FIXED: Link is fixed type
-+ * @DPMAC_LINK_TYPE_PHY: Link by PHY ID
-+ * @DPMAC_LINK_TYPE_BACKPLANE: Backplane link type
-+ */
-+enum dpmac_link_type {
-+ DPMAC_LINK_TYPE_NONE,
-+ DPMAC_LINK_TYPE_FIXED,
-+ DPMAC_LINK_TYPE_PHY,
-+ DPMAC_LINK_TYPE_BACKPLANE
-+};
-+
-+/**
-+ * enum dpmac_eth_if - DPMAC Ethernet interface
-+ * @DPMAC_ETH_IF_MII: MII interface
-+ * @DPMAC_ETH_IF_RMII: RMII interface
-+ * @DPMAC_ETH_IF_SMII: SMII interface
-+ * @DPMAC_ETH_IF_GMII: GMII interface
-+ * @DPMAC_ETH_IF_RGMII: RGMII interface
-+ * @DPMAC_ETH_IF_SGMII: SGMII interface
-+ * @DPMAC_ETH_IF_QSGMII: QSGMII interface
-+ * @DPMAC_ETH_IF_XAUI: XAUI interface
-+ * @DPMAC_ETH_IF_XFI: XFI interface
-+ */
-+enum dpmac_eth_if {
-+ DPMAC_ETH_IF_MII,
-+ DPMAC_ETH_IF_RMII,
-+ DPMAC_ETH_IF_SMII,
-+ DPMAC_ETH_IF_GMII,
-+ DPMAC_ETH_IF_RGMII,
-+ DPMAC_ETH_IF_SGMII,
-+ DPMAC_ETH_IF_QSGMII,
-+ DPMAC_ETH_IF_XAUI,
-+ DPMAC_ETH_IF_XFI
-+};
-+
-+/**
-+ * struct dpmac_cfg - Structure representing DPMAC configuration
-+ * @mac_id: Represents the Hardware MAC ID; in case of multiple WRIOPs,
-+ * the MAC IDs are contiguous.
-+ * For example: 2 WRIOPs, 16 MACs in each:
-+ * MAC IDs for the 1st WRIOP: 1-16,
-+ * MAC IDs for the 2nd WRIOP: 17-32.
-+ */
-+struct dpmac_cfg {
-+ int mac_id;
-+};
-+
-+/**
-+ * dpmac_create() - Create the DPMAC object.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @cfg: Configuration structure
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * Create the DPMAC object, allocate required resources and
-+ * perform required initialization.
-+ *
-+ * The object can be created either by declaring it in the
-+ * DPL file, or by calling this function.
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent calls to
-+ * this specific object. For objects that are created using the
-+ * DPL file, call dpmac_open function to get an authentication
-+ * token first.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmac_create(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ const struct dpmac_cfg *cfg,
-+ uint16_t *token);
-+
-+/**
-+ * dpmac_destroy() - Destroy the DPMAC object and release all its resources.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMAC object
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dpmac_destroy(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * DPMAC IRQ Index and Events
-+ */
-+
-+/**
-+ * IRQ index
-+ */
-+#define DPMAC_IRQ_INDEX 0
-+/**
-+ * IRQ event - indicates a change in link state
-+ */
-+#define DPMAC_IRQ_EVENT_LINK_CFG_REQ 0x00000001
-+/**
-+ * IRQ event - Indicates that the link state changed
-+ */
-+#define DPMAC_IRQ_EVENT_LINK_CHANGED 0x00000002
-+
-+/**
-+ * struct dpmac_irq_cfg - IRQ configuration
-+ * @addr: Address that must be written to signal a message-based interrupt
-+ * @val: Value to write into irq_addr address
-+ * @irq_num: A user defined number associated with this IRQ
-+ */
-+struct dpmac_irq_cfg {
-+ uint64_t addr;
-+ uint32_t val;
-+ int irq_num;
-+};
-+
-+/**
-+ * dpmac_set_irq() - Set IRQ information for the DPMAC to trigger an interrupt.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMAC object
-+ * @irq_index: Identifies the interrupt index to configure
-+ * @irq_cfg: IRQ configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmac_set_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ struct dpmac_irq_cfg *irq_cfg);
-+
-+/**
-+ * dpmac_get_irq() - Get IRQ information from the DPMAC.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMAC object
-+ * @irq_index: The interrupt index to configure
-+ * @type: Interrupt type: 0 represents message interrupt
-+ * type (both irq_addr and irq_val are valid)
-+ * @irq_cfg: IRQ attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmac_get_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ int *type,
-+ struct dpmac_irq_cfg *irq_cfg);
-+
-+/**
-+ * dpmac_set_irq_enable() - Set overall interrupt state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMAC object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Interrupt state - enable = 1, disable = 0
-+ *
-+ * Allows GPP software to control when interrupts are generated.
-+ * Each interrupt can have up to 32 causes. The enable/disable controls the
-+ * overall interrupt state. If the interrupt is disabled, no cause will
-+ * trigger an interrupt.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmac_set_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t en);
-+
-+/**
-+ * dpmac_get_irq_enable() - Get overall interrupt state
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMAC object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Returned interrupt state - enable = 1, disable = 0
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmac_get_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t *en);
-+
-+/**
-+ * dpmac_set_irq_mask() - Set interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMAC object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: Event mask to trigger interrupt;
-+ * each bit:
-+ * 0 = ignore event
-+ * 1 = consider event for asserting IRQ
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmac_set_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t mask);
-+
-+/**
-+ * dpmac_get_irq_mask() - Get interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMAC object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: Returned event mask to trigger interrupt
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmac_get_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *mask);
-+
-+/**
-+ * dpmac_get_irq_status() - Get the current status of any pending interrupts.
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMAC object
-+ * @irq_index: The interrupt index to configure
-+ * @status: Returned interrupts status - one bit per cause:
-+ * 0 = no interrupt pending
-+ * 1 = interrupt pending
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmac_get_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *status);
-+
-+/**
-+ * dpmac_clear_irq_status() - Clear a pending interrupt's status
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMAC object
-+ * @irq_index: The interrupt index to configure
-+ * @status: Bits to clear (W1C) - one bit per cause:
-+ * 0 = don't change
-+ * 1 = clear status bit
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmac_clear_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t status);
-+
-+/**
-+ * struct dpmac_attr - Structure representing DPMAC attributes
-+ * @id: DPMAC object ID
-+ * @phy_id: PHY ID
-+ * @link_type: link type
-+ * @eth_if: Ethernet interface
-+ * @max_rate: Maximum supported rate - in Mbps
-+ * @version: DPMAC version
-+ */
-+struct dpmac_attr {
-+ int id;
-+ int phy_id;
-+ enum dpmac_link_type link_type;
-+ enum dpmac_eth_if eth_if;
-+ uint32_t max_rate;
-+ /**
-+ * struct version - Structure representing DPMAC version
-+ * @major: DPMAC major version
-+ * @minor: DPMAC minor version
-+ */
-+ struct {
-+ uint16_t major;
-+ uint16_t minor;
-+ } version;
-+};
-+
-+/**
-+ * dpmac_get_attributes - Retrieve DPMAC attributes.
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMAC object
-+ * @attr: Returned object's attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmac_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpmac_attr *attr);
-+
-+/**
-+ * struct dpmac_mdio_cfg - DPMAC MDIO read/write parameters
-+ * @phy_addr: MDIO device address
-+ * @reg: Address of the register within the Clause 45 PHY device to be read
-+ * from or written to
-+ * @data: Data read/write from/to MDIO
-+ */
-+struct dpmac_mdio_cfg {
-+ uint8_t phy_addr;
-+ uint8_t reg;
-+ uint16_t data;
-+};
-+
-+/**
-+ * dpmac_mdio_read() - Perform MDIO read transaction
-+ * @mc_io: Pointer to opaque I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMAC object
-+ * @cfg: Structure with MDIO transaction parameters
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmac_mdio_read(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpmac_mdio_cfg *cfg);
-+
-+/**
-+ * dpmac_mdio_write() - Perform MDIO write transaction
-+ * @mc_io: Pointer to opaque I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMAC object
-+ * @cfg: Structure with MDIO transaction parameters
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmac_mdio_write(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpmac_mdio_cfg *cfg);
-+
-+/**
-+ * DPMAC link configuration/state options
-+ */
-+
-+/**
-+ * Enable auto-negotiation
-+ */
-+#define DPMAC_LINK_OPT_AUTONEG 0x0000000000000001ULL
-+/**
-+ * Enable half-duplex mode
-+ */
-+#define DPMAC_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
-+/**
-+ * Enable pause frames
-+ */
-+#define DPMAC_LINK_OPT_PAUSE 0x0000000000000004ULL
-+/**
-+ * Enable asymmetric pause frames
-+ */
-+#define DPMAC_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
-+
-+/**
-+ * struct dpmac_link_cfg - Structure representing DPMAC link configuration
-+ * @rate: Link's rate - in Mbps
-+ * @options: Enable/Disable DPMAC link cfg features (bitmap)
-+ */
-+struct dpmac_link_cfg {
-+ uint32_t rate;
-+ uint64_t options;
-+};
-+
-+/**
-+ * dpmac_get_link_cfg() - Get Ethernet link configuration
-+ * @mc_io: Pointer to opaque I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMAC object
-+ * @cfg: Returned structure with the link configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmac_get_link_cfg(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpmac_link_cfg *cfg);
-+
-+/**
-+ * struct dpmac_link_state - DPMAC link configuration request
-+ * @rate: Rate in Mbps
-+ * @options: Enable/Disable DPMAC link cfg features (bitmap)
-+ * @up: Link state
-+ */
-+struct dpmac_link_state {
-+ uint32_t rate;
-+ uint64_t options;
-+ int up;
-+};
-+
-+/**
-+ * dpmac_set_link_state() - Set the Ethernet link status
-+ * @mc_io: Pointer to opaque I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMAC object
-+ * @link_state: Link state configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmac_set_link_state(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpmac_link_state *link_state);
-+
-+/**
-+ * enum dpmac_counter - DPMAC counter types
-+ * @DPMAC_CNT_ING_FRAME_64: counts 64-byte frames, good or bad.
-+ * @DPMAC_CNT_ING_FRAME_127: counts 65- to 127-byte frames, good or bad.
-+ * @DPMAC_CNT_ING_FRAME_255: counts 128- to 255-byte frames, good or bad.
-+ * @DPMAC_CNT_ING_FRAME_511: counts 256- to 511-byte frames, good or bad.
-+ * @DPMAC_CNT_ING_FRAME_1023: counts 512- to 1023-byte frames, good or bad.
-+ * @DPMAC_CNT_ING_FRAME_1518: counts 1024- to 1518-byte frames, good or bad.
-+ * @DPMAC_CNT_ING_FRAME_1519_MAX: counts frames of 1519 bytes and larger
-+ * (up to the maximum frame length specified),
-+ * good or bad.
-+ * @DPMAC_CNT_ING_FRAG: counts received frames shorter than 64 bytes
-+ * with a wrong CRC.
-+ * @DPMAC_CNT_ING_JABBER: counts frames longer than the maximum frame length
-+ * specified, with a bad frame check sequence.
-+ * @DPMAC_CNT_ING_FRAME_DISCARD: counts dropped frames due to internal errors.
-+ * Occurs when a receive FIFO overflows.
-+ * Also includes frames truncated as a result of
-+ * the receive FIFO overflow.
-+ * @DPMAC_CNT_ING_ALIGN_ERR: counts frames with an alignment error
-+ * (optionally used for a wrong SFD).
-+ * @DPMAC_CNT_EGR_UNDERSIZED: counts frames transmitted that were less than 64
-+ * bytes long with a good CRC.
-+ * @DPMAC_CNT_ING_OVERSIZED: counts frames longer than the maximum frame length
-+ * specified, with a good frame check sequence.
-+ * @DPMAC_CNT_ING_VALID_PAUSE_FRAME: counts valid pause frames (regular and PFC)
-+ * @DPMAC_CNT_EGR_VALID_PAUSE_FRAME: counts valid pause frames transmitted
-+ * (regular and PFC).
-+ * @DPMAC_CNT_ING_BYTE: counts bytes received except preamble for all valid
-+ * frames and valid pause frames.
-+ * @DPMAC_CNT_ING_MCAST_FRAME: counts received multicast frames.
-+ * @DPMAC_CNT_ING_BCAST_FRAME: counts received broadcast frames.
-+ * @DPMAC_CNT_ING_ALL_FRAME: counts each frame received, good or bad.
-+ * @DPMAC_CNT_ING_UCAST_FRAME: counts received unicast frames.
-+ * @DPMAC_CNT_ING_ERR_FRAME: counts frames received with an error
-+ * (except for undersized/fragment frame).
-+ * @DPMAC_CNT_EGR_BYTE: counts bytes transmitted except preamble for all valid
-+ * frames and valid pause frames transmitted.
-+ * @DPMAC_CNT_EGR_MCAST_FRAME: counts transmitted multicast frames.
-+ * @DPMAC_CNT_EGR_BCAST_FRAME: counts transmitted broadcast frames.
-+ * @DPMAC_CNT_EGR_UCAST_FRAME: counts transmitted unicast frames.
-+ * @DPMAC_CNT_EGR_ERR_FRAME: counts frames transmitted with an error.
-+ * @DPMAC_CNT_ING_GOOD_FRAME: counts frames received without error, including
-+ * pause frames.
-+ * @DPMAC_CNT_ENG_GOOD_FRAME: counts frames transmitted without error, including
-+ * pause frames.
-+ */
-+enum dpmac_counter {
-+ DPMAC_CNT_ING_FRAME_64,
-+ DPMAC_CNT_ING_FRAME_127,
-+ DPMAC_CNT_ING_FRAME_255,
-+ DPMAC_CNT_ING_FRAME_511,
-+ DPMAC_CNT_ING_FRAME_1023,
-+ DPMAC_CNT_ING_FRAME_1518,
-+ DPMAC_CNT_ING_FRAME_1519_MAX,
-+ DPMAC_CNT_ING_FRAG,
-+ DPMAC_CNT_ING_JABBER,
-+ DPMAC_CNT_ING_FRAME_DISCARD,
-+ DPMAC_CNT_ING_ALIGN_ERR,
-+ DPMAC_CNT_EGR_UNDERSIZED,
-+ DPMAC_CNT_ING_OVERSIZED,
-+ DPMAC_CNT_ING_VALID_PAUSE_FRAME,
-+ DPMAC_CNT_EGR_VALID_PAUSE_FRAME,
-+ DPMAC_CNT_ING_BYTE,
-+ DPMAC_CNT_ING_MCAST_FRAME,
-+ DPMAC_CNT_ING_BCAST_FRAME,
-+ DPMAC_CNT_ING_ALL_FRAME,
-+ DPMAC_CNT_ING_UCAST_FRAME,
-+ DPMAC_CNT_ING_ERR_FRAME,
-+ DPMAC_CNT_EGR_BYTE,
-+ DPMAC_CNT_EGR_MCAST_FRAME,
-+ DPMAC_CNT_EGR_BCAST_FRAME,
-+ DPMAC_CNT_EGR_UCAST_FRAME,
-+ DPMAC_CNT_EGR_ERR_FRAME,
-+ DPMAC_CNT_ING_GOOD_FRAME,
-+ DPMAC_CNT_ENG_GOOD_FRAME
-+};
-+
-+/**
-+ * dpmac_get_counter() - Read a specific DPMAC counter
-+ * @mc_io: Pointer to opaque I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMAC object
-+ * @type: The requested counter
-+ * @counter: Returned counter value
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmac_get_counter(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ enum dpmac_counter type,
-+ uint64_t *counter);
-+
-+#endif /* __FSL_DPMAC_H */
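A hedged sketch of the control-session flow the DPMAC header above describes: open the object by ID, read one counter, then close. The fsl_mc_io portal is assumed to have been obtained elsewhere, and a cmd_flags value of 0 stands for "no 'MC_CMD_FLAG_' options requested".

#include <stdint.h>
#include <fsl_dpmac.h>

/* Sketch only: read the ingress byte counter of one DPMAC object. */
static int read_dpmac_rx_bytes(struct fsl_mc_io *mc_io, int dpmac_id,
			       uint64_t *rx_bytes)
{
	uint16_t token;
	int err;

	err = dpmac_open(mc_io, 0, dpmac_id, &token);
	if (err)
		return err;

	err = dpmac_get_counter(mc_io, 0, token, DPMAC_CNT_ING_BYTE, rx_bytes);

	/* Always close the control session, even if the read failed. */
	dpmac_close(mc_io, 0, token);

	return err;
}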
-diff --git a/drivers/net/dpaa2/mc/fsl_dpmac_cmd.h b/drivers/net/dpaa2/mc/fsl_dpmac_cmd.h
-new file mode 100644
-index 0000000..dc00590
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/fsl_dpmac_cmd.h
-@@ -0,0 +1,195 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef _FSL_DPMAC_CMD_H
-+#define _FSL_DPMAC_CMD_H
-+
-+/* DPMAC Version */
-+#define DPMAC_VER_MAJOR 3
-+#define DPMAC_VER_MINOR 2
-+
-+/* Command IDs */
-+#define DPMAC_CMDID_CLOSE 0x800
-+#define DPMAC_CMDID_OPEN 0x80c
-+#define DPMAC_CMDID_CREATE 0x90c
-+#define DPMAC_CMDID_DESTROY 0x900
-+
-+#define DPMAC_CMDID_GET_ATTR 0x004
-+#define DPMAC_CMDID_RESET 0x005
-+
-+#define DPMAC_CMDID_SET_IRQ 0x010
-+#define DPMAC_CMDID_GET_IRQ 0x011
-+#define DPMAC_CMDID_SET_IRQ_ENABLE 0x012
-+#define DPMAC_CMDID_GET_IRQ_ENABLE 0x013
-+#define DPMAC_CMDID_SET_IRQ_MASK 0x014
-+#define DPMAC_CMDID_GET_IRQ_MASK 0x015
-+#define DPMAC_CMDID_GET_IRQ_STATUS 0x016
-+#define DPMAC_CMDID_CLEAR_IRQ_STATUS 0x017
-+
-+#define DPMAC_CMDID_MDIO_READ 0x0c0
-+#define DPMAC_CMDID_MDIO_WRITE 0x0c1
-+#define DPMAC_CMDID_GET_LINK_CFG 0x0c2
-+#define DPMAC_CMDID_SET_LINK_STATE 0x0c3
-+#define DPMAC_CMDID_GET_COUNTER 0x0c4
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPMAC_CMD_CREATE(cmd, cfg) \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->mac_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPMAC_CMD_OPEN(cmd, dpmac_id) \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, dpmac_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPMAC_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
-+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \
-+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPMAC_CMD_GET_IRQ(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPMAC_RSP_GET_IRQ(cmd, type, irq_cfg) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
-+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \
-+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
-+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPMAC_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPMAC_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPMAC_RSP_GET_IRQ_ENABLE(cmd, en) \
-+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPMAC_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask);\
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPMAC_CMD_GET_IRQ_MASK(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPMAC_RSP_GET_IRQ_MASK(cmd, mask) \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPMAC_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPMAC_RSP_GET_IRQ_STATUS(cmd, status) \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPMAC_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPMAC_RSP_GET_ATTRIBUTES(cmd, attr) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->phy_id);\
-+ MC_RSP_OP(cmd, 0, 32, 32, int, attr->id);\
-+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\
-+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\
-+ MC_RSP_OP(cmd, 1, 32, 8, enum dpmac_link_type, attr->link_type);\
-+ MC_RSP_OP(cmd, 1, 40, 8, enum dpmac_eth_if, attr->eth_if);\
-+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->max_rate);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPMAC_CMD_MDIO_READ(cmd, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->phy_addr); \
-+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->reg); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPMAC_RSP_MDIO_READ(cmd, data) \
-+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, data)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPMAC_CMD_MDIO_WRITE(cmd, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->phy_addr); \
-+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->reg); \
-+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->data); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPMAC_RSP_GET_LINK_CFG(cmd, cfg) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 64, uint64_t, cfg->options); \
-+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->rate); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPMAC_CMD_SET_LINK_STATE(cmd, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 64, uint64_t, cfg->options); \
-+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->rate); \
-+ MC_CMD_OP(cmd, 2, 0, 1, int, cfg->up); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPMAC_CMD_GET_COUNTER(cmd, type) \
-+ MC_CMD_OP(cmd, 0, 0, 8, enum dpmac_counter, type)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPMAC_RSP_GET_COUNTER(cmd, counter) \
-+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, counter)
-+
-+#endif /* _FSL_DPMAC_CMD_H */
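The MC_CMD_OP()/MC_RSP_OP() macros used in the command header above come from the generic MC command header, which is not part of this hunk; the (cmd, param, offset, width) columns are assumed to mean "pack or unpack 'width' bits at bit 'offset' of 64-bit parameter word 'param'". A stand-alone illustration of that packing convention, not the real macro:

#include <stdint.h>

/* Illustrative packing helper matching the assumed MC_CMD_OP convention. */
static inline void pack_field(uint64_t *params, int param, int offset,
			      int width, uint64_t val)
{
	uint64_t mask = (width == 64) ? UINT64_MAX : ((1ULL << width) - 1);

	params[param] &= ~(mask << offset);
	params[param] |= (val & mask) << offset;
}

/* Example: DPMAC_CMD_OPEN packs 'dpmac_id' into bits 0..31 of parameter 0. */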
-diff --git a/drivers/net/dpaa2/mc/fsl_dpmcp.h b/drivers/net/dpaa2/mc/fsl_dpmcp.h
-new file mode 100644
-index 0000000..80f238e
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/fsl_dpmcp.h
-@@ -0,0 +1,332 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef __FSL_DPMCP_H
-+#define __FSL_DPMCP_H
-+
-+/* Data Path Management Command Portal API
-+ * Contains initialization APIs and runtime control APIs for DPMCP
-+ */
-+
-+struct fsl_mc_io;
-+
-+/**
-+ * dpmcp_open() - Open a control session for the specified object.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @dpmcp_id: DPMCP unique ID
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * This function can be used to open a control session for an
-+ * already created object; an object may have been declared in
-+ * the DPL or by calling the dpmcp_create function.
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent commands for
-+ * this specific object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmcp_open(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ int dpmcp_id,
-+ uint16_t *token);
-+
-+/**
-+ * Get portal ID from pool
-+ */
-+#define DPMCP_GET_PORTAL_ID_FROM_POOL (-1)
-+
-+/**
-+ * dpmcp_close() - Close the control session of the object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMCP object
-+ *
-+ * After this function is called, no further operations are
-+ * allowed on the object without opening a new control session.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmcp_close(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * struct dpmcp_cfg - Structure representing DPMCP configuration
-+ * @portal_id: Portal ID; 'DPMCP_GET_PORTAL_ID_FROM_POOL' to get the portal ID
-+ * from pool
-+ */
-+struct dpmcp_cfg {
-+ int portal_id;
-+};
-+
-+/**
-+ * dpmcp_create() - Create the DPMCP object.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @cfg: Configuration structure
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * Create the DPMCP object, allocate required resources and
-+ * perform required initialization.
-+ *
-+ * The object can be created either by declaring it in the
-+ * DPL file, or by calling this function.
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent calls to
-+ * this specific object. For objects that are created using the
-+ * DPL file, call dpmcp_open function to get an authentication
-+ * token first.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmcp_create(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ const struct dpmcp_cfg *cfg,
-+ uint16_t *token);
-+
-+/**
-+ * dpmcp_destroy() - Destroy the DPMCP object and release all its resources.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMCP object
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dpmcp_destroy(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * dpmcp_reset() - Reset the DPMCP, returns the object to initial state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMCP object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmcp_reset(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * IRQ
-+ */
-+
-+/**
-+ * IRQ Index
-+ */
-+#define DPMCP_IRQ_INDEX 0
-+/**
-+ * IRQ event - Indicates that a portal command has completed
-+ */
-+#define DPMCP_IRQ_EVENT_CMD_DONE 0x00000001
-+
-+/**
-+ * struct dpmcp_irq_cfg - IRQ configuration
-+ * @addr: Address that must be written to signal a message-based interrupt
-+ * @val: Value to write into irq_addr address
-+ * @irq_num: A user defined number associated with this IRQ
-+ */
-+struct dpmcp_irq_cfg {
-+ uint64_t addr;
-+ uint32_t val;
-+ int irq_num;
-+};
-+
-+/**
-+ * dpmcp_set_irq() - Set IRQ information for the DPMCP to trigger an interrupt.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMCP object
-+ * @irq_index: Identifies the interrupt index to configure
-+ * @irq_cfg: IRQ configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmcp_set_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ struct dpmcp_irq_cfg *irq_cfg);
-+
-+/**
-+ * dpmcp_get_irq() - Get IRQ information from the DPMCP.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMCP object
-+ * @irq_index: The interrupt index to configure
-+ * @type: Interrupt type: 0 represents message interrupt
-+ * type (both irq_addr and irq_val are valid)
-+ * @irq_cfg: IRQ attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmcp_get_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ int *type,
-+ struct dpmcp_irq_cfg *irq_cfg);
-+
-+/**
-+ * dpmcp_set_irq_enable() - Set overall interrupt state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMCP object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Interrupt state - enable = 1, disable = 0
-+ *
-+ * Allows GPP software to control when interrupts are generated.
-+ * Each interrupt can have up to 32 causes. The enable/disable controls the
-+ * overall interrupt state. If the interrupt is disabled, no cause will
-+ * trigger an interrupt.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmcp_set_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t en);
-+
-+/**
-+ * dpmcp_get_irq_enable() - Get overall interrupt state
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMCP object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Returned interrupt state - enable = 1, disable = 0
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t *en);
-+
-+/**
-+ * dpmcp_set_irq_mask() - Set interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMCP object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: Event mask to trigger interrupt;
-+ * each bit:
-+ * 0 = ignore event
-+ * 1 = consider event for asserting IRQ
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t mask);
-+
-+/**
-+ * dpmcp_get_irq_mask() - Get interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMCP object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: Returned event mask to trigger interrupt
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *mask);
-+
-+/**
-+ * dpmcp_get_irq_status() - Get the current status of any pending interrupts.
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMCP object
-+ * @irq_index: The interrupt index to configure
-+ * @status: Returned interrupts status - one bit per cause:
-+ * 0 = no interrupt pending
-+ * 1 = interrupt pending
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmcp_get_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *status);
-+
-+/**
-+ * struct dpmcp_attr - Structure representing DPMCP attributes
-+ * @id: DPMCP object ID
-+ * @version: DPMCP version
-+ */
-+struct dpmcp_attr {
-+ int id;
-+ /**
-+ * struct version - Structure representing DPMCP version
-+ * @major: DPMCP major version
-+ * @minor: DPMCP minor version
-+ */
-+ struct {
-+ uint16_t major;
-+ uint16_t minor;
-+ } version;
-+};
-+
-+/**
-+ * dpmcp_get_attributes - Retrieve DPMCP attributes.
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPMCP object
-+ * @attr: Returned object's attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpmcp_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpmcp_attr *attr);
-+
-+#endif /* __FSL_DPMCP_H */
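The same open/operate/close pattern applies to the DPMCP header above; a minimal sketch, again assuming an already-initialized MC portal and a cmd_flags value of 0:

#include <stdint.h>
#include <fsl_dpmcp.h>

/* Sketch only: return a DPMCP command portal to its initial state. */
static int reset_dpmcp(struct fsl_mc_io *mc_io, int dpmcp_id)
{
	uint16_t token;
	int err;

	err = dpmcp_open(mc_io, 0, dpmcp_id, &token);
	if (err)
		return err;

	err = dpmcp_reset(mc_io, 0, token);

	dpmcp_close(mc_io, 0, token);

	return err;
}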
-diff --git a/drivers/net/dpaa2/mc/fsl_dpmcp_cmd.h b/drivers/net/dpaa2/mc/fsl_dpmcp_cmd.h
-new file mode 100644
-index 0000000..8f710bd
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/fsl_dpmcp_cmd.h
-@@ -0,0 +1,135 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef _FSL_DPMCP_CMD_H
-+#define _FSL_DPMCP_CMD_H
-+
-+/* DPMCP Version */
-+#define DPMCP_VER_MAJOR 3
-+#define DPMCP_VER_MINOR 0
-+
-+/* Command IDs */
-+#define DPMCP_CMDID_CLOSE 0x800
-+#define DPMCP_CMDID_OPEN 0x80b
-+#define DPMCP_CMDID_CREATE 0x90b
-+#define DPMCP_CMDID_DESTROY 0x900
-+
-+#define DPMCP_CMDID_GET_ATTR 0x004
-+#define DPMCP_CMDID_RESET 0x005
-+
-+#define DPMCP_CMDID_SET_IRQ 0x010
-+#define DPMCP_CMDID_GET_IRQ 0x011
-+#define DPMCP_CMDID_SET_IRQ_ENABLE 0x012
-+#define DPMCP_CMDID_GET_IRQ_ENABLE 0x013
-+#define DPMCP_CMDID_SET_IRQ_MASK 0x014
-+#define DPMCP_CMDID_GET_IRQ_MASK 0x015
-+#define DPMCP_CMDID_GET_IRQ_STATUS 0x016
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPMCP_CMD_OPEN(cmd, dpmcp_id) \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, dpmcp_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPMCP_CMD_CREATE(cmd, cfg) \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->portal_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPMCP_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
-+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \
-+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPMCP_CMD_GET_IRQ(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPMCP_RSP_GET_IRQ(cmd, type, irq_cfg) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
-+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \
-+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
-+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPMCP_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPMCP_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPMCP_RSP_GET_IRQ_ENABLE(cmd, en) \
-+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPMCP_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask);\
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPMCP_CMD_GET_IRQ_MASK(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPMCP_RSP_GET_IRQ_MASK(cmd, mask) \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPMCP_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPMCP_RSP_GET_IRQ_STATUS(cmd, status) \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPMCP_RSP_GET_ATTRIBUTES(cmd, attr) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 32, 32, int, attr->id);\
-+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\
-+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\
-+} while (0)
-+
-+#endif /* _FSL_DPMCP_CMD_H */
-diff --git a/drivers/net/dpaa2/mc/fsl_dpmng.h b/drivers/net/dpaa2/mc/fsl_dpmng.h
-new file mode 100644
-index 0000000..4468dea
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/fsl_dpmng.h
-@@ -0,0 +1,74 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef __FSL_DPMNG_H
-+#define __FSL_DPMNG_H
-+
-+/* Management Complex General API
-+ * Contains general API for the Management Complex firmware
-+ */
-+
-+struct fsl_mc_io;
-+
-+/**
-+ * Management Complex firmware version information
-+ */
-+#define MC_VER_MAJOR 9
-+#define MC_VER_MINOR 0
-+
-+/**
-+ * struct mc_version
-+ * @major: Major version number: incremented on API compatibility changes
-+ * @minor: Minor version number: incremented on API additions (that are
-+ * backward compatible); reset when major version is incremented
-+ * @revision: Internal revision number: incremented on implementation changes
-+ * and/or bug fixes that have no impact on API
-+ */
-+struct mc_version {
-+ uint32_t major;
-+ uint32_t minor;
-+ uint32_t revision;
-+};
-+
-+/**
-+ * mc_get_version() - Retrieves the Management Complex firmware
-+ * version information
-+ * @mc_io: Pointer to opaque I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @mc_ver_info: Returned version information structure
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int mc_get_version(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ struct mc_version *mc_ver_info);
-+
-+#endif /* __FSL_DPMNG_H */
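A short sketch of how the version API above is typically used at probe time, assuming an already-initialized MC portal; the comparison policy shown (exact major, minimum minor) is illustrative rather than mandated by the header.

#include <stdio.h>
#include <fsl_dpmng.h>

/* Sketch only: query the MC firmware version and compare against headers. */
static int check_mc_version(struct fsl_mc_io *mc_io)
{
	struct mc_version ver;
	int err;

	err = mc_get_version(mc_io, 0, &ver);
	if (err)
		return err;

	if (ver.major != MC_VER_MAJOR || ver.minor < MC_VER_MINOR) {
		printf("MC firmware %u.%u.%u does not match headers %d.%d\n",
		       ver.major, ver.minor, ver.revision,
		       MC_VER_MAJOR, MC_VER_MINOR);
		return -1;
	}

	return 0;
}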
-diff --git a/drivers/net/dpaa2/mc/fsl_dpmng_cmd.h b/drivers/net/dpaa2/mc/fsl_dpmng_cmd.h
-new file mode 100644
-index 0000000..c34ca3a
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/fsl_dpmng_cmd.h
-@@ -0,0 +1,46 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef __FSL_DPMNG_CMD_H
-+#define __FSL_DPMNG_CMD_H
-+
-+/* Command IDs */
-+#define DPMNG_CMDID_GET_VERSION 0x831
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPMNG_RSP_GET_VERSION(cmd, mc_ver_info) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mc_ver_info->revision); \
-+ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, mc_ver_info->major); \
-+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, mc_ver_info->minor); \
-+} while (0)
-+
-+#endif /* __FSL_DPMNG_CMD_H */
-diff --git a/drivers/net/dpaa2/mc/fsl_dpni.h b/drivers/net/dpaa2/mc/fsl_dpni.h
-new file mode 100644
-index 0000000..c820086
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/fsl_dpni.h
-@@ -0,0 +1,2581 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef __FSL_DPNI_H
-+#define __FSL_DPNI_H
-+
-+#include <fsl_dpkg.h>
-+
-+struct fsl_mc_io;
-+
-+/**
-+ * Data Path Network Interface API
-+ * Contains initialization APIs and runtime control APIs for DPNI
-+ */
-+
-+/** General DPNI macros */
-+
-+/**
-+ * Maximum number of traffic classes
-+ */
-+#define DPNI_MAX_TC 8
-+/**
-+ * Maximum number of buffer pools per DPNI
-+ */
-+#define DPNI_MAX_DPBP 8
-+/**
-+ * Maximum number of storage-profiles per DPNI
-+ */
-+#define DPNI_MAX_SP 2
-+
-+/**
-+ * All traffic classes considered; see dpni_set_rx_flow()
-+ */
-+#define DPNI_ALL_TCS (uint8_t)(-1)
-+/**
-+ * All flows within traffic class considered; see dpni_set_rx_flow()
-+ */
-+#define DPNI_ALL_TC_FLOWS (uint16_t)(-1)
-+/**
-+ * Generate new flow ID; see dpni_set_tx_flow()
-+ */
-+#define DPNI_NEW_FLOW_ID (uint16_t)(-1)
-+/**
-+ * Use for common tx-conf queue; see dpni_set_tx_conf_<x>()
-+ */
-+#define DPNI_COMMON_TX_CONF (uint16_t)(-1)
-+
-+/**
-+ * dpni_open() - Open a control session for the specified object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @dpni_id: DPNI unique ID
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * This function can be used to open a control session for an
-+ * already created object; an object may have been declared in
-+ * the DPL or by calling the dpni_create() function.
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent commands for
-+ * this specific object.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_open(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ int dpni_id,
-+ uint16_t *token);
-+
-+/**
-+ * dpni_close() - Close the control session of the object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ *
-+ * After this function is called, no further operations are
-+ * allowed on the object without opening a new control session.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_close(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/* DPNI configuration options */
-+
-+/**
-+ * Allow different distribution key profiles for different traffic classes;
-+ * if not set, a single key profile is assumed
-+ */
-+#define DPNI_OPT_ALLOW_DIST_KEY_PER_TC 0x00000001
-+
-+/**
-+ * Disable all non-error transmit confirmation; error frames are reported
-+ * back to a common Tx error queue
-+ */
-+#define DPNI_OPT_TX_CONF_DISABLED 0x00000002
-+
-+/**
-+ * Disable per-sender private Tx confirmation/error queue
-+ */
-+#define DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED 0x00000004
-+
-+/**
-+ * Support distribution based on hashed key;
-+ * allows statistical distribution over receive queues in a traffic class
-+ */
-+#define DPNI_OPT_DIST_HASH 0x00000010
-+
-+/**
-+ * DEPRECATED - if this flag is selected and all new 'max_fs_entries' are
-+ * '0', then backward compatibility is preserved;
-+ * Support distribution based on flow steering;
-+ * allows explicit control of distribution over receive queues in a traffic
-+ * class
-+ */
-+#define DPNI_OPT_DIST_FS 0x00000020
-+
-+/**
-+ * Unicast filtering support
-+ */
-+#define DPNI_OPT_UNICAST_FILTER 0x00000080
-+/**
-+ * Multicast filtering support
-+ */
-+#define DPNI_OPT_MULTICAST_FILTER 0x00000100
-+/**
-+ * VLAN filtering support
-+ */
-+#define DPNI_OPT_VLAN_FILTER 0x00000200
-+/**
-+ * Support IP reassembly on received packets
-+ */
-+#define DPNI_OPT_IPR 0x00000800
-+/**
-+ * Support IP fragmentation on transmitted packets
-+ */
-+#define DPNI_OPT_IPF 0x00001000
-+/**
-+ * VLAN manipulation support
-+ */
-+#define DPNI_OPT_VLAN_MANIPULATION 0x00010000
-+/**
-+ * Support masking of QoS lookup keys
-+ */
-+#define DPNI_OPT_QOS_MASK_SUPPORT 0x00020000
-+/**
-+ * Support masking of Flow Steering lookup keys
-+ */
-+#define DPNI_OPT_FS_MASK_SUPPORT 0x00040000
-+
-+/**
-+ * struct dpni_extended_cfg - Structure representing extended DPNI configuration
-+ * @tc_cfg: TCs configuration
-+ * @ipr_cfg: IP reassembly configuration
-+ */
-+struct dpni_extended_cfg {
-+ /**
-+ * struct tc_cfg - TC configuration
-+ * @max_dist: Maximum distribution size for Rx traffic class;
-+ * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96,
-+ * 112,128,192,224,256,384,448,512,768,896,1024;
-+ * value '0' will be treated as '1';
-+ * other unsupported values will be rounded down to the nearest
-+ * supported value.
-+ * @max_fs_entries: Maximum FS entries for Rx traffic class;
-+ * '0' means no support for this TC;
-+ */
-+ struct {
-+ uint16_t max_dist;
-+ uint16_t max_fs_entries;
-+ } tc_cfg[DPNI_MAX_TC];
-+ /**
-+ * struct ipr_cfg - Structure representing IP reassembly configuration
-+ * @max_reass_frm_size: Maximum size of the reassembled frame
-+ * @min_frag_size_ipv4: Minimum fragment size of IPv4 fragments
-+ * @min_frag_size_ipv6: Minimum fragment size of IPv6 fragments
-+ * @max_open_frames_ipv4: Maximum concurrent IPv4 packets in reassembly
-+ * process
-+ * @max_open_frames_ipv6: Maximum concurrent IPv6 packets in reassembly
-+ * process
-+ */
-+ struct {
-+ uint16_t max_reass_frm_size;
-+ uint16_t min_frag_size_ipv4;
-+ uint16_t min_frag_size_ipv6;
-+ uint16_t max_open_frames_ipv4;
-+ uint16_t max_open_frames_ipv6;
-+ } ipr_cfg;
-+};
-+
-+/**
-+ * dpni_prepare_extended_cfg() - Prepare extended configuration parameters
-+ * @cfg: extended structure
-+ * @ext_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA
-+ *
-+ * This function has to be called before dpni_create()
-+ */
-+int dpni_prepare_extended_cfg(const struct dpni_extended_cfg *cfg,
-+ uint8_t *ext_cfg_buf);
-+
-+/**
-+ * struct dpni_cfg - Structure representing DPNI configuration
-+ * @mac_addr: Primary MAC address
-+ * @adv: Advanced parameters; default is all zeros;
-+ * use this structure to change default settings
-+ */
-+struct dpni_cfg {
-+ uint8_t mac_addr[6];
-+ /**
-+ * struct adv - Advanced parameters
-+ * @options: Mask of available options; use 'DPNI_OPT_<X>' values
-+ * @start_hdr: Selects the packet starting header for parsing;
-+ * 'NET_PROT_NONE' is treated as default: 'NET_PROT_ETH'
-+ * @max_senders: Maximum number of different senders; used as the number
-+ * of dedicated Tx flows; non-power-of-2 values are rounded
-+ * up to the next power-of-2 value, as required by hardware;
-+ * '0' will be treated as '1'
-+ * @max_tcs: Maximum number of traffic classes (for both Tx and Rx);
-+ * '0' will be treated as '1'
-+ * @max_unicast_filters: Maximum number of unicast filters;
-+ * '0' is treated as '16'
-+ * @max_multicast_filters: Maximum number of multicast filters;
-+ * '0' is treated as '64'
-+ * @max_qos_entries: if 'max_tcs > 1', declares the maximum entries in
-+ * the QoS table; '0' is treated as '64'
-+ * @max_qos_key_size: Maximum key size for the QoS look-up;
-+ * '0' is treated as '24' which is enough for IPv4
-+ * 5-tuple
-+ * @max_dist_key_size: Maximum key size for the distribution;
-+ * '0' is treated as '24' which is enough for IPv4 5-tuple
-+ * @max_policers: Maximum number of policers;
-+ * should be between '0' and max_tcs
-+ * @max_congestion_ctrl: Maximum number of congestion control groups
-+ * (CGs); covers early drop and congestion notification
-+ * requirements;
-+ * should be between '0' and ('max_tcs' + 'max_senders')
-+ * @ext_cfg_iova: I/O virtual address of 256 bytes DMA-able memory
-+ * filled with the extended configuration by calling
-+ * dpni_prepare_extended_cfg()
-+ */
-+ struct {
-+ uint32_t options;
-+ enum net_prot start_hdr;
-+ uint8_t max_senders;
-+ uint8_t max_tcs;
-+ uint8_t max_unicast_filters;
-+ uint8_t max_multicast_filters;
-+ uint8_t max_vlan_filters;
-+ uint8_t max_qos_entries;
-+ uint8_t max_qos_key_size;
-+ uint8_t max_dist_key_size;
-+ uint8_t max_policers;
-+ uint8_t max_congestion_ctrl;
-+ uint64_t ext_cfg_iova;
-+ } adv;
-+};
-+
-+/**
-+ * dpni_create() - Create the DPNI object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @cfg: Configuration structure
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * Create the DPNI object, allocate required resources and
-+ * perform required initialization.
-+ *
-+ * The object can be created either by declaring it in the
-+ * DPL file, or by calling this function.
-+ *
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent calls to
-+ * this specific object. For objects that are created using the
-+ * DPL file, call dpni_open() function to get an authentication
-+ * token first.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_create(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ const struct dpni_cfg *cfg,
-+ uint16_t *token);
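-+
-+/*
-+ * Usage sketch (editor's illustration, not part of the MC API): creating a
-+ * DPNI with a few advanced parameters and an extended configuration. The
-+ * 256-byte buffer 'ext_buf' and its I/O virtual address 'ext_iova' are
-+ * assumed to come from the caller's own DMA-able allocation; the MAC
-+ * address is a placeholder.
-+ *
-+ *	const uint8_t mac[6] = { 0x00, 0x04, 0x9f, 0x00, 0x00, 0x01 };
-+ *	struct dpni_extended_cfg ext_cfg = { 0 };
-+ *	struct dpni_cfg cfg = { 0 };
-+ *	uint16_t token;
-+ *	int err;
-+ *
-+ *	ext_cfg.tc_cfg[0].max_dist = 8;
-+ *	memset(ext_buf, 0, 256);
-+ *	err = dpni_prepare_extended_cfg(&ext_cfg, ext_buf);
-+ *	if (err)
-+ *		return err;
-+ *
-+ *	memcpy(cfg.mac_addr, mac, 6);
-+ *	cfg.adv.options = DPNI_OPT_DIST_HASH | DPNI_OPT_UNICAST_FILTER;
-+ *	cfg.adv.max_tcs = 1;
-+ *	cfg.adv.ext_cfg_iova = ext_iova;
-+ *	err = dpni_create(mc_io, 0, &cfg, &token);
-+ */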
-+
-+/**
-+ * dpni_destroy() - Destroy the DPNI object and release all its resources.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dpni_destroy(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * struct dpni_pools_cfg - Structure representing buffer pools configuration
-+ * @num_dpbp: Number of DPBPs
-+ * @pools: Array of buffer pools parameters; The number of valid entries
-+ * must match 'num_dpbp' value
-+ */
-+struct dpni_pools_cfg {
-+ uint8_t num_dpbp;
-+ /**
-+ * struct pools - Buffer pools parameters
-+ * @dpbp_id: DPBP object ID
-+ * @buffer_size: Buffer size
-+ * @backup_pool: Backup pool
-+ */
-+ struct {
-+ int dpbp_id;
-+ uint16_t buffer_size;
-+ int backup_pool;
-+ } pools[DPNI_MAX_DPBP];
-+};
-+
-+/**
-+ * dpni_set_pools() - Set buffer pools configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @cfg: Buffer pools configuration
-+ *
-+ * This function is mandatory for DPNI operation.
-+ * warning: Allowed only when DPNI is disabled
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_pools(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const struct dpni_pools_cfg *cfg);
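-+
-+/*
-+ * Usage sketch (editor's illustration): attaching a single buffer pool to a
-+ * disabled DPNI. 'dpbp_id' is assumed to identify an existing DPBP object;
-+ * the 2048-byte buffer size is only an example.
-+ *
-+ *	struct dpni_pools_cfg pools = { 0 };
-+ *
-+ *	pools.num_dpbp = 1;
-+ *	pools.pools[0].dpbp_id = dpbp_id;
-+ *	pools.pools[0].buffer_size = 2048;
-+ *	pools.pools[0].backup_pool = 0;
-+ *	err = dpni_set_pools(mc_io, 0, token, &pools);
-+ */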
-+
-+/**
-+ * dpni_enable() - Enable the DPNI, allow sending and receiving frames.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * dpni_disable() - Disable the DPNI, stop sending and receiving frames.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_disable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * dpni_is_enabled() - Check if the DPNI is enabled.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @en: Returns '1' if object is enabled; '0' otherwise
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_is_enabled(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int *en);
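-+
-+/*
-+ * Usage sketch (editor's illustration): enabling the DPNI and verifying the
-+ * resulting state before starting traffic.
-+ *
-+ *	int en = 0;
-+ *
-+ *	err = dpni_enable(mc_io, 0, token);
-+ *	if (!err)
-+ *		err = dpni_is_enabled(mc_io, 0, token, &en);
-+ *	if (!err && !en)
-+ *		err = -1;	// placeholder error: object did not report enabled
-+ */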
-+
-+/**
-+ * dpni_reset() - Reset the DPNI, returns the object to initial state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_reset(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * DPNI IRQ Index and Events
-+ */
-+
-+/**
-+ * IRQ index
-+ */
-+#define DPNI_IRQ_INDEX 0
-+/**
-+ * IRQ event - indicates a change in link state
-+ */
-+#define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001
-+
-+/**
-+ * struct dpni_irq_cfg - IRQ configuration
-+ * @addr: Address that must be written to signal a message-based interrupt
-+ * @val: Value to write into irq_addr address
-+ * @irq_num: A user defined number associated with this IRQ
-+ */
-+struct dpni_irq_cfg {
-+ uint64_t addr;
-+ uint32_t val;
-+ int irq_num;
-+};
-+
-+/**
-+ * dpni_set_irq() - Set IRQ information for the DPNI to trigger an interrupt.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @irq_index: Identifies the interrupt index to configure
-+ * @irq_cfg: IRQ configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ struct dpni_irq_cfg *irq_cfg);
-+
-+/**
-+ * dpni_get_irq() - Get IRQ information from the DPNI.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @irq_index: The interrupt index to configure
-+ * @type: Interrupt type: 0 represents message interrupt
-+ * type (both irq_addr and irq_val are valid)
-+ * @irq_cfg: IRQ attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ int *type,
-+ struct dpni_irq_cfg *irq_cfg);
-+
-+/**
-+ * dpni_set_irq_enable() - Set overall interrupt state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Interrupt state: - enable = 1, disable = 0
-+ *
-+ * Allows GPP software to control when interrupts are generated.
-+ * Each interrupt can have up to 32 causes. The enable/disable controls the
-+ * overall interrupt state: if the interrupt is disabled, no cause will
-+ * trigger an interrupt.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t en);
-+
-+/**
-+ * dpni_get_irq_enable() - Get overall interrupt state
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Returned interrupt state - enable = 1, disable = 0
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t *en);
-+
-+/**
-+ * dpni_set_irq_mask() - Set interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: event mask to trigger interrupt;
-+ * each bit:
-+ * 0 = ignore event
-+ * 1 = consider event for asserting IRQ
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t mask);
-+
-+/**
-+ * dpni_get_irq_mask() - Get interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: Returned event mask to trigger interrupt
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *mask);
-+
-+/**
-+ * dpni_get_irq_status() - Get the current status of any pending interrupts.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @irq_index: The interrupt index to configure
-+ * @status: Returned interrupts status - one bit per cause:
-+ * 0 = no interrupt pending
-+ * 1 = interrupt pending
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *status);
-+
-+/**
-+ * dpni_clear_irq_status() - Clear a pending interrupt's status
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @irq_index: The interrupt index to configure
-+ * @status: bits to clear (W1C) - one bit per cause:
-+ * 0 = don't change
-+ * 1 = clear status bit
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_clear_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t status);
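-+
-+/*
-+ * Usage sketch (editor's illustration): arming the DPNI link-change
-+ * interrupt and acknowledging it from the handler. 'msi_addr' and
-+ * 'msi_data' are assumed to come from the caller's own message-interrupt
-+ * allocation.
-+ *
-+ *	struct dpni_irq_cfg irq_cfg = {
-+ *		.addr = msi_addr,
-+ *		.val = msi_data,
-+ *		.irq_num = 0,
-+ *	};
-+ *
-+ *	dpni_set_irq(mc_io, 0, token, DPNI_IRQ_INDEX, &irq_cfg);
-+ *	dpni_set_irq_mask(mc_io, 0, token, DPNI_IRQ_INDEX,
-+ *			  DPNI_IRQ_EVENT_LINK_CHANGED);
-+ *	dpni_set_irq_enable(mc_io, 0, token, DPNI_IRQ_INDEX, 1);
-+ *
-+ *	// later, in the interrupt handler:
-+ *	uint32_t status = 0;
-+ *	dpni_get_irq_status(mc_io, 0, token, DPNI_IRQ_INDEX, &status);
-+ *	if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
-+ *		dpni_clear_irq_status(mc_io, 0, token, DPNI_IRQ_INDEX,
-+ *				      DPNI_IRQ_EVENT_LINK_CHANGED);
-+ */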
-+
-+/**
-+ * struct dpni_attr - Structure representing DPNI attributes
-+ * @id: DPNI object ID
-+ * @version: DPNI version
-+ * @start_hdr: Indicates the packet starting header for parsing
-+ * @options: Mask of available options; reflects the value as was given in
-+ * object's creation
-+ * @max_senders: Maximum number of different senders; used as the number
-+ * of dedicated Tx flows;
-+ * @max_tcs: Maximum number of traffic classes (for both Tx and Rx)
-+ * @max_unicast_filters: Maximum number of unicast filters
-+ * @max_multicast_filters: Maximum number of multicast filters
-+ * @max_vlan_filters: Maximum number of VLAN filters
-+ * @max_qos_entries: if 'max_tcs > 1', declares the maximum entries in QoS table
-+ * @max_qos_key_size: Maximum key size for the QoS look-up
-+ * @max_dist_key_size: Maximum key size for the distribution look-up
-+ * @max_policers: Maximum number of policers;
-+ * @max_congestion_ctrl: Maximum number of congestion control groups (CGs);
-+ * @ext_cfg_iova: I/O virtual address of 256 bytes DMA-able memory;
-+ * call dpni_extract_extended_cfg() to extract the extended configuration
-+ */
-+struct dpni_attr {
-+ int id;
-+ /**
-+ * struct version - DPNI version
-+ * @major: DPNI major version
-+ * @minor: DPNI minor version
-+ */
-+ struct {
-+ uint16_t major;
-+ uint16_t minor;
-+ } version;
-+ enum net_prot start_hdr;
-+ uint32_t options;
-+ uint8_t max_senders;
-+ uint8_t max_tcs;
-+ uint8_t max_unicast_filters;
-+ uint8_t max_multicast_filters;
-+ uint8_t max_vlan_filters;
-+ uint8_t max_qos_entries;
-+ uint8_t max_qos_key_size;
-+ uint8_t max_dist_key_size;
-+ uint8_t max_policers;
-+ uint8_t max_congestion_ctrl;
-+ uint64_t ext_cfg_iova;
-+};
-+
-+/**
-+ * dpni_get_attributes() - Retrieve DPNI attributes.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @attr: Object's attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpni_attr *attr);
-+
-+/**
-+ * dpni_extract_extended_cfg() - Extract the extended configuration parameters
-+ * @cfg: extended structure
-+ * @ext_cfg_buf: 256 bytes of DMA-able memory
-+ *
-+ * This function has to be called after dpni_get_attributes()
-+ */
-+int dpni_extract_extended_cfg(struct dpni_extended_cfg *cfg,
-+ const uint8_t *ext_cfg_buf);
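-+
-+/*
-+ * Usage sketch (editor's illustration): reading back the object attributes
-+ * and the per-TC extended configuration. 'ext_buf' and 'ext_iova' are
-+ * assumed to be a 256-byte DMA-able buffer owned by the caller; setting
-+ * attr.ext_cfg_iova before the call is the editor's reading of the
-+ * description above.
-+ *
-+ *	struct dpni_attr attr = { 0 };
-+ *	struct dpni_extended_cfg ext_cfg = { 0 };
-+ *
-+ *	attr.ext_cfg_iova = ext_iova;
-+ *	err = dpni_get_attributes(mc_io, 0, token, &attr);
-+ *	if (!err)
-+ *		err = dpni_extract_extended_cfg(&ext_cfg, ext_buf);
-+ */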
-+
-+/**
-+ * DPNI errors
-+ */
-+
-+/**
-+ * Extract out of frame header error
-+ */
-+#define DPNI_ERROR_EOFHE 0x00020000
-+/**
-+ * Frame length error
-+ */
-+#define DPNI_ERROR_FLE 0x00002000
-+/**
-+ * Frame physical error
-+ */
-+#define DPNI_ERROR_FPE 0x00001000
-+/**
-+ * Parsing header error
-+ */
-+#define DPNI_ERROR_PHE 0x00000020
-+/**
-+ * Parser L3 checksum error
-+ */
-+#define DPNI_ERROR_L3CE 0x00000004
-+/**
-+ * Parser L4 checksum error
-+ */
-+#define DPNI_ERROR_L4CE 0x00000001
-+
-+/**
-+ * enum dpni_error_action - Defines DPNI behavior for errors
-+ * @DPNI_ERROR_ACTION_DISCARD: Discard the frame
-+ * @DPNI_ERROR_ACTION_CONTINUE: Continue with the normal flow
-+ * @DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE: Send the frame to the error queue
-+ */
-+enum dpni_error_action {
-+ DPNI_ERROR_ACTION_DISCARD = 0,
-+ DPNI_ERROR_ACTION_CONTINUE = 1,
-+ DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE = 2
-+};
-+
-+/**
-+ * struct dpni_error_cfg - Structure representing DPNI errors treatment
-+ * @errors: Errors mask; use 'DPNI_ERROR_<X>' values
-+ * @error_action: The desired action for the errors mask
-+ * @set_frame_annotation: Set to '1' to mark the errors in frame annotation
-+ * status (FAS); relevant only for the non-discard action
-+ */
-+struct dpni_error_cfg {
-+ uint32_t errors;
-+ enum dpni_error_action error_action;
-+ int set_frame_annotation;
-+};
-+
-+/**
-+ * dpni_set_errors_behavior() - Set errors behavior
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @cfg: Errors configuration
-+ *
-+ * This function may be called numerous times with different
-+ * error masks.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpni_error_cfg *cfg);
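-+
-+/*
-+ * Usage sketch (editor's illustration): letting frames with parser L3/L4
-+ * checksum errors through while flagging them in the frame annotation
-+ * status, so software can decide what to do with them.
-+ *
-+ *	struct dpni_error_cfg err_cfg = {
-+ *		.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE,
-+ *		.error_action = DPNI_ERROR_ACTION_CONTINUE,
-+ *		.set_frame_annotation = 1,
-+ *	};
-+ *
-+ *	err = dpni_set_errors_behavior(mc_io, 0, token, &err_cfg);
-+ */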
-+
-+/**
-+ * DPNI buffer layout modification options
-+ */
-+
-+/**
-+ * Select to modify the time-stamp setting
-+ */
-+#define DPNI_BUF_LAYOUT_OPT_TIMESTAMP 0x00000001
-+/**
-+ * Select to modify the parser-result setting; not applicable for Tx
-+ */
-+#define DPNI_BUF_LAYOUT_OPT_PARSER_RESULT 0x00000002
-+/**
-+ * Select to modify the frame-status setting
-+ */
-+#define DPNI_BUF_LAYOUT_OPT_FRAME_STATUS 0x00000004
-+/**
-+ * Select to modify the private-data-size setting
-+ */
-+#define DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE 0x00000008
-+/**
-+ * Select to modify the data-alignment setting
-+ */
-+#define DPNI_BUF_LAYOUT_OPT_DATA_ALIGN 0x00000010
-+/**
-+ * Select to modify the data-head-room setting
-+ */
-+#define DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM 0x00000020
-+/**
-+ * Select to modify the data-tail-room setting
-+ */
-+#define DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM 0x00000040
-+
-+/**
-+ * struct dpni_buffer_layout - Structure representing DPNI buffer layout
-+ * @options: Flags representing the suggested modifications to the buffer
-+ * layout; Use any combination of 'DPNI_BUF_LAYOUT_OPT_<X>' flags
-+ * @pass_timestamp: Pass timestamp value
-+ * @pass_parser_result: Pass parser results
-+ * @pass_frame_status: Pass frame status
-+ * @private_data_size: Size kept for private data (in bytes)
-+ * @data_align: Data alignment
-+ * @data_head_room: Data head room
-+ * @data_tail_room: Data tail room
-+ */
-+struct dpni_buffer_layout {
-+ uint32_t options;
-+ int pass_timestamp;
-+ int pass_parser_result;
-+ int pass_frame_status;
-+ uint16_t private_data_size;
-+ uint16_t data_align;
-+ uint16_t data_head_room;
-+ uint16_t data_tail_room;
-+};
-+
-+/**
-+ * dpni_get_rx_buffer_layout() - Retrieve Rx buffer layout attributes.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @layout: Returns buffer layout attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_rx_buffer_layout(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpni_buffer_layout *layout);
-+
-+/**
-+ * dpni_set_rx_buffer_layout() - Set Rx buffer layout configuration.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @layout: Buffer layout configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ *
-+ * @warning Allowed only when DPNI is disabled
-+ */
-+int dpni_set_rx_buffer_layout(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const struct dpni_buffer_layout *layout);
-+
-+/**
-+ * dpni_get_tx_buffer_layout() - Retrieve Tx buffer layout attributes.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @layout: Returns buffer layout attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_tx_buffer_layout(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpni_buffer_layout *layout);
-+
-+/**
-+ * dpni_set_tx_buffer_layout() - Set Tx buffer layout configuration.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @layout: Buffer layout configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ *
-+ * @warning Allowed only when DPNI is disabled
-+ */
-+int dpni_set_tx_buffer_layout(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const struct dpni_buffer_layout *layout);
-+
-+/**
-+ * dpni_get_tx_conf_buffer_layout() - Retrieve Tx confirmation buffer layout
-+ * attributes.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @layout: Returns buffer layout attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_tx_conf_buffer_layout(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpni_buffer_layout *layout);
-+
-+/**
-+ * dpni_set_tx_conf_buffer_layout() - Set Tx confirmation buffer layout
-+ * configuration.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @layout: Buffer layout configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ *
-+ * @warning Allowed only when DPNI is disabled
-+ */
-+int dpni_set_tx_conf_buffer_layout(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const struct dpni_buffer_layout *layout);
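-+
-+/*
-+ * Usage sketch (editor's illustration): requesting extra Rx headroom and a
-+ * timestamp in the Rx buffer layout. Only the fields selected in 'options'
-+ * are applied, and the DPNI must be disabled when this is called; the
-+ * 128-byte headroom is only an example value.
-+ *
-+ *	struct dpni_buffer_layout layout = { 0 };
-+ *
-+ *	layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
-+ *			 DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
-+ *	layout.data_head_room = 128;
-+ *	layout.pass_timestamp = 1;
-+ *	err = dpni_set_rx_buffer_layout(mc_io, 0, token, &layout);
-+ */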
-+
-+/**
-+ * dpni_set_l3_chksum_validation() - Enable/disable L3 checksum validation
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @en: Set to '1' to enable; '0' to disable
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_l3_chksum_validation(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int en);
-+
-+/**
-+ * dpni_get_l3_chksum_validation() - Get L3 checksum validation mode
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @en: Returns '1' if enabled; '0' otherwise
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_l3_chksum_validation(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int *en);
-+
-+/**
-+ * dpni_set_l4_chksum_validation() - Enable/disable L4 checksum validation
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @en: Set to '1' to enable; '0' to disable
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_l4_chksum_validation(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int en);
-+
-+/**
-+ * dpni_get_l4_chksum_validation() - Get L4 checksum validation mode
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @en: Returns '1' if enabled; '0' otherwise
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_l4_chksum_validation(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int *en);
-+
-+/**
-+ * dpni_get_qdid() - Get the Queuing Destination ID (QDID) that should be used
-+ * for enqueue operations
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @qdid: Returned virtual QDID value that should be used as an argument
-+ * in all enqueue operations
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_qdid(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t *qdid);
-+
-+/**
-+ * struct dpni_sp_info - Structure representing DPNI storage-profile information
-+ * (relevant only for DPNI owned by AIOP)
-+ * @spids: array of storage-profiles
-+ */
-+struct dpni_sp_info {
-+ uint16_t spids[DPNI_MAX_SP];
-+};
-+
-+/**
-+ * dpni_get_sp_info() - Get the AIOP storage profile IDs associated with the DPNI
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @sp_info: Returned AIOP storage-profile information
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ *
-+ * @warning Only relevant for DPNI that belongs to AIOP container.
-+ */
-+int dpni_get_sp_info(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpni_sp_info *sp_info);
-+
-+/**
-+ * dpni_get_tx_data_offset() - Get the Tx data offset (from start of buffer)
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @data_offset: Tx data offset (from start of buffer)
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t *data_offset);
-+
-+/**
-+ * enum dpni_counter - DPNI counter types
-+ * @DPNI_CNT_ING_FRAME: Counts ingress frames
-+ * @DPNI_CNT_ING_BYTE: Counts ingress bytes
-+ * @DPNI_CNT_ING_FRAME_DROP: Counts ingress frames dropped due to explicit
-+ * 'drop' setting
-+ * @DPNI_CNT_ING_FRAME_DISCARD: Counts ingress frames discarded due to errors
-+ * @DPNI_CNT_ING_MCAST_FRAME: Counts ingress multicast frames
-+ * @DPNI_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes
-+ * @DPNI_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames
-+ * @DPNI_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes
-+ * @DPNI_CNT_EGR_FRAME: Counts egress frames
-+ * @DPNI_CNT_EGR_BYTE: Counts egress bytes
-+ * @DPNI_CNT_EGR_FRAME_DISCARD: Counts egress frames discarded due to errors
-+ */
-+enum dpni_counter {
-+ DPNI_CNT_ING_FRAME = 0x0,
-+ DPNI_CNT_ING_BYTE = 0x1,
-+ DPNI_CNT_ING_FRAME_DROP = 0x2,
-+ DPNI_CNT_ING_FRAME_DISCARD = 0x3,
-+ DPNI_CNT_ING_MCAST_FRAME = 0x4,
-+ DPNI_CNT_ING_MCAST_BYTE = 0x5,
-+ DPNI_CNT_ING_BCAST_FRAME = 0x6,
-+ DPNI_CNT_ING_BCAST_BYTES = 0x7,
-+ DPNI_CNT_EGR_FRAME = 0x8,
-+ DPNI_CNT_EGR_BYTE = 0x9,
-+ DPNI_CNT_EGR_FRAME_DISCARD = 0xa
-+};
-+
-+/**
-+ * dpni_get_counter() - Read a specific DPNI counter
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @counter: The requested counter
-+ * @value: Returned counter's current value
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_counter(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ enum dpni_counter counter,
-+ uint64_t *value);
-+
-+/**
-+ * dpni_set_counter() - Set (or clear) a specific DPNI counter
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @counter: The requested counter
-+ * @value: New counter value; typically pass '0' for resetting
-+ * the counter.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_counter(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ enum dpni_counter counter,
-+ uint64_t value);
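-+
-+/*
-+ * Usage sketch (editor's illustration): reading the ingress frame counter
-+ * and then clearing it by writing '0'.
-+ *
-+ *	uint64_t frames = 0;
-+ *
-+ *	err = dpni_get_counter(mc_io, 0, token, DPNI_CNT_ING_FRAME, &frames);
-+ *	if (!err)
-+ *		err = dpni_set_counter(mc_io, 0, token, DPNI_CNT_ING_FRAME, 0);
-+ */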
-+
-+/**
-+ * Enable auto-negotiation
-+ */
-+#define DPNI_LINK_OPT_AUTONEG 0x0000000000000001ULL
-+/**
-+ * Enable half-duplex mode
-+ */
-+#define DPNI_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
-+/**
-+ * Enable pause frames
-+ */
-+#define DPNI_LINK_OPT_PAUSE 0x0000000000000004ULL
-+/**
-+ * Enable asymmetric pause frames
-+ */
-+#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
-+
-+/**
-+ * struct dpni_link_cfg - Structure representing DPNI link configuration
-+ * @rate: Rate
-+ * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
-+ */
-+struct dpni_link_cfg {
-+ uint32_t rate;
-+ uint64_t options;
-+};
-+
-+/**
-+ * dpni_set_link_cfg() - Set the link configuration.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @cfg: Link configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const struct dpni_link_cfg *cfg);
-+
-+/**
-+ * struct dpni_link_state - Structure representing DPNI link state
-+ * @rate: Rate
-+ * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
-+ * @up: Link state; '0' for down, '1' for up
-+ */
-+struct dpni_link_state {
-+ uint32_t rate;
-+ uint64_t options;
-+ int up;
-+};
-+
-+/**
-+ * dpni_get_link_state() - Return the link state (either up or down)
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @state: Returned link state;
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_link_state(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpni_link_state *state);
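-+
-+/*
-+ * Usage sketch (editor's illustration): requesting an auto-negotiated link
-+ * and polling the resulting state. The rate units are not specified in this
-+ * header, so the value 1000 below is only a placeholder.
-+ *
-+ *	struct dpni_link_cfg link_cfg = {
-+ *		.rate = 1000,
-+ *		.options = DPNI_LINK_OPT_AUTONEG,
-+ *	};
-+ *	struct dpni_link_state state = { 0 };
-+ *
-+ *	err = dpni_set_link_cfg(mc_io, 0, token, &link_cfg);
-+ *	if (!err)
-+ *		err = dpni_get_link_state(mc_io, 0, token, &state);
-+ *	// state.up reads '1' once the link comes up
-+ */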
-+
-+/**
-+ * struct dpni_tx_shaping_cfg - Structure representing DPNI Tx shaping configuration
-+ * @rate_limit: rate in Mbps
-+ * @max_burst_size: burst size in bytes (up to 64KB)
-+ */
-+struct dpni_tx_shaping_cfg {
-+ uint32_t rate_limit;
-+ uint16_t max_burst_size;
-+};
-+
-+/**
-+ * dpni_set_tx_shaping() - Set the transmit shaping
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @tx_shaper: tx shaping configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const struct dpni_tx_shaping_cfg *tx_shaper);
-+
-+/**
-+ * dpni_set_max_frame_length() - Set the maximum received frame length.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @max_frame_length: Maximum received frame length (in
-+ * bytes); frame is discarded if its
-+ * length exceeds this value
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t max_frame_length);
-+
-+/**
-+ * dpni_get_max_frame_length() - Get the maximum received frame length.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @max_frame_length: Maximum received frame length (in
-+ * bytes); frame is discarded if its
-+ * length exceeds this value
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t *max_frame_length);
-+
-+/**
-+ * dpni_set_mtu() - Set the MTU for the interface.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @mtu: MTU length (in bytes)
-+ *
-+ * MTU determines the maximum fragment size for performing IP
-+ * fragmentation on egress packets.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_mtu(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t mtu);
-+
-+/**
-+ * dpni_get_mtu() - Get the MTU.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @mtu: Returned MTU length (in bytes)
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_mtu(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t *mtu);
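-+
-+/*
-+ * Usage sketch (editor's illustration): accepting standard 1500-byte IP
-+ * packets plus Ethernet/VLAN/FCS overhead, and limiting egress IP fragments
-+ * to the same MTU. The 1522/1500 values are illustrative.
-+ *
-+ *	err = dpni_set_max_frame_length(mc_io, 0, token, 1522);
-+ *	if (!err)
-+ *		err = dpni_set_mtu(mc_io, 0, token, 1500);
-+ */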
-+
-+/**
-+ * dpni_set_multicast_promisc() - Enable/disable multicast promiscuous mode
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @en: Set to '1' to enable; '0' to disable
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int en);
-+
-+/**
-+ * dpni_get_multicast_promisc() - Get multicast promiscuous mode
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @en: Returns '1' if enabled; '0' otherwise
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int *en);
-+
-+/**
-+ * dpni_set_unicast_promisc() - Enable/disable unicast promiscuous mode
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @en: Set to '1' to enable; '0' to disable
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int en);
-+
-+/**
-+ * dpni_get_unicast_promisc() - Get unicast promiscuous mode
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @en: Returns '1' if enabled; '0' otherwise
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int *en);
-+
-+/**
-+ * dpni_set_primary_mac_addr() - Set the primary MAC address
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @mac_addr: MAC address to set as primary address
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const uint8_t mac_addr[6]);
-+
-+/**
-+ * dpni_get_primary_mac_addr() - Get the primary MAC address
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @mac_addr: Returned MAC address
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t mac_addr[6]);
-+
-+/**
-+ * dpni_add_mac_addr() - Add MAC address filter
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @mac_addr: MAC address to add
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const uint8_t mac_addr[6]);
-+
-+/**
-+ * dpni_remove_mac_addr() - Remove MAC address filter
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @mac_addr: MAC address to remove
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const uint8_t mac_addr[6]);
-+
-+/**
-+ * dpni_clear_mac_filters() - Clear all unicast and/or multicast MAC filters
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @unicast: Set to '1' to clear unicast addresses
-+ * @multicast: Set to '1' to clear multicast addresses
-+ *
-+ * The primary MAC address is not cleared by this operation.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int unicast,
-+ int multicast);
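-+
-+/*
-+ * Usage sketch (editor's illustration): setting the primary MAC address and
-+ * adding one extra unicast filter. Both addresses are placeholders.
-+ *
-+ *	const uint8_t primary[6] = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 };
-+ *	const uint8_t extra[6]   = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x04 };
-+ *
-+ *	err = dpni_set_primary_mac_addr(mc_io, 0, token, primary);
-+ *	if (!err)
-+ *		err = dpni_add_mac_addr(mc_io, 0, token, extra);
-+ */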
-+
-+/**
-+ * dpni_set_vlan_filters() - Enable/disable VLAN filtering mode
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @en: Set to '1' to enable; '0' to disable
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_vlan_filters(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int en);
-+
-+/**
-+ * dpni_add_vlan_id() - Add VLAN ID filter
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @vlan_id: VLAN ID to add
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_add_vlan_id(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t vlan_id);
-+
-+/**
-+ * dpni_remove_vlan_id() - Remove VLAN ID filter
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @vlan_id: VLAN ID to remove
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_remove_vlan_id(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t vlan_id);
-+
-+/**
-+ * dpni_clear_vlan_filters() - Clear all VLAN filters
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_clear_vlan_filters(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * enum dpni_tx_schedule_mode - DPNI Tx scheduling mode
-+ * @DPNI_TX_SCHED_STRICT_PRIORITY: strict priority
-+ * @DPNI_TX_SCHED_WEIGHTED: weighted based scheduling
-+ */
-+enum dpni_tx_schedule_mode {
-+ DPNI_TX_SCHED_STRICT_PRIORITY,
-+ DPNI_TX_SCHED_WEIGHTED,
-+};
-+
-+/**
-+ * struct dpni_tx_schedule_cfg - Structure representing Tx
-+ * scheduling configuration
-+ * @mode: scheduling mode
-+ * @delta_bandwidth: Bandwidth represented in weights from 100 to 10000;
-+ * not applicable for 'strict-priority' mode;
-+ */
-+struct dpni_tx_schedule_cfg {
-+ enum dpni_tx_schedule_mode mode;
-+ uint16_t delta_bandwidth;
-+};
-+
-+/**
-+ * struct dpni_tx_selection_cfg - Structure representing transmission
-+ * selection configuration
-+ * @tc_sched: an array of traffic-classes
-+ */
-+struct dpni_tx_selection_cfg {
-+ struct dpni_tx_schedule_cfg tc_sched[DPNI_MAX_TC];
-+};
-+
-+/**
-+ * dpni_set_tx_selection() - Set transmission selection configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @cfg: transmission selection configuration
-+ *
-+ * warning: Allowed only when DPNI is disabled
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_tx_selection(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const struct dpni_tx_selection_cfg *cfg);
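-+
-+/*
-+ * Usage sketch (editor's illustration): giving TC 0 strict priority and
-+ * sharing the remaining bandwidth between TC 1 and TC 2 by weight (100 vs
-+ * 200). Only meaningful while the DPNI is disabled.
-+ *
-+ *	struct dpni_tx_selection_cfg sel = { 0 };
-+ *
-+ *	sel.tc_sched[0].mode = DPNI_TX_SCHED_STRICT_PRIORITY;
-+ *	sel.tc_sched[1].mode = DPNI_TX_SCHED_WEIGHTED;
-+ *	sel.tc_sched[1].delta_bandwidth = 100;
-+ *	sel.tc_sched[2].mode = DPNI_TX_SCHED_WEIGHTED;
-+ *	sel.tc_sched[2].delta_bandwidth = 200;
-+ *	err = dpni_set_tx_selection(mc_io, 0, token, &sel);
-+ */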
-+
-+/**
-+ * enum dpni_dist_mode - DPNI distribution mode
-+ * @DPNI_DIST_MODE_NONE: No distribution
-+ * @DPNI_DIST_MODE_HASH: Use hash distribution; only relevant if
-+ * the 'DPNI_OPT_DIST_HASH' option was set at DPNI creation
-+ * @DPNI_DIST_MODE_FS: Use explicit flow steering; only relevant if
-+ * the 'DPNI_OPT_DIST_FS' option was set at DPNI creation
-+ */
-+enum dpni_dist_mode {
-+ DPNI_DIST_MODE_NONE = 0,
-+ DPNI_DIST_MODE_HASH = 1,
-+ DPNI_DIST_MODE_FS = 2
-+};
-+
-+/**
-+ * enum dpni_fs_miss_action - DPNI Flow Steering miss action
-+ * @DPNI_FS_MISS_DROP: In case of no-match, drop the frame
-+ * @DPNI_FS_MISS_EXPLICIT_FLOWID: In case of no-match, use explicit flow-id
-+ * @DPNI_FS_MISS_HASH: In case of no-match, distribute using hash
-+ */
-+enum dpni_fs_miss_action {
-+ DPNI_FS_MISS_DROP = 0,
-+ DPNI_FS_MISS_EXPLICIT_FLOWID = 1,
-+ DPNI_FS_MISS_HASH = 2
-+};
-+
-+/**
-+ * struct dpni_fs_tbl_cfg - Flow Steering table configuration
-+ * @miss_action: Miss action selection
-+ * @default_flow_id: Used when 'miss_action = DPNI_FS_MISS_EXPLICIT_FLOWID'
-+ */
-+struct dpni_fs_tbl_cfg {
-+ enum dpni_fs_miss_action miss_action;
-+ uint16_t default_flow_id;
-+};
-+
-+/**
-+ * dpni_prepare_key_cfg() - Prepare key extraction parameters
-+ * @cfg: defining a full Key Generation profile (rule)
-+ * @key_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA
-+ *
-+ * This function has to be called before the following functions:
-+ * - dpni_set_rx_tc_dist()
-+ * - dpni_set_qos_table()
-+ */
-+int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg,
-+ uint8_t *key_cfg_buf);
-+
-+/**
-+ * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration
-+ * @dist_size: Set the distribution size;
-+ * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96,
-+ * 112,128,192,224,256,384,448,512,768,896,1024
-+ * @dist_mode: Distribution mode
-+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
-+ * the extractions to be used for the distribution key by calling
-+ * dpni_prepare_key_cfg(); relevant only when
-+ * 'dist_mode != DPNI_DIST_MODE_NONE', otherwise it can be '0'
-+ * @fs_cfg: Flow Steering table configuration; only relevant if
-+ * 'dist_mode = DPNI_DIST_MODE_FS'
-+ */
-+struct dpni_rx_tc_dist_cfg {
-+ uint16_t dist_size;
-+ enum dpni_dist_mode dist_mode;
-+ uint64_t key_cfg_iova;
-+ struct dpni_fs_tbl_cfg fs_cfg;
-+};
-+
-+/**
-+ * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @tc_id: Traffic class selection (0-7)
-+ * @cfg: Traffic class distribution configuration
-+ *
-+ * warning: if 'dist_mode != DPNI_DIST_MODE_NONE', call dpni_prepare_key_cfg()
-+ * first to prepare the key_cfg_iova parameter
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t tc_id,
-+ const struct dpni_rx_tc_dist_cfg *cfg);
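-+
-+/*
-+ * Usage sketch (editor's illustration): enabling hash distribution over 8
-+ * queues for traffic class 0. 'kg_cfg' is assumed to be a dpkg_profile_cfg
-+ * (defined elsewhere, not shown in this header) already filled with the
-+ * desired extractions; 'key_buf' and 'key_iova' are a 256-byte DMA-able
-+ * buffer owned by the caller.
-+ *
-+ *	struct dpni_rx_tc_dist_cfg dist = { 0 };
-+ *
-+ *	memset(key_buf, 0, 256);
-+ *	err = dpni_prepare_key_cfg(&kg_cfg, key_buf);
-+ *	if (err)
-+ *		return err;
-+ *
-+ *	dist.dist_size = 8;
-+ *	dist.dist_mode = DPNI_DIST_MODE_HASH;
-+ *	dist.key_cfg_iova = key_iova;
-+ *	err = dpni_set_rx_tc_dist(mc_io, 0, token, 0, &dist);
-+ */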
-+
-+/**
-+ * Set to select color aware mode (otherwise - color blind)
-+ */
-+#define DPNI_POLICER_OPT_COLOR_AWARE 0x00000001
-+/**
-+ * Set to discard frame with RED color
-+ */
-+#define DPNI_POLICER_OPT_DISCARD_RED 0x00000002
-+
-+/**
-+ * enum dpni_policer_mode - selecting the policer mode
-+ * @DPNI_POLICER_MODE_NONE: Policer is disabled
-+ * @DPNI_POLICER_MODE_PASS_THROUGH: Policer pass through
-+ * @DPNI_POLICER_MODE_RFC_2698: Policer algorithm RFC 2698
-+ * @DPNI_POLICER_MODE_RFC_4115: Policer algorithm RFC 4115
-+ */
-+enum dpni_policer_mode {
-+ DPNI_POLICER_MODE_NONE = 0,
-+ DPNI_POLICER_MODE_PASS_THROUGH,
-+ DPNI_POLICER_MODE_RFC_2698,
-+ DPNI_POLICER_MODE_RFC_4115
-+};
-+
-+/**
-+ * enum dpni_policer_unit - DPNI policer units
-+ * @DPNI_POLICER_UNIT_BYTES: bytes units
-+ * @DPNI_POLICER_UNIT_FRAMES: frames units
-+ */
-+enum dpni_policer_unit {
-+ DPNI_POLICER_UNIT_BYTES = 0,
-+ DPNI_POLICER_UNIT_FRAMES
-+};
-+
-+/**
-+ * enum dpni_policer_color - selecting the policer color
-+ * @DPNI_POLICER_COLOR_GREEN: Green color
-+ * @DPNI_POLICER_COLOR_YELLOW: Yellow color
-+ * @DPNI_POLICER_COLOR_RED: Red color
-+ */
-+enum dpni_policer_color {
-+ DPNI_POLICER_COLOR_GREEN = 0,
-+ DPNI_POLICER_COLOR_YELLOW,
-+ DPNI_POLICER_COLOR_RED
-+};
-+
-+/**
-+ * struct dpni_rx_tc_policing_cfg - Policer configuration
-+ * @options: Mask of available options; use 'DPNI_POLICER_OPT_<X>' values
-+ * @mode: policer mode
-+ * @default_color: For pass-through mode, the policer re-colors any incoming
-+ * packet with this color. For color-aware non-pass-through mode, the
-+ * policer re-colors with this color all packets with FD[DROPP] > 2.
-+ * @units: Bytes or Packets
-+ * @cir: Committed information rate (CIR) in Kbps or packets/second
-+ * @cbs: Committed burst size (CBS) in bytes or packets
-+ * @eir: Peak information rate (PIR, rfc2698) in Kbps or packets/second
-+ * Excess information rate (EIR, rfc4115) in Kbps or packets/second
-+ * @ebs: Peak burst size (PBS, rfc2698) in bytes or packets
-+ * Excess burst size (EBS, rfc4115) in bytes or packets
-+ */
-+struct dpni_rx_tc_policing_cfg {
-+ uint32_t options;
-+ enum dpni_policer_mode mode;
-+ enum dpni_policer_unit units;
-+ enum dpni_policer_color default_color;
-+ uint32_t cir;
-+ uint32_t cbs;
-+ uint32_t eir;
-+ uint32_t ebs;
-+};
-+
-+/**
-+ * dpni_set_rx_tc_policing() - Set Rx traffic class policing configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @tc_id: Traffic class selection (0-7)
-+ * @cfg: Traffic class policing configuration
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dpni_set_rx_tc_policing(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t tc_id,
-+ const struct dpni_rx_tc_policing_cfg *cfg);
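-+
-+/*
-+ * Usage sketch (editor's illustration): a color-blind RFC 2698 policer on
-+ * traffic class 0, with a 100 Mbps committed rate and 200 Mbps peak rate
-+ * (CIR/EIR are in Kbps per the description above); rate and burst values
-+ * are illustrative.
-+ *
-+ *	struct dpni_rx_tc_policing_cfg pol = {
-+ *		.options = DPNI_POLICER_OPT_DISCARD_RED,
-+ *		.mode = DPNI_POLICER_MODE_RFC_2698,
-+ *		.units = DPNI_POLICER_UNIT_BYTES,
-+ *		.default_color = DPNI_POLICER_COLOR_GREEN,
-+ *		.cir = 100000,
-+ *		.cbs = 64000,
-+ *		.eir = 200000,
-+ *		.ebs = 64000,
-+ *	};
-+ *
-+ *	err = dpni_set_rx_tc_policing(mc_io, 0, token, 0, &pol);
-+ */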
-+
-+/**
-+ * dpni_get_rx_tc_policing() - Get Rx traffic class policing configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @tc_id: Traffic class selection (0-7)
-+ * @cfg: Traffic class policing configuration
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dpni_get_rx_tc_policing(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t tc_id,
-+ struct dpni_rx_tc_policing_cfg *cfg);
-+
-+/**
-+ * enum dpni_congestion_unit - DPNI congestion units
-+ * @DPNI_CONGESTION_UNIT_BYTES: bytes units
-+ * @DPNI_CONGESTION_UNIT_FRAMES: frames units
-+ */
-+enum dpni_congestion_unit {
-+ DPNI_CONGESTION_UNIT_BYTES = 0,
-+ DPNI_CONGESTION_UNIT_FRAMES
-+};
-+
-+/**
-+ * enum dpni_early_drop_mode - DPNI early drop mode
-+ * @DPNI_EARLY_DROP_MODE_NONE: early drop is disabled
-+ * @DPNI_EARLY_DROP_MODE_TAIL: early drop in taildrop mode
-+ * @DPNI_EARLY_DROP_MODE_WRED: early drop in WRED mode
-+ */
-+enum dpni_early_drop_mode {
-+ DPNI_EARLY_DROP_MODE_NONE = 0,
-+ DPNI_EARLY_DROP_MODE_TAIL,
-+ DPNI_EARLY_DROP_MODE_WRED
-+};
-+
-+/**
-+ * struct dpni_wred_cfg - WRED configuration
-+ * @max_threshold: maximum threshold at which packets may be discarded; above
-+ * this threshold all packets are discarded; must be less than 2^39;
-+ * internally approximated to the form (x+256)*2^(y-1) due to the HW
-+ * implementation
-+ * @min_threshold: minimum threshold at which packets may be discarded
-+ * @drop_probability: probability that a packet will be discarded (1-100,
-+ * associated with the max_threshold).
-+ */
-+struct dpni_wred_cfg {
-+ uint64_t max_threshold;
-+ uint64_t min_threshold;
-+ uint8_t drop_probability;
-+};
-+
-+/**
-+ * struct dpni_early_drop_cfg - early-drop configuration
-+ * @mode: drop mode
-+ * @units: units type
-+ * @green: WRED - 'green' configuration
-+ * @yellow: WRED - 'yellow' configuration
-+ * @red: WRED - 'red' configuration
-+ * @tail_drop_threshold: tail drop threshold
-+ */
-+struct dpni_early_drop_cfg {
-+ enum dpni_early_drop_mode mode;
-+ enum dpni_congestion_unit units;
-+
-+ struct dpni_wred_cfg green;
-+ struct dpni_wred_cfg yellow;
-+ struct dpni_wred_cfg red;
-+
-+ uint32_t tail_drop_threshold;
-+};
-+
-+/**
-+ * dpni_prepare_early_drop() - Prepare an early-drop configuration.
-+ * @cfg: Early-drop configuration
-+ * @early_drop_buf: Zeroed 256 bytes of memory before mapping it to DMA
-+ *
-+ * This function has to be called before dpni_set_rx_tc_early_drop or
-+ * dpni_set_tx_tc_early_drop
-+ *
-+ */
-+void dpni_prepare_early_drop(const struct dpni_early_drop_cfg *cfg,
-+ uint8_t *early_drop_buf);
-+
-+/**
-+ * dpni_extract_early_drop() - Extract the early-drop configuration.
-+ * @cfg: Early-drop configuration
-+ * @early_drop_buf: Zeroed 256 bytes of memory before mapping it to DMA
-+ *
-+ * This function has to be called after dpni_get_rx_tc_early_drop or
-+ * dpni_get_tx_tc_early_drop
-+ *
-+ */
-+void dpni_extract_early_drop(struct dpni_early_drop_cfg *cfg,
-+ const uint8_t *early_drop_buf);
-+
-+/**
-+ * dpni_set_rx_tc_early_drop() - Set Rx traffic class early-drop configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @tc_id: Traffic class selection (0-7)
-+ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory filled
-+ * with the early-drop configuration by calling dpni_prepare_early_drop()
-+ *
-+ * warning: Before calling this function, call dpni_prepare_early_drop() to
-+ * prepare the early_drop_iova parameter
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dpni_set_rx_tc_early_drop(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t tc_id,
-+ uint64_t early_drop_iova);
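-+
-+/*
-+ * Usage sketch (editor's illustration): tail-drop at 64 KiB of buffered
-+ * data on Rx traffic class 0. 'ed_buf' and 'ed_iova' are assumed to be a
-+ * 256-byte DMA-able buffer owned by the caller; the threshold is
-+ * illustrative.
-+ *
-+ *	struct dpni_early_drop_cfg ed = { 0 };
-+ *
-+ *	ed.mode = DPNI_EARLY_DROP_MODE_TAIL;
-+ *	ed.units = DPNI_CONGESTION_UNIT_BYTES;
-+ *	ed.tail_drop_threshold = 64 * 1024;
-+ *	memset(ed_buf, 0, 256);
-+ *	dpni_prepare_early_drop(&ed, ed_buf);
-+ *	err = dpni_set_rx_tc_early_drop(mc_io, 0, token, 0, ed_iova);
-+ */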
-+
-+/**
-+ * dpni_get_rx_tc_early_drop() - Get Rx traffic class early-drop configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @tc_id: Traffic class selection (0-7)
-+ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory
-+ *
-+ * warning: After calling this function, call dpni_extract_early_drop() to
-+ * get the early drop configuration
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dpni_get_rx_tc_early_drop(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t tc_id,
-+ uint64_t early_drop_iova);
-+
-+/**
-+ * dpni_set_tx_tc_early_drop() - Set Tx traffic class early-drop configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @tc_id: Traffic class selection (0-7)
-+ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory filled
-+ * with the early-drop configuration by calling dpni_prepare_early_drop()
-+ *
-+ * warning: Before calling this function, call dpni_prepare_early_drop() to
-+ * prepare the early_drop_iova parameter
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dpni_set_tx_tc_early_drop(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t tc_id,
-+ uint64_t early_drop_iova);
-+
-+/**
-+ * dpni_get_tx_tc_early_drop() - Get Tx traffic class early-drop configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @tc_id: Traffic class selection (0-7)
-+ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory
-+ *
-+ * warning: After calling this function, call dpni_extract_early_drop() to
-+ * get the early drop configuration
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dpni_get_tx_tc_early_drop(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t tc_id,
-+ uint64_t early_drop_iova);
-+
-+/**
-+ * enum dpni_dest - DPNI destination types
-+ * @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and
-+ * does not generate FQDAN notifications; user is expected to
-+ * dequeue from the queue based on polling or other user-defined
-+ * method
-+ * @DPNI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
-+ * notifications to the specified DPIO; user is expected to dequeue
-+ * from the queue only after notification is received
-+ * @DPNI_DEST_DPCON: The queue is set in schedule mode and does not generate
-+ * FQDAN notifications, but is connected to the specified DPCON
-+ * object; user is expected to dequeue from the DPCON channel
-+ */
-+enum dpni_dest {
-+ DPNI_DEST_NONE = 0,
-+ DPNI_DEST_DPIO = 1,
-+ DPNI_DEST_DPCON = 2
-+};
-+
-+/**
-+ * struct dpni_dest_cfg - Structure representing DPNI destination parameters
-+ * @dest_type: Destination type
-+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
-+ * @priority: Priority selection within the DPIO or DPCON channel; valid values
-+ * are 0-1 or 0-7, depending on the number of priorities in that
-+ * channel; not relevant for 'DPNI_DEST_NONE' option
-+ */
-+struct dpni_dest_cfg {
-+ enum dpni_dest dest_type;
-+ int dest_id;
-+ uint8_t priority;
-+};
-+
-+/* DPNI congestion options */
-+
-+/**
-+ * CSCN message is written to message_iova once entering a
-+ * congestion state (see 'threshold_entry')
-+ */
-+#define DPNI_CONG_OPT_WRITE_MEM_ON_ENTER 0x00000001
-+/**
-+ * CSCN message is written to message_iova once exiting a
-+ * congestion state (see 'threshold_exit')
-+ */
-+#define DPNI_CONG_OPT_WRITE_MEM_ON_EXIT 0x00000002
-+/**
-+ * CSCN write will attempt to allocate into a cache (coherent write);
-+ * valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is selected
-+ */
-+#define DPNI_CONG_OPT_COHERENT_WRITE 0x00000004
-+/**
-+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
-+ * DPIO/DPCON's WQ channel once entering a congestion state
-+ * (see 'threshold_entry')
-+ */
-+#define DPNI_CONG_OPT_NOTIFY_DEST_ON_ENTER 0x00000008
-+/**
-+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
-+ * DPIO/DPCON's WQ channel once exiting a congestion state
-+ * (see 'threshold_exit')
-+ */
-+#define DPNI_CONG_OPT_NOTIFY_DEST_ON_EXIT 0x00000010
-+/**
-+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' when the CSCN is written to the
-+ * sw-portal's DQRR, the DQRI interrupt is asserted immediately (if enabled)
-+ */
-+#define DPNI_CONG_OPT_INTR_COALESCING_DISABLED 0x00000020
-+
-+/**
-+ * struct dpni_congestion_notification_cfg - congestion notification
-+ * configuration
-+ * @units: units type
-+ * @threshold_entry: above this threshold we enter a congestion state;
-+ * set it to '0' to disable it
-+ * @threshold_exit: below this threshold we exit the congestion state.
-+ * @message_ctx: The context that will be part of the CSCN message
-+ * @message_iova: I/O virtual address (must be in DMA-able memory),
-+ * must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is
-+ * contained in 'options'
-+ * @dest_cfg: CSCN can be sent to either the DPIO or DPCON WQ channel
-+ * @options: Mask of available options; use 'DPNI_CONG_OPT_<X>' values
-+ */
-+struct dpni_congestion_notification_cfg {
-+ enum dpni_congestion_unit units;
-+ uint32_t threshold_entry;
-+ uint32_t threshold_exit;
-+ uint64_t message_ctx;
-+ uint64_t message_iova;
-+ struct dpni_dest_cfg dest_cfg;
-+ uint16_t options;
-+};
-+
-+/**
-+ * dpni_set_rx_tc_congestion_notification() - Set Rx traffic class congestion
-+ * notification configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @tc_id: Traffic class selection (0-7)
-+ * @cfg: congestion notification configuration
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dpni_set_rx_tc_congestion_notification(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t tc_id,
-+ const struct dpni_congestion_notification_cfg *cfg);
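-+
-+/*
-+ * Usage sketch (editor's illustration): writing a CSCN message to memory
-+ * when Rx TC 0 crosses a byte threshold, with no WQ-channel notification.
-+ * 'cscn_iova' is assumed to be a 16-byte-aligned DMA-able address owned by
-+ * the caller; the thresholds are illustrative.
-+ *
-+ *	struct dpni_congestion_notification_cfg cn = { 0 };
-+ *
-+ *	cn.units = DPNI_CONGESTION_UNIT_BYTES;
-+ *	cn.threshold_entry = 96 * 1024;
-+ *	cn.threshold_exit = 64 * 1024;
-+ *	cn.message_iova = cscn_iova;
-+ *	cn.dest_cfg.dest_type = DPNI_DEST_NONE;
-+ *	cn.options = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
-+ *		     DPNI_CONG_OPT_WRITE_MEM_ON_EXIT;
-+ *	err = dpni_set_rx_tc_congestion_notification(mc_io, 0, token, 0, &cn);
-+ */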
-+
-+/**
-+ * dpni_get_rx_tc_congestion_notification() - Get Rx traffic class congestion
-+ * notification configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @tc_id: Traffic class selection (0-7)
-+ * @cfg: congestion notification configuration
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dpni_get_rx_tc_congestion_notification(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t tc_id,
-+ struct dpni_congestion_notification_cfg *cfg);
-+
-+/**
-+ * dpni_set_tx_tc_congestion_notification() - Set Tx traffic class congestion
-+ * notification configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @tc_id: Traffic class selection (0-7)
-+ * @cfg: congestion notification configuration
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dpni_set_tx_tc_congestion_notification(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t tc_id,
-+ const struct dpni_congestion_notification_cfg *cfg);
-+
-+/**
-+ * dpni_get_tx_tc_congestion_notification() - Get Tx traffic class congestion
-+ * notification configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @tc_id: Traffic class selection (0-7)
-+ * @cfg: congestion notification configuration
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dpni_get_tx_tc_congestion_notification(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t tc_id,
-+ struct dpni_congestion_notification_cfg *cfg);
-+
-+/**
-+ * enum dpni_flc_type - DPNI FLC types
-+ * @DPNI_FLC_USER_DEFINED: select the FLC to be used for user defined value
-+ * @DPNI_FLC_STASH: select the FLC to be used for stash control
-+ */
-+enum dpni_flc_type {
-+ DPNI_FLC_USER_DEFINED = 0,
-+ DPNI_FLC_STASH = 1,
-+};
-+
-+/**
-+ * enum dpni_stash_size - DPNI FLC stashing size
-+ * @DPNI_STASH_SIZE_0B: no stash
-+ * @DPNI_STASH_SIZE_64B: stashes 64 bytes
-+ * @DPNI_STASH_SIZE_128B: stashes 128 bytes
-+ * @DPNI_STASH_SIZE_192B: stashes 192 bytes
-+ */
-+enum dpni_stash_size {
-+ DPNI_STASH_SIZE_0B = 0,
-+ DPNI_STASH_SIZE_64B = 1,
-+ DPNI_STASH_SIZE_128B = 2,
-+ DPNI_STASH_SIZE_192B = 3,
-+};
-+
-+/* DPNI FLC stash options */
-+
-+/**
-+ * stashes the whole annotation area (up to 192 bytes)
-+ */
-+#define DPNI_FLC_STASH_FRAME_ANNOTATION 0x00000001
-+
-+/**
-+ * struct dpni_flc_cfg - Structure representing DPNI FLC configuration
-+ * @flc_type: FLC type
-+ * @options: Mask of available options;
-+ * use 'DPNI_FLC_STASH_<X>' values
-+ * @frame_data_size: Size of frame data to be stashed
-+ * @flow_context_size: Size of flow context to be stashed
-+ * @flow_context: 1. In case flc_type is 'DPNI_FLC_USER_DEFINED':
-+ * this value will be provided in the frame descriptor
-+ * (FD[FLC])
-+ * 2. In case flc_type is 'DPNI_FLC_STASH':
-+ * this value will be I/O virtual address of the
-+ * flow-context;
-+ * Must be cacheline-aligned and DMA-able memory
-+ */
-+struct dpni_flc_cfg {
-+ enum dpni_flc_type flc_type;
-+ uint32_t options;
-+ enum dpni_stash_size frame_data_size;
-+ enum dpni_stash_size flow_context_size;
-+ uint64_t flow_context;
-+};
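A small sketch of the stash use of this structure, assuming 'flow_ctx_iova' is a cacheline-aligned, DMA-able address supplied by the caller:

#include <stdint.h>
#include "fsl_dpni.h"

/* Sketch: request stashing of 64 bytes of frame data plus the frame
 * annotation area; no flow-context bytes are stashed.
 */
static void fill_stash_flc(struct dpni_flc_cfg *flc, uint64_t flow_ctx_iova)
{
	flc->flc_type = DPNI_FLC_STASH;
	flc->options = DPNI_FLC_STASH_FRAME_ANNOTATION;
	flc->frame_data_size = DPNI_STASH_SIZE_64B;
	flc->flow_context_size = DPNI_STASH_SIZE_0B;
	flc->flow_context = flow_ctx_iova;	/* cacheline-aligned, DMA-able */
}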
-+
-+/**
-+ * DPNI queue modification options
-+ */
-+
-+/**
-+ * Select to modify the user's context associated with the queue
-+ */
-+#define DPNI_QUEUE_OPT_USER_CTX 0x00000001
-+/**
-+ * Select to modify the queue's destination
-+ */
-+#define DPNI_QUEUE_OPT_DEST 0x00000002
-+/**
-+ * Select to modify the flow-context parameters;
-+ * not applicable for Tx-conf/Err queues as the FD comes from the user
-+ */
-+#define DPNI_QUEUE_OPT_FLC 0x00000004
-+/**
-+ * Select to modify the queue's order preservation
-+ */
-+#define DPNI_QUEUE_OPT_ORDER_PRESERVATION 0x00000008
-+/**
-+ * Select to modify the queue's tail-drop threshold
-+ */
-+#define DPNI_QUEUE_OPT_TAILDROP_THRESHOLD 0x00000010
-+
-+/**
-+ * struct dpni_queue_cfg - Structure representing queue configuration
-+ * @options: Flags representing the suggested modifications to the queue;
-+ * Use any combination of 'DPNI_QUEUE_OPT_<X>' flags
-+ * @user_ctx: User context value provided in the frame descriptor of each
-+ * dequeued frame; valid only if 'DPNI_QUEUE_OPT_USER_CTX'
-+ * is contained in 'options'
-+ * @dest_cfg: Queue destination parameters;
-+ * valid only if 'DPNI_QUEUE_OPT_DEST' is contained in 'options'
-+ * @flc_cfg: Flow context configuration; in case the TC's distribution
-+ *		is either NONE or HASH, the FLC settings of flow #0 are used;
-+ *		in the case of FS (flow-steering), the flow's own FLC settings
-+ *		are used;
-+ * valid only if 'DPNI_QUEUE_OPT_FLC' is contained in 'options'
-+ * @order_preservation_en: enable/disable order preservation;
-+ * valid only if 'DPNI_QUEUE_OPT_ORDER_PRESERVATION' is contained
-+ * in 'options'
-+ * @tail_drop_threshold: set the queue's tail drop threshold in bytes;
-+ *		a value of '0' disables the threshold; maximum value is 0xE000000;
-+ * valid only if 'DPNI_QUEUE_OPT_TAILDROP_THRESHOLD' is contained
-+ * in 'options'
-+ */
-+struct dpni_queue_cfg {
-+ uint32_t options;
-+ uint64_t user_ctx;
-+ struct dpni_dest_cfg dest_cfg;
-+ struct dpni_flc_cfg flc_cfg;
-+ int order_preservation_en;
-+ uint32_t tail_drop_threshold;
-+};
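A hedged sketch of how this structure is typically filled, touching only the fields whose 'DPNI_QUEUE_OPT_<X>' flags are set; the result would then be passed to dpni_set_rx_flow() or dpni_set_rx_err_queue(), declared further below:

#include <stdint.h>
#include <string.h>
#include "fsl_dpni.h"

/* Sketch: modify only the user context and the tail-drop threshold of a
 * queue; every other parameter is left untouched because its
 * DPNI_QUEUE_OPT_<X> flag is not present in 'options'.
 */
static void fill_queue_cfg(struct dpni_queue_cfg *q, uint64_t user_ctx)
{
	memset(q, 0, sizeof(*q));
	q->options = DPNI_QUEUE_OPT_USER_CTX |
		     DPNI_QUEUE_OPT_TAILDROP_THRESHOLD;
	q->user_ctx = user_ctx;			/* echoed in each dequeued FD */
	q->tail_drop_threshold = 64 * 1024;	/* bytes; '0' disables it */
}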
-+
-+/**
-+ * struct dpni_queue_attr - Structure representing queue attributes
-+ * @user_ctx: User context value provided in the frame descriptor of each
-+ * dequeued frame
-+ * @dest_cfg: Queue destination configuration
-+ * @flc_cfg: Flow context configuration
-+ * @order_preservation_en: enable/disable order preservation
-+ * @tail_drop_threshold: queue's tail drop threshold in bytes;
-+ * @fqid: Virtual fqid value to be used for dequeue operations
-+ */
-+struct dpni_queue_attr {
-+ uint64_t user_ctx;
-+ struct dpni_dest_cfg dest_cfg;
-+ struct dpni_flc_cfg flc_cfg;
-+ int order_preservation_en;
-+ uint32_t tail_drop_threshold;
-+
-+ uint32_t fqid;
-+};
-+
-+/**
-+ * DPNI Tx flow modification options
-+ */
-+
-+/**
-+ * Select to modify the settings for dedicated Tx confirmation/error
-+ */
-+#define DPNI_TX_FLOW_OPT_TX_CONF_ERROR 0x00000001
-+/**
-+ * Select to modify the L3 checksum generation setting
-+ */
-+#define DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN 0x00000010
-+/**
-+ * Select to modify the L4 checksum generation setting
-+ */
-+#define DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN 0x00000020
-+
-+/**
-+ * struct dpni_tx_flow_cfg - Structure representing Tx flow configuration
-+ * @options: Flags representing the suggested modifications to the Tx flow;
-+ *		Use any combination of 'DPNI_TX_FLOW_OPT_<X>' flags
-+ * @use_common_tx_conf_queue: Set to '1' to use the common (default) Tx
-+ * confirmation and error queue; Set to '0' to use the private
-+ * Tx confirmation and error queue; valid only if
-+ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' wasn't set at DPNI creation
-+ * and 'DPNI_TX_FLOW_OPT_TX_CONF_ERROR' is contained in 'options'
-+ * @l3_chksum_gen: Set to '1' to enable L3 checksum generation; '0' to disable;
-+ * valid only if 'DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN' is contained in 'options'
-+ * @l4_chksum_gen: Set to '1' to enable L4 checksum generation; '0' to disable;
-+ * valid only if 'DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN' is contained in 'options'
-+ */
-+struct dpni_tx_flow_cfg {
-+ uint32_t options;
-+ int use_common_tx_conf_queue;
-+ int l3_chksum_gen;
-+ int l4_chksum_gen;
-+};
-+
-+/**
-+ * dpni_set_tx_flow() - Set Tx flow configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @flow_id: Provides (or returns) the sender's flow ID;
-+ * for each new sender set (*flow_id) to 'DPNI_NEW_FLOW_ID' to generate
-+ * a new flow_id; this ID should be used as the QDBIN argument
-+ * in enqueue operations
-+ * @cfg: Tx flow configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_tx_flow(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t *flow_id,
-+ const struct dpni_tx_flow_cfg *cfg);
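A minimal sketch of the allocation pattern described above, assuming 'mc_io'/'token' were obtained earlier and using the 'DPNI_NEW_FLOW_ID' sentinel defined elsewhere in this header:

#include <stdint.h>
#include "fsl_dpni.h"

/* Sketch: create a new Tx flow with hardware L3/L4 checksum generation.
 * On success, '*flow_id' holds the allocated flow ID, to be used as the
 * QDBIN argument in enqueue operations.
 */
static int create_tx_flow(struct fsl_mc_io *mc_io, uint16_t token,
			  uint16_t *flow_id)
{
	struct dpni_tx_flow_cfg cfg = {0};

	cfg.options = DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN |
		      DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN;
	cfg.l3_chksum_gen = 1;
	cfg.l4_chksum_gen = 1;

	*flow_id = DPNI_NEW_FLOW_ID;	/* request a newly allocated flow ID */
	return dpni_set_tx_flow(mc_io, 0 /* cmd_flags, assumed */, token,
				flow_id, &cfg);
}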
-+
-+/**
-+ * struct dpni_tx_flow_attr - Structure representing Tx flow attributes
-+ * @use_common_tx_conf_queue: '1' if using common (default) Tx confirmation and
-+ * error queue; '0' if using private Tx confirmation and error queue
-+ * @l3_chksum_gen: '1' if L3 checksum generation is enabled; '0' if disabled
-+ * @l4_chksum_gen: '1' if L4 checksum generation is enabled; '0' if disabled
-+ */
-+struct dpni_tx_flow_attr {
-+ int use_common_tx_conf_queue;
-+ int l3_chksum_gen;
-+ int l4_chksum_gen;
-+};
-+
-+/**
-+ * dpni_get_tx_flow() - Get Tx flow attributes
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @flow_id: The sender's flow ID, as returned by the
-+ * dpni_set_tx_flow() function
-+ * @attr: Returned Tx flow attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_tx_flow(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t flow_id,
-+ struct dpni_tx_flow_attr *attr);
-+
-+/**
-+ * struct dpni_tx_conf_cfg - Structure representing Tx conf configuration
-+ * @errors_only: Set to '1' to report back only error frames;
-+ * Set to '0' to confirm transmission/error for all transmitted frames;
-+ * @queue_cfg: Queue configuration
-+ */
-+struct dpni_tx_conf_cfg {
-+ int errors_only;
-+ struct dpni_queue_cfg queue_cfg;
-+};
-+
-+/**
-+ * dpni_set_tx_conf() - Set Tx confirmation and error queue configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @flow_id: The sender's flow ID, as returned by the
-+ * dpni_set_tx_flow() function;
-+ * use 'DPNI_COMMON_TX_CONF' for common tx-conf
-+ * @cfg: Queue configuration
-+ *
-+ * If either 'DPNI_OPT_TX_CONF_DISABLED' or
-+ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' were selected at DPNI creation,
-+ * this function can ONLY be used with 'flow_id == DPNI_COMMON_TX_CONF';
-+ * i.e. it can only serve the common tx-conf-err queue;
-+ * if 'DPNI_OPT_TX_CONF_DISABLED' was selected, only error frames are reported
-+ * back - successfully transmitted frames are not confirmed. Otherwise, all
-+ * transmitted frames are sent for confirmation.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_tx_conf(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t flow_id,
-+ const struct dpni_tx_conf_cfg *cfg);
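For illustration, a minimal sketch that makes the common confirmation queue report errors only, leaving the queue parameters themselves unmodified ('DPNI_COMMON_TX_CONF' is the flow-id sentinel referenced above):

#include <stdint.h>
#include <string.h>
#include "fsl_dpni.h"

/* Sketch: only error frames are reported back on the common
 * tx-conf-err queue; queue_cfg.options stays 0, so no queue parameter
 * is modified by this call.
 */
static int conf_errors_only(struct fsl_mc_io *mc_io, uint16_t token)
{
	struct dpni_tx_conf_cfg cfg;

	memset(&cfg, 0, sizeof(cfg));
	cfg.errors_only = 1;

	return dpni_set_tx_conf(mc_io, 0 /* cmd_flags, assumed */, token,
				DPNI_COMMON_TX_CONF, &cfg);
}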
-+
-+/**
-+ * struct dpni_tx_conf_attr - Structure representing Tx conf attributes
-+ * @errors_only: '1' if only error frames are reported back; '0' if all
-+ * transmitted frames are confirmed
-+ * @queue_attr: Queue attributes
-+ */
-+struct dpni_tx_conf_attr {
-+ int errors_only;
-+ struct dpni_queue_attr queue_attr;
-+};
-+
-+/**
-+ * dpni_get_tx_conf() - Get Tx confirmation and error queue attributes
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @flow_id: The sender's flow ID, as returned by the
-+ * dpni_set_tx_flow() function;
-+ * use 'DPNI_COMMON_TX_CONF' for common tx-conf
-+ * @attr: Returned tx-conf attributes
-+ *
-+ * If either 'DPNI_OPT_TX_CONF_DISABLED' or
-+ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' were selected at DPNI creation,
-+ * this function can ONLY be used with 'flow_id == DPNI_COMMON_TX_CONF';
-+ * i.e. it can only serve the common tx-conf-err queue;
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_tx_conf(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t flow_id,
-+ struct dpni_tx_conf_attr *attr);
-+
-+/**
-+ * dpni_set_tx_conf_congestion_notification() - Set Tx conf congestion
-+ * notification configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @flow_id: The sender's flow ID, as returned by the
-+ * dpni_set_tx_flow() function;
-+ * use 'DPNI_COMMON_TX_CONF' for common tx-conf
-+ * @cfg: congestion notification configuration
-+ *
-+ * If either 'DPNI_OPT_TX_CONF_DISABLED' or
-+ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' were selected at DPNI creation,
-+ * this function can ONLY be used with 'flow_id == DPNI_COMMON_TX_CONF';
-+ * i.e. it can only serve the common tx-conf-err queue;
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dpni_set_tx_conf_congestion_notification(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t flow_id,
-+ const struct dpni_congestion_notification_cfg *cfg);
-+
-+/**
-+ * dpni_get_tx_conf_congestion_notification() - Get Tx conf congestion
-+ * notification configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @flow_id: The sender's flow ID, as returned by the
-+ * dpni_set_tx_flow() function;
-+ * use 'DPNI_COMMON_TX_CONF' for common tx-conf
-+ * @cfg:	Returned congestion notification configuration
-+ *
-+ * If either 'DPNI_OPT_TX_CONF_DISABLED' or
-+ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' were selected at DPNI creation,
-+ * this function can ONLY be used with 'flow_id == DPNI_COMMON_TX_CONF';
-+ * i.e. it can only serve the common tx-conf-err queue;
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dpni_get_tx_conf_congestion_notification(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t flow_id,
-+ struct dpni_congestion_notification_cfg *cfg);
-+
-+/**
-+ * dpni_set_tx_conf_revoke() - Tx confirmation revocation
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @revoke:	Set to '1' to revoke Tx confirmation; '0' to restore it
-+ *
-+ * This function is useful only when 'DPNI_OPT_TX_CONF_DISABLED' is not
-+ * selected at DPNI creation.
-+ * Calling this function with 'revoke' set to '1' disables all transmit
-+ * confirmation (including the private confirmation queues), regardless of
-+ * previous settings; Note that in this case, Tx error frames are still
-+ * enqueued to the general transmit errors queue.
-+ * Calling this function with 'revoke' set to '0' restores the previous
-+ * settings for both general and private transmit confirmation.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_tx_conf_revoke(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int revoke);
-+
-+/**
-+ * dpni_set_rx_flow() - Set Rx flow configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @tc_id: Traffic class selection (0-7);
-+ * use 'DPNI_ALL_TCS' to set all TCs and all flows
-+ * @flow_id: Rx flow id within the traffic class; use
-+ * 'DPNI_ALL_TC_FLOWS' to set all flows within
-+ * this tc_id; ignored if tc_id is set to
-+ * 'DPNI_ALL_TCS';
-+ * @cfg: Rx flow configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_rx_flow(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t tc_id,
-+ uint16_t flow_id,
-+ const struct dpni_queue_cfg *cfg);
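A sketch of the wildcard form, assuming a DPCON channel identified by 'dpcon_id'/'prio' was set up elsewhere; the enumerator name 'DPNI_DEST_DPCON' is an assumption here (only 'DPNI_DEST_NONE' is spelled out in the comments above):

#include <stdint.h>
#include <string.h>
#include "fsl_dpni.h"

/* Sketch: steer every Rx flow of every traffic class to one WQ channel. */
static int rx_all_to_channel(struct fsl_mc_io *mc_io, uint16_t token,
			     int dpcon_id, uint8_t prio)
{
	struct dpni_queue_cfg cfg;

	memset(&cfg, 0, sizeof(cfg));
	cfg.options = DPNI_QUEUE_OPT_DEST;
	cfg.dest_cfg.dest_type = DPNI_DEST_DPCON;	/* assumed enumerator */
	cfg.dest_cfg.dest_id = dpcon_id;
	cfg.dest_cfg.priority = prio;

	return dpni_set_rx_flow(mc_io, 0 /* cmd_flags, assumed */, token,
				DPNI_ALL_TCS, DPNI_ALL_TC_FLOWS, &cfg);
}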
-+
-+/**
-+ * dpni_get_rx_flow() - Get Rx flow attributes
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @tc_id: Traffic class selection (0-7)
-+ * @flow_id: Rx flow id within the traffic class
-+ * @attr: Returned Rx flow attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_rx_flow(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t tc_id,
-+ uint16_t flow_id,
-+ struct dpni_queue_attr *attr);
-+
-+/**
-+ * dpni_set_rx_err_queue() - Set Rx error queue configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @cfg: Queue configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_rx_err_queue(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const struct dpni_queue_cfg *cfg);
-+
-+/**
-+ * dpni_get_rx_err_queue() - Get Rx error queue attributes
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @attr: Returned Queue attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_rx_err_queue(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpni_queue_attr *attr);
-+
-+/**
-+ * struct dpni_qos_tbl_cfg - Structure representing QOS table configuration
-+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
-+ * key extractions to be used as the QoS criteria by calling
-+ * dpni_prepare_key_cfg()
-+ * @discard_on_miss: Set to '1' to discard frames in case of no match (miss);
-+ * '0' to use the 'default_tc' in such cases
-+ * @default_tc: Used in case of no match and 'discard_on_miss' = 0
-+ */
-+struct dpni_qos_tbl_cfg {
-+ uint64_t key_cfg_iova;
-+ int discard_on_miss;
-+ uint8_t default_tc;
-+};
-+
-+/**
-+ * dpni_set_qos_table() - Set QoS mapping table
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @cfg: QoS table configuration
-+ *
-+ * This function and all QoS-related functions require that
-+ * 'max_tcs > 1' was set at DPNI creation.
-+ *
-+ * Warning: Before calling this function, call dpni_prepare_key_cfg() to
-+ * prepare the key_cfg_iova parameter
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_qos_table(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const struct dpni_qos_tbl_cfg *cfg);
-+
-+/**
-+ * struct dpni_rule_cfg - Rule configuration for table lookup
-+ * @key_iova: I/O virtual address of the key (must be in DMA-able memory)
-+ * @mask_iova: I/O virtual address of the mask (must be in DMA-able memory)
-+ * @key_size: key and mask size (in bytes)
-+ */
-+struct dpni_rule_cfg {
-+ uint64_t key_iova;
-+ uint64_t mask_iova;
-+ uint8_t key_size;
-+};
-+
-+/**
-+ * dpni_add_qos_entry() - Add QoS mapping entry (to select a traffic class)
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @cfg: QoS rule to add
-+ * @tc_id: Traffic class selection (0-7)
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const struct dpni_rule_cfg *cfg,
-+ uint8_t tc_id);
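Putting the two calls above together, a hedged sketch of QoS setup; 'key_cfg_iova' is assumed to have been prepared with dpni_prepare_key_cfg() as the warning above requires, and the key/mask buffers are assumed to sit in DMA-able memory:

#include <stdint.h>
#include "fsl_dpni.h"

/* Sketch: install the QoS table (no-match frames fall back to TC 0) and
 * add one rule that classifies matching frames into traffic class 3.
 */
static int qos_setup(struct fsl_mc_io *mc_io, uint16_t token,
		     uint64_t key_cfg_iova, uint64_t key_iova,
		     uint64_t mask_iova, uint8_t key_size)
{
	struct dpni_qos_tbl_cfg tbl = {0};
	struct dpni_rule_cfg rule = {0};
	int err;

	tbl.key_cfg_iova = key_cfg_iova;
	tbl.discard_on_miss = 0;	/* no match -> default_tc */
	tbl.default_tc = 0;

	err = dpni_set_qos_table(mc_io, 0 /* cmd_flags, assumed */, token, &tbl);
	if (err)
		return err;

	rule.key_iova = key_iova;
	rule.mask_iova = mask_iova;
	rule.key_size = key_size;

	return dpni_add_qos_entry(mc_io, 0, token, &rule, 3 /* tc_id */);
}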
-+
-+/**
-+ * dpni_remove_qos_entry() - Remove QoS mapping entry
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @cfg: QoS rule to remove
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const struct dpni_rule_cfg *cfg);
-+
-+/**
-+ * dpni_clear_qos_table() - Clear all QoS mapping entries
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ *
-+ * Following this function call, all frames are directed to
-+ * the default traffic class (0)
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class
-+ * (to select a flow ID)
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @tc_id: Traffic class selection (0-7)
-+ * @cfg: Flow steering rule to add
-+ * @flow_id: Flow id selection (must be smaller than the
-+ * distribution size of the traffic class)
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t tc_id,
-+ const struct dpni_rule_cfg *cfg,
-+ uint16_t flow_id);
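Flow steering follows the same rule-based pattern; a short sketch, where the rule memory is assumed DMA-able and 'flow_id' must be below the TC's distribution size, as documented above:

#include <stdint.h>
#include "fsl_dpni.h"

/* Sketch: steer frames of 'tc_id' matching the key/mask to 'flow_id'. */
static int add_fs_rule(struct fsl_mc_io *mc_io, uint16_t token,
		       uint8_t tc_id, uint16_t flow_id, uint64_t key_iova,
		       uint64_t mask_iova, uint8_t key_size)
{
	struct dpni_rule_cfg rule = {0};

	rule.key_iova = key_iova;
	rule.mask_iova = mask_iova;
	rule.key_size = key_size;

	return dpni_add_fs_entry(mc_io, 0 /* cmd_flags, assumed */, token,
				 tc_id, &rule, flow_id);
}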
-+
-+/**
-+ * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific
-+ * traffic class
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @tc_id: Traffic class selection (0-7)
-+ * @cfg: Flow steering rule to remove
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t tc_id,
-+ const struct dpni_rule_cfg *cfg);
-+
-+/**
-+ * dpni_clear_fs_entries() - Clear all Flow Steering entries of a specific
-+ * traffic class
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @tc_id: Traffic class selection (0-7)
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_clear_fs_entries(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t tc_id);
-+
-+/**
-+ * dpni_set_vlan_insertion() - Enable/disable VLAN insertion for egress frames
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @en: Set to '1' to enable; '0' to disable
-+ *
-+ * Requires that the 'DPNI_OPT_VLAN_MANIPULATION' option is set
-+ * at DPNI creation.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_vlan_insertion(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int en);
-+
-+/**
-+ * dpni_set_vlan_removal() - Enable/disable VLAN removal for ingress frames
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @en: Set to '1' to enable; '0' to disable
-+ *
-+ * Requires that the 'DPNI_OPT_VLAN_MANIPULATION' option is set
-+ * at DPNI creation.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_vlan_removal(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int en);
-+
-+/**
-+ * dpni_set_ipr() - Enable/disable IP reassembly of ingress frames
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @en: Set to '1' to enable; '0' to disable
-+ *
-+ * Requires that the 'DPNI_OPT_IPR' option is set at DPNI creation.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_ipr(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int en);
-+
-+/**
-+ * dpni_set_ipf() - Enable/disable IP fragmentation of egress frames
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @en: Set to '1' to enable; '0' to disable
-+ *
-+ * Requires that the 'DPNI_OPT_IPF' option is set at DPNI
-+ * creation. Fragmentation is performed according to MTU value
-+ * set by dpni_set_mtu() function
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_ipf(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int en);
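These last few calls share a simple enable/disable pattern; a brief sketch, assuming the corresponding 'DPNI_OPT_<X>' options were selected at DPNI creation:

#include <stdint.h>
#include "fsl_dpni.h"

/* Sketch: enable VLAN removal on ingress and IP fragmentation on egress. */
static int enable_vlan_strip_and_ipf(struct fsl_mc_io *mc_io, uint16_t token)
{
	int err;

	err = dpni_set_vlan_removal(mc_io, 0 /* cmd_flags, assumed */, token, 1);
	if (err)
		return err;

	return dpni_set_ipf(mc_io, 0, token, 1);
}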
-+
-+#endif /* __FSL_DPNI_H */
-diff --git a/drivers/net/dpaa2/mc/fsl_dpni_cmd.h b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
-new file mode 100644
-index 0000000..c0f8af0
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
-@@ -0,0 +1,1058 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef _FSL_DPNI_CMD_H
-+#define _FSL_DPNI_CMD_H
-+
-+/* DPNI Version */
-+#define DPNI_VER_MAJOR 6
-+#define DPNI_VER_MINOR 0
-+
-+/* Command IDs */
-+#define DPNI_CMDID_OPEN 0x801
-+#define DPNI_CMDID_CLOSE 0x800
-+#define DPNI_CMDID_CREATE 0x901
-+#define DPNI_CMDID_DESTROY 0x900
-+
-+#define DPNI_CMDID_ENABLE 0x002
-+#define DPNI_CMDID_DISABLE 0x003
-+#define DPNI_CMDID_GET_ATTR 0x004
-+#define DPNI_CMDID_RESET 0x005
-+#define DPNI_CMDID_IS_ENABLED 0x006
-+
-+#define DPNI_CMDID_SET_IRQ 0x010
-+#define DPNI_CMDID_GET_IRQ 0x011
-+#define DPNI_CMDID_SET_IRQ_ENABLE 0x012
-+#define DPNI_CMDID_GET_IRQ_ENABLE 0x013
-+#define DPNI_CMDID_SET_IRQ_MASK 0x014
-+#define DPNI_CMDID_GET_IRQ_MASK 0x015
-+#define DPNI_CMDID_GET_IRQ_STATUS 0x016
-+#define DPNI_CMDID_CLEAR_IRQ_STATUS 0x017
-+
-+#define DPNI_CMDID_SET_POOLS 0x200
-+#define DPNI_CMDID_GET_RX_BUFFER_LAYOUT 0x201
-+#define DPNI_CMDID_SET_RX_BUFFER_LAYOUT 0x202
-+#define DPNI_CMDID_GET_TX_BUFFER_LAYOUT 0x203
-+#define DPNI_CMDID_SET_TX_BUFFER_LAYOUT 0x204
-+#define DPNI_CMDID_SET_TX_CONF_BUFFER_LAYOUT 0x205
-+#define DPNI_CMDID_GET_TX_CONF_BUFFER_LAYOUT 0x206
-+#define DPNI_CMDID_SET_L3_CHKSUM_VALIDATION 0x207
-+#define DPNI_CMDID_GET_L3_CHKSUM_VALIDATION 0x208
-+#define DPNI_CMDID_SET_L4_CHKSUM_VALIDATION 0x209
-+#define DPNI_CMDID_GET_L4_CHKSUM_VALIDATION 0x20A
-+#define DPNI_CMDID_SET_ERRORS_BEHAVIOR 0x20B
-+#define DPNI_CMDID_SET_TX_CONF_REVOKE 0x20C
-+
-+#define DPNI_CMDID_GET_QDID 0x210
-+#define DPNI_CMDID_GET_SP_INFO 0x211
-+#define DPNI_CMDID_GET_TX_DATA_OFFSET 0x212
-+#define DPNI_CMDID_GET_COUNTER 0x213
-+#define DPNI_CMDID_SET_COUNTER 0x214
-+#define DPNI_CMDID_GET_LINK_STATE 0x215
-+#define DPNI_CMDID_SET_MAX_FRAME_LENGTH 0x216
-+#define DPNI_CMDID_GET_MAX_FRAME_LENGTH 0x217
-+#define DPNI_CMDID_SET_MTU 0x218
-+#define DPNI_CMDID_GET_MTU 0x219
-+#define DPNI_CMDID_SET_LINK_CFG 0x21A
-+#define DPNI_CMDID_SET_TX_SHAPING 0x21B
-+
-+#define DPNI_CMDID_SET_MCAST_PROMISC 0x220
-+#define DPNI_CMDID_GET_MCAST_PROMISC 0x221
-+#define DPNI_CMDID_SET_UNICAST_PROMISC 0x222
-+#define DPNI_CMDID_GET_UNICAST_PROMISC 0x223
-+#define DPNI_CMDID_SET_PRIM_MAC 0x224
-+#define DPNI_CMDID_GET_PRIM_MAC 0x225
-+#define DPNI_CMDID_ADD_MAC_ADDR 0x226
-+#define DPNI_CMDID_REMOVE_MAC_ADDR 0x227
-+#define DPNI_CMDID_CLR_MAC_FILTERS 0x228
-+
-+#define DPNI_CMDID_SET_VLAN_FILTERS 0x230
-+#define DPNI_CMDID_ADD_VLAN_ID 0x231
-+#define DPNI_CMDID_REMOVE_VLAN_ID 0x232
-+#define DPNI_CMDID_CLR_VLAN_FILTERS 0x233
-+
-+#define DPNI_CMDID_SET_RX_TC_DIST 0x235
-+#define DPNI_CMDID_SET_TX_FLOW 0x236
-+#define DPNI_CMDID_GET_TX_FLOW 0x237
-+#define DPNI_CMDID_SET_RX_FLOW 0x238
-+#define DPNI_CMDID_GET_RX_FLOW 0x239
-+#define DPNI_CMDID_SET_RX_ERR_QUEUE 0x23A
-+#define DPNI_CMDID_GET_RX_ERR_QUEUE 0x23B
-+
-+#define DPNI_CMDID_SET_RX_TC_POLICING 0x23E
-+#define DPNI_CMDID_SET_RX_TC_EARLY_DROP 0x23F
-+
-+#define DPNI_CMDID_SET_QOS_TBL 0x240
-+#define DPNI_CMDID_ADD_QOS_ENT 0x241
-+#define DPNI_CMDID_REMOVE_QOS_ENT 0x242
-+#define DPNI_CMDID_CLR_QOS_TBL 0x243
-+#define DPNI_CMDID_ADD_FS_ENT 0x244
-+#define DPNI_CMDID_REMOVE_FS_ENT 0x245
-+#define DPNI_CMDID_CLR_FS_ENT 0x246
-+#define DPNI_CMDID_SET_VLAN_INSERTION 0x247
-+#define DPNI_CMDID_SET_VLAN_REMOVAL 0x248
-+#define DPNI_CMDID_SET_IPR 0x249
-+#define DPNI_CMDID_SET_IPF 0x24A
-+
-+#define DPNI_CMDID_SET_TX_SELECTION 0x250
-+#define DPNI_CMDID_GET_RX_TC_POLICING 0x251
-+#define DPNI_CMDID_GET_RX_TC_EARLY_DROP 0x252
-+#define DPNI_CMDID_SET_RX_TC_CONGESTION_NOTIFICATION 0x253
-+#define DPNI_CMDID_GET_RX_TC_CONGESTION_NOTIFICATION 0x254
-+#define DPNI_CMDID_SET_TX_TC_CONGESTION_NOTIFICATION 0x255
-+#define DPNI_CMDID_GET_TX_TC_CONGESTION_NOTIFICATION 0x256
-+#define DPNI_CMDID_SET_TX_CONF 0x257
-+#define DPNI_CMDID_GET_TX_CONF 0x258
-+#define DPNI_CMDID_SET_TX_CONF_CONGESTION_NOTIFICATION 0x259
-+#define DPNI_CMDID_GET_TX_CONF_CONGESTION_NOTIFICATION 0x25A
-+#define DPNI_CMDID_SET_TX_TC_EARLY_DROP 0x25B
-+#define DPNI_CMDID_GET_TX_TC_EARLY_DROP 0x25C
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_OPEN(cmd, dpni_id) \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id)
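To make the '(cmd, param, offset, width, type, arg_name)' convention concrete: each of these macros packs a field into (or, presumably for MC_RSP_OP, extracts it from) one of the command's 64-bit parameter words at the given bit offset and width. The helper below is a self-contained illustration of that packing, not the actual MC_CMD_OP macro from the MC headers:

#include <stdint.h>

/* Illustration only: place 'val' into 64-bit parameter word 'param' at
 * bit 'offset' with 'width' bits - the shape of operation encoded by the
 * (cmd, param, offset, width, ...) argument lists above.
 */
static inline void pack_field(uint64_t *params, int param, int offset,
			      int width, uint64_t val)
{
	uint64_t mask = (width == 64) ? ~0ULL : ((1ULL << width) - 1);

	params[param] &= ~(mask << offset);
	params[param] |= (val & mask) << offset;
}

/* e.g. DPNI_CMD_OPEN(cmd, dpni_id) corresponds to:
 *	pack_field(cmd_params, 0, 0, 32, (uint64_t)dpni_id);
 * i.e. the DPNI object id occupies bits [0..31] of parameter word 0.
 */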
-+
-+#define DPNI_PREP_EXTENDED_CFG(ext, cfg) \
-+do { \
-+ MC_PREP_OP(ext, 0, 0, 16, uint16_t, cfg->tc_cfg[0].max_dist); \
-+ MC_PREP_OP(ext, 0, 16, 16, uint16_t, cfg->tc_cfg[0].max_fs_entries); \
-+ MC_PREP_OP(ext, 0, 32, 16, uint16_t, cfg->tc_cfg[1].max_dist); \
-+ MC_PREP_OP(ext, 0, 48, 16, uint16_t, cfg->tc_cfg[1].max_fs_entries); \
-+ MC_PREP_OP(ext, 1, 0, 16, uint16_t, cfg->tc_cfg[2].max_dist); \
-+ MC_PREP_OP(ext, 1, 16, 16, uint16_t, cfg->tc_cfg[2].max_fs_entries); \
-+ MC_PREP_OP(ext, 1, 32, 16, uint16_t, cfg->tc_cfg[3].max_dist); \
-+ MC_PREP_OP(ext, 1, 48, 16, uint16_t, cfg->tc_cfg[3].max_fs_entries); \
-+ MC_PREP_OP(ext, 2, 0, 16, uint16_t, cfg->tc_cfg[4].max_dist); \
-+ MC_PREP_OP(ext, 2, 16, 16, uint16_t, cfg->tc_cfg[4].max_fs_entries); \
-+ MC_PREP_OP(ext, 2, 32, 16, uint16_t, cfg->tc_cfg[5].max_dist); \
-+ MC_PREP_OP(ext, 2, 48, 16, uint16_t, cfg->tc_cfg[5].max_fs_entries); \
-+ MC_PREP_OP(ext, 3, 0, 16, uint16_t, cfg->tc_cfg[6].max_dist); \
-+ MC_PREP_OP(ext, 3, 16, 16, uint16_t, cfg->tc_cfg[6].max_fs_entries); \
-+ MC_PREP_OP(ext, 3, 32, 16, uint16_t, cfg->tc_cfg[7].max_dist); \
-+ MC_PREP_OP(ext, 3, 48, 16, uint16_t, cfg->tc_cfg[7].max_fs_entries); \
-+ MC_PREP_OP(ext, 4, 0, 16, uint16_t, \
-+ cfg->ipr_cfg.max_open_frames_ipv4); \
-+ MC_PREP_OP(ext, 4, 16, 16, uint16_t, \
-+ cfg->ipr_cfg.max_open_frames_ipv6); \
-+ MC_PREP_OP(ext, 4, 32, 16, uint16_t, \
-+ cfg->ipr_cfg.max_reass_frm_size); \
-+ MC_PREP_OP(ext, 5, 0, 16, uint16_t, \
-+ cfg->ipr_cfg.min_frag_size_ipv4); \
-+ MC_PREP_OP(ext, 5, 16, 16, uint16_t, \
-+ cfg->ipr_cfg.min_frag_size_ipv6); \
-+} while (0)
-+
-+#define DPNI_EXT_EXTENDED_CFG(ext, cfg) \
-+do { \
-+ MC_EXT_OP(ext, 0, 0, 16, uint16_t, cfg->tc_cfg[0].max_dist); \
-+ MC_EXT_OP(ext, 0, 16, 16, uint16_t, cfg->tc_cfg[0].max_fs_entries); \
-+ MC_EXT_OP(ext, 0, 32, 16, uint16_t, cfg->tc_cfg[1].max_dist); \
-+ MC_EXT_OP(ext, 0, 48, 16, uint16_t, cfg->tc_cfg[1].max_fs_entries); \
-+ MC_EXT_OP(ext, 1, 0, 16, uint16_t, cfg->tc_cfg[2].max_dist); \
-+ MC_EXT_OP(ext, 1, 16, 16, uint16_t, cfg->tc_cfg[2].max_fs_entries); \
-+ MC_EXT_OP(ext, 1, 32, 16, uint16_t, cfg->tc_cfg[3].max_dist); \
-+ MC_EXT_OP(ext, 1, 48, 16, uint16_t, cfg->tc_cfg[3].max_fs_entries); \
-+ MC_EXT_OP(ext, 2, 0, 16, uint16_t, cfg->tc_cfg[4].max_dist); \
-+ MC_EXT_OP(ext, 2, 16, 16, uint16_t, cfg->tc_cfg[4].max_fs_entries); \
-+ MC_EXT_OP(ext, 2, 32, 16, uint16_t, cfg->tc_cfg[5].max_dist); \
-+ MC_EXT_OP(ext, 2, 48, 16, uint16_t, cfg->tc_cfg[5].max_fs_entries); \
-+ MC_EXT_OP(ext, 3, 0, 16, uint16_t, cfg->tc_cfg[6].max_dist); \
-+ MC_EXT_OP(ext, 3, 16, 16, uint16_t, cfg->tc_cfg[6].max_fs_entries); \
-+ MC_EXT_OP(ext, 3, 32, 16, uint16_t, cfg->tc_cfg[7].max_dist); \
-+ MC_EXT_OP(ext, 3, 48, 16, uint16_t, cfg->tc_cfg[7].max_fs_entries); \
-+ MC_EXT_OP(ext, 4, 0, 16, uint16_t, \
-+ cfg->ipr_cfg.max_open_frames_ipv4); \
-+ MC_EXT_OP(ext, 4, 16, 16, uint16_t, \
-+ cfg->ipr_cfg.max_open_frames_ipv6); \
-+ MC_EXT_OP(ext, 4, 32, 16, uint16_t, \
-+ cfg->ipr_cfg.max_reass_frm_size); \
-+ MC_EXT_OP(ext, 5, 0, 16, uint16_t, \
-+ cfg->ipr_cfg.min_frag_size_ipv4); \
-+ MC_EXT_OP(ext, 5, 16, 16, uint16_t, \
-+ cfg->ipr_cfg.min_frag_size_ipv6); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_CREATE(cmd, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->adv.max_tcs); \
-+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->adv.max_senders); \
-+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->mac_addr[5]); \
-+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->mac_addr[4]); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->mac_addr[3]); \
-+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->mac_addr[2]); \
-+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->mac_addr[1]); \
-+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->mac_addr[0]); \
-+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->adv.options); \
-+ MC_CMD_OP(cmd, 2, 0, 8, uint8_t, cfg->adv.max_unicast_filters); \
-+ MC_CMD_OP(cmd, 2, 8, 8, uint8_t, cfg->adv.max_multicast_filters); \
-+ MC_CMD_OP(cmd, 2, 16, 8, uint8_t, cfg->adv.max_vlan_filters); \
-+ MC_CMD_OP(cmd, 2, 24, 8, uint8_t, cfg->adv.max_qos_entries); \
-+ MC_CMD_OP(cmd, 2, 32, 8, uint8_t, cfg->adv.max_qos_key_size); \
-+ MC_CMD_OP(cmd, 2, 48, 8, uint8_t, cfg->adv.max_dist_key_size); \
-+ MC_CMD_OP(cmd, 2, 56, 8, enum net_prot, cfg->adv.start_hdr); \
-+ MC_CMD_OP(cmd, 4, 48, 8, uint8_t, cfg->adv.max_policers); \
-+ MC_CMD_OP(cmd, 4, 56, 8, uint8_t, cfg->adv.max_congestion_ctrl); \
-+ MC_CMD_OP(cmd, 5, 0, 64, uint64_t, cfg->adv.ext_cfg_iova); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_SET_POOLS(cmd, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->num_dpbp); \
-+ MC_CMD_OP(cmd, 0, 8, 1, int, cfg->pools[0].backup_pool); \
-+ MC_CMD_OP(cmd, 0, 9, 1, int, cfg->pools[1].backup_pool); \
-+ MC_CMD_OP(cmd, 0, 10, 1, int, cfg->pools[2].backup_pool); \
-+ MC_CMD_OP(cmd, 0, 11, 1, int, cfg->pools[3].backup_pool); \
-+ MC_CMD_OP(cmd, 0, 12, 1, int, cfg->pools[4].backup_pool); \
-+ MC_CMD_OP(cmd, 0, 13, 1, int, cfg->pools[5].backup_pool); \
-+ MC_CMD_OP(cmd, 0, 14, 1, int, cfg->pools[6].backup_pool); \
-+ MC_CMD_OP(cmd, 0, 15, 1, int, cfg->pools[7].backup_pool); \
-+ MC_CMD_OP(cmd, 0, 32, 32, int, cfg->pools[0].dpbp_id); \
-+ MC_CMD_OP(cmd, 4, 32, 16, uint16_t, cfg->pools[0].buffer_size);\
-+ MC_CMD_OP(cmd, 1, 0, 32, int, cfg->pools[1].dpbp_id); \
-+ MC_CMD_OP(cmd, 4, 48, 16, uint16_t, cfg->pools[1].buffer_size);\
-+ MC_CMD_OP(cmd, 1, 32, 32, int, cfg->pools[2].dpbp_id); \
-+ MC_CMD_OP(cmd, 5, 0, 16, uint16_t, cfg->pools[2].buffer_size);\
-+ MC_CMD_OP(cmd, 2, 0, 32, int, cfg->pools[3].dpbp_id); \
-+ MC_CMD_OP(cmd, 5, 16, 16, uint16_t, cfg->pools[3].buffer_size);\
-+ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->pools[4].dpbp_id); \
-+ MC_CMD_OP(cmd, 5, 32, 16, uint16_t, cfg->pools[4].buffer_size);\
-+ MC_CMD_OP(cmd, 3, 0, 32, int, cfg->pools[5].dpbp_id); \
-+ MC_CMD_OP(cmd, 5, 48, 16, uint16_t, cfg->pools[5].buffer_size);\
-+ MC_CMD_OP(cmd, 3, 32, 32, int, cfg->pools[6].dpbp_id); \
-+ MC_CMD_OP(cmd, 6, 0, 16, uint16_t, cfg->pools[6].buffer_size);\
-+ MC_CMD_OP(cmd, 4, 0, 32, int, cfg->pools[7].dpbp_id); \
-+ MC_CMD_OP(cmd, 6, 16, 16, uint16_t, cfg->pools[7].buffer_size);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_RSP_IS_ENABLED(cmd, en) \
-+ MC_RSP_OP(cmd, 0, 0, 1, int, en)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \
-+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_GET_IRQ(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_RSP_GET_IRQ(cmd, type, irq_cfg) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
-+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \
-+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
-+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_RSP_GET_IRQ_ENABLE(cmd, en) \
-+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_GET_IRQ_MASK(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_RSP_GET_IRQ_MASK(cmd, mask) \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_RSP_GET_IRQ_STATUS(cmd, status) \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_GET_ATTR(cmd, attr) \
-+ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, attr->ext_cfg_iova)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_RSP_GET_ATTR(cmd, attr) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\
-+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->max_tcs); \
-+ MC_RSP_OP(cmd, 0, 40, 8, uint8_t, attr->max_senders); \
-+ MC_RSP_OP(cmd, 0, 48, 8, enum net_prot, attr->start_hdr); \
-+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->options); \
-+ MC_RSP_OP(cmd, 2, 0, 8, uint8_t, attr->max_unicast_filters); \
-+ MC_RSP_OP(cmd, 2, 8, 8, uint8_t, attr->max_multicast_filters);\
-+ MC_RSP_OP(cmd, 2, 16, 8, uint8_t, attr->max_vlan_filters); \
-+ MC_RSP_OP(cmd, 2, 24, 8, uint8_t, attr->max_qos_entries); \
-+ MC_RSP_OP(cmd, 2, 32, 8, uint8_t, attr->max_qos_key_size); \
-+ MC_RSP_OP(cmd, 2, 40, 8, uint8_t, attr->max_dist_key_size); \
-+ MC_RSP_OP(cmd, 4, 48, 8, uint8_t, attr->max_policers); \
-+ MC_RSP_OP(cmd, 4, 56, 8, uint8_t, attr->max_congestion_ctrl); \
-+ MC_RSP_OP(cmd, 5, 32, 16, uint16_t, attr->version.major);\
-+ MC_RSP_OP(cmd, 5, 48, 16, uint16_t, attr->version.minor);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_SET_ERRORS_BEHAVIOR(cmd, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, cfg->errors); \
-+ MC_CMD_OP(cmd, 0, 32, 4, enum dpni_error_action, cfg->error_action); \
-+ MC_CMD_OP(cmd, 0, 36, 1, int, cfg->set_frame_annotation); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_RSP_GET_RX_BUFFER_LAYOUT(cmd, layout) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \
-+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \
-+ MC_RSP_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \
-+ MC_RSP_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \
-+ MC_RSP_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \
-+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \
-+ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_SET_RX_BUFFER_LAYOUT(cmd, layout) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \
-+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \
-+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, layout->options); \
-+ MC_CMD_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \
-+ MC_CMD_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \
-+ MC_CMD_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \
-+ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \
-+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_RSP_GET_TX_BUFFER_LAYOUT(cmd, layout) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \
-+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \
-+ MC_RSP_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \
-+ MC_RSP_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \
-+ MC_RSP_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \
-+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \
-+ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_SET_TX_BUFFER_LAYOUT(cmd, layout) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \
-+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \
-+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, layout->options); \
-+ MC_CMD_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \
-+ MC_CMD_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \
-+ MC_CMD_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \
-+ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \
-+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_RSP_GET_TX_CONF_BUFFER_LAYOUT(cmd, layout) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \
-+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \
-+ MC_RSP_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \
-+ MC_RSP_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \
-+ MC_RSP_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \
-+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \
-+ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_SET_TX_CONF_BUFFER_LAYOUT(cmd, layout) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \
-+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \
-+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, layout->options); \
-+ MC_CMD_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \
-+ MC_CMD_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \
-+ MC_CMD_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \
-+ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \
-+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_SET_L3_CHKSUM_VALIDATION(cmd, en) \
-+ MC_CMD_OP(cmd, 0, 0, 1, int, en)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_RSP_GET_L3_CHKSUM_VALIDATION(cmd, en) \
-+ MC_RSP_OP(cmd, 0, 0, 1, int, en)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_SET_L4_CHKSUM_VALIDATION(cmd, en) \
-+ MC_CMD_OP(cmd, 0, 0, 1, int, en)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_RSP_GET_L4_CHKSUM_VALIDATION(cmd, en) \
-+ MC_RSP_OP(cmd, 0, 0, 1, int, en)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_RSP_GET_QDID(cmd, qdid) \
-+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, qdid)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_RSP_GET_SP_INFO(cmd, sp_info) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, sp_info->spids[0]); \
-+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, sp_info->spids[1]); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_RSP_GET_TX_DATA_OFFSET(cmd, data_offset) \
-+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, data_offset)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_GET_COUNTER(cmd, counter) \
-+ MC_CMD_OP(cmd, 0, 0, 16, enum dpni_counter, counter)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_RSP_GET_COUNTER(cmd, value) \
-+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, value)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_SET_COUNTER(cmd, counter, value) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, enum dpni_counter, counter); \
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, value); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_SET_LINK_CFG(cmd, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->rate);\
-+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->options);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_RSP_GET_LINK_STATE(cmd, state) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 32, 1, int, state->up);\
-+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, state->rate);\
-+ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, state->options);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_SET_TX_SHAPING(cmd, tx_shaper) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, tx_shaper->max_burst_size);\
-+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, tx_shaper->rate_limit);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_SET_MAX_FRAME_LENGTH(cmd, max_frame_length) \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, max_frame_length)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_RSP_GET_MAX_FRAME_LENGTH(cmd, max_frame_length) \
-+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, max_frame_length)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_SET_MTU(cmd, mtu) \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, mtu)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_RSP_GET_MTU(cmd, mtu) \
-+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, mtu)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_SET_MULTICAST_PROMISC(cmd, en) \
-+ MC_CMD_OP(cmd, 0, 0, 1, int, en)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_RSP_GET_MULTICAST_PROMISC(cmd, en) \
-+ MC_RSP_OP(cmd, 0, 0, 1, int, en)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_SET_UNICAST_PROMISC(cmd, en) \
-+ MC_CMD_OP(cmd, 0, 0, 1, int, en)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_RSP_GET_UNICAST_PROMISC(cmd, en) \
-+ MC_RSP_OP(cmd, 0, 0, 1, int, en)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_SET_PRIMARY_MAC_ADDR(cmd, mac_addr) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \
-+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \
-+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \
-+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \
-+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_RSP_GET_PRIMARY_MAC_ADDR(cmd, mac_addr) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \
-+ MC_RSP_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \
-+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \
-+ MC_RSP_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \
-+ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \
-+ MC_RSP_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_ADD_MAC_ADDR(cmd, mac_addr) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \
-+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \
-+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \
-+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \
-+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_REMOVE_MAC_ADDR(cmd, mac_addr) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \
-+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \
-+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \
-+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \
-+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_CLEAR_MAC_FILTERS(cmd, unicast, multicast) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 1, int, unicast); \
-+ MC_CMD_OP(cmd, 0, 1, 1, int, multicast); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_SET_VLAN_FILTERS(cmd, en) \
-+ MC_CMD_OP(cmd, 0, 0, 1, int, en)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_ADD_VLAN_ID(cmd, vlan_id) \
-+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, vlan_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_REMOVE_VLAN_ID(cmd, vlan_id) \
-+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, vlan_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_SET_TX_SELECTION(cmd, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, cfg->tc_sched[0].delta_bandwidth);\
-+ MC_CMD_OP(cmd, 0, 16, 4, enum dpni_tx_schedule_mode, \
-+ cfg->tc_sched[0].mode); \
-+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, cfg->tc_sched[1].delta_bandwidth);\
-+ MC_CMD_OP(cmd, 0, 48, 4, enum dpni_tx_schedule_mode, \
-+ cfg->tc_sched[1].mode); \
-+ MC_CMD_OP(cmd, 1, 0, 16, uint16_t, cfg->tc_sched[2].delta_bandwidth);\
-+ MC_CMD_OP(cmd, 1, 16, 4, enum dpni_tx_schedule_mode, \
-+ cfg->tc_sched[2].mode); \
-+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, cfg->tc_sched[3].delta_bandwidth);\
-+ MC_CMD_OP(cmd, 1, 48, 4, enum dpni_tx_schedule_mode, \
-+ cfg->tc_sched[3].mode); \
-+ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->tc_sched[4].delta_bandwidth);\
-+ MC_CMD_OP(cmd, 2, 16, 4, enum dpni_tx_schedule_mode, \
-+ cfg->tc_sched[4].mode); \
-+ MC_CMD_OP(cmd, 2, 32, 16, uint16_t, cfg->tc_sched[5].delta_bandwidth);\
-+ MC_CMD_OP(cmd, 2, 48, 4, enum dpni_tx_schedule_mode, \
-+ cfg->tc_sched[5].mode); \
-+ MC_CMD_OP(cmd, 3, 0, 16, uint16_t, cfg->tc_sched[6].delta_bandwidth);\
-+ MC_CMD_OP(cmd, 3, 16, 4, enum dpni_tx_schedule_mode, \
-+ cfg->tc_sched[6].mode); \
-+ MC_CMD_OP(cmd, 3, 32, 16, uint16_t, cfg->tc_sched[7].delta_bandwidth);\
-+ MC_CMD_OP(cmd, 3, 48, 4, enum dpni_tx_schedule_mode, \
-+ cfg->tc_sched[7].mode); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_SET_RX_TC_DIST(cmd, tc_id, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, cfg->dist_size); \
-+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \
-+ MC_CMD_OP(cmd, 0, 24, 4, enum dpni_dist_mode, cfg->dist_mode); \
-+ MC_CMD_OP(cmd, 0, 28, 4, enum dpni_fs_miss_action, \
-+ cfg->fs_cfg.miss_action); \
-+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, cfg->fs_cfg.default_flow_id); \
-+ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, cfg->key_cfg_iova); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_SET_TX_FLOW(cmd, flow_id, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 43, 1, int, cfg->l3_chksum_gen);\
-+ MC_CMD_OP(cmd, 0, 44, 1, int, cfg->l4_chksum_gen);\
-+ MC_CMD_OP(cmd, 0, 45, 1, int, cfg->use_common_tx_conf_queue);\
-+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id);\
-+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_RSP_SET_TX_FLOW(cmd, flow_id) \
-+ MC_RSP_OP(cmd, 0, 48, 16, uint16_t, flow_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_GET_TX_FLOW(cmd, flow_id) \
-+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_RSP_GET_TX_FLOW(cmd, attr) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 43, 1, int, attr->l3_chksum_gen);\
-+ MC_RSP_OP(cmd, 0, 44, 1, int, attr->l4_chksum_gen);\
-+ MC_RSP_OP(cmd, 0, 45, 1, int, attr->use_common_tx_conf_queue);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_SET_RX_FLOW(cmd, tc_id, flow_id, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority);\
-+ MC_CMD_OP(cmd, 0, 40, 2, enum dpni_dest, cfg->dest_cfg.dest_type);\
-+ MC_CMD_OP(cmd, 0, 42, 1, int, cfg->order_preservation_en);\
-+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \
-+ MC_CMD_OP(cmd, 2, 16, 8, uint8_t, tc_id); \
-+ MC_CMD_OP(cmd, 2, 32, 32, uint32_t, cfg->options); \
-+ MC_CMD_OP(cmd, 3, 0, 4, enum dpni_flc_type, cfg->flc_cfg.flc_type); \
-+ MC_CMD_OP(cmd, 3, 4, 4, enum dpni_stash_size, \
-+ cfg->flc_cfg.frame_data_size);\
-+ MC_CMD_OP(cmd, 3, 8, 4, enum dpni_stash_size, \
-+ cfg->flc_cfg.flow_context_size);\
-+ MC_CMD_OP(cmd, 3, 32, 32, uint32_t, cfg->flc_cfg.options);\
-+ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->flc_cfg.flow_context);\
-+ MC_CMD_OP(cmd, 5, 0, 32, uint32_t, cfg->tail_drop_threshold); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_GET_RX_FLOW(cmd, tc_id, flow_id) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \
-+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_RSP_GET_RX_FLOW(cmd, attr) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id); \
-+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\
-+ MC_RSP_OP(cmd, 0, 40, 2, enum dpni_dest, attr->dest_cfg.dest_type); \
-+ MC_RSP_OP(cmd, 0, 42, 1, int, attr->order_preservation_en);\
-+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx); \
-+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->tail_drop_threshold); \
-+ MC_RSP_OP(cmd, 2, 32, 32, uint32_t, attr->fqid); \
-+ MC_RSP_OP(cmd, 3, 0, 4, enum dpni_flc_type, attr->flc_cfg.flc_type); \
-+ MC_RSP_OP(cmd, 3, 4, 4, enum dpni_stash_size, \
-+ attr->flc_cfg.frame_data_size);\
-+ MC_RSP_OP(cmd, 3, 8, 4, enum dpni_stash_size, \
-+ attr->flc_cfg.flow_context_size);\
-+ MC_RSP_OP(cmd, 3, 32, 32, uint32_t, attr->flc_cfg.options);\
-+ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, attr->flc_cfg.flow_context);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_SET_RX_ERR_QUEUE(cmd, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority);\
-+ MC_CMD_OP(cmd, 0, 40, 2, enum dpni_dest, cfg->dest_cfg.dest_type);\
-+ MC_CMD_OP(cmd, 0, 42, 1, int, cfg->order_preservation_en);\
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \
-+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options); \
-+ MC_CMD_OP(cmd, 2, 32, 32, uint32_t, cfg->tail_drop_threshold); \
-+ MC_CMD_OP(cmd, 3, 0, 4, enum dpni_flc_type, cfg->flc_cfg.flc_type); \
-+ MC_CMD_OP(cmd, 3, 4, 4, enum dpni_stash_size, \
-+ cfg->flc_cfg.frame_data_size);\
-+ MC_CMD_OP(cmd, 3, 8, 4, enum dpni_stash_size, \
-+ cfg->flc_cfg.flow_context_size);\
-+ MC_CMD_OP(cmd, 3, 32, 32, uint32_t, cfg->flc_cfg.options);\
-+ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->flc_cfg.flow_context);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_RSP_GET_RX_ERR_QUEUE(cmd, attr) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id); \
-+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\
-+ MC_RSP_OP(cmd, 0, 40, 2, enum dpni_dest, attr->dest_cfg.dest_type);\
-+ MC_RSP_OP(cmd, 0, 42, 1, int, attr->order_preservation_en);\
-+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx); \
-+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->tail_drop_threshold); \
-+ MC_RSP_OP(cmd, 2, 32, 32, uint32_t, attr->fqid); \
-+ MC_RSP_OP(cmd, 3, 0, 4, enum dpni_flc_type, attr->flc_cfg.flc_type); \
-+ MC_RSP_OP(cmd, 3, 4, 4, enum dpni_stash_size, \
-+ attr->flc_cfg.frame_data_size);\
-+ MC_RSP_OP(cmd, 3, 8, 4, enum dpni_stash_size, \
-+ attr->flc_cfg.flow_context_size);\
-+ MC_RSP_OP(cmd, 3, 32, 32, uint32_t, attr->flc_cfg.options);\
-+ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, attr->flc_cfg.flow_context);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_SET_TX_CONF_REVOKE(cmd, revoke) \
-+ MC_CMD_OP(cmd, 0, 0, 1, int, revoke)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_SET_QOS_TABLE(cmd, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->default_tc); \
-+ MC_CMD_OP(cmd, 0, 40, 1, int, cfg->discard_on_miss); \
-+ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, cfg->key_cfg_iova); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_ADD_QOS_ENTRY(cmd, cfg, tc_id) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \
-+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->key_size); \
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \
-+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_REMOVE_QOS_ENTRY(cmd, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->key_size); \
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \
-+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_ADD_FS_ENTRY(cmd, tc_id, cfg, flow_id) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \
-+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \
-+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->key_size); \
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \
-+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_REMOVE_FS_ENTRY(cmd, tc_id, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \
-+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->key_size); \
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \
-+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_CLEAR_FS_ENTRIES(cmd, tc_id) \
-+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_SET_VLAN_INSERTION(cmd, en) \
-+ MC_CMD_OP(cmd, 0, 0, 1, int, en)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_SET_VLAN_REMOVAL(cmd, en) \
-+ MC_CMD_OP(cmd, 0, 0, 1, int, en)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_SET_IPR(cmd, en) \
-+ MC_CMD_OP(cmd, 0, 0, 1, int, en)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_SET_IPF(cmd, en) \
-+ MC_CMD_OP(cmd, 0, 0, 1, int, en)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_SET_RX_TC_POLICING(cmd, tc_id, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 4, enum dpni_policer_mode, cfg->mode); \
-+ MC_CMD_OP(cmd, 0, 4, 4, enum dpni_policer_color, cfg->default_color); \
-+ MC_CMD_OP(cmd, 0, 8, 4, enum dpni_policer_unit, cfg->units); \
-+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \
-+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, cfg->options); \
-+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->cir); \
-+ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->cbs); \
-+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->eir); \
-+ MC_CMD_OP(cmd, 2, 32, 32, uint32_t, cfg->ebs);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_GET_RX_TC_POLICING(cmd, tc_id) \
-+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_RSP_GET_RX_TC_POLICING(cmd, cfg) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 4, enum dpni_policer_mode, cfg->mode); \
-+ MC_RSP_OP(cmd, 0, 4, 4, enum dpni_policer_color, cfg->default_color); \
-+ MC_RSP_OP(cmd, 0, 8, 4, enum dpni_policer_unit, cfg->units); \
-+ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, cfg->options); \
-+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->cir); \
-+ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->cbs); \
-+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, cfg->eir); \
-+ MC_RSP_OP(cmd, 2, 32, 32, uint32_t, cfg->ebs);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_PREP_EARLY_DROP(ext, cfg) \
-+do { \
-+ MC_PREP_OP(ext, 0, 0, 2, enum dpni_early_drop_mode, cfg->mode); \
-+ MC_PREP_OP(ext, 0, 2, 2, \
-+ enum dpni_congestion_unit, cfg->units); \
-+ MC_PREP_OP(ext, 0, 32, 32, uint32_t, cfg->tail_drop_threshold); \
-+ MC_PREP_OP(ext, 1, 0, 8, uint8_t, cfg->green.drop_probability); \
-+ MC_PREP_OP(ext, 2, 0, 64, uint64_t, cfg->green.max_threshold); \
-+ MC_PREP_OP(ext, 3, 0, 64, uint64_t, cfg->green.min_threshold); \
-+ MC_PREP_OP(ext, 5, 0, 8, uint8_t, cfg->yellow.drop_probability);\
-+ MC_PREP_OP(ext, 6, 0, 64, uint64_t, cfg->yellow.max_threshold); \
-+ MC_PREP_OP(ext, 7, 0, 64, uint64_t, cfg->yellow.min_threshold); \
-+ MC_PREP_OP(ext, 9, 0, 8, uint8_t, cfg->red.drop_probability); \
-+ MC_PREP_OP(ext, 10, 0, 64, uint64_t, cfg->red.max_threshold); \
-+ MC_PREP_OP(ext, 11, 0, 64, uint64_t, cfg->red.min_threshold); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_EXT_EARLY_DROP(ext, cfg) \
-+do { \
-+ MC_EXT_OP(ext, 0, 0, 2, enum dpni_early_drop_mode, cfg->mode); \
-+ MC_EXT_OP(ext, 0, 2, 2, \
-+ enum dpni_congestion_unit, cfg->units); \
-+ MC_EXT_OP(ext, 0, 32, 32, uint32_t, cfg->tail_drop_threshold); \
-+ MC_EXT_OP(ext, 1, 0, 8, uint8_t, cfg->green.drop_probability); \
-+ MC_EXT_OP(ext, 2, 0, 64, uint64_t, cfg->green.max_threshold); \
-+ MC_EXT_OP(ext, 3, 0, 64, uint64_t, cfg->green.min_threshold); \
-+ MC_EXT_OP(ext, 5, 0, 8, uint8_t, cfg->yellow.drop_probability);\
-+ MC_EXT_OP(ext, 6, 0, 64, uint64_t, cfg->yellow.max_threshold); \
-+ MC_EXT_OP(ext, 7, 0, 64, uint64_t, cfg->yellow.min_threshold); \
-+ MC_EXT_OP(ext, 9, 0, 8, uint8_t, cfg->red.drop_probability); \
-+ MC_EXT_OP(ext, 10, 0, 64, uint64_t, cfg->red.max_threshold); \
-+ MC_EXT_OP(ext, 11, 0, 64, uint64_t, cfg->red.min_threshold); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_SET_RX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_GET_RX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_SET_TX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPNI_CMD_GET_TX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \
-+} while (0)
-+
-+#define DPNI_CMD_SET_RX_TC_CONGESTION_NOTIFICATION(cmd, tc_id, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \
-+ MC_CMD_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \
-+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \
-+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \
-+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \
-+ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \
-+ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \
-+ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \
-+ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \
-+ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \
-+} while (0)
-+
-+#define DPNI_CMD_GET_RX_TC_CONGESTION_NOTIFICATION(cmd, tc_id) \
-+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id)
-+
-+#define DPNI_RSP_GET_RX_TC_CONGESTION_NOTIFICATION(cmd, cfg) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \
-+ MC_RSP_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \
-+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \
-+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \
-+ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \
-+ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \
-+ MC_RSP_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \
-+ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \
-+ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \
-+} while (0)
-+
-+#define DPNI_CMD_SET_TX_TC_CONGESTION_NOTIFICATION(cmd, tc_id, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \
-+ MC_CMD_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \
-+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \
-+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \
-+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \
-+ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \
-+ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \
-+ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \
-+ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \
-+ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \
-+} while (0)
-+
-+#define DPNI_CMD_GET_TX_TC_CONGESTION_NOTIFICATION(cmd, tc_id) \
-+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id)
-+
-+#define DPNI_RSP_GET_TX_TC_CONGESTION_NOTIFICATION(cmd, cfg) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \
-+ MC_RSP_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \
-+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \
-+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \
-+ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \
-+ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \
-+ MC_RSP_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \
-+ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \
-+ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \
-+} while (0)
-+
-+#define DPNI_CMD_SET_TX_CONF(cmd, flow_id, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->queue_cfg.dest_cfg.priority); \
-+ MC_CMD_OP(cmd, 0, 40, 2, enum dpni_dest, \
-+ cfg->queue_cfg.dest_cfg.dest_type); \
-+ MC_CMD_OP(cmd, 0, 42, 1, int, cfg->errors_only); \
-+ MC_CMD_OP(cmd, 0, 46, 1, int, cfg->queue_cfg.order_preservation_en); \
-+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->queue_cfg.user_ctx); \
-+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->queue_cfg.options); \
-+ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->queue_cfg.dest_cfg.dest_id); \
-+ MC_CMD_OP(cmd, 3, 0, 32, uint32_t, \
-+ cfg->queue_cfg.tail_drop_threshold); \
-+ MC_CMD_OP(cmd, 4, 0, 4, enum dpni_flc_type, \
-+ cfg->queue_cfg.flc_cfg.flc_type); \
-+ MC_CMD_OP(cmd, 4, 4, 4, enum dpni_stash_size, \
-+ cfg->queue_cfg.flc_cfg.frame_data_size); \
-+ MC_CMD_OP(cmd, 4, 8, 4, enum dpni_stash_size, \
-+ cfg->queue_cfg.flc_cfg.flow_context_size); \
-+ MC_CMD_OP(cmd, 4, 32, 32, uint32_t, cfg->queue_cfg.flc_cfg.options); \
-+ MC_CMD_OP(cmd, 5, 0, 64, uint64_t, \
-+ cfg->queue_cfg.flc_cfg.flow_context); \
-+} while (0)
-+
-+#define DPNI_CMD_GET_TX_CONF(cmd, flow_id) \
-+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id)
-+
-+#define DPNI_RSP_GET_TX_CONF(cmd, attr) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, \
-+ attr->queue_attr.dest_cfg.priority); \
-+ MC_RSP_OP(cmd, 0, 40, 2, enum dpni_dest, \
-+ attr->queue_attr.dest_cfg.dest_type); \
-+ MC_RSP_OP(cmd, 0, 42, 1, int, attr->errors_only); \
-+ MC_RSP_OP(cmd, 0, 46, 1, int, \
-+ attr->queue_attr.order_preservation_en); \
-+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->queue_attr.user_ctx); \
-+ MC_RSP_OP(cmd, 2, 32, 32, int, attr->queue_attr.dest_cfg.dest_id); \
-+ MC_RSP_OP(cmd, 3, 0, 32, uint32_t, \
-+ attr->queue_attr.tail_drop_threshold); \
-+ MC_RSP_OP(cmd, 3, 32, 32, uint32_t, attr->queue_attr.fqid); \
-+ MC_RSP_OP(cmd, 4, 0, 4, enum dpni_flc_type, \
-+ attr->queue_attr.flc_cfg.flc_type); \
-+ MC_RSP_OP(cmd, 4, 4, 4, enum dpni_stash_size, \
-+ attr->queue_attr.flc_cfg.frame_data_size); \
-+ MC_RSP_OP(cmd, 4, 8, 4, enum dpni_stash_size, \
-+ attr->queue_attr.flc_cfg.flow_context_size); \
-+ MC_RSP_OP(cmd, 4, 32, 32, uint32_t, attr->queue_attr.flc_cfg.options); \
-+ MC_RSP_OP(cmd, 5, 0, 64, uint64_t, \
-+ attr->queue_attr.flc_cfg.flow_context); \
-+} while (0)
-+
-+#define DPNI_CMD_SET_TX_CONF_CONGESTION_NOTIFICATION(cmd, flow_id, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \
-+ MC_CMD_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \
-+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \
-+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \
-+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \
-+ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \
-+ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \
-+ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \
-+ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \
-+ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \
-+} while (0)
-+
-+#define DPNI_CMD_GET_TX_CONF_CONGESTION_NOTIFICATION(cmd, flow_id) \
-+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id)
-+
-+#define DPNI_RSP_GET_TX_CONF_CONGESTION_NOTIFICATION(cmd, cfg) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \
-+ MC_RSP_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \
-+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \
-+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \
-+ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \
-+ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \
-+ MC_RSP_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \
-+ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \
-+ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \
-+} while (0)
-+
-+#endif /* _FSL_DPNI_CMD_H */
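[Editor's note, not part of the patch: for context on the DPNI_CMD_*/DPNI_RSP_* macros removed above, here is a minimal, self-contained sketch of the param/offset/width field-encoding idea they rely on. The helper names enc_field and pack_get_rx_flow_param0 are hypothetical and do not exist in the MC driver; the layout values (tc_id at offset 16, width 8; flow_id at offset 48, width 16, in parameter word 0) mirror DPNI_CMD_GET_RX_FLOW above.]

#include <stdint.h>

/* Illustration only: shift a value to its bit offset and mask it to its
 * width so it can be OR-ed into a 64-bit command parameter word. */
static inline uint64_t enc_field(unsigned int offset, unsigned int width,
				 uint64_t val)
{
	uint64_t mask = (width < 64) ? ((1ULL << width) - 1) : ~0ULL;

	return (val & mask) << offset;
}

/* Mirrors the DPNI_CMD_GET_RX_FLOW layout: tc_id occupies bits 16..23 and
 * flow_id occupies bits 48..63 of parameter word 0. */
static inline uint64_t pack_get_rx_flow_param0(uint8_t tc_id, uint16_t flow_id)
{
	return enc_field(16, 8, tc_id) | enc_field(48, 16, flow_id);
}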
-diff --git a/drivers/net/dpaa2/mc/fsl_dprc.h b/drivers/net/dpaa2/mc/fsl_dprc.h
-new file mode 100644
-index 0000000..c831f46
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/fsl_dprc.h
-@@ -0,0 +1,1032 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef _FSL_DPRC_H
-+#define _FSL_DPRC_H
-+
-+/* Data Path Resource Container API
-+ * Contains DPRC API for managing and querying DPAA resources
-+ */
-+
-+struct fsl_mc_io;
-+
-+/**
-+ * Set this value as the icid value in dprc_cfg structure when creating a
-+ * container, in case the ICID is not selected by the user and should be
-+ * allocated by the DPRC from the pool of ICIDs.
-+ */
-+#define DPRC_GET_ICID_FROM_POOL (uint16_t)(~(0))
-+
-+/**
-+ * Set this value as the portal_id value in dprc_cfg structure when creating a
-+ * container, in case the portal ID is not specifically selected by the
-+ * user and should be allocated by the DPRC from the pool of portal ids.
-+ */
-+#define DPRC_GET_PORTAL_ID_FROM_POOL (int)(~(0))
-+
-+/**
-+ * dprc_get_container_id() - Get container ID associated with a given portal.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @container_id: Requested container ID
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_get_container_id(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ int *container_id);
-+
-+/**
-+ * dprc_open() - Open DPRC object for use
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @container_id: Container ID to open
-+ * @token: Returned token of DPRC object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ *
-+ * @warning Required before any operation on the object.
-+ */
-+int dprc_open(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ int container_id,
-+ uint16_t *token);
-+
-+/**
-+ * dprc_close() - Close the control session of the object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ *
-+ * After this function is called, no further operations are
-+ * allowed on the object without opening a new control session.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_close(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * Container general options
-+ *
-+ * These options may be selected at container creation by the container creator
-+ * and can be retrieved using dprc_get_attributes()
-+ */
-+
-+/**
-+ * Spawn Policy Option allowed - Indicates that the new container is allowed
-+ * to spawn and have its own child containers.
-+ */
-+#define DPRC_CFG_OPT_SPAWN_ALLOWED 0x00000001
-+
-+/**
-+ * General Container allocation policy - Indicates that the new container is
-+ * allowed to allocate requested resources from its parent container; if not
-+ * set, the container is only allowed to use resources in its own pools; Note
-+ * that this is a container's global policy, but the parent container may
-+ * override it and set specific quota per resource type.
-+ */
-+#define DPRC_CFG_OPT_ALLOC_ALLOWED 0x00000002
-+
-+/**
-+ * Object initialization allowed - software context associated with this
-+ * container is allowed to invoke object initialization operations.
-+ */
-+#define DPRC_CFG_OPT_OBJ_CREATE_ALLOWED 0x00000004
-+
-+/**
-+ * Topology change allowed - software context associated with this
-+ * container is allowed to invoke topology operations, such as attach/detach
-+ * of network objects.
-+ */
-+#define DPRC_CFG_OPT_TOPOLOGY_CHANGES_ALLOWED 0x00000008
-+
-+/**
-+ * AIOP - Indicates that container belongs to AIOP.
-+ */
-+#define DPRC_CFG_OPT_AIOP 0x00000020
-+
-+/**
-+ * IRQ Config - Indicates that the container is allowed to configure its IRQs.
-+ */
-+#define DPRC_CFG_OPT_IRQ_CFG_ALLOWED 0x00000040
-+
-+/**
-+ * struct dprc_cfg - Container configuration options
-+ * @icid: Container's ICID; if set to 'DPRC_GET_ICID_FROM_POOL', a free
-+ * ICID value is allocated by the DPRC
-+ * @portal_id: Portal ID; if set to 'DPRC_GET_PORTAL_ID_FROM_POOL', a free
-+ * portal ID is allocated by the DPRC
-+ * @options: Combination of 'DPRC_CFG_OPT_<X>' options
-+ * @label: Object's label
-+ */
-+struct dprc_cfg {
-+ uint16_t icid;
-+ int portal_id;
-+ uint64_t options;
-+ char label[16];
-+};
-+
-+/**
-+ * dprc_create_container() - Create child container
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @cfg: Child container configuration
-+ * @child_container_id: Returned child container ID
-+ * @child_portal_offset: Returned child portal offset from MC portal base
-+ *
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_create_container(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dprc_cfg *cfg,
-+ int *child_container_id,
-+ uint64_t *child_portal_offset);
-+
-+/**
-+ * dprc_destroy_container() - Destroy child container.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @child_container_id: ID of the container to destroy
-+ *
-+ * This function terminates the child container, so following this call the
-+ * child container ID becomes invalid.
-+ *
-+ * Notes:
-+ * - All resources and objects of the destroyed container are returned to the
-+ * parent container, or destroyed if they were created by the destroyed
-+ * container.
-+ * - This function destroys all the child containers of the specified
-+ * container prior to destroying the container itself.
-+ *
-+ * warning: Only the parent container is allowed to destroy a child container;
-+ * Container 0 can't be destroyed
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ *
-+ */
-+int dprc_destroy_container(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int child_container_id);
-+
-+/**
-+ * dprc_reset_container - Reset child container.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @child_container_id: ID of the container to reset
-+ *
-+ * In case a software context crashes or becomes non-responsive, the parent
-+ * may wish to reset its resources container before the software context is
-+ * restarted.
-+ *
-+ * This routine informs all objects assigned to the child container that the
-+ * container is being reset, so they may perform any cleanup operations that are
-+ * needed. All object handles that were owned by the child container shall be
-+ * closed.
-+ *
-+ * Note that such a request may be submitted even if the child software context
-+ * has not crashed, but the resulting object cleanup operations will not be
-+ * aware of that.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_reset_container(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int child_container_id);
-+
-+/**
-+ * DPRC IRQ Index and Events
-+ */
-+
-+/**
-+ * IRQ index
-+ */
-+#define DPRC_IRQ_INDEX 0
-+
-+/**
-+ * Number of dprc's IRQs
-+ */
-+#define DPRC_NUM_OF_IRQS 1
-+
-+/* DPRC IRQ events */
-+/**
-+ * IRQ event - Indicates that a new object was added to the container
-+ */
-+#define DPRC_IRQ_EVENT_OBJ_ADDED 0x00000001
-+/**
-+ * IRQ event - Indicates that an object was removed from the container
-+ */
-+#define DPRC_IRQ_EVENT_OBJ_REMOVED 0x00000002
-+/**
-+ * IRQ event - Indicates that resources were added to the container
-+ */
-+#define DPRC_IRQ_EVENT_RES_ADDED 0x00000004
-+/**
-+ * IRQ event - Indicates that resources were removed from the container
-+ */
-+#define DPRC_IRQ_EVENT_RES_REMOVED 0x00000008
-+/**
-+ * IRQ event - Indicates that one of the descendant containers opened by
-+ * this container was destroyed
-+ */
-+#define DPRC_IRQ_EVENT_CONTAINER_DESTROYED 0x00000010
-+/**
-+ * IRQ event - Indicates that one of the objects opened by this container was
-+ * destroyed
-+ */
-+#define DPRC_IRQ_EVENT_OBJ_DESTROYED 0x00000020
-+/**
-+ * IRQ event - Indicates that an object was created in the container
-+ */
-+#define DPRC_IRQ_EVENT_OBJ_CREATED 0x00000040
-+
-+/**
-+ * struct dprc_irq_cfg - IRQ configuration
-+ * @addr: Address that must be written to signal a message-based interrupt
-+ * @val: Value to write into irq_addr address
-+ * @irq_num: A user defined number associated with this IRQ
-+ */
-+struct dprc_irq_cfg {
-+ uint64_t addr;
-+ uint32_t val;
-+ int irq_num;
-+};
-+
-+/**
-+ * dprc_set_irq() - Set IRQ information for the DPRC to trigger an interrupt.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @irq_index: Identifies the interrupt index to configure
-+ * @irq_cfg: IRQ configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_set_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ struct dprc_irq_cfg *irq_cfg);
-+
-+/**
-+ * dprc_get_irq() - Get IRQ information from the DPRC.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @irq_index: The interrupt index to configure
-+ * @type: Interrupt type: 0 represents message interrupt
-+ * type (both irq_addr and irq_val are valid)
-+ * @irq_cfg: IRQ attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_get_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ int *type,
-+ struct dprc_irq_cfg *irq_cfg);
-+
-+/**
-+ * dprc_set_irq_enable() - Set overall interrupt state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Interrupt state - enable = 1, disable = 0
-+ *
-+ * Allows GPP software to control when interrupts are generated.
-+ * Each interrupt can have up to 32 causes. The enable/disable controls the
-+ * overall interrupt state. If the interrupt is disabled, no cause will
-+ * trigger an interrupt.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t en);
-+
-+/**
-+ * dprc_get_irq_enable() - Get overall interrupt state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Returned interrupt state - enable = 1, disable = 0
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_get_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t *en);
-+
-+/**
-+ * dprc_set_irq_mask() - Set interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: event mask to trigger interrupt;
-+ * each bit:
-+ * 0 = ignore event
-+ * 1 = consider event for asserting IRQ
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t mask);
-+
-+/**
-+ * dprc_get_irq_mask() - Get interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: Returned event mask to trigger interrupt
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_get_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *mask);
-+
-+/**
-+ * dprc_get_irq_status() - Get the current status of any pending interrupts.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @irq_index: The interrupt index to configure
-+ * @status: Returned interrupts status - one bit per cause:
-+ * 0 = no interrupt pending
-+ * 1 = interrupt pending
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_get_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *status);
-+
-+/**
-+ * dprc_clear_irq_status() - Clear a pending interrupt's status
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @irq_index: The interrupt index to configure
-+ * @status: bits to clear (W1C) - one bit per cause:
-+ * 0 = don't change
-+ * 1 = clear status bit
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t status);
-+
-+/**
-+ * struct dprc_attributes - Container attributes
-+ * @container_id: Container's ID
-+ * @icid: Container's ICID
-+ * @portal_id: Container's portal ID
-+ * @options: Container's options as set at container's creation
-+ * @version: DPRC version
-+ */
-+struct dprc_attributes {
-+ int container_id;
-+ uint16_t icid;
-+ int portal_id;
-+ uint64_t options;
-+ /**
-+ * struct version - DPRC version
-+ * @major: DPRC major version
-+ * @minor: DPRC minor version
-+ */
-+ struct {
-+ uint16_t major;
-+ uint16_t minor;
-+ } version;
-+};
-+
-+/**
-+ * dprc_get_attributes() - Obtains container attributes
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @attributes: Returned container attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dprc_attributes *attributes);
-+
-+/**
-+ * dprc_set_res_quota() - Set allocation policy for a specific resource/object
-+ * type in a child container
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @child_container_id: ID of the child container
-+ * @type: Resource/object type
-+ * @quota: Sets the maximum number of resources of the selected type
-+ * that the child container is allowed to allocate from its parent;
-+ * when quota is set to -1, the policy is the same as container's
-+ * general policy.
-+ *
-+ * Allocation policy determines whether or not a container may allocate
-+ * resources from its parent. Each container has a 'global' allocation policy
-+ * that is set when the container is created.
-+ *
-+ * This function sets allocation policy for a specific resource type.
-+ * The default policy for all resource types matches the container's 'global'
-+ * allocation policy.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ *
-+ * @warning Only the parent container is allowed to change a child policy.
-+ */
-+int dprc_set_res_quota(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int child_container_id,
-+ char *type,
-+ uint16_t quota);
-+
-+/**
-+ * dprc_get_res_quota() - Gets the allocation policy of a specific
-+ * resource/object type in a child container
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @child_container_id: ID of the child container
-+ * @type: resource/object type
-+ * @quota: Returns the maximum number of resources of the selected type
-+ * that the child container is allowed to allocate from the parent;
-+ * when quota is set to -1, the policy is the same as container's
-+ * general policy.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_get_res_quota(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int child_container_id,
-+ char *type,
-+ uint16_t *quota);
-+
-+/* Resource request options */
-+
-+/**
-+ * Explicit resource ID request - The requested objects/resources
-+ * are explicit and sequential (in case of resources).
-+ * The base ID is given in the res_req id_base_align field
-+ */
-+#define DPRC_RES_REQ_OPT_EXPLICIT 0x00000001
-+
-+/**
-+ * Aligned resources request - Relevant only for resource
-+ * requests (and not objects). Indicates that the resources' base ID should be
-+ * sequential and aligned to the value given in the dprc_res_req id_base_align
-+ * field
-+ */
-+#define DPRC_RES_REQ_OPT_ALIGNED 0x00000002
-+
-+/**
-+ * Plugged Flag - Relevant only for object assignment request.
-+ * Indicates that, after all objects are assigned, an interrupt will be
-+ * invoked at the relevant GPP. The assigned objects will be marked as
-+ * plugged; plugged objects can't be assigned from their container
-+ */
-+#define DPRC_RES_REQ_OPT_PLUGGED 0x00000004
-+
-+/**
-+ * struct dprc_res_req - Resource request descriptor, to be used in assignment
-+ * or un-assignment of resources and objects.
-+ * @type: Resource/object type: Represented as a NULL terminated string.
-+ * This string may be received by using dprc_get_pool() to get resource
-+ * type and dprc_get_obj() to get object type;
-+ * Note: it is not possible to assign/un-assign DPRC objects
-+ * @num: Number of resources
-+ * @options: Request options: combination of DPRC_RES_REQ_OPT_ options
-+ * @id_base_align: In case of explicit assignment (DPRC_RES_REQ_OPT_EXPLICIT
-+ * is set at option), this field represents the required base ID
-+ * for resource allocation; In case of aligned assignment
-+ * (DPRC_RES_REQ_OPT_ALIGNED is set at option), this field
-+ * indicates the required alignment for the resource ID(s) -
-+ * use 0 if there is no alignment or explicit ID requirements
-+ */
-+struct dprc_res_req {
-+ char type[16];
-+ uint32_t num;
-+ uint32_t options;
-+ int id_base_align;
-+};
-+
-+/**
-+ * dprc_assign() - Assigns objects or resource to a child container.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @container_id: ID of the child container
-+ * @res_req: Describes the type and amount of resources to
-+ * assign to the given container
-+ *
-+ * Assignment is usually done by a parent (this DPRC) to one of its child
-+ * containers.
-+ *
-+ * According to the DPRC allocation policy, the assigned resources may be taken
-+ * (allocated) from the container's ancestors, if not enough resources are
-+ * available in the container itself.
-+ *
-+ * The type of assignment depends on the dprc_res_req options, as follows:
-+ * - DPRC_RES_REQ_OPT_EXPLICIT: indicates that assigned resources should have
-+ * the explicit base ID specified at the id_base_align field of res_req.
-+ * - DPRC_RES_REQ_OPT_ALIGNED: indicates that the assigned resources should be
-+ * aligned to the value given at id_base_align field of res_req.
-+ * - DPRC_RES_REQ_OPT_PLUGGED: Relevant only for object assignment,
-+ * and indicates that the object must be set to the plugged state.
-+ *
-+ * A container may use this function with its own ID in order to change an
-+ * object's state to plugged or unplugged.
-+ *
-+ * If IRQ information has been set in the child DPRC, it will signal an
-+ * interrupt following every change in its object assignment.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_assign(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int container_id,
-+ struct dprc_res_req *res_req);
-+
-+/**
-+ * dprc_unassign() - Un-assigns objects or resources from a child container
-+ * and moves them into this (parent) DPRC.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @child_container_id: ID of the child container
-+ * @res_req: Describes the type and amount of resources to un-assign from
-+ * the child container
-+ *
-+ * Un-assignment of objects can succeed only if the object is not in the
-+ * plugged or opened state.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_unassign(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int child_container_id,
-+ struct dprc_res_req *res_req);
-+
-+/**
-+ * dprc_get_pool_count() - Get the number of dprc's pools
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @pool_count: Returned number of resource pools in the dprc
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_get_pool_count(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int *pool_count);
-+
-+/**
-+ * dprc_get_pool() - Get the type (string) of a certain dprc's pool
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @pool_index: Index of the pool to be queried (< pool_count)
-+ * @type: The type of the pool
-+ *
-+ * The pool types are retrieved one by one by incrementing
-+ * pool_index up to (not including) the value of pool_count returned
-+ * from dprc_get_pool_count(). dprc_get_pool_count() must
-+ * be called prior to dprc_get_pool().
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_get_pool(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int pool_index,
-+ char *type);
-+
-+/**
-+ * dprc_get_obj_count() - Obtains the number of objects in the DPRC
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @obj_count: Number of objects assigned to the DPRC
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_get_obj_count(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int *obj_count);
-+
-+/**
-+ * Objects Attributes Flags
-+ */
-+
-+/**
-+ * Opened state - Indicates that an object is open by at least one owner
-+ */
-+#define DPRC_OBJ_STATE_OPEN 0x00000001
-+/**
-+ * Plugged state - Indicates that the object is plugged
-+ */
-+#define DPRC_OBJ_STATE_PLUGGED 0x00000002
-+
-+/**
-+ * Shareability flag - Object flag indicating no memory shareability.
-+ * The object generates memory accesses that are non-coherent with other
-+ * masters;
-+ * user is responsible for proper memory handling through IOMMU configuration.
-+ */
-+#define DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY 0x0001
-+
-+/**
-+ * struct dprc_obj_desc - Object descriptor, returned from dprc_get_obj()
-+ * @type: Type of object: NULL terminated string
-+ * @id: ID of logical object resource
-+ * @vendor: Object vendor identifier
-+ * @ver_major: Major version number
-+ * @ver_minor: Minor version number
-+ * @irq_count: Number of interrupts supported by the object
-+ * @region_count: Number of mappable regions supported by the object
-+ * @state: Object state: combination of DPRC_OBJ_STATE_ states
-+ * @label: Object label
-+ * @flags: Object's flags
-+ */
-+struct dprc_obj_desc {
-+ char type[16];
-+ int id;
-+ uint16_t vendor;
-+ uint16_t ver_major;
-+ uint16_t ver_minor;
-+ uint8_t irq_count;
-+ uint8_t region_count;
-+ uint32_t state;
-+ char label[16];
-+ uint16_t flags;
-+};
-+
-+/**
-+ * dprc_get_obj() - Get general information on an object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @obj_index: Index of the object to be queried (< obj_count)
-+ * @obj_desc: Returns the requested object descriptor
-+ *
-+ * The object descriptors are retrieved one by one by incrementing
-+ * obj_index up to (not including) the value of obj_count returned
-+ * from dprc_get_obj_count(). dprc_get_obj_count() must
-+ * be called prior to dprc_get_obj().
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_get_obj(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int obj_index,
-+ struct dprc_obj_desc *obj_desc);
-+
-+/**
-+ * dprc_get_obj_desc() - Get object descriptor.
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @obj_type: The type of the object to get its descriptor.
-+ * @obj_id: The id of the object to get its descriptor
-+ * @obj_desc: The returned descriptor to fill and return to the user
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ *
-+ */
-+int dprc_get_obj_desc(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ char *obj_type,
-+ int obj_id,
-+ struct dprc_obj_desc *obj_desc);
-+
-+/**
-+ * dprc_set_obj_irq() - Set IRQ information for object to trigger an interrupt.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @obj_type: Type of the object to set its IRQ
-+ * @obj_id: ID of the object to set its IRQ
-+ * @irq_index: The interrupt index to configure
-+ * @irq_cfg: IRQ configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ char *obj_type,
-+ int obj_id,
-+ uint8_t irq_index,
-+ struct dprc_irq_cfg *irq_cfg);
-+
-+/**
-+ * dprc_get_obj_irq() - Get IRQ information from object.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @obj_type: Type of the object to get its IRQ
-+ * @obj_id: ID of the object to get its IRQ
-+ * @irq_index: The interrupt index to configure
-+ * @type: Interrupt type: 0 represents message interrupt
-+ * type (both irq_addr and irq_val are valid)
-+ * @irq_cfg: The returned IRQ attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_get_obj_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ char *obj_type,
-+ int obj_id,
-+ uint8_t irq_index,
-+ int *type,
-+ struct dprc_irq_cfg *irq_cfg);
-+
-+/**
-+ * dprc_get_res_count() - Obtains the number of free resources that are
-+ * assigned to this container, by pool type
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @type: pool type
-+ * @res_count: Returned number of free resources of the given
-+ * resource type that are assigned to this DPRC
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_get_res_count(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ char *type,
-+ int *res_count);
-+
-+/**
-+ * enum dprc_iter_status - Iteration status
-+ * @DPRC_ITER_STATUS_FIRST: Perform first iteration
-+ * @DPRC_ITER_STATUS_MORE: Indicates more/next iteration is needed
-+ * @DPRC_ITER_STATUS_LAST: Indicates last iteration
-+ */
-+enum dprc_iter_status {
-+ DPRC_ITER_STATUS_FIRST = 0,
-+ DPRC_ITER_STATUS_MORE = 1,
-+ DPRC_ITER_STATUS_LAST = 2
-+};
-+
-+/**
-+ * struct dprc_res_ids_range_desc - Resource ID range descriptor
-+ * @base_id: Base resource ID of this range
-+ * @last_id: Last resource ID of this range
-+ * @iter_status: Iteration status - should be set to DPRC_ITER_STATUS_FIRST at
-+ * first iteration; while the returned marker is DPRC_ITER_STATUS_MORE,
-+ * additional iterations are needed, until the returned marker is
-+ * DPRC_ITER_STATUS_LAST
-+ */
-+struct dprc_res_ids_range_desc {
-+ int base_id;
-+ int last_id;
-+ enum dprc_iter_status iter_status;
-+};
-+
-+/**
-+ * dprc_get_res_ids() - Obtains IDs of free resources in the container
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @type: pool type
-+ * @range_desc: range descriptor
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_get_res_ids(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ char *type,
-+ struct dprc_res_ids_range_desc *range_desc);
-+
-+/**
-+ * Region flags
-+ */
-+/**
-+ * Cacheable - Indicates that region should be mapped as cacheable
-+ */
-+#define DPRC_REGION_CACHEABLE 0x00000001
-+
-+/**
-+ * enum dprc_region_type - Region type
-+ * @DPRC_REGION_TYPE_MC_PORTAL: MC portal region
-+ * @DPRC_REGION_TYPE_QBMAN_PORTAL: Qbman portal region
-+ */
-+enum dprc_region_type {
-+ DPRC_REGION_TYPE_MC_PORTAL,
-+ DPRC_REGION_TYPE_QBMAN_PORTAL
-+};
-+
-+/**
-+ * struct dprc_region_desc - Mappable region descriptor
-+ * @base_offset: Region offset from region's base address.
-+ * For DPMCP and DPRC objects, region base is offset from SoC MC portals
-+ * base address; For DPIO, region base is offset from SoC QMan portals
-+ * base address
-+ * @size: Region size (in bytes)
-+ * @flags: Region attributes
-+ * @type: Portal region type
-+ */
-+struct dprc_region_desc {
-+ uint32_t base_offset;
-+ uint32_t size;
-+ uint32_t flags;
-+ enum dprc_region_type type;
-+};
-+
-+/**
-+ * dprc_get_obj_region() - Get region information for a specified object.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @obj_type: Object type as returned in dprc_get_obj()
-+ * @obj_id: Unique object instance as returned in dprc_get_obj()
-+ * @region_index: The specific region to query
-+ * @region_desc: Returns the requested region descriptor
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_get_obj_region(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ char *obj_type,
-+ int obj_id,
-+ uint8_t region_index,
-+ struct dprc_region_desc *region_desc);
-+
-+/**
-+ * dprc_set_obj_label() - Set object label.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @obj_type: Object's type
-+ * @obj_id: Object's ID
-+ * @label: The required label. The maximum length is 16 chars.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_set_obj_label(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ char *obj_type,
-+ int obj_id,
-+ char *label);
-+
-+/**
-+ * struct dprc_endpoint - Endpoint description for link connect/disconnect
-+ * operations
-+ * @type: Endpoint object type: NULL terminated string
-+ * @id: Endpoint object ID
-+ * @if_id: Interface ID; should be set for endpoints with multiple
-+ * interfaces ("dpsw", "dpdmux"); for others, always set to 0
-+ */
-+struct dprc_endpoint {
-+ char type[16];
-+ int id;
-+ uint16_t if_id;
-+};
-+
-+/**
-+ * struct dprc_connection_cfg - Connection configuration.
-+ * Used for virtual connections only
-+ * @committed_rate: Committed rate (Mbits/s)
-+ * @max_rate: Maximum rate (Mbits/s)
-+ */
-+struct dprc_connection_cfg {
-+ uint32_t committed_rate;
-+ uint32_t max_rate;
-+};
-+
-+/**
-+ * dprc_connect() - Connect two endpoints to create a network link between them
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @endpoint1: Endpoint 1 configuration parameters
-+ * @endpoint2: Endpoint 2 configuration parameters
-+ * @cfg: Connection configuration. The connection configuration is ignored for
-+ * connections made to DPMAC objects, where rate is retrieved from the
-+ * MAC configuration.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_connect(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const struct dprc_endpoint *endpoint1,
-+ const struct dprc_endpoint *endpoint2,
-+ const struct dprc_connection_cfg *cfg);
-+
-+/**
-+ * dprc_disconnect() - Disconnect one endpoint to remove its network connection
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRC object
-+ * @endpoint: Endpoint configuration parameters
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprc_disconnect(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const struct dprc_endpoint *endpoint);
-+
-+/**
-+* dprc_get_connection() - Get connected endpoint and link status if connection
-+* exists.
-+* @mc_io: Pointer to MC portal's I/O object
-+* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+* @token: Token of DPRC object
-+* @endpoint1: Endpoint 1 configuration parameters
-+* @endpoint2: Returned endpoint 2 configuration parameters
-+* @state: Returned link state:
-+* 1 - link is up;
-+* 0 - link is down;
-+* -1 - no connection (endpoint2 information is irrelevant)
-+*
-+* Return: '0' on Success; -ENAVAIL if connection does not exist.
-+*/
-+int dprc_get_connection(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const struct dprc_endpoint *endpoint1,
-+ struct dprc_endpoint *endpoint2,
-+ int *state);
-+
-+#endif /* _FSL_DPRC_H */
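[Editor's note, not part of the patch: as a usage sketch only, the DPRC API declared in the removed header above could be used to enumerate the objects in a container roughly as follows. Passing 0 for cmd_flags and the surrounding error handling are assumptions; the function name list_dprc_objects is hypothetical.]

#include <stdint.h>
#include <stdio.h>
/* Assumes the fsl_dprc.h header shown above is available on the include path. */
#include "fsl_dprc.h"

/* Open a container, print the type and ID of every object it holds, then
 * close the control session again. Returns 0 on success. */
static int list_dprc_objects(struct fsl_mc_io *mc_io, int container_id)
{
	struct dprc_obj_desc desc;
	uint16_t token;
	int obj_count, i, err;

	err = dprc_open(mc_io, 0, container_id, &token);
	if (err)
		return err;

	err = dprc_get_obj_count(mc_io, 0, token, &obj_count);
	if (err)
		goto out;

	for (i = 0; i < obj_count; i++) {
		err = dprc_get_obj(mc_io, 0, token, i, &desc);
		if (err)
			goto out;
		printf("obj %d: type=%s id=%d\n", i, desc.type, desc.id);
	}
out:
	dprc_close(mc_io, 0, token);
	return err;
}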
-diff --git a/drivers/net/dpaa2/mc/fsl_dprc_cmd.h b/drivers/net/dpaa2/mc/fsl_dprc_cmd.h
-new file mode 100644
-index 0000000..469e286
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/fsl_dprc_cmd.h
-@@ -0,0 +1,755 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef _FSL_DPRC_CMD_H
-+#define _FSL_DPRC_CMD_H
-+
-+/* DPRC Version */
-+#define DPRC_VER_MAJOR 5
-+#define DPRC_VER_MINOR 1
-+
-+/* Command IDs */
-+#define DPRC_CMDID_CLOSE 0x800
-+#define DPRC_CMDID_OPEN 0x805
-+#define DPRC_CMDID_CREATE 0x905
-+
-+#define DPRC_CMDID_GET_ATTR 0x004
-+#define DPRC_CMDID_RESET_CONT 0x005
-+
-+#define DPRC_CMDID_SET_IRQ 0x010
-+#define DPRC_CMDID_GET_IRQ 0x011
-+#define DPRC_CMDID_SET_IRQ_ENABLE 0x012
-+#define DPRC_CMDID_GET_IRQ_ENABLE 0x013
-+#define DPRC_CMDID_SET_IRQ_MASK 0x014
-+#define DPRC_CMDID_GET_IRQ_MASK 0x015
-+#define DPRC_CMDID_GET_IRQ_STATUS 0x016
-+#define DPRC_CMDID_CLEAR_IRQ_STATUS 0x017
-+
-+#define DPRC_CMDID_CREATE_CONT 0x151
-+#define DPRC_CMDID_DESTROY_CONT 0x152
-+#define DPRC_CMDID_GET_CONT_ID 0x830
-+#define DPRC_CMDID_SET_RES_QUOTA 0x155
-+#define DPRC_CMDID_GET_RES_QUOTA 0x156
-+#define DPRC_CMDID_ASSIGN 0x157
-+#define DPRC_CMDID_UNASSIGN 0x158
-+#define DPRC_CMDID_GET_OBJ_COUNT 0x159
-+#define DPRC_CMDID_GET_OBJ 0x15A
-+#define DPRC_CMDID_GET_RES_COUNT 0x15B
-+#define DPRC_CMDID_GET_RES_IDS 0x15C
-+#define DPRC_CMDID_GET_OBJ_REG 0x15E
-+#define DPRC_CMDID_SET_OBJ_IRQ 0x15F
-+#define DPRC_CMDID_GET_OBJ_IRQ 0x160
-+#define DPRC_CMDID_SET_OBJ_LABEL 0x161
-+#define DPRC_CMDID_GET_OBJ_DESC 0x162
-+
-+#define DPRC_CMDID_CONNECT 0x167
-+#define DPRC_CMDID_DISCONNECT 0x168
-+#define DPRC_CMDID_GET_POOL 0x169
-+#define DPRC_CMDID_GET_POOL_COUNT 0x16A
-+
-+#define DPRC_CMDID_GET_CONNECTION 0x16C
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_RSP_GET_CONTAINER_ID(cmd, container_id) \
-+ MC_RSP_OP(cmd, 0, 0, 32, int, container_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_CMD_OPEN(cmd, container_id) \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, container_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_CMD_CREATE_CONTAINER(cmd, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, cfg->icid); \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, cfg->options); \
-+ MC_CMD_OP(cmd, 1, 32, 32, int, cfg->portal_id); \
-+ MC_CMD_OP(cmd, 2, 0, 8, char, cfg->label[0]);\
-+ MC_CMD_OP(cmd, 2, 8, 8, char, cfg->label[1]);\
-+ MC_CMD_OP(cmd, 2, 16, 8, char, cfg->label[2]);\
-+ MC_CMD_OP(cmd, 2, 24, 8, char, cfg->label[3]);\
-+ MC_CMD_OP(cmd, 2, 32, 8, char, cfg->label[4]);\
-+ MC_CMD_OP(cmd, 2, 40, 8, char, cfg->label[5]);\
-+ MC_CMD_OP(cmd, 2, 48, 8, char, cfg->label[6]);\
-+ MC_CMD_OP(cmd, 2, 56, 8, char, cfg->label[7]);\
-+ MC_CMD_OP(cmd, 3, 0, 8, char, cfg->label[8]);\
-+ MC_CMD_OP(cmd, 3, 8, 8, char, cfg->label[9]);\
-+ MC_CMD_OP(cmd, 3, 16, 8, char, cfg->label[10]);\
-+ MC_CMD_OP(cmd, 3, 24, 8, char, cfg->label[11]);\
-+ MC_CMD_OP(cmd, 3, 32, 8, char, cfg->label[12]);\
-+ MC_CMD_OP(cmd, 3, 40, 8, char, cfg->label[13]);\
-+ MC_CMD_OP(cmd, 3, 48, 8, char, cfg->label[14]);\
-+ MC_CMD_OP(cmd, 3, 56, 8, char, cfg->label[15]);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_RSP_CREATE_CONTAINER(cmd, child_container_id, child_portal_offset)\
-+do { \
-+ MC_RSP_OP(cmd, 1, 0, 32, int, child_container_id); \
-+ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, child_portal_offset);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_CMD_DESTROY_CONTAINER(cmd, child_container_id) \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, child_container_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_CMD_RESET_CONTAINER(cmd, child_container_id) \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, child_container_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
-+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_CMD_GET_IRQ(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_RSP_GET_IRQ(cmd, type, irq_cfg) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
-+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
-+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
-+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_RSP_GET_IRQ_ENABLE(cmd, en) \
-+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_CMD_GET_IRQ_MASK(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_RSP_GET_IRQ_MASK(cmd, mask) \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_RSP_GET_IRQ_STATUS(cmd, status) \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_RSP_GET_ATTRIBUTES(cmd, attr) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->container_id); \
-+ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->icid); \
-+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->options);\
-+ MC_RSP_OP(cmd, 1, 32, 32, int, attr->portal_id); \
-+ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, attr->version.major);\
-+ MC_RSP_OP(cmd, 2, 16, 16, uint16_t, attr->version.minor);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_CMD_SET_RES_QUOTA(cmd, child_container_id, type, quota) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, child_container_id); \
-+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, quota);\
-+ MC_CMD_OP(cmd, 1, 0, 8, char, type[0]);\
-+ MC_CMD_OP(cmd, 1, 8, 8, char, type[1]);\
-+ MC_CMD_OP(cmd, 1, 16, 8, char, type[2]);\
-+ MC_CMD_OP(cmd, 1, 24, 8, char, type[3]);\
-+ MC_CMD_OP(cmd, 1, 32, 8, char, type[4]);\
-+ MC_CMD_OP(cmd, 1, 40, 8, char, type[5]);\
-+ MC_CMD_OP(cmd, 1, 48, 8, char, type[6]);\
-+ MC_CMD_OP(cmd, 1, 56, 8, char, type[7]);\
-+ MC_CMD_OP(cmd, 2, 0, 8, char, type[8]);\
-+ MC_CMD_OP(cmd, 2, 8, 8, char, type[9]);\
-+ MC_CMD_OP(cmd, 2, 16, 8, char, type[10]);\
-+ MC_CMD_OP(cmd, 2, 24, 8, char, type[11]);\
-+ MC_CMD_OP(cmd, 2, 32, 8, char, type[12]);\
-+ MC_CMD_OP(cmd, 2, 40, 8, char, type[13]);\
-+ MC_CMD_OP(cmd, 2, 48, 8, char, type[14]);\
-+ MC_CMD_OP(cmd, 2, 56, 8, char, type[15]);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_CMD_GET_RES_QUOTA(cmd, child_container_id, type) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, child_container_id); \
-+ MC_CMD_OP(cmd, 1, 0, 8, char, type[0]);\
-+ MC_CMD_OP(cmd, 1, 8, 8, char, type[1]);\
-+ MC_CMD_OP(cmd, 1, 16, 8, char, type[2]);\
-+ MC_CMD_OP(cmd, 1, 24, 8, char, type[3]);\
-+ MC_CMD_OP(cmd, 1, 32, 8, char, type[4]);\
-+ MC_CMD_OP(cmd, 1, 40, 8, char, type[5]);\
-+ MC_CMD_OP(cmd, 1, 48, 8, char, type[6]);\
-+ MC_CMD_OP(cmd, 1, 56, 8, char, type[7]);\
-+ MC_CMD_OP(cmd, 2, 0, 8, char, type[8]);\
-+ MC_CMD_OP(cmd, 2, 8, 8, char, type[9]);\
-+ MC_CMD_OP(cmd, 2, 16, 8, char, type[10]);\
-+ MC_CMD_OP(cmd, 2, 24, 8, char, type[11]);\
-+ MC_CMD_OP(cmd, 2, 32, 8, char, type[12]);\
-+ MC_CMD_OP(cmd, 2, 40, 8, char, type[13]);\
-+ MC_CMD_OP(cmd, 2, 48, 8, char, type[14]);\
-+ MC_CMD_OP(cmd, 2, 56, 8, char, type[15]);\
-+} while (0)
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_RSP_GET_RES_QUOTA(cmd, quota) \
-+ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, quota)
-+
-+/* param, offset, width, type, arg_name */
-+#define DPRC_CMD_ASSIGN(cmd, container_id, res_req) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, container_id); \
-+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, res_req->options);\
-+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, res_req->num); \
-+ MC_CMD_OP(cmd, 1, 32, 32, int, res_req->id_base_align); \
-+ MC_CMD_OP(cmd, 2, 0, 8, char, res_req->type[0]);\
-+ MC_CMD_OP(cmd, 2, 8, 8, char, res_req->type[1]);\
-+ MC_CMD_OP(cmd, 2, 16, 8, char, res_req->type[2]);\
-+ MC_CMD_OP(cmd, 2, 24, 8, char, res_req->type[3]);\
-+ MC_CMD_OP(cmd, 2, 32, 8, char, res_req->type[4]);\
-+ MC_CMD_OP(cmd, 2, 40, 8, char, res_req->type[5]);\
-+ MC_CMD_OP(cmd, 2, 48, 8, char, res_req->type[6]);\
-+ MC_CMD_OP(cmd, 2, 56, 8, char, res_req->type[7]);\
-+ MC_CMD_OP(cmd, 3, 0, 8, char, res_req->type[8]);\
-+ MC_CMD_OP(cmd, 3, 8, 8, char, res_req->type[9]);\
-+ MC_CMD_OP(cmd, 3, 16, 8, char, res_req->type[10]);\
-+ MC_CMD_OP(cmd, 3, 24, 8, char, res_req->type[11]);\
-+ MC_CMD_OP(cmd, 3, 32, 8, char, res_req->type[12]);\
-+ MC_CMD_OP(cmd, 3, 40, 8, char, res_req->type[13]);\
-+ MC_CMD_OP(cmd, 3, 48, 8, char, res_req->type[14]);\
-+ MC_CMD_OP(cmd, 3, 56, 8, char, res_req->type[15]);\
-+} while (0)
-+
-+/* param, offset, width, type, arg_name */
-+#define DPRC_CMD_UNASSIGN(cmd, child_container_id, res_req) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, child_container_id); \
-+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, res_req->options);\
-+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, res_req->num); \
-+ MC_CMD_OP(cmd, 1, 32, 32, int, res_req->id_base_align); \
-+ MC_CMD_OP(cmd, 2, 0, 8, char, res_req->type[0]);\
-+ MC_CMD_OP(cmd, 2, 8, 8, char, res_req->type[1]);\
-+ MC_CMD_OP(cmd, 2, 16, 8, char, res_req->type[2]);\
-+ MC_CMD_OP(cmd, 2, 24, 8, char, res_req->type[3]);\
-+ MC_CMD_OP(cmd, 2, 32, 8, char, res_req->type[4]);\
-+ MC_CMD_OP(cmd, 2, 40, 8, char, res_req->type[5]);\
-+ MC_CMD_OP(cmd, 2, 48, 8, char, res_req->type[6]);\
-+ MC_CMD_OP(cmd, 2, 56, 8, char, res_req->type[7]);\
-+ MC_CMD_OP(cmd, 3, 0, 8, char, res_req->type[8]);\
-+ MC_CMD_OP(cmd, 3, 8, 8, char, res_req->type[9]);\
-+ MC_CMD_OP(cmd, 3, 16, 8, char, res_req->type[10]);\
-+ MC_CMD_OP(cmd, 3, 24, 8, char, res_req->type[11]);\
-+ MC_CMD_OP(cmd, 3, 32, 8, char, res_req->type[12]);\
-+ MC_CMD_OP(cmd, 3, 40, 8, char, res_req->type[13]);\
-+ MC_CMD_OP(cmd, 3, 48, 8, char, res_req->type[14]);\
-+ MC_CMD_OP(cmd, 3, 56, 8, char, res_req->type[15]);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_RSP_GET_POOL_COUNT(cmd, pool_count) \
-+ MC_RSP_OP(cmd, 0, 0, 32, int, pool_count)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_CMD_GET_POOL(cmd, pool_index) \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, pool_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_RSP_GET_POOL(cmd, type) \
-+do { \
-+ MC_RSP_OP(cmd, 1, 0, 8, char, type[0]);\
-+ MC_RSP_OP(cmd, 1, 8, 8, char, type[1]);\
-+ MC_RSP_OP(cmd, 1, 16, 8, char, type[2]);\
-+ MC_RSP_OP(cmd, 1, 24, 8, char, type[3]);\
-+ MC_RSP_OP(cmd, 1, 32, 8, char, type[4]);\
-+ MC_RSP_OP(cmd, 1, 40, 8, char, type[5]);\
-+ MC_RSP_OP(cmd, 1, 48, 8, char, type[6]);\
-+ MC_RSP_OP(cmd, 1, 56, 8, char, type[7]);\
-+ MC_RSP_OP(cmd, 2, 0, 8, char, type[8]);\
-+ MC_RSP_OP(cmd, 2, 8, 8, char, type[9]);\
-+ MC_RSP_OP(cmd, 2, 16, 8, char, type[10]);\
-+ MC_RSP_OP(cmd, 2, 24, 8, char, type[11]);\
-+ MC_RSP_OP(cmd, 2, 32, 8, char, type[12]);\
-+ MC_RSP_OP(cmd, 2, 40, 8, char, type[13]);\
-+ MC_RSP_OP(cmd, 2, 48, 8, char, type[14]);\
-+ MC_RSP_OP(cmd, 2, 56, 8, char, type[15]);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_RSP_GET_OBJ_COUNT(cmd, obj_count) \
-+ MC_RSP_OP(cmd, 0, 32, 32, int, obj_count)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_CMD_GET_OBJ(cmd, obj_index) \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, obj_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_RSP_GET_OBJ(cmd, obj_desc) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 32, 32, int, obj_desc->id); \
-+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, obj_desc->vendor); \
-+ MC_RSP_OP(cmd, 1, 16, 8, uint8_t, obj_desc->irq_count); \
-+ MC_RSP_OP(cmd, 1, 24, 8, uint8_t, obj_desc->region_count); \
-+ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, obj_desc->state);\
-+ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, obj_desc->ver_major);\
-+ MC_RSP_OP(cmd, 2, 16, 16, uint16_t, obj_desc->ver_minor);\
-+ MC_RSP_OP(cmd, 2, 32, 16, uint16_t, obj_desc->flags); \
-+ MC_RSP_OP(cmd, 3, 0, 8, char, obj_desc->type[0]);\
-+ MC_RSP_OP(cmd, 3, 8, 8, char, obj_desc->type[1]);\
-+ MC_RSP_OP(cmd, 3, 16, 8, char, obj_desc->type[2]);\
-+ MC_RSP_OP(cmd, 3, 24, 8, char, obj_desc->type[3]);\
-+ MC_RSP_OP(cmd, 3, 32, 8, char, obj_desc->type[4]);\
-+ MC_RSP_OP(cmd, 3, 40, 8, char, obj_desc->type[5]);\
-+ MC_RSP_OP(cmd, 3, 48, 8, char, obj_desc->type[6]);\
-+ MC_RSP_OP(cmd, 3, 56, 8, char, obj_desc->type[7]);\
-+ MC_RSP_OP(cmd, 4, 0, 8, char, obj_desc->type[8]);\
-+ MC_RSP_OP(cmd, 4, 8, 8, char, obj_desc->type[9]);\
-+ MC_RSP_OP(cmd, 4, 16, 8, char, obj_desc->type[10]);\
-+ MC_RSP_OP(cmd, 4, 24, 8, char, obj_desc->type[11]);\
-+ MC_RSP_OP(cmd, 4, 32, 8, char, obj_desc->type[12]);\
-+ MC_RSP_OP(cmd, 4, 40, 8, char, obj_desc->type[13]);\
-+ MC_RSP_OP(cmd, 4, 48, 8, char, obj_desc->type[14]);\
-+ MC_RSP_OP(cmd, 4, 56, 8, char, obj_desc->type[15]);\
-+ MC_RSP_OP(cmd, 5, 0, 8, char, obj_desc->label[0]);\
-+ MC_RSP_OP(cmd, 5, 8, 8, char, obj_desc->label[1]);\
-+ MC_RSP_OP(cmd, 5, 16, 8, char, obj_desc->label[2]);\
-+ MC_RSP_OP(cmd, 5, 24, 8, char, obj_desc->label[3]);\
-+ MC_RSP_OP(cmd, 5, 32, 8, char, obj_desc->label[4]);\
-+ MC_RSP_OP(cmd, 5, 40, 8, char, obj_desc->label[5]);\
-+ MC_RSP_OP(cmd, 5, 48, 8, char, obj_desc->label[6]);\
-+ MC_RSP_OP(cmd, 5, 56, 8, char, obj_desc->label[7]);\
-+ MC_RSP_OP(cmd, 6, 0, 8, char, obj_desc->label[8]);\
-+ MC_RSP_OP(cmd, 6, 8, 8, char, obj_desc->label[9]);\
-+ MC_RSP_OP(cmd, 6, 16, 8, char, obj_desc->label[10]);\
-+ MC_RSP_OP(cmd, 6, 24, 8, char, obj_desc->label[11]);\
-+ MC_RSP_OP(cmd, 6, 32, 8, char, obj_desc->label[12]);\
-+ MC_RSP_OP(cmd, 6, 40, 8, char, obj_desc->label[13]);\
-+ MC_RSP_OP(cmd, 6, 48, 8, char, obj_desc->label[14]);\
-+ MC_RSP_OP(cmd, 6, 56, 8, char, obj_desc->label[15]);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_CMD_GET_OBJ_DESC(cmd, obj_type, obj_id) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, obj_id);\
-+ MC_CMD_OP(cmd, 1, 0, 8, char, obj_type[0]);\
-+ MC_CMD_OP(cmd, 1, 8, 8, char, obj_type[1]);\
-+ MC_CMD_OP(cmd, 1, 16, 8, char, obj_type[2]);\
-+ MC_CMD_OP(cmd, 1, 24, 8, char, obj_type[3]);\
-+ MC_CMD_OP(cmd, 1, 32, 8, char, obj_type[4]);\
-+ MC_CMD_OP(cmd, 1, 40, 8, char, obj_type[5]);\
-+ MC_CMD_OP(cmd, 1, 48, 8, char, obj_type[6]);\
-+ MC_CMD_OP(cmd, 1, 56, 8, char, obj_type[7]);\
-+ MC_CMD_OP(cmd, 2, 0, 8, char, obj_type[8]);\
-+ MC_CMD_OP(cmd, 2, 8, 8, char, obj_type[9]);\
-+ MC_CMD_OP(cmd, 2, 16, 8, char, obj_type[10]);\
-+ MC_CMD_OP(cmd, 2, 24, 8, char, obj_type[11]);\
-+ MC_CMD_OP(cmd, 2, 32, 8, char, obj_type[12]);\
-+ MC_CMD_OP(cmd, 2, 40, 8, char, obj_type[13]);\
-+ MC_CMD_OP(cmd, 2, 48, 8, char, obj_type[14]);\
-+ MC_CMD_OP(cmd, 2, 56, 8, char, obj_type[15]);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_RSP_GET_OBJ_DESC(cmd, obj_desc) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 32, 32, int, obj_desc->id); \
-+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, obj_desc->vendor); \
-+ MC_RSP_OP(cmd, 1, 16, 8, uint8_t, obj_desc->irq_count); \
-+ MC_RSP_OP(cmd, 1, 24, 8, uint8_t, obj_desc->region_count); \
-+ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, obj_desc->state);\
-+ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, obj_desc->ver_major);\
-+ MC_RSP_OP(cmd, 2, 16, 16, uint16_t, obj_desc->ver_minor);\
-+ MC_RSP_OP(cmd, 2, 32, 16, uint16_t, obj_desc->flags); \
-+ MC_RSP_OP(cmd, 3, 0, 8, char, obj_desc->type[0]);\
-+ MC_RSP_OP(cmd, 3, 8, 8, char, obj_desc->type[1]);\
-+ MC_RSP_OP(cmd, 3, 16, 8, char, obj_desc->type[2]);\
-+ MC_RSP_OP(cmd, 3, 24, 8, char, obj_desc->type[3]);\
-+ MC_RSP_OP(cmd, 3, 32, 8, char, obj_desc->type[4]);\
-+ MC_RSP_OP(cmd, 3, 40, 8, char, obj_desc->type[5]);\
-+ MC_RSP_OP(cmd, 3, 48, 8, char, obj_desc->type[6]);\
-+ MC_RSP_OP(cmd, 3, 56, 8, char, obj_desc->type[7]);\
-+ MC_RSP_OP(cmd, 4, 0, 8, char, obj_desc->type[8]);\
-+ MC_RSP_OP(cmd, 4, 8, 8, char, obj_desc->type[9]);\
-+ MC_RSP_OP(cmd, 4, 16, 8, char, obj_desc->type[10]);\
-+ MC_RSP_OP(cmd, 4, 24, 8, char, obj_desc->type[11]);\
-+ MC_RSP_OP(cmd, 4, 32, 8, char, obj_desc->type[12]);\
-+ MC_RSP_OP(cmd, 4, 40, 8, char, obj_desc->type[13]);\
-+ MC_RSP_OP(cmd, 4, 48, 8, char, obj_desc->type[14]);\
-+ MC_RSP_OP(cmd, 4, 56, 8, char, obj_desc->type[15]);\
-+ MC_RSP_OP(cmd, 5, 0, 8, char, obj_desc->label[0]);\
-+ MC_RSP_OP(cmd, 5, 8, 8, char, obj_desc->label[1]);\
-+ MC_RSP_OP(cmd, 5, 16, 8, char, obj_desc->label[2]);\
-+ MC_RSP_OP(cmd, 5, 24, 8, char, obj_desc->label[3]);\
-+ MC_RSP_OP(cmd, 5, 32, 8, char, obj_desc->label[4]);\
-+ MC_RSP_OP(cmd, 5, 40, 8, char, obj_desc->label[5]);\
-+ MC_RSP_OP(cmd, 5, 48, 8, char, obj_desc->label[6]);\
-+ MC_RSP_OP(cmd, 5, 56, 8, char, obj_desc->label[7]);\
-+ MC_RSP_OP(cmd, 6, 0, 8, char, obj_desc->label[8]);\
-+ MC_RSP_OP(cmd, 6, 8, 8, char, obj_desc->label[9]);\
-+ MC_RSP_OP(cmd, 6, 16, 8, char, obj_desc->label[10]);\
-+ MC_RSP_OP(cmd, 6, 24, 8, char, obj_desc->label[11]);\
-+ MC_RSP_OP(cmd, 6, 32, 8, char, obj_desc->label[12]);\
-+ MC_RSP_OP(cmd, 6, 40, 8, char, obj_desc->label[13]);\
-+ MC_RSP_OP(cmd, 6, 48, 8, char, obj_desc->label[14]);\
-+ MC_RSP_OP(cmd, 6, 56, 8, char, obj_desc->label[15]);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_CMD_GET_RES_COUNT(cmd, type) \
-+do { \
-+ MC_CMD_OP(cmd, 1, 0, 8, char, type[0]);\
-+ MC_CMD_OP(cmd, 1, 8, 8, char, type[1]);\
-+ MC_CMD_OP(cmd, 1, 16, 8, char, type[2]);\
-+ MC_CMD_OP(cmd, 1, 24, 8, char, type[3]);\
-+ MC_CMD_OP(cmd, 1, 32, 8, char, type[4]);\
-+ MC_CMD_OP(cmd, 1, 40, 8, char, type[5]);\
-+ MC_CMD_OP(cmd, 1, 48, 8, char, type[6]);\
-+ MC_CMD_OP(cmd, 1, 56, 8, char, type[7]);\
-+ MC_CMD_OP(cmd, 2, 0, 8, char, type[8]);\
-+ MC_CMD_OP(cmd, 2, 8, 8, char, type[9]);\
-+ MC_CMD_OP(cmd, 2, 16, 8, char, type[10]);\
-+ MC_CMD_OP(cmd, 2, 24, 8, char, type[11]);\
-+ MC_CMD_OP(cmd, 2, 32, 8, char, type[12]);\
-+ MC_CMD_OP(cmd, 2, 40, 8, char, type[13]);\
-+ MC_CMD_OP(cmd, 2, 48, 8, char, type[14]);\
-+ MC_CMD_OP(cmd, 2, 56, 8, char, type[15]);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_RSP_GET_RES_COUNT(cmd, res_count) \
-+ MC_RSP_OP(cmd, 0, 0, 32, int, res_count)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_CMD_GET_RES_IDS(cmd, range_desc, type) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 42, 7, enum dprc_iter_status, \
-+ range_desc->iter_status); \
-+ MC_CMD_OP(cmd, 1, 0, 32, int, range_desc->base_id); \
-+ MC_CMD_OP(cmd, 1, 32, 32, int, range_desc->last_id);\
-+ MC_CMD_OP(cmd, 2, 0, 8, char, type[0]);\
-+ MC_CMD_OP(cmd, 2, 8, 8, char, type[1]);\
-+ MC_CMD_OP(cmd, 2, 16, 8, char, type[2]);\
-+ MC_CMD_OP(cmd, 2, 24, 8, char, type[3]);\
-+ MC_CMD_OP(cmd, 2, 32, 8, char, type[4]);\
-+ MC_CMD_OP(cmd, 2, 40, 8, char, type[5]);\
-+ MC_CMD_OP(cmd, 2, 48, 8, char, type[6]);\
-+ MC_CMD_OP(cmd, 2, 56, 8, char, type[7]);\
-+ MC_CMD_OP(cmd, 3, 0, 8, char, type[8]);\
-+ MC_CMD_OP(cmd, 3, 8, 8, char, type[9]);\
-+ MC_CMD_OP(cmd, 3, 16, 8, char, type[10]);\
-+ MC_CMD_OP(cmd, 3, 24, 8, char, type[11]);\
-+ MC_CMD_OP(cmd, 3, 32, 8, char, type[12]);\
-+ MC_CMD_OP(cmd, 3, 40, 8, char, type[13]);\
-+ MC_CMD_OP(cmd, 3, 48, 8, char, type[14]);\
-+ MC_CMD_OP(cmd, 3, 56, 8, char, type[15]);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_RSP_GET_RES_IDS(cmd, range_desc) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 42, 7, enum dprc_iter_status, \
-+ range_desc->iter_status);\
-+ MC_RSP_OP(cmd, 1, 0, 32, int, range_desc->base_id); \
-+ MC_RSP_OP(cmd, 1, 32, 32, int, range_desc->last_id);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_CMD_GET_OBJ_REGION(cmd, obj_type, obj_id, region_index) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, obj_id); \
-+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, region_index);\
-+ MC_CMD_OP(cmd, 3, 0, 8, char, obj_type[0]);\
-+ MC_CMD_OP(cmd, 3, 8, 8, char, obj_type[1]);\
-+ MC_CMD_OP(cmd, 3, 16, 8, char, obj_type[2]);\
-+ MC_CMD_OP(cmd, 3, 24, 8, char, obj_type[3]);\
-+ MC_CMD_OP(cmd, 3, 32, 8, char, obj_type[4]);\
-+ MC_CMD_OP(cmd, 3, 40, 8, char, obj_type[5]);\
-+ MC_CMD_OP(cmd, 3, 48, 8, char, obj_type[6]);\
-+ MC_CMD_OP(cmd, 3, 56, 8, char, obj_type[7]);\
-+ MC_CMD_OP(cmd, 4, 0, 8, char, obj_type[8]);\
-+ MC_CMD_OP(cmd, 4, 8, 8, char, obj_type[9]);\
-+ MC_CMD_OP(cmd, 4, 16, 8, char, obj_type[10]);\
-+ MC_CMD_OP(cmd, 4, 24, 8, char, obj_type[11]);\
-+ MC_CMD_OP(cmd, 4, 32, 8, char, obj_type[12]);\
-+ MC_CMD_OP(cmd, 4, 40, 8, char, obj_type[13]);\
-+ MC_CMD_OP(cmd, 4, 48, 8, char, obj_type[14]);\
-+ MC_CMD_OP(cmd, 4, 56, 8, char, obj_type[15]);\
-+} while (0)
-+
-+/* param, offset, width, type, arg_name */
-+#define DPRC_RSP_GET_OBJ_REGION(cmd, region_desc) \
-+do { \
-+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, region_desc->base_offset);\
-+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, region_desc->size); \
-+ MC_RSP_OP(cmd, 2, 32, 4, enum dprc_region_type, region_desc->type);\
-+ MC_RSP_OP(cmd, 3, 0, 32, uint32_t, region_desc->flags);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_CMD_SET_OBJ_LABEL(cmd, obj_type, obj_id, label) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, obj_id); \
-+ MC_CMD_OP(cmd, 1, 0, 8, char, label[0]);\
-+ MC_CMD_OP(cmd, 1, 8, 8, char, label[1]);\
-+ MC_CMD_OP(cmd, 1, 16, 8, char, label[2]);\
-+ MC_CMD_OP(cmd, 1, 24, 8, char, label[3]);\
-+ MC_CMD_OP(cmd, 1, 32, 8, char, label[4]);\
-+ MC_CMD_OP(cmd, 1, 40, 8, char, label[5]);\
-+ MC_CMD_OP(cmd, 1, 48, 8, char, label[6]);\
-+ MC_CMD_OP(cmd, 1, 56, 8, char, label[7]);\
-+ MC_CMD_OP(cmd, 2, 0, 8, char, label[8]);\
-+ MC_CMD_OP(cmd, 2, 8, 8, char, label[9]);\
-+ MC_CMD_OP(cmd, 2, 16, 8, char, label[10]);\
-+ MC_CMD_OP(cmd, 2, 24, 8, char, label[11]);\
-+ MC_CMD_OP(cmd, 2, 32, 8, char, label[12]);\
-+ MC_CMD_OP(cmd, 2, 40, 8, char, label[13]);\
-+ MC_CMD_OP(cmd, 2, 48, 8, char, label[14]);\
-+ MC_CMD_OP(cmd, 2, 56, 8, char, label[15]);\
-+ MC_CMD_OP(cmd, 3, 0, 8, char, obj_type[0]);\
-+ MC_CMD_OP(cmd, 3, 8, 8, char, obj_type[1]);\
-+ MC_CMD_OP(cmd, 3, 16, 8, char, obj_type[2]);\
-+ MC_CMD_OP(cmd, 3, 24, 8, char, obj_type[3]);\
-+ MC_CMD_OP(cmd, 3, 32, 8, char, obj_type[4]);\
-+ MC_CMD_OP(cmd, 3, 40, 8, char, obj_type[5]);\
-+ MC_CMD_OP(cmd, 3, 48, 8, char, obj_type[6]);\
-+ MC_CMD_OP(cmd, 3, 56, 8, char, obj_type[7]);\
-+ MC_CMD_OP(cmd, 4, 0, 8, char, obj_type[8]);\
-+ MC_CMD_OP(cmd, 4, 8, 8, char, obj_type[9]);\
-+ MC_CMD_OP(cmd, 4, 16, 8, char, obj_type[10]);\
-+ MC_CMD_OP(cmd, 4, 24, 8, char, obj_type[11]);\
-+ MC_CMD_OP(cmd, 4, 32, 8, char, obj_type[12]);\
-+ MC_CMD_OP(cmd, 4, 40, 8, char, obj_type[13]);\
-+ MC_CMD_OP(cmd, 4, 48, 8, char, obj_type[14]);\
-+ MC_CMD_OP(cmd, 4, 56, 8, char, obj_type[15]);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_CMD_SET_OBJ_IRQ(cmd, obj_type, obj_id, irq_index, irq_cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
-+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
-+ MC_CMD_OP(cmd, 2, 32, 32, int, obj_id); \
-+ MC_CMD_OP(cmd, 3, 0, 8, char, obj_type[0]);\
-+ MC_CMD_OP(cmd, 3, 8, 8, char, obj_type[1]);\
-+ MC_CMD_OP(cmd, 3, 16, 8, char, obj_type[2]);\
-+ MC_CMD_OP(cmd, 3, 24, 8, char, obj_type[3]);\
-+ MC_CMD_OP(cmd, 3, 32, 8, char, obj_type[4]);\
-+ MC_CMD_OP(cmd, 3, 40, 8, char, obj_type[5]);\
-+ MC_CMD_OP(cmd, 3, 48, 8, char, obj_type[6]);\
-+ MC_CMD_OP(cmd, 3, 56, 8, char, obj_type[7]);\
-+ MC_CMD_OP(cmd, 4, 0, 8, char, obj_type[8]);\
-+ MC_CMD_OP(cmd, 4, 8, 8, char, obj_type[9]);\
-+ MC_CMD_OP(cmd, 4, 16, 8, char, obj_type[10]);\
-+ MC_CMD_OP(cmd, 4, 24, 8, char, obj_type[11]);\
-+ MC_CMD_OP(cmd, 4, 32, 8, char, obj_type[12]);\
-+ MC_CMD_OP(cmd, 4, 40, 8, char, obj_type[13]);\
-+ MC_CMD_OP(cmd, 4, 48, 8, char, obj_type[14]);\
-+ MC_CMD_OP(cmd, 4, 56, 8, char, obj_type[15]);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_CMD_GET_OBJ_IRQ(cmd, obj_type, obj_id, irq_index) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, obj_id); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
-+ MC_CMD_OP(cmd, 1, 0, 8, char, obj_type[0]);\
-+ MC_CMD_OP(cmd, 1, 8, 8, char, obj_type[1]);\
-+ MC_CMD_OP(cmd, 1, 16, 8, char, obj_type[2]);\
-+ MC_CMD_OP(cmd, 1, 24, 8, char, obj_type[3]);\
-+ MC_CMD_OP(cmd, 1, 32, 8, char, obj_type[4]);\
-+ MC_CMD_OP(cmd, 1, 40, 8, char, obj_type[5]);\
-+ MC_CMD_OP(cmd, 1, 48, 8, char, obj_type[6]);\
-+ MC_CMD_OP(cmd, 1, 56, 8, char, obj_type[7]);\
-+ MC_CMD_OP(cmd, 2, 0, 8, char, obj_type[8]);\
-+ MC_CMD_OP(cmd, 2, 8, 8, char, obj_type[9]);\
-+ MC_CMD_OP(cmd, 2, 16, 8, char, obj_type[10]);\
-+ MC_CMD_OP(cmd, 2, 24, 8, char, obj_type[11]);\
-+ MC_CMD_OP(cmd, 2, 32, 8, char, obj_type[12]);\
-+ MC_CMD_OP(cmd, 2, 40, 8, char, obj_type[13]);\
-+ MC_CMD_OP(cmd, 2, 48, 8, char, obj_type[14]);\
-+ MC_CMD_OP(cmd, 2, 56, 8, char, obj_type[15]);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_RSP_GET_OBJ_IRQ(cmd, type, irq_cfg) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
-+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
-+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
-+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_CMD_CONNECT(cmd, endpoint1, endpoint2, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, endpoint1->id); \
-+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, endpoint1->if_id); \
-+ MC_CMD_OP(cmd, 1, 0, 32, int, endpoint2->id); \
-+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, endpoint2->if_id); \
-+ MC_CMD_OP(cmd, 2, 0, 8, char, endpoint1->type[0]); \
-+ MC_CMD_OP(cmd, 2, 8, 8, char, endpoint1->type[1]); \
-+ MC_CMD_OP(cmd, 2, 16, 8, char, endpoint1->type[2]); \
-+ MC_CMD_OP(cmd, 2, 24, 8, char, endpoint1->type[3]); \
-+ MC_CMD_OP(cmd, 2, 32, 8, char, endpoint1->type[4]); \
-+ MC_CMD_OP(cmd, 2, 40, 8, char, endpoint1->type[5]); \
-+ MC_CMD_OP(cmd, 2, 48, 8, char, endpoint1->type[6]); \
-+ MC_CMD_OP(cmd, 2, 56, 8, char, endpoint1->type[7]); \
-+ MC_CMD_OP(cmd, 3, 0, 8, char, endpoint1->type[8]); \
-+ MC_CMD_OP(cmd, 3, 8, 8, char, endpoint1->type[9]); \
-+ MC_CMD_OP(cmd, 3, 16, 8, char, endpoint1->type[10]); \
-+ MC_CMD_OP(cmd, 3, 24, 8, char, endpoint1->type[11]); \
-+ MC_CMD_OP(cmd, 3, 32, 8, char, endpoint1->type[12]); \
-+ MC_CMD_OP(cmd, 3, 40, 8, char, endpoint1->type[13]); \
-+ MC_CMD_OP(cmd, 3, 48, 8, char, endpoint1->type[14]); \
-+ MC_CMD_OP(cmd, 3, 56, 8, char, endpoint1->type[15]); \
-+ MC_CMD_OP(cmd, 4, 0, 32, uint32_t, cfg->max_rate); \
-+ MC_CMD_OP(cmd, 4, 32, 32, uint32_t, cfg->committed_rate); \
-+ MC_CMD_OP(cmd, 5, 0, 8, char, endpoint2->type[0]); \
-+ MC_CMD_OP(cmd, 5, 8, 8, char, endpoint2->type[1]); \
-+ MC_CMD_OP(cmd, 5, 16, 8, char, endpoint2->type[2]); \
-+ MC_CMD_OP(cmd, 5, 24, 8, char, endpoint2->type[3]); \
-+ MC_CMD_OP(cmd, 5, 32, 8, char, endpoint2->type[4]); \
-+ MC_CMD_OP(cmd, 5, 40, 8, char, endpoint2->type[5]); \
-+ MC_CMD_OP(cmd, 5, 48, 8, char, endpoint2->type[6]); \
-+ MC_CMD_OP(cmd, 5, 56, 8, char, endpoint2->type[7]); \
-+ MC_CMD_OP(cmd, 6, 0, 8, char, endpoint2->type[8]); \
-+ MC_CMD_OP(cmd, 6, 8, 8, char, endpoint2->type[9]); \
-+ MC_CMD_OP(cmd, 6, 16, 8, char, endpoint2->type[10]); \
-+ MC_CMD_OP(cmd, 6, 24, 8, char, endpoint2->type[11]); \
-+ MC_CMD_OP(cmd, 6, 32, 8, char, endpoint2->type[12]); \
-+ MC_CMD_OP(cmd, 6, 40, 8, char, endpoint2->type[13]); \
-+ MC_CMD_OP(cmd, 6, 48, 8, char, endpoint2->type[14]); \
-+ MC_CMD_OP(cmd, 6, 56, 8, char, endpoint2->type[15]); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_CMD_DISCONNECT(cmd, endpoint) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, endpoint->id); \
-+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, endpoint->if_id); \
-+ MC_CMD_OP(cmd, 1, 0, 8, char, endpoint->type[0]); \
-+ MC_CMD_OP(cmd, 1, 8, 8, char, endpoint->type[1]); \
-+ MC_CMD_OP(cmd, 1, 16, 8, char, endpoint->type[2]); \
-+ MC_CMD_OP(cmd, 1, 24, 8, char, endpoint->type[3]); \
-+ MC_CMD_OP(cmd, 1, 32, 8, char, endpoint->type[4]); \
-+ MC_CMD_OP(cmd, 1, 40, 8, char, endpoint->type[5]); \
-+ MC_CMD_OP(cmd, 1, 48, 8, char, endpoint->type[6]); \
-+ MC_CMD_OP(cmd, 1, 56, 8, char, endpoint->type[7]); \
-+ MC_CMD_OP(cmd, 2, 0, 8, char, endpoint->type[8]); \
-+ MC_CMD_OP(cmd, 2, 8, 8, char, endpoint->type[9]); \
-+ MC_CMD_OP(cmd, 2, 16, 8, char, endpoint->type[10]); \
-+ MC_CMD_OP(cmd, 2, 24, 8, char, endpoint->type[11]); \
-+ MC_CMD_OP(cmd, 2, 32, 8, char, endpoint->type[12]); \
-+ MC_CMD_OP(cmd, 2, 40, 8, char, endpoint->type[13]); \
-+ MC_CMD_OP(cmd, 2, 48, 8, char, endpoint->type[14]); \
-+ MC_CMD_OP(cmd, 2, 56, 8, char, endpoint->type[15]); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_CMD_GET_CONNECTION(cmd, endpoint1) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, endpoint1->id); \
-+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, endpoint1->if_id); \
-+ MC_CMD_OP(cmd, 1, 0, 8, char, endpoint1->type[0]); \
-+ MC_CMD_OP(cmd, 1, 8, 8, char, endpoint1->type[1]); \
-+ MC_CMD_OP(cmd, 1, 16, 8, char, endpoint1->type[2]); \
-+ MC_CMD_OP(cmd, 1, 24, 8, char, endpoint1->type[3]); \
-+ MC_CMD_OP(cmd, 1, 32, 8, char, endpoint1->type[4]); \
-+ MC_CMD_OP(cmd, 1, 40, 8, char, endpoint1->type[5]); \
-+ MC_CMD_OP(cmd, 1, 48, 8, char, endpoint1->type[6]); \
-+ MC_CMD_OP(cmd, 1, 56, 8, char, endpoint1->type[7]); \
-+ MC_CMD_OP(cmd, 2, 0, 8, char, endpoint1->type[8]); \
-+ MC_CMD_OP(cmd, 2, 8, 8, char, endpoint1->type[9]); \
-+ MC_CMD_OP(cmd, 2, 16, 8, char, endpoint1->type[10]); \
-+ MC_CMD_OP(cmd, 2, 24, 8, char, endpoint1->type[11]); \
-+ MC_CMD_OP(cmd, 2, 32, 8, char, endpoint1->type[12]); \
-+ MC_CMD_OP(cmd, 2, 40, 8, char, endpoint1->type[13]); \
-+ MC_CMD_OP(cmd, 2, 48, 8, char, endpoint1->type[14]); \
-+ MC_CMD_OP(cmd, 2, 56, 8, char, endpoint1->type[15]); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRC_RSP_GET_CONNECTION(cmd, endpoint2, state) \
-+do { \
-+ MC_RSP_OP(cmd, 3, 0, 32, int, endpoint2->id); \
-+ MC_RSP_OP(cmd, 3, 32, 16, uint16_t, endpoint2->if_id); \
-+ MC_RSP_OP(cmd, 4, 0, 8, char, endpoint2->type[0]); \
-+ MC_RSP_OP(cmd, 4, 8, 8, char, endpoint2->type[1]); \
-+ MC_RSP_OP(cmd, 4, 16, 8, char, endpoint2->type[2]); \
-+ MC_RSP_OP(cmd, 4, 24, 8, char, endpoint2->type[3]); \
-+ MC_RSP_OP(cmd, 4, 32, 8, char, endpoint2->type[4]); \
-+ MC_RSP_OP(cmd, 4, 40, 8, char, endpoint2->type[5]); \
-+ MC_RSP_OP(cmd, 4, 48, 8, char, endpoint2->type[6]); \
-+ MC_RSP_OP(cmd, 4, 56, 8, char, endpoint2->type[7]); \
-+ MC_RSP_OP(cmd, 5, 0, 8, char, endpoint2->type[8]); \
-+ MC_RSP_OP(cmd, 5, 8, 8, char, endpoint2->type[9]); \
-+ MC_RSP_OP(cmd, 5, 16, 8, char, endpoint2->type[10]); \
-+ MC_RSP_OP(cmd, 5, 24, 8, char, endpoint2->type[11]); \
-+ MC_RSP_OP(cmd, 5, 32, 8, char, endpoint2->type[12]); \
-+ MC_RSP_OP(cmd, 5, 40, 8, char, endpoint2->type[13]); \
-+ MC_RSP_OP(cmd, 5, 48, 8, char, endpoint2->type[14]); \
-+ MC_RSP_OP(cmd, 5, 56, 8, char, endpoint2->type[15]); \
-+ MC_RSP_OP(cmd, 6, 0, 32, int, state); \
-+} while (0)
-+
-+#endif /* _FSL_DPRC_CMD_H */
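The DPRC_CMD_*/DPRC_RSP_* helpers in the header above are thin wrappers around MC_CMD_OP()/MC_RSP_OP(), which pack (or unpack) a field of a given bit width at a given bit offset inside one of the command's 64-bit parameter words; that is why a 16-byte type[] or label[] string is spread across eight 8-bit fields per word. A rough, self-contained illustration of the packing idea (the encoder below is a simplified stand-in, not the real fsl_mc_cmd.h definition):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for an MC command: a header plus 64-bit params. */
struct mc_command_sketch {
	uint64_t header;
	uint64_t params[7];
};

/* Shift 'val' into bits [offset, offset + width) of a 64-bit word. */
static uint64_t enc_field(int offset, int width, uint64_t val)
{
	uint64_t mask = (width == 64) ? ~0ULL : ((1ULL << width) - 1);

	return (val & mask) << offset;
}

int main(void)
{
	struct mc_command_sketch cmd = { 0 };

	/* Same layout as DPRC_CMD_GET_OBJ_IRQ(): a 32-bit obj_id at
	 * param 0 offset 0, and an 8-bit irq_index at offset 32.     */
	int obj_id = 42;
	uint8_t irq_index = 1;

	cmd.params[0] |= enc_field(0, 32, (uint32_t)obj_id);
	cmd.params[0] |= enc_field(32, 8, irq_index);

	printf("param[0] = 0x%016" PRIx64 "\n", cmd.params[0]);
	return 0;
}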
-diff --git a/drivers/net/dpaa2/mc/fsl_dprtc.h b/drivers/net/dpaa2/mc/fsl_dprtc.h
-new file mode 100644
-index 0000000..2eb6edc
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/fsl_dprtc.h
-@@ -0,0 +1,434 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef __FSL_DPRTC_H
-+#define __FSL_DPRTC_H
-+
-+/* Data Path Real Time Counter API
-+ * Contains initialization APIs and runtime control APIs for RTC
-+ */
-+
-+struct fsl_mc_io;
-+
-+/**
-+ * Number of IRQs
-+ */
-+#define DPRTC_MAX_IRQ_NUM 1
-+#define DPRTC_IRQ_INDEX 0
-+
-+/**
-+ * Interrupt event masks:
-+ */
-+
-+/**
-+ * Interrupt event mask indicating an alarm event has occurred
-+ */
-+#define DPRTC_EVENT_ALARM 0x40000000
-+/**
-+ * Interrupt event mask indicating a periodic pulse event has occurred
-+ */
-+#define DPRTC_EVENT_PPS 0x08000000
-+
-+/**
-+ * dprtc_open() - Open a control session for the specified object.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @dprtc_id: DPRTC unique ID
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * This function can be used to open a control session for an
-+ * already created object; an object may have been declared in
-+ * the DPL or by calling the dprtc_create function.
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent commands for
-+ * this specific object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprtc_open(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ int dprtc_id,
-+ uint16_t *token);
-+
-+/**
-+ * dprtc_close() - Close the control session of the object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRTC object
-+ *
-+ * After this function is called, no further operations are
-+ * allowed on the object without opening a new control session.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprtc_close(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * struct dprtc_cfg - Structure representing DPRTC configuration
-+ * @options: placeholder
-+ */
-+struct dprtc_cfg {
-+ uint32_t options;
-+};
-+
-+/**
-+ * dprtc_create() - Create the DPRTC object.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @cfg: Configuration structure
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * Create the DPRTC object, allocate required resources and
-+ * perform required initialization.
-+ *
-+ * The object can be created either by declaring it in the
-+ * DPL file, or by calling this function.
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent calls to
-+ * this specific object. For objects that are created using the
-+ * DPL file, call dprtc_open function to get an authentication
-+ * token first.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprtc_create(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ const struct dprtc_cfg *cfg,
-+ uint16_t *token);
-+
-+/**
-+ * dprtc_destroy() - Destroy the DPRTC object and release all its resources.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRTC object
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dprtc_destroy(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * dprtc_set_clock_offset() - Sets the clock's offset
-+ * (usually relative to another clock).
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRTC object
-+ * @offset: New clock offset (in nanoseconds).
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprtc_set_clock_offset(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int64_t offset);
-+
-+/**
-+ * dprtc_set_freq_compensation() - Sets a new frequency compensation value.
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRTC object
-+ * @freq_compensation:
-+ * The new frequency compensation value to set.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint32_t freq_compensation);
-+
-+/**
-+ * dprtc_get_freq_compensation() - Retrieves the frequency compensation value
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRTC object
-+ * @freq_compensation:
-+ * Frequency compensation value
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint32_t *freq_compensation);
-+
-+/**
-+ * dprtc_get_time() - Returns the current RTC time.
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRTC object
-+ * @time: Current RTC time.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprtc_get_time(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint64_t *time);
-+
-+/**
-+ * dprtc_set_time() - Updates current RTC time.
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRTC object
-+ * @time: New RTC time.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprtc_set_time(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint64_t time);
-+
-+/**
-+ * dprtc_set_alarm() - Defines and sets alarm.
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRTC object
-+ * @time: In nanoseconds, the time when the alarm
-+ * should go off - must be a multiple of
-+ * 1 microsecond
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprtc_set_alarm(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint64_t time);
-+
-+/**
-+ * struct dprtc_irq_cfg - IRQ configuration
-+ * @addr: Address that must be written to signal a message-based interrupt
-+ * @val: Value to write into irq_addr address
-+ * @irq_num: A user defined number associated with this IRQ
-+ */
-+struct dprtc_irq_cfg {
-+ uint64_t addr;
-+ uint32_t val;
-+ int irq_num;
-+};
-+
-+/**
-+ * dprtc_set_irq() - Set IRQ information for the DPRTC to trigger an interrupt.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRTC object
-+ * @irq_index: Identifies the interrupt index to configure
-+ * @irq_cfg: IRQ configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprtc_set_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ struct dprtc_irq_cfg *irq_cfg);
-+
-+/**
-+ * dprtc_get_irq() - Get IRQ information from the DPRTC.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRTC object
-+ * @irq_index: The interrupt index to configure
-+ * @type: Interrupt type: 0 represents message interrupt
-+ * type (both irq_addr and irq_val are valid)
-+ * @irq_cfg: IRQ attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprtc_get_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ int *type,
-+ struct dprtc_irq_cfg *irq_cfg);
-+
-+/**
-+ * dprtc_set_irq_enable() - Set overall interrupt state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRTC object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Interrupt state - enable = 1, disable = 0
-+ *
-+ * Allows GPP software to control when interrupts are generated.
-+ * Each interrupt can have up to 32 causes. The enable/disable setting controls
-+ * the overall interrupt state: if the interrupt is disabled, no cause will
-+ * assert it.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprtc_set_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t en);
-+
-+/**
-+ * dprtc_get_irq_enable() - Get overall interrupt state
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRTC object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Returned interrupt state - enable = 1, disable = 0
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprtc_get_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t *en);
-+
-+/**
-+ * dprtc_set_irq_mask() - Set interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRTC object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: Event mask to trigger interrupt;
-+ * each bit:
-+ * 0 = ignore event
-+ * 1 = consider event for asserting IRQ
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprtc_set_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t mask);
-+
-+/**
-+ * dprtc_get_irq_mask() - Get interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRTC object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: Returned event mask to trigger interrupt
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprtc_get_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *mask);
-+
-+/**
-+ * dprtc_get_irq_status() - Get the current status of any pending interrupts.
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRTC object
-+ * @irq_index: The interrupt index to configure
-+ * @status: Returned interrupts status - one bit per cause:
-+ * 0 = no interrupt pending
-+ * 1 = interrupt pending
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprtc_get_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *status);
-+
-+/**
-+ * dprtc_clear_irq_status() - Clear a pending interrupt's status
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRTC object
-+ * @irq_index: The interrupt index to configure
-+ * @status: Bits to clear (W1C) - one bit per cause:
-+ * 0 = don't change
-+ * 1 = clear status bit
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprtc_clear_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t status);
-+
-+/**
-+ * struct dprtc_attr - Structure representing DPRTC attributes
-+ * @id: DPRTC object ID
-+ * @version: DPRTC version
-+ */
-+struct dprtc_attr {
-+ int id;
-+ /**
-+ * struct version - Structure representing DPRTC version
-+ * @major: DPRTC major version
-+ * @minor: DPRTC minor version
-+ */
-+ struct {
-+ uint16_t major;
-+ uint16_t minor;
-+ } version;
-+};
-+
-+/**
-+ * dprtc_get_attributes() - Retrieve DPRTC attributes.
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPRTC object
-+ * @attr: Returned object's attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dprtc_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dprtc_attr *attr);
-+
-+#endif /* __FSL_DPRTC_H */
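Taken together, the prototypes above describe the usual open/program/close flow for a DPRTC object. A minimal usage sketch, assuming the header above is still available, an MC portal ('mc_io') that was mapped elsewhere (e.g. by the bus/VFIO layer), and 0 passed as a placeholder for cmd_flags:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#include "fsl_dprtc.h"

int rtc_example(struct fsl_mc_io *mc_io, int dprtc_id)
{
	uint16_t token;
	uint64_t now;
	int err;

	/* Open a control session; the token identifies it afterwards. */
	err = dprtc_open(mc_io, 0, dprtc_id, &token);
	if (err)
		return err;

	/* Program the counter, then read it back. */
	err = dprtc_set_time(mc_io, 0, token, 0 /* nanoseconds */);
	if (!err)
		err = dprtc_get_time(mc_io, 0, token, &now);
	if (!err)
		printf("DPRTC time: %" PRIu64 " ns\n", now);

	dprtc_close(mc_io, 0, token);
	return err;
}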
-diff --git a/drivers/net/dpaa2/mc/fsl_dprtc_cmd.h b/drivers/net/dpaa2/mc/fsl_dprtc_cmd.h
-new file mode 100644
-index 0000000..aeccece
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/fsl_dprtc_cmd.h
-@@ -0,0 +1,181 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef _FSL_DPRTC_CMD_H
-+#define _FSL_DPRTC_CMD_H
-+
-+/* DPRTC Version */
-+#define DPRTC_VER_MAJOR 1
-+#define DPRTC_VER_MINOR 0
-+
-+/* Command IDs */
-+#define DPRTC_CMDID_CLOSE 0x800
-+#define DPRTC_CMDID_OPEN 0x810
-+#define DPRTC_CMDID_CREATE 0x910
-+#define DPRTC_CMDID_DESTROY 0x900
-+
-+#define DPRTC_CMDID_ENABLE 0x002
-+#define DPRTC_CMDID_DISABLE 0x003
-+#define DPRTC_CMDID_GET_ATTR 0x004
-+#define DPRTC_CMDID_RESET 0x005
-+#define DPRTC_CMDID_IS_ENABLED 0x006
-+
-+#define DPRTC_CMDID_SET_IRQ 0x010
-+#define DPRTC_CMDID_GET_IRQ 0x011
-+#define DPRTC_CMDID_SET_IRQ_ENABLE 0x012
-+#define DPRTC_CMDID_GET_IRQ_ENABLE 0x013
-+#define DPRTC_CMDID_SET_IRQ_MASK 0x014
-+#define DPRTC_CMDID_GET_IRQ_MASK 0x015
-+#define DPRTC_CMDID_GET_IRQ_STATUS 0x016
-+#define DPRTC_CMDID_CLEAR_IRQ_STATUS 0x017
-+
-+#define DPRTC_CMDID_SET_CLOCK_OFFSET 0x1d0
-+#define DPRTC_CMDID_SET_FREQ_COMPENSATION 0x1d1
-+#define DPRTC_CMDID_GET_FREQ_COMPENSATION 0x1d2
-+#define DPRTC_CMDID_GET_TIME 0x1d3
-+#define DPRTC_CMDID_SET_TIME 0x1d4
-+#define DPRTC_CMDID_SET_ALARM 0x1d5
-+#define DPRTC_CMDID_SET_PERIODIC_PULSE 0x1d6
-+#define DPRTC_CMDID_CLEAR_PERIODIC_PULSE 0x1d7
-+#define DPRTC_CMDID_SET_EXT_TRIGGER 0x1d8
-+#define DPRTC_CMDID_CLEAR_EXT_TRIGGER 0x1d9
-+#define DPRTC_CMDID_GET_EXT_TRIGGER_TIMESTAMP 0x1dA
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRTC_CMD_OPEN(cmd, dpbp_id) \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, dpbp_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRTC_RSP_IS_ENABLED(cmd, en) \
-+ MC_RSP_OP(cmd, 0, 0, 1, int, en)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRTC_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
-+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \
-+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRTC_CMD_GET_IRQ(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRTC_RSP_GET_IRQ(cmd, type, irq_cfg) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
-+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \
-+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
-+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRTC_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRTC_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRTC_RSP_GET_IRQ_ENABLE(cmd, en) \
-+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRTC_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask);\
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRTC_CMD_GET_IRQ_MASK(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRTC_RSP_GET_IRQ_MASK(cmd, mask) \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRTC_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRTC_RSP_GET_IRQ_STATUS(cmd, status) \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRTC_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRTC_RSP_GET_ATTRIBUTES(cmd, attr) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 32, 32, int, attr->id);\
-+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\
-+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRTC_CMD_SET_CLOCK_OFFSET(cmd, offset) \
-+ MC_CMD_OP(cmd, 0, 0, 64, int64_t, offset)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRTC_CMD_SET_FREQ_COMPENSATION(cmd, freq_compensation) \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, freq_compensation)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRTC_RSP_GET_FREQ_COMPENSATION(cmd, freq_compensation) \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, freq_compensation)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRTC_RSP_GET_TIME(cmd, time) \
-+ MC_RSP_OP(cmd, 0, 0, 64, uint64_t, time)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRTC_CMD_SET_TIME(cmd, time) \
-+ MC_CMD_OP(cmd, 0, 0, 64, uint64_t, time)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPRTC_CMD_SET_ALARM(cmd, time) \
-+ MC_CMD_OP(cmd, 0, 0, 64, uint64_t, time)
-+
-+#endif /* _FSL_DPRTC_CMD_H */
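The GET/CLEAR_IRQ_STATUS command pairs above, together with the W1C semantics documented in fsl_dprtc.h, imply the usual read/handle/clear interrupt-servicing pattern. A sketch under the same assumptions as before (valid 'mc_io' portal, 'token' obtained from dprtc_open(), 0 as a placeholder for cmd_flags):

#include <stdint.h>

#include "fsl_dprtc.h"

static int service_dprtc_irq(struct fsl_mc_io *mc_io, uint16_t token)
{
	uint32_t status = 0;
	int err;

	err = dprtc_get_irq_status(mc_io, 0, token, DPRTC_IRQ_INDEX, &status);
	if (err)
		return err;

	if (status & DPRTC_EVENT_ALARM) {
		/* alarm expired: application-specific handling goes here */
	}
	if (status & DPRTC_EVENT_PPS) {
		/* periodic pulse: application-specific handling goes here */
	}

	/* Write-1-to-clear exactly the causes that were just handled. */
	return dprtc_clear_irq_status(mc_io, 0, token, DPRTC_IRQ_INDEX, status);
}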
-diff --git a/drivers/net/dpaa2/mc/fsl_dpseci.h b/drivers/net/dpaa2/mc/fsl_dpseci.h
-new file mode 100644
-index 0000000..1dd7215
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/fsl_dpseci.h
-@@ -0,0 +1,647 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef __FSL_DPSECI_H
-+#define __FSL_DPSECI_H
-+
-+/* Data Path SEC Interface API
-+ * Contains initialization APIs and runtime control APIs for DPSECI
-+ */
-+
-+struct fsl_mc_io;
-+
-+/**
-+ * General DPSECI macros
-+ */
-+
-+/**
-+ * Maximum number of Tx/Rx priorities per DPSECI object
-+ */
-+#define DPSECI_PRIO_NUM 8
-+
-+/**
-+ * All queues considered; see dpseci_set_rx_queue()
-+ */
-+#define DPSECI_ALL_QUEUES (uint8_t)(-1)
-+
-+/**
-+ * dpseci_open() - Open a control session for the specified object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @dpseci_id: DPSECI unique ID
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * This function can be used to open a control session for an
-+ * already created object; an object may have been declared in
-+ * the DPL or by calling the dpseci_create() function.
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent commands for
-+ * this specific object.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpseci_open(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ int dpseci_id,
-+ uint16_t *token);
-+
-+/**
-+ * dpseci_close() - Close the control session of the object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ *
-+ * After this function is called, no further operations are
-+ * allowed on the object without opening a new control session.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpseci_close(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * struct dpseci_cfg - Structure representing DPSECI configuration
-+ * @num_tx_queues: num of queues towards the SEC
-+ * @num_rx_queues: num of queues back from the SEC
-+ * @priorities: Priorities for the SEC hardware processing;
-+ * each entry in the array is the priority of the
-+ * corresponding tx queue towards the SEC;
-+ * valid priorities are 1-8
-+ */
-+struct dpseci_cfg {
-+ uint8_t num_tx_queues;
-+ uint8_t num_rx_queues;
-+ uint8_t priorities[DPSECI_PRIO_NUM];
-+};
-+
-+/**
-+ * dpseci_create() - Create the DPSECI object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @cfg: Configuration structure
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * Create the DPSECI object, allocate required resources and
-+ * perform required initialization.
-+ *
-+ * The object can be created either by declaring it in the
-+ * DPL file, or by calling this function.
-+ *
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent calls to
-+ * this specific object. For objects that are created using the
-+ * DPL file, call dpseci_open() function to get an authentication
-+ * token first.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpseci_create(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ const struct dpseci_cfg *cfg,
-+ uint16_t *token);
-+
-+/**
-+ * dpseci_destroy() - Destroy the DPSECI object and release all its resources.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dpseci_destroy(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpseci_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpseci_disable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * dpseci_is_enabled() - Check if the DPSECI is enabled.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ * @en: Returns '1' if object is enabled; '0' otherwise
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpseci_is_enabled(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int *en);
-+
-+/**
-+ * dpseci_reset() - Reset the DPSECI, returns the object to initial state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpseci_reset(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * struct dpseci_irq_cfg - IRQ configuration
-+ * @addr: Address that must be written to signal a message-based interrupt
-+ * @val: Value to write into irq_addr address
-+ * @irq_num: A user defined number associated with this IRQ
-+ */
-+struct dpseci_irq_cfg {
-+ uint64_t addr;
-+ uint32_t val;
-+ int irq_num;
-+};
-+
-+/**
-+ * dpseci_set_irq() - Set IRQ information for the DPSECI to trigger an interrupt
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ * @irq_index: Identifies the interrupt index to configure
-+ * @irq_cfg: IRQ configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpseci_set_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ struct dpseci_irq_cfg *irq_cfg);
-+
-+/**
-+ * dpseci_get_irq() - Get IRQ information from the DPSECI
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ * @irq_index: The interrupt index to configure
-+ * @type: Interrupt type: 0 represents message interrupt
-+ * type (both irq_addr and irq_val are valid)
-+ * @irq_cfg: IRQ attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpseci_get_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ int *type,
-+ struct dpseci_irq_cfg *irq_cfg);
-+
-+/**
-+ * dpseci_set_irq_enable() - Set overall interrupt state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Interrupt state - enable = 1, disable = 0
-+ *
-+ * Allows GPP software to control when interrupts are generated.
-+ * Each interrupt can have up to 32 causes. The enable/disable setting controls
-+ * the overall interrupt state: if the interrupt is disabled, no cause will
-+ * assert it.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpseci_set_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t en);
-+
-+/**
-+ * dpseci_get_irq_enable() - Get overall interrupt state
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Returned Interrupt state - enable = 1, disable = 0
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpseci_get_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t *en);
-+
-+/**
-+ * dpseci_set_irq_mask() - Set interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: event mask to trigger interrupt;
-+ * each bit:
-+ * 0 = ignore event
-+ * 1 = consider event for asserting IRQ
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpseci_set_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t mask);
-+
-+/**
-+ * dpseci_get_irq_mask() - Get interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: Returned event mask to trigger interrupt
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpseci_get_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *mask);
-+
-+/**
-+ * dpseci_get_irq_status() - Get the current status of any pending interrupts
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ * @irq_index: The interrupt index to configure
-+ * @status: Returned interrupts status - one bit per cause:
-+ * 0 = no interrupt pending
-+ * 1 = interrupt pending
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpseci_get_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *status);
-+
-+/**
-+ * dpseci_clear_irq_status() - Clear a pending interrupt's status
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ * @irq_index: The interrupt index to configure
-+ * @status: bits to clear (W1C) - one bit per cause:
-+ * 0 = don't change
-+ * 1 = clear status bit
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpseci_clear_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t status);
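Taken together, dpseci_get_irq_status() and dpseci_clear_irq_status() form the usual poll-and-acknowledge pattern for the W1C status word. The sketch below is illustrative only and not part of the removed patch; it assumes the caller already holds an MC portal and an open DPSECI token, and that passing 0 as cmd_flags means "no command flags".

static int dpseci_ack_pending_irqs(struct fsl_mc_io *mc_io, uint16_t token,
				   uint8_t irq_index)
{
	uint32_t status = 0;
	int err;

	/* Read one bit per pending cause */
	err = dpseci_get_irq_status(mc_io, 0, token, irq_index, &status);
	if (err)
		return err;

	if (!status)
		return 0;	/* nothing pending */

	/* Write-1-to-clear: clear exactly the causes observed above */
	return dpseci_clear_irq_status(mc_io, 0, token, irq_index, status);
}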
-+
-+/**
-+ * struct dpseci_attr - Structure representing DPSECI attributes
-+ * @id: DPSECI object ID
-+ * @version: DPSECI version
-+ * @num_tx_queues: number of queues towards the SEC
-+ * @num_rx_queues: number of queues back from the SEC
-+ */
-+struct dpseci_attr {
-+ int id;
-+ /**
-+ * struct version - DPSECI version
-+ * @major: DPSECI major version
-+ * @minor: DPSECI minor version
-+ */
-+ struct {
-+ uint16_t major;
-+ uint16_t minor;
-+ } version;
-+ uint8_t num_tx_queues;
-+ uint8_t num_rx_queues;
-+};
-+
-+/**
-+ * dpseci_get_attributes() - Retrieve DPSECI attributes.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ * @attr: Returned object's attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpseci_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpseci_attr *attr);
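A minimal bring-up sketch using the calls declared above (dpseci_get_attributes(), dpseci_enable(), dpseci_is_enabled()). It is not part of the removed patch; it assumes the object was already opened elsewhere to obtain mc_io and token, that the include name matches this file, and that a cmd_flags value of 0 means "no flags".

#include <fsl_dpseci.h>		/* assumed include name, matching this file */

static int dpseci_bring_up(struct fsl_mc_io *mc_io, uint16_t token,
			   struct dpseci_attr *attr)
{
	int enabled = 0;
	int err;

	/* Learn how many Tx/Rx queues this object exposes */
	err = dpseci_get_attributes(mc_io, 0, token, attr);
	if (err)
		return err;

	/* Allow the object to send and receive frames */
	err = dpseci_enable(mc_io, 0, token);
	if (err)
		return err;

	/* Confirm the state change took effect */
	err = dpseci_is_enabled(mc_io, 0, token, &enabled);
	if (err)
		return err;

	return enabled ? 0 : -1;
}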
-+
-+/**
-+ * enum dpseci_dest - DPSECI destination types
-+ * @DPSECI_DEST_NONE: Unassigned destination; The queue is set in parked mode
-+ * and does not generate FQDAN notifications; user is expected to
-+ * dequeue from the queue based on polling or other user-defined
-+ * method
-+ * @DPSECI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
-+ * notifications to the specified DPIO; user is expected to dequeue
-+ * from the queue only after notification is received
-+ * @DPSECI_DEST_DPCON: The queue is set in schedule mode and does not generate
-+ * FQDAN notifications, but is connected to the specified DPCON
-+ * object; user is expected to dequeue from the DPCON channel
-+ */
-+enum dpseci_dest {
-+ DPSECI_DEST_NONE = 0,
-+ DPSECI_DEST_DPIO = 1,
-+ DPSECI_DEST_DPCON = 2
-+};
-+
-+/**
-+ * struct dpseci_dest_cfg - Structure representing DPSECI destination parameters
-+ * @dest_type: Destination type
-+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
-+ * @priority: Priority selection within the DPIO or DPCON channel; valid values
-+ * are 0-1 or 0-7, depending on the number of priorities in that
-+ * channel; not relevant for 'DPSECI_DEST_NONE' option
-+ */
-+struct dpseci_dest_cfg {
-+ enum dpseci_dest dest_type;
-+ int dest_id;
-+ uint8_t priority;
-+};
-+
-+/**
-+ * DPSECI queue modification options
-+ */
-+
-+/**
-+ * Select to modify the user's context associated with the queue
-+ */
-+#define DPSECI_QUEUE_OPT_USER_CTX 0x00000001
-+
-+/**
-+ * Select to modify the queue's destination
-+ */
-+#define DPSECI_QUEUE_OPT_DEST 0x00000002
-+
-+/**
-+ * Select to modify the queue's order preservation
-+ */
-+#define DPSECI_QUEUE_OPT_ORDER_PRESERVATION 0x00000004
-+
-+/**
-+ * struct dpseci_rx_queue_cfg - DPSECI RX queue configuration
-+ * @options: Flags representing the suggested modifications to the queue;
-+ * Use any combination of 'DPSECI_QUEUE_OPT_<X>' flags
-+ * @order_preservation_en: Order preservation configuration for the Rx queue;
-+ * valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is contained in 'options'
-+ * @user_ctx: User context value provided in the frame descriptor of each
-+ * dequeued frame;
-+ * valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained in 'options'
-+ * @dest_cfg: Queue destination parameters;
-+ * valid only if 'DPSECI_QUEUE_OPT_DEST' is contained in 'options'
-+ */
-+struct dpseci_rx_queue_cfg {
-+ uint32_t options;
-+ int order_preservation_en;
-+ uint64_t user_ctx;
-+ struct dpseci_dest_cfg dest_cfg;
-+};
-+
-+/**
-+ * dpseci_set_rx_queue() - Set Rx queue configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ * @queue: Select the queue relative to number of
-+ * priorities configured at DPSECI creation; use
-+ * DPSECI_ALL_QUEUES to configure all Rx queues identically.
-+ * @cfg: Rx queue configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpseci_set_rx_queue(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t queue,
-+ const struct dpseci_rx_queue_cfg *cfg);
-+
-+/**
-+ * struct dpseci_rx_queue_attr - Structure representing attributes of Rx queues
-+ * @user_ctx: User context value provided in the frame descriptor of each
-+ * dequeued frame
-+ * @order_preservation_en: Status of the order preservation configuration
-+ * on the queue
-+ * @dest_cfg: Queue destination configuration
-+ * @fqid: Virtual FQID value to be used for dequeue operations
-+ */
-+struct dpseci_rx_queue_attr {
-+ uint64_t user_ctx;
-+ int order_preservation_en;
-+ struct dpseci_dest_cfg dest_cfg;
-+ uint32_t fqid;
-+};
-+
-+/**
-+ * dpseci_get_rx_queue() - Retrieve Rx queue attributes.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ * @queue: Select the queue relative to number of
-+ * priorities configured at DPSECI creation
-+ * @attr: Returned Rx queue attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpseci_get_rx_queue(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t queue,
-+ struct dpseci_rx_queue_attr *attr);
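The two calls above are typically paired: dpseci_set_rx_queue() selects where dequeue notifications go, and dpseci_get_rx_queue() returns the virtual FQID to dequeue from. The following is a hypothetical sketch, not part of the removed patch; dpio_id, the priority and the user-context value are caller-supplied placeholders, and a cmd_flags value of 0 is assumed to mean "no flags".

static int dpseci_route_rxq_to_dpio(struct fsl_mc_io *mc_io, uint16_t token,
				    int dpio_id, uint8_t dpio_priority,
				    struct dpseci_rx_queue_attr *attr)
{
	struct dpseci_rx_queue_cfg cfg = {0};
	int err;

	cfg.options = DPSECI_QUEUE_OPT_DEST | DPSECI_QUEUE_OPT_USER_CTX;
	cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
	cfg.dest_cfg.dest_id = dpio_id;
	cfg.dest_cfg.priority = dpio_priority;
	cfg.user_ctx = 0x1234;	/* placeholder; echoed in each dequeued frame */

	err = dpseci_set_rx_queue(mc_io, 0, token, 0 /* queue 0 */, &cfg);
	if (err)
		return err;

	/* Read back the attributes, including the FQID to dequeue from */
	return dpseci_get_rx_queue(mc_io, 0, token, 0 /* queue 0 */, attr);
}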
-+
-+/**
-+ * struct dpseci_tx_queue_attr - Structure representing attributes of Tx queues
-+ * @fqid: Virtual FQID to be used for sending frames to SEC hardware
-+ * @priority: SEC hardware processing priority for the queue
-+ */
-+struct dpseci_tx_queue_attr {
-+ uint32_t fqid;
-+ uint8_t priority;
-+};
-+
-+/**
-+ * dpseci_get_tx_queue() - Retrieve Tx queue attributes.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ * @queue: Select the queue relative to number of
-+ * priorities configured at DPSECI creation
-+ * @attr: Returned Tx queue attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpseci_get_tx_queue(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t queue,
-+ struct dpseci_tx_queue_attr *attr);
-+
-+/**
-+ * struct dpseci_sec_attr - Structure representing attributes of the SEC
-+ * hardware accelerator
-+ * @ip_id: ID for SEC.
-+ * @major_rev: Major revision number for SEC.
-+ * @minor_rev: Minor revision number for SEC.
-+ * @era: SEC Era.
-+ * @deco_num: The number of copies of the DECO that are implemented in
-+ * this version of SEC.
-+ * @zuc_auth_acc_num: The number of copies of ZUCA that are implemented
-+ * in this version of SEC.
-+ * @zuc_enc_acc_num: The number of copies of ZUCE that are implemented
-+ * in this version of SEC.
-+ * @snow_f8_acc_num: The number of copies of the SNOW-f8 module that are
-+ * implemented in this version of SEC.
-+ * @snow_f9_acc_num: The number of copies of the SNOW-f9 module that are
-+ * implemented in this version of SEC.
-+ * @crc_acc_num: The number of copies of the CRC module that are implemented
-+ * in this version of SEC.
-+ * @pk_acc_num: The number of copies of the Public Key module that are
-+ * implemented in this version of SEC.
-+ * @kasumi_acc_num: The number of copies of the Kasumi module that are
-+ * implemented in this version of SEC.
-+ * @rng_acc_num: The number of copies of the Random Number Generator that are
-+ * implemented in this version of SEC.
-+ * @md_acc_num: The number of copies of the MDHA (Hashing module) that are
-+ * implemented in this version of SEC.
-+ * @arc4_acc_num: The number of copies of the ARC4 module that are implemented
-+ * in this version of SEC.
-+ * @des_acc_num: The number of copies of the DES module that are implemented
-+ * in this version of SEC.
-+ * @aes_acc_num: The number of copies of the AES module that are implemented
-+ * in this version of SEC.
-+ */
-+
-+struct dpseci_sec_attr {
-+ uint16_t ip_id;
-+ uint8_t major_rev;
-+ uint8_t minor_rev;
-+ uint8_t era;
-+ uint8_t deco_num;
-+ uint8_t zuc_auth_acc_num;
-+ uint8_t zuc_enc_acc_num;
-+ uint8_t snow_f8_acc_num;
-+ uint8_t snow_f9_acc_num;
-+ uint8_t crc_acc_num;
-+ uint8_t pk_acc_num;
-+ uint8_t kasumi_acc_num;
-+ uint8_t rng_acc_num;
-+ uint8_t md_acc_num;
-+ uint8_t arc4_acc_num;
-+ uint8_t des_acc_num;
-+ uint8_t aes_acc_num;
-+};
-+
-+/**
-+ * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ * @attr: Returned SEC attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpseci_get_sec_attr(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpseci_sec_attr *attr);
-+
-+/**
-+ * struct dpseci_sec_counters - Structure representing global SEC counters
-+ * (not per-DPSECI counters)
-+ * @dequeued_requests: Number of Requests Dequeued
-+ * @ob_enc_requests: Number of Outbound Encrypt Requests
-+ * @ib_dec_requests: Number of Inbound Decrypt Requests
-+ * @ob_enc_bytes: Number of Outbound Bytes Encrypted
-+ * @ob_prot_bytes: Number of Outbound Bytes Protected
-+ * @ib_dec_bytes: Number of Inbound Bytes Decrypted
-+ * @ib_valid_bytes: Number of Inbound Bytes Validated
-+ */
-+struct dpseci_sec_counters {
-+ uint64_t dequeued_requests;
-+ uint64_t ob_enc_requests;
-+ uint64_t ib_dec_requests;
-+ uint64_t ob_enc_bytes;
-+ uint64_t ob_prot_bytes;
-+ uint64_t ib_dec_bytes;
-+ uint64_t ib_valid_bytes;
-+};
-+
-+/**
-+ * dpseci_get_sec_counters() - Retrieve SEC accelerator counters.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSECI object
-+ * @counters: Returned SEC counters
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpseci_get_sec_counters(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpseci_sec_counters *counters);
-+
-+#endif /* __FSL_DPSECI_H */
-diff --git a/drivers/net/dpaa2/mc/fsl_dpseci_cmd.h b/drivers/net/dpaa2/mc/fsl_dpseci_cmd.h
-new file mode 100644
-index 0000000..6c0b96e
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/fsl_dpseci_cmd.h
-@@ -0,0 +1,241 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef _FSL_DPSECI_CMD_H
-+#define _FSL_DPSECI_CMD_H
-+
-+/* DPSECI Version */
-+#define DPSECI_VER_MAJOR 3
-+#define DPSECI_VER_MINOR 1
-+
-+/* Command IDs */
-+#define DPSECI_CMDID_CLOSE 0x800
-+#define DPSECI_CMDID_OPEN 0x809
-+#define DPSECI_CMDID_CREATE 0x909
-+#define DPSECI_CMDID_DESTROY 0x900
-+
-+#define DPSECI_CMDID_ENABLE 0x002
-+#define DPSECI_CMDID_DISABLE 0x003
-+#define DPSECI_CMDID_GET_ATTR 0x004
-+#define DPSECI_CMDID_RESET 0x005
-+#define DPSECI_CMDID_IS_ENABLED 0x006
-+
-+#define DPSECI_CMDID_SET_IRQ 0x010
-+#define DPSECI_CMDID_GET_IRQ 0x011
-+#define DPSECI_CMDID_SET_IRQ_ENABLE 0x012
-+#define DPSECI_CMDID_GET_IRQ_ENABLE 0x013
-+#define DPSECI_CMDID_SET_IRQ_MASK 0x014
-+#define DPSECI_CMDID_GET_IRQ_MASK 0x015
-+#define DPSECI_CMDID_GET_IRQ_STATUS 0x016
-+#define DPSECI_CMDID_CLEAR_IRQ_STATUS 0x017
-+
-+#define DPSECI_CMDID_SET_RX_QUEUE 0x194
-+#define DPSECI_CMDID_GET_RX_QUEUE 0x196
-+#define DPSECI_CMDID_GET_TX_QUEUE 0x197
-+#define DPSECI_CMDID_GET_SEC_ATTR 0x198
-+#define DPSECI_CMDID_GET_SEC_COUNTERS 0x199
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSECI_CMD_OPEN(cmd, dpseci_id) \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, dpseci_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSECI_CMD_CREATE(cmd, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->priorities[0]);\
-+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->priorities[1]);\
-+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->priorities[2]);\
-+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->priorities[3]);\
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->priorities[4]);\
-+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->priorities[5]);\
-+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->priorities[6]);\
-+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->priorities[7]);\
-+ MC_CMD_OP(cmd, 1, 0, 8, uint8_t, cfg->num_tx_queues);\
-+ MC_CMD_OP(cmd, 1, 8, 8, uint8_t, cfg->num_rx_queues);\
-+} while (0)
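The command macros here all follow the (param, offset, width) convention noted in the comments: each MC command carries an array of 64-bit parameter words, and every field is packed at a bit offset within one of those words. The helper below is only an illustration of that packing scheme, not the real MC_CMD_OP/MC_RSP_OP definition, which lives in the MC command header this file depends on.

#include <stdint.h>

/* Illustrative packing helper: place a 'width'-bit value at bit 'offset'
 * of 64-bit parameter word 'param'. */
static inline void pack_field(uint64_t *params, int param, int offset,
			      int width, uint64_t val)
{
	uint64_t mask = (width == 64) ? ~0ULL : ((1ULL << width) - 1);

	params[param] &= ~(mask << offset);		/* clear the field */
	params[param] |= (val & mask) << offset;	/* write the value */
}

For instance, DPSECI_CMD_CREATE above places cfg->priorities[0..7] in the eight byte lanes of parameter word 0 and the two queue counts at offsets 0 and 8 of word 1.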
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSECI_RSP_IS_ENABLED(cmd, en) \
-+ MC_RSP_OP(cmd, 0, 0, 1, int, en)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSECI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
-+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
-+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSECI_CMD_GET_IRQ(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSECI_RSP_GET_IRQ(cmd, type, irq_cfg) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
-+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
-+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
-+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSECI_CMD_SET_IRQ_ENABLE(cmd, irq_index, enable_state) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, enable_state); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSECI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSECI_RSP_GET_IRQ_ENABLE(cmd, enable_state) \
-+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, enable_state)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSECI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSECI_CMD_GET_IRQ_MASK(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSECI_RSP_GET_IRQ_MASK(cmd, mask) \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSECI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSECI_RSP_GET_IRQ_STATUS(cmd, status) \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSECI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSECI_RSP_GET_ATTR(cmd, attr) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id); \
-+ MC_RSP_OP(cmd, 1, 0, 8, uint8_t, attr->num_tx_queues); \
-+ MC_RSP_OP(cmd, 1, 8, 8, uint8_t, attr->num_rx_queues); \
-+ MC_RSP_OP(cmd, 5, 0, 16, uint16_t, attr->version.major);\
-+ MC_RSP_OP(cmd, 5, 16, 16, uint16_t, attr->version.minor);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSECI_CMD_SET_RX_QUEUE(cmd, queue, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority); \
-+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, queue); \
-+ MC_CMD_OP(cmd, 0, 48, 4, enum dpseci_dest, cfg->dest_cfg.dest_type); \
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \
-+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\
-+ MC_CMD_OP(cmd, 2, 32, 1, int, cfg->order_preservation_en);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSECI_CMD_GET_RX_QUEUE(cmd, queue) \
-+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, queue)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSECI_RSP_GET_RX_QUEUE(cmd, attr) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id);\
-+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\
-+ MC_RSP_OP(cmd, 0, 48, 4, enum dpseci_dest, attr->dest_cfg.dest_type);\
-+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx);\
-+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->fqid);\
-+ MC_RSP_OP(cmd, 2, 32, 1, int, attr->order_preservation_en);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSECI_CMD_GET_TX_QUEUE(cmd, queue) \
-+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, queue)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSECI_RSP_GET_TX_QUEUE(cmd, attr) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, attr->fqid);\
-+ MC_RSP_OP(cmd, 1, 0, 8, uint8_t, attr->priority);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSECI_RSP_GET_SEC_ATTR(cmd, attr) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, attr->ip_id);\
-+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, attr->major_rev);\
-+ MC_RSP_OP(cmd, 0, 24, 8, uint8_t, attr->minor_rev);\
-+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->era);\
-+ MC_RSP_OP(cmd, 1, 0, 8, uint8_t, attr->deco_num);\
-+ MC_RSP_OP(cmd, 1, 8, 8, uint8_t, attr->zuc_auth_acc_num);\
-+ MC_RSP_OP(cmd, 1, 16, 8, uint8_t, attr->zuc_enc_acc_num);\
-+ MC_RSP_OP(cmd, 1, 32, 8, uint8_t, attr->snow_f8_acc_num);\
-+ MC_RSP_OP(cmd, 1, 40, 8, uint8_t, attr->snow_f9_acc_num);\
-+ MC_RSP_OP(cmd, 1, 48, 8, uint8_t, attr->crc_acc_num);\
-+ MC_RSP_OP(cmd, 2, 0, 8, uint8_t, attr->pk_acc_num);\
-+ MC_RSP_OP(cmd, 2, 8, 8, uint8_t, attr->kasumi_acc_num);\
-+ MC_RSP_OP(cmd, 2, 16, 8, uint8_t, attr->rng_acc_num);\
-+ MC_RSP_OP(cmd, 2, 32, 8, uint8_t, attr->md_acc_num);\
-+ MC_RSP_OP(cmd, 2, 40, 8, uint8_t, attr->arc4_acc_num);\
-+ MC_RSP_OP(cmd, 2, 48, 8, uint8_t, attr->des_acc_num);\
-+ MC_RSP_OP(cmd, 2, 56, 8, uint8_t, attr->aes_acc_num);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSECI_RSP_GET_SEC_COUNTERS(cmd, counters) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 64, uint64_t, counters->dequeued_requests);\
-+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, counters->ob_enc_requests);\
-+ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, counters->ib_dec_requests);\
-+ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, counters->ob_enc_bytes);\
-+ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, counters->ob_prot_bytes);\
-+ MC_RSP_OP(cmd, 5, 0, 64, uint64_t, counters->ib_dec_bytes);\
-+ MC_RSP_OP(cmd, 6, 0, 64, uint64_t, counters->ib_valid_bytes);\
-+} while (0)
-+
-+#endif /* _FSL_DPSECI_CMD_H */
-diff --git a/drivers/net/dpaa2/mc/fsl_dpsw.h b/drivers/net/dpaa2/mc/fsl_dpsw.h
-new file mode 100644
-index 0000000..9c1bd9d
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/fsl_dpsw.h
-@@ -0,0 +1,2164 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef __FSL_DPSW_H
-+#define __FSL_DPSW_H
-+
-+#include <fsl_net.h>
-+
-+/* Data Path L2-Switch API
-+ * Contains API for handling DPSW topology and functionality
-+ */
-+
-+struct fsl_mc_io;
-+
-+/**
-+ * DPSW general definitions
-+ */
-+
-+/**
-+ * Maximum number of traffic class priorities
-+ */
-+#define DPSW_MAX_PRIORITIES 8
-+/**
-+ * Maximum number of interfaces
-+ */
-+#define DPSW_MAX_IF 64
-+
-+/**
-+ * dpsw_open() - Open a control session for the specified object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @dpsw_id: DPSW unique ID
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * This function can be used to open a control session for an
-+ * already created object; an object may have been declared in
-+ * the DPL or by calling the dpsw_create() function.
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent commands for
-+ * this specific object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_open(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ int dpsw_id,
-+ uint16_t *token);
-+
-+/**
-+ * dpsw_close() - Close the control session of the object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ *
-+ * After this function is called, no further operations are
-+ * allowed on the object without opening a new control session.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_close(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * DPSW options
-+ */
-+
-+/**
-+ * Disable flooding
-+ */
-+#define DPSW_OPT_FLOODING_DIS 0x0000000000000001ULL
-+/**
-+ * Disable Multicast
-+ */
-+#define DPSW_OPT_MULTICAST_DIS 0x0000000000000004ULL
-+/**
-+ * Support control interface
-+ */
-+#define DPSW_OPT_CTRL_IF_DIS 0x0000000000000010ULL
-+/**
-+ * Disable flooding metering
-+ */
-+#define DPSW_OPT_FLOODING_METERING_DIS 0x0000000000000020ULL
-+/**
-+ * Enable metering
-+ */
-+#define DPSW_OPT_METERING_EN 0x0000000000000040ULL
-+
-+/**
-+ * enum dpsw_component_type - component type of a bridge
-+ * @DPSW_COMPONENT_TYPE_C_VLAN: A C-VLAN component of an
-+ * enterprise VLAN bridge or of a Provider Bridge used
-+ * to process C-tagged frames
-+ * @DPSW_COMPONENT_TYPE_S_VLAN: An S-VLAN component of a
-+ * Provider Bridge
-+ *
-+ */
-+enum dpsw_component_type {
-+ DPSW_COMPONENT_TYPE_C_VLAN = 0,
-+ DPSW_COMPONENT_TYPE_S_VLAN
-+};
-+
-+/**
-+ * struct dpsw_cfg - DPSW configuration
-+ * @num_ifs: Number of external and internal interfaces
-+ * @adv: Advanced parameters; default is all zeros;
-+ * use this structure to change default settings
-+ */
-+struct dpsw_cfg {
-+ uint16_t num_ifs;
-+ /**
-+ * struct adv - Advanced parameters
-+ * @options: Enable/Disable DPSW features (bitmap)
-+ * @max_vlans: Maximum Number of VLANs; 0 - indicates default 16
-+ * @max_meters_per_if: Number of meters per interface
-+ * @max_fdbs: Maximum Number of FDBs; 0 - indicates default 16
-+ * @max_fdb_entries: Number of FDB entries for default FDB table;
-+ * 0 - indicates default 1024 entries.
-+ * @fdb_aging_time: Default FDB aging time for default FDB table;
-+ * 0 - indicates default 300 seconds
-+ * @max_fdb_mc_groups: Number of multicast groups in each FDB table;
-+ * 0 - indicates default 32
-+ * @component_type: Indicates the component type of this bridge
-+ */
-+ struct {
-+ uint64_t options;
-+ uint16_t max_vlans;
-+ uint8_t max_meters_per_if;
-+ uint8_t max_fdbs;
-+ uint16_t max_fdb_entries;
-+ uint16_t fdb_aging_time;
-+ uint16_t max_fdb_mc_groups;
-+ enum dpsw_component_type component_type;
-+ } adv;
-+};
-+
-+/**
-+ * dpsw_create() - Create the DPSW object.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @cfg: Configuration structure
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * Create the DPSW object, allocate required resources and
-+ * perform required initialization.
-+ *
-+ * The object can be created either by declaring it in the
-+ * DPL file, or by calling this function.
-+ *
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent calls to
-+ * this specific object. For objects that are created using the
-+ * DPL file, call dpsw_open() function to get an authentication
-+ * token first
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_create(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ const struct dpsw_cfg *cfg,
-+ uint16_t *token);
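A hypothetical creation sketch, not part of the removed patch: a small C-VLAN switch with four interfaces and all advanced parameters left at their documented zero defaults. mc_io is assumed to be an already-initialised MC portal, the include name is assumed to match this file, and a cmd_flags value of 0 is assumed to mean "no flags".

#include <fsl_dpsw.h>		/* assumed include name, matching this file */

static int dpsw_create_simple(struct fsl_mc_io *mc_io, uint16_t *token)
{
	struct dpsw_cfg cfg = {0};

	cfg.num_ifs = 4;
	cfg.adv.component_type = DPSW_COMPONENT_TYPE_C_VLAN;
	/* all other 'adv' fields stay 0, i.e. the documented defaults */

	return dpsw_create(mc_io, 0, &cfg, token);
}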
-+
-+/**
-+ * dpsw_destroy() - Destroy the DPSW object and release all its resources.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dpsw_destroy(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * dpsw_enable() - Enable DPSW functionality
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * dpsw_disable() - Disable DPSW functionality
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_disable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * dpsw_is_enabled() - Check if the DPSW is enabled
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @en: Returns '1' if object is enabled; '0' otherwise
-+ *
-+ * Return: '0' on Success; Error code otherwise
-+ */
-+int dpsw_is_enabled(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int *en);
-+
-+/**
-+ * dpsw_reset() - Reset the DPSW, returns the object to initial state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_reset(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * DPSW IRQ Index and Events
-+ */
-+
-+#define DPSW_IRQ_INDEX_IF 0x0000
-+#define DPSW_IRQ_INDEX_L2SW 0x0001
-+
-+/**
-+ * IRQ event - Indicates that the link state changed
-+ */
-+#define DPSW_IRQ_EVENT_LINK_CHANGED 0x0001
-+
-+/**
-+ * struct dpsw_irq_cfg - IRQ configuration
-+ * @addr: Address that must be written to signal a message-based interrupt
-+ * @val: Value to write into irq_addr address
-+ * @irq_num: A user defined number associated with this IRQ
-+ */
-+struct dpsw_irq_cfg {
-+ uint64_t addr;
-+ uint32_t val;
-+ int irq_num;
-+};
-+
-+/**
-+ * dpsw_set_irq() - Set IRQ information for the DPSW to trigger an interrupt.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @irq_index: Identifies the interrupt index to configure
-+ * @irq_cfg: IRQ configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_set_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ struct dpsw_irq_cfg *irq_cfg);
-+
-+/**
-+ * dpsw_get_irq() - Get IRQ information from the DPSW
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @irq_index: The interrupt index to configure
-+ * @type: Interrupt type: 0 represents message interrupt
-+ * type (both irq_addr and irq_val are valid)
-+ * @irq_cfg: IRQ attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_get_irq(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ int *type,
-+ struct dpsw_irq_cfg *irq_cfg);
-+
-+/**
-+ * dpsw_set_irq_enable() - Set overall interrupt state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Interrupt state - enable = 1, disable = 0
-+ *
-+ * Allows GPP software to control when interrupts are generated.
-+ * Each interrupt can have up to 32 causes. The enable/disable controls the
-+ * overall interrupt state; if the interrupt is disabled, no cause will
-+ * trigger an interrupt.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_set_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t en);
-+
-+/**
-+ * dpsw_get_irq_enable() - Get overall interrupt state
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Returned Interrupt state - enable = 1, disable = 0
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_get_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t *en);
-+
-+/**
-+ * dpsw_set_irq_mask() - Set interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: event mask to trigger interrupt;
-+ * each bit:
-+ * 0 = ignore event
-+ * 1 = consider event for asserting IRQ
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_set_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t mask);
-+
-+/**
-+ * dpsw_get_irq_mask() - Get interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: Returned event mask to trigger interrupt
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_get_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *mask);
-+
-+/**
-+ * dpsw_get_irq_status() - Get the current status of any pending interrupts
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @irq_index: The interrupt index to configure
-+ * @status: Returned interrupts status - one bit per cause:
-+ * 0 = no interrupt pending
-+ * 1 = interrupt pending
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_get_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *status);
-+
-+/**
-+ * dpsw_clear_irq_status() - Clear a pending interrupt's status
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @irq_index: The interrupt index to configure
-+ * @status: bits to clear (W1C) - one bit per cause:
-+ * 0 = don't change
-+ * 1 = clear status bit
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_clear_irq_status(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t status);
-+/**
-+ * struct dpsw_attr - Structure representing DPSW attributes
-+ * @id: DPSW object ID
-+ * @version: DPSW version
-+ * @options: Enable/Disable DPSW features
-+ * @max_vlans: Maximum Number of VLANs
-+ * @max_meters_per_if: Number of meters per interface
-+ * @max_fdbs: Maximum Number of FDBs
-+ * @max_fdb_entries: Number of FDB entries for default FDB table;
-+ * 0 - indicates default 1024 entries.
-+ * @fdb_aging_time: Default FDB aging time for default FDB table;
-+ * 0 - indicates default 300 seconds
-+ * @max_fdb_mc_groups: Number of multicast groups in each FDB table;
-+ * 0 - indicates default 32
-+ * @mem_size: DPSW frame storage memory size
-+ * @num_ifs: Number of interfaces
-+ * @num_vlans: Current number of VLANs
-+ * @num_fdbs: Current number of FDBs
-+ * @component_type: Component type of this bridge
-+ */
-+struct dpsw_attr {
-+ int id;
-+ /**
-+ * struct version - DPSW version
-+ * @major: DPSW major version
-+ * @minor: DPSW minor version
-+ */
-+ struct {
-+ uint16_t major;
-+ uint16_t minor;
-+ } version;
-+ uint64_t options;
-+ uint16_t max_vlans;
-+ uint8_t max_meters_per_if;
-+ uint8_t max_fdbs;
-+ uint16_t max_fdb_entries;
-+ uint16_t fdb_aging_time;
-+ uint16_t max_fdb_mc_groups;
-+ uint16_t num_ifs;
-+ uint16_t mem_size;
-+ uint16_t num_vlans;
-+ uint8_t num_fdbs;
-+ enum dpsw_component_type component_type;
-+};
-+
-+/**
-+ * dpsw_get_attributes() - Retrieve DPSW attributes
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @attr: Returned DPSW attributes
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpsw_attr *attr);
-+
-+/**
-+ * dpsw_set_reflection_if() - Set target interface for reflected interfaces.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Id
-+ *
-+ * Only one reflection receive interface is allowed per switch
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_set_reflection_if(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id);
-+
-+/**
-+ * enum dpsw_action - Action selection for special/control frames
-+ * @DPSW_ACTION_DROP: Drop frame
-+ * @DPSW_ACTION_REDIRECT: Redirect frame to control port
-+ */
-+enum dpsw_action {
-+ DPSW_ACTION_DROP = 0,
-+ DPSW_ACTION_REDIRECT = 1
-+};
-+
-+/**
-+ * Enable auto-negotiation
-+ */
-+#define DPSW_LINK_OPT_AUTONEG 0x0000000000000001ULL
-+/**
-+ * Enable half-duplex mode
-+ */
-+#define DPSW_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
-+/**
-+ * Enable pause frames
-+ */
-+#define DPSW_LINK_OPT_PAUSE 0x0000000000000004ULL
-+/**
-+ * Enable a-symmetric pause frames
-+ */
-+#define DPSW_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
-+
-+/**
-+ * struct dpsw_link_cfg - Structure representing DPSW link configuration
-+ * @rate: Rate
-+ * @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values
-+ */
-+struct dpsw_link_cfg {
-+ uint32_t rate;
-+ uint64_t options;
-+};
-+
-+/**
-+ * dpsw_if_set_link_cfg() - set the link configuration.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: interface id
-+ * @cfg: Link configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ struct dpsw_link_cfg *cfg);
-+/**
-+ * struct dpsw_link_state - Structure representing DPSW link state
-+ * @rate: Rate
-+ * @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values
-+ * @up: Link state: 0 - down or disconnected, 1 - up
-+ */
-+struct dpsw_link_state {
-+ uint32_t rate;
-+ uint64_t options;
-+ int up;
-+};
-+
-+/**
-+ * dpsw_if_get_link_state() - Return the link state
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: interface id
-+ * @state: Returned link state: 1 - link is up, 0 - link is down or disconnected
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_get_link_state(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ struct dpsw_link_state *state);
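Link configuration and link state are usually used together: configure the options, then poll the state. The sketch below is illustrative and not part of the removed patch; the rate value is a placeholder since the header does not state its unit, and a cmd_flags value of 0 is assumed to mean "no flags".

static int dpsw_if_link_up(struct fsl_mc_io *mc_io, uint16_t token,
			   uint16_t if_id)
{
	struct dpsw_link_cfg cfg = {
		.rate = 1000,	/* placeholder rate; unit not stated here */
		.options = DPSW_LINK_OPT_AUTONEG | DPSW_LINK_OPT_PAUSE,
	};
	struct dpsw_link_state state;
	int err;

	err = dpsw_if_set_link_cfg(mc_io, 0, token, if_id, &cfg);
	if (err)
		return err;

	err = dpsw_if_get_link_state(mc_io, 0, token, if_id, &state);
	if (err)
		return err;

	return state.up ? 0 : -1;
}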
-+
-+/**
-+ * dpsw_if_set_flooding() - Enable/disable flooding for a particular interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @en: 1 - enable, 0 - disable
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_flooding(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ int en);
-+
-+/**
-+ * dpsw_if_set_broadcast() - Enable/disable broadcast for a particular interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @en: 1 - enable, 0 - disable
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ int en);
-+
-+/**
-+ * dpsw_if_set_multicast() - Enable/disable multicast for a particular interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @en: 1 - enable, 0 - disable
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_multicast(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ int en);
-+
-+/**
-+ * struct dpsw_tci_cfg - Tag Control Information (TCI) configuration
-+ * @pcp: Priority Code Point (PCP): a 3-bit field which refers
-+ * to the IEEE 802.1p priority
-+ * @dei: Drop Eligible Indicator (DEI): a 1-bit field. May be used
-+ * separately or in conjunction with PCP to indicate frames
-+ * eligible to be dropped in the presence of congestion
-+ * @vlan_id: VLAN Identifier (VID): a 12-bit field specifying the VLAN
-+ * to which the frame belongs. The hexadecimal values
-+ * of 0x000 and 0xFFF are reserved;
-+ * all other values may be used as VLAN identifiers,
-+ * allowing up to 4,094 VLANs
-+ */
-+struct dpsw_tci_cfg {
-+ uint8_t pcp;
-+ uint8_t dei;
-+ uint16_t vlan_id;
-+};
-+
-+/**
-+ * dpsw_if_set_tci() - Set default VLAN Tag Control Information (TCI)
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @cfg: Tag Control Information Configuration
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_tci(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ const struct dpsw_tci_cfg *cfg);
-+
-+/**
-+ * dpsw_if_get_tci() - Get default VLAN Tag Control Information (TCI)
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @cfg: Tag Control Information Configuration
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_get_tci(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ struct dpsw_tci_cfg *cfg);
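A short illustrative sketch, not part of the removed patch, that sets the default TCI of an interface to PCP 0, DEI 0 and a caller-chosen VLAN ID; a cmd_flags value of 0 is assumed to mean "no flags".

static int dpsw_if_set_default_vlan(struct fsl_mc_io *mc_io, uint16_t token,
				    uint16_t if_id, uint16_t vlan_id)
{
	struct dpsw_tci_cfg cfg = {
		.pcp = 0,
		.dei = 0,
		.vlan_id = vlan_id,	/* 0x000 and 0xFFF are reserved */
	};

	return dpsw_if_set_tci(mc_io, 0, token, if_id, &cfg);
}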
-+
-+/**
-+ * enum dpsw_stp_state - Spanning Tree Protocol (STP) states
-+ * @DPSW_STP_STATE_BLOCKING: Blocking state
-+ * @DPSW_STP_STATE_LISTENING: Listening state
-+ * @DPSW_STP_STATE_LEARNING: Learning state
-+ * @DPSW_STP_STATE_FORWARDING: Forwarding state
-+ *
-+ */
-+enum dpsw_stp_state {
-+ DPSW_STP_STATE_BLOCKING = 0,
-+ DPSW_STP_STATE_LISTENING = 1,
-+ DPSW_STP_STATE_LEARNING = 2,
-+ DPSW_STP_STATE_FORWARDING = 3
-+};
-+
-+/**
-+ * struct dpsw_stp_cfg - Spanning Tree Protocol (STP) Configuration
-+ * @vlan_id: VLAN ID STP state
-+ * @state: STP state
-+ */
-+struct dpsw_stp_cfg {
-+ uint16_t vlan_id;
-+ enum dpsw_stp_state state;
-+};
-+
-+/**
-+ * dpsw_if_set_stp() - Function sets Spanning Tree Protocol (STP) state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @cfg: STP State configuration parameters
-+ *
-+ * The following STP states are supported -
-+ * blocking, listening, learning, forwarding and disabled.
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_stp(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ const struct dpsw_stp_cfg *cfg);
-+
-+/**
-+ * enum dpsw_accepted_frames - Types of frames to accept
-+ * @DPSW_ADMIT_ALL: The device accepts VLAN tagged, untagged and
-+ * priority tagged frames
-+ * @DPSW_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or
-+ * Priority-Tagged frames received on this interface.
-+ *
-+ */
-+enum dpsw_accepted_frames {
-+ DPSW_ADMIT_ALL = 1,
-+ DPSW_ADMIT_ONLY_VLAN_TAGGED = 3
-+};
-+
-+/**
-+ * struct dpsw_accepted_frames_cfg - Types of frames to accept configuration
-+ * @type: Defines ingress accepted frames
-+ * @unaccept_act: When a frame is not accepted, it may be discarded or
-+ * redirected to control interface depending on this mode
-+ */
-+struct dpsw_accepted_frames_cfg {
-+ enum dpsw_accepted_frames type;
-+ enum dpsw_action unaccept_act;
-+};
-+
-+/**
-+ * dpsw_if_set_accepted_frames() - Set the types of frames accepted on an interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @cfg: Frame types configuration
-+ *
-+ * When admit_only_vlan_tagged is selected, the device discards untagged
-+ * frames or priority-tagged frames received on this interface.
-+ * When admit_only_untagged is selected, untagged frames or priority-tagged
-+ * frames received on this interface are accepted and assigned
-+ * to a VID based on the PVID and VID set for this interface.
-+ * When admit_all is selected, the device accepts VLAN-tagged, untagged
-+ * and priority-tagged frames.
-+ * The default is admit_all.
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_accepted_frames(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ const struct dpsw_accepted_frames_cfg *cfg);
-+
-+/**
-+ * dpsw_if_set_accept_all_vlan() - Accept or drop frames having a different VLAN
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @accept_all: Accept or drop frames having different VLAN
-+ *
-+ * When accept_all is FALSE, the device discards incoming frames for
-+ * VLANs that do not include this interface in their member set.
-+ * When accept_all is TRUE, the interface accepts all incoming frames.
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_accept_all_vlan(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ int accept_all);
-+
-+/**
-+ * enum dpsw_counter - Counters types
-+ * @DPSW_CNT_ING_FRAME: Counts ingress frames
-+ * @DPSW_CNT_ING_BYTE: Counts ingress bytes
-+ * @DPSW_CNT_ING_FLTR_FRAME: Counts filtered ingress frames
-+ * @DPSW_CNT_ING_FRAME_DISCARD: Counts discarded ingress frames
-+ * @DPSW_CNT_ING_MCAST_FRAME: Counts ingress multicast frames
-+ * @DPSW_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes
-+ * @DPSW_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames
-+ * @DPSW_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes
-+ * @DPSW_CNT_EGR_FRAME: Counts egress frames
-+ * @DPSW_CNT_EGR_BYTE: Counts egress bytes
-+ * @DPSW_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames
-+ * @DPSW_CNT_EGR_STP_FRAME_DISCARD: Counts egress STP discarded frames
-+ */
-+enum dpsw_counter {
-+ DPSW_CNT_ING_FRAME = 0x0,
-+ DPSW_CNT_ING_BYTE = 0x1,
-+ DPSW_CNT_ING_FLTR_FRAME = 0x2,
-+ DPSW_CNT_ING_FRAME_DISCARD = 0x3,
-+ DPSW_CNT_ING_MCAST_FRAME = 0x4,
-+ DPSW_CNT_ING_MCAST_BYTE = 0x5,
-+ DPSW_CNT_ING_BCAST_FRAME = 0x6,
-+ DPSW_CNT_ING_BCAST_BYTES = 0x7,
-+ DPSW_CNT_EGR_FRAME = 0x8,
-+ DPSW_CNT_EGR_BYTE = 0x9,
-+ DPSW_CNT_EGR_FRAME_DISCARD = 0xa,
-+ DPSW_CNT_EGR_STP_FRAME_DISCARD = 0xb
-+};
-+
-+/**
-+ * dpsw_if_get_counter() - Get specific counter of particular interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @type: Counter type
-+ * @counter: return value
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_get_counter(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ enum dpsw_counter type,
-+ uint64_t *counter);
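A small illustrative reader, not part of the removed patch, that pulls the ingress frame and byte counters for one interface; a cmd_flags value of 0 is assumed to mean "no flags".

static int dpsw_if_read_ing_stats(struct fsl_mc_io *mc_io, uint16_t token,
				  uint16_t if_id,
				  uint64_t *frames, uint64_t *bytes)
{
	int err;

	err = dpsw_if_get_counter(mc_io, 0, token, if_id,
				  DPSW_CNT_ING_FRAME, frames);
	if (err)
		return err;

	return dpsw_if_get_counter(mc_io, 0, token, if_id,
				   DPSW_CNT_ING_BYTE, bytes);
}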
-+
-+/**
-+ * dpsw_if_set_counter() - Set specific counter of particular interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @type: Counter type
-+ * @counter: New counter value
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_counter(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ enum dpsw_counter type,
-+ uint64_t counter);
-+
-+/**
-+ * Maximum number of TC
-+ */
-+#define DPSW_MAX_TC 8
-+
-+/**
-+ * enum dpsw_priority_selector - User priority
-+ * @DPSW_UP_PCP: Priority Code Point (PCP): a 3-bit field which
-+ * refers to the IEEE 802.1p priority.
-+ * @DPSW_UP_DSCP: Differentiated services Code Point (DSCP): 6 bit
-+ * field from IP header
-+ *
-+ */
-+enum dpsw_priority_selector {
-+ DPSW_UP_PCP = 0,
-+ DPSW_UP_DSCP = 1
-+};
-+
-+/**
-+ * enum dpsw_schedule_mode - Traffic classes scheduling
-+ * @DPSW_SCHED_STRICT_PRIORITY: schedule strict priority
-+ * @DPSW_SCHED_WEIGHTED: weighted scheduling based on a token-bucket algorithm
-+ */
-+enum dpsw_schedule_mode {
-+ DPSW_SCHED_STRICT_PRIORITY,
-+ DPSW_SCHED_WEIGHTED
-+};
-+
-+/**
-+ * struct dpsw_tx_schedule_cfg - traffic class configuration
-+ * @mode: Strict or weight-based scheduling
-+ * @delta_bandwidth: Weighted bandwidth in the range from 100 to 10000
-+ */
-+struct dpsw_tx_schedule_cfg {
-+ enum dpsw_schedule_mode mode;
-+ uint16_t delta_bandwidth;
-+};
-+
-+/**
-+ * struct dpsw_tx_selection_cfg - Mapping user priority into traffic
-+ * class configuration
-+ * @priority_selector: Source for user priority regeneration
-+ * @tc_id: The Regenerated User priority that the incoming
-+ * User Priority is mapped to for this interface
-+ * @tc_sched: Traffic classes configuration
-+ */
-+struct dpsw_tx_selection_cfg {
-+ enum dpsw_priority_selector priority_selector;
-+ uint8_t tc_id[DPSW_MAX_PRIORITIES];
-+ struct dpsw_tx_schedule_cfg tc_sched[DPSW_MAX_TC];
-+};
-+
-+/**
-+ * dpsw_if_set_tx_selection() - Map frame fields to traffic classes
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @cfg: Traffic class mapping configuration
-+ *
-+ * This function maps frame fields (DSCP, PCP) to a traffic class.
-+ * A traffic class is a number in the range from 0 to 7.
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_tx_selection(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ const struct dpsw_tx_selection_cfg *cfg);
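As an illustration, not part of the removed patch, the sketch below maps the eight PCP user priorities one-to-one onto traffic classes 0-7 and leaves every class in strict-priority scheduling; a cmd_flags value of 0 is assumed to mean "no flags".

static int dpsw_if_map_pcp_to_tc(struct fsl_mc_io *mc_io, uint16_t token,
				 uint16_t if_id)
{
	struct dpsw_tx_selection_cfg cfg = {0};
	int i;

	cfg.priority_selector = DPSW_UP_PCP;
	for (i = 0; i < DPSW_MAX_PRIORITIES; i++)
		cfg.tc_id[i] = (uint8_t)i;	/* PCP i -> TC i */
	for (i = 0; i < DPSW_MAX_TC; i++)
		cfg.tc_sched[i].mode = DPSW_SCHED_STRICT_PRIORITY;

	return dpsw_if_set_tx_selection(mc_io, 0, token, if_id, &cfg);
}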
-+
-+/**
-+ * enum dpsw_reflection_filter - Filter type for frames to reflect
-+ * @DPSW_REFLECTION_FILTER_INGRESS_ALL: Reflect all frames
-+ * @DPSW_REFLECTION_FILTER_INGRESS_VLAN: Reflect only frames that belong to
-+ * the particular VLAN defined by the vid parameter
-+ *
-+ */
-+enum dpsw_reflection_filter {
-+ DPSW_REFLECTION_FILTER_INGRESS_ALL = 0,
-+ DPSW_REFLECTION_FILTER_INGRESS_VLAN = 1
-+};
-+
-+/**
-+ * struct dpsw_reflection_cfg - Structure representing reflection information
-+ * @filter: Filter type for frames to reflect
-+ * @vlan_id: VLAN ID to reflect; valid only when filter type is
-+ * DPSW_REFLECTION_FILTER_INGRESS_VLAN
-+ */
-+struct dpsw_reflection_cfg {
-+ enum dpsw_reflection_filter filter;
-+ uint16_t vlan_id;
-+};
-+
-+/**
-+ * dpsw_if_add_reflection() - Identify interface to be reflected or mirrored
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @cfg: Reflection configuration
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_add_reflection(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ const struct dpsw_reflection_cfg *cfg);
-+
-+/**
-+ * dpsw_if_remove_reflection() - Remove interface to be reflected or mirrored
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @cfg: Reflection configuration
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ const struct dpsw_reflection_cfg *cfg);
-+
-+/**
-+ * enum dpsw_metering_mode - Metering modes
-+ * @DPSW_METERING_MODE_NONE: metering disabled
-+ * @DPSW_METERING_MODE_RFC2698: RFC 2698
-+ * @DPSW_METERING_MODE_RFC4115: RFC 4115
-+ */
-+enum dpsw_metering_mode {
-+ DPSW_METERING_MODE_NONE = 0,
-+ DPSW_METERING_MODE_RFC2698,
-+ DPSW_METERING_MODE_RFC4115
-+};
-+
-+/**
-+ * enum dpsw_metering_unit - Metering count
-+ * @DPSW_METERING_UNIT_BYTES: count bytes
-+ * @DPSW_METERING_UNIT_FRAMES: count frames
-+ */
-+enum dpsw_metering_unit {
-+ DPSW_METERING_UNIT_BYTES = 0,
-+ DPSW_METERING_UNIT_FRAMES
-+};
-+
-+/**
-+ * struct dpsw_metering_cfg - Metering configuration
-+ * @mode: metering modes
-+ * @units: Bytes or frame units
-+ * @cir: Committed information rate (CIR) in Kbits/s
-+ * @eir: Peak information rate (PIR) Kbit/s rfc2698
-+ * Excess information rate (EIR) Kbit/s rfc4115
-+ * @cbs: Committed burst size (CBS) in bytes
-+ * @ebs: Peak burst size (PBS) in bytes for rfc2698
-+ * Excess burst size (EBS) in bytes rfc4115
-+ *
-+ */
-+struct dpsw_metering_cfg {
-+ enum dpsw_metering_mode mode;
-+ enum dpsw_metering_unit units;
-+ uint32_t cir;
-+ uint32_t eir;
-+ uint32_t cbs;
-+ uint32_t ebs;
-+};
-+
-+/**
-+ * dpsw_if_set_flooding_metering() - Set flooding metering
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @cfg: Metering parameters
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_flooding_metering(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ const struct dpsw_metering_cfg *cfg);
-+
-+/**
-+ * dpsw_if_set_metering() - Set interface metering per traffic class
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @tc_id: Traffic class ID
-+ * @cfg: Metering parameters
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_metering(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ uint8_t tc_id,
-+ const struct dpsw_metering_cfg *cfg);
-+
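As a minimal sketch of the metering API above (assuming an already-opened DPSW providing mc_io and token), an RFC 2698 byte-based meter could be attached to one traffic class as follows; all rates, bursts and IDs are illustrative and the example_ helper is hypothetical:

static int example_set_tc_meter(struct fsl_mc_io *mc_io, uint16_t token)
{
        /* Illustrative RFC 2698 meter on interface 1, traffic class 0. */
        const struct dpsw_metering_cfg met = {
                .mode  = DPSW_METERING_MODE_RFC2698,
                .units = DPSW_METERING_UNIT_BYTES,
                .cir   = 100000,  /* committed rate, Kbit/s */
                .eir   = 200000,  /* peak rate (PIR), Kbit/s */
                .cbs   = 4096,    /* committed burst, bytes */
                .ebs   = 8192,    /* peak burst, bytes */
        };

        return dpsw_if_set_metering(mc_io, 0, token, /* if_id */ 1,
                                    /* tc_id */ 0, &met);
}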
-+/**
-+ * enum dpsw_early_drop_unit - DPSW early drop unit
-+ * @DPSW_EARLY_DROP_UNIT_BYTE: count bytes
-+ * @DPSW_EARLY_DROP_UNIT_FRAMES: count frames
-+ */
-+enum dpsw_early_drop_unit {
-+ DPSW_EARLY_DROP_UNIT_BYTE = 0,
-+ DPSW_EARLY_DROP_UNIT_FRAMES
-+};
-+
-+/**
-+ * enum dpsw_early_drop_mode - DPSW early drop mode
-+ * @DPSW_EARLY_DROP_MODE_NONE: early drop is disabled
-+ * @DPSW_EARLY_DROP_MODE_TAIL: early drop in taildrop mode
-+ * @DPSW_EARLY_DROP_MODE_WRED: early drop in WRED mode
-+ */
-+enum dpsw_early_drop_mode {
-+ DPSW_EARLY_DROP_MODE_NONE = 0,
-+ DPSW_EARLY_DROP_MODE_TAIL,
-+ DPSW_EARLY_DROP_MODE_WRED
-+};
-+
-+/**
-+ * struct dpsw_wred_cfg - WRED configuration
-+ * @max_threshold: maximum threshold at which packets may be discarded. Above
-+ * this threshold all packets are discarded; must be less than 2^39;
-+ * approximated as (x+256)*2^(y-1) due to the HW
-+ * implementation.
-+ * @min_threshold: minimum threshold at which packets may be discarded
-+ * @drop_probability: probability that a packet will be discarded (1-100,
-+ * associated with the maximum threshold)
-+ */
-+struct dpsw_wred_cfg {
-+ uint64_t min_threshold;
-+ uint64_t max_threshold;
-+ uint8_t drop_probability;
-+};
-+
-+/**
-+ * struct dpsw_early_drop_cfg - early-drop configuration
-+ * @drop_mode: drop mode
-+ * @units: count units
-+ * @yellow: WRED - 'yellow' configuration
-+ * @green: WRED - 'green' configuration
-+ * @tail_drop_threshold: tail drop threshold
-+ */
-+struct dpsw_early_drop_cfg {
-+ enum dpsw_early_drop_mode drop_mode;
-+ enum dpsw_early_drop_unit units;
-+ struct dpsw_wred_cfg yellow;
-+ struct dpsw_wred_cfg green;
-+ uint32_t tail_drop_threshold;
-+};
-+
-+/**
-+ * dpsw_prepare_early_drop() - Prepare an early-drop configuration for an interface
-+ * @cfg: Early-drop configuration
-+ * @early_drop_buf: Zeroed 256 bytes of memory, to be mapped to DMA afterwards
-+ *
-+ * This function has to be called before dpsw_if_set_early_drop()
-+ *
-+ */
-+void dpsw_prepare_early_drop(const struct dpsw_early_drop_cfg *cfg,
-+ uint8_t *early_drop_buf);
-+
-+/**
-+ * dpsw_if_set_early_drop() - Set interface traffic class early-drop
-+ * configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @tc_id: Traffic class selection (0-7)
-+ * @early_drop_iova: I/O virtual address of 64 bytes;
-+ * Must be cacheline-aligned and DMA-able memory
-+ *
-+ * warning: Before calling this function, call dpsw_prepare_early_drop()
-+ * to prepare the early_drop_iova parameter
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dpsw_if_set_early_drop(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ uint8_t tc_id,
-+ uint64_t early_drop_iova);
-+
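The two-step early-drop flow described above (serialize the configuration, then hand its DMA address to the MC) might look like the sketch below. The caller is assumed to supply a zeroed 256-byte DMA-able buffer and its I/O virtual address; all thresholds are illustrative:

static int example_set_wred(struct fsl_mc_io *mc_io, uint16_t token,
                            uint16_t if_id, uint8_t tc_id,
                            uint8_t *buf, uint64_t buf_iova)
{
        /* buf/buf_iova: caller-provided zeroed 256-byte DMA buffer and its
         * IOVA (assumed, not shown here). */
        const struct dpsw_early_drop_cfg cfg = {
                .drop_mode = DPSW_EARLY_DROP_MODE_WRED,
                .units     = DPSW_EARLY_DROP_UNIT_BYTE,
                .green     = { .min_threshold = 8192, .max_threshold = 16384,
                               .drop_probability = 50 },
                .yellow    = { .min_threshold = 4096, .max_threshold = 8192,
                               .drop_probability = 80 },
                .tail_drop_threshold = 0,
        };

        dpsw_prepare_early_drop(&cfg, buf);  /* serialize into the DMA buffer */
        return dpsw_if_set_early_drop(mc_io, 0, token, if_id, tc_id, buf_iova);
}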
-+/**
-+ * struct dpsw_custom_tpid_cfg - Structure representing tag Protocol identifier
-+ * @tpid: An additional tag protocol identifier
-+ */
-+struct dpsw_custom_tpid_cfg {
-+ uint16_t tpid;
-+};
-+
-+/**
-+ * dpsw_add_custom_tpid() - Configure a distinct Ethernet type (TPID) value
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @cfg: Tag Protocol identifier
-+ *
-+ * Configures a distinct Ethernet type value (or TPID value)
-+ * to indicate a VLAN tag in addition to the common
-+ * TPID values 0x8100 and 0x88A8.
-+ * Two additional TPIDs are supported
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_add_custom_tpid(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const struct dpsw_custom_tpid_cfg *cfg);
-+
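A short sketch of registering an extra TPID with the call above; 0x9100 is an illustrative value, and mc_io/token are assumed to come from an earlier open of the DPSW object:

static int example_add_tpid(struct fsl_mc_io *mc_io, uint16_t token)
{
        /* Recognize 0x9100 as a VLAN TPID in addition to 0x8100/0x88A8. */
        const struct dpsw_custom_tpid_cfg cfg = { .tpid = 0x9100 };

        return dpsw_add_custom_tpid(mc_io, 0, token, &cfg);
}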
-+/**
-+ * dpsw_remove_custom_tpid() - Remove a distinct Ethernet type value
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @cfg: Tag Protocol identifier
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_remove_custom_tpid(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const struct dpsw_custom_tpid_cfg *cfg);
-+
-+/**
-+ * dpsw_if_enable() - Enable Interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id);
-+
-+/**
-+ * dpsw_if_disable() - Disable Interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_disable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id);
-+
-+/**
-+ * struct dpsw_if_attr - Structure representing DPSW interface attributes
-+ * @num_tcs: Number of traffic classes
-+ * @rate: Transmit rate in bits per second
-+ * @options: Interface configuration options (bitmap)
-+ * @enabled: Indicates if interface is enabled
-+ * @accept_all_vlan: The device discards/accepts incoming frames
-+ * for VLANs that do not include this interface
-+ * @admit_untagged: When set to 'DPSW_ADMIT_ONLY_VLAN_TAGGED', the device
-+ * discards untagged frames or priority-tagged frames received on
-+ * this interface;
-+ * When set to 'DPSW_ADMIT_ALL', untagged frames or priority-
-+ * tagged frames received on this interface are accepted
-+ * @qdid: control frames transmit qdid
-+ */
-+struct dpsw_if_attr {
-+ uint8_t num_tcs;
-+ uint32_t rate;
-+ uint32_t options;
-+ int enabled;
-+ int accept_all_vlan;
-+ enum dpsw_accepted_frames admit_untagged;
-+ uint16_t qdid;
-+};
-+
-+/**
-+ * dpsw_if_get_attributes() - Function obtains attributes of interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @attr: Returned interface attributes
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ struct dpsw_if_attr *attr);
-+
-+/**
-+ * dpsw_if_set_max_frame_length() - Set Maximum Receive frame length.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @frame_length: Maximum Frame Length
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ uint16_t frame_length);
-+
-+/**
-+ * dpsw_if_get_max_frame_length() - Get Maximum Receive frame length.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @frame_length: Returned maximum Frame Length
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_get_max_frame_length(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t if_id,
-+ uint16_t *frame_length);
-+
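Setting and reading back the maximum receive frame length could look like this sketch; interface 1 and the 9600-byte jumbo value are illustrative, and the example_ helper is hypothetical:

static int example_set_mfl(struct fsl_mc_io *mc_io, uint16_t token)
{
        uint16_t mfl = 0;
        int err;

        /* Raise the maximum receive frame length, then read it back. */
        err = dpsw_if_set_max_frame_length(mc_io, 0, token, /* if_id */ 1, 9600);
        if (err)
                return err;
        return dpsw_if_get_max_frame_length(mc_io, 0, token, /* if_id */ 1, &mfl);
}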
-+/**
-+ * struct dpsw_vlan_cfg - VLAN Configuration
-+ * @fdb_id: Forwarding Data Base
-+ */
-+struct dpsw_vlan_cfg {
-+ uint16_t fdb_id;
-+};
-+
-+/**
-+ * dpsw_vlan_add() - Add a new VLAN to the DPSW.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: VLAN configuration
-+ *
-+ * Only the VLAN ID and FDB ID are required parameters here.
-+ * The 12-bit VLAN ID is defined in IEEE 802.1Q.
-+ * Adding a duplicate VLAN ID is not allowed.
-+ * The FDB ID can be shared across multiple VLANs. Shared learning
-+ * is obtained by calling dpsw_vlan_add() for multiple VLAN IDs
-+ * with the same fdb_id
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_vlan_add(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t vlan_id,
-+ const struct dpsw_vlan_cfg *cfg);
-+
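Shared learning as described above boils down to reusing one FDB ID for several VLANs. A sketch, assuming fdb_id was obtained earlier from dpsw_fdb_add() and with illustrative VLAN numbers:

static int example_shared_learning(struct fsl_mc_io *mc_io, uint16_t token,
                                   uint16_t fdb_id)
{
        /* Two VLANs sharing one FDB, so MAC entries learned on either
         * VLAN are visible to both. */
        const struct dpsw_vlan_cfg cfg = { .fdb_id = fdb_id };
        int err;

        err = dpsw_vlan_add(mc_io, 0, token, /* vlan_id */ 100, &cfg);
        if (err)
                return err;
        return dpsw_vlan_add(mc_io, 0, token, /* vlan_id */ 200, &cfg);
}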
-+/**
-+ * struct dpsw_vlan_if_cfg - Set of VLAN Interfaces
-+ * @num_ifs: The number of interfaces that are assigned to the egress
-+ * list for this VLAN
-+ * @if_id: The set of interfaces that are
-+ * assigned to the egress list for this VLAN
-+ */
-+struct dpsw_vlan_if_cfg {
-+ uint16_t num_ifs;
-+ uint16_t if_id[DPSW_MAX_IF];
-+};
-+
-+/**
-+ * dpsw_vlan_add_if() - Add a set of interfaces to an existing VLAN.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Set of interfaces to add
-+ *
-+ * It adds only interfaces that do not yet belong to this VLAN;
-+ * otherwise an error is generated and the entire command is
-+ * ignored. This function can be called numerous times, always
-+ * providing the required interface delta.
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_vlan_add_if(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t vlan_id,
-+ const struct dpsw_vlan_if_cfg *cfg);
-+
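Adding member interfaces with the delta semantics described above might look like this; the interface and VLAN IDs are illustrative and mc_io/token are assumed from an earlier open:

static int example_vlan_members(struct fsl_mc_io *mc_io, uint16_t token)
{
        /* Attach interfaces 0 and 1 to VLAN 100; only interfaces that are
         * not already members may appear in the delta. */
        const struct dpsw_vlan_if_cfg ifs = {
                .num_ifs = 2,
                .if_id   = { 0, 1 },
        };

        return dpsw_vlan_add_if(mc_io, 0, token, /* vlan_id */ 100, &ifs);
}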
-+/**
-+ * dpsw_vlan_add_if_untagged() - Define a set of interfaces that should be
-+ * transmitted as untagged.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: set of interfaces that should be transmitted as untagged
-+ *
-+ * These interfaces should already belong to this VLAN.
-+ * By default all interfaces are transmitted as tagged.
-+ * Providing a non-existent interface or an interface that is
-+ * already configured as untagged generates an error and the entire
-+ * command is ignored.
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t vlan_id,
-+ const struct dpsw_vlan_if_cfg *cfg);
-+
-+/**
-+ * dpsw_vlan_add_if_flooding() - Define a set of interfaces that should be
-+ * included in flooding when a frame with an unknown destination
-+ * unicast MAC address arrives.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Set of interfaces that should be used for flooding
-+ *
-+ * These interfaces should belong to this VLAN. By default all
-+ * interfaces are included in the flooding list. Providing
-+ * a non-existent interface or an interface that is already in the
-+ * flooding list generates an error and the entire command is
-+ * ignored.
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_vlan_add_if_flooding(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t vlan_id,
-+ const struct dpsw_vlan_if_cfg *cfg);
-+
-+/**
-+ * dpsw_vlan_remove_if() - Remove interfaces from an existing VLAN.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Set of interfaces that should be removed
-+ *
-+ * Interfaces must belong to this VLAN, otherwise an error
-+ * is returned and the command is ignored
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t vlan_id,
-+ const struct dpsw_vlan_if_cfg *cfg);
-+
-+/**
-+ * dpsw_vlan_remove_if_untagged() - Define a set of interfaces that should be
-+ * converted from being transmitted as untagged to being transmitted as tagged.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: set of interfaces that should be removed
-+ *
-+ * Interfaces provided to this API have to belong to this VLAN and
-+ * be configured as untagged, otherwise an error is returned and the
-+ * command is ignored
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t vlan_id,
-+ const struct dpsw_vlan_if_cfg *cfg);
-+
-+/**
-+ * dpsw_vlan_remove_if_flooding() - Define a set of interfaces that should be
-+ * removed from the flooding list.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: set of interfaces used for flooding
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_vlan_remove_if_flooding(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t vlan_id,
-+ const struct dpsw_vlan_if_cfg *cfg);
-+
-+/**
-+ * dpsw_vlan_remove() - Remove an entire VLAN
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_vlan_remove(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t vlan_id);
-+
-+/**
-+ * struct dpsw_vlan_attr - VLAN attributes
-+ * @fdb_id: Associated FDB ID
-+ * @num_ifs: Number of interfaces
-+ * @num_untagged_ifs: Number of untagged interfaces
-+ * @num_flooding_ifs: Number of flooding interfaces
-+ */
-+struct dpsw_vlan_attr {
-+ uint16_t fdb_id;
-+ uint16_t num_ifs;
-+ uint16_t num_untagged_ifs;
-+ uint16_t num_flooding_ifs;
-+};
-+
-+/**
-+ * dpsw_vlan_get_attributes() - Get VLAN attributes
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @attr: Returned DPSW attributes
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_vlan_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t vlan_id,
-+ struct dpsw_vlan_attr *attr);
-+
-+/**
-+ * dpsw_vlan_get_if() - Get interfaces belonging to this VLAN
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Returned set of interfaces belonging to this VLAN
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_vlan_get_if(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t vlan_id,
-+ struct dpsw_vlan_if_cfg *cfg);
-+
-+/**
-+ * dpsw_vlan_get_if_flooding() - Get interfaces used in flooding for this VLAN
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Returned set of flooding interfaces
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_vlan_get_if_flooding(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t vlan_id,
-+ struct dpsw_vlan_if_cfg *cfg);
-+
-+/**
-+ * dpsw_vlan_get_if_untagged() - Get interfaces that should be transmitted as
-+ * untagged
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Returned set of untagged interfaces
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_vlan_get_if_untagged(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t vlan_id,
-+ struct dpsw_vlan_if_cfg *cfg);
-+
-+/**
-+ * struct dpsw_fdb_cfg - FDB Configuration
-+ * @num_fdb_entries: Number of FDB entries
-+ * @fdb_aging_time: Aging time in seconds
-+ */
-+struct dpsw_fdb_cfg {
-+ uint16_t num_fdb_entries;
-+ uint16_t fdb_aging_time;
-+};
-+
-+/**
-+ * dpsw_fdb_add() - Add an FDB to the switch and return a handle to the FDB
-+ * table for future reference
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @fdb_id: Returned Forwarding Database Identifier
-+ * @cfg: FDB Configuration
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_fdb_add(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t *fdb_id,
-+ const struct dpsw_fdb_cfg *cfg);
-+
-+/**
-+ * dpsw_fdb_remove() - Remove FDB from switch
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_fdb_remove(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t fdb_id);
-+
-+/**
-+ * enum dpsw_fdb_entry_type - FDB Entry type - Static/Dynamic
-+ * @DPSW_FDB_ENTRY_STATIC: Static entry
-+ * @DPSW_FDB_ENTRY_DINAMIC: Dynamic entry
-+ */
-+enum dpsw_fdb_entry_type {
-+ DPSW_FDB_ENTRY_STATIC = 0,
-+ DPSW_FDB_ENTRY_DINAMIC = 1
-+};
-+
-+/**
-+ * struct dpsw_fdb_unicast_cfg - Unicast entry configuration
-+ * @type: Select static or dynamic entry
-+ * @mac_addr: MAC address
-+ * @if_egress: Egress interface ID
-+ */
-+struct dpsw_fdb_unicast_cfg {
-+ enum dpsw_fdb_entry_type type;
-+ uint8_t mac_addr[6];
-+ uint16_t if_egress;
-+};
-+
-+/**
-+ * dpsw_fdb_add_unicast() - Add a unicast entry to the MAC lookup table
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
-+ * @cfg: Unicast entry configuration
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t fdb_id,
-+ const struct dpsw_fdb_unicast_cfg *cfg);
-+
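Creating an FDB and installing a static unicast entry could be combined as in this sketch; the table size, aging time, MAC address and egress interface are illustrative, and mc_io/token are assumed from an earlier open:

static int example_static_unicast(struct fsl_mc_io *mc_io, uint16_t token,
                                  uint16_t *fdb_id)
{
        /* Create an FDB and pin one MAC address to egress interface 1. */
        const struct dpsw_fdb_cfg fdb_cfg = {
                .num_fdb_entries = 1024,
                .fdb_aging_time  = 300,  /* seconds */
        };
        const struct dpsw_fdb_unicast_cfg uc = {
                .type      = DPSW_FDB_ENTRY_STATIC,
                .mac_addr  = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 },
                .if_egress = 1,
        };
        int err;

        err = dpsw_fdb_add(mc_io, 0, token, fdb_id, &fdb_cfg);
        if (err)
                return err;
        return dpsw_fdb_add_unicast(mc_io, 0, token, *fdb_id, &uc);
}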
-+/**
-+ * dpsw_fdb_get_unicast() - Get unicast entry from MAC lookup table by
-+ * unicast Ethernet address
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
-+ * @cfg: Returned unicast entry configuration
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_fdb_get_unicast(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t fdb_id,
-+ struct dpsw_fdb_unicast_cfg *cfg);
-+
-+/**
-+ * dpsw_fdb_remove_unicast() - Remove an entry from the MAC lookup table
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
-+ * @cfg: Unicast entry configuration
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t fdb_id,
-+ const struct dpsw_fdb_unicast_cfg *cfg);
-+
-+/**
-+ * struct dpsw_fdb_multicast_cfg - Multi-cast entry configuration
-+ * @type: Select static or dynamic entry
-+ * @mac_addr: MAC address
-+ * @num_ifs: Number of external and internal interfaces
-+ * @if_id: Egress interface IDs
-+ */
-+struct dpsw_fdb_multicast_cfg {
-+ enum dpsw_fdb_entry_type type;
-+ uint8_t mac_addr[6];
-+ uint16_t num_ifs;
-+ uint16_t if_id[DPSW_MAX_IF];
-+};
-+
-+/**
-+ * dpsw_fdb_add_multicast() - Add a set of egress interfaces to a multi-cast group
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
-+ * @cfg: Multicast entry configuration
-+ *
-+ * If the group does not exist, it will be created.
-+ * It adds only interfaces that do not yet belong to this multicast
-+ * group; otherwise an error is generated and the command is
-+ * ignored.
-+ * This function may be called numerous times, always providing
-+ * the required interface delta.
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t fdb_id,
-+ const struct dpsw_fdb_multicast_cfg *cfg);
-+
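A multicast group is created implicitly on first use, as noted above. A sketch with an illustrative group MAC and two egress interfaces, assuming fdb_id was returned earlier by dpsw_fdb_add():

static int example_mc_group(struct fsl_mc_io *mc_io, uint16_t token,
                            uint16_t fdb_id)
{
        /* Forward frames for 01:00:5e:00:00:01 out of interfaces 0 and 1. */
        const struct dpsw_fdb_multicast_cfg mc = {
                .type     = DPSW_FDB_ENTRY_STATIC,
                .mac_addr = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
                .num_ifs  = 2,
                .if_id    = { 0, 1 },
        };

        return dpsw_fdb_add_multicast(mc_io, 0, token, fdb_id, &mc);
}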
-+/**
-+ * dpsw_fdb_get_multicast() - Read a multi-cast group by its multi-cast Ethernet
-+ * address.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
-+ * @cfg: Returned multicast entry configuration
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_fdb_get_multicast(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t fdb_id,
-+ struct dpsw_fdb_multicast_cfg *cfg);
-+
-+/**
-+ * dpsw_fdb_remove_multicast() - Remove interfaces from an existing multicast
-+ * group.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
-+ * @cfg: Multicast entry configuration
-+ *
-+ * Interfaces provided to this API have to exist in the group,
-+ * otherwise an error will be returned and the entire command
-+ * ignored. If there is no interface left in the group,
-+ * the entire group is deleted
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t fdb_id,
-+ const struct dpsw_fdb_multicast_cfg *cfg);
-+
-+/**
-+ * enum dpsw_fdb_learning_mode - Auto-learning modes
-+ * @DPSW_FDB_LEARNING_MODE_DIS: Disable Auto-learning
-+ * @DPSW_FDB_LEARNING_MODE_HW: Enable HW auto-Learning
-+ * @DPSW_FDB_LEARNING_MODE_NON_SECURE: Enable non-secure learning by CPU
-+ * @DPSW_FDB_LEARNING_MODE_SECURE: Enable secure learning by CPU
-+ *
-+ * NON-SECURE LEARNING
-+ *   SMAC found   DMAC found   CTLU Action
-+ *   v            v            Forward frame to
-+ *                             1. DMAC destination
-+ *   -            v            Forward frame to
-+ *                             1. DMAC destination
-+ *                             2. Control interface
-+ *   v            -            Forward frame to
-+ *                             1. Flooding list of interfaces
-+ *   -            -            Forward frame to
-+ *                             1. Flooding list of interfaces
-+ *                             2. Control interface
-+ * SECURE LEARNING
-+ *   SMAC found   DMAC found   CTLU Action
-+ *   v            v            Forward frame to
-+ *                             1. DMAC destination
-+ *   -            v            Forward frame to
-+ *                             1. Control interface
-+ *   v            -            Forward frame to
-+ *                             1. Flooding list of interfaces
-+ *   -            -            Forward frame to
-+ *                             1. Control interface
-+ */
-+enum dpsw_fdb_learning_mode {
-+ DPSW_FDB_LEARNING_MODE_DIS = 0,
-+ DPSW_FDB_LEARNING_MODE_HW = 1,
-+ DPSW_FDB_LEARNING_MODE_NON_SECURE = 2,
-+ DPSW_FDB_LEARNING_MODE_SECURE = 3
-+};
-+
-+/**
-+ * dpsw_fdb_set_learning_mode() - Define FDB learning mode
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
-+ * @mode: learning mode
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t fdb_id,
-+ enum dpsw_fdb_learning_mode mode);
-+
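Selecting one of the learning modes described by the table above is a single call; this sketch picks secure CPU-assisted learning (fdb_id from an earlier dpsw_fdb_add(), example_ helper hypothetical):

static int example_secure_learning(struct fsl_mc_io *mc_io, uint16_t token,
                                   uint16_t fdb_id)
{
        /* In secure mode, frames whose source MAC is unknown are steered to
         * the control interface, per the table above. */
        return dpsw_fdb_set_learning_mode(mc_io, 0, token, fdb_id,
                                          DPSW_FDB_LEARNING_MODE_SECURE);
}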
-+/**
-+ * struct dpsw_fdb_attr - FDB Attributes
-+ * @max_fdb_entries: Number of FDB entries
-+ * @fdb_aging_time: Aging time in seconds
-+ * @learning_mode: Learning mode
-+ * @num_fdb_mc_groups: Current number of multicast groups
-+ * @max_fdb_mc_groups: Maximum number of multicast groups
-+ */
-+struct dpsw_fdb_attr {
-+ uint16_t max_fdb_entries;
-+ uint16_t fdb_aging_time;
-+ enum dpsw_fdb_learning_mode learning_mode;
-+ uint16_t num_fdb_mc_groups;
-+ uint16_t max_fdb_mc_groups;
-+};
-+
-+/**
-+ * dpsw_fdb_get_attributes() - Get FDB attributes
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
-+ * @attr: Returned FDB attributes
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_fdb_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t fdb_id,
-+ struct dpsw_fdb_attr *attr);
-+
-+/**
-+ * struct dpsw_acl_cfg - ACL Configuration
-+ * @max_entries: Maximum number of ACL entries
-+ */
-+struct dpsw_acl_cfg {
-+ uint16_t max_entries;
-+};
-+
-+/**
-+ * struct dpsw_acl_fields - ACL fields.
-+ * @l2_dest_mac: Destination MAC address: BPDU, Multicast, Broadcast, Unicast,
-+ * slow protocols, MVRP, STP
-+ * @l2_source_mac: Source MAC address
-+ * @l2_tpid: Layer 2 (Ethernet) protocol type, used to identify the following
-+ * protocols: MPLS, PTP, PFC, ARP, Jumbo frames, LLDP, IEEE802.1ae,
-+ * Q-in-Q, IPv4, IPv6, PPPoE
-+ * @l2_pcp_dei: Priority Code Point (PCP) and Drop Eligibility Indicator (DEI)
-+ * @l2_vlan_id: layer 2 VLAN ID
-+ * @l2_ether_type: layer 2 Ethernet type
-+ * @l3_dscp: Layer 3 differentiated services code point
-+ * @l3_protocol: Tells the network layer at the destination host to which
-+ * protocol this packet belongs. The following protocols are
-+ * supported: ICMP, IGMP, IPv4 (encapsulation), TCP, IPv6
-+ * (encapsulation), GRE, PTP
-+ * @l3_source_ip: Source IPv4 IP
-+ * @l3_dest_ip: Destination IPv4 IP
-+ * @l4_source_port: Source TCP/UDP Port
-+ * @l4_dest_port: Destination TCP/UDP Port
-+ */
-+struct dpsw_acl_fields {
-+ uint8_t l2_dest_mac[6];
-+ uint8_t l2_source_mac[6];
-+ uint16_t l2_tpid;
-+ uint8_t l2_pcp_dei;
-+ uint16_t l2_vlan_id;
-+ uint16_t l2_ether_type;
-+ uint8_t l3_dscp;
-+ uint8_t l3_protocol;
-+ uint32_t l3_source_ip;
-+ uint32_t l3_dest_ip;
-+ uint16_t l4_source_port;
-+ uint16_t l4_dest_port;
-+};
-+
-+/**
-+ * struct dpsw_acl_key - ACL key
-+ * @match: Match fields
-+ * @mask: Mask: b'1 - valid, b'0 don't care
-+ */
-+struct dpsw_acl_key {
-+ struct dpsw_acl_fields match;
-+ struct dpsw_acl_fields mask;
-+};
-+
-+/**
-+ * enum dpsw_acl_action
-+ * @DPSW_ACL_ACTION_DROP: Drop frame
-+ * @DPSW_ACL_ACTION_REDIRECT: Redirect to certain port
-+ * @DPSW_ACL_ACTION_ACCEPT: Accept frame
-+ * @DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF: Redirect to control interface
-+ */
-+enum dpsw_acl_action {
-+ DPSW_ACL_ACTION_DROP,
-+ DPSW_ACL_ACTION_REDIRECT,
-+ DPSW_ACL_ACTION_ACCEPT,
-+ DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF
-+};
-+
-+/**
-+ * struct dpsw_acl_result - ACL action
-+ * @action: Action to be taken when the ACL entry is hit
-+ * @if_id: Interface ID to redirect the frame to. Valid only if a redirect
-+ * action is selected
-+ */
-+struct dpsw_acl_result {
-+ enum dpsw_acl_action action;
-+ uint16_t if_id;
-+};
-+
-+/**
-+ * struct dpsw_acl_entry_cfg - ACL entry
-+ * @key_iova: I/O virtual address of DMA-able memory filled with key after call
-+ * to dpsw_acl_prepare_entry_cfg()
-+ * @result: Required action when entry hit occurs
-+ * @precedence: Precedence inside the ACL; 0 is lowest. This priority cannot
-+ * change during the lifetime of a policy. It is the user's
-+ * responsibility to space the priorities according to subsequent
-+ * rule additions.
-+ */
-+struct dpsw_acl_entry_cfg {
-+ uint64_t key_iova;
-+ struct dpsw_acl_result result;
-+ int precedence;
-+};
-+
-+/**
-+ * dpsw_acl_add() - Adds ACL to L2 switch.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @acl_id: Returned ACL ID, for the future reference
-+ * @cfg: ACL configuration
-+ *
-+ * Create an Access Control List. Multiple ACLs can be created and
-+ * co-exist in the L2 switch
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_acl_add(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t *acl_id,
-+ const struct dpsw_acl_cfg *cfg);
-+
-+/**
-+ * dpsw_acl_remove() - Removes ACL from L2 switch.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @acl_id: ACL ID
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_acl_remove(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t acl_id);
-+
-+/**
-+ * dpsw_acl_prepare_entry_cfg() - Prepare an ACL entry configuration.
-+ * @key: key
-+ * @entry_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA
-+ *
-+ * This function has to be called before adding or removing an ACL entry
-+ *
-+ */
-+void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key,
-+ uint8_t *entry_cfg_buf);
-+
-+/**
-+ * dpsw_acl_add_entry() - Adds an entry to ACL.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @acl_id: ACL ID
-+ * @cfg: entry configuration
-+ *
-+ * warning: This function has to be called after dpsw_acl_prepare_entry_cfg()
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_acl_add_entry(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t acl_id,
-+ const struct dpsw_acl_entry_cfg *cfg);
-+
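Putting the ACL pieces together (create the ACL, serialize a key into a zeroed 256-byte buffer with dpsw_acl_prepare_entry_cfg(), then add the entry) might look like the sketch below. The caller is assumed to provide the DMA buffer and its I/O virtual address, and the matched MAC is illustrative. Note that the ACL still has to be bound to interfaces with dpsw_acl_add_if() before it takes effect.

static int example_acl_drop_dmac(struct fsl_mc_io *mc_io, uint16_t token,
                                 uint8_t *key_buf, uint64_t key_iova,
                                 uint16_t *acl_id)
{
        /* key_buf/key_iova: caller-provided zeroed 256-byte DMA buffer and
         * its IOVA (assumed). Drop every frame sent to 00:11:22:33:44:55. */
        const struct dpsw_acl_cfg acl_cfg = { .max_entries = 16 };
        const struct dpsw_acl_key key = {
                .match = { .l2_dest_mac = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } },
                .mask  = { .l2_dest_mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } },
        };
        struct dpsw_acl_entry_cfg entry = {
                .key_iova   = key_iova,
                .result     = { .action = DPSW_ACL_ACTION_DROP },
                .precedence = 0,
        };
        int err;

        err = dpsw_acl_add(mc_io, 0, token, acl_id, &acl_cfg);
        if (err)
                return err;
        dpsw_acl_prepare_entry_cfg(&key, key_buf);
        return dpsw_acl_add_entry(mc_io, 0, token, *acl_id, &entry);
}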
-+/**
-+ * dpsw_acl_remove_entry() - Removes an entry from ACL.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @acl_id: ACL ID
-+ * @cfg: entry configuration
-+ *
-+ * warning: This function has to be called after dpsw_acl_prepare_entry_cfg()
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t acl_id,
-+ const struct dpsw_acl_entry_cfg *cfg);
-+
-+/**
-+ * struct dpsw_acl_if_cfg - List of interfaces to Associate with ACL
-+ * @num_ifs: Number of interfaces
-+ * @if_id: List of interfaces
-+ */
-+struct dpsw_acl_if_cfg {
-+ uint16_t num_ifs;
-+ uint16_t if_id[DPSW_MAX_IF];
-+};
-+
-+/**
-+ * dpsw_acl_add_if() - Associate interface/interfaces with ACL.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @acl_id: ACL ID
-+ * @cfg: interfaces list
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_acl_add_if(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t acl_id,
-+ const struct dpsw_acl_if_cfg *cfg);
-+
-+/**
-+ * dpsw_acl_remove_if() - De-associate interface/interfaces from ACL.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @acl_id: ACL ID
-+ * @cfg: interfaces list
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_acl_remove_if(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t acl_id,
-+ const struct dpsw_acl_if_cfg *cfg);
-+
-+/**
-+ * struct dpsw_acl_attr - ACL Attributes
-+ * @max_entries: Max number of ACL entries
-+ * @num_entries: Number of used ACL entries
-+ * @num_ifs: Number of interfaces associated with ACL
-+ */
-+struct dpsw_acl_attr {
-+ uint16_t max_entries;
-+ uint16_t num_entries;
-+ uint16_t num_ifs;
-+};
-+
-+/**
-+* dpsw_acl_get_attributes() - Get ACL attributes
-+* @mc_io: Pointer to MC portal's I/O object
-+* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+* @token: Token of DPSW object
-+* @acl_id: ACL Identifier
-+* @attr: Returned ACL attributes
-+*
-+* Return: '0' on Success; Error code otherwise.
-+*/
-+int dpsw_acl_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint16_t acl_id,
-+ struct dpsw_acl_attr *attr);
-+/**
-+* struct dpsw_ctrl_if_attr - Control interface attributes
-+* @rx_fqid: Receive FQID
-+* @rx_err_fqid: Receive error FQID
-+* @tx_err_conf_fqid: Transmit error and confirmation FQID
-+*/
-+struct dpsw_ctrl_if_attr {
-+ uint32_t rx_fqid;
-+ uint32_t rx_err_fqid;
-+ uint32_t tx_err_conf_fqid;
-+};
-+
-+/**
-+* dpsw_ctrl_if_get_attributes() - Obtain control interface attributes
-+* @mc_io: Pointer to MC portal's I/O object
-+* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+* @token: Token of DPSW object
-+* @attr: Returned control interface attributes
-+*
-+* Return: '0' on Success; Error code otherwise.
-+*/
-+int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ struct dpsw_ctrl_if_attr *attr);
-+
-+/**
-+ * Maximum number of DPBP
-+ */
-+#define DPSW_MAX_DPBP 8
-+
-+/**
-+ * struct dpsw_ctrl_if_pools_cfg - Control interface buffer pools configuration
-+ * @num_dpbp: Number of DPBPs
-+ * @pools: Array of buffer pools parameters; The number of valid entries
-+ * must match 'num_dpbp' value
-+ */
-+struct dpsw_ctrl_if_pools_cfg {
-+ uint8_t num_dpbp;
-+ /**
-+ * struct pools - Buffer pools parameters
-+ * @dpbp_id: DPBP object ID
-+ * @buffer_size: Buffer size
-+ * @backup_pool: Backup pool
-+ */
-+ struct {
-+ int dpbp_id;
-+ uint16_t buffer_size;
-+ int backup_pool;
-+ } pools[DPSW_MAX_DPBP];
-+};
-+
-+/**
-+* dpsw_ctrl_if_set_pools() - Set control interface buffer pools
-+* @mc_io: Pointer to MC portal's I/O object
-+* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+* @token: Token of DPSW object
-+* @cfg: buffer pools configuration
-+*
-+* Return: '0' on Success; Error code otherwise.
-+*/
-+int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ const struct dpsw_ctrl_if_pools_cfg *cfg);
-+
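Bringing up the control interface per the declarations above takes two calls: attach buffer pools, then enable. In this sketch the DPBP object ID and buffer size are illustrative, and mc_io/token are assumed from an earlier open of the DPSW object:

static int example_ctrl_if_up(struct fsl_mc_io *mc_io, uint16_t token)
{
        /* One pool of 2048-byte buffers backing the control interface. */
        const struct dpsw_ctrl_if_pools_cfg pools = {
                .num_dpbp = 1,
                .pools = { { .dpbp_id = 7, .buffer_size = 2048, .backup_pool = 0 } },
        };
        int err;

        err = dpsw_ctrl_if_set_pools(mc_io, 0, token, &pools);
        if (err)
                return err;
        return dpsw_ctrl_if_enable(mc_io, 0, token);
}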
-+/**
-+* dpsw_ctrl_if_enable() - Enable control interface
-+* @mc_io: Pointer to MC portal's I/O object
-+* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+* @token: Token of DPSW object
-+*
-+* Return: '0' on Success; Error code otherwise.
-+*/
-+int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+* dpsw_ctrl_if_disable() - Function disables control interface
-+* @mc_io: Pointer to MC portal's I/O object
-+* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+* @token: Token of DPSW object
-+*
-+* Return: '0' on Success; Error code otherwise.
-+*/
-+int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+#endif /* __FSL_DPSW_H */
-diff --git a/drivers/net/dpaa2/mc/fsl_dpsw_cmd.h b/drivers/net/dpaa2/mc/fsl_dpsw_cmd.h
-new file mode 100644
-index 0000000..c65fe38
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/fsl_dpsw_cmd.h
-@@ -0,0 +1,916 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef __FSL_DPSW_CMD_H
-+#define __FSL_DPSW_CMD_H
-+
-+/* DPSW Version */
-+#define DPSW_VER_MAJOR 7
-+#define DPSW_VER_MINOR 0
-+
-+/* Command IDs */
-+#define DPSW_CMDID_CLOSE 0x800
-+#define DPSW_CMDID_OPEN 0x802
-+#define DPSW_CMDID_CREATE 0x902
-+#define DPSW_CMDID_DESTROY 0x900
-+
-+#define DPSW_CMDID_ENABLE 0x002
-+#define DPSW_CMDID_DISABLE 0x003
-+#define DPSW_CMDID_GET_ATTR 0x004
-+#define DPSW_CMDID_RESET 0x005
-+#define DPSW_CMDID_IS_ENABLED 0x006
-+
-+#define DPSW_CMDID_SET_IRQ 0x010
-+#define DPSW_CMDID_GET_IRQ 0x011
-+#define DPSW_CMDID_SET_IRQ_ENABLE 0x012
-+#define DPSW_CMDID_GET_IRQ_ENABLE 0x013
-+#define DPSW_CMDID_SET_IRQ_MASK 0x014
-+#define DPSW_CMDID_GET_IRQ_MASK 0x015
-+#define DPSW_CMDID_GET_IRQ_STATUS 0x016
-+#define DPSW_CMDID_CLEAR_IRQ_STATUS 0x017
-+
-+#define DPSW_CMDID_SET_REFLECTION_IF 0x022
-+
-+#define DPSW_CMDID_ADD_CUSTOM_TPID 0x024
-+
-+#define DPSW_CMDID_REMOVE_CUSTOM_TPID 0x026
-+
-+#define DPSW_CMDID_IF_SET_TCI 0x030
-+#define DPSW_CMDID_IF_SET_STP 0x031
-+#define DPSW_CMDID_IF_SET_ACCEPTED_FRAMES 0x032
-+#define DPSW_CMDID_SET_IF_ACCEPT_ALL_VLAN 0x033
-+#define DPSW_CMDID_IF_GET_COUNTER 0x034
-+#define DPSW_CMDID_IF_SET_COUNTER 0x035
-+#define DPSW_CMDID_IF_SET_TX_SELECTION 0x036
-+#define DPSW_CMDID_IF_ADD_REFLECTION 0x037
-+#define DPSW_CMDID_IF_REMOVE_REFLECTION 0x038
-+#define DPSW_CMDID_IF_SET_FLOODING_METERING 0x039
-+#define DPSW_CMDID_IF_SET_METERING 0x03A
-+#define DPSW_CMDID_IF_SET_EARLY_DROP 0x03B
-+
-+#define DPSW_CMDID_IF_ENABLE 0x03D
-+#define DPSW_CMDID_IF_DISABLE 0x03E
-+
-+#define DPSW_CMDID_IF_GET_ATTR 0x042
-+
-+#define DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH 0x044
-+#define DPSW_CMDID_IF_GET_MAX_FRAME_LENGTH 0x045
-+#define DPSW_CMDID_IF_GET_LINK_STATE 0x046
-+#define DPSW_CMDID_IF_SET_FLOODING 0x047
-+#define DPSW_CMDID_IF_SET_BROADCAST 0x048
-+#define DPSW_CMDID_IF_SET_MULTICAST 0x049
-+#define DPSW_CMDID_IF_GET_TCI 0x04A
-+
-+#define DPSW_CMDID_IF_SET_LINK_CFG 0x04C
-+
-+#define DPSW_CMDID_VLAN_ADD 0x060
-+#define DPSW_CMDID_VLAN_ADD_IF 0x061
-+#define DPSW_CMDID_VLAN_ADD_IF_UNTAGGED 0x062
-+#define DPSW_CMDID_VLAN_ADD_IF_FLOODING 0x063
-+#define DPSW_CMDID_VLAN_REMOVE_IF 0x064
-+#define DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED 0x065
-+#define DPSW_CMDID_VLAN_REMOVE_IF_FLOODING 0x066
-+#define DPSW_CMDID_VLAN_REMOVE 0x067
-+#define DPSW_CMDID_VLAN_GET_IF 0x068
-+#define DPSW_CMDID_VLAN_GET_IF_FLOODING 0x069
-+#define DPSW_CMDID_VLAN_GET_IF_UNTAGGED 0x06A
-+#define DPSW_CMDID_VLAN_GET_ATTRIBUTES 0x06B
-+
-+#define DPSW_CMDID_FDB_GET_MULTICAST 0x080
-+#define DPSW_CMDID_FDB_GET_UNICAST 0x081
-+#define DPSW_CMDID_FDB_ADD 0x082
-+#define DPSW_CMDID_FDB_REMOVE 0x083
-+#define DPSW_CMDID_FDB_ADD_UNICAST 0x084
-+#define DPSW_CMDID_FDB_REMOVE_UNICAST 0x085
-+#define DPSW_CMDID_FDB_ADD_MULTICAST 0x086
-+#define DPSW_CMDID_FDB_REMOVE_MULTICAST 0x087
-+#define DPSW_CMDID_FDB_SET_LEARNING_MODE 0x088
-+#define DPSW_CMDID_FDB_GET_ATTR 0x089
-+
-+#define DPSW_CMDID_ACL_ADD 0x090
-+#define DPSW_CMDID_ACL_REMOVE 0x091
-+#define DPSW_CMDID_ACL_ADD_ENTRY 0x092
-+#define DPSW_CMDID_ACL_REMOVE_ENTRY 0x093
-+#define DPSW_CMDID_ACL_ADD_IF 0x094
-+#define DPSW_CMDID_ACL_REMOVE_IF 0x095
-+#define DPSW_CMDID_ACL_GET_ATTR 0x096
-+
-+#define DPSW_CMDID_CTRL_IF_GET_ATTR 0x0A0
-+#define DPSW_CMDID_CTRL_IF_SET_POOLS 0x0A1
-+#define DPSW_CMDID_CTRL_IF_ENABLE 0x0A2
-+#define DPSW_CMDID_CTRL_IF_DISABLE 0x0A3
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_OPEN(cmd, dpsw_id) \
-+ MC_CMD_OP(cmd, 0, 0, 32, int, dpsw_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_CREATE(cmd, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, cfg->num_ifs);\
-+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->adv.max_fdbs);\
-+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->adv.max_meters_per_if);\
-+ MC_CMD_OP(cmd, 0, 32, 4, enum dpsw_component_type, \
-+ cfg->adv.component_type);\
-+ MC_CMD_OP(cmd, 1, 0, 16, uint16_t, cfg->adv.max_vlans);\
-+ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, cfg->adv.max_fdb_entries);\
-+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, cfg->adv.fdb_aging_time);\
-+ MC_CMD_OP(cmd, 1, 48, 16, uint16_t, cfg->adv.max_fdb_mc_groups);\
-+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->adv.options);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_RSP_IS_ENABLED(cmd, en) \
-+ MC_RSP_OP(cmd, 0, 0, 1, int, en)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
-+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
-+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_GET_IRQ(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_RSP_GET_IRQ(cmd, type, irq_cfg) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
-+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
-+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
-+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_SET_IRQ_ENABLE(cmd, irq_index, enable_state) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, enable_state); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_RSP_GET_IRQ_ENABLE(cmd, enable_state) \
-+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, enable_state)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_GET_IRQ_MASK(cmd, irq_index) \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_RSP_GET_IRQ_MASK(cmd, mask) \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_RSP_GET_IRQ_STATUS(cmd, status) \
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_RSP_GET_ATTR(cmd, attr) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, attr->num_ifs);\
-+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, attr->max_fdbs);\
-+ MC_RSP_OP(cmd, 0, 24, 8, uint8_t, attr->num_fdbs);\
-+ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->max_vlans);\
-+ MC_RSP_OP(cmd, 0, 48, 16, uint16_t, attr->num_vlans);\
-+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\
-+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\
-+ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, attr->max_fdb_entries);\
-+ MC_RSP_OP(cmd, 1, 48, 16, uint16_t, attr->fdb_aging_time);\
-+ MC_RSP_OP(cmd, 2, 0, 32, int, attr->id);\
-+ MC_RSP_OP(cmd, 2, 32, 16, uint16_t, attr->mem_size);\
-+ MC_RSP_OP(cmd, 2, 48, 16, uint16_t, attr->max_fdb_mc_groups);\
-+ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, attr->options);\
-+ MC_RSP_OP(cmd, 4, 0, 8, uint8_t, attr->max_meters_per_if);\
-+ MC_RSP_OP(cmd, 4, 8, 4, enum dpsw_component_type, \
-+ attr->component_type);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_SET_REFLECTION_IF(cmd, if_id) \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_IF_SET_FLOODING(cmd, if_id, en) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
-+ MC_CMD_OP(cmd, 0, 16, 1, int, en);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_IF_SET_BROADCAST(cmd, if_id, en) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
-+ MC_CMD_OP(cmd, 0, 16, 1, int, en);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_IF_SET_MULTICAST(cmd, if_id, en) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
-+ MC_CMD_OP(cmd, 0, 16, 1, int, en);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_IF_SET_TCI(cmd, if_id, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
-+ MC_CMD_OP(cmd, 0, 16, 12, uint16_t, cfg->vlan_id);\
-+ MC_CMD_OP(cmd, 0, 28, 1, uint8_t, cfg->dei);\
-+ MC_CMD_OP(cmd, 0, 29, 3, uint8_t, cfg->pcp);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_IF_GET_TCI(cmd, if_id) \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_RSP_IF_GET_TCI(cmd, cfg) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, cfg->vlan_id);\
-+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, cfg->dei);\
-+ MC_RSP_OP(cmd, 0, 40, 8, uint8_t, cfg->pcp);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_IF_SET_STP(cmd, if_id, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
-+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->vlan_id);\
-+ MC_CMD_OP(cmd, 0, 32, 4, enum dpsw_stp_state, cfg->state);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_IF_SET_ACCEPTED_FRAMES(cmd, if_id, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
-+ MC_CMD_OP(cmd, 0, 16, 4, enum dpsw_accepted_frames, cfg->type);\
-+ MC_CMD_OP(cmd, 0, 20, 4, enum dpsw_action, cfg->unaccept_act);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_IF_SET_ACCEPT_ALL_VLAN(cmd, if_id, accept_all) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
-+ MC_CMD_OP(cmd, 0, 16, 1, int, accept_all);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_IF_GET_COUNTER(cmd, if_id, type) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
-+ MC_CMD_OP(cmd, 0, 16, 5, enum dpsw_counter, type);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_RSP_IF_GET_COUNTER(cmd, counter) \
-+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, counter)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_IF_SET_COUNTER(cmd, if_id, type, counter) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
-+ MC_CMD_OP(cmd, 0, 16, 5, enum dpsw_counter, type);\
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, counter);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_IF_SET_TX_SELECTION(cmd, if_id, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
-+ MC_CMD_OP(cmd, 0, 16, 3, enum dpsw_priority_selector, \
-+ cfg->priority_selector);\
-+ MC_CMD_OP(cmd, 1, 0, 8, uint8_t, cfg->tc_id[0]);\
-+ MC_CMD_OP(cmd, 1, 8, 8, uint8_t, cfg->tc_id[1]);\
-+ MC_CMD_OP(cmd, 1, 16, 8, uint8_t, cfg->tc_id[2]);\
-+ MC_CMD_OP(cmd, 1, 24, 8, uint8_t, cfg->tc_id[3]);\
-+ MC_CMD_OP(cmd, 1, 32, 8, uint8_t, cfg->tc_id[4]);\
-+ MC_CMD_OP(cmd, 1, 40, 8, uint8_t, cfg->tc_id[5]);\
-+ MC_CMD_OP(cmd, 1, 48, 8, uint8_t, cfg->tc_id[6]);\
-+ MC_CMD_OP(cmd, 1, 56, 8, uint8_t, cfg->tc_id[7]);\
-+ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->tc_sched[0].delta_bandwidth);\
-+ MC_CMD_OP(cmd, 2, 16, 4, enum dpsw_schedule_mode, \
-+ cfg->tc_sched[0].mode);\
-+ MC_CMD_OP(cmd, 2, 32, 16, uint16_t, cfg->tc_sched[1].delta_bandwidth);\
-+ MC_CMD_OP(cmd, 2, 48, 4, enum dpsw_schedule_mode, \
-+ cfg->tc_sched[1].mode);\
-+ MC_CMD_OP(cmd, 3, 0, 16, uint16_t, cfg->tc_sched[2].delta_bandwidth);\
-+ MC_CMD_OP(cmd, 3, 16, 4, enum dpsw_schedule_mode, \
-+ cfg->tc_sched[2].mode);\
-+ MC_CMD_OP(cmd, 3, 32, 16, uint16_t, cfg->tc_sched[3].delta_bandwidth);\
-+ MC_CMD_OP(cmd, 3, 48, 4, enum dpsw_schedule_mode, \
-+ cfg->tc_sched[3].mode);\
-+ MC_CMD_OP(cmd, 4, 0, 16, uint16_t, cfg->tc_sched[4].delta_bandwidth);\
-+ MC_CMD_OP(cmd, 4, 16, 4, enum dpsw_schedule_mode, \
-+ cfg->tc_sched[4].mode);\
-+ MC_CMD_OP(cmd, 4, 32, 16, uint16_t, cfg->tc_sched[5].delta_bandwidth);\
-+ MC_CMD_OP(cmd, 4, 48, 4, enum dpsw_schedule_mode, \
-+ cfg->tc_sched[5].mode);\
-+ MC_CMD_OP(cmd, 5, 0, 16, uint16_t, cfg->tc_sched[6].delta_bandwidth);\
-+ MC_CMD_OP(cmd, 5, 16, 4, enum dpsw_schedule_mode, \
-+ cfg->tc_sched[6].mode);\
-+ MC_CMD_OP(cmd, 5, 32, 16, uint16_t, cfg->tc_sched[7].delta_bandwidth);\
-+ MC_CMD_OP(cmd, 5, 48, 4, enum dpsw_schedule_mode, \
-+ cfg->tc_sched[7].mode);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_IF_ADD_REFLECTION(cmd, if_id, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
-+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->vlan_id);\
-+ MC_CMD_OP(cmd, 0, 32, 2, enum dpsw_reflection_filter, cfg->filter);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_IF_REMOVE_REFLECTION(cmd, if_id, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
-+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->vlan_id);\
-+ MC_CMD_OP(cmd, 0, 32, 2, enum dpsw_reflection_filter, cfg->filter);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_IF_SET_FLOODING_METERING(cmd, if_id, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
-+ MC_CMD_OP(cmd, 0, 24, 4, enum dpsw_metering_mode, cfg->mode);\
-+ MC_CMD_OP(cmd, 0, 28, 4, enum dpsw_metering_unit, cfg->units);\
-+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, cfg->cir);\
-+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->eir);\
-+ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->cbs);\
-+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->ebs);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_IF_SET_METERING(cmd, if_id, tc_id, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
-+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id);\
-+ MC_CMD_OP(cmd, 0, 24, 4, enum dpsw_metering_mode, cfg->mode);\
-+ MC_CMD_OP(cmd, 0, 28, 4, enum dpsw_metering_unit, cfg->units);\
-+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, cfg->cir);\
-+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->eir);\
-+ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->cbs);\
-+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->ebs);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_PREP_EARLY_DROP(ext, cfg) \
-+do { \
-+ MC_PREP_OP(ext, 0, 0, 2, enum dpsw_early_drop_mode, cfg->drop_mode); \
-+ MC_PREP_OP(ext, 0, 2, 2, \
-+ enum dpsw_early_drop_unit, cfg->units); \
-+ MC_PREP_OP(ext, 0, 32, 32, uint32_t, cfg->tail_drop_threshold); \
-+ MC_PREP_OP(ext, 1, 0, 8, uint8_t, cfg->green.drop_probability); \
-+ MC_PREP_OP(ext, 2, 0, 64, uint64_t, cfg->green.max_threshold); \
-+ MC_PREP_OP(ext, 3, 0, 64, uint64_t, cfg->green.min_threshold); \
-+ MC_PREP_OP(ext, 5, 0, 8, uint8_t, cfg->yellow.drop_probability);\
-+ MC_PREP_OP(ext, 6, 0, 64, uint64_t, cfg->yellow.max_threshold); \
-+ MC_PREP_OP(ext, 7, 0, 64, uint64_t, cfg->yellow.min_threshold); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_EXT_EARLY_DROP(ext, cfg) \
-+do { \
-+ MC_EXT_OP(ext, 0, 0, 2, enum dpsw_early_drop_mode, cfg->drop_mode); \
-+ MC_EXT_OP(ext, 0, 2, 2, \
-+ enum dpsw_early_drop_unit, cfg->units); \
-+ MC_EXT_OP(ext, 0, 32, 32, uint32_t, cfg->tail_drop_threshold); \
-+ MC_EXT_OP(ext, 1, 0, 8, uint8_t, cfg->green.drop_probability); \
-+ MC_EXT_OP(ext, 2, 0, 64, uint64_t, cfg->green.max_threshold); \
-+ MC_EXT_OP(ext, 3, 0, 64, uint64_t, cfg->green.min_threshold); \
-+ MC_EXT_OP(ext, 5, 0, 8, uint8_t, cfg->yellow.drop_probability);\
-+ MC_EXT_OP(ext, 6, 0, 64, uint64_t, cfg->yellow.max_threshold); \
-+ MC_EXT_OP(ext, 7, 0, 64, uint64_t, cfg->yellow.min_threshold); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_IF_SET_EARLY_DROP(cmd, if_id, tc_id, early_drop_iova) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \
-+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, if_id); \
-+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_ADD_CUSTOM_TPID(cmd, cfg) \
-+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->tpid)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_REMOVE_CUSTOM_TPID(cmd, cfg) \
-+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->tpid)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_IF_ENABLE(cmd, if_id) \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_IF_DISABLE(cmd, if_id) \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_IF_GET_ATTR(cmd, if_id) \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_RSP_IF_GET_ATTR(cmd, attr) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 0, 4, enum dpsw_accepted_frames, \
-+ attr->admit_untagged);\
-+ MC_RSP_OP(cmd, 0, 5, 1, int, attr->enabled);\
-+ MC_RSP_OP(cmd, 0, 6, 1, int, attr->accept_all_vlan);\
-+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, attr->num_tcs);\
-+ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->qdid);\
-+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->options);\
-+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->rate);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_IF_SET_MAX_FRAME_LENGTH(cmd, if_id, frame_length) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
-+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, frame_length);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_IF_GET_MAX_FRAME_LENGTH(cmd, if_id) \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_RSP_IF_GET_MAX_FRAME_LENGTH(cmd, frame_length) \
-+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, frame_length)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_IF_SET_LINK_CFG(cmd, if_id, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id);\
-+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->rate);\
-+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->options);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_IF_GET_LINK_STATE(cmd, if_id) \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, if_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_RSP_IF_GET_LINK_STATE(cmd, state) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 32, 1, int, state->up);\
-+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, state->rate);\
-+ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, state->options);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_VLAN_ADD(cmd, vlan_id, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, cfg->fdb_id);\
-+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_VLAN_ADD_IF(cmd, vlan_id) \
-+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_VLAN_ADD_IF_UNTAGGED(cmd, vlan_id) \
-+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_VLAN_ADD_IF_FLOODING(cmd, vlan_id) \
-+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_VLAN_REMOVE_IF(cmd, vlan_id) \
-+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_VLAN_REMOVE_IF_UNTAGGED(cmd, vlan_id) \
-+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_VLAN_REMOVE_IF_FLOODING(cmd, vlan_id) \
-+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_VLAN_REMOVE(cmd, vlan_id) \
-+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, vlan_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_VLAN_GET_ATTR(cmd, vlan_id) \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, vlan_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_RSP_VLAN_GET_ATTR(cmd, attr) \
-+do { \
-+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->fdb_id); \
-+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->num_ifs); \
-+ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, attr->num_untagged_ifs); \
-+ MC_RSP_OP(cmd, 1, 48, 16, uint16_t, attr->num_flooding_ifs); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_VLAN_GET_IF(cmd, vlan_id) \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, vlan_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_RSP_VLAN_GET_IF(cmd, cfg) \
-+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_VLAN_GET_IF_FLOODING(cmd, vlan_id) \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, vlan_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_RSP_VLAN_GET_IF_FLOODING(cmd, cfg) \
-+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_VLAN_GET_IF_UNTAGGED(cmd, vlan_id) \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, vlan_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_RSP_VLAN_GET_IF_UNTAGGED(cmd, cfg) \
-+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_FDB_ADD(cmd, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, cfg->fdb_aging_time);\
-+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, cfg->num_fdb_entries);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_RSP_FDB_ADD(cmd, fdb_id) \
-+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, fdb_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_FDB_REMOVE(cmd, fdb_id) \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_FDB_ADD_UNICAST(cmd, fdb_id, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\
-+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->mac_addr[5]);\
-+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->mac_addr[4]);\
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->mac_addr[3]);\
-+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->mac_addr[2]);\
-+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->mac_addr[1]);\
-+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->mac_addr[0]);\
-+ MC_CMD_OP(cmd, 1, 0, 8, uint16_t, cfg->if_egress);\
-+ MC_CMD_OP(cmd, 1, 16, 4, enum dpsw_fdb_entry_type, cfg->type);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_FDB_GET_UNICAST(cmd, fdb_id) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\
-+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->mac_addr[5]);\
-+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->mac_addr[4]);\
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->mac_addr[3]);\
-+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->mac_addr[2]);\
-+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->mac_addr[1]);\
-+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->mac_addr[0]);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_RSP_FDB_GET_UNICAST(cmd, cfg) \
-+do { \
-+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, cfg->if_egress);\
-+ MC_RSP_OP(cmd, 1, 16, 4, enum dpsw_fdb_entry_type, cfg->type);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_FDB_REMOVE_UNICAST(cmd, fdb_id, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\
-+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->mac_addr[5]);\
-+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->mac_addr[4]);\
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->mac_addr[3]);\
-+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->mac_addr[2]);\
-+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->mac_addr[1]);\
-+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->mac_addr[0]);\
-+ MC_CMD_OP(cmd, 1, 0, 16, uint16_t, cfg->if_egress);\
-+ MC_CMD_OP(cmd, 1, 16, 4, enum dpsw_fdb_entry_type, cfg->type);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_FDB_ADD_MULTICAST(cmd, fdb_id, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\
-+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs);\
-+ MC_CMD_OP(cmd, 0, 32, 4, enum dpsw_fdb_entry_type, cfg->type);\
-+ MC_CMD_OP(cmd, 1, 0, 8, uint8_t, cfg->mac_addr[5]);\
-+ MC_CMD_OP(cmd, 1, 8, 8, uint8_t, cfg->mac_addr[4]);\
-+ MC_CMD_OP(cmd, 1, 16, 8, uint8_t, cfg->mac_addr[3]);\
-+ MC_CMD_OP(cmd, 1, 24, 8, uint8_t, cfg->mac_addr[2]);\
-+ MC_CMD_OP(cmd, 1, 32, 8, uint8_t, cfg->mac_addr[1]);\
-+ MC_CMD_OP(cmd, 1, 40, 8, uint8_t, cfg->mac_addr[0]);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_FDB_GET_MULTICAST(cmd, fdb_id) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\
-+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->mac_addr[5]);\
-+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->mac_addr[4]);\
-+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->mac_addr[3]);\
-+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->mac_addr[2]);\
-+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->mac_addr[1]);\
-+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->mac_addr[0]);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_RSP_FDB_GET_MULTICAST(cmd, cfg) \
-+do { \
-+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, cfg->num_ifs);\
-+ MC_RSP_OP(cmd, 1, 16, 4, enum dpsw_fdb_entry_type, cfg->type);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_FDB_REMOVE_MULTICAST(cmd, fdb_id, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\
-+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs);\
-+ MC_CMD_OP(cmd, 0, 32, 4, enum dpsw_fdb_entry_type, cfg->type);\
-+ MC_CMD_OP(cmd, 1, 0, 8, uint8_t, cfg->mac_addr[5]);\
-+ MC_CMD_OP(cmd, 1, 8, 8, uint8_t, cfg->mac_addr[4]);\
-+ MC_CMD_OP(cmd, 1, 16, 8, uint8_t, cfg->mac_addr[3]);\
-+ MC_CMD_OP(cmd, 1, 24, 8, uint8_t, cfg->mac_addr[2]);\
-+ MC_CMD_OP(cmd, 1, 32, 8, uint8_t, cfg->mac_addr[1]);\
-+ MC_CMD_OP(cmd, 1, 40, 8, uint8_t, cfg->mac_addr[0]);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_FDB_SET_LEARNING_MODE(cmd, fdb_id, mode) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id);\
-+ MC_CMD_OP(cmd, 0, 16, 4, enum dpsw_fdb_learning_mode, mode);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_FDB_GET_ATTR(cmd, fdb_id) \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, fdb_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_RSP_FDB_GET_ATTR(cmd, attr) \
-+do { \
-+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, attr->max_fdb_entries);\
-+ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->fdb_aging_time);\
-+ MC_RSP_OP(cmd, 0, 48, 16, uint16_t, attr->num_fdb_mc_groups);\
-+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->max_fdb_mc_groups);\
-+ MC_RSP_OP(cmd, 1, 16, 4, enum dpsw_fdb_learning_mode, \
-+ attr->learning_mode);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_ACL_ADD(cmd, cfg) \
-+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->max_entries)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_RSP_ACL_ADD(cmd, acl_id) \
-+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, acl_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_ACL_REMOVE(cmd, acl_id) \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, acl_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_PREP_ACL_ENTRY(ext, key) \
-+do { \
-+ MC_PREP_OP(ext, 0, 0, 8, uint8_t, key->match.l2_dest_mac[5]);\
-+ MC_PREP_OP(ext, 0, 8, 8, uint8_t, key->match.l2_dest_mac[4]);\
-+ MC_PREP_OP(ext, 0, 16, 8, uint8_t, key->match.l2_dest_mac[3]);\
-+ MC_PREP_OP(ext, 0, 24, 8, uint8_t, key->match.l2_dest_mac[2]);\
-+ MC_PREP_OP(ext, 0, 32, 8, uint8_t, key->match.l2_dest_mac[1]);\
-+ MC_PREP_OP(ext, 0, 40, 8, uint8_t, key->match.l2_dest_mac[0]);\
-+ MC_PREP_OP(ext, 0, 48, 16, uint16_t, key->match.l2_tpid);\
-+ MC_PREP_OP(ext, 1, 0, 8, uint8_t, key->match.l2_source_mac[5]);\
-+ MC_PREP_OP(ext, 1, 8, 8, uint8_t, key->match.l2_source_mac[4]);\
-+ MC_PREP_OP(ext, 1, 16, 8, uint8_t, key->match.l2_source_mac[3]);\
-+ MC_PREP_OP(ext, 1, 24, 8, uint8_t, key->match.l2_source_mac[2]);\
-+ MC_PREP_OP(ext, 1, 32, 8, uint8_t, key->match.l2_source_mac[1]);\
-+ MC_PREP_OP(ext, 1, 40, 8, uint8_t, key->match.l2_source_mac[0]);\
-+ MC_PREP_OP(ext, 1, 48, 16, uint16_t, key->match.l2_vlan_id);\
-+ MC_PREP_OP(ext, 2, 0, 32, uint32_t, key->match.l3_dest_ip);\
-+ MC_PREP_OP(ext, 2, 32, 32, uint32_t, key->match.l3_source_ip);\
-+ MC_PREP_OP(ext, 3, 0, 16, uint16_t, key->match.l4_dest_port);\
-+ MC_PREP_OP(ext, 3, 16, 16, uint16_t, key->match.l4_source_port);\
-+ MC_PREP_OP(ext, 3, 32, 16, uint16_t, key->match.l2_ether_type);\
-+ MC_PREP_OP(ext, 3, 48, 8, uint8_t, key->match.l2_pcp_dei);\
-+ MC_PREP_OP(ext, 3, 56, 8, uint8_t, key->match.l3_dscp);\
-+ MC_PREP_OP(ext, 4, 0, 8, uint8_t, key->mask.l2_dest_mac[5]);\
-+ MC_PREP_OP(ext, 4, 8, 8, uint8_t, key->mask.l2_dest_mac[4]);\
-+ MC_PREP_OP(ext, 4, 16, 8, uint8_t, key->mask.l2_dest_mac[3]);\
-+ MC_PREP_OP(ext, 4, 24, 8, uint8_t, key->mask.l2_dest_mac[2]);\
-+ MC_PREP_OP(ext, 4, 32, 8, uint8_t, key->mask.l2_dest_mac[1]);\
-+ MC_PREP_OP(ext, 4, 40, 8, uint8_t, key->mask.l2_dest_mac[0]);\
-+ MC_PREP_OP(ext, 4, 48, 16, uint16_t, key->mask.l2_tpid);\
-+ MC_PREP_OP(ext, 5, 0, 8, uint8_t, key->mask.l2_source_mac[5]);\
-+ MC_PREP_OP(ext, 5, 8, 8, uint8_t, key->mask.l2_source_mac[4]);\
-+ MC_PREP_OP(ext, 5, 16, 8, uint8_t, key->mask.l2_source_mac[3]);\
-+ MC_PREP_OP(ext, 5, 24, 8, uint8_t, key->mask.l2_source_mac[2]);\
-+ MC_PREP_OP(ext, 5, 32, 8, uint8_t, key->mask.l2_source_mac[1]);\
-+ MC_PREP_OP(ext, 5, 40, 8, uint8_t, key->mask.l2_source_mac[0]);\
-+ MC_PREP_OP(ext, 5, 48, 16, uint16_t, key->mask.l2_vlan_id);\
-+ MC_PREP_OP(ext, 6, 0, 32, uint32_t, key->mask.l3_dest_ip);\
-+ MC_PREP_OP(ext, 6, 32, 32, uint32_t, key->mask.l3_source_ip);\
-+ MC_PREP_OP(ext, 7, 0, 16, uint16_t, key->mask.l4_dest_port);\
-+ MC_PREP_OP(ext, 7, 16, 16, uint16_t, key->mask.l4_source_port);\
-+ MC_PREP_OP(ext, 7, 32, 16, uint16_t, key->mask.l2_ether_type);\
-+ MC_PREP_OP(ext, 7, 48, 8, uint8_t, key->mask.l2_pcp_dei);\
-+ MC_PREP_OP(ext, 7, 56, 8, uint8_t, key->mask.l3_dscp);\
-+ MC_PREP_OP(ext, 8, 0, 8, uint8_t, key->match.l3_protocol);\
-+ MC_PREP_OP(ext, 8, 8, 8, uint8_t, key->mask.l3_protocol);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_EXT_ACL_ENTRY(ext, key) \
-+do { \
-+ MC_EXT_OP(ext, 0, 0, 8, uint8_t, key->match.l2_dest_mac[5]);\
-+ MC_EXT_OP(ext, 0, 8, 8, uint8_t, key->match.l2_dest_mac[4]);\
-+ MC_EXT_OP(ext, 0, 16, 8, uint8_t, key->match.l2_dest_mac[3]);\
-+ MC_EXT_OP(ext, 0, 24, 8, uint8_t, key->match.l2_dest_mac[2]);\
-+ MC_EXT_OP(ext, 0, 32, 8, uint8_t, key->match.l2_dest_mac[1]);\
-+ MC_EXT_OP(ext, 0, 40, 8, uint8_t, key->match.l2_dest_mac[0]);\
-+ MC_EXT_OP(ext, 0, 48, 16, uint16_t, key->match.l2_tpid);\
-+ MC_EXT_OP(ext, 1, 0, 8, uint8_t, key->match.l2_source_mac[5]);\
-+ MC_EXT_OP(ext, 1, 8, 8, uint8_t, key->match.l2_source_mac[4]);\
-+ MC_EXT_OP(ext, 1, 16, 8, uint8_t, key->match.l2_source_mac[3]);\
-+ MC_EXT_OP(ext, 1, 24, 8, uint8_t, key->match.l2_source_mac[2]);\
-+ MC_EXT_OP(ext, 1, 32, 8, uint8_t, key->match.l2_source_mac[1]);\
-+ MC_EXT_OP(ext, 1, 40, 8, uint8_t, key->match.l2_source_mac[0]);\
-+ MC_EXT_OP(ext, 1, 48, 16, uint16_t, key->match.l2_vlan_id);\
-+ MC_EXT_OP(ext, 2, 0, 32, uint32_t, key->match.l3_dest_ip);\
-+ MC_EXT_OP(ext, 2, 32, 32, uint32_t, key->match.l3_source_ip);\
-+ MC_EXT_OP(ext, 3, 0, 16, uint16_t, key->match.l4_dest_port);\
-+ MC_EXT_OP(ext, 3, 16, 16, uint16_t, key->match.l4_source_port);\
-+ MC_EXT_OP(ext, 3, 32, 16, uint16_t, key->match.l2_ether_type);\
-+ MC_EXT_OP(ext, 3, 48, 8, uint8_t, key->match.l2_pcp_dei);\
-+ MC_EXT_OP(ext, 3, 56, 8, uint8_t, key->match.l3_dscp);\
-+ MC_EXT_OP(ext, 4, 0, 8, uint8_t, key->mask.l2_dest_mac[5]);\
-+ MC_EXT_OP(ext, 4, 8, 8, uint8_t, key->mask.l2_dest_mac[4]);\
-+ MC_EXT_OP(ext, 4, 16, 8, uint8_t, key->mask.l2_dest_mac[3]);\
-+ MC_EXT_OP(ext, 4, 24, 8, uint8_t, key->mask.l2_dest_mac[2]);\
-+ MC_EXT_OP(ext, 4, 32, 8, uint8_t, key->mask.l2_dest_mac[1]);\
-+ MC_EXT_OP(ext, 4, 40, 8, uint8_t, key->mask.l2_dest_mac[0]);\
-+ MC_EXT_OP(ext, 4, 48, 16, uint16_t, key->mask.l2_tpid);\
-+ MC_EXT_OP(ext, 5, 0, 8, uint8_t, key->mask.l2_source_mac[5]);\
-+ MC_EXT_OP(ext, 5, 8, 8, uint8_t, key->mask.l2_source_mac[4]);\
-+ MC_EXT_OP(ext, 5, 16, 8, uint8_t, key->mask.l2_source_mac[3]);\
-+ MC_EXT_OP(ext, 5, 24, 8, uint8_t, key->mask.l2_source_mac[2]);\
-+ MC_EXT_OP(ext, 5, 32, 8, uint8_t, key->mask.l2_source_mac[1]);\
-+ MC_EXT_OP(ext, 5, 40, 8, uint8_t, key->mask.l2_source_mac[0]);\
-+ MC_EXT_OP(ext, 5, 48, 16, uint16_t, key->mask.l2_vlan_id);\
-+ MC_EXT_OP(ext, 6, 0, 32, uint32_t, key->mask.l3_dest_ip);\
-+ MC_EXT_OP(ext, 6, 32, 32, uint32_t, key->mask.l3_source_ip);\
-+ MC_EXT_OP(ext, 7, 0, 16, uint16_t, key->mask.l4_dest_port);\
-+ MC_EXT_OP(ext, 7, 16, 16, uint16_t, key->mask.l4_source_port);\
-+ MC_EXT_OP(ext, 7, 32, 16, uint16_t, key->mask.l2_ether_type);\
-+ MC_EXT_OP(ext, 7, 48, 8, uint8_t, key->mask.l2_pcp_dei);\
-+ MC_EXT_OP(ext, 7, 56, 8, uint8_t, key->mask.l3_dscp);\
-+ MC_EXT_OP(ext, 8, 0, 8, uint8_t, key->match.l3_protocol);\
-+ MC_EXT_OP(ext, 8, 8, 8, uint8_t, key->mask.l3_protocol);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_ACL_ADD_ENTRY(cmd, acl_id, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, acl_id);\
-+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->result.if_id);\
-+ MC_CMD_OP(cmd, 0, 32, 32, int, cfg->precedence);\
-+ MC_CMD_OP(cmd, 1, 0, 4, enum dpsw_acl_action, cfg->result.action);\
-+ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, cfg->key_iova); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_ACL_REMOVE_ENTRY(cmd, acl_id, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, acl_id);\
-+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->result.if_id);\
-+ MC_CMD_OP(cmd, 0, 32, 32, int, cfg->precedence);\
-+ MC_CMD_OP(cmd, 1, 0, 4, enum dpsw_acl_action, cfg->result.action);\
-+ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, cfg->key_iova); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_ACL_ADD_IF(cmd, acl_id, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, acl_id);\
-+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_ACL_REMOVE_IF(cmd, acl_id, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, acl_id);\
-+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->num_ifs); \
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_ACL_GET_ATTR(cmd, acl_id) \
-+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, acl_id)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_RSP_ACL_GET_ATTR(cmd, attr) \
-+do { \
-+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->max_entries);\
-+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->num_entries);\
-+ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, attr->num_ifs);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_RSP_CTRL_IF_GET_ATTR(cmd, attr) \
-+do { \
-+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->rx_fqid);\
-+ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, attr->rx_err_fqid);\
-+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->tx_err_conf_fqid);\
-+} while (0)
-+
-+/* cmd, param, offset, width, type, arg_name */
-+#define DPSW_CMD_CTRL_IF_SET_POOLS(cmd, cfg) \
-+do { \
-+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->num_dpbp); \
-+ MC_CMD_OP(cmd, 0, 8, 1, int, cfg->pools[0].backup_pool); \
-+ MC_CMD_OP(cmd, 0, 9, 1, int, cfg->pools[1].backup_pool); \
-+ MC_CMD_OP(cmd, 0, 10, 1, int, cfg->pools[2].backup_pool); \
-+ MC_CMD_OP(cmd, 0, 11, 1, int, cfg->pools[3].backup_pool); \
-+ MC_CMD_OP(cmd, 0, 12, 1, int, cfg->pools[4].backup_pool); \
-+ MC_CMD_OP(cmd, 0, 13, 1, int, cfg->pools[5].backup_pool); \
-+ MC_CMD_OP(cmd, 0, 14, 1, int, cfg->pools[6].backup_pool); \
-+ MC_CMD_OP(cmd, 0, 15, 1, int, cfg->pools[7].backup_pool); \
-+ MC_CMD_OP(cmd, 0, 32, 32, int, cfg->pools[0].dpbp_id); \
-+ MC_CMD_OP(cmd, 4, 32, 16, uint16_t, cfg->pools[0].buffer_size);\
-+ MC_CMD_OP(cmd, 1, 0, 32, int, cfg->pools[1].dpbp_id); \
-+ MC_CMD_OP(cmd, 4, 48, 16, uint16_t, cfg->pools[1].buffer_size);\
-+ MC_CMD_OP(cmd, 1, 32, 32, int, cfg->pools[2].dpbp_id); \
-+ MC_CMD_OP(cmd, 5, 0, 16, uint16_t, cfg->pools[2].buffer_size);\
-+ MC_CMD_OP(cmd, 2, 0, 32, int, cfg->pools[3].dpbp_id); \
-+ MC_CMD_OP(cmd, 5, 16, 16, uint16_t, cfg->pools[3].buffer_size);\
-+ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->pools[4].dpbp_id); \
-+ MC_CMD_OP(cmd, 5, 32, 16, uint16_t, cfg->pools[4].buffer_size);\
-+ MC_CMD_OP(cmd, 3, 0, 32, int, cfg->pools[5].dpbp_id); \
-+ MC_CMD_OP(cmd, 5, 48, 16, uint16_t, cfg->pools[5].buffer_size);\
-+ MC_CMD_OP(cmd, 3, 32, 32, int, cfg->pools[6].dpbp_id); \
-+ MC_CMD_OP(cmd, 6, 0, 16, uint16_t, cfg->pools[6].buffer_size);\
-+ MC_CMD_OP(cmd, 4, 0, 32, int, cfg->pools[7].dpbp_id); \
-+ MC_CMD_OP(cmd, 6, 16, 16, uint16_t, cfg->pools[7].buffer_size);\
-+} while (0)
-+
-+#endif /* __FSL_DPSW_CMD_H */
-diff --git a/drivers/net/dpaa2/mc/fsl_mc_cmd.h b/drivers/net/dpaa2/mc/fsl_mc_cmd.h
-new file mode 100644
-index 0000000..ac4f2b4
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/fsl_mc_cmd.h
-@@ -0,0 +1,221 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef __FSL_MC_CMD_H
-+#define __FSL_MC_CMD_H
-+
-+#define MC_CMD_NUM_OF_PARAMS 7
-+
-+#define MAKE_UMASK64(_width) \
-+ ((uint64_t)((_width) < 64 ? ((uint64_t)1 << (_width)) - 1 :\
-+ (uint64_t)-1))
-+static inline uint64_t mc_enc(int lsoffset, int width, uint64_t val)
-+{
-+ return (uint64_t)(((uint64_t)val & MAKE_UMASK64(width)) << lsoffset);
-+}
-+
-+static inline uint64_t mc_dec(uint64_t val, int lsoffset, int width)
-+{
-+ return (uint64_t)((val >> lsoffset) & MAKE_UMASK64(width));
-+}
-+
-+struct mc_command {
-+ uint64_t header;
-+ uint64_t params[MC_CMD_NUM_OF_PARAMS];
-+};
-+
-+/**
-+ * enum mc_cmd_status - indicates MC status at command response
-+ * @MC_CMD_STATUS_OK: Completed successfully
-+ * @MC_CMD_STATUS_READY: Ready to be processed
-+ * @MC_CMD_STATUS_AUTH_ERR: Authentication error
-+ * @MC_CMD_STATUS_NO_PRIVILEGE: No privilege
-+ * @MC_CMD_STATUS_DMA_ERR: DMA or I/O error
-+ * @MC_CMD_STATUS_CONFIG_ERR: Configuration error
-+ * @MC_CMD_STATUS_TIMEOUT: Operation timed out
-+ * @MC_CMD_STATUS_NO_RESOURCE: No resources
-+ * @MC_CMD_STATUS_NO_MEMORY: No memory available
-+ * @MC_CMD_STATUS_BUSY: Device is busy
-+ * @MC_CMD_STATUS_UNSUPPORTED_OP: Unsupported operation
-+ * @MC_CMD_STATUS_INVALID_STATE: Invalid state
-+ */
-+enum mc_cmd_status {
-+ MC_CMD_STATUS_OK = 0x0,
-+ MC_CMD_STATUS_READY = 0x1,
-+ MC_CMD_STATUS_AUTH_ERR = 0x3,
-+ MC_CMD_STATUS_NO_PRIVILEGE = 0x4,
-+ MC_CMD_STATUS_DMA_ERR = 0x5,
-+ MC_CMD_STATUS_CONFIG_ERR = 0x6,
-+ MC_CMD_STATUS_TIMEOUT = 0x7,
-+ MC_CMD_STATUS_NO_RESOURCE = 0x8,
-+ MC_CMD_STATUS_NO_MEMORY = 0x9,
-+ MC_CMD_STATUS_BUSY = 0xA,
-+ MC_CMD_STATUS_UNSUPPORTED_OP = 0xB,
-+ MC_CMD_STATUS_INVALID_STATE = 0xC
-+};
-+
-+/* MC command flags */
-+
-+/**
-+ * High priority flag
-+ */
-+#define MC_CMD_FLAG_PRI 0x00008000
-+/**
-+ * Command completion flag
-+ */
-+#define MC_CMD_FLAG_INTR_DIS 0x01000000
-+
-+/**
-+ * Command ID field offset
-+ */
-+#define MC_CMD_HDR_CMDID_O 52
-+/**
-+ * Command ID field size
-+ */
-+#define MC_CMD_HDR_CMDID_S 12
-+/**
-+ * Token field offset
-+ */
-+#define MC_CMD_HDR_TOKEN_O 38
-+/**
-+ * Token field size
-+ */
-+#define MC_CMD_HDR_TOKEN_S 10
-+/**
-+ * Status field offset
-+ */
-+#define MC_CMD_HDR_STATUS_O 16
-+/**
-+ * Status field size
-+ */
-+#define MC_CMD_HDR_STATUS_S 8
-+/**
-+ * Flags field offset
-+ */
-+#define MC_CMD_HDR_FLAGS_O 0
-+/**
-+ * Flags field size
-+ */
-+#define MC_CMD_HDR_FLAGS_S 32
-+/**
-+ * Command flags mask
-+ */
-+#define MC_CMD_HDR_FLAGS_MASK 0xFF00FF00
-+
-+#define MC_CMD_HDR_READ_STATUS(_hdr) \
-+ ((enum mc_cmd_status)mc_dec((_hdr), \
-+ MC_CMD_HDR_STATUS_O, MC_CMD_HDR_STATUS_S))
-+
-+#define MC_CMD_HDR_READ_TOKEN(_hdr) \
-+ ((uint16_t)mc_dec((_hdr), MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S))
-+
-+#define MC_PREP_OP(_ext, _param, _offset, _width, _type, _arg) \
-+ ((_ext)[_param] |= cpu_to_le64(mc_enc((_offset), (_width), _arg)))
-+
-+#define MC_EXT_OP(_ext, _param, _offset, _width, _type, _arg) \
-+ (_arg = (_type)mc_dec(cpu_to_le64(_ext[_param]), (_offset), (_width)))
-+
-+#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \
-+ ((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg))
-+
-+#define MC_RSP_OP(_cmd, _param, _offset, _width, _type, _arg) \
-+ (_arg = (_type)mc_dec(_cmd.params[_param], (_offset), (_width)))
-+
-+static inline uint64_t mc_encode_cmd_header(uint16_t cmd_id,
-+ uint32_t cmd_flags,
-+ uint16_t token)
-+{
-+ uint64_t hdr;
-+
-+ hdr = mc_enc(MC_CMD_HDR_CMDID_O, MC_CMD_HDR_CMDID_S, cmd_id);
-+ hdr |= mc_enc(MC_CMD_HDR_FLAGS_O, MC_CMD_HDR_FLAGS_S,
-+ (cmd_flags & MC_CMD_HDR_FLAGS_MASK));
-+ hdr |= mc_enc(MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S, token);
-+ hdr |= mc_enc(MC_CMD_HDR_STATUS_O, MC_CMD_HDR_STATUS_S,
-+ MC_CMD_STATUS_READY);
-+
-+ return hdr;
-+}
-+
-+/**
-+ * mc_write_command - writes a command to a Management Complex (MC) portal
-+ *
-+ * @portal: pointer to an MC portal
-+ * @cmd: pointer to a filled command
-+ */
-+static inline void mc_write_command(struct mc_command __iomem *portal,
-+ struct mc_command *cmd)
-+{
-+ int i;
-+ uint32_t word;
-+
-+ /* copy command parameters into the portal */
-+ for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++)
-+ iowrite64(cmd->params[i], &portal->params[i]);
-+
-+ /* submit the command by writing the header */
-+ word = (uint32_t)mc_dec(cmd->header, 32, 32);
-+ iowrite32(word, (((uint32_t *)&portal->header) + 1));
-+
-+ word = (uint32_t)mc_dec(cmd->header, 0, 32);
-+ iowrite32(word, (uint32_t *)&portal->header);
-+}
-+
-+/**
-+ * mc_read_response - reads the response for the last MC command from a
-+ * Management Complex (MC) portal
-+ *
-+ * @portal: pointer to an MC portal
-+ * @resp: pointer to command response buffer
-+ *
-+ * Returns MC_CMD_STATUS_OK on Success; Error code otherwise.
-+ */
-+static inline enum mc_cmd_status mc_read_response(
-+ struct mc_command __iomem *portal,
-+ struct mc_command *resp)
-+{
-+ int i;
-+ enum mc_cmd_status status;
-+
-+ /* Copy command response header from MC portal: */
-+ resp->header = ioread64(&portal->header);
-+ status = MC_CMD_HDR_READ_STATUS(resp->header);
-+ if (status != MC_CMD_STATUS_OK)
-+ return status;
-+
-+ /* Copy command response data from MC portal: */
-+ for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++)
-+ resp->params[i] = ioread64(&portal->params[i]);
-+
-+ return status;
-+}
-+
-+#endif /* __FSL_MC_CMD_H */
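For reference, a minimal sketch of how the pieces in this header are typically combined by a FLIB wrapper: encode the 64-bit header, pack the command-specific parameters with a DPSW_CMD_* macro, then send the command through the portal. The wrapper name below is illustrative, and the cmdid argument stands in for the DPSW command-id constants, which are not shown in this excerpt.

        #include <fsl_mc_sys.h>
        #include <fsl_mc_cmd.h>
        /* fsl_dpsw_cmd.h (shown above) provides DPSW_CMD_IF_ENABLE() */

        /* Illustrative sketch only; the wrapper name and cmdid parameter are
         * assumptions, not part of the patch content above. */
        static int example_if_enable(struct fsl_mc_io *mc_io, uint16_t token,
                                     uint16_t cmdid, uint16_t if_id)
        {
                struct mc_command cmd = { 0 };

                /* 64-bit header: command id, flags and authentication token. */
                cmd.header = mc_encode_cmd_header(cmdid, CMD_PRI_LOW, token);

                /* Pack the command-specific parameters into cmd.params[]. */
                DPSW_CMD_IF_ENABLE(cmd, if_id);

                /* Write to the portal, poll for completion, map status to -errno. */
                return mc_send_command(mc_io, &cmd);
        }

Query-style wrappers follow the same pattern and then pull fields back out of cmd.params[] with the corresponding DPSW_RSP_* macro.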
-diff --git a/drivers/net/dpaa2/mc/fsl_mc_sys.h b/drivers/net/dpaa2/mc/fsl_mc_sys.h
-new file mode 100644
-index 0000000..769c129
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/fsl_mc_sys.h
-@@ -0,0 +1,95 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef _FSL_MC_SYS_H
-+#define _FSL_MC_SYS_H
-+
-+#ifdef __linux_driver__
-+
-+#include <linux/errno.h>
-+#include <asm/io.h>
-+#include <linux/slab.h>
-+
-+struct fsl_mc_io {
-+ void *regs;
-+};
-+
-+#ifndef ENOTSUP
-+#define ENOTSUP 95
-+#endif
-+
-+#define ioread64(_p) readq(_p)
-+#define iowrite64(_v, _p) writeq(_v, _p)
-+
-+#else /* __linux_driver__ */
-+
-+#include <stdio.h>
-+#include <libio.h>
-+#include <stdint.h>
-+#include <errno.h>
-+#include <sys/uio.h>
-+#include <linux/byteorder/little_endian.h>
-+
-+#define cpu_to_le64(x) __cpu_to_le64(x)
-+#ifndef dmb
-+#define dmb() __asm__ __volatile__ ("" : : : "memory")
-+#endif
-+#define __iormb() dmb()
-+#define __iowmb() dmb()
-+#define __arch_getq(a) (*(volatile unsigned long *)(a))
-+#define __arch_putq(v, a) (*(volatile unsigned long *)(a) = (v))
-+#define __arch_putq32(v, a) (*(volatile unsigned int *)(a) = (v))
-+#define readq(c) ({ uint64_t __v = __arch_getq(c); __iormb(); __v; })
-+#define writeq(v, c) ({ uint64_t __v = v; __iowmb(); __arch_putq(__v, c); __v; })
-+#define writeq32(v, c) ({ uint32_t __v = v; __iowmb(); __arch_putq32(__v, c); __v; })
-+#define ioread64(_p) readq(_p)
-+#define iowrite64(_v, _p) writeq(_v, _p)
-+#define iowrite32(_v, _p) writeq32(_v, _p)
-+#define __iomem
-+
-+struct fsl_mc_io {
-+ void *regs;
-+};
-+
-+#ifndef ENOTSUP
-+#define ENOTSUP 95
-+#endif
-+
-+/*GPP is supposed to use MC commands with low priority*/
-+#define CMD_PRI_LOW 0 /*!< Low Priority command indication */
-+
-+struct mc_command;
-+
-+int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd);
-+
-+#endif /* __linux_driver__ */
-+
-+#endif /* _FSL_MC_SYS_H */
-diff --git a/drivers/net/dpaa2/mc/fsl_net.h b/drivers/net/dpaa2/mc/fsl_net.h
-new file mode 100644
-index 0000000..43825b8
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/fsl_net.h
-@@ -0,0 +1,480 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef __FSL_NET_H
-+#define __FSL_NET_H
-+
-+#define LAST_HDR_INDEX 0xFFFFFFFF
-+
-+/*****************************************************************************/
-+/* Protocol fields */
-+/*****************************************************************************/
-+
-+/************************* Ethernet fields *********************************/
-+#define NH_FLD_ETH_DA (1)
-+#define NH_FLD_ETH_SA (NH_FLD_ETH_DA << 1)
-+#define NH_FLD_ETH_LENGTH (NH_FLD_ETH_DA << 2)
-+#define NH_FLD_ETH_TYPE (NH_FLD_ETH_DA << 3)
-+#define NH_FLD_ETH_FINAL_CKSUM (NH_FLD_ETH_DA << 4)
-+#define NH_FLD_ETH_PADDING (NH_FLD_ETH_DA << 5)
-+#define NH_FLD_ETH_ALL_FIELDS ((NH_FLD_ETH_DA << 6) - 1)
-+
-+#define NH_FLD_ETH_ADDR_SIZE 6
-+
-+/*************************** VLAN fields ***********************************/
-+#define NH_FLD_VLAN_VPRI (1)
-+#define NH_FLD_VLAN_CFI (NH_FLD_VLAN_VPRI << 1)
-+#define NH_FLD_VLAN_VID (NH_FLD_VLAN_VPRI << 2)
-+#define NH_FLD_VLAN_LENGTH (NH_FLD_VLAN_VPRI << 3)
-+#define NH_FLD_VLAN_TYPE (NH_FLD_VLAN_VPRI << 4)
-+#define NH_FLD_VLAN_ALL_FIELDS ((NH_FLD_VLAN_VPRI << 5) - 1)
-+
-+#define NH_FLD_VLAN_TCI (NH_FLD_VLAN_VPRI | \
-+ NH_FLD_VLAN_CFI | \
-+ NH_FLD_VLAN_VID)
-+
-+/************************ IP (generic) fields ******************************/
-+#define NH_FLD_IP_VER (1)
-+#define NH_FLD_IP_DSCP (NH_FLD_IP_VER << 2)
-+#define NH_FLD_IP_ECN (NH_FLD_IP_VER << 3)
-+#define NH_FLD_IP_PROTO (NH_FLD_IP_VER << 4)
-+#define NH_FLD_IP_SRC (NH_FLD_IP_VER << 5)
-+#define NH_FLD_IP_DST (NH_FLD_IP_VER << 6)
-+#define NH_FLD_IP_TOS_TC (NH_FLD_IP_VER << 7)
-+#define NH_FLD_IP_ID (NH_FLD_IP_VER << 8)
-+#define NH_FLD_IP_ALL_FIELDS ((NH_FLD_IP_VER << 9) - 1)
-+
-+#define NH_FLD_IP_PROTO_SIZE 1
-+
-+/***************************** IPV4 fields *********************************/
-+#define NH_FLD_IPV4_VER (1)
-+#define NH_FLD_IPV4_HDR_LEN (NH_FLD_IPV4_VER << 1)
-+#define NH_FLD_IPV4_TOS (NH_FLD_IPV4_VER << 2)
-+#define NH_FLD_IPV4_TOTAL_LEN (NH_FLD_IPV4_VER << 3)
-+#define NH_FLD_IPV4_ID (NH_FLD_IPV4_VER << 4)
-+#define NH_FLD_IPV4_FLAG_D (NH_FLD_IPV4_VER << 5)
-+#define NH_FLD_IPV4_FLAG_M (NH_FLD_IPV4_VER << 6)
-+#define NH_FLD_IPV4_OFFSET (NH_FLD_IPV4_VER << 7)
-+#define NH_FLD_IPV4_TTL (NH_FLD_IPV4_VER << 8)
-+#define NH_FLD_IPV4_PROTO (NH_FLD_IPV4_VER << 9)
-+#define NH_FLD_IPV4_CKSUM (NH_FLD_IPV4_VER << 10)
-+#define NH_FLD_IPV4_SRC_IP (NH_FLD_IPV4_VER << 11)
-+#define NH_FLD_IPV4_DST_IP (NH_FLD_IPV4_VER << 12)
-+#define NH_FLD_IPV4_OPTS (NH_FLD_IPV4_VER << 13)
-+#define NH_FLD_IPV4_OPTS_COUNT (NH_FLD_IPV4_VER << 14)
-+#define NH_FLD_IPV4_ALL_FIELDS ((NH_FLD_IPV4_VER << 15) - 1)
-+
-+#define NH_FLD_IPV4_ADDR_SIZE 4
-+#define NH_FLD_IPV4_PROTO_SIZE 1
-+
-+/***************************** IPV6 fields *********************************/
-+#define NH_FLD_IPV6_VER (1)
-+#define NH_FLD_IPV6_TC (NH_FLD_IPV6_VER << 1)
-+#define NH_FLD_IPV6_SRC_IP (NH_FLD_IPV6_VER << 2)
-+#define NH_FLD_IPV6_DST_IP (NH_FLD_IPV6_VER << 3)
-+#define NH_FLD_IPV6_NEXT_HDR (NH_FLD_IPV6_VER << 4)
-+#define NH_FLD_IPV6_FL (NH_FLD_IPV6_VER << 5)
-+#define NH_FLD_IPV6_HOP_LIMIT (NH_FLD_IPV6_VER << 6)
-+#define NH_FLD_IPV6_ID (NH_FLD_IPV6_VER << 7)
-+#define NH_FLD_IPV6_ALL_FIELDS ((NH_FLD_IPV6_VER << 8) - 1)
-+
-+#define NH_FLD_IPV6_ADDR_SIZE 16
-+#define NH_FLD_IPV6_NEXT_HDR_SIZE 1
-+
-+/***************************** ICMP fields *********************************/
-+#define NH_FLD_ICMP_TYPE (1)
-+#define NH_FLD_ICMP_CODE (NH_FLD_ICMP_TYPE << 1)
-+#define NH_FLD_ICMP_CKSUM (NH_FLD_ICMP_TYPE << 2)
-+#define NH_FLD_ICMP_ID (NH_FLD_ICMP_TYPE << 3)
-+#define NH_FLD_ICMP_SQ_NUM (NH_FLD_ICMP_TYPE << 4)
-+#define NH_FLD_ICMP_ALL_FIELDS ((NH_FLD_ICMP_TYPE << 5) - 1)
-+
-+#define NH_FLD_ICMP_CODE_SIZE 1
-+#define NH_FLD_ICMP_TYPE_SIZE 1
-+
-+/***************************** IGMP fields *********************************/
-+#define NH_FLD_IGMP_VERSION (1)
-+#define NH_FLD_IGMP_TYPE (NH_FLD_IGMP_VERSION << 1)
-+#define NH_FLD_IGMP_CKSUM (NH_FLD_IGMP_VERSION << 2)
-+#define NH_FLD_IGMP_DATA (NH_FLD_IGMP_VERSION << 3)
-+#define NH_FLD_IGMP_ALL_FIELDS ((NH_FLD_IGMP_VERSION << 4) - 1)
-+
-+/***************************** TCP fields **********************************/
-+#define NH_FLD_TCP_PORT_SRC (1)
-+#define NH_FLD_TCP_PORT_DST (NH_FLD_TCP_PORT_SRC << 1)
-+#define NH_FLD_TCP_SEQ (NH_FLD_TCP_PORT_SRC << 2)
-+#define NH_FLD_TCP_ACK (NH_FLD_TCP_PORT_SRC << 3)
-+#define NH_FLD_TCP_OFFSET (NH_FLD_TCP_PORT_SRC << 4)
-+#define NH_FLD_TCP_FLAGS (NH_FLD_TCP_PORT_SRC << 5)
-+#define NH_FLD_TCP_WINDOW (NH_FLD_TCP_PORT_SRC << 6)
-+#define NH_FLD_TCP_CKSUM (NH_FLD_TCP_PORT_SRC << 7)
-+#define NH_FLD_TCP_URGPTR (NH_FLD_TCP_PORT_SRC << 8)
-+#define NH_FLD_TCP_OPTS (NH_FLD_TCP_PORT_SRC << 9)
-+#define NH_FLD_TCP_OPTS_COUNT (NH_FLD_TCP_PORT_SRC << 10)
-+#define NH_FLD_TCP_ALL_FIELDS ((NH_FLD_TCP_PORT_SRC << 11) - 1)
-+
-+#define NH_FLD_TCP_PORT_SIZE 2
-+
-+/***************************** UDP fields **********************************/
-+#define NH_FLD_UDP_PORT_SRC (1)
-+#define NH_FLD_UDP_PORT_DST (NH_FLD_UDP_PORT_SRC << 1)
-+#define NH_FLD_UDP_LEN (NH_FLD_UDP_PORT_SRC << 2)
-+#define NH_FLD_UDP_CKSUM (NH_FLD_UDP_PORT_SRC << 3)
-+#define NH_FLD_UDP_ALL_FIELDS ((NH_FLD_UDP_PORT_SRC << 4) - 1)
-+
-+#define NH_FLD_UDP_PORT_SIZE 2
-+
-+/*************************** UDP-lite fields *******************************/
-+#define NH_FLD_UDP_LITE_PORT_SRC (1)
-+#define NH_FLD_UDP_LITE_PORT_DST (NH_FLD_UDP_LITE_PORT_SRC << 1)
-+#define NH_FLD_UDP_LITE_ALL_FIELDS \
-+ ((NH_FLD_UDP_LITE_PORT_SRC << 2) - 1)
-+
-+#define NH_FLD_UDP_LITE_PORT_SIZE 2
-+
-+/*************************** UDP-encap-ESP fields **************************/
-+#define NH_FLD_UDP_ENC_ESP_PORT_SRC (1)
-+#define NH_FLD_UDP_ENC_ESP_PORT_DST (NH_FLD_UDP_ENC_ESP_PORT_SRC << 1)
-+#define NH_FLD_UDP_ENC_ESP_LEN (NH_FLD_UDP_ENC_ESP_PORT_SRC << 2)
-+#define NH_FLD_UDP_ENC_ESP_CKSUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 3)
-+#define NH_FLD_UDP_ENC_ESP_SPI (NH_FLD_UDP_ENC_ESP_PORT_SRC << 4)
-+#define NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 5)
-+#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS \
-+ ((NH_FLD_UDP_ENC_ESP_PORT_SRC << 6) - 1)
-+
-+#define NH_FLD_UDP_ENC_ESP_PORT_SIZE 2
-+#define NH_FLD_UDP_ENC_ESP_SPI_SIZE 4
-+
-+/***************************** SCTP fields *********************************/
-+#define NH_FLD_SCTP_PORT_SRC (1)
-+#define NH_FLD_SCTP_PORT_DST (NH_FLD_SCTP_PORT_SRC << 1)
-+#define NH_FLD_SCTP_VER_TAG (NH_FLD_SCTP_PORT_SRC << 2)
-+#define NH_FLD_SCTP_CKSUM (NH_FLD_SCTP_PORT_SRC << 3)
-+#define NH_FLD_SCTP_ALL_FIELDS ((NH_FLD_SCTP_PORT_SRC << 4) - 1)
-+
-+#define NH_FLD_SCTP_PORT_SIZE 2
-+
-+/***************************** DCCP fields *********************************/
-+#define NH_FLD_DCCP_PORT_SRC (1)
-+#define NH_FLD_DCCP_PORT_DST (NH_FLD_DCCP_PORT_SRC << 1)
-+#define NH_FLD_DCCP_ALL_FIELDS ((NH_FLD_DCCP_PORT_SRC << 2) - 1)
-+
-+#define NH_FLD_DCCP_PORT_SIZE 2
-+
-+/***************************** IPHC fields *********************************/
-+#define NH_FLD_IPHC_CID (1)
-+#define NH_FLD_IPHC_CID_TYPE (NH_FLD_IPHC_CID << 1)
-+#define NH_FLD_IPHC_HCINDEX (NH_FLD_IPHC_CID << 2)
-+#define NH_FLD_IPHC_GEN (NH_FLD_IPHC_CID << 3)
-+#define NH_FLD_IPHC_D_BIT (NH_FLD_IPHC_CID << 4)
-+#define NH_FLD_IPHC_ALL_FIELDS ((NH_FLD_IPHC_CID << 5) - 1)
-+
-+/*********************** SCTP chunk data fields ****************************/
-+#define NH_FLD_SCTP_CHUNK_DATA_TYPE (1)
-+#define NH_FLD_SCTP_CHUNK_DATA_FLAGS (NH_FLD_SCTP_CHUNK_DATA_TYPE << 1)
-+#define NH_FLD_SCTP_CHUNK_DATA_LENGTH (NH_FLD_SCTP_CHUNK_DATA_TYPE << 2)
-+#define NH_FLD_SCTP_CHUNK_DATA_TSN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 3)
-+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 4)
-+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 5)
-+#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 6)
-+#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED (NH_FLD_SCTP_CHUNK_DATA_TYPE << 7)
-+#define NH_FLD_SCTP_CHUNK_DATA_BEGGINING (NH_FLD_SCTP_CHUNK_DATA_TYPE << 8)
-+#define NH_FLD_SCTP_CHUNK_DATA_END (NH_FLD_SCTP_CHUNK_DATA_TYPE << 9)
-+#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS \
-+ ((NH_FLD_SCTP_CHUNK_DATA_TYPE << 10) - 1)
-+
-+/*************************** L2TPV2 fields *********************************/
-+#define NH_FLD_L2TPV2_TYPE_BIT (1)
-+#define NH_FLD_L2TPV2_LENGTH_BIT (NH_FLD_L2TPV2_TYPE_BIT << 1)
-+#define NH_FLD_L2TPV2_SEQUENCE_BIT (NH_FLD_L2TPV2_TYPE_BIT << 2)
-+#define NH_FLD_L2TPV2_OFFSET_BIT (NH_FLD_L2TPV2_TYPE_BIT << 3)
-+#define NH_FLD_L2TPV2_PRIORITY_BIT (NH_FLD_L2TPV2_TYPE_BIT << 4)
-+#define NH_FLD_L2TPV2_VERSION (NH_FLD_L2TPV2_TYPE_BIT << 5)
-+#define NH_FLD_L2TPV2_LEN (NH_FLD_L2TPV2_TYPE_BIT << 6)
-+#define NH_FLD_L2TPV2_TUNNEL_ID (NH_FLD_L2TPV2_TYPE_BIT << 7)
-+#define NH_FLD_L2TPV2_SESSION_ID (NH_FLD_L2TPV2_TYPE_BIT << 8)
-+#define NH_FLD_L2TPV2_NS (NH_FLD_L2TPV2_TYPE_BIT << 9)
-+#define NH_FLD_L2TPV2_NR (NH_FLD_L2TPV2_TYPE_BIT << 10)
-+#define NH_FLD_L2TPV2_OFFSET_SIZE (NH_FLD_L2TPV2_TYPE_BIT << 11)
-+#define NH_FLD_L2TPV2_FIRST_BYTE (NH_FLD_L2TPV2_TYPE_BIT << 12)
-+#define NH_FLD_L2TPV2_ALL_FIELDS \
-+ ((NH_FLD_L2TPV2_TYPE_BIT << 13) - 1)
-+
-+/*************************** L2TPV3 fields *********************************/
-+#define NH_FLD_L2TPV3_CTRL_TYPE_BIT (1)
-+#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 1)
-+#define NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 2)
-+#define NH_FLD_L2TPV3_CTRL_VERSION (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 3)
-+#define NH_FLD_L2TPV3_CTRL_LENGTH (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 4)
-+#define NH_FLD_L2TPV3_CTRL_CONTROL (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 5)
-+#define NH_FLD_L2TPV3_CTRL_SENT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 6)
-+#define NH_FLD_L2TPV3_CTRL_RECV (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 7)
-+#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 8)
-+#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS \
-+ ((NH_FLD_L2TPV3_CTRL_TYPE_BIT << 9) - 1)
-+
-+#define NH_FLD_L2TPV3_SESS_TYPE_BIT (1)
-+#define NH_FLD_L2TPV3_SESS_VERSION (NH_FLD_L2TPV3_SESS_TYPE_BIT << 1)
-+#define NH_FLD_L2TPV3_SESS_ID (NH_FLD_L2TPV3_SESS_TYPE_BIT << 2)
-+#define NH_FLD_L2TPV3_SESS_COOKIE (NH_FLD_L2TPV3_SESS_TYPE_BIT << 3)
-+#define NH_FLD_L2TPV3_SESS_ALL_FIELDS \
-+ ((NH_FLD_L2TPV3_SESS_TYPE_BIT << 4) - 1)
-+
-+/**************************** PPP fields ***********************************/
-+#define NH_FLD_PPP_PID (1)
-+#define NH_FLD_PPP_COMPRESSED (NH_FLD_PPP_PID << 1)
-+#define NH_FLD_PPP_ALL_FIELDS ((NH_FLD_PPP_PID << 2) - 1)
-+
-+/************************** PPPoE fields ***********************************/
-+#define NH_FLD_PPPOE_VER (1)
-+#define NH_FLD_PPPOE_TYPE (NH_FLD_PPPOE_VER << 1)
-+#define NH_FLD_PPPOE_CODE (NH_FLD_PPPOE_VER << 2)
-+#define NH_FLD_PPPOE_SID (NH_FLD_PPPOE_VER << 3)
-+#define NH_FLD_PPPOE_LEN (NH_FLD_PPPOE_VER << 4)
-+#define NH_FLD_PPPOE_SESSION (NH_FLD_PPPOE_VER << 5)
-+#define NH_FLD_PPPOE_PID (NH_FLD_PPPOE_VER << 6)
-+#define NH_FLD_PPPOE_ALL_FIELDS ((NH_FLD_PPPOE_VER << 7) - 1)
-+
-+/************************* PPP-Mux fields **********************************/
-+#define NH_FLD_PPPMUX_PID (1)
-+#define NH_FLD_PPPMUX_CKSUM (NH_FLD_PPPMUX_PID << 1)
-+#define NH_FLD_PPPMUX_COMPRESSED (NH_FLD_PPPMUX_PID << 2)
-+#define NH_FLD_PPPMUX_ALL_FIELDS ((NH_FLD_PPPMUX_PID << 3) - 1)
-+
-+/*********************** PPP-Mux sub-frame fields **************************/
-+#define NH_FLD_PPPMUX_SUBFRM_PFF (1)
-+#define NH_FLD_PPPMUX_SUBFRM_LXT (NH_FLD_PPPMUX_SUBFRM_PFF << 1)
-+#define NH_FLD_PPPMUX_SUBFRM_LEN (NH_FLD_PPPMUX_SUBFRM_PFF << 2)
-+#define NH_FLD_PPPMUX_SUBFRM_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 3)
-+#define NH_FLD_PPPMUX_SUBFRM_USE_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 4)
-+#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS \
-+ ((NH_FLD_PPPMUX_SUBFRM_PFF << 5) - 1)
-+
-+/*************************** LLC fields ************************************/
-+#define NH_FLD_LLC_DSAP (1)
-+#define NH_FLD_LLC_SSAP (NH_FLD_LLC_DSAP << 1)
-+#define NH_FLD_LLC_CTRL (NH_FLD_LLC_DSAP << 2)
-+#define NH_FLD_LLC_ALL_FIELDS ((NH_FLD_LLC_DSAP << 3) - 1)
-+
-+/*************************** NLPID fields **********************************/
-+#define NH_FLD_NLPID_NLPID (1)
-+#define NH_FLD_NLPID_ALL_FIELDS ((NH_FLD_NLPID_NLPID << 1) - 1)
-+
-+/*************************** SNAP fields ***********************************/
-+#define NH_FLD_SNAP_OUI (1)
-+#define NH_FLD_SNAP_PID (NH_FLD_SNAP_OUI << 1)
-+#define NH_FLD_SNAP_ALL_FIELDS ((NH_FLD_SNAP_OUI << 2) - 1)
-+
-+/*************************** LLC SNAP fields *******************************/
-+#define NH_FLD_LLC_SNAP_TYPE (1)
-+#define NH_FLD_LLC_SNAP_ALL_FIELDS ((NH_FLD_LLC_SNAP_TYPE << 1) - 1)
-+
-+#define NH_FLD_ARP_HTYPE (1)
-+#define NH_FLD_ARP_PTYPE (NH_FLD_ARP_HTYPE << 1)
-+#define NH_FLD_ARP_HLEN (NH_FLD_ARP_HTYPE << 2)
-+#define NH_FLD_ARP_PLEN (NH_FLD_ARP_HTYPE << 3)
-+#define NH_FLD_ARP_OPER (NH_FLD_ARP_HTYPE << 4)
-+#define NH_FLD_ARP_SHA (NH_FLD_ARP_HTYPE << 5)
-+#define NH_FLD_ARP_SPA (NH_FLD_ARP_HTYPE << 6)
-+#define NH_FLD_ARP_THA (NH_FLD_ARP_HTYPE << 7)
-+#define NH_FLD_ARP_TPA (NH_FLD_ARP_HTYPE << 8)
-+#define NH_FLD_ARP_ALL_FIELDS ((NH_FLD_ARP_HTYPE << 9) - 1)
-+
-+/*************************** RFC2684 fields ********************************/
-+#define NH_FLD_RFC2684_LLC (1)
-+#define NH_FLD_RFC2684_NLPID (NH_FLD_RFC2684_LLC << 1)
-+#define NH_FLD_RFC2684_OUI (NH_FLD_RFC2684_LLC << 2)
-+#define NH_FLD_RFC2684_PID (NH_FLD_RFC2684_LLC << 3)
-+#define NH_FLD_RFC2684_VPN_OUI (NH_FLD_RFC2684_LLC << 4)
-+#define NH_FLD_RFC2684_VPN_IDX (NH_FLD_RFC2684_LLC << 5)
-+#define NH_FLD_RFC2684_ALL_FIELDS ((NH_FLD_RFC2684_LLC << 6) - 1)
-+
-+/*************************** User defined fields ***************************/
-+#define NH_FLD_USER_DEFINED_SRCPORT (1)
-+#define NH_FLD_USER_DEFINED_PCDID (NH_FLD_USER_DEFINED_SRCPORT << 1)
-+#define NH_FLD_USER_DEFINED_ALL_FIELDS \
-+ ((NH_FLD_USER_DEFINED_SRCPORT << 2) - 1)
-+
-+/*************************** Payload fields ********************************/
-+#define NH_FLD_PAYLOAD_BUFFER (1)
-+#define NH_FLD_PAYLOAD_SIZE (NH_FLD_PAYLOAD_BUFFER << 1)
-+#define NH_FLD_MAX_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 2)
-+#define NH_FLD_MIN_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 3)
-+#define NH_FLD_PAYLOAD_TYPE (NH_FLD_PAYLOAD_BUFFER << 4)
-+#define NH_FLD_FRAME_SIZE (NH_FLD_PAYLOAD_BUFFER << 5)
-+#define NH_FLD_PAYLOAD_ALL_FIELDS ((NH_FLD_PAYLOAD_BUFFER << 6) - 1)
-+
-+/*************************** GRE fields ************************************/
-+#define NH_FLD_GRE_TYPE (1)
-+#define NH_FLD_GRE_ALL_FIELDS ((NH_FLD_GRE_TYPE << 1) - 1)
-+
-+/*************************** MINENCAP fields *******************************/
-+#define NH_FLD_MINENCAP_SRC_IP (1)
-+#define NH_FLD_MINENCAP_DST_IP (NH_FLD_MINENCAP_SRC_IP << 1)
-+#define NH_FLD_MINENCAP_TYPE (NH_FLD_MINENCAP_SRC_IP << 2)
-+#define NH_FLD_MINENCAP_ALL_FIELDS \
-+ ((NH_FLD_MINENCAP_SRC_IP << 3) - 1)
-+
-+/*************************** IPSEC AH fields *******************************/
-+#define NH_FLD_IPSEC_AH_SPI (1)
-+#define NH_FLD_IPSEC_AH_NH (NH_FLD_IPSEC_AH_SPI << 1)
-+#define NH_FLD_IPSEC_AH_ALL_FIELDS ((NH_FLD_IPSEC_AH_SPI << 2) - 1)
-+
-+/*************************** IPSEC ESP fields ******************************/
-+#define NH_FLD_IPSEC_ESP_SPI (1)
-+#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM (NH_FLD_IPSEC_ESP_SPI << 1)
-+#define NH_FLD_IPSEC_ESP_ALL_FIELDS ((NH_FLD_IPSEC_ESP_SPI << 2) - 1)
-+
-+#define NH_FLD_IPSEC_ESP_SPI_SIZE 4
-+
-+/*************************** MPLS fields ***********************************/
-+#define NH_FLD_MPLS_LABEL_STACK (1)
-+#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS \
-+ ((NH_FLD_MPLS_LABEL_STACK << 1) - 1)
-+
-+/*************************** MACSEC fields *********************************/
-+#define NH_FLD_MACSEC_SECTAG (1)
-+#define NH_FLD_MACSEC_ALL_FIELDS ((NH_FLD_MACSEC_SECTAG << 1) - 1)
-+
-+/*************************** GTP fields ************************************/
-+#define NH_FLD_GTP_TEID (1)
-+
-+/* Protocol options */
-+
-+/* Ethernet options */
-+#define NH_OPT_ETH_BROADCAST 1
-+#define NH_OPT_ETH_MULTICAST 2
-+#define NH_OPT_ETH_UNICAST 3
-+#define NH_OPT_ETH_BPDU 4
-+
-+#define NH_ETH_IS_MULTICAST_ADDR(addr) (addr[0] & 0x01)
-+/* also applicable for broadcast */
-+
-+/* VLAN options */
-+#define NH_OPT_VLAN_CFI 1
-+
-+/* IPV4 options */
-+#define NH_OPT_IPV4_UNICAST 1
-+#define NH_OPT_IPV4_MULTICAST 2
-+#define NH_OPT_IPV4_BROADCAST 3
-+#define NH_OPT_IPV4_OPTION 4
-+#define NH_OPT_IPV4_FRAG 5
-+#define NH_OPT_IPV4_INITIAL_FRAG 6
-+
-+/* IPV6 options */
-+#define NH_OPT_IPV6_UNICAST 1
-+#define NH_OPT_IPV6_MULTICAST 2
-+#define NH_OPT_IPV6_OPTION 3
-+#define NH_OPT_IPV6_FRAG 4
-+#define NH_OPT_IPV6_INITIAL_FRAG 5
-+
-+/* General IP options (may be used for any version) */
-+#define NH_OPT_IP_FRAG 1
-+#define NH_OPT_IP_INITIAL_FRAG 2
-+#define NH_OPT_IP_OPTION 3
-+
-+/* Minenc. options */
-+#define NH_OPT_MINENCAP_SRC_ADDR_PRESENT 1
-+
-+/* GRE. options */
-+#define NH_OPT_GRE_ROUTING_PRESENT 1
-+
-+/* TCP options */
-+#define NH_OPT_TCP_OPTIONS 1
-+#define NH_OPT_TCP_CONTROL_HIGH_BITS 2
-+#define NH_OPT_TCP_CONTROL_LOW_BITS 3
-+
-+/* CAPWAP options */
-+#define NH_OPT_CAPWAP_DTLS 1
-+
-+enum net_prot {
-+ NET_PROT_NONE = 0,
-+ NET_PROT_PAYLOAD,
-+ NET_PROT_ETH,
-+ NET_PROT_VLAN,
-+ NET_PROT_IPV4,
-+ NET_PROT_IPV6,
-+ NET_PROT_IP,
-+ NET_PROT_TCP,
-+ NET_PROT_UDP,
-+ NET_PROT_UDP_LITE,
-+ NET_PROT_IPHC,
-+ NET_PROT_SCTP,
-+ NET_PROT_SCTP_CHUNK_DATA,
-+ NET_PROT_PPPOE,
-+ NET_PROT_PPP,
-+ NET_PROT_PPPMUX,
-+ NET_PROT_PPPMUX_SUBFRM,
-+ NET_PROT_L2TPV2,
-+ NET_PROT_L2TPV3_CTRL,
-+ NET_PROT_L2TPV3_SESS,
-+ NET_PROT_LLC,
-+ NET_PROT_LLC_SNAP,
-+ NET_PROT_NLPID,
-+ NET_PROT_SNAP,
-+ NET_PROT_MPLS,
-+ NET_PROT_IPSEC_AH,
-+ NET_PROT_IPSEC_ESP,
-+ NET_PROT_UDP_ENC_ESP, /* RFC 3948 */
-+ NET_PROT_MACSEC,
-+ NET_PROT_GRE,
-+ NET_PROT_MINENCAP,
-+ NET_PROT_DCCP,
-+ NET_PROT_ICMP,
-+ NET_PROT_IGMP,
-+ NET_PROT_ARP,
-+ NET_PROT_CAPWAP_DATA,
-+ NET_PROT_CAPWAP_CTRL,
-+ NET_PROT_RFC2684,
-+ NET_PROT_ICMPV6,
-+ NET_PROT_FCOE,
-+ NET_PROT_FIP,
-+ NET_PROT_ISCSI,
-+ NET_PROT_GTP,
-+ NET_PROT_USER_DEFINED_L2,
-+ NET_PROT_USER_DEFINED_L3,
-+ NET_PROT_USER_DEFINED_L4,
-+ NET_PROT_USER_DEFINED_L5,
-+ NET_PROT_USER_DEFINED_SHIM1,
-+ NET_PROT_USER_DEFINED_SHIM2,
-+
-+ NET_PROT_DUMMY_LAST
-+};
-+
-+/*! IEEE8021.Q */
-+#define NH_IEEE8021Q_ETYPE 0x8100
-+#define NH_IEEE8021Q_HDR(etype, pcp, dei, vlan_id) \
-+ ((((uint32_t)(etype & 0xFFFF)) << 16) | \
-+ (((uint32_t)(pcp & 0x07)) << 13) | \
-+ (((uint32_t)(dei & 0x01)) << 12) | \
-+ (((uint32_t)(vlan_id & 0xFFF))))
-+
-+#endif /* __FSL_NET_H */
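As a small illustration of how these definitions compose (purely illustrative; the classification API that consumes such field masks is not part of this excerpt, and the include path is assumed):

        #include <stdint.h>
        #include <fsl_net.h>   /* assumed include path for the header above */

        /* Illustrative sketch only, not part of the patch content above. */
        static void example_fsl_net_usage(void)
        {
                /* Field-selection masks are built by OR-ing NH_FLD_* bits. */
                uint32_t ipv4_fields = NH_FLD_IPV4_SRC_IP | NH_FLD_IPV4_DST_IP;
                uint32_t udp_fields  = NH_FLD_UDP_PORT_SRC | NH_FLD_UDP_PORT_DST;

                /* 802.1Q tag word: TPID 0x8100, PCP 5, DEI 0, VLAN ID 100. */
                uint32_t tag = NH_IEEE8021Q_HDR(NH_IEEE8021Q_ETYPE, 5, 0, 100);

                (void)ipv4_fields;
                (void)udp_fields;
                (void)tag;
        }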
-diff --git a/drivers/net/dpaa2/mc/mc_sys.c b/drivers/net/dpaa2/mc/mc_sys.c
-new file mode 100644
-index 0000000..0a88cad
---- /dev/null
-+++ b/drivers/net/dpaa2/mc/mc_sys.c
-@@ -0,0 +1,129 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#include <fsl_mc_sys.h>
-+#include <fsl_mc_cmd.h>
-+
-+/* The ODP framework uses the MC portal in shared mode. The following
-+ changes introducing locks must be maintained while merging the FLIB.
-+*/
-+
-+/**
-+* The mc_spinlock_t type.
-+*/
-+typedef struct {
-+ volatile int locked; /**< lock status 0 = unlocked, 1 = locked */
-+} mc_spinlock_t;
-+
-+/**
-+* A static spinlock initializer.
-+*/
-+static mc_spinlock_t mc_portal_lock = { 0 };
-+
-+static inline void mc_pause(void) {}
-+
-+static inline void mc_spinlock_lock(mc_spinlock_t *sl)
-+{
-+ while (__sync_lock_test_and_set(&sl->locked, 1))
-+ while (sl->locked)
-+ mc_pause();
-+}
-+
-+static inline void mc_spinlock_unlock(mc_spinlock_t *sl)
-+{
-+ __sync_lock_release(&sl->locked);
-+}
-+
-+
-+static int mc_status_to_error(enum mc_cmd_status status)
-+{
-+ switch (status) {
-+ case MC_CMD_STATUS_OK:
-+ return 0;
-+ case MC_CMD_STATUS_AUTH_ERR:
-+ return -EACCES; /* Token error */
-+ case MC_CMD_STATUS_NO_PRIVILEGE:
-+ return -EPERM; /* Permission denied */
-+ case MC_CMD_STATUS_DMA_ERR:
-+ return -EIO; /* Input/Output error */
-+ case MC_CMD_STATUS_CONFIG_ERR:
-+ return -EINVAL; /* Device not configured */
-+ case MC_CMD_STATUS_TIMEOUT:
-+ return -ETIMEDOUT; /* Operation timed out */
-+ case MC_CMD_STATUS_NO_RESOURCE:
-+ return -ENAVAIL; /* Resource temporarily unavailable */
-+ case MC_CMD_STATUS_NO_MEMORY:
-+ return -ENOMEM; /* Cannot allocate memory */
-+ case MC_CMD_STATUS_BUSY:
-+ return -EBUSY; /* Device busy */
-+ case MC_CMD_STATUS_UNSUPPORTED_OP:
-+ return -ENOTSUP; /* Operation not supported by device */
-+ case MC_CMD_STATUS_INVALID_STATE:
-+ return -ENODEV; /* Invalid device state */
-+ default:
-+ break;
-+ }
-+
-+ /* Not expected to reach here */
-+ return -EINVAL;
-+}
-+
-+int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd)
-+{
-+ enum mc_cmd_status status;
-+
-+ if (!mc_io || !mc_io->regs)
-+ return -EACCES;
-+
-+ /* --- Call lock function here in case portal is shared --- */
-+ mc_spinlock_lock(&mc_portal_lock);
-+
-+ mc_write_command(mc_io->regs, cmd);
-+
-+ /* Spin until status changes */
-+ do {
-+ status = MC_CMD_HDR_READ_STATUS(ioread64(mc_io->regs));
-+
-+ /* --- Call wait function here to prevent blocking ---
-+ * Change the loop condition accordingly to exit on timeout.
-+ */
-+ } while (status == MC_CMD_STATUS_READY);
-+
-+ /* Read the response back into the command buffer */
-+ mc_read_response(mc_io->regs, cmd);
-+
-+ /* --- Call unlock function here in case portal is shared --- */
-+ mc_spinlock_unlock(&mc_portal_lock);
-+
-+ return mc_status_to_error(status);
-+}
-+
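The "Call wait function here" comment above leaves the status poll unbounded; a minimal sketch of one way a consumer could bound it is shown below. The helper name and the idea of a retry budget are assumptions, not part of the patch content above.

        #include <fsl_mc_sys.h>
        #include <fsl_mc_cmd.h>

        /* Illustrative sketch only: a bounded variant of the status poll
         * inside mc_send_command(); the retry budget is an assumed value. */
        static enum mc_cmd_status mc_poll_status(struct fsl_mc_io *mc_io,
                                                 unsigned int max_retries)
        {
                enum mc_cmd_status status;

                do {
                        status = MC_CMD_HDR_READ_STATUS(ioread64(mc_io->regs));
                } while (status == MC_CMD_STATUS_READY && --max_retries);

                /* Still MC_CMD_STATUS_READY here means the poll ran out of budget. */
                return status;
        }

mc_send_command() could then translate a still-READY status into -ETIMEDOUT instead of spinning forever.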
-diff --git a/drivers/net/dpaa2/qbman/driver/qbman_debug.c b/drivers/net/dpaa2/qbman/driver/qbman_debug.c
-new file mode 100644
-index 0000000..e205681
---- /dev/null
-+++ b/drivers/net/dpaa2/qbman/driver/qbman_debug.c
-@@ -0,0 +1,926 @@
-+/* Copyright (C) 2015 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include "qbman_portal.h"
-+#include "qbman_debug.h"
-+#include <drivers/fsl_qbman_portal.h>
-+
-+/* QBMan portal management command code */
-+#define QBMAN_BP_QUERY 0x32
-+#define QBMAN_FQ_QUERY 0x44
-+#define QBMAN_FQ_QUERY_NP 0x45
-+#define QBMAN_WQ_QUERY 0x47
-+#define QBMAN_CGR_QUERY 0x51
-+#define QBMAN_WRED_QUERY 0x54
-+#define QBMAN_CGR_STAT_QUERY 0x55
-+#define QBMAN_CGR_STAT_QUERY_CLR 0x56
-+
-+enum qbman_attr_usage_e {
-+ qbman_attr_usage_fq,
-+ qbman_attr_usage_bpool,
-+ qbman_attr_usage_cgr,
-+ qbman_attr_usage_wqchan
-+};
-+
-+struct int_qbman_attr {
-+ uint32_t words[32];
-+ enum qbman_attr_usage_e usage;
-+};
-+
-+#define attr_type_set(a, e) \
-+{ \
-+ struct qbman_attr *__attr = a; \
-+ enum qbman_attr_usage_e __usage = e; \
-+ ((struct int_qbman_attr *)__attr)->usage = __usage; \
-+}
-+
-+#define ATTR32(d) (&(d)->dont_manipulate_directly[0])
-+#define ATTR32_1(d) (&(d)->dont_manipulate_directly[16])
-+
-+static struct qb_attr_code code_bp_bpid = QB_CODE(0, 16, 16);
-+static struct qb_attr_code code_bp_bdi = QB_CODE(1, 16, 1);
-+static struct qb_attr_code code_bp_va = QB_CODE(1, 17, 1);
-+static struct qb_attr_code code_bp_wae = QB_CODE(1, 18, 1);
-+static struct qb_attr_code code_bp_swdet = QB_CODE(4, 0, 16);
-+static struct qb_attr_code code_bp_swdxt = QB_CODE(4, 16, 16);
-+static struct qb_attr_code code_bp_hwdet = QB_CODE(5, 0, 16);
-+static struct qb_attr_code code_bp_hwdxt = QB_CODE(5, 16, 16);
-+static struct qb_attr_code code_bp_swset = QB_CODE(6, 0, 16);
-+static struct qb_attr_code code_bp_swsxt = QB_CODE(6, 16, 16);
-+static struct qb_attr_code code_bp_vbpid = QB_CODE(7, 0, 14);
-+static struct qb_attr_code code_bp_icid = QB_CODE(7, 16, 15);
-+static struct qb_attr_code code_bp_pl = QB_CODE(7, 31, 1);
-+static struct qb_attr_code code_bp_bpscn_addr_lo = QB_CODE(8, 0, 32);
-+static struct qb_attr_code code_bp_bpscn_addr_hi = QB_CODE(9, 0, 32);
-+static struct qb_attr_code code_bp_bpscn_ctx_lo = QB_CODE(10, 0, 32);
-+static struct qb_attr_code code_bp_bpscn_ctx_hi = QB_CODE(11, 0, 32);
-+static struct qb_attr_code code_bp_hw_targ = QB_CODE(12, 0, 16);
-+static struct qb_attr_code code_bp_state = QB_CODE(1, 24, 3);
-+static struct qb_attr_code code_bp_fill = QB_CODE(2, 0, 32);
-+static struct qb_attr_code code_bp_hdptr = QB_CODE(3, 0, 32);
-+static struct qb_attr_code code_bp_sdcnt = QB_CODE(13, 0, 8);
-+static struct qb_attr_code code_bp_hdcnt = QB_CODE(13, 8, 8);
-+static struct qb_attr_code code_bp_sscnt = QB_CODE(13, 16, 8);
-+
-+static void qbman_bp_attr_clear(struct qbman_attr *a)
-+{
-+ memset(a, 0, sizeof(*a));
-+ attr_type_set(a, qbman_attr_usage_bpool);
-+}
-+
-+int qbman_bp_query(struct qbman_swp *s, uint32_t bpid,
-+ struct qbman_attr *a)
-+{
-+ uint32_t *p;
-+ uint32_t rslt;
-+ uint32_t *attr = ATTR32(a);
-+
-+ qbman_bp_attr_clear(a);
-+
-+ /* Start the management command */
-+ p = qbman_swp_mc_start(s);
-+ if (!p)
-+ return -EBUSY;
-+
-+ /* Encode the caller-provided attributes */
-+ qb_attr_code_encode(&code_bp_bpid, p, bpid);
-+
-+ /* Complete the management command */
-+ p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_BP_QUERY);
-+
-+ /* Decode the outcome */
-+ rslt = qb_attr_code_decode(&code_generic_rslt, p);
-+ BUG_ON(qb_attr_code_decode(&code_generic_verb, p) != QBMAN_BP_QUERY);
-+
-+ /* Determine success or failure */
-+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
-+ pr_err("Query of BPID 0x%x failed, code=0x%02x\n", bpid, rslt);
-+ return -EIO;
-+ }
-+
-+ /* For the query, word[0] of the result contains only the
-+ * verb/rslt fields, so skip word[0].
-+ */
-+ word_copy(&attr[1], &p[1], 15);
-+ return 0;
-+}
-+
-+void qbman_bp_attr_get_bdi(struct qbman_attr *a, int *bdi, int *va, int *wae)
-+{
-+ uint32_t *p = ATTR32(a);
-+
-+ *bdi = !!qb_attr_code_decode(&code_bp_bdi, p);
-+ *va = !!qb_attr_code_decode(&code_bp_va, p);
-+ *wae = !!qb_attr_code_decode(&code_bp_wae, p);
-+}
-+
-+static uint32_t qbman_bp_thresh_to_value(uint32_t val)
-+{
-+ return (val & 0xff) << ((val & 0xf00) >> 8);
-+}
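-+
-+/* The encoding decoded above packs a mantissa in bits 7:0 and an exponent
-+ * in bits 11:8; e.g. an encoded value of 0x0340 (mantissa 0x40, exponent 3)
-+ * yields a threshold of 0x40 << 3 = 512.
-+ */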
-+
-+void qbman_bp_attr_get_swdet(struct qbman_attr *a, uint32_t *swdet)
-+{
-+ uint32_t *p = ATTR32(a);
-+
-+ *swdet = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swdet,
-+ p));
-+}
-+void qbman_bp_attr_get_swdxt(struct qbman_attr *a, uint32_t *swdxt)
-+{
-+ uint32_t *p = ATTR32(a);
-+
-+ *swdxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swdxt,
-+ p));
-+}
-+void qbman_bp_attr_get_hwdet(struct qbman_attr *a, uint32_t *hwdet)
-+{
-+ uint32_t *p = ATTR32(a);
-+
-+ *hwdet = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_hwdet,
-+ p));
-+}
-+void qbman_bp_attr_get_hwdxt(struct qbman_attr *a, uint32_t *hwdxt)
-+{
-+ uint32_t *p = ATTR32(a);
-+
-+ *hwdxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_hwdxt,
-+ p));
-+}
-+
-+void qbman_bp_attr_get_swset(struct qbman_attr *a, uint32_t *swset)
-+{
-+ uint32_t *p = ATTR32(a);
-+
-+ *swset = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swset,
-+ p));
-+}
-+
-+void qbman_bp_attr_get_swsxt(struct qbman_attr *a, uint32_t *swsxt)
-+{
-+ uint32_t *p = ATTR32(a);
-+
-+ *swsxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swsxt,
-+ p));
-+}
-+
-+void qbman_bp_attr_get_vbpid(struct qbman_attr *a, uint32_t *vbpid)
-+{
-+ uint32_t *p = ATTR32(a);
-+
-+ *vbpid = qb_attr_code_decode(&code_bp_vbpid, p);
-+}
-+
-+void qbman_bp_attr_get_icid(struct qbman_attr *a, uint32_t *icid, int *pl)
-+{
-+ uint32_t *p = ATTR32(a);
-+
-+ *icid = qb_attr_code_decode(&code_bp_icid, p);
-+ *pl = !!qb_attr_code_decode(&code_bp_pl, p);
-+}
-+
-+void qbman_bp_attr_get_bpscn_addr(struct qbman_attr *a, uint64_t *bpscn_addr)
-+{
-+ uint32_t *p = ATTR32(a);
-+
-+ *bpscn_addr = ((uint64_t)qb_attr_code_decode(&code_bp_bpscn_addr_hi,
-+ p) << 32) |
-+ (uint64_t)qb_attr_code_decode(&code_bp_bpscn_addr_lo,
-+ p);
-+}
-+
-+void qbman_bp_attr_get_bpscn_ctx(struct qbman_attr *a, uint64_t *bpscn_ctx)
-+{
-+ uint32_t *p = ATTR32(a);
-+
-+ *bpscn_ctx = ((uint64_t)qb_attr_code_decode(&code_bp_bpscn_ctx_hi, p)
-+ << 32) |
-+ (uint64_t)qb_attr_code_decode(&code_bp_bpscn_ctx_lo,
-+ p);
-+}
-+
-+void qbman_bp_attr_get_hw_targ(struct qbman_attr *a, uint32_t *hw_targ)
-+{
-+ uint32_t *p = ATTR32(a);
-+
-+ *hw_targ = qb_attr_code_decode(&code_bp_hw_targ, p);
-+}
-+
-+int qbman_bp_info_has_free_bufs(struct qbman_attr *a)
-+{
-+ uint32_t *p = ATTR32(a);
-+
-+ return !(int)(qb_attr_code_decode(&code_bp_state, p) & 0x1);
-+}
-+
-+int qbman_bp_info_is_depleted(struct qbman_attr *a)
-+{
-+ uint32_t *p = ATTR32(a);
-+
-+ return (int)(qb_attr_code_decode(&code_bp_state, p) & 0x2);
-+}
-+
-+int qbman_bp_info_is_surplus(struct qbman_attr *a)
-+{
-+ uint32_t *p = ATTR32(a);
-+
-+ return (int)(qb_attr_code_decode(&code_bp_state, p) & 0x4);
-+}
-+
-+uint32_t qbman_bp_info_num_free_bufs(struct qbman_attr *a)
-+{
-+ uint32_t *p = ATTR32(a);
-+
-+ return qb_attr_code_decode(&code_bp_fill, p);
-+}
-+
-+uint32_t qbman_bp_info_hdptr(struct qbman_attr *a)
-+{
-+ uint32_t *p = ATTR32(a);
-+
-+ return qb_attr_code_decode(&code_bp_hdptr, p);
-+}
-+
-+uint32_t qbman_bp_info_sdcnt(struct qbman_attr *a)
-+{
-+ uint32_t *p = ATTR32(a);
-+
-+ return qb_attr_code_decode(&code_bp_sdcnt, p);
-+}
-+
-+uint32_t qbman_bp_info_hdcnt(struct qbman_attr *a)
-+{
-+ uint32_t *p = ATTR32(a);
-+
-+ return qb_attr_code_decode(&code_bp_hdcnt, p);
-+}
-+
-+uint32_t qbman_bp_info_sscnt(struct qbman_attr *a)
-+{
-+ uint32_t *p = ATTR32(a);
-+
-+ return qb_attr_code_decode(&code_bp_sscnt, p);
-+}
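-+
-+/* Putting the buffer pool helpers together (illustrative sketch; swp, bpid
-+ * and free_bufs are caller-supplied):
-+ *
-+ *   struct qbman_attr a;
-+ *
-+ *   if (!qbman_bp_query(swp, bpid, &a))
-+ *           free_bufs = qbman_bp_info_num_free_bufs(&a);
-+ */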
-+
-+static struct qb_attr_code code_fq_fqid = QB_CODE(1, 0, 24);
-+static struct qb_attr_code code_fq_cgrid = QB_CODE(2, 16, 16);
-+static struct qb_attr_code code_fq_destwq = QB_CODE(3, 0, 15);
-+static struct qb_attr_code code_fq_fqctrl = QB_CODE(3, 24, 8);
-+static struct qb_attr_code code_fq_icscred = QB_CODE(4, 0, 15);
-+static struct qb_attr_code code_fq_tdthresh = QB_CODE(4, 16, 13);
-+static struct qb_attr_code code_fq_oa_len = QB_CODE(5, 0, 12);
-+static struct qb_attr_code code_fq_oa_ics = QB_CODE(5, 14, 1);
-+static struct qb_attr_code code_fq_oa_cgr = QB_CODE(5, 15, 1);
-+static struct qb_attr_code code_fq_mctl_bdi = QB_CODE(5, 24, 1);
-+static struct qb_attr_code code_fq_mctl_ff = QB_CODE(5, 25, 1);
-+static struct qb_attr_code code_fq_mctl_va = QB_CODE(5, 26, 1);
-+static struct qb_attr_code code_fq_mctl_ps = QB_CODE(5, 27, 1);
-+static struct qb_attr_code code_fq_ctx_lower32 = QB_CODE(6, 0, 32);
-+static struct qb_attr_code code_fq_ctx_upper32 = QB_CODE(7, 0, 32);
-+static struct qb_attr_code code_fq_icid = QB_CODE(8, 0, 15);
-+static struct qb_attr_code code_fq_pl = QB_CODE(8, 15, 1);
-+static struct qb_attr_code code_fq_vfqid = QB_CODE(9, 0, 24);
-+static struct qb_attr_code code_fq_erfqid = QB_CODE(10, 0, 24);
-+
-+static void qbman_fq_attr_clear(struct qbman_attr *a)
-+{
-+ memset(a, 0, sizeof(*a));
-+ attr_type_set(a, qbman_attr_usage_fq);
-+}
-+
-+/* FQ query function for programmable fields */
-+int qbman_fq_query(struct qbman_swp *s, uint32_t fqid, struct qbman_attr *desc)
-+{
-+ uint32_t *p;
-+ uint32_t rslt;
-+ uint32_t *d = ATTR32(desc);
-+
-+ qbman_fq_attr_clear(desc);
-+
-+ p = qbman_swp_mc_start(s);
-+ if (!p)
-+ return -EBUSY;
-+ qb_attr_code_encode(&code_fq_fqid, p, fqid);
-+ p = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY);
-+
-+ /* Decode the outcome */
-+ rslt = qb_attr_code_decode(&code_generic_rslt, p);
-+ BUG_ON(qb_attr_code_decode(&code_generic_verb, p) != QBMAN_FQ_QUERY);
-+
-+ /* Determine success or failure */
-+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
-+ pr_err("Query of FQID 0x%x failed, code=0x%02x\n",
-+ fqid, rslt);
-+ return -EIO;
-+ }
-+ /* For the configure, word[0] of the command contains only the WE-mask.
-+ * For the query, word[0] of the result contains only the verb/rslt
-+ * fields. Skip word[0] in the latter case. */
-+ word_copy(&d[1], &p[1], 15);
-+ return 0;
-+}
-+
-+void qbman_fq_attr_get_fqctrl(struct qbman_attr *d, uint32_t *fqctrl)
-+{
-+ uint32_t *p = ATTR32(d);
-+
-+ *fqctrl = qb_attr_code_decode(&code_fq_fqctrl, p);
-+}
-+
-+void qbman_fq_attr_get_cgrid(struct qbman_attr *d, uint32_t *cgrid)
-+{
-+ uint32_t *p = ATTR32(d);
-+
-+ *cgrid = qb_attr_code_decode(&code_fq_cgrid, p);
-+}
-+
-+void qbman_fq_attr_get_destwq(struct qbman_attr *d, uint32_t *destwq)
-+{
-+ uint32_t *p = ATTR32(d);
-+
-+ *destwq = qb_attr_code_decode(&code_fq_destwq, p);
-+}
-+
-+void qbman_fq_attr_get_icscred(struct qbman_attr *d, uint32_t *icscred)
-+{
-+ uint32_t *p = ATTR32(d);
-+
-+ *icscred = qb_attr_code_decode(&code_fq_icscred, p);
-+}
-+
-+static struct qb_attr_code code_tdthresh_exp = QB_CODE(0, 0, 5);
-+static struct qb_attr_code code_tdthresh_mant = QB_CODE(0, 5, 8);
-+static uint32_t qbman_thresh_to_value(uint32_t val)
-+{
-+ uint32_t m, e;
-+
-+ m = qb_attr_code_decode(&code_tdthresh_mant, &val);
-+ e = qb_attr_code_decode(&code_tdthresh_exp, &val);
-+ return m << e;
-+}
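-+
-+/* The threshold is stored as a mantissa (bits 12:5) and an exponent
-+ * (bits 4:0); e.g. an encoded value of 0x803 carries mantissa 64 and
-+ * exponent 3, giving a threshold of 64 << 3 = 512.
-+ */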
-+
-+void qbman_fq_attr_get_tdthresh(struct qbman_attr *d, uint32_t *tdthresh)
-+{
-+ uint32_t *p = ATTR32(d);
-+
-+ *tdthresh = qbman_thresh_to_value(qb_attr_code_decode(&code_fq_tdthresh,
-+ p));
-+}
-+
-+void qbman_fq_attr_get_oa(struct qbman_attr *d,
-+ int *oa_ics, int *oa_cgr, int32_t *oa_len)
-+{
-+ uint32_t *p = ATTR32(d);
-+
-+ *oa_ics = !!qb_attr_code_decode(&code_fq_oa_ics, p);
-+ *oa_cgr = !!qb_attr_code_decode(&code_fq_oa_cgr, p);
-+ *oa_len = qb_attr_code_makesigned(&code_fq_oa_len,
-+ qb_attr_code_decode(&code_fq_oa_len, p));
-+}
-+
-+void qbman_fq_attr_get_mctl(struct qbman_attr *d,
-+ int *bdi, int *ff, int *va, int *ps)
-+{
-+ uint32_t *p = ATTR32(d);
-+
-+ *bdi = !!qb_attr_code_decode(&code_fq_mctl_bdi, p);
-+ *ff = !!qb_attr_code_decode(&code_fq_mctl_ff, p);
-+ *va = !!qb_attr_code_decode(&code_fq_mctl_va, p);
-+ *ps = !!qb_attr_code_decode(&code_fq_mctl_ps, p);
-+}
-+
-+void qbman_fq_attr_get_ctx(struct qbman_attr *d, uint32_t *hi, uint32_t *lo)
-+{
-+ uint32_t *p = ATTR32(d);
-+
-+ *hi = qb_attr_code_decode(&code_fq_ctx_upper32, p);
-+ *lo = qb_attr_code_decode(&code_fq_ctx_lower32, p);
-+}
-+
-+void qbman_fq_attr_get_icid(struct qbman_attr *d, uint32_t *icid, int *pl)
-+{
-+ uint32_t *p = ATTR32(d);
-+
-+ *icid = qb_attr_code_decode(&code_fq_icid, p);
-+ *pl = !!qb_attr_code_decode(&code_fq_pl, p);
-+}
-+
-+void qbman_fq_attr_get_vfqid(struct qbman_attr *d, uint32_t *vfqid)
-+{
-+ uint32_t *p = ATTR32(d);
-+
-+ *vfqid = qb_attr_code_decode(&code_fq_vfqid, p);
-+}
-+
-+void qbman_fq_attr_get_erfqid(struct qbman_attr *d, uint32_t *erfqid)
-+{
-+ uint32_t *p = ATTR32(d);
-+
-+ *erfqid = qb_attr_code_decode(&code_fq_erfqid, p);
-+}
-+
-+/* Query FQ Non-Programmable Fields */
-+static struct qb_attr_code code_fq_np_state = QB_CODE(0, 16, 3);
-+static struct qb_attr_code code_fq_np_fe = QB_CODE(0, 19, 1);
-+static struct qb_attr_code code_fq_np_x = QB_CODE(0, 20, 1);
-+static struct qb_attr_code code_fq_np_r = QB_CODE(0, 21, 1);
-+static struct qb_attr_code code_fq_np_oe = QB_CODE(0, 22, 1);
-+static struct qb_attr_code code_fq_np_frm_cnt = QB_CODE(6, 0, 24);
-+static struct qb_attr_code code_fq_np_byte_cnt = QB_CODE(7, 0, 32);
-+
-+int qbman_fq_query_state(struct qbman_swp *s, uint32_t fqid,
-+ struct qbman_attr *state)
-+{
-+ uint32_t *p;
-+ uint32_t rslt;
-+ uint32_t *d = ATTR32(state);
-+
-+ qbman_fq_attr_clear(state);
-+
-+ p = qbman_swp_mc_start(s);
-+ if (!p)
-+ return -EBUSY;
-+ qb_attr_code_encode(&code_fq_fqid, p, fqid);
-+ p = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY_NP);
-+
-+ /* Decode the outcome */
-+ rslt = qb_attr_code_decode(&code_generic_rslt, p);
-+ BUG_ON(qb_attr_code_decode(&code_generic_verb, p) != QBMAN_FQ_QUERY_NP);
-+
-+ /* Determine success or failure */
-+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
-+ pr_err("Query NP fields of FQID 0x%x failed, code=0x%02x\n",
-+ fqid, rslt);
-+ return -EIO;
-+ }
-+ word_copy(&d[0], &p[0], 16);
-+ return 0;
-+}
-+
-+uint32_t qbman_fq_state_schedstate(const struct qbman_attr *state)
-+{
-+ const uint32_t *p = ATTR32(state);
-+
-+ return qb_attr_code_decode(&code_fq_np_state, p);
-+}
-+
-+int qbman_fq_state_force_eligible(const struct qbman_attr *state)
-+{
-+ const uint32_t *p = ATTR32(state);
-+
-+ return !!qb_attr_code_decode(&code_fq_np_fe, p);
-+}
-+
-+int qbman_fq_state_xoff(const struct qbman_attr *state)
-+{
-+ const uint32_t *p = ATTR32(state);
-+
-+ return !!qb_attr_code_decode(&code_fq_np_x, p);
-+}
-+
-+int qbman_fq_state_retirement_pending(const struct qbman_attr *state)
-+{
-+ const uint32_t *p = ATTR32(state);
-+
-+ return !!qb_attr_code_decode(&code_fq_np_r, p);
-+}
-+
-+int qbman_fq_state_overflow_error(const struct qbman_attr *state)
-+{
-+ const uint32_t *p = ATTR32(state);
-+
-+ return !!qb_attr_code_decode(&code_fq_np_oe, p);
-+}
-+
-+uint32_t qbman_fq_state_frame_count(const struct qbman_attr *state)
-+{
-+ const uint32_t *p = ATTR32(state);
-+
-+ return qb_attr_code_decode(&code_fq_np_frm_cnt, p);
-+}
-+
-+uint32_t qbman_fq_state_byte_count(const struct qbman_attr *state)
-+{
-+ const uint32_t *p = ATTR32(state);
-+
-+ return qb_attr_code_decode(&code_fq_np_byte_cnt, p);
-+}
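-+
-+/* Example use of the state helpers (illustrative; swp, fqid and frames are
-+ * caller-supplied):
-+ *
-+ *   struct qbman_attr st;
-+ *
-+ *   if (!qbman_fq_query_state(swp, fqid, &st))
-+ *           frames = qbman_fq_state_frame_count(&st);
-+ */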
-+
-+/* Query CGR */
-+static struct qb_attr_code code_cgr_cgid = QB_CODE(0, 16, 16);
-+static struct qb_attr_code code_cgr_cscn_wq_en_enter = QB_CODE(2, 0, 1);
-+static struct qb_attr_code code_cgr_cscn_wq_en_exit = QB_CODE(2, 1, 1);
-+static struct qb_attr_code code_cgr_cscn_wq_icd = QB_CODE(2, 2, 1);
-+static struct qb_attr_code code_cgr_mode = QB_CODE(3, 16, 2);
-+static struct qb_attr_code code_cgr_rej_cnt_mode = QB_CODE(3, 18, 1);
-+static struct qb_attr_code code_cgr_cscn_bdi = QB_CODE(3, 19, 1);
-+static struct qb_attr_code code_cgr_cscn_wr_en_enter = QB_CODE(3, 24, 1);
-+static struct qb_attr_code code_cgr_cscn_wr_en_exit = QB_CODE(3, 25, 1);
-+static struct qb_attr_code code_cgr_cg_wr_ae = QB_CODE(3, 26, 1);
-+static struct qb_attr_code code_cgr_cscn_dcp_en = QB_CODE(3, 27, 1);
-+static struct qb_attr_code code_cgr_cg_wr_va = QB_CODE(3, 28, 1);
-+static struct qb_attr_code code_cgr_i_cnt_wr_en = QB_CODE(4, 0, 1);
-+static struct qb_attr_code code_cgr_i_cnt_wr_bnd = QB_CODE(4, 1, 5);
-+static struct qb_attr_code code_cgr_td_en = QB_CODE(4, 8, 1);
-+static struct qb_attr_code code_cgr_cs_thres = QB_CODE(4, 16, 13);
-+static struct qb_attr_code code_cgr_cs_thres_x = QB_CODE(5, 0, 13);
-+static struct qb_attr_code code_cgr_td_thres = QB_CODE(5, 16, 13);
-+static struct qb_attr_code code_cgr_cscn_tdcp = QB_CODE(6, 0, 16);
-+static struct qb_attr_code code_cgr_cscn_wqid = QB_CODE(6, 16, 16);
-+static struct qb_attr_code code_cgr_cscn_vcgid = QB_CODE(7, 0, 16);
-+static struct qb_attr_code code_cgr_cg_icid = QB_CODE(7, 16, 15);
-+static struct qb_attr_code code_cgr_cg_pl = QB_CODE(7, 31, 1);
-+static struct qb_attr_code code_cgr_cg_wr_addr_lo = QB_CODE(8, 0, 32);
-+static struct qb_attr_code code_cgr_cg_wr_addr_hi = QB_CODE(9, 0, 32);
-+static struct qb_attr_code code_cgr_cscn_ctx_lo = QB_CODE(10, 0, 32);
-+static struct qb_attr_code code_cgr_cscn_ctx_hi = QB_CODE(11, 0, 32);
-+
-+static void qbman_cgr_attr_clear(struct qbman_attr *a)
-+{
-+ memset(a, 0, sizeof(*a));
-+ attr_type_set(a, qbman_attr_usage_cgr);
-+}
-+
-+int qbman_cgr_query(struct qbman_swp *s, uint32_t cgid, struct qbman_attr *attr)
-+{
-+ uint32_t *p;
-+ uint32_t verb, rslt;
-+ uint32_t *d[2];
-+ int i;
-+ uint32_t query_verb;
-+
-+ d[0] = ATTR32(attr);
-+ d[1] = ATTR32_1(attr);
-+
-+ qbman_cgr_attr_clear(attr);
-+
-+ for (i = 0; i < 2; i++) {
-+ p = qbman_swp_mc_start(s);
-+ if (!p)
-+ return -EBUSY;
-+ query_verb = i ? QBMAN_WRED_QUERY : QBMAN_CGR_QUERY;
-+
-+ qb_attr_code_encode(&code_cgr_cgid, p, cgid);
-+ p = qbman_swp_mc_complete(s, p, p[0] | query_verb);
-+
-+ /* Decode the outcome */
-+ verb = qb_attr_code_decode(&code_generic_verb, p);
-+ rslt = qb_attr_code_decode(&code_generic_rslt, p);
-+ BUG_ON(verb != query_verb);
-+
-+ /* Determine success or failure */
-+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
-+ pr_err("Query CGID 0x%x failed,", cgid);
-+ pr_err(" verb=0x%02x, code=0x%02x\n", verb, rslt);
-+ return -EIO;
-+ }
-+ /* For the configure, word[0] of the command contains only the
-+ * verb/cgid. For the query, word[0] of the result contains
-+ * only the verb/rslt fields. Skip word[0] in the latter case.
-+ */
-+ word_copy(&d[i][1], &p[1], 15);
-+ }
-+ return 0;
-+}
-+
-+void qbman_cgr_attr_get_ctl1(struct qbman_attr *d, int *cscn_wq_en_enter,
-+ int *cscn_wq_en_exit, int *cscn_wq_icd)
-+{
-+ uint32_t *p = ATTR32(d);
-+ *cscn_wq_en_enter = !!qb_attr_code_decode(&code_cgr_cscn_wq_en_enter,
-+ p);
-+ *cscn_wq_en_exit = !!qb_attr_code_decode(&code_cgr_cscn_wq_en_exit, p);
-+ *cscn_wq_icd = !!qb_attr_code_decode(&code_cgr_cscn_wq_icd, p);
-+}
-+
-+void qbman_cgr_attr_get_mode(struct qbman_attr *d, uint32_t *mode,
-+ int *rej_cnt_mode, int *cscn_bdi)
-+{
-+ uint32_t *p = ATTR32(d);
-+ *mode = qb_attr_code_decode(&code_cgr_mode, p);
-+ *rej_cnt_mode = !!qb_attr_code_decode(&code_cgr_rej_cnt_mode, p);
-+ *cscn_bdi = !!qb_attr_code_decode(&code_cgr_cscn_bdi, p);
-+}
-+
-+void qbman_cgr_attr_get_ctl2(struct qbman_attr *d, int *cscn_wr_en_enter,
-+ int *cscn_wr_en_exit, int *cg_wr_ae,
-+ int *cscn_dcp_en, int *cg_wr_va)
-+{
-+ uint32_t *p = ATTR32(d);
-+ *cscn_wr_en_enter = !!qb_attr_code_decode(&code_cgr_cscn_wr_en_enter,
-+ p);
-+ *cscn_wr_en_exit = !!qb_attr_code_decode(&code_cgr_cscn_wr_en_exit, p);
-+ *cg_wr_ae = !!qb_attr_code_decode(&code_cgr_cg_wr_ae, p);
-+ *cscn_dcp_en = !!qb_attr_code_decode(&code_cgr_cscn_dcp_en, p);
-+ *cg_wr_va = !!qb_attr_code_decode(&code_cgr_cg_wr_va, p);
-+}
-+
-+void qbman_cgr_attr_get_iwc(struct qbman_attr *d, int *i_cnt_wr_en,
-+ uint32_t *i_cnt_wr_bnd)
-+{
-+ uint32_t *p = ATTR32(d);
-+ *i_cnt_wr_en = !!qb_attr_code_decode(&code_cgr_i_cnt_wr_en, p);
-+ *i_cnt_wr_bnd = qb_attr_code_decode(&code_cgr_i_cnt_wr_bnd, p);
-+}
-+
-+void qbman_cgr_attr_get_tdc(struct qbman_attr *d, int *td_en)
-+{
-+ uint32_t *p = ATTR32(d);
-+ *td_en = !!qb_attr_code_decode(&code_cgr_td_en, p);
-+}
-+
-+void qbman_cgr_attr_get_cs_thres(struct qbman_attr *d, uint32_t *cs_thres)
-+{
-+ uint32_t *p = ATTR32(d);
-+ *cs_thres = qbman_thresh_to_value(qb_attr_code_decode(
-+ &code_cgr_cs_thres, p));
-+}
-+
-+void qbman_cgr_attr_get_cs_thres_x(struct qbman_attr *d,
-+ uint32_t *cs_thres_x)
-+{
-+ uint32_t *p = ATTR32(d);
-+ *cs_thres_x = qbman_thresh_to_value(qb_attr_code_decode(
-+ &code_cgr_cs_thres_x, p));
-+}
-+
-+void qbman_cgr_attr_get_td_thres(struct qbman_attr *d, uint32_t *td_thres)
-+{
-+ uint32_t *p = ATTR32(d);
-+ *td_thres = qbman_thresh_to_value(qb_attr_code_decode(
-+ &code_cgr_td_thres, p));
-+}
-+
-+void qbman_cgr_attr_get_cscn_tdcp(struct qbman_attr *d, uint32_t *cscn_tdcp)
-+{
-+ uint32_t *p = ATTR32(d);
-+ *cscn_tdcp = qb_attr_code_decode(&code_cgr_cscn_tdcp, p);
-+}
-+
-+void qbman_cgr_attr_get_cscn_wqid(struct qbman_attr *d, uint32_t *cscn_wqid)
-+{
-+ uint32_t *p = ATTR32(d);
-+ *cscn_wqid = qb_attr_code_decode(&code_cgr_cscn_wqid, p);
-+}
-+
-+void qbman_cgr_attr_get_cscn_vcgid(struct qbman_attr *d,
-+ uint32_t *cscn_vcgid)
-+{
-+ uint32_t *p = ATTR32(d);
-+ *cscn_vcgid = qb_attr_code_decode(&code_cgr_cscn_vcgid, p);
-+}
-+
-+void qbman_cgr_attr_get_cg_icid(struct qbman_attr *d, uint32_t *icid,
-+ int *pl)
-+{
-+ uint32_t *p = ATTR32(d);
-+ *icid = qb_attr_code_decode(&code_cgr_cg_icid, p);
-+ *pl = !!qb_attr_code_decode(&code_cgr_cg_pl, p);
-+}
-+
-+void qbman_cgr_attr_get_cg_wr_addr(struct qbman_attr *d,
-+ uint64_t *cg_wr_addr)
-+{
-+ uint32_t *p = ATTR32(d);
-+ *cg_wr_addr = ((uint64_t)qb_attr_code_decode(&code_cgr_cg_wr_addr_hi,
-+ p) << 32) |
-+ (uint64_t)qb_attr_code_decode(&code_cgr_cg_wr_addr_lo,
-+ p);
-+}
-+
-+void qbman_cgr_attr_get_cscn_ctx(struct qbman_attr *d, uint64_t *cscn_ctx)
-+{
-+ uint32_t *p = ATTR32(d);
-+ *cscn_ctx = ((uint64_t)qb_attr_code_decode(&code_cgr_cscn_ctx_hi, p)
-+ << 32) |
-+ (uint64_t)qb_attr_code_decode(&code_cgr_cscn_ctx_lo, p);
-+}
-+
-+#define WRED_EDP_WORD(n) (18 + (n) / 4)
-+#define WRED_EDP_OFFSET(n) (8 * ((n) % 4))
-+#define WRED_PARM_DP_WORD(n) ((n) + 20)
-+#define WRED_WE_EDP(n) (16 + (n) * 2)
-+#define WRED_WE_PARM_DP(n) (17 + (n) * 2)
-+void qbman_cgr_attr_wred_get_edp(struct qbman_attr *d, uint32_t idx,
-+ int *edp)
-+{
-+ uint32_t *p = ATTR32(d);
-+ struct qb_attr_code code_wred_edp = QB_CODE(WRED_EDP_WORD(idx),
-+ WRED_EDP_OFFSET(idx), 8);
-+ *edp = (int)qb_attr_code_decode(&code_wred_edp, p);
-+}
-+
-+void qbman_cgr_attr_wred_dp_decompose(uint32_t dp, uint64_t *minth,
-+ uint64_t *maxth, uint8_t *maxp)
-+{
-+ uint8_t ma, mn, step_i, step_s, pn;
-+
-+ ma = (uint8_t)(dp >> 24);
-+ mn = (uint8_t)(dp >> 19) & 0x1f;
-+ step_i = (uint8_t)(dp >> 11);
-+ step_s = (uint8_t)(dp >> 6) & 0x1f;
-+ pn = (uint8_t)dp & 0x3f;
-+
-+ *maxp = (uint8_t)(((pn<<2) * 100)/256);
-+
-+ if (mn == 0)
-+ *maxth = ma;
-+ else
-+ *maxth = ((ma+256) * (1<<(mn-1)));
-+
-+ if (step_s == 0)
-+ *minth = *maxth - step_i;
-+ else
-+ *minth = *maxth - (256 + step_i) * (1<<(step_s - 1));
-+}
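-+
-+/* Worked example: a drop profile with MA = 64, MN = 3, STEP_I = 16,
-+ * STEP_S = 2 and PN = 32 decomposes to
-+ *   maxth = (64 + 256) << (3 - 1) = 1280,
-+ *   minth = 1280 - ((256 + 16) << (2 - 1)) = 736,
-+ *   maxp  = ((32 << 2) * 100) / 256 = 50 (i.e. 50%).
-+ */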
-+
-+void qbman_cgr_attr_wred_get_parm_dp(struct qbman_attr *d, uint32_t idx,
-+ uint32_t *dp)
-+{
-+ uint32_t *p = ATTR32(d);
-+ struct qb_attr_code code_wred_parm_dp = QB_CODE(WRED_PARM_DP_WORD(idx),
-+ 0, 8);
-+ *dp = qb_attr_code_decode(&code_wred_parm_dp, p);
-+}
-+
-+/* Query CGR/CCGR/CQ statistics */
-+static struct qb_attr_code code_cgr_stat_ct = QB_CODE(4, 0, 32);
-+static struct qb_attr_code code_cgr_stat_frame_cnt_lo = QB_CODE(4, 0, 32);
-+static struct qb_attr_code code_cgr_stat_frame_cnt_hi = QB_CODE(5, 0, 8);
-+static struct qb_attr_code code_cgr_stat_byte_cnt_lo = QB_CODE(6, 0, 32);
-+static struct qb_attr_code code_cgr_stat_byte_cnt_hi = QB_CODE(7, 0, 16);
-+static int qbman_cgr_statistics_query(struct qbman_swp *s, uint32_t cgid,
-+ int clear, uint32_t command_type,
-+ uint64_t *frame_cnt, uint64_t *byte_cnt)
-+{
-+ uint32_t *p;
-+ uint32_t verb, rslt;
-+ uint32_t query_verb;
-+ uint32_t hi, lo;
-+
-+ p = qbman_swp_mc_start(s);
-+ if (!p)
-+ return -EBUSY;
-+
-+ qb_attr_code_encode(&code_cgr_cgid, p, cgid);
-+ if (command_type < 2)
-+ qb_attr_code_encode(&code_cgr_stat_ct, p, command_type);
-+ query_verb = clear ?
-+ QBMAN_CGR_STAT_QUERY_CLR : QBMAN_CGR_STAT_QUERY;
-+ p = qbman_swp_mc_complete(s, p, p[0] | query_verb);
-+
-+ /* Decode the outcome */
-+ verb = qb_attr_code_decode(&code_generic_verb, p);
-+ rslt = qb_attr_code_decode(&code_generic_rslt, p);
-+ BUG_ON(verb != query_verb);
-+
-+ /* Determine success or failure */
-+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
-+ pr_err("Query statistics of CGID 0x%x failed,", cgid);
-+ pr_err(" verb=0x%02x code=0x%02x\n", verb, rslt);
-+ return -EIO;
-+ }
-+
-+ if (frame_cnt) {
-+ hi = qb_attr_code_decode(&code_cgr_stat_frame_cnt_hi, p);
-+ lo = qb_attr_code_decode(&code_cgr_stat_frame_cnt_lo, p);
-+ *frame_cnt = ((uint64_t)hi << 32) | (uint64_t)lo;
-+ }
-+ if (byte_cnt) {
-+ hi = qb_attr_code_decode(&code_cgr_stat_byte_cnt_hi, p);
-+ lo = qb_attr_code_decode(&code_cgr_stat_byte_cnt_lo, p);
-+ *byte_cnt = ((uint64_t)hi << 32) | (uint64_t)lo;
-+ }
-+
-+ return 0;
-+}
-+
-+int qbman_cgr_reject_statistics(struct qbman_swp *s, uint32_t cgid, int clear,
-+ uint64_t *frame_cnt, uint64_t *byte_cnt)
-+{
-+ return qbman_cgr_statistics_query(s, cgid, clear, 0xff,
-+ frame_cnt, byte_cnt);
-+}
-+
-+int qbman_ccgr_reject_statistics(struct qbman_swp *s, uint32_t cgid, int clear,
-+ uint64_t *frame_cnt, uint64_t *byte_cnt)
-+{
-+ return qbman_cgr_statistics_query(s, cgid, clear, 1,
-+ frame_cnt, byte_cnt);
-+}
-+
-+int qbman_cq_dequeue_statistics(struct qbman_swp *s, uint32_t cgid, int clear,
-+ uint64_t *frame_cnt, uint64_t *byte_cnt)
-+{
-+ return qbman_cgr_statistics_query(s, cgid, clear, 0,
-+ frame_cnt, byte_cnt);
-+}
-+
-+/* WQ Chan Query */
-+static struct qb_attr_code code_wqchan_chanid = QB_CODE(0, 16, 16);
-+static struct qb_attr_code code_wqchan_cdan_ctx_lo = QB_CODE(2, 0, 32);
-+static struct qb_attr_code code_wqchan_cdan_ctx_hi = QB_CODE(3, 0, 32);
-+static struct qb_attr_code code_wqchan_cdan_wqid = QB_CODE(1, 16, 16);
-+static struct qb_attr_code code_wqchan_ctrl = QB_CODE(1, 8, 8);
-+
-+static void qbman_wqchan_attr_clear(struct qbman_attr *a)
-+{
-+ memset(a, 0, sizeof(*a));
-+ attr_type_set(a, qbman_attr_usage_wqchan);
-+}
-+
-+int qbman_wqchan_query(struct qbman_swp *s, uint16_t chanid,
-+ struct qbman_attr *a)
-+{
-+ uint32_t *p;
-+ uint32_t rslt;
-+ uint32_t *attr = ATTR32(a);
-+
-+ qbman_wqchan_attr_clear(a);
-+
-+ /* Start the management command */
-+ p = qbman_swp_mc_start(s);
-+ if (!p)
-+ return -EBUSY;
-+
-+ /* Encode the caller-provided attributes */
-+ qb_attr_code_encode(&code_wqchan_chanid, p, chanid);
-+
-+ /* Complete the management command */
-+ p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_WQ_QUERY);
-+
-+ /* Decode the outcome */
-+ rslt = qb_attr_code_decode(&code_generic_rslt, p);
-+ BUG_ON(qb_attr_code_decode(&code_generic_verb, p) != QBMAN_WQ_QUERY);
-+
-+ /* Determine success or failure */
-+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
-+ pr_err("Query of WQCHAN 0x%x failed, code=0x%02x\n",
-+ chanid, rslt);
-+ return -EIO;
-+ }
-+
-+ /* For the query, word[0] of the result contains only the
-+ * verb/rslt fields, so skip word[0].
-+ */
-+ word_copy(&attr[1], &p[1], 15);
-+ return 0;
-+}
-+
-+void qbman_wqchan_attr_get_wqlen(struct qbman_attr *attr, int wq, uint32_t *len)
-+{
-+ uint32_t *p = ATTR32(attr);
-+ struct qb_attr_code code_wqchan_len = QB_CODE(wq + 8, 0, 24);
-+ *len = qb_attr_code_decode(&code_wqchan_len, p);
-+}
-+
-+void qbman_wqchan_attr_get_cdan_ctx(struct qbman_attr *attr, uint64_t *cdan_ctx)
-+{
-+ uint32_t lo, hi;
-+ uint32_t *p = ATTR32(attr);
-+
-+ lo = qb_attr_code_decode(&code_wqchan_cdan_ctx_lo, p);
-+ hi = qb_attr_code_decode(&code_wqchan_cdan_ctx_hi, p);
-+ *cdan_ctx = ((uint64_t)hi << 32) | (uint64_t)lo;
-+}
-+
-+void qbman_wqchan_attr_get_cdan_wqid(struct qbman_attr *attr,
-+ uint16_t *cdan_wqid)
-+{
-+ uint32_t *p = ATTR32(attr);
-+ *cdan_wqid = (uint16_t)qb_attr_code_decode(&code_wqchan_cdan_wqid, p);
-+}
-+
-+void qbman_wqchan_attr_get_ctrl(struct qbman_attr *attr, uint8_t *ctrl)
-+{
-+ uint32_t *p = ATTR32(attr);
-+ *ctrl = (uint8_t)qb_attr_code_decode(&code_wqchan_ctrl, p);
-+}
-+void qbman_wqchan_attr_get_chanid(struct qbman_attr *attr, uint16_t *chanid)
-+{
-+ uint32_t *p = ATTR32(attr);
-+ *chanid = (uint16_t)qb_attr_code_decode(&code_wqchan_chanid, p);
-+}
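-+
-+/* Example (illustrative; swp, chanid and len are caller-supplied): query a
-+ * channel and read the length of its work queue 0:
-+ *
-+ *   struct qbman_attr a;
-+ *
-+ *   if (!qbman_wqchan_query(swp, chanid, &a))
-+ *           qbman_wqchan_attr_get_wqlen(&a, 0, &len);
-+ */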
-+
-diff --git a/drivers/net/dpaa2/qbman/driver/qbman_debug.h b/drivers/net/dpaa2/qbman/driver/qbman_debug.h
-new file mode 100644
-index 0000000..8c89731
---- /dev/null
-+++ b/drivers/net/dpaa2/qbman/driver/qbman_debug.h
-@@ -0,0 +1,140 @@
-+/* Copyright (C) 2015 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+struct qbman_attr {
-+ uint32_t dont_manipulate_directly[40];
-+};
-+
-+/* Buffer pool query commands */
-+int qbman_bp_query(struct qbman_swp *s, uint32_t bpid,
-+ struct qbman_attr *a);
-+void qbman_bp_attr_get_bdi(struct qbman_attr *a, int *bdi, int *va, int *wae);
-+void qbman_bp_attr_get_swdet(struct qbman_attr *a, uint32_t *swdet);
-+void qbman_bp_attr_get_swdxt(struct qbman_attr *a, uint32_t *swdxt);
-+void qbman_bp_attr_get_hwdet(struct qbman_attr *a, uint32_t *hwdet);
-+void qbman_bp_attr_get_hwdxt(struct qbman_attr *a, uint32_t *hwdxt);
-+void qbman_bp_attr_get_swset(struct qbman_attr *a, uint32_t *swset);
-+void qbman_bp_attr_get_swsxt(struct qbman_attr *a, uint32_t *swsxt);
-+void qbman_bp_attr_get_vbpid(struct qbman_attr *a, uint32_t *vbpid);
-+void qbman_bp_attr_get_icid(struct qbman_attr *a, uint32_t *icid, int *pl);
-+void qbman_bp_attr_get_bpscn_addr(struct qbman_attr *a, uint64_t *bpscn_addr);
-+void qbman_bp_attr_get_bpscn_ctx(struct qbman_attr *a, uint64_t *bpscn_ctx);
-+void qbman_bp_attr_get_hw_targ(struct qbman_attr *a, uint32_t *hw_targ);
-+int qbman_bp_info_has_free_bufs(struct qbman_attr *a);
-+int qbman_bp_info_is_depleted(struct qbman_attr *a);
-+int qbman_bp_info_is_surplus(struct qbman_attr *a);
-+uint32_t qbman_bp_info_num_free_bufs(struct qbman_attr *a);
-+uint32_t qbman_bp_info_hdptr(struct qbman_attr *a);
-+uint32_t qbman_bp_info_sdcnt(struct qbman_attr *a);
-+uint32_t qbman_bp_info_hdcnt(struct qbman_attr *a);
-+uint32_t qbman_bp_info_sscnt(struct qbman_attr *a);
-+
-+/* FQ query function for programmable fields */
-+int qbman_fq_query(struct qbman_swp *s, uint32_t fqid,
-+ struct qbman_attr *desc);
-+void qbman_fq_attr_get_fqctrl(struct qbman_attr *d, uint32_t *fqctrl);
-+void qbman_fq_attr_get_cgrid(struct qbman_attr *d, uint32_t *cgrid);
-+void qbman_fq_attr_get_destwq(struct qbman_attr *d, uint32_t *destwq);
-+void qbman_fq_attr_get_icscred(struct qbman_attr *d, uint32_t *icscred);
-+void qbman_fq_attr_get_tdthresh(struct qbman_attr *d, uint32_t *tdthresh);
-+void qbman_fq_attr_get_oa(struct qbman_attr *d,
-+ int *oa_ics, int *oa_cgr, int32_t *oa_len);
-+void qbman_fq_attr_get_mctl(struct qbman_attr *d,
-+ int *bdi, int *ff, int *va, int *ps);
-+void qbman_fq_attr_get_ctx(struct qbman_attr *d, uint32_t *hi, uint32_t *lo);
-+void qbman_fq_attr_get_icid(struct qbman_attr *d, uint32_t *icid, int *pl);
-+void qbman_fq_attr_get_vfqid(struct qbman_attr *d, uint32_t *vfqid);
-+void qbman_fq_attr_get_erfqid(struct qbman_attr *d, uint32_t *erfqid);
-+
-+/* FQ query command for non-programmable fields */
-+enum qbman_fq_schedstate_e {
-+ qbman_fq_schedstate_oos = 0,
-+ qbman_fq_schedstate_retired,
-+ qbman_fq_schedstate_tentatively_scheduled,
-+ qbman_fq_schedstate_truly_scheduled,
-+ qbman_fq_schedstate_parked,
-+ qbman_fq_schedstate_held_active,
-+};
-+
-+int qbman_fq_query_state(struct qbman_swp *s, uint32_t fqid,
-+ struct qbman_attr *state);
-+uint32_t qbman_fq_state_schedstate(const struct qbman_attr *state);
-+int qbman_fq_state_force_eligible(const struct qbman_attr *state);
-+int qbman_fq_state_xoff(const struct qbman_attr *state);
-+int qbman_fq_state_retirement_pending(const struct qbman_attr *state);
-+int qbman_fq_state_overflow_error(const struct qbman_attr *state);
-+uint32_t qbman_fq_state_frame_count(const struct qbman_attr *state);
-+uint32_t qbman_fq_state_byte_count(const struct qbman_attr *state);
-+
-+/* CGR query */
-+int qbman_cgr_query(struct qbman_swp *s, uint32_t cgid,
-+ struct qbman_attr *attr);
-+void qbman_cgr_attr_get_ctl1(struct qbman_attr *d, int *cscn_wq_en_enter,
-+ int *cscn_wq_en_exit, int *cscn_wq_icd);
-+void qbman_cgr_attr_get_mode(struct qbman_attr *d, uint32_t *mode,
-+ int *rej_cnt_mode, int *cscn_bdi);
-+void qbman_cgr_attr_get_ctl2(struct qbman_attr *d, int *cscn_wr_en_enter,
-+ int *cscn_wr_en_exit, int *cg_wr_ae,
-+ int *cscn_dcp_en, int *cg_wr_va);
-+void qbman_cgr_attr_get_iwc(struct qbman_attr *d, int *i_cnt_wr_en,
-+ uint32_t *i_cnt_wr_bnd);
-+void qbman_cgr_attr_get_tdc(struct qbman_attr *d, int *td_en);
-+void qbman_cgr_attr_get_cs_thres(struct qbman_attr *d, uint32_t *cs_thres);
-+void qbman_cgr_attr_get_cs_thres_x(struct qbman_attr *d,
-+ uint32_t *cs_thres_x);
-+void qbman_cgr_attr_get_td_thres(struct qbman_attr *d, uint32_t *td_thres);
-+void qbman_cgr_attr_get_cscn_tdcp(struct qbman_attr *d, uint32_t *cscn_tdcp);
-+void qbman_cgr_attr_get_cscn_wqid(struct qbman_attr *d, uint32_t *cscn_wqid);
-+void qbman_cgr_attr_get_cscn_vcgid(struct qbman_attr *d,
-+ uint32_t *cscn_vcgid);
-+void qbman_cgr_attr_get_cg_icid(struct qbman_attr *d, uint32_t *icid,
-+ int *pl);
-+void qbman_cgr_attr_get_cg_wr_addr(struct qbman_attr *d,
-+ uint64_t *cg_wr_addr);
-+void qbman_cgr_attr_get_cscn_ctx(struct qbman_attr *d, uint64_t *cscn_ctx);
-+void qbman_cgr_attr_wred_get_edp(struct qbman_attr *d, uint32_t idx,
-+ int *edp);
-+void qbman_cgr_attr_wred_dp_decompose(uint32_t dp, uint64_t *minth,
-+ uint64_t *maxth, uint8_t *maxp);
-+void qbman_cgr_attr_wred_get_parm_dp(struct qbman_attr *d, uint32_t idx,
-+ uint32_t *dp);
-+
-+/* CGR/CCGR/CQ statistics query */
-+int qbman_cgr_reject_statistics(struct qbman_swp *s, uint32_t cgid, int clear,
-+ uint64_t *frame_cnt, uint64_t *byte_cnt);
-+int qbman_ccgr_reject_statistics(struct qbman_swp *s, uint32_t cgid, int clear,
-+ uint64_t *frame_cnt, uint64_t *byte_cnt);
-+int qbman_cq_dequeue_statistics(struct qbman_swp *s, uint32_t cgid, int clear,
-+ uint64_t *frame_cnt, uint64_t *byte_cnt);
-+
-+/* Query Work Queue Channel */
-+int qbman_wqchan_query(struct qbman_swp *s, uint16_t chanid,
-+ struct qbman_attr *attr);
-+void qbman_wqchan_attr_get_wqlen(struct qbman_attr *attr, int wq, uint32_t *len);
-+void qbman_wqchan_attr_get_cdan_ctx(struct qbman_attr *attr, uint64_t *cdan_ctx);
-+void qbman_wqchan_attr_get_cdan_wqid(struct qbman_attr *attr,
-+ uint16_t *cdan_wqid);
-+void qbman_wqchan_attr_get_ctrl(struct qbman_attr *attr, uint8_t *ctrl);
-+void qbman_wqchan_attr_get_chanid(struct qbman_attr *attr, uint16_t *chanid);
-diff --git a/drivers/net/dpaa2/qbman/driver/qbman_portal.c b/drivers/net/dpaa2/qbman/driver/qbman_portal.c
-new file mode 100644
-index 0000000..464f386
---- /dev/null
-+++ b/drivers/net/dpaa2/qbman/driver/qbman_portal.c
-@@ -0,0 +1,1407 @@
-+/* Copyright (C) 2014 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include "qbman_portal.h"
-+
-+/* QBMan portal management command codes */
-+#define QBMAN_MC_ACQUIRE 0x30
-+#define QBMAN_WQCHAN_CONFIGURE 0x46
-+
-+/* CINH register offsets */
-+#define QBMAN_CINH_SWP_EQCR_PI 0x800
-+#define QBMAN_CINH_SWP_EQCR_CI 0x840
-+#define QBMAN_CINH_SWP_EQAR 0x8c0
-+#define QBMAN_CINH_SWP_DQPI 0xa00
-+#define QBMAN_CINH_SWP_DCAP 0xac0
-+#define QBMAN_CINH_SWP_SDQCR 0xb00
-+#define QBMAN_CINH_SWP_RAR 0xcc0
-+#define QBMAN_CINH_SWP_ISR 0xe00
-+#define QBMAN_CINH_SWP_IER 0xe40
-+#define QBMAN_CINH_SWP_ISDR 0xe80
-+#define QBMAN_CINH_SWP_IIR 0xec0
-+
-+/* CENA register offsets */
-+#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((uint32_t)(n) << 6))
-+#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((uint32_t)(n) << 6))
-+#define QBMAN_CENA_SWP_RCR(n) (0x400 + ((uint32_t)(n) << 6))
-+#define QBMAN_CENA_SWP_CR 0x600
-+#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((uint32_t)(vb) >> 1))
-+#define QBMAN_CENA_SWP_VDQCR 0x780
-+#define QBMAN_CENA_SWP_EQCR_CI 0x840
-+
-+/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
-+#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)p & 0x1ff) >> 6)
-+
-+/* QBMan FQ management command codes */
-+#define QBMAN_FQ_SCHEDULE 0x48
-+#define QBMAN_FQ_FORCE 0x49
-+#define QBMAN_FQ_XON 0x4d
-+#define QBMAN_FQ_XOFF 0x4e
-+
-+/*******************************/
-+/* Pre-defined attribute codes */
-+/*******************************/
-+
-+struct qb_attr_code code_generic_verb = QB_CODE(0, 0, 7);
-+struct qb_attr_code code_generic_rslt = QB_CODE(0, 8, 8);
-+
-+/*************************/
-+/* SDQCR attribute codes */
-+/*************************/
-+
-+/* we put these here because at least some of them are required by
-+ * qbman_swp_init() */
-+struct qb_attr_code code_sdqcr_dct = QB_CODE(0, 24, 2);
-+struct qb_attr_code code_sdqcr_fc = QB_CODE(0, 29, 1);
-+struct qb_attr_code code_sdqcr_tok = QB_CODE(0, 16, 8);
-+#define CODE_SDQCR_DQSRC(n) QB_CODE(0, n, 1)
-+enum qbman_sdqcr_dct {
-+ qbman_sdqcr_dct_null = 0,
-+ qbman_sdqcr_dct_prio_ics,
-+ qbman_sdqcr_dct_active_ics,
-+ qbman_sdqcr_dct_active
-+};
-+enum qbman_sdqcr_fc {
-+ qbman_sdqcr_fc_one = 0,
-+ qbman_sdqcr_fc_up_to_3 = 1
-+};
-+struct qb_attr_code code_sdqcr_dqsrc = QB_CODE(0, 0, 16);
-+
-+/*********************************/
-+/* Portal constructor/destructor */
-+/*********************************/
-+
-+/* Software portals should always be in the power-on state when we initialise,
-+ * due to the CCSR-based portal reset functionality that MC has.
-+ *
-+ * Erk! Turns out that QMan versions prior to 4.1 do not correctly reset DQRR
-+ * valid-bits, so we need to support a workaround where we don't trust
-+ * valid-bits when detecting new entries until any stale ring entries have been
-+ * overwritten at least once. The idea is that we read PI for the first few
-+ * entries, then switch to valid-bit after that. The trick is to clear the
-+ * bug-work-around boolean once the PI wraps around the ring for the first time.
-+ *
-+ * Note: this still carries a slight additional cost once the decrementer hits
-+ * zero.
-+ */
-+struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
-+{
-+ int ret;
-+ uint32_t eqcr_pi;
-+ struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL);
-+ if (!p)
-+ return NULL;
-+ p->desc = d;
-+#ifdef QBMAN_CHECKING
-+ p->mc.check = swp_mc_can_start;
-+#endif
-+ p->mc.valid_bit = QB_VALID_BIT;
-+ p->sdq = 0;
-+ qb_attr_code_encode(&code_sdqcr_dct, &p->sdq, qbman_sdqcr_dct_prio_ics);
-+ qb_attr_code_encode(&code_sdqcr_fc, &p->sdq, qbman_sdqcr_fc_up_to_3);
-+ qb_attr_code_encode(&code_sdqcr_tok, &p->sdq, 0xbb);
-+ atomic_set(&p->vdq.busy, 1);
-+ p->vdq.valid_bit = QB_VALID_BIT;
-+ p->dqrr.next_idx = 0;
-+ p->dqrr.valid_bit = QB_VALID_BIT;
-+ qman_version = p->desc->qman_version;
-+ if ((qman_version & 0xFFFF0000) < QMAN_REV_4100) {
-+ p->dqrr.dqrr_size = 4;
-+ p->dqrr.reset_bug = 1;
-+ } else {
-+ p->dqrr.dqrr_size = 8;
-+ p->dqrr.reset_bug = 0;
-+ }
-+
-+ ret = qbman_swp_sys_init(&p->sys, d, p->dqrr.dqrr_size);
-+ if (ret) {
-+ kfree(p);
-+ pr_err("qbman_swp_sys_init() failed %d\n", ret);
-+ return NULL;
-+ }
-+ /* SDQCR needs to be initialized to 0 when no channels are being
-+ * dequeued from, or else the QMan HW will indicate an error. The
-+ * values calculated above will be applied when dequeues from a
-+ * specific channel are enabled.
-+ */
-+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0);
-+ eqcr_pi = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI);
-+ p->eqcr.pi = eqcr_pi & 0xF;
-+ p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
-+ p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_CI) & 0xF;
-+ p->eqcr.available = QBMAN_EQCR_SIZE - qm_cyc_diff(QBMAN_EQCR_SIZE,
-+ p->eqcr.ci, p->eqcr.pi);
-+
-+ return p;
-+}
-+
-+void qbman_swp_finish(struct qbman_swp *p)
-+{
-+#ifdef QBMAN_CHECKING
-+ BUG_ON(p->mc.check != swp_mc_can_start);
-+#endif
-+ qbman_swp_sys_finish(&p->sys);
-+ kfree(p);
-+}
-+
-+const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p)
-+{
-+ return p->desc;
-+}
-+
-+/**************/
-+/* Interrupts */
-+/**************/
-+
-+uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p)
-+{
-+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISDR);
-+}
-+
-+void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask)
-+{
-+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISDR, mask);
-+}
-+
-+uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p)
-+{
-+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISR);
-+}
-+
-+void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask)
-+{
-+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask);
-+}
-+
-+uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
-+{
-+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER);
-+}
-+
-+void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask)
-+{
-+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IER, mask);
-+}
-+
-+int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
-+{
-+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IIR);
-+}
-+
-+void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
-+{
-+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
-+}
-+
-+/***********************/
-+/* Management commands */
-+/***********************/
-+
-+/*
-+ * Internal code common to all types of management commands.
-+ */
-+
-+void *qbman_swp_mc_start(struct qbman_swp *p)
-+{
-+ void *ret;
-+#ifdef QBMAN_CHECKING
-+ BUG_ON(p->mc.check != swp_mc_can_start);
-+#endif
-+ ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR);
-+#ifdef QBMAN_CHECKING
-+ if (!ret)
-+ p->mc.check = swp_mc_can_submit;
-+#endif
-+ return ret;
-+}
-+
-+void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint32_t cmd_verb)
-+{
-+ uint32_t *v = cmd;
-+#ifdef QBMAN_CHECKING
-+ BUG_ON(p->mc.check != swp_mc_can_submit);
-+#endif
-+ /* TBD: "|=" is going to hurt performance. Need to move as many fields
-+ * out of word zero, and for those that remain, the "OR" needs to occur
-+ * at the caller side. This debug check helps to catch cases where the
-+ * caller wants to OR but has forgotten to do so. */
-+ BUG_ON((*v & cmd_verb) != *v);
-+ *v = cmd_verb | p->mc.valid_bit;
-+ qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
-+#ifdef QBMAN_CHECKING
-+ p->mc.check = swp_mc_can_poll;
-+#endif
-+}
-+
-+void *qbman_swp_mc_result(struct qbman_swp *p)
-+{
-+ uint32_t *ret, verb;
-+#ifdef QBMAN_CHECKING
-+ BUG_ON(p->mc.check != swp_mc_can_poll);
-+#endif
-+ qbman_cena_invalidate_prefetch(&p->sys,
-+ QBMAN_CENA_SWP_RR(p->mc.valid_bit));
-+ ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
-+ /* Remove the valid-bit - command completed iff the rest is non-zero */
-+ verb = ret[0] & ~QB_VALID_BIT;
-+ if (!verb)
-+ return NULL;
-+#ifdef QBMAN_CHECKING
-+ p->mc.check = swp_mc_can_start;
-+#endif
-+ p->mc.valid_bit ^= QB_VALID_BIT;
-+ return ret;
-+}
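-+
-+/* The helpers above are used as a start/submit/poll triplet; a minimal
-+ * sketch (command encoding omitted, s and verb are caller-supplied):
-+ *
-+ *   uint32_t *cmd, *rsp;
-+ *
-+ *   cmd = qbman_swp_mc_start(s);
-+ *   if (cmd) {
-+ *           ... encode the command words into cmd ...
-+ *           qbman_swp_mc_submit(s, cmd, verb);
-+ *           do {
-+ *                   rsp = qbman_swp_mc_result(s);
-+ *           } while (!rsp);
-+ *   }
-+ *
-+ * The query code in this driver uses qbman_swp_mc_complete(), which
-+ * combines the submit and poll steps into a single call.
-+ */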
-+
-+/***********/
-+/* Enqueue */
-+/***********/
-+
-+/* These should be const, eventually */
-+static struct qb_attr_code code_eq_cmd = QB_CODE(0, 0, 2);
-+static struct qb_attr_code code_eq_eqdi = QB_CODE(0, 3, 1);
-+static struct qb_attr_code code_eq_dca_en = QB_CODE(0, 15, 1);
-+static struct qb_attr_code code_eq_dca_pk = QB_CODE(0, 14, 1);
-+static struct qb_attr_code code_eq_dca_idx = QB_CODE(0, 8, 2);
-+static struct qb_attr_code code_eq_orp_en = QB_CODE(0, 2, 1);
-+static struct qb_attr_code code_eq_orp_is_nesn = QB_CODE(0, 31, 1);
-+static struct qb_attr_code code_eq_orp_nlis = QB_CODE(0, 30, 1);
-+static struct qb_attr_code code_eq_orp_seqnum = QB_CODE(0, 16, 14);
-+static struct qb_attr_code code_eq_opr_id = QB_CODE(1, 0, 16);
-+static struct qb_attr_code code_eq_tgt_id = QB_CODE(2, 0, 24);
-+/* static struct qb_attr_code code_eq_tag = QB_CODE(3, 0, 32); */
-+static struct qb_attr_code code_eq_qd_en = QB_CODE(0, 4, 1);
-+static struct qb_attr_code code_eq_qd_bin = QB_CODE(4, 0, 16);
-+static struct qb_attr_code code_eq_qd_pri = QB_CODE(4, 16, 4);
-+static struct qb_attr_code code_eq_rsp_stash = QB_CODE(5, 16, 1);
-+static struct qb_attr_code code_eq_rsp_id = QB_CODE(5, 24, 8);
-+static struct qb_attr_code code_eq_rsp_lo = QB_CODE(6, 0, 32);
-+
-+enum qbman_eq_cmd_e {
-+ /* No enqueue, primarily for plugging ORP gaps for dropped frames */
-+ qbman_eq_cmd_empty,
-+ /* DMA an enqueue response once complete */
-+ qbman_eq_cmd_respond,
-+ /* DMA an enqueue response only if the enqueue fails */
-+ qbman_eq_cmd_respond_reject
-+};
-+
-+void qbman_eq_desc_clear(struct qbman_eq_desc *d)
-+{
-+ memset(d, 0, sizeof(*d));
-+}
-+
-+void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
-+{
-+ uint32_t *cl = qb_cl(d);
-+ qb_attr_code_encode(&code_eq_orp_en, cl, 0);
-+ qb_attr_code_encode(&code_eq_cmd, cl,
-+ respond_success ? qbman_eq_cmd_respond :
-+ qbman_eq_cmd_respond_reject);
-+}
-+
-+void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
-+ uint32_t opr_id, uint32_t seqnum, int incomplete)
-+{
-+ uint32_t *cl = qb_cl(d);
-+ qb_attr_code_encode(&code_eq_orp_en, cl, 1);
-+ qb_attr_code_encode(&code_eq_cmd, cl,
-+ respond_success ? qbman_eq_cmd_respond :
-+ qbman_eq_cmd_respond_reject);
-+ qb_attr_code_encode(&code_eq_opr_id, cl, opr_id);
-+ qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum);
-+ qb_attr_code_encode(&code_eq_orp_nlis, cl, !!incomplete);
-+}
-+
-+void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint32_t opr_id,
-+ uint32_t seqnum)
-+{
-+ uint32_t *cl = qb_cl(d);
-+ qb_attr_code_encode(&code_eq_orp_en, cl, 1);
-+ qb_attr_code_encode(&code_eq_cmd, cl, qbman_eq_cmd_empty);
-+ qb_attr_code_encode(&code_eq_opr_id, cl, opr_id);
-+ qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum);
-+ qb_attr_code_encode(&code_eq_orp_nlis, cl, 0);
-+ qb_attr_code_encode(&code_eq_orp_is_nesn, cl, 0);
-+}
-+
-+void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint32_t opr_id,
-+ uint32_t seqnum)
-+{
-+ uint32_t *cl = qb_cl(d);
-+ qb_attr_code_encode(&code_eq_orp_en, cl, 1);
-+ qb_attr_code_encode(&code_eq_cmd, cl, qbman_eq_cmd_empty);
-+ qb_attr_code_encode(&code_eq_opr_id, cl, opr_id);
-+ qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum);
-+ qb_attr_code_encode(&code_eq_orp_nlis, cl, 0);
-+ qb_attr_code_encode(&code_eq_orp_is_nesn, cl, 1);
-+}
-+
-+void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
-+ dma_addr_t storage_phys,
-+ int stash)
-+{
-+ uint32_t *cl = qb_cl(d);
-+ qb_attr_code_encode_64(&code_eq_rsp_lo, (uint64_t *)cl, storage_phys);
-+ qb_attr_code_encode(&code_eq_rsp_stash, cl, !!stash);
-+}
-+
-+void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token)
-+{
-+ uint32_t *cl = qb_cl(d);
-+ qb_attr_code_encode(&code_eq_rsp_id, cl, (uint32_t)token);
-+}
-+
-+void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid)
-+{
-+ uint32_t *cl = qb_cl(d);
-+ qb_attr_code_encode(&code_eq_qd_en, cl, 0);
-+ qb_attr_code_encode(&code_eq_tgt_id, cl, fqid);
-+}
-+
-+void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
-+ uint32_t qd_bin, uint32_t qd_prio)
-+{
-+ uint32_t *cl = qb_cl(d);
-+ qb_attr_code_encode(&code_eq_qd_en, cl, 1);
-+ qb_attr_code_encode(&code_eq_tgt_id, cl, qdid);
-+ qb_attr_code_encode(&code_eq_qd_bin, cl, qd_bin);
-+ qb_attr_code_encode(&code_eq_qd_pri, cl, qd_prio);
-+}
-+
-+void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable)
-+{
-+ uint32_t *cl = qb_cl(d);
-+ qb_attr_code_encode(&code_eq_eqdi, cl, !!enable);
-+}
-+
-+void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
-+ uint32_t dqrr_idx, int park)
-+{
-+ uint32_t *cl = qb_cl(d);
-+ qb_attr_code_encode(&code_eq_dca_en, cl, !!enable);
-+ if (enable) {
-+ qb_attr_code_encode(&code_eq_dca_pk, cl, !!park);
-+ qb_attr_code_encode(&code_eq_dca_idx, cl, dqrr_idx);
-+ }
-+}
-+
-+#define EQAR_IDX(eqar) ((eqar) & 0x7)
-+#define EQAR_VB(eqar) ((eqar) & 0x80)
-+#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
-+static int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
-+ const struct qbman_eq_desc *d,
-+ const struct qbman_fd *fd)
-+{
-+ uint32_t *p;
-+ const uint32_t *cl = qb_cl(d);
-+ uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);
-+ pr_debug("EQAR=%08x\n", eqar);
-+ if (!EQAR_SUCCESS(eqar))
-+ return -EBUSY;
-+ p = qbman_cena_write_start_wo_shadow(&s->sys,
-+ QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
-+ word_copy(&p[1], &cl[1], 7);
-+ word_copy(&p[8], fd, sizeof(*fd) >> 2);
-+ /* Set the verb byte, have to substitute in the valid-bit */
-+ lwsync();
-+ p[0] = cl[0] | EQAR_VB(eqar);
-+ qbman_cena_write_complete_wo_shadow(&s->sys,
-+ QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
-+ return 0;
-+}
-+
-+static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
-+ const struct qbman_eq_desc *d,
-+ const struct qbman_fd *fd)
-+{
-+ uint32_t *p;
-+ const uint32_t *cl = qb_cl(d);
-+ uint32_t eqcr_ci;
-+ uint8_t diff;
-+
-+ if (!s->eqcr.available) {
-+ eqcr_ci = s->eqcr.ci;
-+ s->eqcr.ci = qbman_cena_read_reg(&s->sys,
-+ QBMAN_CENA_SWP_EQCR_CI) & 0xF;
-+ diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
-+ eqcr_ci, s->eqcr.ci);
-+ s->eqcr.available += diff;
-+ if (!diff)
-+ return -EBUSY;
-+ }
-+
-+ p = qbman_cena_write_start_wo_shadow(&s->sys,
-+ QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7));
-+ word_copy(&p[1], &cl[1], 7);
-+ word_copy(&p[8], fd, sizeof(*fd) >> 2);
-+ lwsync();
-+ /* Set the verb byte, have to substitute in the valid-bit */
-+ p[0] = cl[0] | s->eqcr.pi_vb;
-+ qbman_cena_write_complete_wo_shadow(&s->sys,
-+ QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7));
-+ s->eqcr.pi++;
-+ s->eqcr.pi &= 0xF;
-+ s->eqcr.available--;
-+ if (!(s->eqcr.pi & 7))
-+ s->eqcr.pi_vb ^= QB_VALID_BIT;
-+ return 0;
-+}
-+
-+int qbman_swp_fill_ring(struct qbman_swp *s,
-+ const struct qbman_eq_desc *d,
-+ const struct qbman_fd *fd,
-+ __attribute__((unused)) uint8_t burst_index)
-+{
-+ uint32_t *p;
-+ const uint32_t *cl = qb_cl(d);
-+ uint32_t eqcr_ci;
-+ uint8_t diff;
-+
-+ if (!s->eqcr.available) {
-+ eqcr_ci = s->eqcr.ci;
-+ s->eqcr.ci = qbman_cena_read_reg(&s->sys,
-+ QBMAN_CENA_SWP_EQCR_CI) & 0xF;
-+ diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
-+ eqcr_ci, s->eqcr.ci);
-+ s->eqcr.available += diff;
-+ if (!diff) {
-+ return -EBUSY;
-+ }
-+ }
-+ p = qbman_cena_write_start_wo_shadow(&s->sys,
-+ QBMAN_CENA_SWP_EQCR((s->eqcr.pi/* +burst_index */) & 7));
-+ /* Copy the remaining seven command words and the frame descriptor */
-+ memcpy(&p[1], &cl[1], 7 * sizeof(uint32_t));
-+ memcpy(&p[8], fd, sizeof(struct qbman_fd));
-+
-+ /* The lwsync() barrier is deferred here; see qbman_sync() */
-+
-+ p[0] = cl[0] | s->eqcr.pi_vb;
-+
-+ s->eqcr.pi++;
-+ s->eqcr.pi &= 0xF;
-+ s->eqcr.available--;
-+ if (!(s->eqcr.pi & 7))
-+ s->eqcr.pi_vb ^= QB_VALID_BIT;
-+
-+ return 0;
-+}
-+
-+int qbman_swp_flush_ring(struct qbman_swp *s)
-+{
-+ void *ptr = s->sys.addr_cena;
-+ dcbf((uint64_t)ptr);
-+ dcbf((uint64_t)ptr + 0x40);
-+ dcbf((uint64_t)ptr + 0x80);
-+ dcbf((uint64_t)ptr + 0xc0);
-+ dcbf((uint64_t)ptr + 0x100);
-+ dcbf((uint64_t)ptr + 0x140);
-+ dcbf((uint64_t)ptr + 0x180);
-+ dcbf((uint64_t)ptr + 0x1c0);
-+
-+ return 0;
-+}
-+
-+void qbman_sync(void)
-+{
-+ lwsync();
-+}
-+
-+int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
-+ const struct qbman_fd *fd)
-+{
-+ if (s->sys.eqcr_mode == qman_eqcr_vb_array)
-+ return qbman_swp_enqueue_array_mode(s, d, fd);
-+ else /* Use ring mode by default */
-+ return qbman_swp_enqueue_ring_mode(s, d, fd);
-+}
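-+
-+/* A minimal enqueue to a frame queue, built from the descriptor helpers
-+ * above (illustrative; s, fqid and fd are caller-supplied):
-+ *
-+ *   struct qbman_eq_desc ed;
-+ *
-+ *   qbman_eq_desc_clear(&ed);
-+ *   qbman_eq_desc_set_no_orp(&ed, 0);
-+ *   qbman_eq_desc_set_fq(&ed, fqid);
-+ *   ret = qbman_swp_enqueue(s, &ed, fd);
-+ *
-+ * qbman_swp_enqueue() returns -EBUSY when no EQCR entry is available.
-+ */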
-+
-+/*************************/
-+/* Static (push) dequeue */
-+/*************************/
-+
-+void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled)
-+{
-+ struct qb_attr_code code = CODE_SDQCR_DQSRC(channel_idx);
-+
-+ BUG_ON(channel_idx > 15);
-+ *enabled = (int)qb_attr_code_decode(&code, &s->sdq);
-+}
-+
-+void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable)
-+{
-+ uint16_t dqsrc;
-+ struct qb_attr_code code = CODE_SDQCR_DQSRC(channel_idx);
-+ BUG_ON(channel_idx > 15);
-+ qb_attr_code_encode(&code, &s->sdq, !!enable);
-+ /* Read back the complete source map. If no channels are enabled,
-+ * the SDQCR must be written as 0 or else QMan will assert errors.
-+ */
-+ dqsrc = (uint16_t)qb_attr_code_decode(&code_sdqcr_dqsrc, &s->sdq);
-+ if (dqsrc != 0)
-+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, s->sdq);
-+ else
-+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, 0);
-+}
-+
-+/***************************/
-+/* Volatile (pull) dequeue */
-+/***************************/
-+
-+/* These should be const, eventually */
-+static struct qb_attr_code code_pull_dct = QB_CODE(0, 0, 2);
-+static struct qb_attr_code code_pull_dt = QB_CODE(0, 2, 2);
-+static struct qb_attr_code code_pull_rls = QB_CODE(0, 4, 1);
-+static struct qb_attr_code code_pull_stash = QB_CODE(0, 5, 1);
-+static struct qb_attr_code code_pull_numframes = QB_CODE(0, 8, 4);
-+static struct qb_attr_code code_pull_token = QB_CODE(0, 16, 8);
-+static struct qb_attr_code code_pull_dqsource = QB_CODE(1, 0, 24);
-+static struct qb_attr_code code_pull_rsp_lo = QB_CODE(2, 0, 32);
-+
-+enum qb_pull_dt_e {
-+ qb_pull_dt_channel,
-+ qb_pull_dt_workqueue,
-+ qb_pull_dt_framequeue
-+};
-+
-+void qbman_pull_desc_clear(struct qbman_pull_desc *d)
-+{
-+ memset(d, 0, sizeof(*d));
-+}
-+
-+void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
-+ struct qbman_result *storage,
-+ dma_addr_t storage_phys,
-+ int stash)
-+{
-+ uint32_t *cl = qb_cl(d);
-+ /* Squiggle the pointer 'storage' into the extra 2 words of the
-+ * descriptor (which aren't copied to the hw command) */
-+ *(void **)&cl[4] = storage;
-+ if (!storage) {
-+ qb_attr_code_encode(&code_pull_rls, cl, 0);
-+ return;
-+ }
-+ qb_attr_code_encode(&code_pull_rls, cl, 1);
-+ qb_attr_code_encode(&code_pull_stash, cl, !!stash);
-+ qb_attr_code_encode_64(&code_pull_rsp_lo, (uint64_t *)cl, storage_phys);
-+}
-+
-+void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, uint8_t numframes)
-+{
-+ uint32_t *cl = qb_cl(d);
-+ BUG_ON(!numframes || (numframes > 16));
-+ qb_attr_code_encode(&code_pull_numframes, cl,
-+ (uint32_t)(numframes - 1));
-+}
-+
-+void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token)
-+{
-+ uint32_t *cl = qb_cl(d);
-+ qb_attr_code_encode(&code_pull_token, cl, token);
-+}
-+
-+void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid)
-+{
-+ uint32_t *cl = qb_cl(d);
-+ qb_attr_code_encode(&code_pull_dct, cl, 1);
-+ qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_framequeue);
-+ qb_attr_code_encode(&code_pull_dqsource, cl, fqid);
-+}
-+
-+void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
-+ enum qbman_pull_type_e dct)
-+{
-+ uint32_t *cl = qb_cl(d);
-+ qb_attr_code_encode(&code_pull_dct, cl, dct);
-+ qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_workqueue);
-+ qb_attr_code_encode(&code_pull_dqsource, cl, wqid);
-+}
-+
-+void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
-+ enum qbman_pull_type_e dct)
-+{
-+ uint32_t *cl = qb_cl(d);
-+ qb_attr_code_encode(&code_pull_dct, cl, dct);
-+ qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_channel);
-+ qb_attr_code_encode(&code_pull_dqsource, cl, chid);
-+}
-+
-+int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
-+{
-+ uint32_t *p;
-+ uint32_t *cl = qb_cl(d);
-+ if (!atomic_dec_and_test(&s->vdq.busy)) {
-+ atomic_inc(&s->vdq.busy);
-+ return -EBUSY;
-+ }
-+ s->vdq.storage = *(void **)&cl[4];
-+ qb_attr_code_encode(&code_pull_token, cl, 1);
-+ p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
-+ word_copy(&p[1], &cl[1], 3);
-+ /* Set the verb byte, have to substitute in the valid-bit */
-+ lwsync();
-+ p[0] = cl[0] | s->vdq.valid_bit;
-+ s->vdq.valid_bit ^= QB_VALID_BIT;
-+ qbman_cena_write_complete_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
-+ return 0;
-+}
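
Taken together, the pull-descriptor helpers above are typically strung into a flow like the sketch below (illustrative only: the FQID, frame count and busy-wait loops are placeholders, and the descriptor/result types come from fsl_qbman_portal.h):

    static void example_volatile_dequeue(struct qbman_swp *swp,
                                         struct qbman_result *storage,
                                         dma_addr_t storage_phys)
    {
            struct qbman_pull_desc pulldesc;

            qbman_pull_desc_clear(&pulldesc);
            /* deliver results to 'storage' (with stashing) rather than to DQRR */
            qbman_pull_desc_set_storage(&pulldesc, storage, storage_phys, 1);
            qbman_pull_desc_set_numframes(&pulldesc, 4);
            qbman_pull_desc_set_fq(&pulldesc, 0x123);

            /* VDQCR is a 1-deep pipeline: retry while the slot is still owned */
            while (qbman_swp_pull(swp, &pulldesc) == -EBUSY)
                    ;

            /* poll the user-provided storage until the token shows up */
            while (!qbman_check_command_complete(swp, storage))
                    ;
    }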
-+
-+/****************/
-+/* Polling DQRR */
-+/****************/
-+
-+static struct qb_attr_code code_dqrr_verb = QB_CODE(0, 0, 8);
-+static struct qb_attr_code code_dqrr_response = QB_CODE(0, 0, 7);
-+static struct qb_attr_code code_dqrr_stat = QB_CODE(0, 8, 8);
-+static struct qb_attr_code code_dqrr_seqnum = QB_CODE(0, 16, 14);
-+static struct qb_attr_code code_dqrr_odpid = QB_CODE(1, 0, 16);
-+/* static struct qb_attr_code code_dqrr_tok = QB_CODE(1, 24, 8); */
-+static struct qb_attr_code code_dqrr_fqid = QB_CODE(2, 0, 24);
-+static struct qb_attr_code code_dqrr_byte_count = QB_CODE(4, 0, 32);
-+static struct qb_attr_code code_dqrr_frame_count = QB_CODE(5, 0, 24);
-+static struct qb_attr_code code_dqrr_ctx_lo = QB_CODE(6, 0, 32);
-+
-+#define QBMAN_RESULT_DQ 0x60
-+#define QBMAN_RESULT_FQRN 0x21
-+#define QBMAN_RESULT_FQRNI 0x22
-+#define QBMAN_RESULT_FQPN 0x24
-+#define QBMAN_RESULT_FQDAN 0x25
-+#define QBMAN_RESULT_CDAN 0x26
-+#define QBMAN_RESULT_CSCN_MEM 0x27
-+#define QBMAN_RESULT_CGCU 0x28
-+#define QBMAN_RESULT_BPSCN 0x29
-+#define QBMAN_RESULT_CSCN_WQ 0x2a
-+
-+static struct qb_attr_code code_dqpi_pi = QB_CODE(0, 0, 4);
-+
-+/* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry
-+ * only once, so repeated calls can return a sequence of DQRR entries, without
-+ * requiring they be consumed immediately or in any particular order. */
-+const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
-+{
-+ uint32_t verb;
-+ uint32_t response_verb;
-+ uint32_t flags;
-+ const struct qbman_result *dq;
-+ const uint32_t *p;
-+
-+ /* Before using valid-bit to detect if something is there, we have to
-+ * handle the case of the DQRR reset bug... */
-+ if (unlikely(s->dqrr.reset_bug)) {
-+ /* We pick up new entries by cache-inhibited producer index,
-+ * which means that a non-coherent mapping would require us to
-+ * invalidate and read *only* once that PI has indicated that
-+ * there's an entry here. The first trip around the DQRR ring
-+ * will be much less efficient than all subsequent trips around
-+ * it...
-+ */
-+ uint32_t dqpi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI);
-+ uint32_t pi = qb_attr_code_decode(&code_dqpi_pi, &dqpi);
-+ /* there are new entries iff pi != next_idx */
-+ if (pi == s->dqrr.next_idx)
-+ return NULL;
-+ /* if next_idx is/was the last ring index, and 'pi' is
-+ * different, we can disable the workaround as all the ring
-+ * entries have now been DMA'd to so valid-bit checking is
-+ * repaired. Note: this logic needs to be based on next_idx
-+ * (which increments one at a time), rather than on pi (which
-+ * can burst and wrap-around between our snapshots of it).
-+ */
-+ BUG_ON((s->dqrr.dqrr_size - 1) < 0);
-+ if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1u)) {
-+ pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n",
-+ s->dqrr.next_idx, pi);
-+ s->dqrr.reset_bug = 0;
-+ }
-+ qbman_cena_invalidate_prefetch(&s->sys,
-+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
-+ }
-+ dq = qbman_cena_read_wo_shadow(&s->sys,
-+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
-+ p = qb_cl(dq);
-+ verb = qb_attr_code_decode(&code_dqrr_verb, p);
-+ /* If the valid-bit isn't of the expected polarity, nothing there. Note,
-+ * in the DQRR reset bug workaround, we shouldn't need to skip these
-+ * check, because we've already determined that a new entry is available
-+ * and we've invalidated the cacheline before reading it, so the
-+ * valid-bit behaviour is repaired and should tell us what we already
-+ * knew from reading PI.
-+ */
-+ if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
-+ return NULL;
-+
-+ /* There's something there. Move "next_idx" attention to the next ring
-+ * entry (and prefetch it) before returning what we found. */
-+ s->dqrr.next_idx++;
-+ if (s->dqrr.next_idx == QBMAN_DQRR_SIZE) {
-+ s->dqrr.next_idx = 0;
-+ s->dqrr.valid_bit ^= QB_VALID_BIT;
-+ }
-+ /* If this is the final response to a volatile dequeue command,
-+ indicate that the vdq is no longer busy */
-+ flags = qbman_result_DQ_flags(dq);
-+ response_verb = qb_attr_code_decode(&code_dqrr_response, &verb);
-+ if ((response_verb == QBMAN_RESULT_DQ) &&
-+ (flags & QBMAN_DQ_STAT_VOLATILE) &&
-+ (flags & QBMAN_DQ_STAT_EXPIRED))
-+ atomic_inc(&s->vdq.busy);
-+
-+ return dq;
-+}
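
Stripped of the attribute-code plumbing and the reset-bug workaround, the valid-bit convention that drives the function above reduces to the generic sketch below (not the real DQRR entry layout):

    struct vb_ring_consumer {
            unsigned int next_idx;
            uint8_t expected_vb;            /* 0x00 or 0x80, flips on each wrap */
    };

    /* Return the next unconsumed 64-byte entry, or NULL if the producer has not
     * written one yet. The first byte of each entry is its verb. */
    static const void *vb_ring_next(struct vb_ring_consumer *c,
                                    const uint8_t (*entries)[64],
                                    unsigned int ring_size)
    {
            const uint8_t *e = entries[c->next_idx];

            if ((e[0] & 0x80) != c->expected_vb)
                    return NULL;
            if (++c->next_idx == ring_size) {
                    c->next_idx = 0;
                    c->expected_vb ^= 0x80;
            }
            return e;
    }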
-+
-+/* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
-+void qbman_swp_dqrr_consume(struct qbman_swp *s,
-+ const struct qbman_result *dq)
-+{
-+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
-+}
-+
-+/*********************************/
-+/* Polling user-provided storage */
-+/*********************************/
-+
-+int qbman_result_has_new_result(__attribute__((unused)) struct qbman_swp *s,
-+ const struct qbman_result *dq)
-+{
-+ /* To avoid converting the little-endian DQ entry to host-endian prior
-+ * to us knowing whether there is a valid entry or not (and running the
-+ * risk of corrupting the incoming hardware LE write), we detect in
-+ * hardware endianness rather than host. This means we need a different
-+ * "code" depending on whether we are BE or LE in software, which is
-+ * where DQRR_TOK_OFFSET comes in... */
-+ static struct qb_attr_code code_dqrr_tok_detect =
-+ QB_CODE(0, DQRR_TOK_OFFSET, 8);
-+ /* The user trying to poll for a result treats "dq" as const. It is
-+ * however the same address that was provided to us non-const in the
-+ * first place, for directing hardware DMA to. So we can cast away the
-+ * const because it is mutable from our perspective. */
-+ uint32_t *p = (uint32_t *)(unsigned long)qb_cl(dq);
-+ uint32_t token;
-+
-+ token = qb_attr_code_decode(&code_dqrr_tok_detect, &p[1]);
-+ if (token != 1)
-+ return 0;
-+ qb_attr_code_encode(&code_dqrr_tok_detect, &p[1], 0);
-+
-+ /* Only now do we convert from hardware to host endianness. Also, as we
-+ * are returning success, the user has promised not to call us again, so
-+ * there's no risk of us converting the endianness twice... */
-+ make_le32_n(p, 16);
-+ return 1;
-+}
-+
-+int qbman_check_command_complete(struct qbman_swp *s,
-+ const struct qbman_result *dq)
-+{
-+ /* To avoid converting the little-endian DQ entry to host-endian prior
-+ * to us knowing whether there is a valid entry or not (and running the
-+ * risk of corrupting the incoming hardware LE write), we detect in
-+ * hardware endianness rather than host. This means we need a different
-+ * "code" depending on whether we are BE or LE in software, which is
-+ * where DQRR_TOK_OFFSET comes in... */
-+ static struct qb_attr_code code_dqrr_tok_detect =
-+ QB_CODE(0, DQRR_TOK_OFFSET, 8);
-+ /* The user trying to poll for a result treats "dq" as const. It is
-+ * however the same address that was provided to us non-const in the
-+ * first place, for directing hardware DMA to. So we can cast away the
-+ * const because it is mutable from our perspective. */
-+ uint32_t *p = (uint32_t *)(unsigned long)qb_cl(dq);
-+ uint32_t token;
-+
-+ token = qb_attr_code_decode(&code_dqrr_tok_detect, &p[1]);
-+ if (token != 1)
-+ return 0;
-+ /* When the token is set it indicates that the VDQ command has been
-+ * fetched by QBMan and is being worked on. It is safe for software to
-+ * issue another VDQ command, so increment the busy variable. */
-+ if (s->vdq.storage == dq) {
-+ s->vdq.storage = NULL;
-+ atomic_inc(&s->vdq.busy);
-+ }
-+ return 1;
-+}
-+
-+/********************************/
-+/* Categorising qbman results */
-+/********************************/
-+
-+static struct qb_attr_code code_result_in_mem =
-+ QB_CODE(0, QBMAN_RESULT_VERB_OFFSET_IN_MEM, 7);
-+
-+static inline int __qbman_result_is_x(const struct qbman_result *dq,
-+ uint32_t x)
-+{
-+ const uint32_t *p = qb_cl(dq);
-+ uint32_t response_verb = qb_attr_code_decode(&code_dqrr_response, p);
-+ return (response_verb == x);
-+}
-+
-+static inline int __qbman_result_is_x_in_mem(const struct qbman_result *dq,
-+ uint32_t x)
-+{
-+ const uint32_t *p = qb_cl(dq);
-+ uint32_t response_verb = qb_attr_code_decode(&code_result_in_mem, p);
-+
-+ return (response_verb == x);
-+}
-+
-+int qbman_result_is_DQ(const struct qbman_result *dq)
-+{
-+ return __qbman_result_is_x(dq, QBMAN_RESULT_DQ);
-+}
-+
-+int qbman_result_is_FQDAN(const struct qbman_result *dq)
-+{
-+ return __qbman_result_is_x(dq, QBMAN_RESULT_FQDAN);
-+}
-+
-+int qbman_result_is_CDAN(const struct qbman_result *dq)
-+{
-+ return __qbman_result_is_x(dq, QBMAN_RESULT_CDAN);
-+}
-+
-+int qbman_result_is_CSCN(const struct qbman_result *dq)
-+{
-+ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_CSCN_MEM) ||
-+ __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ);
-+}
-+
-+int qbman_result_is_BPSCN(const struct qbman_result *dq)
-+{
-+ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_BPSCN);
-+}
-+
-+int qbman_result_is_CGCU(const struct qbman_result *dq)
-+{
-+ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_CGCU);
-+}
-+
-+int qbman_result_is_FQRN(const struct qbman_result *dq)
-+{
-+ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_FQRN);
-+}
-+
-+int qbman_result_is_FQRNI(const struct qbman_result *dq)
-+{
-+ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_FQRNI);
-+}
-+
-+int qbman_result_is_FQPN(const struct qbman_result *dq)
-+{
-+ return __qbman_result_is_x(dq, QBMAN_RESULT_FQPN);
-+}
-+
-+/*********************************/
-+/* Parsing frame dequeue results */
-+/*********************************/
-+
-+/* These APIs assume qbman_result_is_DQ() is TRUE */
-+
-+uint32_t qbman_result_DQ_flags(const struct qbman_result *dq)
-+{
-+ const uint32_t *p = qb_cl(dq);
-+ return qb_attr_code_decode(&code_dqrr_stat, p);
-+}
-+
-+uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq)
-+{
-+ const uint32_t *p = qb_cl(dq);
-+ return (uint16_t)qb_attr_code_decode(&code_dqrr_seqnum, p);
-+}
-+
-+uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq)
-+{
-+ const uint32_t *p = qb_cl(dq);
-+ return (uint16_t)qb_attr_code_decode(&code_dqrr_odpid, p);
-+}
-+
-+uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq)
-+{
-+ const uint32_t *p = qb_cl(dq);
-+ return qb_attr_code_decode(&code_dqrr_fqid, p);
-+}
-+
-+uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq)
-+{
-+ const uint32_t *p = qb_cl(dq);
-+ return qb_attr_code_decode(&code_dqrr_byte_count, p);
-+}
-+
-+uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq)
-+{
-+ const uint32_t *p = qb_cl(dq);
-+ return qb_attr_code_decode(&code_dqrr_frame_count, p);
-+}
-+
-+uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq)
-+{
-+ const uint64_t *p = (const uint64_t *)qb_cl(dq);
-+
-+ return qb_attr_code_decode_64(&code_dqrr_ctx_lo, p);
-+}
-+
-+const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq)
-+{
-+ const uint32_t *p = qb_cl(dq);
-+ return (const struct qbman_fd *)&p[8];
-+}
-+
-+/**************************************/
-+/* Parsing state-change notifications */
-+/**************************************/
-+
-+static struct qb_attr_code code_scn_state = QB_CODE(0, 16, 8);
-+static struct qb_attr_code code_scn_rid = QB_CODE(1, 0, 24);
-+static struct qb_attr_code code_scn_state_in_mem =
-+ QB_CODE(0, SCN_STATE_OFFSET_IN_MEM, 8);
-+static struct qb_attr_code code_scn_rid_in_mem =
-+ QB_CODE(1, SCN_RID_OFFSET_IN_MEM, 24);
-+static struct qb_attr_code code_scn_ctx_lo = QB_CODE(2, 0, 32);
-+
-+uint8_t qbman_result_SCN_state(const struct qbman_result *scn)
-+{
-+ const uint32_t *p = qb_cl(scn);
-+ return (uint8_t)qb_attr_code_decode(&code_scn_state, p);
-+}
-+
-+uint32_t qbman_result_SCN_rid(const struct qbman_result *scn)
-+{
-+ const uint32_t *p = qb_cl(scn);
-+ return qb_attr_code_decode(&code_scn_rid, p);
-+}
-+
-+uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn)
-+{
-+ const uint64_t *p = (const uint64_t *)qb_cl(scn);
-+
-+ return qb_attr_code_decode_64(&code_scn_ctx_lo, p);
-+}
-+
-+uint8_t qbman_result_SCN_state_in_mem(const struct qbman_result *scn)
-+{
-+ const uint32_t *p = qb_cl(scn);
-+
-+ return (uint8_t)qb_attr_code_decode(&code_scn_state_in_mem, p);
-+}
-+
-+uint32_t qbman_result_SCN_rid_in_mem(const struct qbman_result *scn)
-+{
-+ const uint32_t *p = qb_cl(scn);
-+ uint32_t result_rid;
-+
-+ result_rid = qb_attr_code_decode(&code_scn_rid_in_mem, p);
-+ return make_le24(result_rid);
-+}
-+
-+/*****************/
-+/* Parsing BPSCN */
-+/*****************/
-+uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn)
-+{
-+ return (uint16_t)qbman_result_SCN_rid_in_mem(scn) & 0x3FFF;
-+}
-+
-+int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn)
-+{
-+ return !(int)(qbman_result_SCN_state_in_mem(scn) & 0x1);
-+}
-+
-+int qbman_result_bpscn_is_depleted(const struct qbman_result *scn)
-+{
-+ return (int)(qbman_result_SCN_state_in_mem(scn) & 0x2);
-+}
-+
-+int qbman_result_bpscn_is_surplus(const struct qbman_result *scn)
-+{
-+ return (int)(qbman_result_SCN_state_in_mem(scn) & 0x4);
-+}
-+
-+uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn)
-+{
-+ uint64_t ctx;
-+ uint32_t ctx_hi, ctx_lo;
-+
-+ ctx = qbman_result_SCN_ctx(scn);
-+ ctx_hi = upper32(ctx);
-+ ctx_lo = lower32(ctx);
-+ return ((uint64_t)make_le32(ctx_hi) << 32 |
-+ (uint64_t)make_le32(ctx_lo));
-+}
-+
-+/*****************/
-+/* Parsing CGCU */
-+/*****************/
-+uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn)
-+{
-+ return (uint16_t)qbman_result_SCN_rid_in_mem(scn) & 0xFFFF;
-+}
-+
-+uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn)
-+{
-+ uint64_t ctx;
-+ uint32_t ctx_hi, ctx_lo;
-+
-+ ctx = qbman_result_SCN_ctx(scn);
-+ ctx_hi = upper32(ctx);
-+ ctx_lo = lower32(ctx);
-+ return ((uint64_t)(make_le32(ctx_hi) & 0xFF) << 32) |
-+ (uint64_t)make_le32(ctx_lo);
-+}
-+
-+/******************/
-+/* Buffer release */
-+/******************/
-+
-+/* These should be const, eventually */
-+/* static struct qb_attr_code code_release_num = QB_CODE(0, 0, 3); */
-+static struct qb_attr_code code_release_set_me = QB_CODE(0, 5, 1);
-+static struct qb_attr_code code_release_rcdi = QB_CODE(0, 6, 1);
-+static struct qb_attr_code code_release_bpid = QB_CODE(0, 16, 16);
-+
-+void qbman_release_desc_clear(struct qbman_release_desc *d)
-+{
-+ uint32_t *cl;
-+ memset(d, 0, sizeof(*d));
-+ cl = qb_cl(d);
-+ qb_attr_code_encode(&code_release_set_me, cl, 1);
-+}
-+
-+void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint32_t bpid)
-+{
-+ uint32_t *cl = qb_cl(d);
-+ qb_attr_code_encode(&code_release_bpid, cl, bpid);
-+}
-+
-+void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
-+{
-+ uint32_t *cl = qb_cl(d);
-+ qb_attr_code_encode(&code_release_rcdi, cl, !!enable);
-+}
-+
-+#define RAR_IDX(rar) ((rar) & 0x7)
-+#define RAR_VB(rar) ((rar) & 0x80)
-+#define RAR_SUCCESS(rar) ((rar) & 0x100)
-+
-+int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
-+ const uint64_t *buffers, unsigned int num_buffers)
-+{
-+ uint32_t *p;
-+ const uint32_t *cl = qb_cl(d);
-+ uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);
-+ pr_debug("RAR=%08x\n", rar);
-+ if (!RAR_SUCCESS(rar))
-+ return -EBUSY;
-+ BUG_ON(!num_buffers || (num_buffers > 7));
-+ /* Start the release command */
-+ p = qbman_cena_write_start_wo_shadow(&s->sys,
-+ QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
-+ /* Copy the caller's buffer pointers to the command */
-+ u64_to_le32_copy(&p[2], buffers, num_buffers);
-+ /* Set the verb byte, have to substitute in the valid-bit and the number
-+ * of buffers. */
-+ lwsync();
-+ p[0] = cl[0] | RAR_VB(rar) | num_buffers;
-+ qbman_cena_write_complete_wo_shadow(&s->sys,
-+ QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
-+ return 0;
-+}
-+
-+/*******************/
-+/* Buffer acquires */
-+/*******************/
-+
-+/* These should be const, eventually */
-+static struct qb_attr_code code_acquire_bpid = QB_CODE(0, 16, 16);
-+static struct qb_attr_code code_acquire_num = QB_CODE(1, 0, 3);
-+static struct qb_attr_code code_acquire_r_num = QB_CODE(1, 0, 3);
-+
-+int qbman_swp_acquire(struct qbman_swp *s, uint32_t bpid, uint64_t *buffers,
-+ unsigned int num_buffers)
-+{
-+ uint32_t *p;
-+ uint32_t rslt, num;
-+ BUG_ON(!num_buffers || (num_buffers > 7));
-+
-+ /* Start the management command */
-+ p = qbman_swp_mc_start(s);
-+
-+ if (!p)
-+ return -EBUSY;
-+
-+ /* Encode the caller-provided attributes */
-+ qb_attr_code_encode(&code_acquire_bpid, p, bpid);
-+ qb_attr_code_encode(&code_acquire_num, p, num_buffers);
-+
-+ /* Complete the management command */
-+ p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_MC_ACQUIRE);
-+
-+ /* Decode the outcome */
-+ rslt = qb_attr_code_decode(&code_generic_rslt, p);
-+ num = qb_attr_code_decode(&code_acquire_r_num, p);
-+ BUG_ON(qb_attr_code_decode(&code_generic_verb, p) != QBMAN_MC_ACQUIRE);
-+
-+ /* Determine success or failure */
-+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
-+ pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
-+ bpid, rslt);
-+ return -EIO;
-+ }
-+ BUG_ON(num > num_buffers);
-+ /* Copy the acquired buffers to the caller's array */
-+ u64_from_le32_copy(buffers, &p[2], num);
-+ return (int)num;
-+}
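
qbman_swp_acquire() is the mirror of qbman_swp_release() earlier in the file; a minimal round trip through both, with made-up buffer addresses and BPID, looks roughly like this:

    static int example_bpool_roundtrip(struct qbman_swp *swp)
    {
            struct qbman_release_desc rd;
            /* hypothetical buffer addresses and BPID, purely for illustration */
            uint64_t bufs[2] = { 0x80000000ULL, 0x80001000ULL };

            qbman_release_desc_clear(&rd);
            qbman_release_desc_set_bpid(&rd, 7);
            while (qbman_swp_release(swp, &rd, bufs, 2) == -EBUSY)
                    ;       /* no RCR slot free yet */

            /* returns the number of buffers actually acquired, or -EIO */
            return qbman_swp_acquire(swp, 7, bufs, 2);
    }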
-+
-+/*****************/
-+/* FQ management */
-+/*****************/
-+
-+static struct qb_attr_code code_fqalt_fqid = QB_CODE(1, 0, 32);
-+
-+static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid,
-+ uint8_t alt_fq_verb)
-+{
-+ uint32_t *p;
-+ uint32_t rslt;
-+
-+ /* Start the management command */
-+ p = qbman_swp_mc_start(s);
-+ if (!p)
-+ return -EBUSY;
-+
-+ qb_attr_code_encode(&code_fqalt_fqid, p, fqid);
-+ /* Complete the management command */
-+ p = qbman_swp_mc_complete(s, p, p[0] | alt_fq_verb);
-+
-+ /* Decode the outcome */
-+ rslt = qb_attr_code_decode(&code_generic_rslt, p);
-+ BUG_ON(qb_attr_code_decode(&code_generic_verb, p) != alt_fq_verb);
-+
-+ /* Determine success or failure */
-+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
-+ pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n",
-+ fqid, alt_fq_verb, rslt);
-+ return -EIO;
-+ }
-+
-+ return 0;
-+}
-+
-+int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid)
-+{
-+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
-+}
-+
-+int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid)
-+{
-+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
-+}
-+
-+int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid)
-+{
-+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
-+}
-+
-+int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid)
-+{
-+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
-+}
-+
-+/**********************/
-+/* Channel management */
-+/**********************/
-+
-+static struct qb_attr_code code_cdan_cid = QB_CODE(0, 16, 12);
-+static struct qb_attr_code code_cdan_we = QB_CODE(1, 0, 8);
-+static struct qb_attr_code code_cdan_en = QB_CODE(1, 8, 1);
-+static struct qb_attr_code code_cdan_ctx_lo = QB_CODE(2, 0, 32);
-+
-+/* Hide "ICD" for now as we don't use it, don't set it, and don't test it, so it
-+ * would be irresponsible to expose it. */
-+#define CODE_CDAN_WE_EN 0x1
-+#define CODE_CDAN_WE_CTX 0x4
-+
-+static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid,
-+ uint8_t we_mask, uint8_t cdan_en,
-+ uint64_t ctx)
-+{
-+ uint32_t *p;
-+ uint32_t rslt;
-+
-+ /* Start the management command */
-+ p = qbman_swp_mc_start(s);
-+ if (!p)
-+ return -EBUSY;
-+
-+ /* Encode the caller-provided attributes */
-+ qb_attr_code_encode(&code_cdan_cid, p, channelid);
-+ qb_attr_code_encode(&code_cdan_we, p, we_mask);
-+ qb_attr_code_encode(&code_cdan_en, p, cdan_en);
-+ qb_attr_code_encode_64(&code_cdan_ctx_lo, (uint64_t *)p, ctx);
-+ /* Complete the management command */
-+ p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_WQCHAN_CONFIGURE);
-+
-+ /* Decode the outcome */
-+ rslt = qb_attr_code_decode(&code_generic_rslt, p);
-+ BUG_ON(qb_attr_code_decode(&code_generic_verb, p)
-+ != QBMAN_WQCHAN_CONFIGURE);
-+
-+ /* Determine success or failure */
-+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
-+ pr_err("CDAN cQID %d failed: code = 0x%02x\n",
-+ channelid, rslt);
-+ return -EIO;
-+ }
-+
-+ return 0;
-+}
-+
-+int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
-+ uint64_t ctx)
-+{
-+ return qbman_swp_CDAN_set(s, channelid,
-+ CODE_CDAN_WE_CTX,
-+ 0, ctx);
-+}
-+
-+int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid)
-+{
-+ return qbman_swp_CDAN_set(s, channelid,
-+ CODE_CDAN_WE_EN,
-+ 1, 0);
-+}
-+
-+int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid)
-+{
-+ return qbman_swp_CDAN_set(s, channelid,
-+ CODE_CDAN_WE_EN,
-+ 0, 0);
-+}
-+
-+int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
-+ uint64_t ctx)
-+{
-+ return qbman_swp_CDAN_set(s, channelid,
-+ CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
-+ 1, ctx);
-+}
-+
-+uint8_t qbman_get_dqrr_idx(struct qbman_result *dqrr)
-+{
-+ return QBMAN_IDX_FROM_DQRR(dqrr);
-+}
-+
-+struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx)
-+{
-+ struct qbman_result *dq;
-+ dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(idx));
-+ return dq;
-+}
-+
-+int qbman_swp_send_multiple(struct qbman_swp *s,
-+ const struct qbman_eq_desc *d,
-+ const struct qbman_fd *fd,
-+ int frames_to_send)
-+{
-+ uint32_t *p;
-+ const uint32_t *cl = qb_cl(d);
-+ uint32_t eqcr_ci;
-+ uint8_t diff;
-+ int sent = 0;
-+ int i;
-+ int initial_pi = s->eqcr.pi;
-+ uint64_t start_pointer;
-+
-+ /* Try to send frames_to_send frames, as long as there is space in the ring */
-+ while (frames_to_send--) {
-+ if (!s->eqcr.available) {
-+ eqcr_ci = s->eqcr.ci;
-+ s->eqcr.ci = qbman_cena_read_reg(&s->sys,
-+ QBMAN_CENA_SWP_EQCR_CI) & 0xF;
-+ diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
-+ eqcr_ci, s->eqcr.ci);
-+ s->eqcr.available += diff;
-+ if (!diff)
-+ goto done;
-+ }
-+
-+ p = qbman_cena_write_start_wo_shadow_fast(&s->sys,
-+ QBMAN_CENA_SWP_EQCR((initial_pi) & 7));
-+ /* Write the command (except the first byte) and the FD */
-+ memcpy(&p[1], &cl[1], 7);
-+ memcpy(&p[8], &fd[sent], sizeof(struct qbman_fd));
-+
-+ initial_pi++;
-+ initial_pi &= 0xF;
-+ s->eqcr.available--;
-+ sent++;
-+
-+ }
-+
-+ done:
-+ lwsync();
-+
-+ /* To let the flushes complete faster, first write the verb (the first
-+ * 32-bit word) of every ring entry used, then flush the cachelines in a
-+ * separate pass below. */
-+ initial_pi = s->eqcr.pi;
-+ for (i = 0; i < sent; i++) {
-+ p = qbman_cena_write_start_wo_shadow_fast(&s->sys,
-+ QBMAN_CENA_SWP_EQCR((initial_pi) & 7));
-+
-+ p[0] = cl[0] | s->eqcr.pi_vb;
-+ initial_pi++;
-+ initial_pi &= 0xF;
-+
-+ if (!(initial_pi & 7))
-+ s->eqcr.pi_vb ^= QB_VALID_BIT;
-+
-+ }
-+
-+ initial_pi = s->eqcr.pi;
-+
-+ /* Flush all the lines, with no load/store operations in between.
-+ * start_pointer is cached in a local before the loop so it is not
-+ * re-read from memory on every iteration. */
-+ start_pointer = (uint64_t) s->sys.addr_cena;
-+ for (i = 0; i < sent; i++) {
-+ p = (uint32_t *)(start_pointer + QBMAN_CENA_SWP_EQCR(initial_pi & 7));
-+ dcbf((uint64_t)p);
-+ initial_pi++;
-+ initial_pi &= 0xF;
-+ }
-+
-+ /* Update producer index for the next call */
-+ s->eqcr.pi = initial_pi;
-+
-+ return sent;
-+}
-diff --git a/drivers/net/dpaa2/qbman/driver/qbman_portal.h b/drivers/net/dpaa2/qbman/driver/qbman_portal.h
-new file mode 100644
-index 0000000..f6ba86a
---- /dev/null
-+++ b/drivers/net/dpaa2/qbman/driver/qbman_portal.h
-@@ -0,0 +1,266 @@
-+/* Copyright (C) 2014 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include "qbman_private.h"
-+#include <drivers/fsl_qbman_portal.h>
-+
-+uint32_t qman_version;
-+/* All QBMan command and result structures use this "valid bit" encoding */
-+#define QB_VALID_BIT ((uint32_t)0x80)
-+
-+/* Management command result codes */
-+#define QBMAN_MC_RSLT_OK 0xf0
-+
-+/* TBD: as of QBMan 4.1, DQRR will be 8 rather than 4! */
-+#define QBMAN_DQRR_SIZE 4
-+
-+#define QBMAN_EQCR_SIZE 8
-+
-+static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last)
-+{
-+ /* 'first' is included, 'last' is excluded */
-+ if (first <= last)
-+ return last - first;
-+ return (2 * ringsize) + last - first;
-+}
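
A worked check of the helper above: callers keep pi/ci modulo 2*ringsize (0..15 for the 8-entry EQCR), which is why the wrapped case adds 2*ringsize:

    #include <assert.h>

    static void qm_cyc_diff_examples(void)
    {
            assert(qm_cyc_diff(8, 3, 7) == 4);      /* no wrap */
            assert(qm_cyc_diff(8, 14, 2) == 4);     /* wrapped: 16 + 2 - 14 */
    }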
-+
-+/* --------------------- */
-+/* portal data structure */
-+/* --------------------- */
-+
-+struct qbman_swp {
-+ const struct qbman_swp_desc *desc;
-+ /* The qbman_sys (ie. arch/OS-specific) support code can put anything it
-+ * needs in here. */
-+ struct qbman_swp_sys sys;
-+ /* Management commands */
-+ struct {
-+#ifdef QBMAN_CHECKING
-+ enum swp_mc_check {
-+ swp_mc_can_start, /* call __qbman_swp_mc_start() */
-+ swp_mc_can_submit, /* call __qbman_swp_mc_submit() */
-+ swp_mc_can_poll, /* call __qbman_swp_mc_result() */
-+ } check;
-+#endif
-+ uint32_t valid_bit; /* 0x00 or 0x80 */
-+ } mc;
-+ /* Push dequeues */
-+ uint32_t sdq;
-+ /* Volatile dequeues */
-+ struct {
-+ /* VDQCR supports a "1 deep pipeline", meaning that if you know
-+ * the last-submitted command is already executing in the
-+ * hardware (as evidenced by at least 1 valid dequeue result),
-+ * you can write another dequeue command to the register, the
-+ * hardware will start executing it as soon as the
-+ * already-executing command terminates. (This minimises latency
-+ * and stalls.) With that in mind, this "busy" variable refers
-+ * to whether or not a command can be submitted, not whether or
-+ * not a previously-submitted command is still executing. In
-+ * other words, once proof is seen that the previously-submitted
-+ * command is executing, "vdq" is no longer "busy". */
-+ atomic_t busy;
-+ uint32_t valid_bit; /* 0x00 or 0x80 */
-+ /* We need to determine when vdq is no longer busy. This depends
-+ * on whether the "busy" (last-submitted) dequeue command is
-+ * targeting DQRR or main-memory, and detection is based on the
-+ * presence of the dequeue command's "token" showing up in
-+ * dequeue entries in DQRR or main-memory (respectively). */
-+ struct qbman_result *storage; /* NULL if DQRR */
-+ } vdq;
-+ /* DQRR */
-+ struct {
-+ uint32_t next_idx;
-+ uint32_t valid_bit;
-+ uint8_t dqrr_size;
-+ int reset_bug;
-+ } dqrr;
-+ struct {
-+ uint32_t pi;
-+ uint32_t pi_vb;
-+ uint32_t ci;
-+ int available;
-+ } eqcr;
-+};
-+
-+/* -------------------------- */
-+/* portal management commands */
-+/* -------------------------- */
-+
-+/* Different management commands all use this common base layer of code to issue
-+ * commands and poll for results. The first function returns a pointer to where
-+ * the caller should fill in their MC command (though they should ignore the
-+ * verb byte), the second function merges in the caller-supplied command
-+ * verb (which should not include the valid-bit) and submits the command to
-+ * hardware, and the third function checks for a completed response (it
-+ * returns non-NULL only if the response is complete). */
-+void *qbman_swp_mc_start(struct qbman_swp *p);
-+void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint32_t cmd_verb);
-+void *qbman_swp_mc_result(struct qbman_swp *p);
-+
-+/* Wraps up submit + poll-for-result */
-+static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
-+ uint32_t cmd_verb)
-+{
-+ int loopvar;
-+ qbman_swp_mc_submit(swp, cmd, cmd_verb);
-+ DBG_POLL_START(loopvar);
-+ do {
-+ DBG_POLL_CHECK(loopvar);
-+ cmd = qbman_swp_mc_result(swp);
-+ } while (!cmd);
-+ return cmd;
-+}
-+
-+/* ------------ */
-+/* qb_attr_code */
-+/* ------------ */
-+
-+/* This struct locates a sub-field within a QBMan portal (CENA) cacheline which
-+ * is either serving as a configuration command or a query result. The
-+ * representation is inherently little-endian, as the indexing of the words is
-+ * itself little-endian in nature and DPAA2 QBMan is little endian for anything
-+ * that crosses a word boundary too (64-bit fields are the obvious examples).
-+ */
-+struct qb_attr_code {
-+ unsigned int word; /* which uint32_t[] array member encodes the field */
-+ unsigned int lsoffset; /* encoding offset from ls-bit */
-+ unsigned int width; /* encoding width. (bool must be 1.) */
-+};
-+
-+/* Some pre-defined codes */
-+extern struct qb_attr_code code_generic_verb;
-+extern struct qb_attr_code code_generic_rslt;
-+
-+/* Macros to define codes */
-+#define QB_CODE(a, b, c) { a, b, c}
-+#define QB_CODE_NULL \
-+ QB_CODE((unsigned int)-1, (unsigned int)-1, (unsigned int)-1)
-+
-+/* Rotate a code "ms", meaning that it moves from less-significant bytes to
-+ * more-significant, from less-significant words to more-significant, etc. The
-+ * "ls" version does the inverse, from more-significant towards
-+ * less-significant.
-+ */
-+static inline void qb_attr_code_rotate_ms(struct qb_attr_code *code,
-+ unsigned int bits)
-+{
-+ code->lsoffset += bits;
-+ while (code->lsoffset > 31) {
-+ code->word++;
-+ code->lsoffset -= 32;
-+ }
-+}
-+static inline void qb_attr_code_rotate_ls(struct qb_attr_code *code,
-+ unsigned int bits)
-+{
-+ /* Don't be fooled, this trick should work because the types are
-+ * unsigned. So the case that interests the while loop (the rotate has
-+ * gone too far and the word count needs to compensate for it), is
-+ * manifested when lsoffset is negative. But that equates to a really
-+ * large unsigned value, starting with lots of "F"s. As such, we can
-+ * continue adding 32 back to it until it wraps back round above zero,
-+ * to a value of 31 or less...
-+ */
-+ code->lsoffset -= bits;
-+ while (code->lsoffset > 31) {
-+ code->word--;
-+ code->lsoffset += 32;
-+ }
-+}
-+/* Implement a loop of code rotations until 'expr' evaluates to FALSE (0). */
-+#define qb_attr_code_for_ms(code, bits, expr) \
-+ for (; expr; qb_attr_code_rotate_ms(code, bits))
-+#define qb_attr_code_for_ls(code, bits, expr) \
-+ for (; expr; qb_attr_code_rotate_ls(code, bits))
-+
-+/* decode a field from a cacheline */
-+static inline uint32_t qb_attr_code_decode(const struct qb_attr_code *code,
-+ const uint32_t *cacheline)
-+{
-+ return d32_uint32_t(code->lsoffset, code->width, cacheline[code->word]);
-+}
-+static inline uint64_t qb_attr_code_decode_64(const struct qb_attr_code *code,
-+ const uint64_t *cacheline)
-+{
-+ return cacheline[code->word / 2];
-+}
-+
-+/* encode a field to a cacheline */
-+static inline void qb_attr_code_encode(const struct qb_attr_code *code,
-+ uint32_t *cacheline, uint32_t val)
-+{
-+ cacheline[code->word] =
-+ r32_uint32_t(code->lsoffset, code->width, cacheline[code->word])
-+ | e32_uint32_t(code->lsoffset, code->width, val);
-+}
-+static inline void qb_attr_code_encode_64(const struct qb_attr_code *code,
-+ uint64_t *cacheline, uint64_t val)
-+{
-+ cacheline[code->word / 2] = val;
-+}
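
Concretely, a code such as QB_CODE(2, 0, 24) (the DQRR FQID field used earlier) just isolates bits [lsoffset, lsoffset+width) of 32-bit word 'word' of the cacheline. A self-contained sketch of the decode step, without the d32_/e32_ helpers:

    static uint32_t sketch_decode(unsigned int word, unsigned int lsoffset,
                                  unsigned int width, const uint32_t *cacheline)
    {
            uint32_t mask = (width == 32) ? 0xffffffffu : ((1u << width) - 1);

            return (cacheline[word] >> lsoffset) & mask;
    }

    /* e.g. with cacheline[2] == 0x12abcdef, sketch_decode(2, 0, 24, cacheline)
     * returns 0x00abcdef, matching qb_attr_code_decode(&code_dqrr_fqid, ...). */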
-+
-+/* Small-width signed values (two's-complement) will decode into medium-width
-+ * positives. (Eg. for an 8-bit signed field, which stores values from -128 to
-+ * +127, a setting of -7 would appear to decode to the 32-bit unsigned value
-+ * 249. Likewise -120 would decode as 136.) This function allows the caller to
-+ * "re-sign" such fields to 32-bit signed. (Eg. -7, which was 249 with an 8-bit
-+ * encoding, will become 0xfffffff9 if you cast the return value to uint32_t).
-+ */
-+static inline int32_t qb_attr_code_makesigned(const struct qb_attr_code *code,
-+ uint32_t val)
-+{
-+ BUG_ON(val >= (1u << code->width));
-+ /* code->width should never exceed the width of val. If it does then a
-+ * different function with larger val size must be used to translate
-+ * from unsigned to signed */
-+ BUG_ON(code->width > sizeof(val) * CHAR_BIT);
-+ /* If the high bit was set, it was encoding a negative */
-+ if (val >= 1u << (code->width - 1))
-+ return (int32_t)0 - (int32_t)(((uint32_t)1 << code->width) -
-+ val);
-+ /* Otherwise, it was encoding a positive */
-+ return (int32_t)val;
-+}
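
Plugging the comment's example values into the formula above confirms the round trip:

    /* width == 8: the encoded value 249 has its high bit set, so the result is
     *   0 - ((1u << 8) - 249) == -7,
     * and the encoded value 136 gives 0 - (256 - 136) == -120, as stated. */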
-+
-+/* ---------------------- */
-+/* Descriptors/cachelines */
-+/* ---------------------- */
-+
-+/* To avoid needless dynamic allocation, the driver API often gives the caller
-+ * a "descriptor" type that the caller can instantiate however they like.
-+ * Ultimately though, it is just a cacheline of binary storage (or something
-+ * smaller when it is known that the descriptor doesn't need all 64 bytes) for
-+ * holding pre-formatted pieces of hardware commands. The performance-critical
-+ * code can then copy these descriptors directly into hardware command
-+ * registers more efficiently than trying to construct/format commands
-+ * on-the-fly. The API user sees the descriptor as an array of 32-bit words in
-+ * order for the compiler to know its size, but the internal details are not
-+ * exposed. The following macro is used within the driver for converting *any*
-+ * descriptor pointer to a usable array pointer. The use of a macro (instead of
-+ * an inline) is necessary to work with different descriptor types and to work
-+ * correctly with const and non-const inputs (and similarly-qualified outputs).
-+ */
-+#define qb_cl(d) (&(d)->dont_manipulate_directly[0])
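
As an illustration of the convention qb_cl() relies on (the struct name and size here are hypothetical; the real descriptor types live in fsl_qbman_portal.h):

    struct qbman_example_desc {
            uint32_t dont_manipulate_directly[8];   /* raw command storage */
    };

    /* qb_cl(&d) then evaluates to &d.dont_manipulate_directly[0], i.e. a plain
     * uint32_t pointer over the storage, for const and non-const descriptor
     * pointers alike. */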
-diff --git a/drivers/net/dpaa2/qbman/driver/qbman_private.h b/drivers/net/dpaa2/qbman/driver/qbman_private.h
-new file mode 100644
-index 0000000..4e50b61
---- /dev/null
-+++ b/drivers/net/dpaa2/qbman/driver/qbman_private.h
-@@ -0,0 +1,165 @@
-+/* Copyright (C) 2014 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+*/
-+
-+/* Perform extra checking */
-+#define QBMAN_CHECKING
-+
-+/* To maximise the amount of logic that is common between the Linux driver and
-+ * other targets (such as the embedded MC firmware), we pivot here between the
-+ * inclusion of two platform-specific headers.
-+ *
-+ * The first, qbman_sys_decl.h, includes any and all required system headers as
-+ * well as providing any definitions for the purposes of compatibility. The
-+ * second, qbman_sys.h, is where platform-specific routines go.
-+ *
-+ * The point of the split is that the platform-independent code (including this
-+ * header) may depend on platform-specific declarations, yet other
-+ * platform-specific routines may depend on platform-independent definitions.
-+ */
-+
-+#include "qbman_sys_decl.h"
-+
-+/* When things go wrong, it is a convenient trick to insert a few FOO()
-+ * statements in the code to trace progress. TODO: remove this once we are
-+ * hacking the code less actively.
-+ */
-+#define FOO() fsl_os_print("FOO: %s:%d\n", __FILE__, __LINE__)
-+
-+/* Any time there is a register interface which we poll on, this provides a
-+ * "break after x iterations" scheme for it. It's handy for debugging, eg.
-+ * where you don't want millions of lines of log output from a polling loop
-+ * that won't terminate, because such things tend to drown out the earlier
-+ * log output that might explain what caused the problem. (NB: put ";" after
-+ * each macro!)
-+ * TODO: we should probably remove this once we're done sanitising the
-+ * simulator...
-+ */
-+#define DBG_POLL_START(loopvar) (loopvar = 10)
-+#define DBG_POLL_CHECK(loopvar) \
-+ do {if (!(loopvar--)) BUG_ON(NULL == "DBG_POLL_CHECK"); } while (0)
-+
-+/* For CCSR or portal-CINH registers that contain fields at arbitrary offsets
-+ * and widths, these macro-generated encode/decode/isolate/remove inlines can
-+ * be used.
-+ *
-+ * Eg. to "d"ecode a 14-bit field out of a register (into a "uint16_t" type),
-+ * where the field is located 3 bits "up" from the least-significant bit of the
-+ * register (ie. the field location within the 32-bit register corresponds to a
-+ * mask of 0x0001fff8), you would do;
-+ * uint16_t field = d32_uint16_t(3, 14, reg_value);
-+ *
-+ * Or to "e"ncode a 1-bit boolean value (input type is "int", zero is FALSE,
-+ * non-zero is TRUE, so must convert all non-zero inputs to 1, hence the "!!"
-+ * operator) into a register at bit location 0x00080000 (19 bits "in" from the
-+ * LS bit), do;
-+ * reg_value |= e32_int(19, 1, !!field);
-+ *
-+ * If you wish to read-modify-write a register, such that you leave the 14-bit
-+ * field as-is but have all other fields set to zero, then "i"solate the 14-bit
-+ * value using;
-+ * reg_value = i32_uint16_t(3, 14, reg_value);
-+ *
-+ * Alternatively, you could "r"emove the 1-bit boolean field (setting it to
-+ * zero) but leaving all other fields as-is;
-+ * reg_val = r32_int(19, 1, reg_value);
-+ *
-+ */
-+#define MAKE_MASK32(width) (width == 32 ? 0xffffffff : \
-+ (uint32_t)((1 << width) - 1))
-+#define DECLARE_CODEC32(t) \
-+static inline uint32_t e32_##t(uint32_t lsoffset, uint32_t width, t val) \
-+{ \
-+ BUG_ON(width > (sizeof(t) * 8)); \
-+ return ((uint32_t)val & MAKE_MASK32(width)) << lsoffset; \
-+} \
-+static inline t d32_##t(uint32_t lsoffset, uint32_t width, uint32_t val) \
-+{ \
-+ BUG_ON(width > (sizeof(t) * 8)); \
-+ return (t)((val >> lsoffset) & MAKE_MASK32(width)); \
-+} \
-+static inline uint32_t i32_##t(uint32_t lsoffset, uint32_t width, \
-+ uint32_t val) \
-+{ \
-+ BUG_ON(width > (sizeof(t) * 8)); \
-+ return e32_##t(lsoffset, width, d32_##t(lsoffset, width, val)); \
-+} \
-+static inline uint32_t r32_##t(uint32_t lsoffset, uint32_t width, \
-+ uint32_t val) \
-+{ \
-+ BUG_ON(width > (sizeof(t) * 8)); \
-+ return ~(MAKE_MASK32(width) << lsoffset) & val; \
-+}
-+DECLARE_CODEC32(uint32_t)
-+DECLARE_CODEC32(uint16_t)
-+DECLARE_CODEC32(uint8_t)
-+DECLARE_CODEC32(int)
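
The usage examples from the comment above, gathered into one compilable snippet (the register value is arbitrary):

    static void codec32_examples(uint32_t reg_value)
    {
            /* decode the 14-bit field at bits 3..16 (mask 0x0001fff8) */
            uint16_t field = d32_uint16_t(3, 14, reg_value);

            /* encode a boolean into bit 19 (mask 0x00080000) */
            reg_value |= e32_int(19, 1, !!field);

            /* keep only the 14-bit field, zeroing everything else */
            reg_value = i32_uint16_t(3, 14, reg_value);

            /* clear the bit-19 boolean, leaving other fields untouched */
            reg_value = r32_int(19, 1, reg_value);

            (void)reg_value;
    }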
-+
-+ /*********************/
-+ /* Debugging assists */
-+ /*********************/
-+
-+static inline void __hexdump(unsigned long start, unsigned long end,
-+ unsigned long p, size_t sz, const unsigned char *c)
-+{
-+ while (start < end) {
-+ unsigned int pos = 0;
-+ char buf[64];
-+ int nl = 0;
-+ pos += sprintf(buf + pos, "%08lx: ", start);
-+ do {
-+ if ((start < p) || (start >= (p + sz)))
-+ pos += sprintf(buf + pos, "..");
-+ else
-+ pos += sprintf(buf + pos, "%02x", *(c++));
-+ if (!(++start & 15)) {
-+ buf[pos++] = '\n';
-+ nl = 1;
-+ } else {
-+ nl = 0;
-+ if (!(start & 1))
-+ buf[pos++] = ' ';
-+ if (!(start & 3))
-+ buf[pos++] = ' ';
-+ }
-+ } while (start & 15);
-+ if (!nl)
-+ buf[pos++] = '\n';
-+ buf[pos] = '\0';
-+ pr_info("%s", buf);
-+ }
-+}
-+static inline void hexdump(const void *ptr, size_t sz)
-+{
-+ unsigned long p = (unsigned long)ptr;
-+ unsigned long start = p & ~(unsigned long)15;
-+ unsigned long end = (p + sz + 15) & ~(unsigned long)15;
-+ const unsigned char *c = ptr;
-+ __hexdump(start, end, p, sz, c);
-+}
-+
-+#define QMAN_REV_4000 0x04000000
-+#define QMAN_REV_4100 0x04010000
-+#define QMAN_REV_4101 0x04010001
-+
-+#include "qbman_sys.h"
-diff --git a/drivers/net/dpaa2/qbman/driver/qbman_sys.h b/drivers/net/dpaa2/qbman/driver/qbman_sys.h
-new file mode 100644
-index 0000000..d912ab0
---- /dev/null
-+++ b/drivers/net/dpaa2/qbman/driver/qbman_sys.h
-@@ -0,0 +1,367 @@
-+/* Copyright (C) 2014 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+/* qbman_sys_decl.h and qbman_sys.h are the two platform-specific files in the
-+ * driver. They are only included via qbman_private.h, which is itself a
-+ * platform-independent file and is included by all the other driver source.
-+ *
-+ * qbman_sys_decl.h is included prior to all other declarations and logic, and
-+ * it exists to provide compatibility with any linux interfaces our
-+ * single-source driver code is dependent on (eg. kmalloc). Ie. this file
-+ * provides linux compatibility.
-+ *
-+ * This qbman_sys.h header, on the other hand, is included *after* any common
-+ * and platform-neutral declarations and logic in qbman_private.h, and exists to
-+ * implement any platform-specific logic of the qbman driver itself. Ie. it is
-+ * *not* to provide linux compatibility.
-+ */
-+
-+/* Trace the 3 different classes of read/write access to QBMan. #undef as
-+ * required. */
-+#undef QBMAN_CCSR_TRACE
-+#undef QBMAN_CINH_TRACE
-+#undef QBMAN_CENA_TRACE
-+
-+static inline void word_copy(void *d, const void *s, unsigned int cnt)
-+{
-+ uint32_t *dd = d;
-+ const uint32_t *ss = s;
-+ while (cnt--)
-+ *(dd++) = *(ss++);
-+}
-+
-+/* Currently, the CENA support code expects each 32-bit word to be written in
-+ * host order, and these are converted to hardware (little-endian) order on
-+ * command submission. However, 64-bit quantities must be written (and read)
-+ * as two 32-bit words with the least-significant word first, irrespective of
-+ * host endianness. */
-+static inline void u64_to_le32_copy(void *d, const uint64_t *s,
-+ unsigned int cnt)
-+{
-+ uint32_t *dd = d;
-+ const uint32_t *ss = (const uint32_t *)s;
-+ while (cnt--) {
-+ /* TBD: the toolchain was choking on the use of 64-bit types up
-+ * until recently so this works entirely with 32-bit variables.
-+ * When 64-bit types become usable again, investigate better
-+ * ways of doing this. */
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ *(dd++) = ss[1];
-+ *(dd++) = ss[0];
-+ ss += 2;
-+#else
-+ *(dd++) = *(ss++);
-+ *(dd++) = *(ss++);
-+#endif
-+ }
-+}
-+static inline void u64_from_le32_copy(uint64_t *d, const void *s,
-+ unsigned int cnt)
-+{
-+ const uint32_t *ss = s;
-+ uint32_t *dd = (uint32_t *)d;
-+ while (cnt--) {
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+ dd[1] = *(ss++);
-+ dd[0] = *(ss++);
-+ dd += 2;
-+#else
-+ *(dd++) = *(ss++);
-+ *(dd++) = *(ss++);
-+#endif
-+ }
-+}
-+
-+/* Convert a host-native 32bit value into little endian */
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+static inline uint32_t make_le32(uint32_t val)
-+{
-+ return ((val & 0xff) << 24) | ((val & 0xff00) << 8) |
-+ ((val & 0xff0000) >> 8) | ((val & 0xff000000) >> 24);
-+}
-+static inline uint32_t make_le24(uint32_t val)
-+{
-+ return (((val & 0xff) << 16) | (val & 0xff00) |
-+ ((val & 0xff0000) >> 16));
-+}
-+#else
-+#define make_le32(val) (val)
-+#define make_le24(val) (val)
-+#endif
-+static inline void make_le32_n(uint32_t *val, unsigned int num)
-+{
-+ while (num--) {
-+ *val = make_le32(*val);
-+ val++;
-+ }
-+}
-+
-+ /******************/
-+ /* Portal access */
-+ /******************/
-+struct qbman_swp_sys {
-+ /* On GPP, the sys support for qbman_swp is here. The CENA region is
-+ * not an mmap() of the real portal registers, but an allocated
-+ * place-holder, because the actual writes/reads to/from the portal are
-+ * marshalled from these allocated areas using QBMan's "MC access
-+ * registers". CINH accesses are atomic so there's no need for a
-+ * place-holder. */
-+ uint8_t *cena;
-+ uint8_t __iomem *addr_cena;
-+ uint8_t __iomem *addr_cinh;
-+ uint32_t idx;
-+ enum qbman_eqcr_mode eqcr_mode;
-+};
-+
-+/* P_OFFSET is (ACCESS_CMD,0,12) - offset within the portal
-+ * C is (ACCESS_CMD,12,1) - is inhibited? (0==CENA, 1==CINH)
-+ * SWP_IDX is (ACCESS_CMD,16,10) - Software portal index
-+ * P is (ACCESS_CMD,28,1) - (0==special portal, 1==any portal)
-+ * T is (ACCESS_CMD,29,1) - Command type (0==READ, 1==WRITE)
-+ * E is (ACCESS_CMD,31,1) - Command execute (1 to issue, poll for 0==complete)
-+ */
-+
-+static inline void qbman_cinh_write(struct qbman_swp_sys *s, uint32_t offset,
-+ uint32_t val)
-+{
-+
-+ __raw_writel(val, s->addr_cinh + offset);
-+#ifdef QBMAN_CINH_TRACE
-+ pr_info("qbman_cinh_write(%p:%d:0x%03x) 0x%08x\n",
-+ s->addr_cinh, s->idx, offset, val);
-+#endif
-+}
-+
-+static inline uint32_t qbman_cinh_read(struct qbman_swp_sys *s, uint32_t offset)
-+{
-+ uint32_t reg = __raw_readl(s->addr_cinh + offset);
-+#ifdef QBMAN_CINH_TRACE
-+ pr_info("qbman_cinh_read(%p:%d:0x%03x) 0x%08x\n",
-+ s->addr_cinh, s->idx, offset, reg);
-+#endif
-+ return reg;
-+}
-+
-+static inline void *qbman_cena_write_start(struct qbman_swp_sys *s,
-+ uint32_t offset)
-+{
-+ void *shadow = s->cena + offset;
-+
-+#ifdef QBMAN_CENA_TRACE
-+ pr_info("qbman_cena_write_start(%p:%d:0x%03x) %p\n",
-+ s->addr_cena, s->idx, offset, shadow);
-+#endif
-+ BUG_ON(offset & 63);
-+ dcbz(shadow);
-+ return shadow;
-+}
-+
-+static inline void *qbman_cena_write_start_wo_shadow(struct qbman_swp_sys *s,
-+ uint32_t offset)
-+{
-+#ifdef QBMAN_CENA_TRACE
-+ pr_info("qbman_cena_write_start(%p:%d:0x%03x)\n",
-+ s->addr_cena, s->idx, offset);
-+#endif
-+ BUG_ON(offset & 63);
-+ return (s->addr_cena + offset);
-+}
-+
-+static inline void qbman_cena_write_complete(struct qbman_swp_sys *s,
-+ uint32_t offset, void *cmd)
-+{
-+ const uint32_t *shadow = cmd;
-+ int loop;
-+#ifdef QBMAN_CENA_TRACE
-+ pr_info("qbman_cena_write_complete(%p:%d:0x%03x) %p\n",
-+ s->addr_cena, s->idx, offset, shadow);
-+ hexdump(cmd, 64);
-+#endif
-+ for (loop = 15; loop >= 1; loop--)
-+ __raw_writel(shadow[loop], s->addr_cena +
-+ offset + loop * 4);
-+ lwsync();
-+ __raw_writel(shadow[0], s->addr_cena + offset);
-+ dcbf(s->addr_cena + offset);
-+}
-+
-+static inline void qbman_cena_write_complete_wo_shadow(struct qbman_swp_sys *s,
-+ uint32_t offset)
-+{
-+#ifdef QBMAN_CENA_TRACE
-+ pr_info("qbman_cena_write_complete(%p:%d:0x%03x)\n",
-+ s->addr_cena, s->idx, offset);
-+#endif
-+ dcbf(s->addr_cena + offset);
-+}
-+
-+static inline uint32_t qbman_cena_read_reg(struct qbman_swp_sys *s,
-+ uint32_t offset)
-+{
-+ return __raw_readl(s->addr_cena + offset);
-+}
-+
-+static inline void *qbman_cena_read(struct qbman_swp_sys *s, uint32_t offset)
-+{
-+ uint32_t *shadow = (uint32_t *)(s->cena + offset);
-+ unsigned int loop;
-+#ifdef QBMAN_CENA_TRACE
-+ pr_info("qbman_cena_read(%p:%d:0x%03x) %p\n",
-+ s->addr_cena, s->idx, offset, shadow);
-+#endif
-+
-+ for (loop = 0; loop < 16; loop++)
-+ shadow[loop] = __raw_readl(s->addr_cena + offset
-+ + loop * 4);
-+#ifdef QBMAN_CENA_TRACE
-+ hexdump(shadow, 64);
-+#endif
-+ return shadow;
-+}
-+
-+static inline void *qbman_cena_read_wo_shadow(struct qbman_swp_sys *s,
-+ uint32_t offset)
-+{
-+#ifdef QBMAN_CENA_TRACE
-+ pr_info("qbman_cena_read(%p:%d:0x%03x) %p\n",
-+ s->addr_cena, s->idx, offset, s->addr_cena + offset);
-+ hexdump(s->addr_cena + offset, 64);
-+#endif
-+ return s->addr_cena + offset;
-+}
-+
-+static inline void qbman_cena_invalidate(struct qbman_swp_sys *s,
-+ uint32_t offset)
-+{
-+ dccivac(s->addr_cena + offset);
-+}
-+
-+static inline void qbman_cena_invalidate_prefetch(struct qbman_swp_sys *s,
-+ uint32_t offset)
-+{
-+ dccivac(s->addr_cena + offset);
-+ prefetch_for_load(s->addr_cena + offset);
-+}
-+
-+static inline void qbman_cena_prefetch(struct qbman_swp_sys *s,
-+ uint32_t offset)
-+{
-+ prefetch_for_load(s->addr_cena + offset);
-+}
-+
-+ /******************/
-+ /* Portal support */
-+ /******************/
-+
-+/* The SWP_CFG portal register is special, in that it is used by the
-+ * platform-specific code rather than the platform-independent code in
-+ * qbman_portal.c. So use of it is declared locally here. */
-+#define QBMAN_CINH_SWP_CFG 0xd00
-+
-+/* For MC portal use, we always configure with
-+ * DQRR_MF is (SWP_CFG,20,3) - DQRR max fill (<- 0x4)
-+ * EST is (SWP_CFG,16,3) - EQCR_CI stashing threshold (<- 0x2)
-+ * RPM is (SWP_CFG,12,2) - RCR production notification mode (<- 0x3)
-+ * DCM is (SWP_CFG,10,2) - DQRR consumption notification mode (<- 0x2)
-+ * EPM is (SWP_CFG,8,2) - EQCR production notification mode (<- 0x2)
-+ * SD is (SWP_CFG,5,1) - memory stashing drop enable (<- TRUE)
-+ * SP is (SWP_CFG,4,1) - memory stashing priority (<- TRUE)
-+ * SE is (SWP_CFG,3,1) - memory stashing enable (<- TRUE)
-+ * DP is (SWP_CFG,2,1) - dequeue stashing priority (<- TRUE)
-+ * DE is (SWP_CFG,1,1) - dequeue stashing enable (<- TRUE)
-+ * EP is (SWP_CFG,0,1) - EQCR_CI stashing priority (<- TRUE)
-+ */
-+static inline uint32_t qbman_set_swp_cfg(uint8_t max_fill, uint8_t wn,
-+ uint8_t est, uint8_t rpm, uint8_t dcm,
-+ uint8_t epm, int sd, int sp, int se,
-+ int dp, int de, int ep)
-+{
-+ uint32_t reg;
-+ reg = e32_uint8_t(20, (uint32_t)(3 + (max_fill >> 3)), max_fill) |
-+ e32_uint8_t(16, 3, est) |
-+ e32_uint8_t(12, 2, rpm) | e32_uint8_t(10, 2, dcm) |
-+ e32_uint8_t(8, 2, epm) | e32_int(5, 1, sd) |
-+ e32_int(4, 1, sp) | e32_int(3, 1, se) | e32_int(2, 1, dp) |
-+ e32_int(1, 1, de) | e32_int(0, 1, ep) | e32_uint8_t(14, 1, wn);
-+ return reg;
-+}
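
For reference, the ring-mode call made from qbman_swp_sys_init() below lines up with the parameter list above as follows:

    /* qbman_set_swp_cfg(dqrr_size, 0,  2,   3,   2,   2,   1,  1,  1,  1,  1,  1)
     *                   max_fill   wn  est  rpm  dcm  epm  sd  sp  se  dp  de  ep */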
-+
-+static inline int qbman_swp_sys_init(struct qbman_swp_sys *s,
-+ const struct qbman_swp_desc *d,
-+ uint8_t dqrr_size)
-+{
-+ uint32_t reg;
-+ s->addr_cena = d->cena_bar;
-+ s->addr_cinh = d->cinh_bar;
-+ s->idx = (uint32_t)d->idx;
-+ s->cena = (void *)get_zeroed_page(GFP_KERNEL);
-+ if (!s->cena) {
-+ pr_err("Could not allocate page for cena shadow\n");
-+ return -1;
-+ }
-+ s->eqcr_mode = d->eqcr_mode;
-+ BUG_ON(d->idx < 0);
-+#ifdef QBMAN_CHECKING
-+ /* We should never be asked to initialise for a portal that isn't in
-+ * the power-on state. (Ie. don't forget to reset portals when they are
-+ * decommissioned!)
-+ */
-+ reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG);
-+ BUG_ON(reg);
-+#endif
-+ if (s->eqcr_mode == qman_eqcr_vb_array)
-+ reg = qbman_set_swp_cfg(dqrr_size, 0, 0, 3, 2, 3, 1, 1, 1, 1,
-+ 1, 1);
-+ else
-+ reg = qbman_set_swp_cfg(dqrr_size, 0, 2, 3, 2, 2, 1, 1, 1, 1,
-+ 1, 1);
-+ qbman_cinh_write(s, QBMAN_CINH_SWP_CFG, reg);
-+ reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG);
-+ if (!reg) {
-+ pr_err("The portal %d is not enabled!\n", s->idx);
-+ free_page((unsigned long)s->cena);
-+ return -1;
-+ }
-+ return 0;
-+}
-+
-+static inline void qbman_swp_sys_finish(struct qbman_swp_sys *s)
-+{
-+ free_page((unsigned long)s->cena);
-+}
-+
-+static inline void *qbman_cena_write_start_wo_shadow_fast(struct qbman_swp_sys *s,
-+ uint32_t offset)
-+{
-+ #ifdef QBMAN_CENA_TRACE
-+ pr_info("qbman_cena_write_start(%p:%d:0x%03x)\n",
-+ s->addr_cena, s->idx, offset);
-+ #endif
-+ BUG_ON(offset & 63);
-+ return (s->addr_cena + offset);
-+}
-diff --git a/drivers/net/dpaa2/qbman/driver/qbman_sys_decl.h b/drivers/net/dpaa2/qbman/driver/qbman_sys_decl.h
-new file mode 100644
-index 0000000..ae7ef97
---- /dev/null
-+++ b/drivers/net/dpaa2/qbman/driver/qbman_sys_decl.h
-@@ -0,0 +1,68 @@
-+/* Copyright (C) 2014 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#include <compat.h>
-+#include <drivers/fsl_qbman_base.h>
-+
-+/* Sanity check */
-+#if (__BYTE_ORDER__ != __ORDER_BIG_ENDIAN__) && \
-+ (__BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__)
-+#error "Unknown endianness!"
-+#endif
-+
-+/* The platform-independent code shouldn't need endianness, except for
-+ * weird/fast-path cases like qbman_result_has_token(), which needs to
-+ * perform a passive and endianness-specific test on a read-only data structure
-+ * very quickly. It's an exception, and this symbol is used for that case. */
-+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+#define DQRR_TOK_OFFSET 0
-+#define QBMAN_RESULT_VERB_OFFSET_IN_MEM 24
-+#define SCN_STATE_OFFSET_IN_MEM 8
-+#define SCN_RID_OFFSET_IN_MEM 8
-+#else
-+#define DQRR_TOK_OFFSET 24
-+#define QBMAN_RESULT_VERB_OFFSET_IN_MEM 0
-+#define SCN_STATE_OFFSET_IN_MEM 16
-+#define SCN_RID_OFFSET_IN_MEM 0
-+#endif
-+
-+/* Similarly-named functions */
-+#define upper32(a) upper_32_bits(a)
-+#define lower32(a) lower_32_bits(a)
-+
-+ /****************/
-+ /* arch assists */
-+ /****************/
-+#define dcbz(p) { asm volatile("dc zva, %0" : : "r" (p) : "memory"); }
-+#define lwsync() { asm volatile("dmb st" : : : "memory"); }
-+#define dcbf(p) { asm volatile("dc cvac, %0" : : "r"(p) : "memory"); }
-+#define dccivac(p) { asm volatile("dc civac, %0" : : "r"(p) : "memory"); }
-+static inline void prefetch_for_load(void *p)
-+{
-+ asm volatile("prfm pldl1keep, [%0, #64]" : : "r" (p));
-+}
-+static inline void prefetch_for_store(void *p)
-+{
-+ asm volatile("prfm pstl1keep, [%0, #64]" : : "r" (p));
-+}
-diff --git a/drivers/net/dpaa2/qbman/include/compat.h b/drivers/net/dpaa2/qbman/include/compat.h
-new file mode 100644
-index 0000000..0d14b58
---- /dev/null
-+++ b/drivers/net/dpaa2/qbman/include/compat.h
-@@ -0,0 +1,597 @@
-+/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef HEADER_COMPAT_H
-+#define HEADER_COMPAT_H
-+
-+#include <sched.h>
-+
-+#ifndef _GNU_SOURCE
-+#define _GNU_SOURCE
-+#endif
-+#include <stdint.h>
-+#include <stdlib.h>
-+#include <stddef.h>
-+#include <errno.h>
-+#include <string.h>
-+#include <pthread.h>
-+#include <net/ethernet.h>
-+#include <stdio.h>
-+#include <stdbool.h>
-+#include <ctype.h>
-+#include <malloc.h>
-+#include <sys/types.h>
-+#include <sys/stat.h>
-+#include <fcntl.h>
-+#include <unistd.h>
-+#include <sys/mman.h>
-+#include <limits.h>
-+#include <assert.h>
-+#include <dirent.h>
-+#include <inttypes.h>
-+#include <error.h>
-+
-+/* The following definitions are primarily to allow the single-source driver
-+ * interfaces to be included by arbitrary program code. Ie. for interfaces that
-+ * are also available in kernel-space, these definitions provide compatibility
-+ * with certain attributes and types used in those interfaces. */
-+
-+/* Required compiler attributes */
-+#define __maybe_unused __attribute__((unused))
-+#define __always_unused __attribute__((unused))
-+#define __packed __attribute__((__packed__))
-+#define __user
-+#define likely(x) __builtin_expect(!!(x), 1)
-+#define unlikely(x) __builtin_expect(!!(x), 0)
-+#define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
-+#define container_of(p, t, f) (t *)((void *)p - offsetof(t, f))
-+#define __stringify_1(x) #x
-+#define __stringify(x) __stringify_1(x)
-+#define panic(x) \
-+do { \
-+ printf("panic: %s", x); \
-+ abort(); \
-+} while (0)
-+
-+#ifdef ARRAY_SIZE
-+#undef ARRAY_SIZE
-+#endif
-+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
-+
-+/* Required types */
-+typedef uint8_t u8;
-+typedef uint16_t u16;
-+typedef uint32_t u32;
-+typedef uint64_t u64;
-+typedef uint64_t dma_addr_t;
-+typedef cpu_set_t cpumask_t;
-+#define spinlock_t pthread_mutex_t
-+typedef u32 compat_uptr_t;
-+static inline void __user *compat_ptr(compat_uptr_t uptr)
-+{
-+ return (void __user *)(unsigned long)uptr;
-+}
-+
-+static inline compat_uptr_t ptr_to_compat(void __user *uptr)
-+{
-+ return (u32)(unsigned long)uptr;
-+}
-+
-+/* I/O operations */
-+static inline u32 in_be32(volatile void *__p)
-+{
-+ volatile u32 *p = __p;
-+ return *p;
-+}
-+static inline void out_be32(volatile void *__p, u32 val)
-+{
-+ volatile u32 *p = __p;
-+ *p = val;
-+}
-+
-+/* Debugging */
-+#define prflush(fmt, args...) \
-+ do { \
-+ printf(fmt, ##args); \
-+ fflush(stdout); \
-+ } while (0)
-+#define pr_crit(fmt, args...) prflush("CRIT:" fmt, ##args)
-+#define pr_err(fmt, args...) prflush("ERR:" fmt, ##args)
-+#define pr_warning(fmt, args...) prflush("WARN:" fmt, ##args)
-+#define pr_info(fmt, args...) prflush(fmt, ##args)
-+
-+#define BUG() abort()
-+#ifdef CONFIG_BUGON
-+#ifdef pr_debug
-+#undef pr_debug
-+#endif
-+#define pr_debug(fmt, args...) printf(fmt, ##args)
-+#define BUG_ON(c) \
-+do { \
-+ if (c) { \
-+ pr_crit("BUG: %s:%d\n", __FILE__, __LINE__); \
-+ abort(); \
-+ } \
-+} while(0)
-+#define might_sleep_if(c) BUG_ON(c)
-+#define msleep(x) \
-+do { \
-+ pr_crit("BUG: illegal call %s:%d\n", __FILE__, __LINE__); \
-+ exit(EXIT_FAILURE); \
-+} while(0)
-+#else
-+#ifdef pr_debug
-+#undef pr_debug
-+#endif
-+#define pr_debug(fmt, args...) do { ; } while(0)
-+#define BUG_ON(c) do { ; } while(0)
-+#define might_sleep_if(c) do { ; } while(0)
-+#define msleep(x) do { ; } while(0)
-+#endif
-+#define WARN_ON(c, str) \
-+do { \
-+ static int warned_##__LINE__; \
-+ if ((c) && !warned_##__LINE__) { \
-+ pr_warning("%s\n", str); \
-+ pr_warning("(%s:%d)\n", __FILE__, __LINE__); \
-+ warned_##__LINE__ = 1; \
-+ } \
-+} while (0)
-+
-+#define ALIGN(x, a) (((x) + ((typeof(x))(a) - 1)) & ~((typeof(x))(a) - 1))
-+
-+/****************/
-+/* Linked-lists */
-+/****************/
-+
-+struct list_head {
-+ struct list_head *prev;
-+ struct list_head *next;
-+};
-+
-+#define LIST_HEAD(n) \
-+struct list_head n = { \
-+ .prev = &n, \
-+ .next = &n \
-+}
-+#define INIT_LIST_HEAD(p) \
-+do { \
-+ struct list_head *__p298 = (p); \
-+ __p298->prev = __p298->next =__p298; \
-+} while(0)
-+#define list_entry(node, type, member) \
-+ (type *)((void *)node - offsetof(type, member))
-+#define list_empty(p) \
-+({ \
-+ const struct list_head *__p298 = (p); \
-+ ((__p298->next == __p298) && (__p298->prev == __p298)); \
-+})
-+#define list_add(p,l) \
-+do { \
-+ struct list_head *__p298 = (p); \
-+ struct list_head *__l298 = (l); \
-+ __p298->next = __l298->next; \
-+ __p298->prev = __l298; \
-+ __l298->next->prev = __p298; \
-+ __l298->next = __p298; \
-+} while(0)
-+#define list_add_tail(p,l) \
-+do { \
-+ struct list_head *__p298 = (p); \
-+ struct list_head *__l298 = (l); \
-+ __p298->prev = __l298->prev; \
-+ __p298->next = __l298; \
-+ __l298->prev->next = __p298; \
-+ __l298->prev = __p298; \
-+} while(0)
-+#define list_for_each(i, l) \
-+ for (i = (l)->next; i != (l); i = i->next)
-+#define list_for_each_safe(i, j, l) \
-+ for (i = (l)->next, j = i->next; i != (l); \
-+ i = j, j = i->next)
-+#define list_for_each_entry(i, l, name) \
-+ for (i = list_entry((l)->next, typeof(*i), name); &i->name != (l); \
-+ i = list_entry(i->name.next, typeof(*i), name))
-+#define list_for_each_entry_safe(i, j, l, name) \
-+ for (i = list_entry((l)->next, typeof(*i), name), \
-+ j = list_entry(i->name.next, typeof(*j), name); \
-+ &i->name != (l); \
-+ i = j, j = list_entry(j->name.next, typeof(*j), name))
-+#define list_del(i) \
-+do { \
-+ (i)->next->prev = (i)->prev; \
-+ (i)->prev->next = (i)->next; \
-+} while(0)
-+
-+/* Other miscellaneous interfaces our APIs depend on; */
-+
-+#define lower_32_bits(x) ((u32)(x))
-+#define upper_32_bits(x) ((u32)(((x) >> 16) >> 16))
-+
-+/* Compiler/type stuff */
-+typedef unsigned int gfp_t;
-+typedef uint32_t phandle;
-+
-+#define noinline __attribute__((noinline))
-+#define __iomem
-+#define EINTR 4
-+#define ENODEV 19
-+#define MODULE_AUTHOR(s)
-+#define MODULE_LICENSE(s)
-+#define MODULE_DESCRIPTION(s)
-+#define MODULE_PARM_DESC(x, y)
-+#define EXPORT_SYMBOL(x)
-+#define module_init(fn) int m_##fn(void) { return fn(); }
-+#define module_exit(fn) void m_##fn(void) { fn(); }
-+#define module_param(x, y, z)
-+#define module_param_string(w, x, y, z)
-+#define GFP_KERNEL 0
-+#define __KERNEL__
-+#define __init
-+#define __raw_readb(p) *(const volatile unsigned char *)(p)
-+#define __raw_readl(p) *(const volatile unsigned int *)(p)
-+#define __raw_writel(v, p) \
-+do { \
-+ *(volatile unsigned int *)(p) = (v); \
-+} while (0)
-+
-+/* printk() stuff */
-+#define printk(fmt, args...) do_not_use_printk
-+#define nada(fmt, args...) do { ; } while(0)
-+
-+/* Interrupt stuff */
-+typedef uint32_t irqreturn_t;
-+#define IRQ_HANDLED 0
-+
-+/* memcpy() stuff - when you know alignments in advance */
-+#ifdef CONFIG_TRY_BETTER_MEMCPY
-+static inline void copy_words(void *dest, const void *src, size_t sz)
-+{
-+ u32 *__dest = dest;
-+ const u32 *__src = src;
-+ size_t __sz = sz >> 2;
-+ BUG_ON((unsigned long)dest & 0x3);
-+ BUG_ON((unsigned long)src & 0x3);
-+ BUG_ON(sz & 0x3);
-+ while (__sz--)
-+ *(__dest++) = *(__src++);
-+}
-+static inline void copy_shorts(void *dest, const void *src, size_t sz)
-+{
-+ u16 *__dest = dest;
-+ const u16 *__src = src;
-+ size_t __sz = sz >> 1;
-+ BUG_ON((unsigned long)dest & 0x1);
-+ BUG_ON((unsigned long)src & 0x1);
-+ BUG_ON(sz & 0x1);
-+ while (__sz--)
-+ *(__dest++) = *(__src++);
-+}
-+static inline void copy_bytes(void *dest, const void *src, size_t sz)
-+{
-+ u8 *__dest = dest;
-+ const u8 *__src = src;
-+ while (sz--)
-+ *(__dest++) = *(__src++);
-+}
-+#else
-+#define copy_words memcpy
-+#define copy_shorts memcpy
-+#define copy_bytes memcpy
-+#endif
-+
-+/* Spinlock stuff */
-+#define spinlock_t pthread_mutex_t
-+#define __SPIN_LOCK_UNLOCKED(x) PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
-+#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
-+#define spin_lock_init(x) \
-+ do { \
-+ __maybe_unused int __foo; \
-+ pthread_mutexattr_t __foo_attr; \
-+ __foo = pthread_mutexattr_init(&__foo_attr); \
-+ BUG_ON(__foo); \
-+ __foo = pthread_mutexattr_settype(&__foo_attr, \
-+ PTHREAD_MUTEX_ADAPTIVE_NP); \
-+ BUG_ON(__foo); \
-+ __foo = pthread_mutex_init(x, &__foo_attr); \
-+ BUG_ON(__foo); \
-+ } while (0)
-+#define spin_lock(x) \
-+ do { \
-+ __maybe_unused int __foo = pthread_mutex_lock(x); \
-+ BUG_ON(__foo); \
-+ } while (0)
-+#define spin_unlock(x) \
-+ do { \
-+ __maybe_unused int __foo = pthread_mutex_unlock(x); \
-+ BUG_ON(__foo); \
-+ } while (0)
-+#define spin_lock_irq(x) do { \
-+ local_irq_disable(); \
-+ spin_lock(x); \
-+ } while (0)
-+#define spin_unlock_irq(x) do { \
-+ spin_unlock(x); \
-+ local_irq_enable(); \
-+ } while (0)
-+#define spin_lock_irqsave(x, f) do { spin_lock_irq(x); } while (0)
-+#define spin_unlock_irqrestore(x, f) do { spin_unlock_irq(x); } while (0)
-+
-+#define raw_spinlock_t spinlock_t
-+#define raw_spin_lock_init(x) spin_lock_init(x)
-+#define raw_spin_lock_irqsave(x, f) spin_lock(x)
-+#define raw_spin_unlock_irqrestore(x, f) spin_unlock(x)
-+
-+/* Completion stuff */
-+#define DECLARE_COMPLETION(n) int n = 0;
-+#define complete(n) \
-+do { \
-+ *n = 1; \
-+} while(0)
-+#define wait_for_completion(n) \
-+do { \
-+ while (!*n) { \
-+ bman_poll(); \
-+ qman_poll(); \
-+ } \
-+ *n = 0; \
-+} while(0)
-+
-+/* Platform device stuff */
-+struct platform_device { void *dev; };
-+static inline struct
-+platform_device *platform_device_alloc(const char *name __always_unused,
-+ int id __always_unused)
-+{
-+ struct platform_device *ret = malloc(sizeof(*ret));
-+ if (ret)
-+ ret->dev = NULL;
-+ return ret;
-+}
-+#define platform_device_add(pdev) 0
-+#define platform_device_del(pdev) do { ; } while(0)
-+static inline void platform_device_put(struct platform_device *pdev)
-+{
-+ free(pdev);
-+}
-+struct resource {
-+ int unused;
-+};
-+
-+/* Allocator stuff */
-+#define kmalloc(sz, t) malloc(sz)
-+#define vmalloc(sz) malloc(sz)
-+#define kfree(p) do { if (p) free(p); } while (0)
-+static inline void *kzalloc(size_t sz, gfp_t __foo __always_unused)
-+{
-+ void *ptr = malloc(sz);
-+ if (ptr)
-+ memset(ptr, 0, sz);
-+ return ptr;
-+}
-+static inline unsigned long get_zeroed_page(gfp_t __foo __always_unused)
-+{
-+ void *p;
-+ if (posix_memalign(&p, 4096, 4096))
-+ return 0;
-+ memset(p, 0, 4096);
-+ return (unsigned long)p;
-+}
-+static inline void free_page(unsigned long p)
-+{
-+ free((void *)p);
-+}
-+struct kmem_cache {
-+ size_t sz;
-+ size_t align;
-+};
-+#define SLAB_HWCACHE_ALIGN 0
-+static inline struct kmem_cache *kmem_cache_create(const char *n __always_unused,
-+ size_t sz, size_t align, unsigned long flags __always_unused,
-+ void (*c)(void *) __always_unused)
-+{
-+ struct kmem_cache *ret = malloc(sizeof(*ret));
-+ if (ret) {
-+ ret->sz = sz;
-+ ret->align = align;
-+ }
-+ return ret;
-+}
-+static inline void kmem_cache_destroy(struct kmem_cache *c)
-+{
-+ free(c);
-+}
-+static inline void *kmem_cache_alloc(struct kmem_cache *c, gfp_t f __always_unused)
-+{
-+ void *p;
-+ if (posix_memalign(&p, c->align, c->sz))
-+ return NULL;
-+ return p;
-+}
-+static inline void kmem_cache_free(struct kmem_cache *c __always_unused, void *p)
-+{
-+ free(p);
-+}
-+static inline void *kmem_cache_zalloc(struct kmem_cache *c, gfp_t f)
-+{
-+ void *ret = kmem_cache_alloc(c, f);
-+ if (ret)
-+ memset(ret, 0, c->sz);
-+ return ret;
-+}
-+
-+/* Bitfield stuff. */
-+#define BITS_PER_ULONG (sizeof(unsigned long) << 3)
-+#define SHIFT_PER_ULONG (((1 << 5) == BITS_PER_ULONG) ? 5 : 6)
-+#define BITS_MASK(idx) ((unsigned long)1 << ((idx) & (BITS_PER_ULONG - 1)))
-+#define BITS_IDX(idx) ((idx) >> SHIFT_PER_ULONG)
-+static inline unsigned long test_bits(unsigned long mask,
-+ volatile unsigned long *p)
-+{
-+ return *p & mask;
-+}
-+static inline int test_bit(int idx, volatile unsigned long *bits)
-+{
-+ return test_bits(BITS_MASK(idx), bits + BITS_IDX(idx));
-+}
-+static inline void set_bits(unsigned long mask, volatile unsigned long *p)
-+{
-+ *p |= mask;
-+}
-+static inline void set_bit(int idx, volatile unsigned long *bits)
-+{
-+ set_bits(BITS_MASK(idx), bits + BITS_IDX(idx));
-+}
-+static inline void clear_bits(unsigned long mask, volatile unsigned long *p)
-+{
-+ *p &= ~mask;
-+}
-+static inline void clear_bit(int idx, volatile unsigned long *bits)
-+{
-+ clear_bits(BITS_MASK(idx), bits + BITS_IDX(idx));
-+}
-+static inline unsigned long test_and_set_bits(unsigned long mask,
-+ volatile unsigned long *p)
-+{
-+ unsigned long ret = test_bits(mask, p);
-+ set_bits(mask, p);
-+ return ret;
-+}
-+static inline int test_and_set_bit(int idx, volatile unsigned long *bits)
-+{
-+ int ret = test_bit(idx, bits);
-+ set_bit(idx, bits);
-+ return ret;
-+}
-+static inline int test_and_clear_bit(int idx, volatile unsigned long *bits)
-+{
-+ int ret = test_bit(idx, bits);
-+ clear_bit(idx, bits);
-+ return ret;
-+}
-+static inline int find_next_zero_bit(unsigned long *bits, int limit, int idx)
-+{
-+ while ((++idx < limit) && test_bit(idx, bits))
-+ ;
-+ return idx;
-+}
-+static inline int find_first_zero_bit(unsigned long *bits, int limit)
-+{
-+ int idx = 0;
-+ while (test_bit(idx, bits) && (++idx < limit))
-+ ;
-+ return idx;
-+}
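
The bit helpers above treat an array of unsigned long as a flat bitmap: BITS_IDX() picks the word and BITS_MASK() the bit within it. A minimal editorial sketch of how these shims could be exercised (the bitmap name and size are hypothetical, not part of the patch):

        static unsigned long pool_map[2];        /* at least 64 bits on any target */

        static void bitmap_demo(void)
        {
                int idx;

                set_bit(3, pool_map);                     /* mark index 3 as used */
                if (test_bit(3, pool_map))
                        clear_bit(3, pool_map);           /* ...and release it again */
                idx = find_first_zero_bit(pool_map, 64);  /* first free index */
                (void)idx;
        }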
-+
-+static inline u64 div64_u64(u64 n, u64 d)
-+{
-+ return n / d;
-+}
-+
-+#define dmb(opt) { asm volatile("dmb " #opt : : : "memory"); }
-+#define smp_mb() dmb(ish)
-+
-+/* Atomic stuff */
-+typedef struct {
-+ int counter;
-+} atomic_t;
-+
-+#define atomic_read(v) (*(volatile int *)&(v)->counter)
-+#define atomic_set(v, i) (((v)->counter) = (i))
-+static inline void atomic_add(int i, atomic_t *v)
-+{
-+ unsigned long tmp;
-+ int result;
-+
-+ asm volatile("// atomic_add\n"
-+ "1: ldxr %w0, %2\n"
-+ " add %w0, %w0, %w3\n"
-+ " stxr %w1, %w0, %2\n"
-+ " cbnz %w1, 1b"
-+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-+ : "Ir" (i));
-+}
-+
-+static inline int atomic_add_return(int i, atomic_t *v)
-+{
-+ unsigned long tmp;
-+ int result;
-+
-+ asm volatile("// atomic_add_return\n"
-+ "1: ldxr %w0, %2\n"
-+ " add %w0, %w0, %w3\n"
-+ " stlxr %w1, %w0, %2\n"
-+ " cbnz %w1, 1b"
-+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-+ : "Ir" (i)
-+ : "memory");
-+
-+ smp_mb();
-+ return result;
-+}
-+
-+static inline void atomic_sub(int i, atomic_t *v)
-+{
-+ unsigned long tmp;
-+ int result;
-+
-+ asm volatile("// atomic_sub\n"
-+ "1: ldxr %w0, %2\n"
-+ " sub %w0, %w0, %w3\n"
-+ " stxr %w1, %w0, %2\n"
-+ " cbnz %w1, 1b"
-+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-+ : "Ir" (i));
-+}
-+
-+static inline int atomic_sub_return(int i, atomic_t *v)
-+{
-+ unsigned long tmp;
-+ int result;
-+
-+ asm volatile("// atomic_sub_return\n"
-+ "1: ldxr %w0, %2\n"
-+ " sub %w0, %w0, %w3\n"
-+ " stlxr %w1, %w0, %2\n"
-+ " cbnz %w1, 1b"
-+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-+ : "Ir" (i)
-+ : "memory");
-+
-+ smp_mb();
-+ return result;
-+}
-+
-+#define atomic_inc(v) atomic_add(1, v)
-+#define atomic_dec(v) atomic_sub(1, v)
-+
-+#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
-+#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
-+#define atomic_inc_return(v) (atomic_add_return(1, v))
-+#define atomic_dec_return(v) (atomic_sub_return(1, v))
-+#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-+
-+#endif /* HEADER_COMPAT_H */
-diff --git a/drivers/net/dpaa2/qbman/include/drivers/fsl_qbman_base.h b/drivers/net/dpaa2/qbman/include/drivers/fsl_qbman_base.h
-new file mode 100644
-index 0000000..4cb784c
---- /dev/null
-+++ b/drivers/net/dpaa2/qbman/include/drivers/fsl_qbman_base.h
-@@ -0,0 +1,151 @@
-+/* Copyright (C) 2014 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef _FSL_QBMAN_BASE_H
-+#define _FSL_QBMAN_BASE_H
-+
-+/**
-+ * DOC: QBMan basic structures
-+ *
-+ * The QBMan block descriptor, software portal descriptor and Frame descriptor
-+ * are defined here.
-+ *
-+ */
-+
-+/**
-+ * struct qbman_block_desc - qbman block descriptor structure
-+ * @ccsr_reg_bar: CCSR register map.
-+ * @irq_rerr: Recoverable error interrupt line.
-+ * @irq_nrerr: Non-recoverable error interrupt line
-+ *
-+ * Descriptor for a QBMan instance on the SoC. On partitions/targets that do not
-+ * control this QBMan instance, these values may simply be place-holders. The
-+ * idea is simply that we be able to distinguish between them, eg. so that SWP
-+ * descriptors can identify which QBMan instance they belong to.
-+ */
-+struct qbman_block_desc {
-+ void *ccsr_reg_bar;
-+ int irq_rerr;
-+ int irq_nrerr;
-+};
-+
-+enum qbman_eqcr_mode {
-+ qman_eqcr_vb_ring = 2, /* Valid bit, with eqcr in ring mode */
-+ qman_eqcr_vb_array, /* Valid bit, with eqcr in array mode */
-+};
-+
-+/**
-+ * struct qbman_swp_desc - qbman software portal descriptor structure
-+ * @block: The QBMan instance.
-+ * @cena_bar: Cache-enabled portal register map.
-+ * @cinh_bar: Cache-inhibited portal register map.
-+ * @irq: -1 if unused (or unassigned)
-+ * @idx: SWPs within a QBMan are indexed. -1 if opaque to the user.
-+ * @qman_version: the qman version.
-+ * @eqcr_mode: Select the eqcr mode, currently only valid bit ring mode and
-+ * valid bit array mode are supported.
-+ *
-+ * Descriptor for a QBMan software portal, expressed in terms that make sense to
-+ * the user context. Ie. on MC, this information is likely to be true-physical,
-+ * and instantiated statically at compile-time. On GPP, this information is
-+ * likely to be obtained via "discovery" over a partition's "MC bus"
-+ * (ie. in response to a MC portal command), and would take into account any
-+ * virtualisation of the GPP user's address space and/or interrupt numbering.
-+ */
-+struct qbman_swp_desc {
-+ const struct qbman_block_desc *block;
-+ uint8_t *cena_bar;
-+ uint8_t *cinh_bar;
-+ int irq;
-+ int idx;
-+ uint32_t qman_version;
-+ enum qbman_eqcr_mode eqcr_mode;
-+};
-+
-+/* Driver object for managing a QBMan portal */
-+struct qbman_swp;
-+
-+/**
-+ * struct qbman_fd - basic structure for qbman frame descriptor
-+ * @words: for easier/faster copying the whole FD structure.
-+ * @addr_lo: the lower 32 bits of the address in FD.
-+ * @addr_hi: the upper 32 bits of the address in FD.
-+ * @len: the length field in FD.
-+ * @bpid_offset: represent the bpid and offset fields in FD. offset in
-+ * the MS 16 bits, BPID in the LS 16 bits.
-+ * @frc: frame context
-+ * @ctrl: the 32bit control bits including dd, sc,... va, err.
-+ * @flc_lo: the lower 32bit of flow context.
-+ * @flc_hi: the upper 32bits of flow context.
-+ *
-+ * Place-holder for FDs, we represent it via the simplest form that we need for
-+ * now. Different overlays may be needed to support different options, etc. (It
-+ * is impractical to define One True Struct, because the resulting encoding
-+ * routines (lots of read-modify-writes) would be worst-case performance whether
-+ * or not circumstances required them.)
-+ *
-+ * Note, as with all data-structures exchanged between software and hardware (be
-+ * they located in the portal register map or DMA'd to and from main-memory),
-+ * the driver ensures that the caller of the driver API sees the data-structures
-+ * in host-endianness. "struct qbman_fd" is no exception. The 32-bit words
-+ * contained within this structure are represented in host-endianness, even if
-+ * hardware always treats them as little-endian. As such, if any of these fields
-+ * are interpreted in a binary (rather than numerical) fashion by hardware
-+ * blocks (eg. accelerators), then the user should be careful. We illustrate
-+ * with an example;
-+ *
-+ * Suppose the desired behaviour of an accelerator is controlled by the "frc"
-+ * field of the FDs that are sent to it. Suppose also that the behaviour desired
-+ * by the user corresponds to an "frc" value which is expressed as the literal
-+ * sequence of bytes 0xfe, 0xed, 0xab, and 0xba. So "frc" should be the 32-bit
-+ * value in which 0xfe is the first byte and 0xba is the last byte, and as
-+ * hardware is little-endian, this amounts to a 32-bit "value" of 0xbaabedfe. If
-+ * the software is little-endian also, this can simply be achieved by setting
-+ * frc=0xbaabedfe. On the other hand, if software is big-endian, it should set
-+ * frc=0xfeedabba! The best away of avoiding trouble with this sort of thing is
-+ * to treat the 32-bit words as numerical values, in which the offset of a field
-+ * from the beginning of the first byte (as required or generated by hardware)
-+ * is numerically encoded by a left-shift (ie. by raising the field to a
-+ * corresponding power of 2). Ie. in the current example, software could set
-+ * "frc" in the following way, and it would work correctly on both little-endian
-+ * and big-endian operation;
-+ * fd.frc = (0xfe << 0) | (0xed << 8) | (0xab << 16) | (0xba << 24);
-+ */
-+struct qbman_fd {
-+ union {
-+ uint32_t words[8];
-+ struct qbman_fd_simple {
-+ uint32_t addr_lo;
-+ uint32_t addr_hi;
-+ uint32_t len;
-+ uint32_t bpid_offset;
-+ uint32_t frc;
-+ uint32_t ctrl;
-+ uint32_t flc_lo;
-+ uint32_t flc_hi;
-+ } simple;
-+ };
-+};
-+
-+#endif /* !_FSL_QBMAN_BASE_H */
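
To make the endianness note above concrete, here is a minimal editorial sketch of filling the 'simple' overlay of an FD purely numerically (the address, length and BPID values are hypothetical, and <string.h> plus this header are assumed to be included):

        static void fd_fill_example(struct qbman_fd *fd, uint64_t addr,
                                    uint32_t len, uint16_t bpid)
        {
                memset(fd, 0, sizeof(*fd));                  /* start from a clean FD */
                fd->simple.addr_lo = (uint32_t)addr;         /* lower 32 bits of the buffer address */
                fd->simple.addr_hi = (uint32_t)(addr >> 32); /* upper 32 bits of the buffer address */
                fd->simple.len = len;
                fd->simple.bpid_offset = bpid;               /* offset 0 in the MS 16 bits, BPID in the LS 16 bits */
                /* the worked frc example from the comment above */
                fd->simple.frc = (0xfeu << 0) | (0xedu << 8) | (0xabu << 16) | (0xbau << 24);
        }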
-diff --git a/drivers/net/dpaa2/qbman/include/drivers/fsl_qbman_portal.h b/drivers/net/dpaa2/qbman/include/drivers/fsl_qbman_portal.h
-new file mode 100644
-index 0000000..ddcabcf
---- /dev/null
-+++ b/drivers/net/dpaa2/qbman/include/drivers/fsl_qbman_portal.h
-@@ -0,0 +1,1089 @@
-+/* Copyright (C) 2014 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef _FSL_QBMAN_PORTAL_H
-+#define _FSL_QBMAN_PORTAL_H
-+
-+#include <drivers/fsl_qbman_base.h>
-+
-+/**
-+ * DOC: QBMan portal APIs to implement the following functions:
-+ * - Initialize and destroy Software portal object.
-+ * - Read and write Software portal interrupt registers.
-+ * - Enqueue, including setting the enqueue descriptor, and issuing enqueue
-+ * command etc.
-+ * - Dequeue, including setting the dequeue descriptor, issuing dequeue command,
-+ * parsing the dequeue response in DQRR and memory, parsing the state change
-+ * notifications etc.
-+ * - Release, including setting the release descriptor, and issuing the buffer
-+ * release command.
-+ * - Acquire, acquire the buffer from the given buffer pool.
-+ * - FQ management.
-+ * - Channel management, enable/disable CDAN with or without context.
-+ */
-+
-+/**
-+ * qbman_swp_init() - Create a functional object representing the given
-+ * QBMan portal descriptor.
-+ * @d: the given qbman swp descriptor
-+ *
-+ * Return qbman_swp portal object for success, NULL if the object cannot
-+ * be created.
-+ */
-+struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
-+
-+/**
-+ * qbman_swp_finish() - Create and destroy a functional object representing
-+ * the given QBMan portal descriptor.
-+ * @p: the qbman_swp object to be destroyed.
-+ *
-+ */
-+void qbman_swp_finish(struct qbman_swp *p);
-+
-+/**
-+ * qbman_swp_get_desc() - Get the descriptor of the given portal object.
-+ * @p: the given portal object.
-+ *
-+ * Return the descriptor for this portal.
-+ */
-+const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *);
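
A minimal editorial sketch of the portal object lifetime described above, assuming the caller has already filled in a qbman_swp_desc (e.g. via MC discovery); the helper name is hypothetical:

        static int portal_smoke_test(const struct qbman_swp_desc *desc)
        {
                struct qbman_swp *p = qbman_swp_init(desc); /* NULL if the object cannot be created */

                if (!p)
                        return -1;
                (void)qbman_swp_get_desc(p);                /* the descriptor remains retrievable */
                qbman_swp_finish(p);                        /* destroy the functional object */
                return 0;
        }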
-+
-+ /**************/
-+ /* Interrupts */
-+ /**************/
-+
-+/* EQCR ring interrupt */
-+#define QBMAN_SWP_INTERRUPT_EQRI ((uint32_t)0x00000001)
-+/* Enqueue command dispatched interrupt */
-+#define QBMAN_SWP_INTERRUPT_EQDI ((uint32_t)0x00000002)
-+/* DQRR non-empty interrupt */
-+#define QBMAN_SWP_INTERRUPT_DQRI ((uint32_t)0x00000004)
-+/* RCR ring interrupt */
-+#define QBMAN_SWP_INTERRUPT_RCRI ((uint32_t)0x00000008)
-+/* Release command dispatched interrupt */
-+#define QBMAN_SWP_INTERRUPT_RCDI ((uint32_t)0x00000010)
-+/* Volatile dequeue command interrupt */
-+#define QBMAN_SWP_INTERRUPT_VDCI ((uint32_t)0x00000020)
-+
-+/**
-+ * qbman_swp_interrupt_get_vanish() - Get the data in software portal
-+ * interrupt status disable register.
-+ * @p: the given software portal object.
-+ *
-+ * Return the settings in SWP_ISDR register.
-+ */
-+uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p);
-+
-+/**
-+ * qbman_swp_interrupt_set_vanish() - Set the data in software portal
-+ * interrupt status disable register.
-+ * @p: the given software portal object.
-+ * @mask: The value to set in SWP_ISDR register.
-+ */
-+void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask);
-+
-+/**
-+ * qbman_swp_interrupt_read_status() - Get the data in software portal
-+ * interrupt status register.
-+ * @p: the given software portal object.
-+ *
-+ * Return the settings in SWP_ISR register.
-+ */
-+uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p);
-+
-+/**
-+ * qbman_swp_interrupt_clear_status() - Set the data in software portal
-+ * interrupt status register.
-+ * @p: the given software portal object.
-+ * @mask: The value to set in SWP_ISR register.
-+ */
-+void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask);
-+
-+/**
-+ * qbman_swp_interrupt_get_trigger() - Get the data in software portal
-+ * interrupt enable register.
-+ * @p: the given software portal object.
-+ *
-+ * Return the settings in SWP_IER register.
-+ */
-+uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p);
-+
-+/**
-+ * qbman_swp_interrupt_set_trigger() - Set the data in software portal
-+ * interrupt enable register.
-+ * @p: the given software portal object.
-+ * @mask: The value to set in SWP_IER register.
-+ */
-+void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask);
-+
-+/**
-+ * qbman_swp_interrupt_get_inhibit() - Get the data in software portal
-+ * interrupt inhibit register.
-+ * @p: the given software portal object.
-+ *
-+ * Return the settings in SWP_IIR register.
-+ */
-+int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p);
-+
-+/**
-+ * qbman_swp_interrupt_set_inhibit() - Set the data in software portal
-+ * interrupt inhibit register.
-+ * @p: the given software portal object.
-+ * @inhibit: The value to set in the SWP_IIR register.
-+ */
-+void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);
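
As an editorial illustration of the register accessors above, a poll-mode user might quiesce the portal interrupt sources as follows (the helper name is hypothetical):

        static void quiesce_portal_irqs(struct qbman_swp *p)
        {
                qbman_swp_interrupt_set_trigger(p, 0);  /* enable no sources in SWP_IER */
                qbman_swp_interrupt_set_inhibit(p, 1);  /* inhibit the portal interrupt output */
        }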
-+
-+ /************/
-+ /* Dequeues */
-+ /************/
-+
-+/**
-+ * struct qbman_result - structure for qbman dequeue response and/or
-+ * notification.
-+ * @dont_manipulate_directly: the 16 32bit data to represent the whole
-+ * possible qbman dequeue result.
-+ */
-+struct qbman_result {
-+ uint32_t dont_manipulate_directly[16];
-+};
-+
-+/* TODO:
-+ * A DQRI interrupt can be generated when there are dequeue results on the
-+ * portal's DQRR (this mechanism does not deal with "pull" dequeues to
-+ * user-supplied 'storage' addresses). There are two parameters to this
-+ * interrupt source, one is a threshold and the other is a timeout. The
-+ * interrupt will fire if either the fill-level of the ring exceeds 'thresh', or
-+ * if the ring has been non-empty for longer than 'timeout' nanoseconds.
-+ * For timeout, an approximation to the desired nanosecond-granularity value is
-+ * made, so there are get and set APIs to allow the user to see what actual
-+ * timeout is set (compared to the timeout that was requested). */
-+int qbman_swp_dequeue_thresh(struct qbman_swp *s, unsigned int thresh);
-+int qbman_swp_dequeue_set_timeout(struct qbman_swp *s, unsigned int timeout);
-+int qbman_swp_dequeue_get_timeout(struct qbman_swp *s, unsigned int *timeout);
-+
-+
-+/* ------------------- */
-+/* Push-mode dequeuing */
-+/* ------------------- */
-+
-+/* The user of a portal can enable and disable push-mode dequeuing of up to 16
-+ * channels independently. It does not specify this toggling by channel IDs, but
-+ * rather by specifying the index (from 0 to 15) that has been mapped to the
-+ * desired channel.
-+ */
-+
-+/**
-+ * qbman_swp_push_get() - Get the push dequeue setup.
-+ * @s: the software portal object.
-+ * @channel_idx: the channel index to query.
-+ * @enabled: returned boolean to show whether the push dequeue is enabled for
-+ * the given channel.
-+ */
-+void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled);
-+
-+/**
-+ * qbman_swp_push_set() - Enable or disable push dequeue.
-+ * @s: the software portal object.
-+ * @channel_idx: the channel index.
-+ * @enable: enable or disable push dequeue.
-+ *
-+ * The user of a portal can enable and disable push-mode dequeuing of up to 16
-+ * channels independently. It does not specify this toggling by channel IDs, but
-+ * rather by specifying the index (from 0 to 15) that has been mapped to the
-+ * desired channel.
-+ */
-+void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable);
-+
-+/* ------------------- */
-+/* Pull-mode dequeuing */
-+/* ------------------- */
-+
-+/**
-+ * struct qbman_pull_desc - the structure for pull dequeue descriptor
-+ * @dont_manipulate_directly: the 6 32bit data to represent the whole
-+ * possible settings for pull dequeue descriptor.
-+ */
-+struct qbman_pull_desc {
-+ uint32_t dont_manipulate_directly[6];
-+};
-+
-+enum qbman_pull_type_e {
-+ /* dequeue with priority precedence, respect intra-class scheduling */
-+ qbman_pull_type_prio = 1,
-+ /* dequeue with active FQ precedence, respect ICS */
-+ qbman_pull_type_active,
-+ /* dequeue with active FQ precedence, no ICS */
-+ qbman_pull_type_active_noics
-+};
-+
-+/**
-+ * qbman_pull_desc_clear() - Clear the contents of a descriptor to
-+ * default/starting state.
-+ * @d: the pull dequeue descriptor to be cleared.
-+ */
-+void qbman_pull_desc_clear(struct qbman_pull_desc *d);
-+
-+/**
-+ * qbman_pull_desc_set_storage()- Set the pull dequeue storage
-+ * @d: the pull dequeue descriptor to be set.
-+ * @storage: the pointer of the memory to store the dequeue result.
-+ * @storage_phys: the physical address of the storage memory.
-+ * @stash: to indicate whether write allocate is enabled.
-+ *
-+ * If not called, or if called with 'storage' as NULL, the result pull dequeues
-+ * will produce results to DQRR. If 'storage' is non-NULL, then results are
-+ * produced to the given memory location (using the physical/DMA address which
-+ * the caller provides in 'storage_phys'), and 'stash' controls whether or not
-+ * those writes to main-memory express a cache-warming attribute.
-+ */
-+void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
-+ struct qbman_result *storage,
-+ dma_addr_t storage_phys,
-+ int stash);
-+/**
-+ * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued.
-+ * @d: the pull dequeue descriptor to be set.
-+ * @numframes: number of frames to be set, must be between 1 and 16, inclusive.
-+ */
-+void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d,
-+ uint8_t numframes);
-+/**
-+ * qbman_pull_desc_set_token() - Set dequeue token for pull command
-+ * @d: the dequeue descriptor
-+ * @token: the token to be set
-+ *
-+ * token is the value that shows up in the dequeue response that can be used to
-+ * detect when the results have been published. The easiest technique is to zero
-+ * result "storage" before issuing a dequeue, and use any non-zero 'token' value.
-+ */
-+void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token);
-+
-+/* Exactly one of the following descriptor "actions" should be set. (Calling any
-+ * one of these will replace the effect of any prior call to one of these.)
-+ * - pull dequeue from the given frame queue (FQ)
-+ * - pull dequeue from any FQ in the given work queue (WQ)
-+ * - pull dequeue from any FQ in any WQ in the given channel
-+ */
-+/**
-+ * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues.
-+ * @fqid: the frame queue index of the given FQ.
-+ */
-+void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid);
-+
-+/**
-+ * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues.
-+ * @wqid: composed of channel id and wqid within the channel.
-+ * @dct: the dequeue command type.
-+ */
-+void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
-+ enum qbman_pull_type_e dct);
-+
-+/**
-+ * qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
-+ * dequeues.
-+ * @chid: the channel id to be dequeued.
-+ * @dct: the dequeue command type.
-+ */
-+void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
-+ enum qbman_pull_type_e dct);
-+
-+/**
-+ * qbman_swp_pull() - Issue the pull dequeue command
-+ * @s: the software portal object.
-+ * @d: the software portal descriptor which has been configured with
-+ * the set of qbman_pull_desc_set_*() calls.
-+ *
-+ * Return 0 for success, and -EBUSY if the software portal is not ready
-+ * to do pull dequeue.
-+ */
-+int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d);
-+
-+/* -------------------------------- */
-+/* Polling DQRR for dequeue results */
-+/* -------------------------------- */
-+
-+/**
-+ * qbman_swp_dqrr_next() - Get a valid DQRR entry.
-+ * @s: the software portal object.
-+ *
-+ * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
-+ * only once, so repeated calls can return a sequence of DQRR entries, without
-+ * requiring they be consumed immediately or in any particular order.
-+ */
-+const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *);
-+
-+/**
-+ * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from
-+ * qbman_swp_dqrr_next().
-+ * @s: the software portal object.
-+ * @dq: the DQRR entry to be consumed.
-+ */
-+void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct qbman_result *dq);
-+
-+/**
-+ * qbman_get_dqrr_idx() - Get dqrr index from the given dqrr
-+ * @dqrr: the given dqrr object.
-+ *
-+ * Return dqrr index.
-+ */
-+uint8_t qbman_get_dqrr_idx(struct qbman_result *dqrr);
-+
-+/**
-+ * qbman_get_dqrr_from_idx() - Use index to get the dqrr entry from the
-+ * given portal
-+ * @s: the given portal.
-+ * @idx: the dqrr index.
-+ *
-+ * Return dqrr entry object.
-+ */
-+struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx);
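
Tying the push-mode and DQRR-polling APIs above together, a minimal editorial sketch of one poll pass (channel index 0 and the frame handling are hypothetical):

        static void enable_and_poll(struct qbman_swp *s)
        {
                const struct qbman_result *dq;

                qbman_swp_push_set(s, 0, 1);            /* enable push dequeue on channel index 0, once */
                for (;;) {
                        dq = qbman_swp_dqrr_next(s);    /* NULL when no unconsumed entry is pending */
                        if (!dq)
                                break;
                        if (qbman_result_is_DQ(dq)) {
                                const struct qbman_fd *fd = qbman_result_DQ_fd(dq);
                                (void)fd;               /* hand the frame to the application here */
                        }
                        qbman_swp_dqrr_consume(s, dq);  /* give the DQRR entry back to QBMan */
                }
        }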
-+
-+/* ------------------------------------------------- */
-+/* Polling user-provided storage for dequeue results */
-+/* ------------------------------------------------- */
-+
-+/**
-+ * qbman_result_has_new_result() - Check and get the dequeue response from the
-+ * dq storage memory set in pull dequeue command
-+ * @s: the software portal object.
-+ * @dq: the dequeue result read from the memory.
-+ *
-+ * Only used for user-provided storage of dequeue results, not DQRR. For
-+ * efficiency purposes, the driver will perform any required endianness
-+ * conversion to ensure that the user's dequeue result storage is in host-endian
-+ * format (whether or not that is the same as the little-endian format that
-+ * hardware DMA'd to the user's storage). As such, once the user has called
-+ * qbman_result_has_new_result() and been returned a valid dequeue result,
-+ * they should not call it again on the same memory location (except of course
-+ * if another dequeue command has been executed to produce a new result to that
-+ * location).
-+ *
-+ * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
-+ * dequeue result.
-+ */
-+int qbman_result_has_new_result(struct qbman_swp *s,
-+ const struct qbman_result *dq);
-+
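The pull-dequeue flow into user-provided storage can be sketched as follows; this is editorial illustration only, with hypothetical storage, physical address and FQID arguments, <string.h> assumed, and token value 1 following the "zero the storage, use a non-zero token" technique described for qbman_pull_desc_set_token():

        static int pull_one_frame(struct qbman_swp *s, uint32_t fqid,
                                  struct qbman_result *storage,
                                  dma_addr_t storage_phys)
        {
                struct qbman_pull_desc pd;

                memset(storage, 0, sizeof(*storage));   /* zero so the token test can work */
                qbman_pull_desc_clear(&pd);
                qbman_pull_desc_set_storage(&pd, storage, storage_phys, 0);
                qbman_pull_desc_set_numframes(&pd, 1);
                qbman_pull_desc_set_token(&pd, 1);
                qbman_pull_desc_set_fq(&pd, fqid);
                if (qbman_swp_pull(s, &pd))
                        return -1;                      /* portal busy, retry later */
                while (!qbman_result_has_new_result(s, storage))
                        ;                               /* spin until the result is published */
                return 0;
        }
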
-+/* -------------------------------------------------------- */
-+/* Parsing dequeue entries (DQRR and user-provided storage) */
-+/* -------------------------------------------------------- */
-+
-+/**
-+ * qbman_result_is_DQ() - Check whether the dequeue result is a dequeue response or not
-+ * @dq: the dequeue result to be checked.
-+ *
-+ * DQRR entries may contain non-dequeue results, ie. notifications
-+ */
-+int qbman_result_is_DQ(const struct qbman_result *);
-+
-+/**
-+ * qbman_result_is_SCN() - Check whether the dequeue result is a notification or not
-+ * @dq: the dequeue result to be checked.
-+ *
-+ * All the non-dequeue results (FQDAN/CDAN/CSCN/...) are "state change
-+ * notifications" of one type or another. Some APIs apply to all of them, of the
-+ * form qbman_result_SCN_***().
-+ */
-+static inline int qbman_result_is_SCN(const struct qbman_result *dq)
-+{
-+ return !qbman_result_is_DQ(dq);
-+}
-+
-+/* Recognise different notification types, only required if the user allows for
-+ * these to occur, and cares about them when they do.
-+ */
-+
-+/**
-+ * qbman_result_is_FQDAN() - Check for FQ Data Availability
-+ * @dq: the qbman_result object.
-+ *
-+ * Return 1 if this is FQDAN.
-+ */
-+int qbman_result_is_FQDAN(const struct qbman_result *dq);
-+
-+/**
-+ * qbman_result_is_CDAN() - Check for Channel Data Availability
-+ * @dq: the qbman_result object to check.
-+ *
-+ * Return 1 if this is CDAN.
-+ */
-+int qbman_result_is_CDAN(const struct qbman_result *dq);
-+
-+/**
-+ * qbman_result_is_CSCN() - Check for Congestion State Change
-+ * @dq: the qbman_result object to check.
-+ *
-+ * Return 1 if this is CSCN.
-+ */
-+int qbman_result_is_CSCN(const struct qbman_result *dq);
-+
-+/**
-+ * qbman_result_is_BPSCN() - Check for Buffer Pool State Change.
-+ * @dq: the qbman_result object to check.
-+ *
-+ * Return 1 if this is BPSCN.
-+ */
-+int qbman_result_is_BPSCN(const struct qbman_result *dq);
-+
-+/**
-+ * qbman_result_is_CGCU() - Check for Congestion Group Count Update.
-+ * @dq: the qbman_result object to check.
-+ *
-+ * Return 1 if this is CGCU.
-+ */
-+int qbman_result_is_CGCU(const struct qbman_result *dq);
-+
-+/* Frame queue state change notifications; (FQDAN in theory counts too as it
-+ * leaves a FQ parked, but it is primarily a data availability notification)
-+ */
-+
-+/**
-+ * qbman_result_is_FQRN() - Check for FQ Retirement Notification.
-+ * @dq: the qbman_result object to check.
-+ *
-+ * Return 1 if this is FQRN.
-+ */
-+int qbman_result_is_FQRN(const struct qbman_result *);
-+
-+/**
-+ * qbman_result_is_FQRNI() - Check for FQ Retirement Immediate
-+ * @dq: the qbman_result object to check.
-+ *
-+ * Return 1 if this is FQRNI.
-+ */
-+int qbman_result_is_FQRNI(const struct qbman_result *);
-+
-+/**
-+ * qbman_result_is_FQPN() - Check for FQ Park Notification
-+ * @dq: the qbman_result object to check.
-+ *
-+ * Return 1 if this is FQPN.
-+ */
-+int qbman_result_is_FQPN(const struct qbman_result *dq);
-+
-+/* Parsing frame dequeue results (qbman_result_is_DQ() must be TRUE)
-+ */
-+/* FQ empty */
-+#define QBMAN_DQ_STAT_FQEMPTY 0x80
-+/* FQ held active */
-+#define QBMAN_DQ_STAT_HELDACTIVE 0x40
-+/* FQ force eligible */
-+#define QBMAN_DQ_STAT_FORCEELIGIBLE 0x20
-+/* Valid frame */
-+#define QBMAN_DQ_STAT_VALIDFRAME 0x10
-+/* FQ ODP enable */
-+#define QBMAN_DQ_STAT_ODPVALID 0x04
-+/* Volatile dequeue */
-+#define QBMAN_DQ_STAT_VOLATILE 0x02
-+/* volatile dequeue command is expired */
-+#define QBMAN_DQ_STAT_EXPIRED 0x01
-+
-+/**
-+ * qbman_result_DQ_flags() - Get the STAT field of dequeue response
-+ * @dq: the dequeue result.
-+ *
-+ * Return the STAT field.
-+ */
-+uint32_t qbman_result_DQ_flags(const struct qbman_result *dq);
-+
-+/**
-+ * qbman_result_DQ_is_pull() - Check whether the dq response is from a pull
-+ * command.
-+ * @dq: the dequeue result.
-+ *
-+ * Return 1 for volatile(pull) dequeue, 0 for static dequeue.
-+ */
-+static inline int qbman_result_DQ_is_pull(const struct qbman_result *dq)
-+{
-+ return (int)(qbman_result_DQ_flags(dq) & QBMAN_DQ_STAT_VOLATILE);
-+}
-+
-+/**
-+ * qbman_result_DQ_is_pull_complete() - Check whether the pull command is
-+ * completed.
-+ * @dq: the dequeue result.
-+ *
-+ * Return boolean.
-+ */
-+static inline int qbman_result_DQ_is_pull_complete(
-+ const struct qbman_result *dq)
-+{
-+ return (int)(qbman_result_DQ_flags(dq) & QBMAN_DQ_STAT_EXPIRED);
-+}
-+
-+/**
-+ * qbman_result_DQ_seqnum() - Get the seqnum field in dequeue response
-+ * seqnum is valid only if VALIDFRAME flag is TRUE
-+ * @dq: the dequeue result.
-+ *
-+ * Return seqnum.
-+ */
-+uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq);
-+
-+/**
-+ * qbman_result_DQ_odpid() - Get the odpid field in dequeue response
-+ * odpid is valid only if ODPVALID flag is TRUE.
-+ * @dq: the dequeue result.
-+ *
-+ * Return odpid.
-+ */
-+uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq);
-+
-+/**
-+ * qbman_result_DQ_fqid() - Get the fqid in dequeue response
-+ * @dq: the dequeue result.
-+ *
-+ * Return fqid.
-+ */
-+uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq);
-+
-+/**
-+ * qbman_result_DQ_byte_count() - Get the byte count in dequeue response
-+ * @dq: the dequeue result.
-+ *
-+ * Return the byte count remaining in the FQ.
-+ */
-+uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq);
-+
-+/**
-+ * qbman_result_DQ_frame_count - Get the frame count in dequeue response
-+ * @dq: the dequeue result.
-+ *
-+ * Return the frame count remaining in the FQ.
-+ */
-+uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq);
-+
-+/**
-+ * qbman_result_DQ_fqd_ctx() - Get the frame queue context in dequeue response
-+ * @dq: the dequeue result.
-+ *
-+ * Return the frame queue context.
-+ */
-+uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq);
-+
-+/**
-+ * qbman_result_DQ_fd() - Get the frame descriptor in dequeue response
-+ * @dq: the dequeue result.
-+ *
-+ * Return the frame descriptor.
-+ */
-+const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq);
-+
-+/* State-change notifications (FQDAN/CDAN/CSCN/...). */
-+
-+/**
-+ * qbman_result_SCN_state() - Get the state field in State-change notification
-+ * @scn: the state change notification.
-+ *
-+ * Return the state in the notification.
-+ */
-+uint8_t qbman_result_SCN_state(const struct qbman_result *scn);
-+
-+/**
-+ * qbman_result_SCN_rid() - Get the resource id from the notification
-+ * @scn: the state change notification.
-+ *
-+ * Return the resource id.
-+ */
-+uint32_t qbman_result_SCN_rid(const struct qbman_result *scn);
-+
-+/**
-+ * qbman_result_SCN_ctx() - get the context from the notification
-+ * @scn: the state change notification.
-+ *
-+ * Return the context.
-+ */
-+uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn);
-+
-+/**
-+ * qbman_result_SCN_state_in_mem() - Get the state in notification written
-+ * in memory
-+ * @scn: the state change notification.
-+ *
-+ * Return the state.
-+ */
-+uint8_t qbman_result_SCN_state_in_mem(const struct qbman_result *scn);
-+
-+/**
-+ * qbman_result_SCN_rid_in_mem() - Get the resource id in notification written
-+ * in memory.
-+ * @scn: the state change notification.
-+ *
-+ * Return the resource id.
-+ */
-+uint32_t qbman_result_SCN_rid_in_mem(const struct qbman_result *scn);
-+
-+
-+/* Type-specific "resource IDs". Mainly for illustration purposes, though it
-+ * also gives the appropriate type widths.
-+ */
-+/* Get the FQID from the FQDAN */
-+#define qbman_result_FQDAN_fqid(dq) qbman_result_SCN_rid(dq)
-+/* Get the FQID from the FQRN */
-+#define qbman_result_FQRN_fqid(dq) qbman_result_SCN_rid(dq)
-+/* Get the FQID from the FQRNI */
-+#define qbman_result_FQRNI_fqid(dq) qbman_result_SCN_rid(dq)
-+/* Get the FQID from the FQPN */
-+#define qbman_result_FQPN_fqid(dq) qbman_result_SCN_rid(dq)
-+/* Get the channel ID from the CDAN */
-+#define qbman_result_CDAN_cid(dq) ((uint16_t)qbman_result_SCN_rid(dq))
-+/* Get the CGID from the CSCN */
-+#define qbman_result_CSCN_cgid(dq) ((uint16_t)qbman_result_SCN_rid(dq))
-+
-+/**
-+ * qbman_result_bpscn_bpid() - Get the bpid from BPSCN
-+ * @scn: the state change notification.
-+ *
-+ * Return the buffer pool id.
-+ */
-+uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn);
-+
-+/**
-+ * qbman_result_bpscn_has_free_bufs() - Check whether there are free
-+ * buffers in the pool from BPSCN.
-+ * @scn: the state change notification.
-+ *
-+ * Return the number of free buffers.
-+ */
-+int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn);
-+
-+/**
-+ * qbman_result_bpscn_is_depleted() - Check BPSCN to see whether the
-+ * buffer pool is depleted.
-+ * @scn: the state change notification.
-+ *
-+ * Return the status of buffer pool depletion.
-+ */
-+int qbman_result_bpscn_is_depleted(const struct qbman_result *scn);
-+
-+/**
-+ * qbman_result_bpscn_is_surplus() - Check BPSCN to see whether the buffer
-+ * pool is surplus or not.
-+ * @scn: the state change notification.
-+ *
-+ * Return the status of buffer pool surplus.
-+ */
-+int qbman_result_bpscn_is_surplus(const struct qbman_result *scn);
-+
-+/**
-+ * qbman_result_bpscn_ctx() - Get the BPSCN CTX from BPSCN message
-+ * @scn: the state change notification.
-+ *
-+ * Return the BPSCN context.
-+ */
-+uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn);
-+
-+/* Parsing CGCU */
-+/**
-+ * qbman_result_cgcu_cgid() - Check CGCU resource id, i.e. cgid
-+ * @scn: the state change notification.
-+ *
-+ * Return the CGCU resource id.
-+ */
-+uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn);
-+
-+/**
-+ * qbman_result_cgcu_icnt() - Get the I_CNT from CGCU
-+ * @scn: the state change notification.
-+ *
-+ * Return instantaneous count in the CGCU notification.
-+ */
-+uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn);
-+
-+ /************/
-+ /* Enqueues */
-+ /************/
-+
-+/**
-+ * struct qbman_eq_desc - structure of enqueue descriptor
-+ * @dont_manipulate_directly: the 8 32bit data to represent the whole
-+ * possible qbman enqueue setting in enqueue descriptor.
-+ */
-+struct qbman_eq_desc {
-+ uint32_t dont_manipulate_directly[8];
-+};
-+
-+/**
-+ * struct qbman_eq_response - structure of enqueue response
-+ * @dont_manipulate_directly: the 16 32bit data to represent the whole
-+ * enqueue response.
-+ */
-+struct qbman_eq_response {
-+ uint32_t dont_manipulate_directly[16];
-+};
-+
-+/**
-+ * qbman_eq_desc_clear() - Clear the contents of a descriptor to
-+ * default/starting state.
-+ * @d: the given enqueue descriptor.
-+ */
-+void qbman_eq_desc_clear(struct qbman_eq_desc *d);
-+
-+/* Exactly one of the following descriptor "actions" should be set. (Calling
-+ * any one of these will replace the effect of any prior call to one of these.)
-+ * - enqueue without order-restoration
-+ * - enqueue with order-restoration
-+ * - fill a hole in the order-restoration sequence, without any enqueue
-+ * - advance NESN (Next Expected Sequence Number), without any enqueue
-+ * 'respond_success' indicates whether an enqueue response should be DMA'd
-+ * after success (otherwise a response is DMA'd only after failure).
-+ * 'incomplete' indicates that other fragments of the same 'seqnum' are yet to
-+ * be enqueued.
-+ */
-+
-+/**
-+ * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
-+ * @d: the enqueue descriptor.
-+ * @respond_success: 1 = enqueue with response always; 0 = enqueue with
-+ * rejections returned on a FQ.
-+ */
-+void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
-+/**
-+ * qbman_eq_desc_set_orp() - Set order-restoration in the enqueue descriptor
-+ * @d: the enqueue descriptor.
-+ * @respond_success: 1 = enqueue with response always; 0 = enqueue with
-+ * rejections returned on a FQ.
-+ * @opr_id: the order point record id.
-+ * @seqnum: the order restoration sequence number.
-+ * @incomplete: indicates whether more fragments using the same sequence
-+ * number are yet to be enqueued.
-+ */
-+void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
-+ uint32_t opr_id, uint32_t seqnum, int incomplete);
-+
-+/**
-+ * qbman_eq_desc_set_orp_hole() - fill a hole in the order-restoration sequence
-+ * without any enqueue
-+ * @d: the enqueue descriptor.
-+ * @opr_id: the order point record id.
-+ * @seqnum: the order restoration sequence number.
-+ */
-+void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint32_t opr_id,
-+ uint32_t seqnum);
-+
-+/**
-+ * qbman_eq_desc_set_orp_nesn() - advance NESN (Next Expected Sequence Number)
-+ * without any enqueue
-+ * @d: the enqueue descriptor.
-+ * @opr_id: the order point record id.
-+ * @seqnum: the order restoration sequence number.
-+ */
-+void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint32_t opr_id,
-+ uint32_t seqnum);
-+/**
-+ * qbman_eq_desc_set_response() - Set the enqueue response info.
-+ * @d: the enqueue descriptor
-+ * @storage_phys: the physical address of the enqueue response in memory.
-+ * @stash: indicates whether write allocation is enabled or not.
-+ *
-+ * In the case where an enqueue response is DMA'd, this determines where that
-+ * response should go. (The physical/DMA address is given for hardware's
-+ * benefit, but software should interpret it as a "struct qbman_eq_response"
-+ * data structure.) 'stash' controls whether or not the write to main-memory
-+ * expresses a cache-warming attribute.
-+ */
-+void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
-+ dma_addr_t storage_phys,
-+ int stash);
-+
-+/**
-+ * qbman_eq_desc_set_token() - Set token for the enqueue command
-+ * @d: the enqueue descriptor
-+ * @token: the token to be set.
-+ *
-+ * token is the value that shows up in an enqueue response that can be used to
-+ * detect when the results have been published. The easiest technique is to zero
-+ * result "storage" before issuing an enqueue, and use any non-zero 'token'
-+ * value.
-+ */
-+void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token);
-+
-+/**
-+ * Exactly one of the following descriptor "targets" should be set. (Calling any
-+ * one of these will replace the effect of any prior call to one of these.)
-+ * - enqueue to a frame queue
-+ * - enqueue to a queuing destination
-+ * Note that none of these will have any effect if the "action" type has been
-+ * set to "orp_hole" or "orp_nesn".
-+ */
-+/**
-+ * qbman_eq_desc_set_fq() - Set Frame Queue id for the enqueue command
-+ * @d: the enqueue descriptor
-+ * @fqid: the id of the frame queue to be enqueued.
-+ */
-+void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid);
-+
-+/**
-+ * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command.
-+ * @d: the enqueue descriptor
-+ * @qdid: the id of the queuing destination to be enqueued.
-+ * @qd_bin: the queuing destination bin
-+ * @qd_prio: the queuing destination priority.
-+ */
-+void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
-+ uint32_t qd_bin, uint32_t qd_prio);
-+
-+/**
-+ * qbman_eq_desc_set_eqdi() - enable/disable EQDI interrupt
-+ * @d: the enqueue descriptor
-+ * @enable: boolean to enable/disable EQDI
-+ *
-+ * Determines whether or not the portal's EQDI interrupt source should be
-+ * asserted after the enqueue command is completed.
-+ */
-+void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable);
-+
-+/**
-+ * qbman_eq_desc_set_dca() - Set DCA mode in the enqueue command.
-+ * @d: the enqueue descriptor.
-+ * @enable: enabled/disable DCA mode.
-+ * @dqrr_idx: DCAP_CI, the DCAP consumer index.
-+ * @park: determines whether to park the FQ or not.
-+ *
-+ * Determines whether or not a portal DQRR entry should be consumed once the
-+ * enqueue command is completed. (And if so, and the DQRR entry corresponds to a
-+ * held-active (order-preserving) FQ, whether the FQ should be parked instead of
-+ * being rescheduled.)
-+ */
-+void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
-+ uint32_t dqrr_idx, int park);
-+
-+/**
-+ * qbman_swp_enqueue() - Issue an enqueue command.
-+ * @s: the software portal used for enqueue.
-+ * @d: the enqueue descriptor.
-+ * @fd: the frame descriptor to be enqueued.
-+ *
-+ * Please note that 'fd' should only be NULL if the "action" of the
-+ * descriptor is "orp_hole" or "orp_nesn".
-+ *
-+ * Return 0 for a successful enqueue, -EBUSY if the EQCR is not ready.
-+ */
-+int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
-+ const struct qbman_fd *fd);
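-+
-+/*
-+ * Illustrative usage (editorial sketch, not part of the original patch):
-+ * a minimal enqueue of one frame descriptor to a frame queue.  It assumes
-+ * 's' is an affined software portal, 'fd' a populated frame descriptor,
-+ * 'fqid' a valid frame queue id, and that the descriptor-clearing and
-+ * no-ORP helpers declared earlier in this header are available.
-+ *
-+ *	struct qbman_eq_desc d;
-+ *
-+ *	qbman_eq_desc_clear(&d);            // assumed helper, declared earlier
-+ *	qbman_eq_desc_set_no_orp(&d, 0);    // assumed helper, no order restoration
-+ *	qbman_eq_desc_set_fq(&d, fqid);
-+ *	while (qbman_swp_enqueue(s, &d, fd) == -EBUSY)
-+ *		;                           // EQCR full, retry
-+ */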
-+
-+/* TODO:
-+ * qbman_swp_enqueue_thresh() - Set threshold for EQRI interrupt.
-+ * @s: the software portal.
-+ * @thresh: the threshold to trigger the EQRI interrupt.
-+ *
-+ * An EQRI interrupt can be generated when the fill-level of EQCR falls below
-+ * the 'thresh' value set here. Setting thresh==0 (the default) disables.
-+ */
-+int qbman_swp_enqueue_thresh(struct qbman_swp *s, unsigned int thresh);
-+
-+ /*******************/
-+ /* Buffer releases */
-+ /*******************/
-+/**
-+ * struct qbman_release_desc - The structure for buffer release descriptor
-+ * @dont_manipulate_directly: 32-bit word holding the raw settings of the
-+ * qbman release descriptor; do not modify it directly.
-+ */
-+struct qbman_release_desc {
-+ uint32_t dont_manipulate_directly[1];
-+};
-+
-+/**
-+ * qbman_release_desc_clear() - Clear the contents of a descriptor to
-+ * default/starting state.
-+ * @d: the qbman release descriptor.
-+ */
-+void qbman_release_desc_clear(struct qbman_release_desc *d);
-+
-+/**
-+ * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
-+ * @d: the qbman release descriptor.
-+ * @bpid: the id of the buffer pool to release buffers into.
-+ */
-+void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint32_t bpid);
-+
-+/**
-+ * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
-+ * interrupt source should be asserted after the release command is completed.
-+ * @d: the qbman release descriptor.
-+ * @enable: boolean to enable/disable the RCDI interrupt.
-+ */
-+void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);
-+
-+/**
-+ * qbman_swp_release() - Issue a buffer release command.
-+ * @s: the software portal object.
-+ * @d: the release descriptor.
-+ * @buffers: a pointer to the buffer address(es) to be released.
-+ * @num_buffers: number of buffers to be released, must be less than 8.
-+ *
-+ * Return 0 for success, -EBUSY if the release command ring is not ready.
-+ */
-+int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
-+ const uint64_t *buffers, unsigned int num_buffers);
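-+
-+/*
-+ * Illustrative usage (editorial sketch, not part of the original patch):
-+ * releasing a single buffer back to a buffer pool, mirroring the pattern
-+ * used by dpaa2_mbuf_release() later in this patch.  'swp' is an affined
-+ * software portal, 'buf' the buffer address (as seen by hardware) and
-+ * 'bpid' the target buffer pool id.
-+ *
-+ *	struct qbman_release_desc rd;
-+ *	int ret;
-+ *
-+ *	qbman_release_desc_clear(&rd);
-+ *	qbman_release_desc_set_bpid(&rd, bpid);
-+ *	do {
-+ *		ret = qbman_swp_release(swp, &rd, &buf, 1);
-+ *	} while (ret == -EBUSY);            // release ring full, retry
-+ */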
-+
-+/* TODO:
-+ * qbman_swp_release_thresh() - Set threshold for RCRI interrupt
-+ * @s: the software portal.
-+ * @thresh: the threshold.
-+ * An RCRI interrupt can be generated when the fill-level of RCR falls below
-+ * the 'thresh' value set here. Setting thresh==0 (the default) disables.
-+ */
-+int qbman_swp_release_thresh(struct qbman_swp *s, unsigned int thresh);
-+
-+ /*******************/
-+ /* Buffer acquires */
-+ /*******************/
-+/**
-+ * qbman_swp_acquire() - Issue a buffer acquire command.
-+ * @s: the software portal object.
-+ * @bpid: the buffer pool index.
-+ * @buffers: a pointer to storage for the acquired buffer address(es).
-+ * @num_buffers: number of buffers to be acquired, must be less than 8.
-+ *
-+ * Return the number of buffers acquired, or a negative error code if the
-+ * acquire command fails.
-+ */
-+int qbman_swp_acquire(struct qbman_swp *s, uint32_t bpid, uint64_t *buffers,
-+ unsigned int num_buffers);
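-+
-+/*
-+ * Illustrative usage (editorial sketch, not part of the original patch):
-+ * acquiring one buffer from a pool, as hw_mbuf_alloc() does later in this
-+ * patch.  'swp' is an affined software portal and 'bpid' the buffer pool
-+ * id; the acquired address is returned in 'buf'.
-+ *
-+ *	uint64_t buf;
-+ *	int ret;
-+ *
-+ *	do {
-+ *		ret = qbman_swp_acquire(swp, bpid, &buf, 1);
-+ *	} while (ret == -EBUSY);
-+ *	if (ret <= 0)
-+ *		return -1;                  // pool empty or command failed
-+ */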
-+
-+ /*****************/
-+ /* FQ management */
-+ /*****************/
-+/**
-+ * qbman_swp_fq_schedule() - Move the fq to the scheduled state.
-+ * @s: the software portal object.
-+ * @fqid: the index of frame queue to be scheduled.
-+ *
-+ * There are a couple of different ways that a FQ can end up in the parked
-+ * state; this schedules it.
-+ *
-+ * Return 0 for success, or negative error code for failure.
-+ */
-+int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid);
-+
-+/**
-+ * qbman_swp_fq_force() - Force the FQ to fully scheduled state.
-+ * @s: the software portal object.
-+ * @fqid: the index of frame queue to be forced.
-+ *
-+ * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
-+ * and thus be available for selection by any channel-dequeuing behaviour (push
-+ * or pull). If the FQ is subsequently "dequeued" from the channel and is still
-+ * empty at the time this happens, the resulting dq_entry will have no FD.
-+ * (qbman_result_DQ_fd() will return NULL.)
-+ *
-+ * Return 0 for success, or negative error code for failure.
-+ */
-+int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid);
-+
-+/**
-+ * These functions change the FQ flow-control stuff between XON/XOFF. (The
-+ * default is XON.) This setting doesn't affect enqueues to the FQ, just
-+ * dequeues. XOFF FQs will remain in the tentatively-scheduled state, even when
-+ * non-empty, meaning they won't be selected for scheduled dequeuing. If a FQ is
-+ * changed to XOFF after it had already become truly-scheduled to a channel, and
-+ * a pull dequeue of that channel occurs that selects that FQ for dequeuing,
-+ * then the resulting dq_entry will have no FD. (qbman_result_DQ_fd() will
-+ * return NULL.)
-+ */
-+/**
-+ * qbman_swp_fq_xon() - XON the frame queue.
-+ * @s: the software portal object.
-+ * @fqid: the index of frame queue.
-+ *
-+ * Return 0 for success, or negative error code for failure.
-+ */
-+int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid);
-+/**
-+ * qbman_swp_fq_xoff() - XOFF the frame queue.
-+ * @s: the software portal object.
-+ * @fqid: the index of frame queue.
-+ *
-+ * Return 0 for success, or negative error code for failure.
-+ */
-+int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid);
-+
-+ /**********************/
-+ /* Channel management */
-+ /**********************/
-+
-+/**
-+ * If the user has been allocated a channel object that is going to generate
-+ * CDANs to another channel, then these functions will be necessary.
-+ * CDAN-enabled channels only generate a single CDAN notification, after which
-+ * they need to be re-enabled before they'll generate another. (The idea is
-+ * that pull dequeuing will occur in reaction to the CDAN, followed by a
-+ * reenable step.) Each function generates a distinct command to hardware, so a
-+ * combination function is provided if the user wishes to modify the "context"
-+ * (which shows up in each CDAN message) each time they reenable, as a single
-+ * command to hardware.
-+ */
-+
-+/**
-+ * qbman_swp_CDAN_set_context() - Set CDAN context
-+ * @s: the software portal object.
-+ * @channelid: the channel index.
-+ * @ctx: the context to be set in CDAN.
-+ *
-+ * Return 0 for success, or negative error code for failure.
-+ */
-+int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
-+ uint64_t ctx);
-+
-+/**
-+ * qbman_swp_CDAN_enable() - Enable CDAN for the channel.
-+ * @s: the software portal object.
-+ * @channelid: the index of the channel to generate CDAN.
-+ *
-+ * Return 0 for success, or negative error code for failure.
-+ */
-+int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid);
-+
-+/**
-+ * qbman_swp_CDAN_disable() - disable CDAN for the channel.
-+ * @s: the software portal object.
-+ * @channelid: the index of the channel to generate CDAN.
-+ *
-+ * Return 0 for success, or negative error code for failure.
-+ */
-+int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid);
-+
-+/**
-+ * qbman_swp_CDAN_set_context_enable() - Set CDAN context and enable CDAN
-+ * @s: the software portal object.
-+ * @channelid: the index of the channel to generate CDAN.
-+ * @ctx: the context set in CDAN.
-+ *
-+ * Return 0 for success, or negative error code for failure.
-+ */
-+int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
-+ uint64_t ctx);
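-+
-+/*
-+ * Illustrative usage (editorial sketch, not part of the original patch):
-+ * a typical CDAN arm/re-arm cycle for a channel, based on the description
-+ * above.  'swp' is an affined software portal, 'chid' the channel id and
-+ * 'ctx' an application-chosen context value delivered with each CDAN
-+ * (all three are assumptions for this sketch).
-+ *
-+ *	// one-time arming: set the context and enable in a single command
-+ *	qbman_swp_CDAN_set_context_enable(swp, chid, ctx);
-+ *
-+ *	// ... on receipt of a CDAN: pull-dequeue the channel, then re-arm ...
-+ *	qbman_swp_CDAN_enable(swp, chid);
-+ */
-+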
-+int qbman_swp_fill_ring(struct qbman_swp *s,
-+ const struct qbman_eq_desc *d,
-+ const struct qbman_fd *fd,
-+ uint8_t burst_index);
-+int qbman_swp_flush_ring(struct qbman_swp *s);
-+void qbman_sync(void);
-+int qbman_swp_send_multiple(struct qbman_swp *s,
-+ const struct qbman_eq_desc *d,
-+ const struct qbman_fd *fd,
-+ int frames_to_send);
-+
-+int qbman_check_command_complete(struct qbman_swp *s,
-+ const struct qbman_result *dq);
-+#endif /* !_FSL_QBMAN_PORTAL_H */
-diff --git a/drivers/net/dpaa2/rte_eth_dpaa2_pvt.h b/drivers/net/dpaa2/rte_eth_dpaa2_pvt.h
-new file mode 100644
-index 0000000..b35c3ee
---- /dev/null
-+++ b/drivers/net/dpaa2/rte_eth_dpaa2_pvt.h
-@@ -0,0 +1,313 @@
-+/*-
-+ * BSD LICENSE
-+ *
-+ * Copyright(c) 2014 Freescale Semiconductor. All rights reserved.
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions
-+ * are met:
-+ *
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in
-+ * the documentation and/or other materials provided with the
-+ * distribution.
-+ * * Neither the name of Freescale Semiconductor nor the names of its
-+ * contributors may be used to endorse or promote products derived
-+ * from this software without specific prior written permission.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef _RTE_ETH_DPAA2_PVT_H_
-+#define _RTE_ETH_DPAA2_PVT_H_
-+
-+#include <rte_memory.h>
-+#include <rte_mbuf.h>
-+#include <rte_atomic.h>
-+#include <fsl_mc_sys.h>
-+#include <eal_vfio.h>
-+#include <eal_vfio_fsl_mc.h>
-+
-+typedef uint64_t dma_addr_t;
-+
-+#define FALSE 0
-+#define TRUE 1
-+#ifndef false
-+#define false FALSE
-+#endif
-+#ifndef true
-+#define true TRUE
-+#endif
-+#define lower_32_bits(x) ((uint32_t)(x))
-+#define upper_32_bits(x) ((uint32_t)(((x) >> 16) >> 16))
-+
-+#ifndef ETH_ADDR_LEN
-+#define ETH_ADDR_LEN 6
-+#endif
-+#ifndef ETH_VLAN_HLEN
-+#define ETH_VLAN_HLEN 4 /**< VLAN header length */
-+#endif
-+
-+#define NUM_MAX_RECV_FRAMES 16
-+
-+#define MC_PORTAL_INDEX 0
-+#define NUM_DPIO_REGIONS 2
-+#define NUM_DQS_PER_QUEUE 2
-+#define MC_PORTALS_BASE_PADDR 0x00080C000000ULL
-+#define MC_PORTAL_STRIDE 0x10000
-+#define MC_PORTAL_SIZE 64
-+#define MC_PORTAL_ID_TO_PADDR(portal_id) \
-+(MC_PORTALS_BASE_PADDR + (portal_id) * MC_PORTAL_STRIDE)
-+
-+struct dpaa2_dpio_dev {
-+ TAILQ_ENTRY(dpaa2_dpio_dev) next; /**< Pointer to Next device instance */
-+ uint16_t index; /**< Index of an instance in the list */
-+ rte_atomic16_t ref_count; /**< How many thread contexts are sharing this.*/
-+ struct fsl_mc_io *dpio; /** handle to DPIO portal object */
-+ uint16_t token;
-+ struct qbman_swp *sw_portal; /** SW portal object */
-+ const struct qbman_result *dqrr[4]; /**< DQRR Entry for this SW portal */
-+ pthread_mutex_t lock; /** Required when Portal is shared */
-+ void *mc_portal; /**< MC Portal for configuring this device */
-+ uintptr_t qbman_portal_ce_paddr; /**< Physical address of Cache Enabled Area */
-+ uintptr_t ce_size; /**< Size of the CE region */
-+ uintptr_t qbman_portal_ci_paddr; /**< Physical address of Cache Inhibit Area */
-+ uintptr_t ci_size; /**< Size of the CI region */
-+ void *intr_handle;
-+ int32_t vfio_fd; /**< File descriptor received via VFIO */
-+ int32_t hw_id; /**< A unique ID of this DPIO device instance */
-+};
-+
-+struct queue_storage_info_t {
-+ struct qbman_result *dq_storage[NUM_DQS_PER_QUEUE];
-+ struct qbman_result *active_dqs;
-+ int toggle;
-+};
-+
-+struct thread_io_info_t {
-+ struct dpaa2_dpio_dev *dpio_dev;
-+ struct dpaa2_dpio_dev *sec_dpio_dev;
-+ struct qbman_result *global_active_dqs;
-+};
-+
-+/*! Global per thread DPIO portal */
-+extern __thread struct thread_io_info_t thread_io_info;
-+/*! Global MCP list */
-+extern void *(*mcp_ptr_list);
-+
-+/* Refer to Table 7-3 in SEC BG */
-+struct qbman_fle {
-+ uint32_t addr_lo;
-+ uint32_t addr_hi;
-+ uint32_t length;
-+ /* FMT must be 00, MSB is final bit */
-+ uint32_t fin_bpid_offset;
-+ uint32_t frc;
-+ uint32_t reserved[3]; /* Not used currently */
-+};
-+
-+/* Maximum release/acquire from QBMAN */
-+#define DPAA2_MBUF_MAX_ACQ_REL 7
-+
-+#define MAX_BPID 256
-+
-+/*Macros to define operations on FD*/
-+#define DPAA2_SET_FD_ADDR(fd, addr) \
-+ fd->simple.addr_lo = lower_32_bits((uint64_t)addr); \
-+ fd->simple.addr_hi = upper_32_bits((uint64_t)addr);
-+#define DPAA2_SET_FD_LEN(fd, length) fd->simple.len = length
-+#define DPAA2_SET_FD_BPID(fd, bpid) fd->simple.bpid_offset |= bpid;
-+#define DPAA2_SET_FD_IVP(fd) ((fd->simple.bpid_offset |= 0x00004000))
-+#define DPAA2_SET_FD_OFFSET(fd, offset) (fd->simple.bpid_offset |= (uint32_t)(offset) << 16);
-+#define DPAA2_SET_FD_FRC(fd, frc) fd->simple.frc = frc;
-+#define DPAA2_RESET_FD_CTRL(fd) fd->simple.ctrl = 0;
-+
-+#define DPAA2_SET_FD_ASAL(fd, asal) (fd->simple.ctrl |= (asal << 16))
-+#define DPAA2_SET_FD_FLC(fd, addr) \
-+ fd->simple.flc_lo = lower_32_bits((uint64_t)addr); \
-+ fd->simple.flc_hi = upper_32_bits((uint64_t)addr);
-+#define DPAA2_GET_FLE_ADDR(fle) \
-+ (uint64_t)((((uint64_t)(fle->addr_hi)) << 32) + fle->addr_lo)
-+#define DPAA2_SET_FLE_ADDR(fle, addr) \
-+ fle->addr_lo = lower_32_bits((uint64_t)addr); \
-+ fle->addr_hi = upper_32_bits((uint64_t)addr);
-+#define DPAA2_SET_FLE_OFFSET(fle, offset) (fle)->fin_bpid_offset |= (uint32_t)(offset) << 16;
-+#define DPAA2_SET_FLE_BPID(fle, bpid) (fle)->fin_bpid_offset |= (uint64_t)bpid;
-+#define DPAA2_GET_FLE_BPID(fle, bpid) (fle->fin_bpid_offset & 0x000000ff)
-+#define DPAA2_SET_FLE_FIN(fle) fle->fin_bpid_offset |= (uint64_t)1 << 31;
-+#define DPAA2_SET_FLE_IVP(fle) (((fle)->fin_bpid_offset |= 0x00004000))
-+#define DPAA2_SET_FD_COMPOUND_FMT(fd) \
-+ fd->simple.bpid_offset |= (uint32_t)1 << 28;
-+#define DPAA2_GET_FD_ADDR(fd) \
-+ (uint64_t)((((uint64_t)(fd->simple.addr_hi)) << 32) + fd->simple.addr_lo)
-+#define DPAA2_GET_FD_LEN(fd) (fd->simple.len)
-+#define DPAA2_GET_FD_BPID(fd) ((fd->simple.bpid_offset & 0x00003FFF))
-+#define DPAA2_GET_FD_IVP(fd) ((fd->simple.bpid_offset & 0x00004000) >> 14)
-+#define DPAA2_GET_FD_OFFSET(fd) ((fd->simple.bpid_offset & 0x0FFF0000) >> 16)
-+#define DPAA2_GET_FD_FRC(fd) (fd->simple.frc)
-+#define DPAA2_GET_FD_FLC(fd) \
-+ (uint64_t)((((uint64_t)(fd->simple.flc_hi)) << 32) + fd->simple.flc_lo)
-+
-+#define DPAA2_SET_FLE_SG_EXT(fle) fle->fin_bpid_offset |= (uint64_t)1<<29;
-+#define DPAA2_IS_SET_FLE_SG_EXT(fle) \
-+ (fle->fin_bpid_offset & ((uint64_t)1<<29))? 1 : 0
-+
-+#define DPAA2_INLINE_MBUF_FROM_BUF(buf) \
-+ ((struct rte_mbuf *)((uint64_t)buf + DPAA2_FD_PTA_SIZE + DPAA2_MBUF_HW_ANNOTATION + DPAA2_RES))
-+#define DPAA2_BUF_FROM_INLINE_MBUF(mbuf) \
-+ ((uint8_t *)((uint64_t)mbuf - (DPAA2_FD_PTA_SIZE + DPAA2_MBUF_HW_ANNOTATION + DPAA2_RES)))
-+
-+#define DPAA2_ASAL_VAL (DPAA2_MBUF_HW_ANNOTATION / 64)
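-+
-+/*
-+ * Illustrative usage (editorial sketch, not part of the original patch):
-+ * populating a frame descriptor with the macros above, mirroring
-+ * eth_mbuf_to_fd() later in this patch.  The SET macros OR into
-+ * bpid_offset, so that field must be cleared first.  'fd', 'iova', 'len',
-+ * 'bpid' and 'off' are assumed to be provided by the caller.
-+ *
-+ *	fd->simple.bpid_offset = 0;         // required: the SET macros use |=
-+ *	DPAA2_SET_FD_ADDR(fd, iova);
-+ *	DPAA2_SET_FD_LEN(fd, len);
-+ *	DPAA2_SET_FD_BPID(fd, bpid);
-+ *	DPAA2_SET_FD_OFFSET(fd, off);
-+ *
-+ *	// and on the receive side:
-+ *	uint64_t addr = DPAA2_GET_FD_ADDR(fd);
-+ *	uint32_t size = DPAA2_GET_FD_LEN(fd);
-+ */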
-+
-+/*Macros to define QBMAN enqueue options */
-+#define DPAA2_ETH_EQ_DISABLE 0 /*!< Dont Enqueue the Frame */
-+#define DPAA2_ETH_EQ_RESP_ON_SUCC 1 /*!< Enqueue the Frame with
-+ response after success*/
-+#define DPAA2_ETH_EQ_RESP_ON_FAIL 2 /*!< Enqueue the Frame with
-+ response after failure*/
-+#define DPAA2_ETH_EQ_NO_RESP 3 /*!< Enqueue the Frame without
-+ response*/
-+/* Only Enqueue Error responses will be
-+ * pushed on FQID_ERR of Enqueue FQ */
-+#define DPAA2_EQ_RESP_ERR_FQ 0
-+/* All Enqueue responses will be pushed on address
-+ * set with qbman_eq_desc_set_response */
-+#define DPAA2_EQ_RESP_ALWAYS 1
-+
-+#define DPAA2_MAX_BUF_POOLS 8
-+
-+struct dpbp_node {
-+ struct dpbp_node *next;
-+ struct fsl_mc_io dpbp;
-+ uint16_t token;
-+ int dpbp_id;
-+};
-+
-+struct buf_pool_cfg {
-+ void *addr; /*!< The address from where DPAA2 will carve out the
-+ * buffers. 'addr' should be 'NULL' if user wants
-+ * to create buffers from the memory which user
-+ * asked DPAA2 to reserve during 'nadk init' */
-+ phys_addr_t phys_addr; /*!< corresponding physical address
-+ * of the memory provided in addr */
-+ uint32_t num; /*!< number of buffers */
-+ uint32_t size; /*!< size of each buffer. 'size' should include
-+ * any headroom to be reserved and alignment */
-+ uint16_t align; /*!< Buffer alignment (in bytes) */
-+ uint16_t bpid; /*!< The buffer pool id. This will be filled
-+ *in by DPAA2 for each buffer pool */
-+};
-+
-+struct buf_pool {
-+ uint32_t size;
-+ uint32_t num_bufs;
-+ uint16_t bpid;
-+ uint8_t *h_bpool_mem;
-+ struct rte_mempool *mp;
-+ struct dpbp_node *dpbp_node;
-+};
-+
-+/*!
-+ * Buffer pool list configuration structure. The user needs to give DPAA2 a
-+ * valid number of 'num_buf_pools'.
-+ */
-+struct dpaa2_bp_list_cfg {
-+ struct buf_pool_cfg buf_pool; /* Configuration
-+ * of each buffer pool */
-+};
-+
-+struct dpaa2_bp_list {
-+ struct dpaa2_bp_list *next;
-+ struct rte_mempool *mp;
-+ struct buf_pool buf_pool;
-+};
-+
-+struct bp_info {
-+ uint32_t size;
-+ uint32_t meta_data_size;
-+ struct dpaa2_bp_list *bp_list;
-+};
-+
-+extern struct dpaa2_bp_list *h_bp_list;
-+
-+//todo - this is costly, need to write a fast conversion routine
-+static void *dpaa2_mem_ptov(phys_addr_t paddr)
-+{
-+ const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
-+ int i;
-+
-+ for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
-+ if (paddr >= memseg[i].phys_addr &&
-+ (char *)paddr < (char *)memseg[i].phys_addr + memseg[i].len)
-+ return (void *)(memseg[i].addr_64 + (paddr - memseg[i].phys_addr));
-+ }
-+ return NULL;
-+}
-+
-+static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr)
-+{
-+ const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
-+ int i;
-+
-+ for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
-+ if (vaddr >= memseg[i].addr_64 &&
-+ vaddr < memseg[i].addr_64 + memseg[i].len)
-+ return memseg[i].phys_addr + (vaddr - memseg[i].addr_64);
-+ }
-+ return (phys_addr_t)(NULL);
-+}
-+
-+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-+/*
-+ * When we are using Physical addresses as IO Virtual Addresses,
-+ * we call the conversion routines dpaa2_mem_vtop & dpaa2_mem_ptov wherever required.
-+ * These routines are called with help of below MACRO's
-+ */
-+
-+#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) (mbuf->buf_physaddr)
-+
-+/**
-+ * macro to convert Virtual address to IOVA
-+ */
-+#define DPAA2_VADDR_TO_IOVA(_vaddr) dpaa2_mem_vtop((uint64_t)(_vaddr))
-+
-+/**
-+ * macro to convert IOVA to Virtual address
-+ */
-+#define DPAA2_IOVA_TO_VADDR(_iova) dpaa2_mem_ptov((phys_addr_t)(_iova))
-+
-+/**
-+ * macro to modify memory containing a virtual address, converting it to an IOVA in place
-+ */
-+#define DPAA2_MODIFY_VADDR_TO_IOVA(_mem, _type) \
-+ {_mem = (_type)(dpaa2_mem_vtop((uint64_t)(_mem))); }
-+
-+/**
-+ * macro to modify memory containing an IOVA, converting it to a virtual address in place
-+ */
-+#define DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type) \
-+ {_mem = (_type)(dpaa2_mem_ptov((phys_addr_t)(_mem))); }
-+
-+#else
-+#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) (mbuf->buf_addr)
-+
-+#define DPAA2_VADDR_TO_IOVA(_vaddr) (_vaddr)
-+#define DPAA2_IOVA_TO_VADDR(_iova) (_iova)
-+#define DPAA2_MODIFY_VADDR_TO_IOVA(_mem, _type)
-+#define DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type)
-+#endif
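-+
-+/*
-+ * Illustrative usage (editorial sketch, not part of the original patch):
-+ * converting a buffer address for hardware and back with the macros above,
-+ * as dpaa2_mbuf_release()/eth_mbuf_to_fd() do later in this patch.  'mbuf'
-+ * is an rte_mbuf and 'buf' a uint64_t holding a buffer address.
-+ *
-+ *	dma_addr_t iova =
-+ *		(dma_addr_t)DPAA2_VADDR_TO_IOVA(DPAA2_BUF_FROM_INLINE_MBUF(mbuf));
-+ *	void *va = (void *)DPAA2_IOVA_TO_VADDR(iova);
-+ *
-+ *	// in-place variant used on addresses handed to/from QBMan:
-+ *	DPAA2_MODIFY_VADDR_TO_IOVA(buf, uint64_t);
-+ */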
-+
-+#endif
-diff --git a/drivers/net/dpaa2/rte_eth_dpbp.c b/drivers/net/dpaa2/rte_eth_dpbp.c
-new file mode 100644
-index 0000000..6a7617d
---- /dev/null
-+++ b/drivers/net/dpaa2/rte_eth_dpbp.c
-@@ -0,0 +1,430 @@
-+/*-
-+ * BSD LICENSE
-+ *
-+ * Copyright(c) 2014 Freescale Semiconductor. All rights reserved.
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions
-+ * are met:
-+ *
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in
-+ * the documentation and/or other materials provided with the
-+ * distribution.
-+ * * Neither the name of Freescale Semiconductor nor the names of its
-+ * contributors may be used to endorse or promote products derived
-+ * from this software without specific prior written permission.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include <unistd.h>
-+#include <stdio.h>
-+#include <sys/types.h>
-+#include <string.h>
-+#include <stdlib.h>
-+#include <fcntl.h>
-+#include <errno.h>
-+#include <sys/ioctl.h>
-+#include <sys/stat.h>
-+#include <sys/types.h>
-+#include <sys/mman.h>
-+#include <sys/vfs.h>
-+#include <libgen.h>
-+#include <rte_mbuf.h>
-+
-+#include "rte_pci.h"
-+#include "rte_memzone.h"
-+
-+#include "rte_eth_dpaa2_pvt.h"
-+#include "fsl_qbman_portal.h"
-+#include <fsl_dpbp.h>
-+
-+#include <rte_log.h>
-+#include "dpaa2_logs.h"
-+
-+static struct dpbp_node *g_dpbp_list;
-+static struct dpbp_node *avail_dpbp;
-+
-+struct bp_info bpid_info[MAX_BPID];
-+
-+struct dpaa2_bp_list *h_bp_list;
-+
-+int
-+dpaa2_create_dpbp_device(
-+ int dpbp_id)
-+{
-+ struct dpbp_node *dpbp_node;
-+ int ret;
-+
-+ /* Allocate DPAA2 dpbp handle */
-+ dpbp_node = (struct dpbp_node *)malloc(sizeof(struct dpbp_node));
-+ if (!dpbp_node) {
-+ PMD_DRV_LOG(ERR, "Memory allocation failed for DPBP Device\n");
-+ return -1;
-+ }
-+
-+ /* Open the dpbp object */
-+ dpbp_node->dpbp.regs = mcp_ptr_list[MC_PORTAL_INDEX];
-+ ret = dpbp_open(&dpbp_node->dpbp, CMD_PRI_LOW, dpbp_id, &dpbp_node->token);
-+ if (ret) {
-+ PMD_DRV_LOG(ERR, "Resource allocation failure with err code: %d",
-+ ret);
-+ free(dpbp_node);
-+ return -1;
-+ }
-+
-+ /* Clean the device first */
-+ ret = dpbp_reset(&dpbp_node->dpbp, CMD_PRI_LOW, dpbp_node->token);
-+ if (ret) {
-+ PMD_DRV_LOG(ERR, "Failure cleaning dpbp device with"
-+ "error code %d\n", ret);
-+ return -1;
-+ }
-+
-+ dpbp_node->dpbp_id = dpbp_id;
-+ /* Add the dpbp handle into the global list */
-+ dpbp_node->next = g_dpbp_list;
-+ g_dpbp_list = dpbp_node;
-+ avail_dpbp = g_dpbp_list;
-+
-+ PMD_DRV_LOG(INFO, "Buffer resource initialized\n");
-+
-+ return 0;
-+}
-+
-+int hw_mbuf_create_pool(struct rte_mempool *mp)
-+{
-+ struct dpaa2_bp_list *bp_list;
-+ struct dpbp_attr dpbp_attr;
-+ int ret;
-+
-+ if (!avail_dpbp) {
-+ PMD_DRV_LOG(ERR, "DPAA2 resources not available\n");
-+ return -1;
-+ }
-+
-+ ret = dpbp_enable(&avail_dpbp->dpbp, CMD_PRI_LOW, avail_dpbp->token);
-+ if (ret != 0) {
-+ PMD_DRV_LOG(ERR, "Resource enable failure with"
-+ "err code: %d\n", ret);
-+ return -1;
-+ }
-+
-+ ret = dpbp_get_attributes(&avail_dpbp->dpbp, CMD_PRI_LOW,
-+ avail_dpbp->token, &dpbp_attr);
-+ if (ret != 0) {
-+ PMD_DRV_LOG(ERR, "Resource read failure with"
-+ "err code: %d\n", ret);
-+ ret = dpbp_disable(&avail_dpbp->dpbp, CMD_PRI_LOW,
-+ avail_dpbp->token);
-+ return -1;
-+ }
-+
-+ /* Allocate the bp_list which will be added into global_bp_list */
-+ bp_list = (struct dpaa2_bp_list *)malloc(sizeof(struct dpaa2_bp_list));
-+ if (!bp_list) {
-+ PMD_DRV_LOG(ERR, "No heap memory available\n");
-+ return -1;
-+ }
-+
-+ /* Set parameters of buffer pool list */
-+ bp_list->buf_pool.num_bufs = mp->size;
-+ bp_list->buf_pool.size = mp->elt_size
-+ - sizeof(struct rte_mbuf) - rte_pktmbuf_priv_size(mp);
-+ bp_list->buf_pool.bpid = dpbp_attr.bpid;
-+ bp_list->buf_pool.h_bpool_mem = NULL;
-+ bp_list->buf_pool.mp = mp;
-+ bp_list->buf_pool.dpbp_node = avail_dpbp;
-+ bp_list->next = h_bp_list;
-+
-+ mp->offload_ptr = dpbp_attr.bpid;
-+
-+ /* Increment the available DPBP */
-+ avail_dpbp = avail_dpbp->next;
-+
-+ bpid_info[dpbp_attr.bpid].size = bp_list->buf_pool.size;
-+ bpid_info[dpbp_attr.bpid].meta_data_size = sizeof(struct rte_mbuf)
-+ + rte_pktmbuf_priv_size(mp);
-+ bpid_info[dpbp_attr.bpid].bp_list = bp_list;
-+
-+ PMD_DRV_LOG(INFO, "BP List created for bpid =%d\n", dpbp_attr.bpid);
-+
-+ h_bp_list = bp_list;
-+ return 0;
-+}
-+
-+static inline void dpaa2_mbuf_release(uint64_t buf, uint32_t bpid)
-+{
-+ struct qbman_release_desc releasedesc;
-+ struct qbman_swp *swp;
-+ int ret;
-+
-+ if (!thread_io_info.dpio_dev) {
-+ ret = dpaa2_affine_qbman_swp();
-+ if (ret != 0) {
-+ PMD_DRV_LOG(ERR, "Failed to allocate IO portal");
-+ return;
-+ }
-+ }
-+ swp = thread_io_info.dpio_dev->sw_portal;
-+
-+ /* Create a release descriptor required for releasing
-+ * buffers into BMAN */
-+ qbman_release_desc_clear(&releasedesc);
-+ qbman_release_desc_set_bpid(&releasedesc, bpid);
-+
-+ DPAA2_MODIFY_VADDR_TO_IOVA(buf, uint64_t);
-+ do {
-+ /* Release buffer into the BMAN */
-+ ret = qbman_swp_release(swp, &releasedesc, &buf, 1);
-+ } while (ret == -EBUSY);
-+ PMD_TX_FREE_LOG(DEBUG, "Released %p address to BMAN\n", buf);
-+}
-+
-+int hw_mbuf_alloc(struct rte_mempool *mp, void **mb)
-+{
-+ struct qbman_swp *swp;
-+ uint16_t bpid;
-+ uint64_t buf;
-+ int ret;
-+ struct rte_mbuf *m;
-+
-+ if ((mp->offload_ptr > MAX_BPID) ||
-+ !(bpid_info[mp->offload_ptr].bp_list)) {
-+
-+ PMD_DRV_LOG(INFO, "DPAA2 buffer pool not configured\n");
-+ return -2;
-+ }
-+
-+ bpid = mp->offload_ptr;
-+
-+ if (!thread_io_info.dpio_dev) {
-+ ret = dpaa2_affine_qbman_swp();
-+ if (ret != 0) {
-+ PMD_DRV_LOG(ERR, "Failed to allocate IO portal");
-+ return -1;
-+ }
-+ }
-+ swp = thread_io_info.dpio_dev->sw_portal;
-+
-+ do {
-+ ret = qbman_swp_acquire(swp, bpid, &buf, 1);
-+ } while (ret == -EBUSY);
-+ if (ret <= 0) {
-+ PMD_DRV_LOG(INFO, "Buffer alloc(bpid %d)fail: err: %x",
-+ bpid, ret);
-+ return -1;
-+ }
-+ DPAA2_MODIFY_IOVA_TO_VADDR(buf, uint64_t);
-+
-+ PMD_DRV_LOG(INFO, "Acquired %p address from BMAN\n", buf);
-+ m = (struct rte_mbuf *)DPAA2_INLINE_MBUF_FROM_BUF(buf);
-+ RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(m) == 0);
-+ rte_mbuf_refcnt_set(m, 1);
-+ *mb = m;
-+ return 0;
-+}
-+
-+int hw_mbuf_free(void __rte_unused *m)
-+{
-+ struct rte_mbuf *mb = (struct rte_mbuf *)m;
-+ if ((mb->pool->offload_ptr > MAX_BPID) ||
-+ !(bpid_info[mb->pool->offload_ptr].bp_list)) {
-+
-+ PMD_DRV_LOG(INFO, "DPAA2 buffer pool not configured\n");
-+ return -1;
-+ }
-+
-+ dpaa2_mbuf_release((uint64_t)DPAA2_BUF_FROM_INLINE_MBUF(m),
-+ mb->pool->offload_ptr);
-+ return 0;
-+}
-+
-+int hw_mbuf_alloc_bulk(struct rte_mempool *pool,
-+ void **obj_table, unsigned count)
-+{
-+ static int alloc;
-+ struct qbman_swp *swp;
-+ uint32_t mbuf_size;
-+ uint16_t bpid;
-+ uint64_t bufs[64];
-+ int ret;
-+ unsigned i, n = 0;
-+ struct rte_mbuf **mt = (struct rte_mbuf **)obj_table;
-+
-+ //PMD_DRV_LOG(DEBUG, MBUF, "%s/n", __func__);
-+ if ((pool->offload_ptr > MAX_BPID) ||
-+ !(bpid_info[pool->offload_ptr].bp_list)) {
-+
-+ printf("\nDPAA2 buffer pool not configured\n");
-+ return -2;
-+ }
-+
-+ bpid = pool->offload_ptr;
-+
-+ if (!thread_io_info.dpio_dev) {
-+ ret = dpaa2_affine_qbman_swp();
-+ if (ret != 0) {
-+ PMD_DRV_LOG(ERR, "Failed to allocate IO portal");
-+ return -1;
-+ }
-+ }
-+ swp = thread_io_info.dpio_dev->sw_portal;
-+
-+ /* if number of buffers requested is less than 7 */
-+ if (count < DPAA2_MBUF_MAX_ACQ_REL) {
-+ ret = qbman_swp_acquire(swp, bpid, &bufs[n], count);
-+ if (ret <= 0) {
-+ PMD_DRV_LOG(ERR, "Failed to allocate buffers %d", ret);
-+ return -1;
-+ }
-+ n = ret;
-+ goto set_buf;
-+ }
-+
-+ while (n < count) {
-+ ret = 0;
-+ /* Acquire is all-or-nothing, so we drain in 7s,
-+ * then in 1s for the remainder. */
-+ if ((count - n) > DPAA2_MBUF_MAX_ACQ_REL) {
-+ ret = qbman_swp_acquire(swp, bpid, &bufs[n],
-+ DPAA2_MBUF_MAX_ACQ_REL);
-+ if (ret == DPAA2_MBUF_MAX_ACQ_REL) {
-+ n += ret;
-+ }
-+ }
-+ if (ret < DPAA2_MBUF_MAX_ACQ_REL) {
-+ ret = qbman_swp_acquire(swp, bpid, &bufs[n], 1);
-+ if (ret > 0) {
-+ PMD_DRV_LOG(DEBUG, "Drained buffer: %x",
-+ bufs[n]);
-+ n += ret;
-+ }
-+ }
-+ if (ret < 0) {
-+ PMD_DRV_LOG(WARNING, "Buffer aquire failed with"
-+ "err code: %d", ret);
-+ break;
-+ }
-+ }
-+ if (ret < 0 || n == 0) {
-+ PMD_DRV_LOG(ERR, "Failed to allocate buffers %d", ret);
-+ return -1;
-+ }
-+set_buf:
-+
-+ mbuf_size = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(pool);
-+
-+ for (i = 0; i < n; i++) {
-+
-+ DPAA2_MODIFY_IOVA_TO_VADDR(bufs[i], uint64_t);
-+
-+ mt[i] = (struct rte_mbuf *)(bufs[i] - mbuf_size);
-+ PMD_DRV_LOG(DEBUG,"Acquired %p address %p from BMAN\n", (void *)bufs[i], (void *)mt[i]);
-+ if (!bufs[i] || !mt[i]) {
-+ printf("\n ??????? how come we have a null buffer %p, %p",
-+ (void *)bufs[i], (void *)mt[i]);
-+ }
-+ }
-+
-+ alloc += n;
-+ PMD_DRV_LOG(DEBUG, "Total = %d , req = %d done = %d",
-+ alloc, count, n);
-+ return 0;
-+}
-+
-+int hw_mbuf_free_bulk(struct rte_mempool *pool, void * const *obj_table,
-+ unsigned n)
-+{
-+ unsigned i;
-+ struct rte_mbuf *m;
-+ //PMD_DRV_LOG(INFO, "%s/n", __func__);
-+ if ((pool->offload_ptr > MAX_BPID) ||
-+ !(bpid_info[pool->offload_ptr].bp_list)) {
-+
-+ PMD_DRV_LOG(INFO, "DPAA2 buffer pool not configured\n");
-+ return -1;
-+ }
-+ for (i = 0; i < n; i++) {
-+ m = (struct rte_mbuf *)(obj_table[i]);
-+ dpaa2_mbuf_release((uint64_t)m->buf_addr, pool->offload_ptr);
-+ }
-+
-+ return 0;
-+}
-+
-+int hw_mbuf_init(
-+ struct rte_mempool *mp,
-+ void *_m)
-+{
-+ struct rte_mbuf *m = (struct rte_mbuf *)((unsigned char *)_m + DPAA2_FD_PTA_SIZE +
-+ DPAA2_MBUF_HW_ANNOTATION + DPAA2_RES);
-+ uint32_t mbuf_size, buf_len, priv_size, head_size;
-+ uint32_t bpid;
-+
-+ if ((mp->offload_ptr > MAX_BPID) ||
-+ !(bpid_info[mp->offload_ptr].bp_list)) {
-+
-+ PMD_DRV_LOG(WARNING, "DPAA2 buffer pool not configured\n");
-+ return -1;
-+ }
-+ /* todo - assuming that h_bp_list will be the top node */
-+ bpid = mp->offload_ptr;
-+
-+ priv_size = rte_pktmbuf_priv_size(mp);
-+ mbuf_size = sizeof(struct rte_mbuf) + priv_size;
-+
-+ RTE_MBUF_ASSERT(RTE_ALIGN(priv_size, RTE_MBUF_PRIV_ALIGN) == priv_size);
-+ RTE_MBUF_ASSERT(mp->elt_size >= mbuf_size);
-+
-+ memset(_m, 0, mp->elt_size);
-+
-+ /*update it in global list as well */
-+ bpid_info[bpid].meta_data_size = DPAA2_RES;
-+
-+/* head_size = DPAA2_FD_PTA_SIZE + DPAA2_MBUF_HW_ANNOTATION
-+ + RTE_PKTMBUF_HEADROOM;
-+ head_size = DPAA2_ALIGN_ROUNDUP(head_size,
-+ DPAA2_PACKET_LAYOUT_ALIGN);
-+ head_size -= DPAA2_FD_PTA_SIZE + DPAA2_MBUF_HW_ANNOTATION;
-+*/
-+ head_size = RTE_PKTMBUF_HEADROOM;
-+
-+ buf_len = rte_pktmbuf_data_room_size(mp)
-+ - (DPAA2_FD_PTA_SIZE + DPAA2_MBUF_HW_ANNOTATION + DPAA2_RES /* dummy */);
-+
-+ RTE_MBUF_ASSERT(buf_len <= UINT16_MAX);
-+
-+ /* start of buffer is after mbuf structure and priv data */
-+ m->priv_size = priv_size;
-+ m->buf_addr = (char *)m + mbuf_size;
-+ m->buf_physaddr = rte_mempool_virt2phy(mp, _m) + DPAA2_FD_PTA_SIZE +
-+ DPAA2_MBUF_HW_ANNOTATION + DPAA2_RES + mbuf_size;
-+ m->buf_len = (uint16_t)buf_len;
-+
-+ /* keep some headroom between start of buffer and data */
-+ m->data_off = RTE_MIN(head_size, (uint16_t)m->buf_len);
-+ /* init some constant fields */
-+ m->pool = mp;
-+ m->nb_segs = 1;
-+ m->port = 0xff;
-+
-+ /* Release the mempool buffer to BMAN */
-+ dpaa2_mbuf_release((uint64_t)_m, bpid);
-+ return 0;
-+}
-+
-diff --git a/drivers/net/dpaa2/rte_eth_dpio.c b/drivers/net/dpaa2/rte_eth_dpio.c
-new file mode 100644
-index 0000000..23f0b08
---- /dev/null
-+++ b/drivers/net/dpaa2/rte_eth_dpio.c
-@@ -0,0 +1,339 @@
-+/*-
-+ * BSD LICENSE
-+ *
-+ * Copyright(c) 2014 Freescale Semiconductor. All rights reserved.
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions
-+ * are met:
-+ *
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in
-+ * the documentation and/or other materials provided with the
-+ * distribution.
-+ * * Neither the name of Freescale Semiconductor nor the names of its
-+ * contributors may be used to endorse or promote products derived
-+ * from this software without specific prior written permission.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include <unistd.h>
-+#include <stdio.h>
-+#include <sys/types.h>
-+#include <string.h>
-+#include <stdlib.h>
-+#include <fcntl.h>
-+#include <errno.h>
-+#include <sys/ioctl.h>
-+#include <sys/stat.h>
-+#include <sys/types.h>
-+#include <sys/mman.h>
-+#include <sys/vfs.h>
-+#include <libgen.h>
-+
-+#include "rte_pci.h"
-+#include "rte_memzone.h"
-+#include <rte_malloc.h>
-+
-+#include "rte_eth_dpaa2_pvt.h"
-+#include "fsl_qbman_portal.h"
-+#include <fsl_dpio.h>
-+
-+#include <rte_log.h>
-+#include "dpaa2_logs.h"
-+
-+#define NUM_HOST_CPUS RTE_MAX_LCORE
-+
-+__thread struct thread_io_info_t thread_io_info;
-+
-+TAILQ_HEAD(dpio_device_list, dpaa2_dpio_dev);
-+static struct dpio_device_list *dpio_dev_list; /*!< DPIO device list */
-+static uint32_t io_space_count;
-+
-+/*Stashing Macros*/
-+#define DPAA2_CORE_CLUSTER_BASE 0x04
-+#define DPAA2_CORE_CLUSTER_FIRST (DPAA2_CORE_CLUSTER_BASE + 0)
-+#define DPAA2_CORE_CLUSTER_SECOND (DPAA2_CORE_CLUSTER_BASE + 1)
-+#define DPAA2_CORE_CLUSTER_THIRD (DPAA2_CORE_CLUSTER_BASE + 2)
-+#define DPAA2_CORE_CLUSTER_FOURTH (DPAA2_CORE_CLUSTER_BASE + 3)
-+
-+#define DPAA2_CORE_CLUSTER_GET(sdest, cpu_id) \
-+do { \
-+ if (cpu_id == 0 || cpu_id == 1) \
-+ sdest = DPAA2_CORE_CLUSTER_FIRST; \
-+ else if (cpu_id == 2 || cpu_id == 3) \
-+ sdest = DPAA2_CORE_CLUSTER_SECOND; \
-+ else if (cpu_id == 4 || cpu_id == 5) \
-+ sdest = DPAA2_CORE_CLUSTER_THIRD; \
-+ else \
-+ sdest = DPAA2_CORE_CLUSTER_FOURTH; \
-+} while (0)
-+
-+static int
-+configure_dpio_qbman_swp(struct dpaa2_dpio_dev *dpio_dev)
-+{
-+ struct qbman_swp_desc p_des;
-+ struct dpio_attr attr;
-+
-+ dpio_dev->dpio = malloc(sizeof(struct fsl_mc_io));
-+ if (!dpio_dev->dpio) {
-+ PMD_DRV_LOG(ERR, "Memory allocation failure\n");
-+ return -1;
-+ }
-+
-+ PMD_DRV_LOG(INFO, "\t Alocated DPIO[%p]\n", dpio_dev->dpio);
-+ dpio_dev->dpio->regs = dpio_dev->mc_portal;
-+ if (dpio_open(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->hw_id,
-+ &dpio_dev->token)) {
-+ PMD_DRV_LOG(ERR, "Failed to allocate IO space\n");
-+ free(dpio_dev->dpio);
-+ return -1;
-+ }
-+
-+ if (dpio_enable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) {
-+ PMD_DRV_LOG(ERR, "Failed to Enable dpio\n");
-+ dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
-+ free(dpio_dev->dpio);
-+ return -1;
-+ }
-+
-+ if (dpio_get_attributes(dpio_dev->dpio, CMD_PRI_LOW,
-+ dpio_dev->token, &attr)) {
-+ PMD_DRV_LOG(ERR, "DPIO Get attribute failed\n");
-+ dpio_disable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
-+ dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
-+ free(dpio_dev->dpio);
-+ return -1;
-+ }
-+
-+ PMD_DRV_LOG(INFO, "Qbman Portal ID %d\n", attr.qbman_portal_id);
-+ PMD_DRV_LOG(INFO, "Portal CE addr 0x%lX\n", attr.qbman_portal_ce_offset);
-+ PMD_DRV_LOG(INFO, "Portal CI addr 0x%lX\n", attr.qbman_portal_ci_offset);
-+
-+ /* Configure & setup SW portal */
-+ p_des.block = NULL;
-+ p_des.idx = attr.qbman_portal_id;
-+ p_des.cena_bar = (void *)(dpio_dev->qbman_portal_ce_paddr);
-+ p_des.cinh_bar = (void *)(dpio_dev->qbman_portal_ci_paddr);
-+ p_des.irq = -1;
-+ p_des.qman_version = attr.qbman_version;
-+
-+ PMD_DRV_LOG(INFO, "Portal CE addr 0x%p\n", p_des.cena_bar);
-+ PMD_DRV_LOG(INFO, "Portal CI addr 0x%p\n", p_des.cinh_bar);
-+
-+ dpio_dev->sw_portal = qbman_swp_init(&p_des);
-+ if (dpio_dev->sw_portal == NULL) {
-+ PMD_DRV_LOG(ERR, " QBMan SW Portal Init failed\n");
-+ dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
-+ free(dpio_dev->dpio);
-+ return -1;
-+ }
-+
-+ PMD_DRV_LOG(INFO, "QBMan SW Portal 0x%p\n", dpio_dev->sw_portal);
-+
-+ return 0;
-+}
-+
-+int dpaa2_configure_stashing(struct dpaa2_dpio_dev *dpio_dev)
-+{
-+ int sdest;
-+ int cpu_id, ret;
-+
-+ /* Set the Stashing Destination */
-+ cpu_id = rte_lcore_id();
-+ if (cpu_id < 0) {
-+ cpu_id = rte_get_master_lcore();
-+ if (cpu_id < 0) {
-+ PMD_DRV_LOG(ERR, "\tGetting CPU Index failed\n");
-+ return -1;
-+ }
-+ }
-+
-+ /*
-+ * When DPDK runs on a Virtual Machine, the Stashing
-+ * Destination gets set in the H/W w.r.t. the Virtual CPU IDs.
-+ * As a workaround, the environment variable HOST_START_CPU gives
-+ * the offset of the host start core of the Virtual Machine threads.
-+ */
-+ if (getenv("HOST_START_CPU")) {
-+ cpu_id +=
-+ atoi(getenv("HOST_START_CPU"));
-+ cpu_id = cpu_id % NUM_HOST_CPUS;
-+ }
-+
-+ /* Set the STASH Destination depending on the current CPU ID.
-+ Valid values of SDEST are 4, 5, 6 and 7, where
-+ CPU 0-1 will have SDEST 4,
-+ CPU 2-3 will have SDEST 5, and so on.
-+ */
-+ DPAA2_CORE_CLUSTER_GET(sdest, cpu_id);
-+ PMD_DRV_LOG(INFO, "Portal= %d CPU= %u SDEST= %d\n",
-+ dpio_dev->index, cpu_id, sdest);
-+
-+ ret = dpio_set_stashing_destination(dpio_dev->dpio, CMD_PRI_LOW,
-+ dpio_dev->token, sdest);
-+ if (ret) {
-+ PMD_DRV_LOG(ERR, "%d ERROR in SDEST\n", ret);
-+ return -1;
-+ }
-+
-+ return 0;
-+}
-+
-+int
-+dpaa2_affine_qbman_swp(void)
-+{
-+ struct dpaa2_dpio_dev *dpio_dev = NULL;
-+ int ret;
-+
-+ if (thread_io_info.dpio_dev)
-+ return 0;
-+
-+ /* Get DPIO dev handle from list using index */
-+ TAILQ_FOREACH(dpio_dev, dpio_dev_list, next) {
-+ if (dpio_dev && rte_atomic16_test_and_set(&dpio_dev->ref_count))
-+ break;
-+ }
-+ if (!dpio_dev)
-+ return -1;
-+
-+ /* Populate the thread_io_info structure */
-+ thread_io_info.dpio_dev = dpio_dev;
-+
-+ ret = dpaa2_configure_stashing(dpio_dev);
-+ if (ret) {
-+ RTE_LOG(ERR, EAL, "dpaa2_configure_stashing failed");
-+ }
-+ return ret;
-+}
-+
-+int
-+dpaa2_affine_qbman_swp_sec(void)
-+{
-+ struct dpaa2_dpio_dev *dpio_dev = NULL;
-+ int ret;
-+
-+ if (thread_io_info.sec_dpio_dev)
-+ return 0;
-+
-+ /* Get DPIO dev handle from list using index */
-+ TAILQ_FOREACH(dpio_dev, dpio_dev_list, next) {
-+ if (dpio_dev && rte_atomic16_read(&dpio_dev->ref_count) == 0) {
-+ rte_atomic16_inc(&dpio_dev->ref_count);
-+ break;
-+ }
-+ }
-+ if (!dpio_dev)
-+ return -1;
-+
-+ /* Populate the thread_io_info structure */
-+ thread_io_info.sec_dpio_dev = dpio_dev;
-+
-+ ret = dpaa2_configure_stashing(dpio_dev);
-+ if (ret) {
-+ RTE_LOG(ERR, EAL, "dpaa2_configure_stashing failed");
-+ }
-+ return ret;
-+}
-+
-+int
-+dpaa2_create_dpio_device(struct vfio_device *vdev,
-+ struct vfio_device_info *obj_info,
-+ int object_id)
-+{
-+ struct dpaa2_dpio_dev *dpio_dev;
-+ struct vfio_region_info reg_info = { .argsz = sizeof(reg_info)};
-+
-+ if (obj_info->num_regions < NUM_DPIO_REGIONS) {
-+ PMD_DRV_LOG(ERR, "ERROR, Not sufficient number "
-+ "of DPIO regions.\n");
-+ return -1;
-+ }
-+
-+ if (!dpio_dev_list) {
-+ dpio_dev_list = malloc(sizeof(struct dpio_device_list));
-+ if (NULL == dpio_dev_list) {
-+ PMD_DRV_LOG(ERR, "Memory allocation failed for DPIO list\n");
-+ return -1;
-+ }
-+
-+ /* Initialize the DPIO List */
-+ TAILQ_INIT(dpio_dev_list);
-+ }
-+
-+ dpio_dev = malloc(sizeof(struct dpaa2_dpio_dev));
-+ if (!dpio_dev) {
-+ PMD_DRV_LOG(ERR, "Memory allocation failed for DPIO Device\n");
-+ return -1;
-+ }
-+
-+ PMD_DRV_LOG(INFO, "\t Aloocated DPIO [%p]\n", dpio_dev);
-+ dpio_dev->dpio = NULL;
-+ dpio_dev->hw_id = object_id;
-+ dpio_dev->vfio_fd = vdev->fd;
-+ rte_atomic16_init(&dpio_dev->ref_count);
-+ /* Using single portal for all devices */
-+ dpio_dev->mc_portal = mcp_ptr_list[MC_PORTAL_INDEX];
-+
-+ reg_info.index = 0;
-+ if (ioctl(dpio_dev->vfio_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
-+ printf("vfio: error getting region info\n");
-+ return -1;
-+ }
-+
-+ PMD_DRV_LOG(INFO, "\t Region Offset = %llx\n", reg_info.offset);
-+ PMD_DRV_LOG(INFO, "\t Region Size = %llx\n", reg_info.size);
-+ dpio_dev->ce_size = reg_info.size;
-+ dpio_dev->qbman_portal_ce_paddr = (uint64_t)mmap(NULL, reg_info.size,
-+ PROT_WRITE | PROT_READ, MAP_SHARED,
-+ dpio_dev->vfio_fd, reg_info.offset);
-+
-+ /* Create Mapping for QBMan Cache Enabled area. This is a fix for
-+ SMMU fault for DQRR stashing transaction. */
-+ if (vfio_dmamap_mem_region(dpio_dev->qbman_portal_ce_paddr,
-+ reg_info.offset, reg_info.size)) {
-+ PMD_DRV_LOG(ERR, "DMAMAP for Portal CE area failed.\n");
-+ return -1;
-+ }
-+
-+ reg_info.index = 1;
-+ if (ioctl(dpio_dev->vfio_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
-+ printf("vfio: error getting region info\n");
-+ return -1;
-+ }
-+
-+ PMD_DRV_LOG(INFO, "\t Region Offset = %llx\n", reg_info.offset);
-+ PMD_DRV_LOG(INFO, "\t Region Size = %llx\n", reg_info.size);
-+ dpio_dev->ci_size = reg_info.size;
-+ dpio_dev->qbman_portal_ci_paddr = (uint64_t)mmap(NULL, reg_info.size,
-+ PROT_WRITE | PROT_READ, MAP_SHARED,
-+ dpio_dev->vfio_fd, reg_info.offset);
-+
-+ if (configure_dpio_qbman_swp(dpio_dev)) {
-+ PMD_DRV_LOG(ERR,
-+ "Failed in configuring the qbman portal for dpio %d\n",
-+ dpio_dev->hw_id);
-+ return -1;
-+ }
-+
-+ io_space_count++;
-+ dpio_dev->index = io_space_count;
-+ TAILQ_INSERT_HEAD(dpio_dev_list, dpio_dev, next);
-+
-+ return 0;
-+}
-+
-diff --git a/drivers/net/dpaa2/rte_eth_dpni.c b/drivers/net/dpaa2/rte_eth_dpni.c
-new file mode 100644
-index 0000000..62baf03
---- /dev/null
-+++ b/drivers/net/dpaa2/rte_eth_dpni.c
-@@ -0,0 +1,2230 @@
-+/*-
-+ * BSD LICENSE
-+ *
-+ * Copyright (c) 2014 Freescale Semiconductor, Inc. All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions
-+ * are met:
-+ *
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in
-+ * the documentation and/or other materials provided with the
-+ * distribution.
-+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
-+ * contributors may be used to endorse or promote products derived
-+ * from this software without specific prior written permission.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include <time.h>
-+#include <rte_mbuf.h>
-+#include <rte_ethdev.h>
-+#include <rte_malloc.h>
-+#include <rte_memcpy.h>
-+#include <rte_string_fns.h>
-+#include <rte_cycles.h>
-+#include <rte_kvargs.h>
-+#include <rte_dev.h>
-+
-+#include <net/if.h>
-+/* MC header files */
-+#include <fsl_dpbp.h>
-+#include <fsl_dpni.h>
-+#include "rte_eth_dpaa2_pvt.h"
-+#include "rte_eth_dpni_annot.h"
-+#include "dpaa2_logs.h"
-+
-+#include <fsl_qbman_portal.h>
-+#include <fsl_dpio.h>
-+
-+#define DPAA2_STASHING
-+
-+/* tx fd send batching */
-+#define QBMAN_MULTI_TX
-+
-+#define RTE_ETH_DPAA2_SNAPSHOT_LEN 65535
-+#define RTE_ETH_DPAA2_SNAPLEN 4096
-+#define RTE_ETH_DPAA2_PROMISC 1
-+#define RTE_ETH_DPAA2_TIMEOUT -1
-+#define ETH_DPAA2_RX_IFACE_ARG "rx_iface"
-+#define ETH_DPAA2_TX_IFACE_ARG "tx_iface"
-+#define ETH_DPAA2_IFACE_ARG "iface"
-+
-+static const char *drivername = "DPNI PMD";
-+
-+#define MAX_TCS DPNI_MAX_TC
-+#define MAX_RX_QUEUES 64
-+#define MAX_TX_QUEUES 64
-+
-+/*Maximum number of slots available in TX ring*/
-+#define MAX_SLOTS 8
-+
-+/*Threshold for a queue to *Enter* Congestion state.
-+ It is set to 128 frames of size 64 bytes.*/
-+#define CONG_ENTER_THRESHOLD 128*64
-+
-+/*Threshold for a queue to *Exit* Congestion state.
-+ It is set to 98 frames of size 64 bytes*/
-+#define CONG_EXIT_THRESHOLD 98*64
-+
-+/*! Maximum number of flow distributions per traffic class */
-+#define MAX_DIST_PER_TC 16
-+
-+/* Size of the input SMMU mapped memory required by MC */
-+#define DIST_PARAM_IOVA_SIZE 256
-+
-+struct dpaa2_queue {
-+ void *dev;
-+ int32_t eventfd; /*!< Event Fd of this queue */
-+ uint32_t fqid; /*!< Unique ID of this queue */
-+ uint8_t tc_index; /*!< traffic class identifier */
-+ uint16_t flow_id; /*!< To be used by DPAA2 framework */
-+ uint64_t rx_pkts;
-+ uint64_t tx_pkts;
-+ uint64_t err_pkts;
-+ union {
-+ struct queue_storage_info_t *q_storage;
-+ struct qbman_result *cscn;
-+ };
-+};
-+
-+struct dpaa2_dev_priv {
-+ void *hw;
-+ int32_t hw_id;
-+ int32_t qdid;
-+ uint16_t token;
-+ uint8_t nb_tx_queues;
-+ uint8_t nb_rx_queues;
-+ void *rx_vq[MAX_RX_QUEUES];
-+ void *tx_vq[MAX_TX_QUEUES];
-+
-+ struct dpaa2_bp_list *bp_list; /**<Attached buffer pool list */
-+ uint16_t num_dist_per_tc[MAX_TCS];
-+
-+ uint8_t max_unicast_filters;
-+ uint8_t max_multicast_filters;
-+ uint8_t max_vlan_filters;
-+ uint8_t num_tc;
-+ uint32_t options;
-+};
-+
-+static struct rte_pci_id pci_id_dpaa2_map[] = {
-+ {RTE_PCI_DEVICE(FSL_VENDOR_ID, FSL_MC_DPNI_DEVID)},
-+};
-+
-+extern struct bp_info bpid_info[MAX_BPID];
-+
-+static void dpaa2_print_stats(struct rte_eth_dev *dev)
-+{
-+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
-+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
-+ uint64_t value;
-+
-+ dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, DPNI_CNT_ING_FRAME, &value);
-+ printf("Rx packets: %ld\n", value);
-+ dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, DPNI_CNT_ING_BYTE, &value);
-+ printf("Rx bytes: %ld\n", value);
-+ dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, DPNI_CNT_ING_MCAST_FRAME, &value);
-+ printf("Rx Multicast: %ld\n", value);
-+ dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, DPNI_CNT_ING_FRAME_DROP, &value);
-+ printf("Rx dropped: %ld\n", value);
-+ dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, DPNI_CNT_ING_FRAME_DISCARD, &value);
-+ printf("Rx discarded: %ld\n", value);
-+ dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, DPNI_CNT_EGR_FRAME, &value);
-+ printf("Tx packets: %ld\n", value);
-+ dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, DPNI_CNT_EGR_BYTE, &value);
-+ printf("Tx bytes: %ld\n", value);
-+ dpni_get_counter(dpni, CMD_PRI_LOW, priv->token, DPNI_CNT_EGR_FRAME_DISCARD, &value);
-+ printf("Tx dropped: %ld\n", value);
-+}
-+
-+/**
-+ * Atomically reads the link status information from global
-+ * structure rte_eth_dev.
-+ *
-+ * @param dev
-+ * - Pointer to the structure rte_eth_dev to read from.
-+ * - Pointer to the buffer to be saved with the link status.
-+ *
-+ * @return
-+ * - On success, zero.
-+ * - On failure, negative value.
-+ */
-+static inline int
-+rte_dpni_dev_atomic_read_link_status(struct rte_eth_dev *dev,
-+ struct rte_eth_link *link)
-+{
-+ struct rte_eth_link *dst = link;
-+ struct rte_eth_link *src = &dev->data->dev_link;
-+
-+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
-+ *(uint64_t *)src) == 0)
-+ return -1;
-+
-+ return 0;
-+}
-+
-+/**
-+ * Atomically writes the link status information into global
-+ * structure rte_eth_dev.
-+ *
-+ * @param dev
-+ * - Pointer to the structure rte_eth_dev to read from.
-+ * - Pointer to the buffer to be saved with the link status.
-+ *
-+ * @return
-+ * - On success, zero.
-+ * - On failure, negative value.
-+ */
-+static inline int
-+rte_dpni_dev_atomic_write_link_status(struct rte_eth_dev *dev,
-+ struct rte_eth_link *link)
-+{
-+ struct rte_eth_link *dst = &dev->data->dev_link;
-+ struct rte_eth_link *src = link;
-+
-+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
-+ *(uint64_t *)src) == 0)
-+ return -1;
-+
-+ return 0;
-+}
-+
-+static inline void
-+dpaa2_eth_parse_packet(struct rte_mbuf *mbuf)
-+{
-+ uint32_t pkt_type = 0;
-+ struct pkt_annotation *annotation = (struct pkt_annotation *)
-+ ((uint8_t *)mbuf - (DPAA2_MBUF_HW_ANNOTATION + DPAA2_RES));
-+
-+ PMD_DRV_LOG(DEBUG, "\n 1 annotation = 0x%x ", annotation->word4);
-+
-+ if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT))
-+ pkt_type/* mbuf->packet_type */ |= RTE_PTYPE_L2_ETHER;
-+
-+ if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT))
-+ pkt_type/* mbuf->packet_type */ |= RTE_PTYPE_L3_IPV4;
-+
-+ if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT))
-+ pkt_type /* mbuf->packet_type */ |= RTE_PTYPE_L3_IPV6;
-+
-+ if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT))
-+ pkt_type/* mbuf->packet_type */ |= RTE_PTYPE_L3_IPV4_EXT;
-+
-+ if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
-+ pkt_type/* mbuf->packet_type */ |= RTE_PTYPE_L4_UDP;
-+
-+ if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
-+ pkt_type/* mbuf->packet_type */ |= RTE_PTYPE_L4_TCP;
-+
-+ if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
-+ pkt_type/* mbuf->packet_type */ |= RTE_PTYPE_L4_SCTP;
-+
-+ if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
-+ pkt_type/* mbuf->packet_type */ |= RTE_PTYPE_L4_ICMP;
-+
-+ if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
-+ pkt_type/* mbuf->packet_type */ |= RTE_PTYPE_UNKNOWN;
-+
-+ mbuf->packet_type = pkt_type;
-+}
-+
-+static inline
-+struct rte_mbuf *eth_fd_to_mbuf(const struct qbman_fd *fd)
-+{
-+ struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(DPAA2_GET_FD_ADDR(fd));
-+
-+ PMD_DRV_LOG(DEBUG, "\nmbuf %p BMAN buf addr %p",
-+ (void *)mbuf, mbuf->buf_addr);
-+
-+ PMD_DRV_LOG(DEBUG, "\nfdaddr =%lx bpid =%d meta =%d off =%d, len =%d\n",
-+ DPAA2_GET_FD_ADDR(fd),
-+ DPAA2_GET_FD_BPID(fd),
-+ bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
-+ DPAA2_GET_FD_OFFSET(fd),
-+ DPAA2_GET_FD_LEN(fd));
-+
-+// mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
-+ mbuf->data_len = DPAA2_GET_FD_LEN(fd);
-+ mbuf->pkt_len = mbuf->data_len;
-+ mbuf->next = NULL;
-+ rte_mbuf_refcnt_set(mbuf, 1);
-+
-+ /* Parse the packet */
-+ dpaa2_eth_parse_packet(mbuf);
-+
-+ mbuf->nb_segs = 1;
-+ mbuf->ol_flags = 0;
-+
-+ return mbuf;
-+}
-+
-+static void __attribute__ ((noinline)) eth_mbuf_to_fd(struct rte_mbuf *mbuf,
-+ struct qbman_fd *fd, uint16_t bpid)
-+{
-+ /*Resetting the buffer pool id and offset field*/
-+ fd->simple.bpid_offset = 0;
-+
-+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(DPAA2_BUF_FROM_INLINE_MBUF(mbuf)));
-+ DPAA2_SET_FD_LEN(fd, mbuf->data_len);
-+ DPAA2_SET_FD_BPID(fd, bpid);
-+ DPAA2_SET_FD_OFFSET(fd, DPAA2_FD_PTA_SIZE + DPAA2_MBUF_HW_ANNOTATION +
-+ DPAA2_RES /* dummy */+ 128 + mbuf->priv_size + mbuf->data_off);
-+ DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);
-+
-+ PMD_DRV_LOG(DEBUG, "\nmbuf %p BMAN buf addr %p",
-+ (void *)mbuf, mbuf->buf_addr);
-+
-+ PMD_DRV_LOG(DEBUG, "\nfdaddr =%lx bpid =%d meta =%d off =%d, len =%d\n",
-+ DPAA2_GET_FD_ADDR(fd),
-+ DPAA2_GET_FD_BPID(fd),
-+ bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
-+ DPAA2_GET_FD_OFFSET(fd),
-+ DPAA2_GET_FD_LEN(fd));
-+
-+ return;
-+}
-+
-+static int eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
-+ struct qbman_fd *fd, uint16_t bpid)
-+{
-+ struct rte_mbuf *m;
-+ void *mb = NULL;
-+
-+ if (hw_mbuf_alloc(bpid_info[bpid].bp_list->buf_pool.mp, &mb)) {
-+ PMD_DRV_LOG(WARNING, "Unable to allocated DPAA2 buffer");
-+ rte_pktmbuf_free(mbuf);
-+ return -1;
-+ }
-+ m = (struct rte_mbuf *)mb;
-+ memcpy((char *)m->buf_addr + mbuf->data_off,
-+ (void *)((char *)mbuf->buf_addr + mbuf->data_off),
-+ mbuf->pkt_len);
-+
-+ /*Resetting the buffer pool id and offset field*/
-+ fd->simple.bpid_offset = 0;
-+
-+ DPAA2_SET_FD_ADDR(fd, m->buf_addr);
-+ DPAA2_SET_FD_LEN(fd, mbuf->data_len);
-+ DPAA2_SET_FD_BPID(fd, bpid);
-+ DPAA2_SET_FD_OFFSET(fd, mbuf->data_off);
-+ DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);
-+
-+ PMD_DRV_LOG(DEBUG, "\nmbuf %p BMAN buf addr %p",
-+ (void *)mbuf, mbuf->buf_addr);
-+
-+ PMD_DRV_LOG(DEBUG, "\nfdaddr =%lx bpid =%d meta =%d off =%d, len =%d\n",
-+ DPAA2_GET_FD_ADDR(fd),
-+ DPAA2_GET_FD_BPID(fd),
-+ bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
-+ DPAA2_GET_FD_OFFSET(fd),
-+ DPAA2_GET_FD_LEN(fd));
-+ /*free the original packet */
-+ rte_pktmbuf_free(mbuf);
-+
-+ return 0;
-+}
-+
-+static uint16_t
-+eth_dpaa2_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
-+{
-+ /* This function is responsible for receiving frames for a given device and VQ */
-+ struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
-+ struct qbman_result *dq_storage;
-+ uint32_t fqid = dpaa2_q->fqid;
-+ int ret, num_rx = 0;
-+ uint8_t is_last = 0, status;
-+ struct qbman_swp *swp;
-+ const struct qbman_fd *fd;
-+ struct qbman_pull_desc pulldesc;
-+ struct rte_eth_dev *dev = dpaa2_q->dev;
-+
-+ if (!thread_io_info.dpio_dev) {
-+ ret = dpaa2_affine_qbman_swp();
-+ if (ret) {
-+ PMD_DRV_LOG(ERR, "Failure in affining portal\n");
-+ return 0;
-+ }
-+ }
-+ swp = thread_io_info.dpio_dev->sw_portal;
-+ dq_storage = dpaa2_q->q_storage->dq_storage[0];
-+
-+ qbman_pull_desc_clear(&pulldesc);
-+ qbman_pull_desc_set_numframes(&pulldesc, nb_pkts);
-+ qbman_pull_desc_set_fq(&pulldesc, fqid);
-+ /* todo optimization - we can have dq_storage_phys available*/
-+ qbman_pull_desc_set_storage(&pulldesc, dq_storage,
-+ (dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
-+
-+ /*Issue a volatile dequeue command. */
-+ while (1) {
-+ if (qbman_swp_pull(swp, &pulldesc)) {
-+ PMD_DRV_LOG(ERR, "VDQ command is not issued."
-+ "QBMAN is busy\n");
-+ /* Portal was busy, try again */
-+ continue;
-+ }
-+ break;
-+ };
-+
-+ /* Receive the packets till the Last Dequeue entry is found with
-+ respect to the PULL command issued above.
-+ */
-+ while (!is_last) {
-+ /* Check if the previously issued command is completed.
-+ * Also, the SWP appears to be shared between the Ethernet driver
-+ * and the SEC driver. */
-+ while(!qbman_check_command_complete(swp, dq_storage))
-+ ;
-+ /* Loop until the dq_storage is updated with
-+ * new token by QBMAN */
-+ while (!qbman_result_has_new_result(swp, dq_storage))
-+ ;
-+ /* Check whether the last pull command has expired and
-+ set the condition for loop termination */
-+ if (qbman_result_DQ_is_pull_complete(dq_storage)) {
-+ is_last = 1;
-+ /* Check for valid frame. */
-+ status = (uint8_t)qbman_result_DQ_flags(dq_storage);
-+ if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
-+ PMD_DRV_LOG(DEBUG, "No frame is delivered\n");
-+ continue;
-+ }
-+ }
-+
-+ fd = qbman_result_DQ_fd(dq_storage);
-+ bufs[num_rx] = eth_fd_to_mbuf(fd);
-+ bufs[num_rx]->port = dev->data->port_id;
-+
-+ num_rx++;
-+ dq_storage++;
-+ } /* End of Packet Rx loop */
-+
-+ dpaa2_q->rx_pkts += num_rx;
-+
-+ PMD_DRV_LOG(INFO, "Ethernet Received %d Packets\n", num_rx);
-+ /*Return the total number of packets received to DPAA2 app*/
-+ return num_rx;
-+}
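-+
-+/*
-+ * Illustrative usage sketch (assumed example, not driver code): the receive
-+ * callback above is reached through the generic ethdev burst API, so an
-+ * application would drive it roughly as below. example_poll_rx and
-+ * RX_BURST_SZ are placeholder names; <rte_ethdev.h> and <rte_mbuf.h> are
-+ * assumed to be included.
-+ */
-+#define RX_BURST_SZ 16
-+static inline uint16_t
-+example_poll_rx(uint8_t port_id, uint16_t queue_id)
-+{
-+	struct rte_mbuf *pkts[RX_BURST_SZ];
-+	uint16_t i, nb;
-+
-+	/* Dispatches to eth_dpaa2_rx()/eth_dpaa2_prefetch_rx() for DPAA2 ports */
-+	nb = rte_eth_rx_burst(port_id, queue_id, pkts, RX_BURST_SZ);
-+	for (i = 0; i < nb; i++)
-+		rte_pktmbuf_free(pkts[i]); /* an application would process the packet here */
-+	return nb;
-+}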
-+
-+static uint16_t
-+eth_dpaa2_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
-+{
-+	/* Function is responsible for receiving frames for a given device and VQ */
-+ struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
-+ struct qbman_result *dq_storage;
-+ uint32_t fqid = dpaa2_q->fqid;
-+ int ret, i, num_rx = 0;
-+ uint8_t is_last = 0, status;
-+ struct qbman_swp *swp;
-+ const struct qbman_fd *fd[16];
-+ struct qbman_pull_desc pulldesc;
-+ struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
-+ struct rte_eth_dev *dev = dpaa2_q->dev;
-+
-+ if(!thread_io_info.dpio_dev) {
-+ ret = dpaa2_affine_qbman_swp();
-+ if (ret) {
-+ PMD_DRV_LOG(ERR, "Failure in affining portal\n");
-+ return 0;
-+ }
-+ }
-+ swp = thread_io_info.dpio_dev->sw_portal;
-+
-+ if(!q_storage->active_dqs) {
-+ q_storage->toggle = 0;
-+ dq_storage = q_storage->dq_storage[q_storage->toggle];
-+ qbman_pull_desc_clear(&pulldesc);
-+ qbman_pull_desc_set_numframes(&pulldesc, nb_pkts);
-+ qbman_pull_desc_set_fq(&pulldesc, fqid);
-+ qbman_pull_desc_set_storage(&pulldesc, dq_storage,
-+ (dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
-+ if(thread_io_info.global_active_dqs) {
-+ while(!qbman_check_command_complete(swp, thread_io_info.global_active_dqs))
-+ ;
-+ }
-+ while (1) {
-+ if (qbman_swp_pull(swp, &pulldesc)) {
-+				PMD_DRV_LOG(WARNING, "VDQ command is not issued. "
-+					"QBMAN is busy\n");
-+ /* Portal was busy, try again */
-+ continue;
-+ }
-+ break;
-+ }
-+ q_storage->active_dqs = dq_storage;
-+ thread_io_info.global_active_dqs = dq_storage;
-+ }
-+ while(!qbman_check_command_complete(swp, thread_io_info.global_active_dqs))
-+ ;
-+ dq_storage = q_storage->active_dqs;
-+ while (!is_last) {
-+ /* Loop until the dq_storage is updated with
-+ * new token by QBMAN */
-+ struct rte_mbuf *mbuf;
-+
-+ while (!qbman_result_has_new_result(swp, dq_storage))
-+ ;
-+ rte_prefetch0((void *)((uint64_t)(dq_storage + 1)));
-+ /* Check whether Last Pull command is Expired and
-+ setting Condition for Loop termination */
-+ if (qbman_result_DQ_is_pull_complete(dq_storage)) {
-+ is_last = 1;
-+ /* Check for valid frame. */
-+ status = (uint8_t)qbman_result_DQ_flags(dq_storage);
-+ if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
-+ PMD_DRV_LOG(DEBUG, "No frame is delivered\n");
-+ continue;
-+ }
-+ }
-+ fd[num_rx] = qbman_result_DQ_fd(dq_storage);
-+ mbuf = DPAA2_INLINE_MBUF_FROM_BUF(DPAA2_GET_FD_ADDR(fd[num_rx]));
-+		/* Prefetch mbuf */
-+ rte_prefetch0(mbuf);
-+ /* Prefetch Annotation address from where we get parse results */
-+ rte_prefetch0((void *)((uint64_t)DPAA2_GET_FD_ADDR(fd[num_rx]) + DPAA2_FD_PTA_SIZE + 16));
-+ /*Prefetch Data buffer*/
-+ /* rte_prefetch0((void *)((uint64_t)DPAA2_GET_FD_ADDR(fd[num_rx]) + DPAA2_GET_FD_OFFSET(fd[num_rx]))); */
-+ dq_storage++;
-+ num_rx++;
-+
-+ } /* End of Packet Rx loop */
-+
-+ for (i = 0; i < num_rx; i++) {
-+ bufs[i] = eth_fd_to_mbuf(fd[i]);
-+ bufs[i]->port = dev->data->port_id;
-+ }
-+
-+ q_storage->toggle ^= 1;
-+ dq_storage = q_storage->dq_storage[q_storage->toggle];
-+ qbman_pull_desc_clear(&pulldesc);
-+ qbman_pull_desc_set_numframes(&pulldesc, nb_pkts);
-+ qbman_pull_desc_set_fq(&pulldesc, fqid);
-+ qbman_pull_desc_set_storage(&pulldesc, dq_storage,
-+ (dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
-+ /*Issue a volatile dequeue command. */
-+
-+ while (1) {
-+ if (qbman_swp_pull(swp, &pulldesc)) {
-+			PMD_DRV_LOG(WARNING, "VDQ command is not issued. "
-+				"QBMAN is busy\n");
-+ continue;
-+ }
-+ break;
-+ }
-+ q_storage->active_dqs = dq_storage;
-+ thread_io_info.global_active_dqs = dq_storage;
-+
-+ dpaa2_q->rx_pkts += num_rx;
-+
-+ PMD_DRV_LOG(INFO, "Ethernet Received %d Packets\n", num_rx);
-+ /*Return the total number of packets received to DPAA2 app*/
-+ return num_rx;
-+}
-+
-+/*
-+ * Callback to handle sending packets through a real NIC.
-+ */
-+static uint16_t
-+eth_dpaa2_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
-+{
-+	/* Function to transmit the frames to the given device and VQ */
-+ uint32_t loop;
-+ int32_t ret;
-+#ifdef QBMAN_MULTI_TX
-+ struct qbman_fd fd_arr[8];
-+ uint32_t frames_to_send;
-+#else
-+ struct qbman_fd fd;
-+#endif
-+ struct rte_mempool *mp;
-+ struct qbman_eq_desc eqdesc;
-+ struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
-+ struct qbman_swp *swp;
-+ uint16_t num_tx = 0;
-+ /*todo - need to support multiple buffer pools */
-+ uint16_t bpid;
-+ struct rte_eth_dev *dev = dpaa2_q->dev;
-+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
-+
-+ if (!thread_io_info.dpio_dev) {
-+ ret = dpaa2_affine_qbman_swp();
-+ if (ret) {
-+ PMD_DRV_LOG(ERR, "Failure in affining portal\n");
-+ return 0;
-+ }
-+ }
-+ swp = thread_io_info.dpio_dev->sw_portal;
-+
-+ /*Prepare enqueue descriptor*/
-+ qbman_eq_desc_clear(&eqdesc);
-+ qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
-+ qbman_eq_desc_set_response(&eqdesc, 0, 0);
-+ qbman_eq_desc_set_qd(&eqdesc, priv->qdid,
-+ dpaa2_q->flow_id, dpaa2_q->tc_index);
-+
-+ /*Clear the unused FD fields before sending*/
-+#ifdef QBMAN_MULTI_TX
-+ while(nb_pkts) {
-+ /*Check if the queue is congested*/
-+ if(qbman_result_is_CSCN(dpaa2_q->cscn))
-+ goto skip_tx;
-+
-+ frames_to_send = (nb_pkts >> 3) ? MAX_SLOTS : nb_pkts;
-+
-+ for(loop = 0; loop < frames_to_send; loop++) {
-+ fd_arr[loop].simple.frc = 0;
-+ DPAA2_RESET_FD_CTRL((&fd_arr[loop]));
-+ DPAA2_SET_FD_FLC((&fd_arr[loop]), NULL);
-+ mp = (*bufs)->pool;
-+ /* Not a hw_pkt pool allocated frame */
-+ if (mp && !(mp->flags & MEMPOOL_F_HW_PKT_POOL)) {
-+				printf("\n non hw offload buffer ");
-+ /* alloc should be from the default buffer pool
-+ attached to this interface */
-+ bpid = priv->bp_list->buf_pool.bpid;
-+ if (eth_copy_mbuf_to_fd(*bufs, &fd_arr[loop], bpid)) {
-+ bufs++;
-+ continue;
-+ }
-+ } else {
-+ bpid = mp->offload_ptr;
-+ eth_mbuf_to_fd(*bufs, &fd_arr[loop], bpid);
-+ }
-+ bufs++;
-+ }
-+ loop = 0;
-+ while(loop < frames_to_send) {
-+ loop += qbman_swp_send_multiple(swp, &eqdesc,
-+ &fd_arr[loop], frames_to_send - loop);
-+ }
-+
-+ num_tx += frames_to_send;
-+ dpaa2_q->tx_pkts += frames_to_send;
-+ nb_pkts -= frames_to_send;
-+ }
-+#else
-+ /*Check if the queue is congested*/
-+// if(qbman_result_is_CSCN(dpaa2_q->cscn))
-+// goto skip_tx;
-+
-+ fd.simple.frc = 0;
-+ DPAA2_RESET_FD_CTRL((&fd));
-+ DPAA2_SET_FD_FLC((&fd), NULL);
-+ loop = 0;
-+
-+ while (loop < nb_pkts) {
-+ /*Prepare each packet which is to be sent*/
-+ mp = bufs[loop]->pool;
-+ /* Not a hw_pkt pool allocated frame */
-+ if (mp && !(mp->flags & MEMPOOL_F_HW_PKT_POOL)) {
-+			printf("\n non hw offload buffer ");
-+ /* alloc should be from the default buffer pool
-+ attached to this interface */
-+ if (priv->bp_list)
-+ bpid = priv->bp_list->buf_pool.bpid;
-+ else
-+ printf("\n ??? why no bpool attached");
-+
-+ if (eth_copy_mbuf_to_fd(bufs[loop], &fd, bpid)) {
-+ loop++;
-+ continue;
-+ }
-+ } else {
-+ bpid = mp->offload_ptr;
-+ eth_mbuf_to_fd(bufs[loop], &fd, bpid);
-+ }
-+ /*Enqueue a single packet to the QBMAN*/
-+ do {
-+ ret = qbman_swp_enqueue(swp, &eqdesc, &fd);
-+ if (ret != 0) {
-+				PMD_DRV_LOG(DEBUG, "Error in transmitting the frame\n");
-+ }
-+ } while (ret != 0);
-+
-+ /* Free the buffer shell */
-+ /* rte_pktmbuf_free(bufs[loop]); */
-+ num_tx++; loop++;
-+ }
-+ dpaa2_q->tx_pkts += num_tx;
-+ dpaa2_q->err_pkts += nb_pkts - num_tx;
-+#endif
-+ skip_tx:
-+ return num_tx;
-+}
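-+
-+/*
-+ * Illustrative usage sketch (assumed example, not driver code): the transmit
-+ * callback above is invoked through rte_eth_tx_burst(); packets the PMD does
-+ * not accept remain owned by the caller and must be freed or retried.
-+ * example_send_burst is a placeholder name.
-+ */
-+static inline void
-+example_send_burst(uint8_t port_id, uint16_t queue_id,
-+		   struct rte_mbuf **pkts, uint16_t nb)
-+{
-+	/* Dispatches to eth_dpaa2_tx() for DPAA2 ports */
-+	uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb);
-+
-+	while (sent < nb)
-+		rte_pktmbuf_free(pkts[sent++]); /* drop any unsent packets */
-+}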
-+
-+static int
-+dpaa2_vlan_stripping_set(struct rte_eth_dev *dev, int on)
-+{
-+ int ret;
-+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
-+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
-+
-+ PMD_INIT_FUNC_TRACE();
-+
-+ if (dpni == NULL) {
-+ PMD_DRV_LOG(ERR, "dpni is NULL");
-+ return -1;
-+ }
-+
-+ ret = dpni_set_vlan_removal(dpni, CMD_PRI_LOW, priv->token, on);
-+ if (ret < 0)
-+ PMD_DRV_LOG(ERR, "Unable to dpni_set_vlan_removal hwid =%d",
-+ priv->hw_id);
-+ return ret;
-+}
-+
-+static int
-+dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
-+{
-+ int ret;
-+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
-+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
-+
-+ if (dpni == NULL) {
-+ PMD_DRV_LOG(ERR, "dpni is NULL");
-+ return -1;
-+ }
-+
-+ if (on)
-+ ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW, priv->token, vlan_id);
-+ else
-+ ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW, priv->token, vlan_id);
-+
-+ if (ret < 0)
-+ PMD_DRV_LOG(ERR, "ret = %d Unable to add/rem vlan %d hwid =%d",
-+ ret, vlan_id, priv->hw_id);
-+
-+ /*todo this should on global basis */
-+/* ret = dpni_set_vlan_filters(dpni, CMD_PRI_LOW, priv->token, on);
-+ if (ret < 0)
-+ PMD_DRV_LOG(ERR, "Unable to set vlan filter");
-+*/ return ret;
-+}
-+
-+static void
-+dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
-+{
-+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
-+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
-+ int ret;
-+ if (mask & ETH_VLAN_FILTER_MASK) {
-+ if (dev->data->dev_conf.rxmode.hw_vlan_filter)
-+ ret = dpni_set_vlan_filters(dpni, CMD_PRI_LOW, priv->token, TRUE);
-+ else
-+ ret = dpni_set_vlan_filters(dpni, CMD_PRI_LOW, priv->token, FALSE);
-+ if (ret < 0)
-+ PMD_DRV_LOG(ERR, "ret = %d Unable to set vlan filter", ret);
-+ }
-+
-+ if (mask & ETH_VLAN_STRIP_MASK) {
-+ /* Enable or disable VLAN stripping */
-+ if (dev->data->dev_conf.rxmode.hw_vlan_strip)
-+ dpaa2_vlan_stripping_set(dev, TRUE);
-+ else
-+ dpaa2_vlan_stripping_set(dev, FALSE);
-+ }
-+
-+ if (mask & ETH_VLAN_EXTEND_MASK) {
-+ PMD_INIT_FUNC_TRACE();
-+/* if (dev->data->dev_conf.rxmode.hw_vlan_extend)
-+ i40e_vsi_config_double_vlan(vsi, TRUE);
-+ else
-+ i40e_vsi_config_double_vlan(vsi, FALSE);
-+*/ }
-+}
-+
-+static void
-+dpaa2_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
-+{
-+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
-+
-+ dev_info->driver_name = drivername;
-+ dev_info->if_index = priv->hw_id;
-+ dev_info->max_mac_addrs = priv->max_unicast_filters;
-+ dev_info->max_rx_pktlen = (uint32_t)-1;
-+ dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
-+ dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
-+ dev_info->min_rx_bufsize = 0;
-+ dev_info->pci_dev = dev->pci_dev;
-+/* dev_info->rx_offload_capa =
-+ DEV_RX_OFFLOAD_IPV4_CKSUM |
-+ DEV_RX_OFFLOAD_UDP_CKSUM |
-+ DEV_RX_OFFLOAD_TCP_CKSUM;
-+ dev_info->tx_offload_capa =
-+ DEV_TX_OFFLOAD_IPV4_CKSUM |
-+ DEV_TX_OFFLOAD_UDP_CKSUM |
-+ DEV_TX_OFFLOAD_TCP_CKSUM |
-+ DEV_TX_OFFLOAD_SCTP_CKSUM;
-+*/
-+}
-+
-+static int
-+dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
-+{
-+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
-+ uint8_t tc_idx;
-+ uint16_t dist_idx;
-+ uint32_t vq_id;
-+ struct dpaa2_queue *mc_q, *mcq;
-+ uint32_t tot_queues;
-+ int i;
-+ struct dpaa2_queue *dpaa2_q;
-+ tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
-+ mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
-+ RTE_CACHE_LINE_SIZE);
-+ if (!mc_q) {
-+ PMD_DRV_LOG(ERR, "malloc failed for rx/tx queues\n");
-+ return -1;
-+ }
-+
-+ for (i = 0; i < priv->nb_rx_queues; i++) {
-+ mc_q->dev = dev;
-+ priv->rx_vq[i] = mc_q++;
-+ dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
-+ dpaa2_q->q_storage = rte_malloc("dq_storage",
-+ sizeof(struct queue_storage_info_t),
-+ RTE_CACHE_LINE_SIZE);
-+ if(!dpaa2_q->q_storage)
-+ goto fail;
-+
-+ memset(dpaa2_q->q_storage, 0, sizeof(struct queue_storage_info_t));
-+ }
-+
-+ for (i = 0; i < priv->nb_tx_queues; i++) {
-+ mc_q->dev = dev;
-+ priv->tx_vq[i] = mc_q++;
-+ }
-+
-+ vq_id = 0;
-+ for (tc_idx = 0; tc_idx < priv->num_tc; tc_idx++) {
-+ for (dist_idx = 0; dist_idx < priv->num_dist_per_tc[tc_idx]; dist_idx++) {
-+ mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
-+ mcq->tc_index = tc_idx;
-+ mcq->flow_id = dist_idx;
-+ vq_id++;
-+ }
-+ }
-+
-+ return 0;
-+fail:
-+ i -= 1;
-+	while (i >= 0) {
-+		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
-+		rte_free(dpaa2_q->q_storage);
-+		i--;
-+	}
-+ return -1;
-+}
-+
-+static void dpaa2_distset_to_dpkg_profile_cfg(
-+ uint32_t req_dist_set,
-+ struct dpkg_profile_cfg *kg_cfg)
-+{
-+ uint32_t loop = 0, i = 0, dist_field = 0;
-+ int l2_configured = 0, l3_configured = 0;
-+ int l4_configured = 0, sctp_configured = 0;
-+
-+ memset(kg_cfg, 0, sizeof(struct dpkg_profile_cfg));
-+ while (req_dist_set) {
-+ if (req_dist_set % 2 != 0) {
-+ dist_field = 1U << loop;
-+ switch (dist_field) {
-+ case ETH_RSS_L2_PAYLOAD:
-+
-+ if (l2_configured)
-+ break;
-+ l2_configured = 1;
-+
-+ kg_cfg->extracts[i].extract.from_hdr.prot =
-+ NET_PROT_ETH;
-+ kg_cfg->extracts[i].extract.from_hdr.field =
-+ NH_FLD_ETH_TYPE;
-+ kg_cfg->extracts[i].type = DPKG_EXTRACT_FROM_HDR;
-+ kg_cfg->extracts[i].extract.from_hdr.type =
-+ DPKG_FULL_FIELD;
-+ i++;
-+ break;
-+
-+ case ETH_RSS_IPV4:
-+ case ETH_RSS_FRAG_IPV4:
-+ case ETH_RSS_NONFRAG_IPV4_OTHER:
-+ case ETH_RSS_IPV6:
-+ case ETH_RSS_FRAG_IPV6:
-+ case ETH_RSS_NONFRAG_IPV6_OTHER:
-+ case ETH_RSS_IPV6_EX:
-+
-+ if (l3_configured)
-+ break;
-+ l3_configured = 1;
-+
-+ kg_cfg->extracts[i].extract.from_hdr.prot =
-+ NET_PROT_IP;
-+ kg_cfg->extracts[i].extract.from_hdr.field =
-+ NH_FLD_IP_SRC;
-+ kg_cfg->extracts[i].type = DPKG_EXTRACT_FROM_HDR;
-+ kg_cfg->extracts[i].extract.from_hdr.type =
-+ DPKG_FULL_FIELD;
-+ i++;
-+
-+ kg_cfg->extracts[i].extract.from_hdr.prot =
-+ NET_PROT_IP;
-+ kg_cfg->extracts[i].extract.from_hdr.field =
-+ NH_FLD_IP_DST;
-+ kg_cfg->extracts[i].type = DPKG_EXTRACT_FROM_HDR;
-+ kg_cfg->extracts[i].extract.from_hdr.type =
-+ DPKG_FULL_FIELD;
-+ i++;
-+
-+ kg_cfg->extracts[i].extract.from_hdr.prot =
-+ NET_PROT_IP;
-+ kg_cfg->extracts[i].extract.from_hdr.field =
-+ NH_FLD_IP_PROTO;
-+ kg_cfg->extracts[i].type = DPKG_EXTRACT_FROM_HDR;
-+ kg_cfg->extracts[i].extract.from_hdr.type =
-+ DPKG_FULL_FIELD;
-+ kg_cfg->num_extracts++;
-+ i++;
-+ break;
-+
-+ case ETH_RSS_NONFRAG_IPV4_TCP:
-+ case ETH_RSS_NONFRAG_IPV6_TCP:
-+ case ETH_RSS_NONFRAG_IPV4_UDP:
-+ case ETH_RSS_NONFRAG_IPV6_UDP:
-+
-+ if (l4_configured)
-+ break;
-+ l4_configured = 1;
-+
-+ kg_cfg->extracts[i].extract.from_hdr.prot =
-+ NET_PROT_TCP;
-+ kg_cfg->extracts[i].extract.from_hdr.field =
-+ NH_FLD_TCP_PORT_SRC;
-+ kg_cfg->extracts[i].type = DPKG_EXTRACT_FROM_HDR;
-+ kg_cfg->extracts[i].extract.from_hdr.type =
-+ DPKG_FULL_FIELD;
-+ i++;
-+
-+ kg_cfg->extracts[i].extract.from_hdr.prot =
-+ NET_PROT_TCP;
-+ kg_cfg->extracts[i].extract.from_hdr.field =
-+							NH_FLD_TCP_PORT_DST;
-+ kg_cfg->extracts[i].type = DPKG_EXTRACT_FROM_HDR;
-+ kg_cfg->extracts[i].extract.from_hdr.type =
-+ DPKG_FULL_FIELD;
-+ i++;
-+ break;
-+
-+ case ETH_RSS_NONFRAG_IPV4_SCTP:
-+ case ETH_RSS_NONFRAG_IPV6_SCTP:
-+
-+ if (sctp_configured)
-+ break;
-+ sctp_configured = 1;
-+
-+ kg_cfg->extracts[i].extract.from_hdr.prot =
-+ NET_PROT_SCTP;
-+ kg_cfg->extracts[i].extract.from_hdr.field =
-+ NH_FLD_SCTP_PORT_SRC;
-+ kg_cfg->extracts[i].type = DPKG_EXTRACT_FROM_HDR;
-+ kg_cfg->extracts[i].extract.from_hdr.type =
-+ DPKG_FULL_FIELD;
-+ i++;
-+
-+ kg_cfg->extracts[i].extract.from_hdr.prot =
-+ NET_PROT_SCTP;
-+ kg_cfg->extracts[i].extract.from_hdr.field =
-+ NH_FLD_SCTP_PORT_DST;
-+ kg_cfg->extracts[i].type = DPKG_EXTRACT_FROM_HDR;
-+ kg_cfg->extracts[i].extract.from_hdr.type =
-+ DPKG_FULL_FIELD;
-+ i++;
-+ break;
-+
-+ default:
-+ PMD_DRV_LOG(WARNING, "Bad flow distribution option %x\n", dist_field);
-+ }
-+ }
-+ req_dist_set = req_dist_set >> 1;
-+ loop++;
-+ }
-+ kg_cfg->num_extracts = i;
-+}
-+
-+static int dpaa2_setup_flow_distribution(struct rte_eth_dev *eth_dev,
-+ uint32_t req_dist_set)
-+{
-+ struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
-+ struct fsl_mc_io *dpni = priv->hw;
-+ struct dpni_rx_tc_dist_cfg tc_cfg;
-+ struct dpkg_profile_cfg kg_cfg;
-+ void *p_params;
-+ int ret, tc_index = 0;
-+
-+ p_params = rte_malloc(
-+ NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
-+ if (!p_params) {
-+		PMD_DRV_LOG(ERR, "Memory unavailable\n");
-+ return -ENOMEM;
-+ }
-+ memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
-+ memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
-+
-+ dpaa2_distset_to_dpkg_profile_cfg(req_dist_set, &kg_cfg);
-+ tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
-+ tc_cfg.dist_size = eth_dev->data->nb_rx_queues;
-+ tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;
-+
-+ ret = dpni_prepare_key_cfg(&kg_cfg, p_params);
-+ if (ret) {
-+ PMD_DRV_LOG(ERR, "Unable to prepare extract parameters\n");
-+ rte_free(p_params);
-+ return ret;
-+ }
-+
-+ ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index,
-+ &tc_cfg);
-+ rte_free(p_params);
-+ if (ret) {
-+		PMD_DRV_LOG(ERR, "Setting distribution for Rx failed with "
-+			"err code: %d\n", ret);
-+ return ret;
-+ }
-+
-+ return 0;
-+}
-+
-+static int
-+dpaa2_remove_flow_distribution(struct rte_eth_dev *eth_dev, uint8_t tc_index)
-+{
-+ struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
-+ struct fsl_mc_io *dpni = priv->hw;
-+ struct dpni_rx_tc_dist_cfg tc_cfg;
-+ struct dpkg_profile_cfg kg_cfg;
-+ void *p_params;
-+ int ret;
-+
-+ p_params = rte_malloc(
-+ NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
-+ if (!p_params) {
-+		PMD_DRV_LOG(ERR, "Memory unavailable\n");
-+ return -ENOMEM;
-+ }
-+ memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
-+ memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
-+
-+ tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
-+ tc_cfg.dist_size = 0;
-+ tc_cfg.dist_mode = DPNI_DIST_MODE_NONE;
-+
-+ ret = dpni_prepare_key_cfg(&kg_cfg, p_params);
-+ if (ret) {
-+ PMD_DRV_LOG(ERR, "Unable to prepare extract parameters\n");
-+ rte_free(p_params);
-+ return ret;
-+ }
-+
-+ ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index,
-+ &tc_cfg);
-+ rte_free(p_params);
-+ if (ret) {
-+		PMD_DRV_LOG(ERR, "Setting distribution for Rx failed with "
-+			"err code: %d\n", ret);
-+ return ret;
-+ }
-+ return ret;
-+}
-+
-+static int
-+dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage)
-+{
-+ int i=0;
-+
-+ for(i = 0;i < NUM_DQS_PER_QUEUE; i++) {
-+ q_storage->dq_storage[i] = rte_malloc(NULL,
-+ NUM_MAX_RECV_FRAMES * sizeof(struct qbman_result),
-+ RTE_CACHE_LINE_SIZE);
-+ if(!q_storage->dq_storage[i])
-+ goto fail;
-+ /*setting toggle for initial condition*/
-+ q_storage->toggle = -1;
-+ }
-+ return 0;
-+fail:
-+ i -= 1;
-+	while (i >= 0) {
-+		rte_free(q_storage->dq_storage[i]);
-+		i--;
-+	}
-+ return -1;
-+}
-+
-+static int
-+dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
-+{
-+ struct rte_eth_dev_data *data = dev->data;
-+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
-+ struct rte_eth_conf *eth_conf = &data->dev_conf;
-+ struct dpaa2_queue *dpaa2_q;
-+ int i, ret;
-+
-+ for (i = 0; i < data->nb_rx_queues; i++) {
-+ data->rx_queues[i] = priv->rx_vq[i];
-+ dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
-+ if(dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
-+ return -1;
-+ }
-+
-+ for (i = 0; i < data->nb_tx_queues; i++) {
-+ data->tx_queues[i] = priv->tx_vq[i];
-+ dpaa2_q = (struct dpaa2_queue *)data->tx_queues[i];
-+ dpaa2_q->cscn = rte_malloc(NULL, sizeof(struct qbman_result), 16);
-+ if(!dpaa2_q->cscn)
-+ goto fail_tx_queue;
-+ }
-+
-+ /* Check for correct configuration */
-+ if (eth_conf->rxmode.mq_mode != ETH_MQ_RX_RSS &&
-+ data->nb_rx_queues > 1) {
-+ PMD_DRV_LOG(ERR, "Distribution is not enabled, "
-+ "but Rx queues more than 1\n");
-+ return -1;
-+ }
-+
-+ if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
-+ /* Return in case number of Rx queues is 1 */
-+ if (data->nb_rx_queues == 1)
-+ return 0;
-+ ret = dpaa2_setup_flow_distribution(dev,
-+ eth_conf->rx_adv_conf.rss_conf.rss_hf);
-+ if (ret) {
-+ PMD_DRV_LOG(ERR, "dpaa2_setup_flow_distribution failed\n");
-+ return ret;
-+ }
-+ }
-+
-+ return 0;
-+ fail_tx_queue:
-+ i -= 1;
-+	while (i >= 0) {
-+		dpaa2_q = (struct dpaa2_queue *)data->tx_queues[i];
-+		rte_free(dpaa2_q->cscn);
-+		i--;
-+	}
-+ return -1;
-+}
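-+
-+/*
-+ * Illustrative configuration sketch (assumed example, not driver code): the
-+ * checks above only enable flow distribution when the application requests
-+ * RSS, e.g. with a device configuration along these lines.
-+ * example_configure_rss is a placeholder name; <rte_ethdev.h> and <string.h>
-+ * are assumed to be included.
-+ */
-+static inline int
-+example_configure_rss(uint8_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
-+{
-+	struct rte_eth_conf conf;
-+
-+	memset(&conf, 0, sizeof(conf));
-+	conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
-+	conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP;
-+
-+	/* Ends up in dpaa2_eth_dev_configure() -> dpaa2_setup_flow_distribution() */
-+	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
-+}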
-+
-+static int dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv,
-+ void *blist)
-+{
-+	/* Function to attach a buffer pool list to a DPNI. The buffer pool
-+	 * list handle is passed in blist.
-+	 */
-+ int32_t retcode;
-+ struct fsl_mc_io *dpni = priv->hw;
-+ struct dpni_pools_cfg bpool_cfg;
-+ struct dpaa2_bp_list *bp_list = (struct dpaa2_bp_list *)blist;
-+
-+ /*Attach buffer pool to the network interface as described by the user*/
-+ bpool_cfg.num_dpbp = 1;
-+ bpool_cfg.pools[0].dpbp_id = bp_list->buf_pool.dpbp_node->dpbp_id;
-+ bpool_cfg.pools[0].backup_pool = 0;
-+ bpool_cfg.pools[0].buffer_size =
-+ DPAA2_ALIGN_ROUNDUP(bp_list->buf_pool.size,
-+				DPAA2_PACKET_LAYOUT_ALIGN);
-+
-+ retcode = dpni_set_pools(dpni, CMD_PRI_LOW, priv->token, &bpool_cfg);
-+ if (retcode != 0) {
-+		PMD_DRV_LOG(ERR, "Error in attaching the buffer pool list, "
-+						"bpid = %d Error code = %d\n",
-+ bpool_cfg.pools[0].dpbp_id, retcode);
-+ return retcode;
-+ }
-+
-+ priv->bp_list = bp_list;
-+ return 0;
-+}
-+
-+/* Function to setup RX flow information. It contains traffic class ID,
-+ * flow ID, destination configuration etc.
-+ */
-+static int
-+dpaa2_rx_queue_setup(struct rte_eth_dev *dev,
-+ uint16_t rx_queue_id,
-+ uint16_t nb_rx_desc __rte_unused,
-+ unsigned int socket_id __rte_unused,
-+ const struct rte_eth_rxconf *rx_conf __rte_unused,
-+ struct rte_mempool *mb_pool)
-+{
-+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
-+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
-+ struct dpaa2_queue *dpaa2_q;
-+ struct dpni_queue_cfg cfg;
-+ uint8_t tc_id, flow_id;
-+ int ret;
-+
-+ PMD_DRV_LOG(INFO, "\n dev =%p, queue =%d, pool = %p, conf =%p",
-+ dev, rx_queue_id, mb_pool, rx_conf);
-+
-+ if (!priv->bp_list) {
-+ if (mb_pool->offload_ptr > MAX_BPID) {
-+			printf("\n ??? ERR - %s not an offloaded buffer pool",
-+ __func__);
-+ return -1;
-+ }
-+ ret = dpaa2_attach_bp_list(priv,
-+ bpid_info[mb_pool->offload_ptr].bp_list);
-+ if (ret)
-+ return ret;
-+ }
-+ dpaa2_q = (struct dpaa2_queue *)dev->data->rx_queues[rx_queue_id];
-+
-+ /*Get the tc id and flow id from given VQ id*/
-+ tc_id = rx_queue_id / MAX_DIST_PER_TC;
-+ flow_id = rx_queue_id % MAX_DIST_PER_TC;
-+ memset(&cfg, 0, sizeof(struct dpni_queue_cfg));
-+
-+ cfg.options = cfg.options | DPNI_QUEUE_OPT_USER_CTX;
-+
-+#ifdef DPAA2_STASHING
-+ cfg.options = cfg.options | DPNI_QUEUE_OPT_FLC;
-+#endif
-+
-+ cfg.user_ctx = (uint64_t)(dpaa2_q);
-+#ifdef DPAA2_STASHING
-+ cfg.flc_cfg.flc_type = DPNI_FLC_STASH;
-+ cfg.flc_cfg.frame_data_size = DPNI_STASH_SIZE_64B;
-+ /* Enabling Annotation stashing */
-+ cfg.options |= DPNI_FLC_STASH_FRAME_ANNOTATION;
-+ cfg.flc_cfg.options = DPNI_FLC_STASH_FRAME_ANNOTATION;
-+#endif
-+
-+ cfg.options = cfg.options | DPNI_QUEUE_OPT_TAILDROP_THRESHOLD;
-+	cfg.tail_drop_threshold = 2048;	/* roughly 16 packets */
-+
-+ ret = dpni_set_rx_flow(dpni, CMD_PRI_LOW, priv->token,
-+ tc_id, flow_id, &cfg);
-+ if (ret) {
-+ PMD_DRV_LOG(ERR, "Error in setting the rx flow: = %d\n", ret);
-+ return -1;
-+ }
-+ return 0;
-+}
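-+
-+/*
-+ * Illustrative setup sketch (assumed example, not driver code): the mempool
-+ * passed here must be a DPAA2 hardware-offloaded pool (its offload_ptr must
-+ * carry a valid bpid), otherwise the check above rejects it.
-+ * example_setup_rxq is a placeholder name; <rte_ethdev.h> and <rte_lcore.h>
-+ * are assumed to be included.
-+ */
-+static inline int
-+example_setup_rxq(uint8_t port_id, uint16_t qid, struct rte_mempool *hw_pool)
-+{
-+	/* nb_rx_desc and rx_conf are ignored by this PMD (see above) */
-+	return rte_eth_rx_queue_setup(port_id, qid, 128, rte_socket_id(),
-+				      NULL, hw_pool);
-+}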
-+
-+static int
-+dpaa2_tx_queue_setup(struct rte_eth_dev *dev,
-+ uint16_t tx_queue_id,
-+ uint16_t nb_tx_desc __rte_unused,
-+ unsigned int socket_id __rte_unused,
-+ const struct rte_eth_txconf *tx_conf __rte_unused)
-+{
-+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
-+ struct dpaa2_queue *dpaa2_q;
-+ struct fsl_mc_io *dpni = priv->hw;
-+ struct dpni_tx_flow_cfg cfg;
-+ struct dpni_tx_conf_cfg tx_conf_cfg;
-+#ifdef QBMAN_MULTI_TX
-+ struct dpni_congestion_notification_cfg cong_notif_cfg;
-+#endif
-+ uint32_t tc_idx;
-+ uint16_t flow_id = DPNI_NEW_FLOW_ID;
-+ int ret;
-+
-+ PMD_INIT_FUNC_TRACE();
-+
-+ memset(&cfg, 0, sizeof(struct dpni_tx_flow_cfg));
-+ cfg.l3_chksum_gen = 1;
-+ cfg.options |= DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN;
-+ cfg.l4_chksum_gen = 1;
-+	cfg.options |= DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN;
-+ memset(&tx_conf_cfg, 0, sizeof(struct dpni_tx_conf_cfg));
-+ tx_conf_cfg.errors_only = TRUE;
-+
-+ /*
-+ if (action & DPAA2BUF_TX_CONF_REQUIRED) {
-+ cfg.options = DPNI_TX_FLOW_OPT_TX_CONF_ERROR;
-+ cfg.use_common_tx_conf_queue =
-+ ((action & DPAA2BUF_TX_CONF_ERR_ON_COMMON_Q) ?
-+ TRUE : FALSE);
-+ tx_conf_cfg.errors_only = FALSE;
-+ }*/
-+
-+ if (priv->num_tc == 1)
-+ tc_idx = 0;
-+ else
-+ tc_idx = tx_queue_id;
-+
-+ ret = dpni_set_tx_flow(dpni, CMD_PRI_LOW, priv->token, &flow_id, &cfg);
-+ if (ret) {
-+		PMD_DRV_LOG(ERR, "Error in setting the tx flow: "
-+			"ErrorCode = %x\n", ret);
-+ return -1;
-+ }
-+ /*Set tx-conf and error configuration*/
-+ ret = dpni_set_tx_conf(dpni, CMD_PRI_LOW, priv->token,
-+ flow_id, &tx_conf_cfg);
-+ if (ret) {
-+ PMD_DRV_LOG(ERR, "Error in setting tx conf settings: "
-+ "ErrorCode = %x", ret);
-+ return -1;
-+ }
-+
-+ if (tx_queue_id == 0) {
-+ /*Set tx-conf and error configuration*/
-+ ret = dpni_set_tx_conf(dpni, CMD_PRI_LOW, priv->token,
-+ DPNI_COMMON_TX_CONF, &tx_conf_cfg);
-+ if (ret) {
-+ PMD_DRV_LOG(ERR, "Error in setting tx conf settings: "
-+ "ErrorCode = %x", ret);
-+ return -1;
-+ }
-+ }
-+ /*todo - add the queue id support instead of hard queue id as "0" */
-+ dpaa2_q = (struct dpaa2_queue *)dev->data->tx_queues[tx_queue_id];
-+ dpaa2_q->tc_index = tc_idx;
-+ if (flow_id == DPNI_NEW_FLOW_ID)
-+ dpaa2_q->flow_id = 0;
-+ else
-+ dpaa2_q->flow_id = flow_id;
-+
-+#ifdef QBMAN_MULTI_TX
-+ cong_notif_cfg.units = DPNI_CONGESTION_UNIT_BYTES;
-+	/* Notify about congestion when the queue size reaches 128 frames of
-+	 * 64 bytes each.
-+	 */
-+	cong_notif_cfg.threshold_entry = CONG_ENTER_THRESHOLD;
-+	/* Notify that the queue is no longer congested when the number of
-+	 * frames in the queue drops below this threshold.
-+	 * TODO: check whether this value is optimal for performance.
-+	 */
-+ cong_notif_cfg.threshold_exit = CONG_EXIT_THRESHOLD;
-+ cong_notif_cfg.message_ctx = 0;
-+ cong_notif_cfg.message_iova = (uint64_t)dpaa2_q->cscn;
-+ cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
-+ cong_notif_cfg.options = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
-+ DPNI_CONG_OPT_WRITE_MEM_ON_EXIT | DPNI_CONG_OPT_COHERENT_WRITE;
-+
-+ ret = dpni_set_tx_tc_congestion_notification(dpni, CMD_PRI_LOW,
-+ priv->token,
-+ tc_idx, &cong_notif_cfg);
-+ if(ret) {
-+ PMD_DRV_LOG(ERR, "Error in setting tx congestion notification "
-+ "settings: ErrorCode = %x", ret);
-+ return -1;
-+ }
-+#endif
-+ return 0;
-+}
-+
-+static const uint32_t *
-+dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
-+{
-+ static const uint32_t ptypes[] = {
-+		/* todo - add more types */
-+ RTE_PTYPE_L2_ETHER,
-+ RTE_PTYPE_L3_IPV4,
-+ RTE_PTYPE_L3_IPV4_EXT,
-+ RTE_PTYPE_L3_IPV6,
-+ RTE_PTYPE_L3_IPV6_EXT,
-+ RTE_PTYPE_L4_TCP,
-+ RTE_PTYPE_L4_UDP,
-+ RTE_PTYPE_L4_SCTP,
-+ RTE_PTYPE_L4_ICMP,
-+ RTE_PTYPE_UNKNOWN
-+ };
-+
-+ if (dev->rx_pkt_burst == eth_dpaa2_prefetch_rx ||
-+ dev->rx_pkt_burst == eth_dpaa2_rx)
-+ return ptypes;
-+ return NULL;
-+}
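-+
-+/*
-+ * Illustrative query sketch (assumed example, not driver code): the table
-+ * above is what an application sees through the generic ptype query.
-+ * example_query_l3_ptypes is a placeholder name.
-+ */
-+static inline int
-+example_query_l3_ptypes(uint8_t port_id)
-+{
-+	uint32_t ptypes[8];
-+
-+	/* Returns the number of RTE_PTYPE_L3_* entries reported above */
-+	return rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L3_MASK,
-+						ptypes, 8);
-+}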
-+
-+static int
-+dpaa2_dev_start(struct rte_eth_dev *dev)
-+{
-+ struct rte_eth_dev_data *data = dev->data;
-+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
-+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
-+ struct dpni_queue_attr cfg;
-+ uint16_t qdid;
-+ struct dpaa2_queue *dpaa2_q;
-+ int ret, i, mask = 0;
-+
-+ PMD_INIT_FUNC_TRACE();
-+
-+ dev->data->dev_link.link_status = 1;
-+
-+ ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
-+ if (ret) {
-+ PMD_DRV_LOG(ERR, "Failure %d in enabling dpni %d device\n",
-+ ret, priv->hw_id);
-+ return ret;
-+ }
-+
-+ ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token, &qdid);
-+ if (ret) {
-+ PMD_DRV_LOG(ERR, "Error to get qdid:ErrorCode = %d\n", ret);
-+ return ret;
-+ }
-+ priv->qdid = qdid;
-+
-+ for (i = 0; i < data->nb_rx_queues; i++) {
-+ dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
-+ ret = dpni_get_rx_flow(dpni, CMD_PRI_LOW, priv->token,
-+ dpaa2_q->tc_index, dpaa2_q->flow_id, &cfg);
-+ if (ret) {
-+ PMD_DRV_LOG(ERR, "Error to get flow "
-+ "information Error code = %d\n", ret);
-+ return ret;
-+ }
-+ dpaa2_q->fqid = cfg.fqid;
-+ }
-+ /*
-+ * VLAN Offload Settings
-+ */
-+ if (priv->options & DPNI_OPT_VLAN_FILTER)
-+ mask = ETH_VLAN_FILTER_MASK;
-+
-+ if (priv->options & DPNI_OPT_VLAN_MANIPULATION)
-+ mask = ETH_VLAN_STRIP_MASK;
-+
-+ if (mask)
-+ dpaa2_vlan_offload_set(dev, mask);
-+
-+ return 0;
-+}
-+
-+/*********************************************************************
-+ *
-+ * This routine disables all traffic on the interface by disabling the
-+ * underlying DPNI.
-+ *
-+ **********************************************************************/
-+static void
-+dpaa2_dev_stop(struct rte_eth_dev *dev)
-+{
-+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
-+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
-+ int ret;
-+ struct rte_eth_link link;
-+
-+ dev->data->dev_link.link_status = 0;
-+
-+ ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
-+ if (ret) {
-+ PMD_DRV_LOG(ERR, "Failure in disabling dpni %d device\n", priv->hw_id);
-+ return;
-+ }
-+
-+ /* clear the recorded link status */
-+ memset(&link, 0, sizeof(link));
-+ rte_dpni_dev_atomic_write_link_status(dev, &link);
-+}
-+
-+static void
-+dpaa2_dev_close(struct rte_eth_dev *dev)
-+{
-+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
-+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
-+ int ret;
-+ struct rte_eth_link link;
-+
-+	/* This function is the reverse of dpaa2_dev_init. It does the following:
-+	 * 1. Detach the DPNI from attached resources, i.e. buffer pools (dpbp_id).
-+	 * 2. Close the DPNI device.
-+	 * 3. Free the allocated resources.
-+	 */
-+
-+ /* Clean the device first */
-+ ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
-+ if (ret) {
-+		PMD_DRV_LOG(ERR, "Failure cleaning dpni device with "
-+					"error code %d\n", ret);
-+ return;
-+ }
-+
-+ /*Close the device at underlying layer*/
-+ ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
-+ if (ret) {
-+		PMD_DRV_LOG(ERR, "Failure closing dpni device with "
-+					"error code %d\n", ret);
-+ return;
-+ }
-+
-+ /*Free the allocated memory for ethernet private data and dpni*/
-+ priv->hw = NULL;
-+ free(dpni);
-+
-+ memset(&link, 0, sizeof(link));
-+ rte_dpni_dev_atomic_write_link_status(dev, &link);
-+}
-+
-+static void
-+dpaa2_dev_promiscuous_enable(
-+ struct rte_eth_dev *dev)
-+{
-+ int ret;
-+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
-+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
-+
-+ if (dpni == NULL) {
-+ PMD_DRV_LOG(ERR, "dpni is NULL");
-+ return;
-+ }
-+
-+ ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, TRUE);
-+ if (ret < 0)
-+ PMD_DRV_LOG(ERR, "Unable to enable promiscuous mode");
-+ return;
-+}
-+
-+static void
-+dpaa2_dev_promiscuous_disable(
-+ struct rte_eth_dev *dev)
-+{
-+ int ret;
-+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
-+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
-+
-+ if (dpni == NULL) {
-+ PMD_DRV_LOG(ERR, "dpni is NULL");
-+ return;
-+ }
-+
-+ ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, FALSE);
-+ if (ret < 0)
-+ PMD_DRV_LOG(ERR, "Unable to disable promiscuous mode");
-+ return;
-+}
-+
-+static void
-+dpaa2_dev_allmulticast_enable(
-+ struct rte_eth_dev *dev)
-+{
-+ int ret;
-+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
-+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
-+
-+ if (dpni == NULL) {
-+ PMD_DRV_LOG(ERR, "dpni is NULL");
-+ return;
-+ }
-+
-+ ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
-+ if (ret < 0)
-+		PMD_DRV_LOG(ERR, "Unable to enable multicast promiscuous mode");
-+ return;
-+}
-+
-+static void
-+dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
-+{
-+ int ret;
-+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
-+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
-+
-+ if (dpni == NULL) {
-+ PMD_DRV_LOG(ERR, "dpni is NULL");
-+ return;
-+ }
-+
-+ ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
-+ if (ret < 0)
-+		PMD_DRV_LOG(ERR, "Unable to disable multicast promiscuous mode");
-+ return;
-+}
-+
-+static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
-+{
-+ int ret;
-+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
-+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
-+ uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
-+
-+ if (dpni == NULL) {
-+ PMD_DRV_LOG(ERR, "dpni is NULL");
-+ return -EINVAL;
-+ }
-+
-+ /* check that mtu is within the allowed range */
-+
-+ if ((mtu < ETHER_MIN_MTU) || (frame_size > ETHER_MAX_JUMBO_FRAME_LEN))
-+ return -EINVAL;
-+
-+ /* Set the Max Rx frame length as 'mtu' +
-+ * Maximum Ethernet header length */
-+ ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
-+ mtu + ETH_VLAN_HLEN);
-+ if (ret) {
-+ PMD_DRV_LOG(ERR, "setting the max frame length failed");
-+ return -1;
-+ }
-+ if (priv->options & DPNI_OPT_IPF) {
-+ ret = dpni_set_mtu(dpni, CMD_PRI_LOW, priv->token, mtu);
-+ if (ret) {
-+ PMD_DRV_LOG(ERR, "Setting the MTU failed");
-+ return -1;
-+ }
-+ }
-+
-+ PMD_DRV_LOG(INFO, "MTU is configured %d for the device\n", mtu);
-+ return 0;
-+}
-+
-+static void
-+dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev,
-+ struct ether_addr *addr,
-+ __rte_unused uint32_t index,
-+ __rte_unused uint32_t pool)
-+{
-+ int ret;
-+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
-+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
-+
-+ if (dpni == NULL) {
-+ PMD_DRV_LOG(ERR, "dpni is NULL");
-+ return;
-+ }
-+
-+ ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW,
-+ priv->token, addr->addr_bytes);
-+ if (ret) {
-+ PMD_DRV_LOG(ERR, "Adding the MAC ADDR failed");
-+ }
-+
-+ return;
-+}
-+
-+static void
-+dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
-+ uint32_t index)
-+{
-+ int ret;
-+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
-+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
-+ struct rte_eth_dev_data *data = dev->data;
-+ struct ether_addr *macaddr;
-+
-+ macaddr = &data->mac_addrs[index];
-+
-+ if (dpni == NULL) {
-+ PMD_DRV_LOG(ERR, "dpni is NULL");
-+ return;
-+ }
-+
-+ ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW,
-+ priv->token, macaddr->addr_bytes);
-+ if (ret) {
-+ PMD_DRV_LOG(ERR, "Removing the MAC ADDR failed");
-+ }
-+
-+ return;
-+}
-+
-+static void
-+dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
-+ struct ether_addr *addr)
-+{
-+ int ret;
-+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
-+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
-+
-+ if (dpni == NULL) {
-+ PMD_DRV_LOG(ERR, "dpni is NULL");
-+ return;
-+ }
-+
-+ ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW,
-+ priv->token, addr->addr_bytes);
-+
-+ if (ret) {
-+ PMD_DRV_LOG(ERR, "Setting the MAC ADDR failed");
-+ }
-+
-+ return;
-+}
-+
-+int dpaa2_dev_get_mac_addr(struct rte_eth_dev *dev,
-+ struct ether_addr *addr)
-+{
-+ int ret;
-+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
-+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
-+
-+ if (dpni == NULL) {
-+ PMD_DRV_LOG(ERR, "dpni is NULL");
-+ return -EINVAL;
-+ }
-+
-+ ret = dpni_get_primary_mac_addr(dpni, CMD_PRI_LOW,
-+ priv->token, addr->addr_bytes);
-+
-+ if (ret) {
-+ PMD_DRV_LOG(ERR, "Getting the MAC ADDR failed");
-+ }
-+
-+ return ret;
-+}
-+
-+/*int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int unicast,
-+ int multicast)
-+
-+
-+int dpni_set_vlan_insertion(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int en)
-+
-+dpni_set_errors_behavior
-+
-+int dpni_get_l3_chksum_validation(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int *en)
-+
-+int dpni_set_l3_chksum_validation(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int en)
-+
-+int dpni_get_l4_chksum_validation(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int *en)
-+
-+int dpni_set_l4_chksum_validation(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ int en)
-+
-+*/
-+
-+static int dpaa2_timestamp_enable(struct rte_eth_dev *dev)
-+{
-+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
-+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
-+
-+ struct dpni_buffer_layout layout;
-+ int ret;
-+
-+ layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
-+ layout.pass_timestamp = TRUE;
-+
-+ ret = dpni_set_rx_buffer_layout(dpni, CMD_PRI_LOW, priv->token, &layout);
-+ if (ret) {
-+		PMD_DRV_LOG(ERR, "Enabling timestamp for Rx failed with "
-+			"err code: %d", ret);
-+ return ret;
-+ }
-+
-+ ret = dpni_set_tx_buffer_layout(dpni, CMD_PRI_LOW, priv->token, &layout);
-+ if (ret) {
-+		PMD_DRV_LOG(ERR, "Enabling timestamp failed for Tx with "
-+			"err code: %d", ret);
-+ return ret;
-+ }
-+
-+ ret = dpni_set_tx_conf_buffer_layout(dpni, CMD_PRI_LOW,
-+ priv->token, &layout);
-+ if (ret) {
-+		PMD_DRV_LOG(ERR, "Enabling timestamp failed for Tx-conf with "
-+			"err code: %d", ret);
-+ return ret;
-+ }
-+
-+ return 0;
-+}
-+
-+static int dpaa2_timestamp_disable(struct rte_eth_dev *dev)
-+{
-+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
-+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
-+ struct dpni_buffer_layout layout;
-+ int ret;
-+
-+ layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
-+ layout.pass_timestamp = FALSE;
-+
-+ ret = dpni_set_rx_buffer_layout(dpni, CMD_PRI_LOW, priv->token, &layout);
-+ if (ret) {
-+		PMD_DRV_LOG(ERR, "Disabling timestamp failed for Rx with "
-+			"err code: %d", ret);
-+ return ret;
-+ }
-+
-+ ret = dpni_set_tx_buffer_layout(dpni, CMD_PRI_LOW, priv->token, &layout);
-+ if (ret) {
-+		PMD_DRV_LOG(ERR, "Disabling timestamp failed for Tx with "
-+			"err code: %d", ret);
-+ return ret;
-+ }
-+
-+ ret = dpni_set_tx_conf_buffer_layout(dpni, CMD_PRI_LOW,
-+ priv->token, &layout);
-+ if (ret) {
-+		PMD_DRV_LOG(ERR, "Disabling timestamp failed for Tx-conf with "
-+			"err code: %d", ret);
-+ return ret;
-+ }
-+
-+ return ret;
-+}
-+
-+/* return 0 means link status changed, -1 means not changed */
-+static int
-+dpaa2_dev_get_link_info(struct rte_eth_dev *dev,
-+ int wait_to_complete __rte_unused)
-+{
-+ int ret;
-+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
-+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
-+ struct rte_eth_link link, old;
-+ struct dpni_link_state state = {0};
-+
-+ if (dpni == NULL) {
-+ PMD_DRV_LOG(ERR, "dpni is NULL");
-+ return 0;
-+ }
-+	memset(&link, 0, sizeof(link));
-+	memset(&old, 0, sizeof(old));
-+ rte_dpni_dev_atomic_read_link_status(dev, &old);
-+
-+ ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
-+ if (ret < 0) {
-+ PMD_DRV_LOG(ERR, "dpni_get_link_state");
-+ return 0;
-+ }
-+
-+ if (state.up == 0) {
-+ rte_dpni_dev_atomic_write_link_status(dev, &link);
-+ if (state.up == old.link_status)
-+ return -1;
-+ return 0;
-+ }
-+ link.link_status = state.up;
-+ link.link_speed = state.rate;
-+
-+ if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
-+ link.link_duplex = ETH_LINK_HALF_DUPLEX;
-+ else
-+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
-+
-+ rte_dpni_dev_atomic_write_link_status(dev, &link);
-+
-+ if (link.link_status == old.link_status)
-+ return -1;
-+
-+ return 0;
-+}
-+
-+static
-+void dpaa2_dev_stats_get(struct rte_eth_dev *dev,
-+ struct rte_eth_stats *stats)
-+{
-+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
-+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
-+
-+ int32_t retcode;
-+ uint64_t value;
-+
-+ if (dpni == NULL) {
-+ PMD_DRV_LOG(ERR, "dpni is NULL");
-+ return;
-+ }
-+
-+ if (!stats) {
-+ PMD_DRV_LOG(ERR, "stats is NULL");
-+ return;
-+ }
-+
-+ retcode = dpni_get_counter(dpni, CMD_PRI_LOW, priv->token,
-+ DPNI_CNT_ING_FRAME, &value);
-+ if (retcode)
-+ goto error;
-+ stats->ipackets = value;
-+ retcode = dpni_get_counter(dpni, CMD_PRI_LOW, priv->token,
-+ DPNI_CNT_ING_BYTE, &value);
-+ if (retcode)
-+ goto error;
-+ stats->ibytes = value;
-+ retcode = dpni_get_counter(dpni, CMD_PRI_LOW, priv->token,
-+ DPNI_CNT_ING_FRAME_DROP, &value);
-+ if (retcode)
-+ goto error;
-+ stats->ierrors = value;
-+ retcode = dpni_get_counter(dpni, CMD_PRI_LOW, priv->token,
-+ DPNI_CNT_ING_FRAME_DISCARD, &value);
-+ if (retcode)
-+ goto error;
-+ stats->ierrors = stats->ierrors + value;
-+ retcode = dpni_get_counter(dpni, CMD_PRI_LOW, priv->token,
-+ DPNI_CNT_EGR_FRAME, &value);
-+ if (retcode)
-+ goto error;
-+ stats->opackets = value;
-+	retcode = dpni_get_counter(dpni, CMD_PRI_LOW, priv->token,
-+				   DPNI_CNT_EGR_BYTE, &value);
-+ if (retcode)
-+ goto error;
-+ stats->obytes = value;
-+ retcode = dpni_get_counter(dpni, CMD_PRI_LOW, priv->token,
-+ DPNI_CNT_EGR_FRAME_DISCARD, &value);
-+ if (retcode)
-+ goto error;
-+ stats->oerrors = value;
-+
-+ return;
-+
-+error:
-+	PMD_DRV_LOG(ERR, "Operation not completed: Error Code = %d\n", retcode);
-+	return;
-+}
-+
-+static
-+void dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
-+{
-+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
-+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
-+
-+ int32_t retcode;
-+
-+ if (dpni == NULL) {
-+ PMD_DRV_LOG(ERR, "dpni is NULL");
-+ return;
-+ }
-+
-+ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token,
-+ DPNI_CNT_ING_FRAME, 0);
-+ if (retcode)
-+ goto error;
-+ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token,
-+ DPNI_CNT_ING_BYTE, 0);
-+ if (retcode)
-+ goto error;
-+ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token,
-+ DPNI_CNT_ING_BCAST_FRAME, 0);
-+ if (retcode)
-+ goto error;
-+ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token,
-+ DPNI_CNT_ING_BCAST_BYTES, 0);
-+ if (retcode)
-+ goto error;
-+ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token,
-+ DPNI_CNT_ING_MCAST_FRAME, 0);
-+ if (retcode)
-+ goto error;
-+ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token,
-+ DPNI_CNT_ING_MCAST_BYTE, 0);
-+ if (retcode)
-+ goto error;
-+ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token,
-+ DPNI_CNT_ING_FRAME_DROP, 0);
-+ if (retcode)
-+ goto error;
-+ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token,
-+ DPNI_CNT_ING_FRAME_DISCARD, 0);
-+ if (retcode)
-+ goto error;
-+ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token,
-+ DPNI_CNT_EGR_FRAME, 0);
-+ if (retcode)
-+ goto error;
-+ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token,
-+ DPNI_CNT_EGR_BYTE, 0);
-+ if (retcode)
-+ goto error;
-+ retcode = dpni_set_counter(dpni, CMD_PRI_LOW, priv->token,
-+ DPNI_CNT_EGR_FRAME_DISCARD, 0);
-+ if (retcode)
-+ goto error;
-+
-+ return;
-+
-+error:
-+	PMD_DRV_LOG(ERR, "Operation not completed: Error Code = %d\n", retcode);
-+	return;
-+}
-+
-+static struct eth_dev_ops ops = {
-+ .dev_configure = dpaa2_eth_dev_configure,
-+ .dev_start = dpaa2_dev_start,
-+ .dev_stop = dpaa2_dev_stop,
-+ .dev_close = dpaa2_dev_close,
-+ .promiscuous_enable = dpaa2_dev_promiscuous_enable,
-+ .promiscuous_disable = dpaa2_dev_promiscuous_disable,
-+ .allmulticast_enable = dpaa2_dev_allmulticast_enable,
-+ .allmulticast_disable = dpaa2_dev_allmulticast_disable,
-+ /* .dev_set_link_up = ixgbe_dev_set_link_up, */
-+ /* .dev_set_link_down = ixgbe_dev_set_link_down, */
-+ .link_update = dpaa2_dev_get_link_info,
-+ .stats_get = dpaa2_dev_stats_get,
-+ /* .xstats_get = ixgbe_dev_xstats_get, */
-+ .stats_reset = dpaa2_dev_stats_reset,
-+ /* .xstats_reset = ixgbe_dev_xstats_reset, */
-+ /* .queue_stats_mapping_set = i40e_dev_queue_stats_mapping_set, */
-+ .dev_infos_get = dpaa2_eth_dev_info,
-+ .dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
-+ .mtu_set = dpaa2_dev_mtu_set,
-+ .vlan_filter_set = dpaa2_vlan_filter_set,
-+/* .vlan_tpid_set = i40e_vlan_tpid_set, */
-+ .vlan_offload_set = dpaa2_vlan_offload_set,
-+/* .vlan_strip_queue_set = i40e_vlan_strip_queue_set, */
-+/* .vlan_pvid_set = i40e_vlan_pvid_set, */
-+/* .rx_queue_start = i40e_dev_rx_queue_start, */
-+/* .rx_queue_stop = i40e_dev_rx_queue_stop, */
-+/* .tx_queue_start = i40e_dev_tx_queue_start, */
-+/* .tx_queue_stop = i40e_dev_tx_queue_stop, */
-+ .rx_queue_setup = dpaa2_rx_queue_setup,
-+/* .rx_queue_intr_enable = i40e_dev_rx_queue_intr_enable, */
-+/* .rx_queue_intr_disable = i40e_dev_rx_queue_intr_disable, */
-+/* .rx_queue_release = i40e_dev_rx_queue_release, */
-+/* .rx_queue_count = i40e_dev_rx_queue_count, */
-+ .tx_queue_setup = dpaa2_tx_queue_setup,
-+/* .tx_queue_release = i40e_dev_tx_queue_release, */
-+/* .dev_led_on = i40e_dev_led_on, */
-+/* .dev_led_off = i40e_dev_led_off, */
-+/* .flow_ctrl_get = i40e_flow_ctrl_get, */
-+/* .flow_ctrl_set = i40e_flow_ctrl_set, */
-+/* .priority_flow_ctrl_set = i40e_priority_flow_ctrl_set, */
-+ .mac_addr_add = dpaa2_dev_add_mac_addr,
-+ .mac_addr_remove = dpaa2_dev_remove_mac_addr,
-+/* .reta_update = i40e_dev_rss_reta_update, */
-+/* .reta_query = i40e_dev_rss_reta_query, */
-+/* .rss_hash_update = i40e_dev_rss_hash_update, */
-+/* .rss_hash_conf_get = i40e_dev_rss_hash_conf_get, */
-+/* .filter_ctrl = i40e_dev_filter_ctrl, */
-+/* .rxq_info_get = i40e_rxq_info_get, */
-+/* .txq_info_get = i40e_txq_info_get, */
-+/* .mirror_rule_set = i40e_mirror_rule_set, */
-+/* .mirror_rule_reset = i40e_mirror_rule_reset, */
-+ .timesync_enable = dpaa2_timestamp_enable,
-+ .timesync_disable = dpaa2_timestamp_disable,
-+/* .timesync_read_rx_timestamp = i40e_timesync_read_rx_timestamp, */
-+/* .timesync_read_tx_timestamp = i40e_timesync_read_tx_timestamp, */
-+/* .get_dcb_info = i40e_dev_get_dcb_info, */
-+/* .timesync_adjust_time = i40e_timesync_adjust_time, */
-+/* .timesync_read_time = i40e_timesync_read_time, */
-+/* .timesync_write_time = i40e_timesync_write_time, */
-+/* .get_reg_length = i40e_get_reg_length, */
-+/* .get_reg = i40e_get_regs, */
-+/* .get_eeprom_length = i40e_get_eeprom_length, */
-+/* .get_eeprom = i40e_get_eeprom, */
-+ .mac_addr_set = dpaa2_dev_set_mac_addr,
-+};
-+
-+static int
-+dpaa2_dev_init(struct rte_eth_dev *eth_dev)
-+{
-+ struct rte_eth_dev_data *data = eth_dev->data;
-+ struct fsl_mc_io *dpni_dev;
-+ struct dpni_attr attr;
-+ struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
-+ struct dpni_buffer_layout layout;
-+ int i, ret, hw_id = eth_dev->pci_dev->addr.devid;
-+ struct dpni_extended_cfg *ext_cfg = NULL;
-+ int tot_size;
-+
-+ PMD_INIT_FUNC_TRACE();
-+
-+ dpni_dev = (struct fsl_mc_io *)malloc(sizeof(struct fsl_mc_io));
-+ if (!dpni_dev) {
-+ PMD_DRV_LOG(ERR, "malloc failed for dpni device\n");
-+ return -1;
-+ }
-+
-+ dpni_dev->regs = mcp_ptr_list[0];
-+ ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
-+ if (ret) {
-+		PMD_DRV_LOG(ERR, "Failure in opening dpni@%d device with "
-+			"error code %d\n", hw_id, ret);
-+ return -1;
-+ }
-+
-+ /* Clean the device first */
-+ ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
-+ if (ret) {
-+		PMD_DRV_LOG(ERR, "Failure cleaning dpni@%d device with "
-+			"error code %d\n", hw_id, ret);
-+ return -1;
-+ }
-+
-+ ext_cfg = (struct dpni_extended_cfg *)rte_malloc(NULL, 256,
-+ RTE_CACHE_LINE_SIZE);
-+ if (!ext_cfg) {
-+ PMD_DRV_LOG(ERR, "No data memory\n");
-+ return -1;
-+ }
-+ attr.ext_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(ext_cfg));
-+
-+ ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
-+ if (ret) {
-+ PMD_DRV_LOG(ERR, "Failure in getting dpni@%d attribute, "
-+ "error code %d\n", hw_id, ret);
-+ return -1;
-+ }
-+
-+ priv->num_tc = attr.max_tcs;
-+ for (i = 0; i < attr.max_tcs; i++) {
-+ priv->num_dist_per_tc[i] = ext_cfg->tc_cfg[i].max_dist;
-+ priv->nb_rx_queues += priv->num_dist_per_tc[i];
-+ /* todo - currently we only support one TC index in RX side */
-+ break;
-+ }
-+ if (attr.max_tcs == 1)
-+ priv->nb_tx_queues = attr.max_senders;
-+ else
-+ priv->nb_tx_queues = attr.max_tcs;
-+ PMD_DRV_LOG(INFO, "num_tc %d\n", priv->num_tc);
-+ PMD_DRV_LOG(INFO, "nb_rx_queues %d\n", priv->nb_rx_queues);
-+
-+ eth_dev->data->nb_rx_queues = priv->nb_rx_queues;
-+ eth_dev->data->nb_tx_queues = priv->nb_tx_queues;
-+
-+ priv->hw = dpni_dev;
-+ priv->hw_id = hw_id;
-+ priv->options = attr.options;
-+
-+ priv->max_unicast_filters = attr.max_unicast_filters;
-+ priv->max_multicast_filters = attr.max_multicast_filters;
-+
-+ if (attr.options & DPNI_OPT_VLAN_FILTER)
-+ priv->max_vlan_filters = attr.max_vlan_filters;
-+ else
-+ priv->max_vlan_filters = 0;
-+
-+ ret = dpaa2_alloc_rx_tx_queues(eth_dev);
-+ if (ret) {
-+		PMD_DRV_LOG(ERR, "dpaa2_alloc_rx_tx_queues failed\n");
-+ return -1;
-+ }
-+
-+
-+ /* Allocate memory for storing MAC addresses */
-+ eth_dev->data->mac_addrs = rte_zmalloc("dpni",
-+ ETHER_ADDR_LEN * attr.max_unicast_filters, 0);
-+ if (eth_dev->data->mac_addrs == NULL) {
-+ PMD_DRV_LOG(ERR, "Failed to allocate %d bytes needed to "
-+ "store MAC addresses",
-+ ETHER_ADDR_LEN * attr.max_unicast_filters);
-+ return -ENOMEM;
-+ }
-+
-+ ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
-+ priv->token,
-+ (uint8_t *)(data->mac_addrs[0].addr_bytes));
-+ if (ret) {
-+ PMD_DRV_LOG(ERR, "DPNI get mac address failed:"
-+ " Error Code = %d\n", ret);
-+ return -1;
-+ }
-+
-+ PMD_DRV_LOG(INFO, "Adding Broadcast Address...\n");
-+ memset(data->mac_addrs[1].addr_bytes, 0xff, ETH_ADDR_LEN);
-+ ret = dpni_add_mac_addr(dpni_dev, CMD_PRI_LOW,
-+ priv->token,
-+ (uint8_t *)(data->mac_addrs[1].addr_bytes));
-+ if (ret) {
-+ PMD_DRV_LOG(ERR, "DPNI set broadcast mac address failed:"
-+ " Error Code = %0x\n", ret);
-+ return -1;
-+ }
-+
-+ /* ... rx buffer layout ... */
-+ /*Check alignment for buffer layouts first*/
-+ tot_size = DPAA2_FD_PTA_SIZE + DPAA2_MBUF_HW_ANNOTATION + DPAA2_RES/* dummy */ +
-+		128 /* RTE_MBUF */ + (128 + DPAA2_RES) /* VLIB */ + RTE_PKTMBUF_HEADROOM;
-+ tot_size = DPAA2_ALIGN_ROUNDUP(tot_size,
-+ DPAA2_PACKET_LAYOUT_ALIGN);
-+
-+ memset(&layout, 0, sizeof(struct dpni_buffer_layout));
-+ layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
-+ DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
-+ DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
-+ DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
-+
-+ layout.pass_frame_status = 1;
-+ layout.data_head_room =
-+ (tot_size - (DPAA2_FD_PTA_SIZE + DPAA2_MBUF_HW_ANNOTATION));
-+ layout.private_data_size = DPAA2_FD_PTA_SIZE;
-+ layout.pass_parser_result = 1;
-+
-+ ret = dpni_set_rx_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
-+ &layout);
-+ if (ret) {
-+ printf("Err(%d) in setting rx buffer layout\n", ret);
-+ return -1;
-+ }
-+
-+ /* ... tx buffer layout ... */
-+ memset(&layout, 0, sizeof(struct dpni_buffer_layout));
-+ layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
-+ layout.pass_frame_status = 1;
-+ ret = dpni_set_tx_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token, &layout);
-+ if (ret) {
-+ printf("Error (%d) in setting tx buffer layout\n", ret);
-+ return -1;
-+ }
-+
-+ /* ... tx-conf and error buffer layout ... */
-+ memset(&layout, 0, sizeof(struct dpni_buffer_layout));
-+ layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
-+ layout.pass_frame_status = 1;
-+ ret = dpni_set_tx_conf_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token, &layout);
-+ if (ret) {
-+ printf("Error (%d) in setting tx-conf buffer layout\n", ret);
-+ return -1;
-+ }
-+
-+ /* TODO - Set the MTU if required */
-+
-+ eth_dev->dev_ops = &ops;
-+ eth_dev->rx_pkt_burst = eth_dpaa2_prefetch_rx;/*eth_dpaa2_rx;*/
-+ eth_dev->tx_pkt_burst = eth_dpaa2_tx;
-+
-+ rte_free(ext_cfg);
-+
-+ return 0;
-+}
-+
-+static struct eth_driver rte_dpaa2_dpni = {
-+ {
-+ .name = "rte_dpaa2_dpni",
-+ .id_table = pci_id_dpaa2_map,
-+ },
-+ .eth_dev_init = dpaa2_dev_init,
-+ .dev_private_size = sizeof(struct dpaa2_dev_priv),
-+};
-+
-+static int
-+rte_pmd_dpaa2_devinit(
-+ const char *name __rte_unused,
-+ const char *params __rte_unused)
-+{
-+ printf("Initializing dpaa2_pmd for %s\n", name);
-+ rte_eth_driver_register(&rte_dpaa2_dpni);
-+
-+ return 0;
-+}
-+
-+static struct rte_driver pmd_dpaa2_drv = {
-+ .name = "dpaa2_pmd",
-+ .type = PMD_PDEV,
-+ .init = rte_pmd_dpaa2_devinit,
-+};
-+
-+PMD_REGISTER_DRIVER(pmd_dpaa2_drv);
-diff --git a/drivers/net/dpaa2/rte_eth_dpni_annot.h b/drivers/net/dpaa2/rte_eth_dpni_annot.h
-new file mode 100644
-index 0000000..00fac9b
---- /dev/null
-+++ b/drivers/net/dpaa2/rte_eth_dpni_annot.h
-@@ -0,0 +1,311 @@
-+/*-
-+ * BSD LICENSE
-+ *
-+ * Copyright (c) 2014 Freescale Semiconductor, Inc. All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions
-+ * are met:
-+ *
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in
-+ * the documentation and/or other materials provided with the
-+ * distribution.
-+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
-+ * contributors may be used to endorse or promote products derived
-+ * from this software without specific prior written permission.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+/**
-+ * @file
-+ *
-+ * DPNI packet parse results - implementation internal
-+ */
-+
-+#ifndef RTE_ETH_DPNI_ANNOT_H_
-+#define RTE_ETH_DPNI_ANNOT_H_
-+
-+#ifdef __cplusplus
-+extern "C" {
-+#endif
-+
-+/* Annotation valid bits in FD FRC */
-+#define DPAA2_FD_FRC_FASV 0x8000
-+#define DPAA2_FD_FRC_FAEADV 0x4000
-+#define DPAA2_FD_FRC_FAPRV 0x2000
-+#define DPAA2_FD_FRC_FAIADV 0x1000
-+#define DPAA2_FD_FRC_FASWOV 0x0800
-+#define DPAA2_FD_FRC_FAICFDV 0x0400
-+
-+/* Annotation bits in FD CTRL */
-+#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128 */
-+#define DPAA2_FD_CTRL_PTA 0x00800000
-+#define DPAA2_FD_CTRL_PTV1 0x00400000
-+
-+/* Frame annotation status */
-+struct dpaa2_fas {
-+ uint8_t reserved;
-+ uint8_t ppid;
-+ __le16 ifpid;
-+ __le32 status;
-+} __packed;
-+
-+/**
-+ * Internal Packet annotation header
-+ */
-+struct pkt_annotation {
-+ /**< word1: Frame Annotation Status (8 bytes)*/
-+ uint64_t word1;
-+ /**< word2: Time Stamp (8 bytes)*/
-+ uint64_t word2;
-+ /**< word3: Next Hdr + FAF Extension + FAF (2 + 2 + 4 bytes)*/
-+ uint64_t word3;
-+ /**< word4: Frame Annotation Flags-FAF (8 bytes) */
-+ uint64_t word4;
-+ /**< word5:
-+ ShimOffset_1 + ShimOffset_2 + IPPIDOffset + EthOffset +
-+ LLC+SNAPOffset + VLANTCIOffset_1 + VLANTCIOffset_n +
-+ LastETypeOffset (1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 bytes)
-+ */
-+ uint64_t word5;
-+ /**< word6:
-+ PPPoEOffset + MPLSOffset_1 + MPLSOffset_n + ARPorIPOffset_1
-+ + IPOffset_norMInEncapO + GREOffset + L4Offset +
-+ GTPorESPorIPSecOffset(1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 bytes)
-+ */
-+ uint64_t word6;
-+ /**< word7:
-+ RoutingHdrOfset1 + RoutingHdrOfset2 + NxtHdrOffset + IPv6FragOffset +
-+ GrossRunningSum + RunningSum(1 + 1 + 1 + 1 + 2 + 2 bytes)
-+ */
-+ uint64_t word7;
-+ /**< word8:
-+ ParseErrorcode + Soft Parsing Context (1 + 7 bytes)
-+ */
-+ uint64_t word8; /**< Layer 4 length */
-+};
-+
-+/**
-+ * Internal Macros to get/set Packet annotation header
-+ */
-+
-+/** General Macro to define a particular bit position*/
-+#define BIT_POS(x) ((uint64_t)1 << ((x)))
-+/** Set a bit in the variable */
-+#define BIT_SET_AT_POS(var, pos) (var |= pos)
-+/** Reset the bit in the variable */
-+#define BIT_RESET_AT_POS(var, pos) (var &= ~(pos))
-+/** Check the bit is set in the variable */
-+#define BIT_ISSET_AT_POS(var, pos) ((var & pos) ? 1 : 0)
-+/**
-+ * Macros to define bit positions in word3
-+ */
-+#define NEXT_HDR(var) ((uint64_t)var & 0xFFFF000000000000)
-+#define FAF_EXTN_IPV6_ROUTE_HDR_PRESENT(var) BIT_POS(16)
-+#define FAF_EXTN_RESERVED(var) ((uint64_t)var & 0x00007FFF00000000)
-+#define FAF_USER_DEFINED_RESERVED(var) ((uint64_t)var & 0x00000000FF000000)
-+#define SHIM_SHELL_SOFT_PARSING_ERRROR BIT_POS(23)
-+#define PARSING_ERROR BIT_POS(22)
-+#define L2_ETH_MAC_PRESENT BIT_POS(21)
-+#define L2_ETH_MAC_UNICAST BIT_POS(20)
-+#define L2_ETH_MAC_MULTICAST BIT_POS(19)
-+#define L2_ETH_MAC_BROADCAST BIT_POS(18)
-+#define L2_ETH_FRAME_IS_BPDU BIT_POS(17)
-+#define L2_ETH_FCOE_PRESENT BIT_POS(16)
-+#define L2_ETH_FIP_PRESENT BIT_POS(15)
-+#define L2_ETH_PARSING_ERROR BIT_POS(14)
-+#define L2_LLC_SNAP_PRESENT BIT_POS(13)
-+#define L2_UNKNOWN_LLC_OUI BIT_POS(12)
-+#define L2_LLC_SNAP_ERROR BIT_POS(11)
-+#define L2_VLAN_1_PRESENT BIT_POS(10)
-+#define L2_VLAN_N_PRESENT BIT_POS(9)
-+#define L2_VLAN_CFI_BIT_PRESENT BIT_POS(8)
-+#define L2_VLAN_PARSING_ERROR BIT_POS(7)
-+#define L2_PPPOE_PPP_PRESENT BIT_POS(6)
-+#define L2_PPPOE_PPP_PARSING_ERROR BIT_POS(5)
-+#define L2_MPLS_1_PRESENT BIT_POS(4)
-+#define L2_MPLS_N_PRESENT BIT_POS(3)
-+#define L2_MPLS_PARSING_ERROR BIT_POS(2)
-+#define L2_ARP_PRESENT BIT_POS(1)
-+#define L2_ARP_PARSING_ERROR BIT_POS(0)
-+/**
-+ * Macros to define bit positions in word4
-+ */
-+#define L2_UNKNOWN_PROTOCOL BIT_POS(63)
-+#define L2_SOFT_PARSING_ERROR BIT_POS(62)
-+#define L3_IPV4_1_PRESENT BIT_POS(61)
-+#define L3_IPV4_1_UNICAST BIT_POS(60)
-+#define L3_IPV4_1_MULTICAST BIT_POS(59)
-+#define L3_IPV4_1_BROADCAST BIT_POS(58)
-+#define L3_IPV4_N_PRESENT BIT_POS(57)
-+#define L3_IPV4_N_UNICAST BIT_POS(56)
-+#define L3_IPV4_N_MULTICAST BIT_POS(55)
-+#define L3_IPV4_N_BROADCAST BIT_POS(54)
-+#define L3_IPV6_1_PRESENT BIT_POS(53)
-+#define L3_IPV6_1_UNICAST BIT_POS(52)
-+#define L3_IPV6_1_MULTICAST BIT_POS(51)
-+#define L3_IPV6_N_PRESENT BIT_POS(50)
-+#define L3_IPV6_N_UNICAST BIT_POS(49)
-+#define L3_IPV6_N_MULTICAST BIT_POS(48)
-+#define L3_IP_1_OPT_PRESENT BIT_POS(47)
-+#define L3_IP_1_UNKNOWN_PROTOCOL BIT_POS(46)
-+#define L3_IP_1_MORE_FRAGMENT BIT_POS(45)
-+#define L3_IP_1_FIRST_FRAGMENT BIT_POS(44)
-+#define L3_IP_1_PARSING_ERROR BIT_POS(43)
-+#define L3_IP_N_OPT_PRESENT BIT_POS(42)
-+#define L3_IP_N_UNKNOWN_PROTOCOL BIT_POS(41)
-+#define L3_IP_N_MORE_FRAGMENT BIT_POS(40)
-+#define L3_IP_N_FIRST_FRAGMENT BIT_POS(39)
-+#define L3_PROTO_ICMP_PRESENT BIT_POS(38)
-+#define L3_PROTO_IGMP_PRESENT BIT_POS(37)
-+#define L3_PROTO_ICMPV6_PRESENT BIT_POS(36)
-+#define L3_PROTO_UDP_LIGHT_PRESENT BIT_POS(35)
-+#define L3_IP_N_PARSING_ERROR BIT_POS(34)
-+#define L3_MIN_ENCAP_PRESENT BIT_POS(33)
-+#define L3_MIN_ENCAP_SBIT_PRESENT BIT_POS(32)
-+#define L3_MIN_ENCAP_PARSING_ERROR BIT_POS(31)
-+#define L3_PROTO_GRE_PRESENT BIT_POS(30)
-+#define L3_PROTO_GRE_RBIT_PRESENT BIT_POS(29)
-+#define L3_PROTO_GRE_PARSING_ERROR BIT_POS(28)
-+#define L3_IP_UNKNOWN_PROTOCOL BIT_POS(27)
-+#define L3_SOFT_PARSING_ERROR BIT_POS(26)
-+#define L3_PROTO_UDP_PRESENT BIT_POS(25)
-+#define L3_PROTO_UDP_PARSING_ERROR BIT_POS(24)
-+#define L3_PROTO_TCP_PRESENT BIT_POS(23)
-+#define L3_PROTO_TCP_OPT_PRESENT BIT_POS(22)
-+#define L3_PROTO_TCP_CTRL_BIT_6_TO_11_PRESENT BIT_POS(21)
-+#define L3_PROTO_TCP_CTRL_BIT_3_TO_5_PRESENT BIT_POS(20)
-+#define L3_PROTO_TCP_PARSING_ERROR BIT_POS(19)
-+#define L3_PROTO_IPSEC_PRESENT BIT_POS(18)
-+#define L3_PROTO_IPSEC_ESP_PRESENT BIT_POS(17)
-+#define L3_PROTO_IPSEC_AH_PRESENT BIT_POS(16)
-+#define L3_PROTO_IPSEC_PARSING_ERROR BIT_POS(15)
-+#define L3_PROTO_SCTP_PRESENT BIT_POS(14)
-+#define L3_PROTO_SCTP_PARSING_ERROR BIT_POS(13)
-+#define L3_PROTO_DCCP_PRESENT BIT_POS(12)
-+#define L3_PROTO_DCCP_PARSING_ERROR BIT_POS(11)
-+#define L4_UNKNOWN_PROTOCOL BIT_POS(10)
-+#define L4_SOFT_PARSING_ERROR BIT_POS(9)
-+#define L3_PROTO_GTP_PRESENT BIT_POS(8)
-+#define L3_PROTO_GTP_PARSING_ERROR BIT_POS(7)
-+#define L3_PROTO_ESP_PRESENT BIT_POS(6)
-+#define L3_PROTO_ESP_PARSING_ERROR BIT_POS(5)
-+#define L3_PROTO_ISCSI_PRESENT BIT_POS(4)
-+#define L3_PROTO_CAPWAN__CTRL_PRESENT BIT_POS(3)
-+#define L3_PROTO_CAPWAN__DATA_PRESENT BIT_POS(2)
-+#define L5_SOFT_PARSING_ERROR BIT_POS(1)
-+#define L3_IPV6_ROUTE_HDR_PRESENT BIT_POS(0)
-+
-+/**
-+ * Macros to get values in word5
-+ */
-+#define SHIM_OFFSET_1(var) ((uint64_t)var & 0xFF00000000000000)
-+#define SHIM_OFFSET_2(var) ((uint64_t)var & 0x00FF000000000000)
-+#define IP_PID_OFFSET(var) ((uint64_t)var & 0x0000FF0000000000)
-+#define ETH_OFFSET(var) ((uint64_t)var & 0x000000FF00000000)
-+#define LLC_SNAP_OFFSET(var) ((uint64_t)var & 0x00000000FF000000)
-+#define VLAN_TCI_OFFSET_1(var) ((uint64_t)var & 0x0000000000FF0000)
-+#define VLAN_TCI_OFFSET_N(var) ((uint64_t)var & 0x000000000000FF00)
-+#define LAST_ETYPE_OFFSET(var) ((uint64_t)var & 0x00000000000000FF)
-+
-+/**
-+ * Macros to get values in word6
-+ */
-+#define PPPOE_OFFSET(var) ((uint64_t)var & 0xFF00000000000000)
-+#define MPLS_OFFSET_1(var) ((uint64_t)var & 0x00FF000000000000)
-+#define MPLS_OFFSET_N(var) ((uint64_t)var & 0x0000FF0000000000)
-+#define ARP_OR_IP_OFFSET_1(var) ((uint64_t)var & 0x000000FF00000000)
-+#define IP_N_OR_MIN_ENCAP_OFFSET(var) ((uint64_t)var & 0x00000000FF000000)
-+#define GRE_OFFSET(var) ((uint64_t)var & 0x0000000000FF0000)
-+#define L4_OFFSET(var) ((uint64_t)var & 0x000000000000FF00)
-+#define GTP_OR_ESP_OR_IPSEC_OFFSET(var) ((uint64_t)var & 0x00000000000000FF)
-+
-+/**
-+ * Macros to get values in word7
-+ */
-+#define IPV6_ROUTING_HDR_OFFSET_1(var) ((uint64_t)var & 0xFF00000000000000)
-+#define IPV6_ROUTING_HDR_OFFSET_2(var) ((uint64_t)var & 0x00FF000000000000)
-+#define NEXT_HDR_OFFSET(var) ((uint64_t)var & 0x0000FF0000000000)
-+#define IPV6_FRAG_OFFSET(var) ((uint64_t)var & 0x000000FF00000000)
-+#define GROSS_RUNNING_SUM(var) ((uint64_t)var & 0x00000000FFFF0000)
-+#define RUNNING_SUM(var) ((uint64_t)var & 0x000000000000FFFF)
-+
-+/**
-+ * Macros to get values in word8
-+ */
-+#define PARSE_ERROR_CODE(var) ((uint64_t)var & 0xFF00000000000000)
-+#define SOFT_PARSING_CONTEXT(var) ((uint64_t)var & 0x00FFFFFFFFFFFFFF)
-+
-+/* Debug frame, otherwise supposed to be discarded */
-+#define DPAA2_ETH_FAS_DISC 0x80000000
-+/* MACSEC frame */
-+#define DPAA2_ETH_FAS_MS 0x40000000
-+#define DPAA2_ETH_FAS_PTP 0x08000000
-+/* Ethernet multicast frame */
-+#define DPAA2_ETH_FAS_MC 0x04000000
-+/* Ethernet broadcast frame */
-+#define DPAA2_ETH_FAS_BC 0x02000000
-+#define DPAA2_ETH_FAS_KSE 0x00040000
-+#define DPAA2_ETH_FAS_EOFHE 0x00020000
-+#define DPAA2_ETH_FAS_MNLE 0x00010000
-+#define DPAA2_ETH_FAS_TIDE 0x00008000
-+#define DPAA2_ETH_FAS_PIEE 0x00004000
-+/* Frame length error */
-+#define DPAA2_ETH_FAS_FLE 0x00002000
-+/* Frame physical error */
-+#define DPAA2_ETH_FAS_FPE 0x00001000
-+#define DPAA2_ETH_FAS_PTE 0x00000080
-+#define DPAA2_ETH_FAS_ISP 0x00000040
-+#define DPAA2_ETH_FAS_PHE 0x00000020
-+#define DPAA2_ETH_FAS_BLE 0x00000010
-+/* L3 csum validation performed */
-+#define DPAA2_ETH_FAS_L3CV 0x00000008
-+/* L3 csum error */
-+#define DPAA2_ETH_FAS_L3CE 0x00000004
-+/* L4 csum validation performed */
-+#define DPAA2_ETH_FAS_L4CV 0x00000002
-+/* L4 csum error */
-+#define DPAA2_ETH_FAS_L4CE 0x00000001
-+
-+/* These bits always signal errors */
-+#define DPAA2_ETH_RX_ERR_MASK (DPAA2_ETH_FAS_KSE | \
-+ DPAA2_ETH_FAS_EOFHE | \
-+ DPAA2_ETH_FAS_MNLE | \
-+ DPAA2_ETH_FAS_TIDE | \
-+ DPAA2_ETH_FAS_PIEE | \
-+ DPAA2_ETH_FAS_FLE | \
-+ DPAA2_ETH_FAS_FPE | \
-+ DPAA2_ETH_FAS_PTE | \
-+ DPAA2_ETH_FAS_ISP | \
-+ DPAA2_ETH_FAS_PHE | \
-+ DPAA2_ETH_FAS_BLE | \
-+ DPAA2_ETH_FAS_L3CE | \
-+ DPAA2_ETH_FAS_L4CE)
-+/* Unsupported features in the ingress */
-+#define DPAA2_ETH_RX_UNSUPP_MASK DPAA2_ETH_FAS_MS
-+/* Tx errors */
-+#define DPAA2_ETH_TXCONF_ERR_MASK (DPAA2_ETH_FAS_KSE | \
-+ DPAA2_ETH_FAS_EOFHE | \
-+ DPAA2_ETH_FAS_MNLE | \
-+ DPAA2_ETH_FAS_TIDE)
-+
-+
-+#ifdef __cplusplus
-+}
-+#endif
-+
-+#endif
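
As a minimal illustrative sketch, not part of the original patch, of how the parse-result macros in this removed header were meant to be consumed: the header name `rte_eth_dpni_annot.h` is inferred from the include guard, and `annot` is assumed to point at a received frame's hardware annotation area.

```c
/* Minimal sketch, assuming the removed header above is available as
 * "rte_eth_dpni_annot.h" (name inferred from the include guard). */
#include <stdio.h>
#include "rte_eth_dpni_annot.h"

static void
dump_parse_results(const struct pkt_annotation *annot)
{
	/* word4 holds the Frame Annotation Flags (FAF). */
	if (BIT_ISSET_AT_POS(annot->word4, L3_IPV4_1_PRESENT))
		printf("outer IPv4 header present\n");

	if (BIT_ISSET_AT_POS(annot->word4, L3_PROTO_UDP_PRESENT)) {
		/* The word5-word8 getters only mask their byte field;
		 * the caller still shifts the value into place. */
		unsigned int l4_off =
			(unsigned int)(L4_OFFSET(annot->word6) >> 8);
		printf("UDP header at byte offset %u\n", l4_off);
	}
}
```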
-diff --git a/drivers/net/dpaa2/rte_pmd_dpaa2_version.map b/drivers/net/dpaa2/rte_pmd_dpaa2_version.map
-new file mode 100644
-index 0000000..349c6e1
---- /dev/null
-+++ b/drivers/net/dpaa2/rte_pmd_dpaa2_version.map
-@@ -0,0 +1,4 @@
-+DPDK_16.04 {
-+
-+ local: *;
-+};
-diff --git a/lib/librte_eal/common/eal_private.h b/lib/librte_eal/common/eal_private.h
-index 2342fa1..8f27836 100644
---- a/lib/librte_eal/common/eal_private.h
-+++ b/lib/librte_eal/common/eal_private.h
-@@ -328,4 +328,16 @@ int rte_eal_hugepage_init(void);
- */
- int rte_eal_hugepage_attach(void);
-
-+#ifdef RTE_LIBRTE_DPAA2_PMD
-+/**
-+ * Initialize any SoC-specific setup required before thread creation
-+ */
-+int rte_eal_soc_pre_init(void);
-+
-+/**
-+ * Initialize any SoC-specific setup required after thread creation
-+ */
-+int rte_eal_soc_post_init(void);
-+#endif
-+
- #endif /* _EAL_PRIVATE_H_ */
-diff --git a/lib/librte_eal/linuxapp/eal/Makefile b/lib/librte_eal/linuxapp/eal/Makefile
-index e109361..abcd02c 100644
---- a/lib/librte_eal/linuxapp/eal/Makefile
-+++ b/lib/librte_eal/linuxapp/eal/Makefile
-@@ -47,6 +47,13 @@ CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include
- CFLAGS += -I$(RTE_SDK)/lib/librte_ring
- CFLAGS += -I$(RTE_SDK)/lib/librte_mempool
- CFLAGS += -I$(RTE_SDK)/lib/librte_ivshmem
-+ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_PMD),y)
-+CFLAGS += -I$(RTE_SDK)/lib/librte_mbuf
-+CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/qbman/include
-+CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/qbman/include/drivers
-+CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/mc
-+CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/driver
-+endif
- CFLAGS += $(WERROR_FLAGS) -O3
-
- LDLIBS += -ldl
-@@ -72,6 +79,10 @@ SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_lcore.c
- SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_timer.c
- SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_interrupts.c
- SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_alarm.c
-+ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_PMD),y)
-+SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_soc.c
-+SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_vfio_fsl_mc.c
-+endif
- ifeq ($(CONFIG_RTE_LIBRTE_IVSHMEM),y)
- SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_ivshmem.c
- endif
-diff --git a/lib/librte_eal/linuxapp/eal/eal.c b/lib/librte_eal/linuxapp/eal/eal.c
-index 8aafd51..b2327c7 100644
---- a/lib/librte_eal/linuxapp/eal/eal.c
-+++ b/lib/librte_eal/linuxapp/eal/eal.c
-@@ -805,6 +805,11 @@ rte_eal_init(int argc, char **argv)
- if (rte_eal_tailqs_init() < 0)
- rte_panic("Cannot init tail queues for objects\n");
-
-+#ifdef RTE_LIBRTE_DPAA2_PMD
-+ if (rte_eal_soc_pre_init() < 0)
-+ rte_panic("Cannot pre init soc\n");
-+#endif
-+
- #ifdef RTE_LIBRTE_IVSHMEM
- if (rte_eal_ivshmem_obj_init() < 0)
- rte_panic("Cannot init IVSHMEM objects\n");
-@@ -874,6 +879,11 @@ rte_eal_init(int argc, char **argv)
- rte_eal_mp_remote_launch(sync_func, NULL, SKIP_MASTER);
- rte_eal_mp_wait_lcore();
-
-+#ifdef RTE_LIBRTE_DPAA2_PMD
-+ if (rte_eal_soc_post_init() < 0)
-+ rte_panic("Cannot post init soc\n");
-+#endif
-+
- /* Probe & Initialize PCI devices */
- if (rte_eal_pci_probe())
- rte_panic("Cannot probe PCI\n");
-diff --git a/lib/librte_eal/linuxapp/eal/eal_soc.c b/lib/librte_eal/linuxapp/eal/eal_soc.c
-new file mode 100644
-index 0000000..32ae172
---- /dev/null
-+++ b/lib/librte_eal/linuxapp/eal/eal_soc.c
-@@ -0,0 +1,84 @@
-+/*-
-+ * BSD LICENSE
-+ *
-+ * Copyright(c) 2016 Freescale Semiconductor, Inc. All rights reserved.
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions
-+ * are met:
-+ *
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in
-+ * the documentation and/or other materials provided with the
-+ * distribution.
-+ * * Neither the name of Freescale Semiconductor, Inc or the names of its
-+ * contributors may be used to endorse or promote products derived
-+ * from this software without specific prior written permission.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include <unistd.h>
-+#include <limits.h>
-+#include <string.h>
-+#include <dirent.h>
-+
-+#include <rte_log.h>
-+#include <rte_eal.h>
-+#include <rte_lcore.h>
-+#include <rte_common.h>
-+#include <rte_string_fns.h>
-+#include <rte_debug.h>
-+#include "eal_private.h"
-+
-+#ifdef RTE_LIBRTE_DPAA2_PMD
-+#include "eal_vfio_fsl_mc.h"
-+#endif
-+
-+
-+#if (defined RTE_LIBRTE_DPAA_PMD)
-+extern int usdpaa_pre_rte_eal_init(void);
-+extern int usdpaa_post_rte_eal_init(void);
-+#endif
-+
-+
-+/* Initialize any SoC-specific setup required before thread creation */
-+int
-+rte_eal_soc_pre_init(void)
-+{
-+#ifdef RTE_LIBRTE_DPAA2_PMD
-+ if (rte_eal_dpaa2_init() < 0)
-+ RTE_LOG(WARNING, EAL, "Cannot init FSL_MC SCAN \n");
-+#endif
-+#if (defined RTE_LIBRTE_DPAA_PMD)
-+ if (usdpaa_pre_rte_eal_init())
-+ RTE_LOG(WARNING, EAL, "Cannot init FSL_DPAA \n");
-+#endif
-+ return 0;
-+}
-+
-+/* Initialize any SoC-specific setup required after thread creation */
-+int
-+rte_eal_soc_post_init(void)
-+{
-+#if (defined RTE_LIBRTE_DPAA_PMD)
-+ if (usdpaa_post_rte_eal_init()) {
-+ RTE_LOG(WARNING, EAL, "dpaa1: usdpaa portal init failed\n");
-+ }
-+#endif
-+ return 0;
-+}
-+
-diff --git a/lib/librte_eal/linuxapp/eal/eal_vfio_fsl_mc.c b/lib/librte_eal/linuxapp/eal/eal_vfio_fsl_mc.c
-new file mode 100644
-index 0000000..c71d8d6
---- /dev/null
-+++ b/lib/librte_eal/linuxapp/eal/eal_vfio_fsl_mc.c
-@@ -0,0 +1,653 @@
-+/*-
-+ * BSD LICENSE
-+ *
-+ * Copyright(c) 2014 Freescale Semiconductor. All rights reserved.
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions
-+ * are met:
-+ *
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in
-+ * the documentation and/or other materials provided with the
-+ * distribution.
-+ * * Neither the name of Freescale Semiconductor nor the names of its
-+ * contributors may be used to endorse or promote products derived
-+ * from this software without specific prior written permission.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#include <unistd.h>
-+#include <stdio.h>
-+#include <sys/types.h>
-+#include <string.h>
-+#include <stdlib.h>
-+#include <fcntl.h>
-+#include <errno.h>
-+#include <sys/ioctl.h>
-+#include <sys/stat.h>
-+#include <sys/types.h>
-+#include <sys/mman.h>
-+#include <sys/vfs.h>
-+#include <libgen.h>
-+#include <dirent.h>
-+
-+#include "rte_pci.h"
-+#include "eal_vfio.h"
-+
-+#include <rte_log.h>
-+
-+#include "eal_vfio_fsl_mc.h"
-+
-+#include "rte_pci_dev_ids.h"
-+#include "eal_filesystem.h"
-+#include "eal_private.h"
-+
-+#ifndef VFIO_MAX_GROUPS
-+#define VFIO_MAX_GROUPS 64
-+#endif
-+
-+//#define DPAA2_STAGE2_STASHING
-+
-+/** Pathname of FSL-MC devices directory. */
-+#define SYSFS_FSL_MC_DEVICES "/sys/bus/fsl-mc/devices"
-+
-+/* VFIO containers and the groups within them */
-+static struct vfio_group vfio_groups[VFIO_MAX_GRP];
-+static struct vfio_container vfio_containers[VFIO_MAX_CONTAINERS];
-+static char *ls2bus_container;
-+static int container_device_fd;
-+static uint32_t *msi_intr_vaddr;
-+void *(*mcp_ptr_list);
-+static uint32_t mcp_id;
-+
-+static int vfio_connect_container(struct vfio_group *vfio_group)
-+{
-+ struct vfio_container *container;
-+ int i, fd, ret;
-+
-+ /* Try connecting to vfio container already created */
-+ for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
-+ container = &vfio_containers[i];
-+ if (!ioctl(vfio_group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
-+ RTE_LOG(ERR, EAL, "Container pre-exists with FD[0x%x]"
-+ " for this group\n", container->fd);
-+ vfio_group->container = container;
-+ return 0;
-+ }
-+ }
-+
-+ /* Opens main vfio file descriptor which represents the "container" */
-+ fd = open("/dev/vfio/vfio", O_RDWR);
-+ if (fd < 0) {
-+ RTE_LOG(ERR, EAL, "vfio: failed to open /dev/vfio/vfio\n");
-+ return -errno;
-+ }
-+
-+ ret = ioctl(fd, VFIO_GET_API_VERSION);
-+ if (ret != VFIO_API_VERSION) {
-+ RTE_LOG(ERR, EAL, "vfio: supported vfio version: %d, "
-+ "reported version: %d", VFIO_API_VERSION, ret);
-+ close(fd);
-+ return -EINVAL;
-+ }
-+#ifndef DPAA2_STAGE2_STASHING
-+ /* Check whether SMMU (Type 1) IOMMU support is present */
-+ if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU)) {
-+ /* Connect group to container */
-+ ret = ioctl(vfio_group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
-+ if (ret) {
-+ RTE_LOG(ERR, EAL, "vfio: failed to set group container:\n");
-+ close(fd);
-+ return -errno;
-+ }
-+
-+ ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
-+ if (ret) {
-+ RTE_LOG(ERR, EAL, "vfio: failed to set iommu for container:\n");
-+ close(fd);
-+ return -errno;
-+ }
-+ } else {
-+ RTE_LOG(ERR, EAL, "vfio error: No supported IOMMU\n");
-+ close(fd);
-+ return -EINVAL;
-+ }
-+#else
-+ /* Check whether stage-2 (nesting) SMMU IOMMU support is present */
-+ if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_NESTING_IOMMU)) {
-+ /* Connect group to container */
-+ ret = ioctl(vfio_group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
-+ if (ret) {
-+ RTE_LOG(ERR, EAL, "vfio: failed to set group container:\n");
-+ close(fd);
-+ return -errno;
-+ }
-+
-+ ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_TYPE1_NESTING_IOMMU);
-+ if (ret) {
-+ RTE_LOG(ERR, EAL, "vfio: failed to set iommu-2 for container:\n");
-+ close(fd);
-+ return -errno;
-+ }
-+ } else {
-+ RTE_LOG(ERR, EAL, "vfio error: No supported IOMMU-2\n");
-+ close(fd);
-+ return -EINVAL;
-+ }
-+#endif
-+ container = NULL;
-+ for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
-+ if (vfio_containers[i].used)
-+ continue;
-+ RTE_LOG(ERR, EAL, "DPAA2-Unused container at index %d\n", i);
-+ container = &vfio_containers[i];
-+ }
-+ if (!container) {
-+ RTE_LOG(ERR, EAL, "vfio error: No Free Container Found\n");
-+ close(fd);
-+ return -ENOMEM;
-+ }
-+
-+ container->used = 1;
-+ container->fd = fd;
-+ container->group_list[container->index] = vfio_group;
-+ vfio_group->container = container;
-+ container->index++;
-+ return 0;
-+}
-+
-+static int vfio_map_irq_region(struct vfio_group *group)
-+{
-+ int ret;
-+ unsigned long *vaddr = NULL;
-+ struct vfio_iommu_type1_dma_map map = {
-+ .argsz = sizeof(map),
-+ .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
-+ .vaddr = 0x6030000,
-+ .iova = 0x6030000,
-+ .size = 0x1000,
-+ };
-+
-+ vaddr = (unsigned long *)mmap(NULL, 0x1000, PROT_WRITE |
-+ PROT_READ, MAP_SHARED, container_device_fd, 0x6030000);
-+ if (vaddr == MAP_FAILED) {
-+ RTE_LOG(ERR, EAL, "Failed to map GITS region (errno = %d)\n", errno);
-+ return -errno;
-+ }
-+
-+ msi_intr_vaddr = (uint32_t *)((char *)(vaddr) + 64);
-+ map.vaddr = (unsigned long)vaddr;
-+ ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &map);
-+ if (ret == 0)
-+ return 0;
-+
-+ RTE_LOG(ERR, EAL, "vfio_map_irq_region fails (errno = %d)", errno);
-+ return -errno;
-+}
-+
-+int vfio_dmamap_mem_region(uint64_t vaddr,
-+ uint64_t iova,
-+ uint64_t size)
-+{
-+ struct vfio_group *group;
-+ struct vfio_iommu_type1_dma_map dma_map = {
-+ .argsz = sizeof(dma_map),
-+ .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
-+ };
-+
-+ dma_map.vaddr = vaddr;
-+ dma_map.size = size;
-+ dma_map.iova = iova;
-+
-+ /* SET DMA MAP for IOMMU */
-+ group = &vfio_groups[0];
-+ if (ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &dma_map)) {
-+ RTE_LOG(ERR, EAL, "SWP: VFIO_IOMMU_MAP_DMA API Error %d.\n", errno);
-+ return -1;
-+ }
-+ return 0;
-+}
-+
-+static int32_t setup_dmamap(void)
-+{
-+ int ret;
-+ struct vfio_group *group;
-+ struct vfio_iommu_type1_dma_map dma_map = {
-+ .argsz = sizeof(struct vfio_iommu_type1_dma_map),
-+ .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
-+ };
-+
-+ int i;
-+ const struct rte_memseg *memseg;
-+
-+ for (i = 0; i < RTE_MAX_MEMSEG; i++) {
-+ memseg = rte_eal_get_physmem_layout();
-+ if (memseg == NULL) {
-+ RTE_LOG(ERR, EAL,
-+ "\nError Cannot get physical layout\n");
-+ return -ENODEV;
-+ }
-+
-+ if (memseg[i].addr == NULL && memseg[i].len == 0) {
-+ break;
-+ }
-+
-+ dma_map.size = memseg[i].len;
-+ dma_map.vaddr = memseg[i].addr_64;
-+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-+ dma_map.iova = memseg[i].phys_addr;
-+#else
-+ dma_map.iova = dma_map.vaddr;
-+#endif
-+
-+ /* SET DMA MAP for IOMMU */
-+ group = &vfio_groups[0];
-+
-+ printf("-->Initial SHM Virtual ADDR %llX\n", dma_map.vaddr);
-+ printf("-----> DMA size 0x%llX\n", dma_map.size);
-+ ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &dma_map);
-+ if (ret) {
-+ RTE_LOG(ERR, EAL,
-+ "\nErr: VFIO_IOMMU_MAP_DMA API Error %d.\n",
-+ errno);
-+ return ret;
-+ }
-+ printf("-----> dma_map.vaddr = 0x%llX\n", dma_map.vaddr);
-+ }
-+
-+ /* TODO - This is a workaround: VFIO currently does not add the mapping
-+ of the interrupt region to the SMMU. Remove this once support is
-+ added in the kernel.
-+ */
-+ vfio_map_irq_region(group);
-+
-+ return 0;
-+}
-+
-+static int vfio_set_group(struct vfio_group *group, int groupid)
-+{
-+ char path[PATH_MAX];
-+ struct vfio_group_status status = { .argsz = sizeof(status) };
-+
-+ /* Open the VFIO file corresponding to the IOMMU group */
-+ snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
-+
-+ group->fd = open(path, O_RDWR);
-+ if (group->fd < 0) {
-+ RTE_LOG(ERR, EAL, "vfio: error opening %s\n", path);
-+ return -1;
-+ }
-+
-+ /* Test & Verify that group is VIABLE & AVAILABLE */
-+ if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
-+ RTE_LOG(ERR, EAL, "vfio: error getting group status\n");
-+ close(group->fd);
-+ return -1;
-+ }
-+ if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
-+ RTE_LOG(ERR, EAL, "vfio: group not viable\n");
-+ close(group->fd);
-+ return -1;
-+ }
-+ /* Since Group is VIABLE, Store the groupid */
-+ group->groupid = groupid;
-+
-+ /* Now connect this IOMMU group to given container */
-+ if (vfio_connect_container(group)) {
-+ RTE_LOG(ERR, EAL,
-+ "vfio: error connecting container with group %d\n",
-+ groupid);
-+ close(group->fd);
-+ return -1;
-+ }
-+
-+ return 0;
-+}
-+
-+static int32_t setup_vfio_grp(char *vfio_container)
-+{
-+ char path[PATH_MAX];
-+ char iommu_group_path[PATH_MAX], *group_name;
-+ struct vfio_group *group = NULL;
-+ struct stat st;
-+ int groupid;
-+ int ret, len, i;
-+
-+ printf("\tProcessing Container = %s\n", vfio_container);
-+ sprintf(path, "/sys/bus/fsl-mc/devices/%s", vfio_container);
-+ /* Check whether ls-container exists or not */
-+ printf("\tcontainer device path = %s\n", path);
-+ if (stat(path, &st) < 0) {
-+ RTE_LOG(ERR, EAL, "vfio: Error (%d) getting FSL-MC device (%s)\n",
-+ errno, path);
-+ return -errno;
-+ }
-+
-+ /* DPRC container exists. Now check its IOMMU group */
-+ strncat(path, "/iommu_group", sizeof(path) - strlen(path) - 1);
-+
-+ len = readlink(path, iommu_group_path, PATH_MAX);
-+ if (len == -1) {
-+ RTE_LOG(ERR, EAL, "\tvfio: error no iommu_group for device\n");
-+ RTE_LOG(ERR, EAL, "\t%s: len = %d, errno = %d\n",
-+ path, len, errno);
-+ return -errno;
-+ }
-+
-+ iommu_group_path[len] = 0;
-+ group_name = basename(iommu_group_path);
-+ if (sscanf(group_name, "%d", &groupid) != 1) {
-+ RTE_LOG(ERR, EAL, "\tvfio: error reading %s: %m\n", path);
-+ return -errno;
-+ }
-+
-+ RTE_LOG(INFO, EAL, "\tvfio: iommu group id = %d\n", groupid);
-+
-+ /* Check if group already exists */
-+ for (i = 0; i < VFIO_MAX_GRP; i++) {
-+ group = &vfio_groups[i];
-+ if (group->groupid == groupid) {
-+ RTE_LOG(ERR, EAL, "groupid already exists %d\n", groupid);
-+ return 0;
-+ }
-+ }
-+
-+ if (vfio_set_group(group, groupid)) {
-+ RTE_LOG(ERR, EAL, "group setup failure - %d\n", groupid);
-+ return -ENODEV;
-+ }
-+
-+ /* Get Device information */
-+ ret = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, vfio_container);
-+ if (ret < 0) {
-+ RTE_LOG(ERR, EAL, "\tvfio: error getting device %s fd from group %d\n",
-+ vfio_container, group->groupid);
-+ return ret;
-+ }
-+ container_device_fd = ret;
-+ RTE_LOG(INFO, EAL, "vfio: Container FD is [0x%X]\n", container_device_fd);
-+ /* Set up SMMU */
-+ ret = setup_dmamap();
-+ if (ret) {
-+ RTE_LOG(ERR, EAL, "Failed to set up DMA map\n");
-+ return ret;
-+ }
-+
-+ return 0;
-+}
-+
-+
-+static int64_t vfio_map_mcp_obj(struct vfio_group *group, char *mcp_obj)
-+{
-+ int64_t v_addr = (int64_t)MAP_FAILED;
-+ int32_t ret, mc_fd;
-+
-+ struct vfio_device_info d_info = { .argsz = sizeof(d_info) };
-+ struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) };
-+
-+ /* getting the mcp object's fd*/
-+ mc_fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, mcp_obj);
-+ if (mc_fd < 0) {
-+ RTE_LOG(ERR, EAL, "vfio: error getting device %s fd from group %d\n",
-+ mcp_obj, group->fd);
-+ return v_addr;
-+ }
-+
-+ /* getting device info*/
-+ ret = ioctl(mc_fd, VFIO_DEVICE_GET_INFO, &d_info);
-+ if (ret < 0) {
-+ RTE_LOG(ERR, EAL, "vfio: error getting DEVICE_INFO\n");
-+ goto MC_FAILURE;
-+ }
-+
-+ /* getting device region info*/
-+ ret = ioctl(mc_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
-+ if (ret < 0) {
-+ RTE_LOG(ERR, EAL, "vfio: error getting REGION_INFO\n");
-+ goto MC_FAILURE;
-+ }
-+
-+ RTE_LOG(INFO, EAL, "region offset = %llx , region size = %llx\n",
-+ reg_info.offset, reg_info.size);
-+
-+ v_addr = (uint64_t)mmap(NULL, reg_info.size,
-+ PROT_WRITE | PROT_READ, MAP_SHARED,
-+ mc_fd, reg_info.offset);
-+
-+MC_FAILURE:
-+ close(mc_fd);
-+
-+ return v_addr;
-+}
-+
-+/* Fetch the list of MC devices available in the VFIO container and
-+ * populate the private device list and related data structures.
-+ */
-+static int vfio_process_group_devices(void)
-+{
-+ struct vfio_device *vdev;
-+ struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
-+ char *temp_obj, *object_type, *mcp_obj, *dev_name;
-+ int32_t object_id, i, dev_fd, ret;
-+ DIR *d;
-+ struct dirent *dir;
-+ char path[PATH_MAX];
-+ int64_t v_addr;
-+ int ndev_count;
-+ struct vfio_group *group = &vfio_groups[0];
-+
-+ sprintf(path, "/sys/kernel/iommu_groups/%d/devices", group->groupid);
-+
-+ d = opendir(path);
-+ if (!d) {
-+ RTE_LOG(ERR, EAL,"Unable to open directory %s\n", path);
-+ return -1;
-+ }
-+
-+ /* Count the devices in the group and record the MCP object ID */
-+ ndev_count = 0;
-+ mcp_obj = NULL;
-+ while ((dir = readdir(d)) != NULL) {
-+ if (dir->d_type == DT_LNK) {
-+ ndev_count++;
-+ if (!strncmp("dpmcp", dir->d_name, 5)) {
-+ if (mcp_obj)
-+ free(mcp_obj);
-+ mcp_obj = malloc(sizeof(dir->d_name));
-+ if (!mcp_obj) {
-+ RTE_LOG(ERR, EAL,
-+ "Unable to allocate memory\n");
-+ return -ENOMEM;
-+ }
-+ strcpy(mcp_obj, dir->d_name);
-+ temp_obj = strtok(dir->d_name, ".");
-+ temp_obj = strtok(NULL, ".");
-+ sscanf(temp_obj, "%d", &mcp_id);
-+ }
-+ }
-+ }
-+ closedir(d);
-+
-+ if (!mcp_obj) {
-+ RTE_LOG(ERR, EAL,"MCP Object not Found\n");
-+ return -ENODEV;
-+ }
-+ RTE_LOG(INFO, EAL, "Total devices in container = %d, MCP ID = %d\n",
-+ ndev_count, mcp_id);
-+
-+ /* Allocate memory based on the number of objects in the group */
-+ group->vfio_device = (struct vfio_device *)malloc(ndev_count * sizeof(struct vfio_device));
-+ if (!(group->vfio_device)) {
-+ RTE_LOG(ERR, EAL,"Unable to allocate memory\n");
-+ free(mcp_obj);
-+ return -ENOMEM;
-+ }
-+
-+ /* Allocate memory for MC Portal list */
-+ mcp_ptr_list = malloc(sizeof(void *) * 1);
-+ if (!mcp_ptr_list) {
-+ RTE_LOG(ERR, EAL, "NO Memory!\n");
-+ free(mcp_obj);
-+ goto FAILURE;
-+ }
-+
-+ v_addr = vfio_map_mcp_obj(group, mcp_obj);
-+ free(mcp_obj);
-+ if (v_addr == (int64_t)MAP_FAILED) {
-+ RTE_LOG(ERR, EAL, "mapping region (errno = %d)\n", errno);
-+ goto FAILURE;
-+ }
-+
-+ RTE_LOG(INFO, EAL, "MC has VIR_ADD = 0x%ld\n", v_addr);
-+
-+ mcp_ptr_list[0] = (void *)v_addr;
-+
-+ d = opendir(path);
-+ if (!d) {
-+ RTE_LOG(ERR, EAL, "Unable to open directory %s\n", path);
-+ goto FAILURE;
-+ }
-+
-+ i = 0;
-+ printf("\nDPAA2 - Parsing MC Device Objects:\n");
-+ /* Parse each object and initialize it */
-+ while ((dir = readdir(d)) != NULL) {
-+ if (dir->d_type != DT_LNK)
-+ continue;
-+ if (!strncmp("dprc", dir->d_name, 4) || !strncmp("dpmcp", dir->d_name, 5))
-+ continue;
-+ dev_name = malloc(sizeof(dir->d_name));
-+ if (!dev_name) {
-+ RTE_LOG(ERR, EAL, "Unable to allocate memory\n");
-+ goto FAILURE;
-+ }
-+ strcpy(dev_name, dir->d_name);
-+ object_type = strtok(dir->d_name, ".");
-+ temp_obj = strtok(NULL, ".");
-+ sscanf(temp_obj, "%d", &object_id);
-+ RTE_LOG(INFO, EAL, "%s ", dev_name);
-+
-+ /* getting the device fd*/
-+ dev_fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, dev_name);
-+ if (dev_fd < 0) {
-+ RTE_LOG(ERR, EAL, "vfio getting device %s fd from group %d\n",
-+ dev_name, group->fd);
-+ free(dev_name);
-+ goto FAILURE;
-+ }
-+
-+ free(dev_name);
-+ vdev = &group->vfio_device[group->object_index++];
-+ vdev->fd = dev_fd;
-+ vdev->index = i;
-+ i++;
-+ /* Get device information */
-+ if (ioctl(vdev->fd, VFIO_DEVICE_GET_INFO, &device_info)) {
-+ RTE_LOG(ERR, EAL, "VFIO_DEVICE_FSL_MC_GET_INFO failed\n");
-+ goto FAILURE;
-+ }
-+
-+ if (!strcmp(object_type, "dpni") ||
-+ !strcmp(object_type, "dpseci")) {
-+ struct rte_pci_device *dev;
-+
-+ dev = malloc(sizeof(struct rte_pci_device));
-+ if (dev == NULL) {
-+ return -1;
-+ }
-+ memset(dev, 0, sizeof(*dev));
-+ /* store hw_id of dpni/dpseci device */
-+ dev->addr.devid = object_id;
-+ dev->id.vendor_id = FSL_VENDOR_ID;
-+ dev->id.device_id = (strcmp(object_type, "dpseci"))?
-+ FSL_MC_DPNI_DEVID: FSL_MC_DPSECI_DEVID;
-+
-+ TAILQ_INSERT_TAIL(&pci_device_list, dev, next);
-+ }
-+
-+ if (!strcmp(object_type, "dpio")) {
-+ dpaa2_create_dpio_device(vdev, &device_info, object_id);
-+ }
-+
-+ if (!strcmp(object_type, "dpbp")) {
-+ dpaa2_create_dpbp_device(object_id);
-+ }
-+ }
-+ closedir(d);
-+
-+ ret = dpaa2_affine_qbman_swp();
-+ if (ret)
-+ RTE_LOG(ERR, EAL, "%s(): Err in affining qbman swp\n", __func__);
-+
-+ return 0;
-+
-+FAILURE:
-+ free(group->vfio_device);
-+ group->vfio_device = NULL;
-+ return -1;
-+}
-+
-+/*
-+ * Scan the FSL-MC bus for the container device named by the DPRC
-+ * environment variable
-+ */
-+static int
-+fsl_mc_scan(void)
-+{
-+ char path[PATH_MAX];
-+ struct stat st;
-+
-+ ls2bus_container = getenv("DPRC");
-+
-+ if (ls2bus_container == NULL) {
-+ RTE_LOG(WARNING, EAL, "vfio container not set in env DPRC\n");
-+ return -1;
-+ }
-+
-+ snprintf(path, sizeof(path), "%s/%s", SYSFS_FSL_MC_DEVICES,
-+ ls2bus_container);
-+ /* Check whether LS-Container exists or not */
-+ RTE_LOG(INFO, EAL, "\tcontainer device path = %s\n", path);
-+ if (stat(path, &st) < 0) {
-+ RTE_LOG(ERR, EAL, "vfio: fsl-mc device does not exist\n");
-+ return -1;
-+ }
-+ return 0;
-+}
-+
-+/* Initialize the FSL-MC (LS2) EAL subsystem */
-+int
-+rte_eal_dpaa2_init(void)
-+{
-+ if (fsl_mc_scan() < 0)
-+ return -1;
-+
-+#ifdef VFIO_PRESENT
-+ if (setup_vfio_grp(ls2bus_container)) {
-+ RTE_LOG(ERR, EAL, "setup_vfio_grp\n");
-+ return -1;
-+ }
-+ if (vfio_process_group_devices()) {
-+ RTE_LOG(ERR, EAL, "vfio_process_group_devices\n");
-+ return -1;
-+ }
-+#endif
-+ return 0;
-+}
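
The container, group, and device handling above is spread across several functions; the following is a condensed, illustrative summary only, not part of the original patch. Error handling is omitted, and the group number 7 is a made-up example value.

```c
/* Condensed sketch of the VFIO sequence performed above for an FSL-MC
 * container (illustrative; "7" is a hypothetical IOMMU group number). */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int
vfio_sequence_sketch(void)
{
	int container = open("/dev/vfio/vfio", O_RDWR);
	int group = open("/dev/vfio/7", O_RDWR);  /* group of the DPRC */
	struct vfio_group_status status = { .argsz = sizeof(status) };
	struct vfio_iommu_type1_dma_map map = {
		.argsz = sizeof(map),
		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
	};

	ioctl(container, VFIO_GET_API_VERSION);        /* must match VFIO_API_VERSION */
	ioctl(group, VFIO_GROUP_GET_STATUS, &status);  /* group must be VIABLE */
	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
	ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
	/* fill map.vaddr/iova/size for each memseg, then: */
	ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
	/* finally VFIO_GROUP_GET_DEVICE_FD per MC object (dprc, dpni, ...) */
	return 0;
}
```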
-diff --git a/lib/librte_eal/linuxapp/eal/eal_vfio_fsl_mc.h b/lib/librte_eal/linuxapp/eal/eal_vfio_fsl_mc.h
-new file mode 100644
-index 0000000..7fc5ec6
---- /dev/null
-+++ b/lib/librte_eal/linuxapp/eal/eal_vfio_fsl_mc.h
-@@ -0,0 +1,102 @@
-+/*-
-+ * BSD LICENSE
-+ *
-+ * Copyright(c) 2014 Freescale Semiconductor. All rights reserved.
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions
-+ * are met:
-+ *
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in
-+ * the documentation and/or other materials provided with the
-+ * distribution.
-+ * * Neither the name of Freescale Semiconductor nor the names of its
-+ * contributors may be used to endorse or promote products derived
-+ * from this software without specific prior written permission.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+
-+#ifndef _EAL_VFIO_FSL_MC_H_
-+#define _EAL_VFIO_FSL_MC_H_
-+
-+#include <rte_memory.h>
-+#include <rte_mbuf.h>
-+#include <rte_atomic.h>
-+#include "eal_vfio.h"
-+
-+#define FSL_VENDOR_ID 0x1957
-+#define FSL_MC_DPNI_DEVID 7
-+#define FSL_MC_DPSECI_DEVID 3
-+
-+#define VFIO_MAX_GRP 1
-+#define VFIO_MAX_CONTAINERS 1
-+
-+#define DPAA2_MBUF_HW_ANNOTATION 64
-+#define DPAA2_FD_PTA_SIZE 64
-+#define DPAA2_PACKET_LAYOUT_ALIGN 256
-+#if (RTE_CACHE_LINE_SIZE == 128)
-+#define DPAA2_RES 128
-+#else
-+#define DPAA2_RES 0
-+#endif
-+
-+#define DPAA2_ALIGN_ROUNDUP(x, align) ((align) * (((x) + align - 1) / (align)))
-+#define DPAA2_ALIGN_ROUNDUP_PTR(x, align)\
-+ ((void *)DPAA2_ALIGN_ROUNDUP((uintptr_t)(x), (uintptr_t)(align)))
-+
-+typedef struct vfio_device {
-+ int fd; /* fsl_mc root container device ?? */
-+ int index; /*index of child object */
-+ struct vfio_device *child; /* Child object */
-+} vfio_device;
-+
-+typedef struct vfio_group {
-+ int fd; /* /dev/vfio/"groupid" */
-+ int groupid;
-+ struct vfio_container *container;
-+ int object_index;
-+ struct vfio_device *vfio_device;
-+} vfio_group;
-+
-+typedef struct vfio_container {
-+ int fd; /* /dev/vfio/vfio */
-+ int used;
-+ int index; /* index in group list */
-+ struct vfio_group *group_list[VFIO_MAX_GRP];
-+} vfio_container;
-+
-+int vfio_dmamap_mem_region(
-+ uint64_t vaddr,
-+ uint64_t iova,
-+ uint64_t size);
-+
-+/* initialize the NXP/FSL dpaa2 accelerators */
-+int rte_eal_dpaa2_init(void);
-+
-+int dpaa2_create_dpio_device(struct vfio_device *vdev,
-+ struct vfio_device_info *obj_info,
-+ int object_id);
-+
-+int dpaa2_create_dpbp_device(int dpbp_id);
-+
-+int dpaa2_affine_qbman_swp(void);
-+
-+int dpaa2_affine_qbman_swp_sec(void);
-+
-+#endif
-+
-diff --git a/lib/librte_mbuf/Makefile b/lib/librte_mbuf/Makefile
-index 8d62b0d..92446d1 100644
---- a/lib/librte_mbuf/Makefile
-+++ b/lib/librte_mbuf/Makefile
-@@ -36,6 +36,10 @@ LIB = librte_mbuf.a
-
- CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
-
-+ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_PMD),y)
-+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
-+endif
-+
- EXPORT_MAP := rte_mbuf_version.map
-
- LIBABIVER := 2
-diff --git a/lib/librte_mbuf/rte_mbuf.c b/lib/librte_mbuf/rte_mbuf.c
-index dc0467c..c4009ee 100644
---- a/lib/librte_mbuf/rte_mbuf.c
-+++ b/lib/librte_mbuf/rte_mbuf.c
-@@ -60,6 +60,59 @@
- #include <rte_hexdump.h>
- #include <rte_errno.h>
-
-+#ifdef RTE_LIBRTE_DPAA2_PMD
-+
-+int __attribute__((weak))
-+hw_mbuf_create_pool(
-+struct rte_mempool __rte_unused *mp)
-+{
-+ RTE_LOG(WARNING, MBUF, "%s\n", __func__);
-+ return -1;
-+}
-+
-+int __attribute__((weak))
-+hw_mbuf_init(
-+ struct rte_mempool __rte_unused*mp,
-+ void __rte_unused *_m)
-+{
-+ RTE_LOG(WARNING, MBUF, "%s\n", __func__);
-+ return -1;
-+}
-+
-+int __attribute__((weak))
-+hw_mbuf_alloc(
-+ struct rte_mempool __rte_unused *mp,
-+ void __rte_unused **obj_p)
-+{
-+ RTE_LOG(WARNING, MBUF, "%s\n", __func__);
-+ return -1;
-+}
-+
-+int __attribute__((weak))
-+hw_mbuf_free(void __rte_unused *m)
-+{
-+ RTE_LOG(WARNING, MBUF, "%s\n", __func__);
-+ return -1;
-+}
-+
-+int __attribute__((weak))
-+hw_mbuf_alloc_bulk(struct rte_mempool __rte_unused *pool,
-+ void __rte_unused **obj_table,
-+ unsigned __rte_unused count)
-+{
-+ RTE_LOG(WARNING, MBUF, "%s\n", __func__);
-+ return -1;
-+}
-+
-+int __attribute__((weak))
-+hw_mbuf_free_bulk(struct rte_mempool __rte_unused *mp,
-+ void __rte_unused * const *obj_table,
-+ unsigned __rte_unused n)
-+{
-+ RTE_LOG(WARNING, MBUF, "%s\n", __func__);
-+ return -1;
-+}
-+#endif
- /*
- * ctrlmbuf constructor, given as a callback function to
- * rte_mempool_create()
-@@ -106,6 +159,10 @@ rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg)
-
- mbp_priv = rte_mempool_get_priv(mp);
- memcpy(mbp_priv, user_mbp_priv, sizeof(*mbp_priv));
-+#ifdef RTE_LIBRTE_DPAA2_PMD
-+ if (mp->flags & MEMPOOL_F_HW_PKT_POOL)
-+ hw_mbuf_create_pool(mp);
-+#endif
- }
-
- /*
-@@ -122,6 +179,12 @@ rte_pktmbuf_init(struct rte_mempool *mp,
- struct rte_mbuf *m = _m;
- uint32_t mbuf_size, buf_len, priv_size;
-
-+#ifdef RTE_LIBRTE_DPAA2_PMD
-+ if (mp->flags & MEMPOOL_F_HW_PKT_POOL) {
-+ if (hw_mbuf_init(mp, m) == 0)
-+ return;
-+ }
-+#endif
- priv_size = rte_pktmbuf_priv_size(mp);
- mbuf_size = sizeof(struct rte_mbuf) + priv_size;
- buf_len = rte_pktmbuf_data_room_size(mp);
-@@ -170,7 +233,11 @@ rte_pktmbuf_pool_create(const char *name, unsigned n,
- return rte_mempool_create(name, n, elt_size,
- cache_size, sizeof(struct rte_pktmbuf_pool_private),
- rte_pktmbuf_pool_init, &mbp_priv, rte_pktmbuf_init, NULL,
-+#if defined(RTE_LIBRTE_DPAA2_PMD)
-+ socket_id, MEMPOOL_F_HW_PKT_POOL);
-+#else
- socket_id, 0);
-+#endif
- }
-
- /* do some sanity checks on a mbuf: panic if it fails */
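
The weak `hw_mbuf_*` stubs added above are meant to be overridden at link time by a hardware buffer-pool driver. A hedged sketch of such an override follows, assuming the patched headers in this series; `hypothetical_hw_buf_acquire()` is invented for illustration and is not a DPDK or DPAA2 API.

```c
#include <errno.h>
#include <rte_mempool.h>

/* Hypothetical driver-internal helper, invented for this sketch. */
void *hypothetical_hw_buf_acquire(struct rte_mempool *mp);

/* Strong definition: at link time it replaces the weak stub compiled into
 * librte_mbuf, so rte_mempool_get_bulk() on a MEMPOOL_F_HW_PKT_POOL pool
 * is served from the hardware buffer pool. */
int
hw_mbuf_alloc_bulk(struct rte_mempool *mp, void **obj_table, unsigned count)
{
	unsigned i;

	for (i = 0; i < count; i++) {
		obj_table[i] = hypothetical_hw_buf_acquire(mp);
		if (obj_table[i] == NULL) {
			/* A real driver would release the buffers acquired
			 * so far. Returning a value <= -2 fails the
			 * `ret > -2` check in rte_mempool_get_bulk(), so
			 * the caller falls back to the software ring. */
			return -ENOENT;
		}
	}
	return 0;
}
```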
-diff --git a/lib/librte_mempool/Makefile b/lib/librte_mempool/Makefile
-index a6898ef..6116d52 100644
---- a/lib/librte_mempool/Makefile
-+++ b/lib/librte_mempool/Makefile
-@@ -36,6 +36,10 @@ LIB = librte_mempool.a
-
- CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
-
-+ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_PMD),y)
-+CFLAGS += -I$(RTE_SDK)/lib/librte_mbuf
-+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
-+endif
- EXPORT_MAP := rte_mempool_version.map
-
- LIBABIVER := 1
-diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
-index f8781e1..ac9595d 100644
---- a/lib/librte_mempool/rte_mempool.c
-+++ b/lib/librte_mempool/rte_mempool.c
-@@ -60,6 +60,10 @@
-
- #include "rte_mempool.h"
-
-+#ifdef RTE_LIBRTE_DPAA2_PMD
-+#include "eal_vfio_fsl_mc.h"
-+#endif
-+
- TAILQ_HEAD(rte_mempool_list, rte_tailq_entry);
-
- static struct rte_tailq_elem rte_mempool_tailq = {
-@@ -316,6 +320,12 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
-
- /* this is the size of an object, including header and trailer */
- sz->total_size = sz->header_size + sz->elt_size + sz->trailer_size;
-+#ifdef RTE_LIBRTE_DPAA2_PMD
-+ if (flags & MEMPOOL_F_HW_PKT_POOL)
-+ sz->total_size += DPAA2_ALIGN_ROUNDUP(
-+ DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE,
-+ DPAA2_PACKET_LAYOUT_ALIGN);
-+#endif
-
- return sz->total_size;
- }
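
To make the object-size adjustment above concrete: with the values defined in `eal_vfio_fsl_mc.h` earlier in this patch (64-byte annotation, 64-byte PTA, 256-byte packet layout alignment), each mempool object grows by 256 bytes. A small self-contained check of that arithmetic, with the macro copied from that header:

```c
#include <assert.h>

/* Copied from eal_vfio_fsl_mc.h earlier in this patch. */
#define DPAA2_ALIGN_ROUNDUP(x, align) ((align) * (((x) + align - 1) / (align)))

int
main(void)
{
	/* 64-byte annotation + 64-byte PTA, rounded up to the 256-byte
	 * packet layout alignment, adds 256 bytes per mempool object. */
	assert(DPAA2_ALIGN_ROUNDUP(64 + 64, 256) == 256);

	/* General behaviour of the round-up macro. */
	assert(DPAA2_ALIGN_ROUNDUP(100, 64) == 128);
	assert(DPAA2_ALIGN_ROUNDUP(128, 64) == 128);
	return 0;
}
```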
-@@ -590,6 +600,9 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
- mp->cache_size = cache_size;
- mp->cache_flushthresh = CALC_CACHE_FLUSHTHRESH(cache_size);
- mp->private_data_size = private_data_size;
-+#ifdef RTE_LIBRTE_DPAA2_PMD
-+ mp->offload_ptr = UINTPTR_MAX;
-+#endif
-
- /* calculate address of the first element for continuous mempool. */
- obj = (char *)mp + MEMPOOL_HEADER_SIZE(mp, pg_num) +
-diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
-index 9745bf0..304a434 100644
---- a/lib/librte_mempool/rte_mempool.h
-+++ b/lib/librte_mempool/rte_mempool.h
-@@ -215,7 +215,10 @@ struct rte_mempool {
- uintptr_t elt_va_end;
- /**< Virtual address of the <size + 1> mempool object. */
- phys_addr_t elt_pa[MEMPOOL_PG_NUM_DEFAULT];
-+#ifdef RTE_LIBRTE_DPAA2_PMD
- /**< Array of physical page addresses for the mempool objects buffer. */
-+ uintptr_t offload_ptr;
-+#endif
-
- } __rte_cache_aligned;
-
-@@ -223,7 +226,18 @@ struct rte_mempool {
- #define MEMPOOL_F_NO_CACHE_ALIGN 0x0002 /**< Do not align objs on cache lines.*/
- #define MEMPOOL_F_SP_PUT 0x0004 /**< Default put is "single-producer".*/
- #define MEMPOOL_F_SC_GET 0x0008 /**< Default get is "single-consumer".*/
--
-+#ifdef RTE_LIBRTE_DPAA2_PMD
-+#define MEMPOOL_F_HW_PKT_POOL 0x0010 /**< HW offload for packet buffer mgmt*/
-+
-+int hw_mbuf_create_pool(struct rte_mempool *mp);
-+int hw_mbuf_init(struct rte_mempool *mp, void *_m);
-+int hw_mbuf_alloc(struct rte_mempool *mp, void **obj_p);
-+int hw_mbuf_free(void *_m);
-+int hw_mbuf_alloc_bulk(struct rte_mempool *pool,
-+ void **obj_table, unsigned count);
-+int hw_mbuf_free_bulk(struct rte_mempool *mp, void * const *obj_table,
-+ unsigned n);
-+#endif
- /**
- * @internal When debug is enabled, store some statistics.
- *
-@@ -877,6 +891,12 @@ static inline void __attribute__((always_inline))
- rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
- unsigned n)
- {
-+#ifdef RTE_LIBRTE_DPAA2_PMD
-+ if (mp->flags & MEMPOOL_F_HW_PKT_POOL) {
-+ if (hw_mbuf_free_bulk(mp, obj_table, n) == 0)
-+ return;
-+ }
-+#endif
- __mempool_check_cookies(mp, obj_table, n, 0);
- __mempool_put_bulk(mp, obj_table, n, !(mp->flags & MEMPOOL_F_SP_PUT));
- }
-@@ -1091,6 +1111,14 @@ static inline int __attribute__((always_inline))
- rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
- {
- int ret;
-+
-+#ifdef RTE_LIBRTE_DPAA2_PMD
-+ if (mp->flags & MEMPOOL_F_HW_PKT_POOL) {
-+ ret = hw_mbuf_alloc_bulk(mp, obj_table, n);
-+ if (ret > -2)
-+ return ret;
-+ }
-+#endif
- ret = __mempool_get_bulk(mp, obj_table, n,
- !(mp->flags & MEMPOOL_F_SC_GET));
- if (ret == 0)
-diff --git a/mk/machine/dpaa2/rte.vars.mk b/mk/machine/dpaa2/rte.vars.mk
-new file mode 100644
-index 0000000..8541633
---- /dev/null
-+++ b/mk/machine/dpaa2/rte.vars.mk
-@@ -0,0 +1,60 @@
-+# BSD LICENSE
-+#
-+# Copyright(c) 2016 Freescale Semiconductor, Inc. All rights reserved.
-+#
-+# Redistribution and use in source and binary forms, with or without
-+# modification, are permitted provided that the following conditions
-+# are met:
-+#
-+# * Redistributions of source code must retain the above copyright
-+# notice, this list of conditions and the following disclaimer.
-+# * Redistributions in binary form must reproduce the above copyright
-+# notice, this list of conditions and the following disclaimer in
-+# the documentation and/or other materials provided with the
-+# distribution.
-+# * Neither the name of Freescale Semiconductor nor the names of its
-+# contributors may be used to endorse or promote products derived
-+# from this software without specific prior written permission.
-+#
-+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+
-+#
-+# machine:
-+#
-+# - can define ARCH variable (overridden by cmdline value)
-+# - can define CROSS variable (overridden by cmdline value)
-+# - define MACHINE_CFLAGS variable (overridden by cmdline value)
-+# - define MACHINE_LDFLAGS variable (overridden by cmdline value)
-+# - define MACHINE_ASFLAGS variable (overridden by cmdline value)
-+# - can define CPU_CFLAGS variable (overridden by cmdline value) that
-+# overrides the one defined in arch.
-+# - can define CPU_LDFLAGS variable (overridden by cmdline value) that
-+# overrides the one defined in arch.
-+# - can define CPU_ASFLAGS variable (overridden by cmdline value) that
-+# overrides the one defined in arch.
-+# - may override any previously defined variable
-+#
-+
-+# ARCH =
-+# CROSS =
-+# MACHINE_CFLAGS =
-+# MACHINE_LDFLAGS =
-+# MACHINE_ASFLAGS =
-+# CPU_CFLAGS =
-+# CPU_LDFLAGS =
-+# CPU_ASFLAGS =
-+MACHINE_CFLAGS += -march=armv8-a
-+
-+ifdef CONFIG_RTE_ARCH_ARM_TUNE
-+MACHINE_CFLAGS += -mcpu=$(CONFIG_RTE_ARCH_ARM_TUNE)
-+endif
-diff --git a/mk/rte.app.mk b/mk/rte.app.mk
-index c66e491..ee25ba3 100644
---- a/mk/rte.app.mk
-+++ b/mk/rte.app.mk
-@@ -125,6 +125,7 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_CFGFILE) += -lrte_cfgfile
- _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += -lrte_pmd_bond
-
- _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += -lrte_pmd_xenvirt
-+_LDLIBS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += -lrte_pmd_dpaa2
-
- ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
- # plugins (link only if static libraries)
---
-2.5.0
-
diff --git a/dpdk/dpdk-16.04_patches/0018-enic-fix-segfault-on-Tx-path-after-restarting-a-devi.patch b/dpdk/dpdk-16.04_patches/0018-enic-fix-segfault-on-Tx-path-after-restarting-a-devi.patch
deleted file mode 100644
index 10b6637..0000000
--- a/dpdk/dpdk-16.04_patches/0018-enic-fix-segfault-on-Tx-path-after-restarting-a-devi.patch
+++ /dev/null
@@ -1,46 +0,0 @@
-From 60971e62dcbb50a7ef1c3839e8b33b5aef6a48fe Mon Sep 17 00:00:00 2001
-From: John Daley <johndale@cisco.com>
-Date: Fri, 1 Jul 2016 12:24:45 -0700
-Subject: [PATCH 18/25] enic: fix segfault on Tx path after restarting a device
-
-If you stop then start a port that had already sent some packets,
-there was a segfault due to not resetting the number of completed
-sends to zero.
-
-Fixes: d5d882fe1a11 ("Tx path rewrite to reduce Host CPU overhead")
-
-Signed-off-by: Nelson Escobar <neescoba@cisco.com>
-Reviewed-by: John Daley <johndale@cisco.com>
----
- drivers/net/enic/base/vnic_wq.c | 2 ++
- drivers/net/enic/base/vnic_wq.h | 1 +
- 2 files changed, 3 insertions(+)
-
-diff --git a/drivers/net/enic/base/vnic_wq.c b/drivers/net/enic/base/vnic_wq.c
-index ccbbd61..7026bfe 100644
---- a/drivers/net/enic/base/vnic_wq.c
-+++ b/drivers/net/enic/base/vnic_wq.c
-@@ -206,6 +206,8 @@ void vnic_wq_clean(struct vnic_wq *wq,
-
- wq->head_idx = 0;
- wq->tail_idx = 0;
-+ wq->last_completed_index = 0;
-+ *((uint32_t *)wq->cqmsg_rz->addr) = 0;
-
- iowrite32(0, &wq->ctrl->fetch_index);
- iowrite32(0, &wq->ctrl->posted_index);
-diff --git a/drivers/net/enic/base/vnic_wq.h b/drivers/net/enic/base/vnic_wq.h
-index 37c3ff9..faf3bfa 100644
---- a/drivers/net/enic/base/vnic_wq.h
-+++ b/drivers/net/enic/base/vnic_wq.h
-@@ -38,6 +38,7 @@
-
- #include "vnic_dev.h"
- #include "vnic_cq.h"
-+#include <rte_memzone.h>
-
- /* Work queue control */
- struct vnic_wq_ctrl {
---
-2.7.0
-
diff --git a/dpdk/dpdk-16.04_patches/0019-enic-fix-Rx-queue-initialization-after-restarting-a-.patch b/dpdk/dpdk-16.04_patches/0019-enic-fix-Rx-queue-initialization-after-restarting-a-.patch
deleted file mode 100644
index 3e10748..0000000
--- a/dpdk/dpdk-16.04_patches/0019-enic-fix-Rx-queue-initialization-after-restarting-a-.patch
+++ /dev/null
@@ -1,37 +0,0 @@
-From 8d336ba9cbcb4832b992201497afe07afcd4f2e1 Mon Sep 17 00:00:00 2001
-From: John Daley <johndale@cisco.com>
-Date: Fri, 1 Jul 2016 12:32:45 -0700
-Subject: [PATCH 19/25] enic: fix Rx queue initialization after restarting a
- device
-
-If you stop then start a port that had already received some packets,
-the NIC could fetch descriptors from the wrong location. This could
-effectively reduce the size of the Rx queue by a random amount and
-cause packet drop or reduced performance.
-
-Reset the NIC fetch index to 0 when allocating and posting mbuf
-addresses to the NIC.
-
-Fixes: 947d860c821f ("enic: improve Rx performance")
-
-Signed-off-by: John Daley <johndale@cisco.com>
-Reviewed-by: Nelson Escobar <neescoba@cisco.com>
----
- drivers/net/enic/enic_main.c | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
-index be17707..68532d3 100644
---- a/drivers/net/enic/enic_main.c
-+++ b/drivers/net/enic/enic_main.c
-@@ -346,6 +346,7 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
- dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n",
- enic->port_id, rq->index, rq->posted_index, rq->rx_nb_hold);
- iowrite32(rq->posted_index, &rq->ctrl->posted_index);
-+ iowrite32(0, &rq->ctrl->fetch_index);
- rte_rmb();
-
- // printf("posted %d buffers to %s rq\n", rq->ring.desc_count,
---
-2.7.0
-
diff --git a/dpdk/dpdk-16.04_patches/0020-net-enic-fix-releasing-mbufs-when-tearing-down-Rx-qu.patch b/dpdk/dpdk-16.04_patches/0020-net-enic-fix-releasing-mbufs-when-tearing-down-Rx-qu.patch
deleted file mode 100644
index 47bcda2..0000000
--- a/dpdk/dpdk-16.04_patches/0020-net-enic-fix-releasing-mbufs-when-tearing-down-Rx-qu.patch
+++ /dev/null
@@ -1,43 +0,0 @@
-From 3f276178609472585a85fe440b549013a64d9327 Mon Sep 17 00:00:00 2001
-From: Nelson Escobar <neescoba@cisco.com>
-Date: Tue, 14 Jun 2016 16:55:34 -0700
-Subject: [PATCH 20/25] net/enic: fix releasing mbufs when tearing down Rx
- queue
-
-When trying to release the mbufs, the function was incorrectly
-iterating over the max size configured instead of the actual size
-of the ring.
-
-Fixes: 947d860c821f ("enic: improve Rx performance")
-
-Signed-off-by: Nelson Escobar <neescoba@cisco.com>
-Reviewed-by: John Daley <johndale@cisco.com>
----
- drivers/net/enic/enic_main.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
-index 68532d3..56ec96e 100644
---- a/drivers/net/enic/enic_main.c
-+++ b/drivers/net/enic/enic_main.c
-@@ -91,7 +91,7 @@ static int is_eth_addr_valid(uint8_t *addr)
- }
-
- static void
--enic_rxmbuf_queue_release(struct enic *enic, struct vnic_rq *rq)
-+enic_rxmbuf_queue_release(__rte_unused struct enic *enic, struct vnic_rq *rq)
- {
- uint16_t i;
-
-@@ -100,7 +100,7 @@ enic_rxmbuf_queue_release(struct enic *enic, struct vnic_rq *rq)
- return;
- }
-
-- for (i = 0; i < enic->config.rq_desc_count; i++) {
-+ for (i = 0; i < rq->ring.desc_count; i++) {
- if (rq->mbuf_ring[i]) {
- rte_pktmbuf_free_seg(rq->mbuf_ring[i]);
- rq->mbuf_ring[i] = NULL;
---
-2.7.0
-
diff --git a/dpdk/dpdk-16.04_patches/0021-net-enic-fix-crash-when-releasing-queues.patch b/dpdk/dpdk-16.04_patches/0021-net-enic-fix-crash-when-releasing-queues.patch
deleted file mode 100644
index 56d2c67..0000000
--- a/dpdk/dpdk-16.04_patches/0021-net-enic-fix-crash-when-releasing-queues.patch
+++ /dev/null
@@ -1,61 +0,0 @@
-From 38e154305ee5fd2ee454c19218ca144ffd1535f1 Mon Sep 17 00:00:00 2001
-From: John Daley <johndale@cisco.com>
-Date: Sat, 11 Jun 2016 10:27:04 -0700
-Subject: [PATCH 21/25] net/enic: fix crash when releasing queues
-
-If device configuration fails due to a lack of resources, such as
-when more queues are requested than are available, the queue release
-functions are called with NULL pointers, which were being dereferenced.
-
-Skip releasing queues if they are NULL pointers.
-
-Fixes: fefed3d1e62c ("enic: new driver")
-
-Signed-off-by: John Daley <johndale@cisco.com>
----
- drivers/net/enic/enic_main.c | 21 ++++++++++++++++-----
- 1 file changed, 16 insertions(+), 5 deletions(-)
-
-diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
-index 56ec96e..4e5594f 100644
---- a/drivers/net/enic/enic_main.c
-+++ b/drivers/net/enic/enic_main.c
-@@ -462,9 +462,15 @@ int enic_alloc_intr_resources(struct enic *enic)
-
- void enic_free_rq(void *rxq)
- {
-- struct vnic_rq *rq_sop = (struct vnic_rq *)rxq;
-- struct enic *enic = vnic_dev_priv(rq_sop->vdev);
-- struct vnic_rq *rq_data = &enic->rq[rq_sop->data_queue_idx];
-+ struct vnic_rq *rq_sop, *rq_data;
-+ struct enic *enic;
-+
-+ if (rxq == NULL)
-+ return;
-+
-+ rq_sop = (struct vnic_rq *)rxq;
-+ enic = vnic_dev_priv(rq_sop->vdev);
-+ rq_data = &enic->rq[rq_sop->data_queue_idx];
-
- enic_rxmbuf_queue_release(enic, rq_sop);
- if (rq_data->in_use)
-@@ -657,9 +663,14 @@ err_exit:
-
- void enic_free_wq(void *txq)
- {
-- struct vnic_wq *wq = (struct vnic_wq *)txq;
-- struct enic *enic = vnic_dev_priv(wq->vdev);
-+ struct vnic_wq *wq;
-+ struct enic *enic;
-+
-+ if (txq == NULL)
-+ return;
-
-+ wq = (struct vnic_wq *)txq;
-+ enic = vnic_dev_priv(wq->vdev);
- rte_memzone_free(wq->cqmsg_rz);
- vnic_wq_free(wq);
- vnic_cq_free(&enic->cq[enic->rq_count + wq->index]);
---
-2.7.0
-
diff --git a/dpdk/dpdk-16.04_patches/0022-net-enic-improve-out-of-resources-error-handling.patch b/dpdk/dpdk-16.04_patches/0022-net-enic-improve-out-of-resources-error-handling.patch
deleted file mode 100644
index bf6df81..0000000
--- a/dpdk/dpdk-16.04_patches/0022-net-enic-improve-out-of-resources-error-handling.patch
+++ /dev/null
@@ -1,67 +0,0 @@
-From db0a30a2e61a3bf2f6cb8e74203dab84280b0419 Mon Sep 17 00:00:00 2001
-From: John Daley <johndale@cisco.com>
-Date: Sat, 11 Jun 2016 10:27:05 -0700
-Subject: [PATCH 22/25] net/enic: improve out of resources error handling
-
-If configuration fails due to lack of resources, be more specific
-about which resources are lacking - work queues, receive queues or
-completion queues. Return -EINVAL instead of -1 if more queues
-are requested than are available.
-
-Fixes: fefed3d1e62c ("enic: new driver")
-
-Signed-off-by: John Daley <johndale@cisco.com>
----
- drivers/net/enic/enic_main.c | 30 ++++++++++++++++++++----------
- 1 file changed, 20 insertions(+), 10 deletions(-)
-
-diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
-index 4e5594f..43e4af1 100644
---- a/drivers/net/enic/enic_main.c
-+++ b/drivers/net/enic/enic_main.c
-@@ -970,22 +970,32 @@ static void enic_dev_deinit(struct enic *enic)
- int enic_set_vnic_res(struct enic *enic)
- {
- struct rte_eth_dev *eth_dev = enic->rte_dev;
-+ int rc = 0;
-
-- if ((enic->rq_count < eth_dev->data->nb_rx_queues) ||
-- (enic->wq_count < eth_dev->data->nb_tx_queues)) {
-- dev_err(dev, "Not enough resources configured, aborting\n");
-- return -1;
-+ if (enic->rq_count < eth_dev->data->nb_rx_queues) {
-+ dev_err(dev, "Not enough Receive queues. Requested:%u, Configured:%u\n",
-+ eth_dev->data->nb_rx_queues, enic->rq_count);
-+ rc = -EINVAL;
-+ }
-+ if (enic->wq_count < eth_dev->data->nb_tx_queues) {
-+ dev_err(dev, "Not enough Transmit queues. Requested:%u, Configured:%u\n",
-+ eth_dev->data->nb_tx_queues, enic->wq_count);
-+ rc = -EINVAL;
- }
-
-- enic->rq_count = eth_dev->data->nb_rx_queues;
-- enic->wq_count = eth_dev->data->nb_tx_queues;
- if (enic->cq_count < (enic->rq_count + enic->wq_count)) {
-- dev_err(dev, "Not enough resources configured, aborting\n");
-- return -1;
-+ dev_err(dev, "Not enough Completion queues. Required:%u, Configured:%u\n",
-+ enic->rq_count + enic->wq_count, enic->cq_count);
-+ rc = -EINVAL;
- }
-
-- enic->cq_count = enic->rq_count + enic->wq_count;
-- return 0;
-+ if (rc == 0) {
-+ enic->rq_count = eth_dev->data->nb_rx_queues;
-+ enic->wq_count = eth_dev->data->nb_tx_queues;
-+ enic->cq_count = enic->rq_count + enic->wq_count;
-+ }
-+
-+ return rc;
- }
-
- static int enic_dev_init(struct enic *enic)
---
-2.7.0
-
diff --git a/dpdk/dpdk-16.04_patches/0023-net-enic-fix-memory-freeing.patch b/dpdk/dpdk-16.04_patches/0023-net-enic-fix-memory-freeing.patch
deleted file mode 100644
index 0cc423a..0000000
--- a/dpdk/dpdk-16.04_patches/0023-net-enic-fix-memory-freeing.patch
+++ /dev/null
@@ -1,238 +0,0 @@
-From 2040a8f4e47d3bc4b7f0f11faa863a4bd8d8891d Mon Sep 17 00:00:00 2001
-From: Nelson Escobar <neescoba@cisco.com>
-Date: Thu, 23 Jun 2016 16:14:58 -0700
-Subject: [PATCH 23/25] net/enic: fix memory freeing
-
-enic_alloc_consistent() allocated memory, but enic_free_consistent()
-was an empty function, so allocated memory was never freed.
-
-This commit adds a list and lock to the enic structure to keep track
-of the memzones allocated in enic_alloc_consistent(), and
-enic_free_consistent() uses that information to properly free memory.
-
-Fixes: fefed3d1e62c ("enic: new driver")
-
-Signed-off-by: Nelson Escobar <neescoba@cisco.com>
-Reviewed-by: John Daley <johndale@cisco.com>
----
- drivers/net/enic/base/vnic_dev.c | 14 +++++------
- drivers/net/enic/base/vnic_dev.h | 2 +-
- drivers/net/enic/enic.h | 11 ++++++++
- drivers/net/enic/enic_main.c | 54 ++++++++++++++++++++++++++++++++++------
- 4 files changed, 65 insertions(+), 16 deletions(-)
-
-diff --git a/drivers/net/enic/base/vnic_dev.c b/drivers/net/enic/base/vnic_dev.c
-index e8a5028..fc2e4cc 100644
---- a/drivers/net/enic/base/vnic_dev.c
-+++ b/drivers/net/enic/base/vnic_dev.c
-@@ -83,7 +83,7 @@ struct vnic_dev {
- struct vnic_intr_coal_timer_info intr_coal_timer_info;
- void *(*alloc_consistent)(void *priv, size_t size,
- dma_addr_t *dma_handle, u8 *name);
-- void (*free_consistent)(struct rte_pci_device *hwdev,
-+ void (*free_consistent)(void *priv,
- size_t size, void *vaddr,
- dma_addr_t dma_handle);
- };
-@@ -101,7 +101,7 @@ void *vnic_dev_priv(struct vnic_dev *vdev)
- void vnic_register_cbacks(struct vnic_dev *vdev,
- void *(*alloc_consistent)(void *priv, size_t size,
- dma_addr_t *dma_handle, u8 *name),
-- void (*free_consistent)(struct rte_pci_device *hwdev,
-+ void (*free_consistent)(void *priv,
- size_t size, void *vaddr,
- dma_addr_t dma_handle))
- {
-@@ -807,7 +807,7 @@ int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
- int vnic_dev_notify_unset(struct vnic_dev *vdev)
- {
- if (vdev->notify && !vnic_dev_in_reset(vdev)) {
-- vdev->free_consistent(vdev->pdev,
-+ vdev->free_consistent(vdev->priv,
- sizeof(struct vnic_devcmd_notify),
- vdev->notify,
- vdev->notify_pa);
-@@ -924,16 +924,16 @@ void vnic_dev_unregister(struct vnic_dev *vdev)
- {
- if (vdev) {
- if (vdev->notify)
-- vdev->free_consistent(vdev->pdev,
-+ vdev->free_consistent(vdev->priv,
- sizeof(struct vnic_devcmd_notify),
- vdev->notify,
- vdev->notify_pa);
- if (vdev->stats)
-- vdev->free_consistent(vdev->pdev,
-+ vdev->free_consistent(vdev->priv,
- sizeof(struct vnic_stats),
- vdev->stats, vdev->stats_pa);
- if (vdev->fw_info)
-- vdev->free_consistent(vdev->pdev,
-+ vdev->free_consistent(vdev->priv,
- sizeof(struct vnic_devcmd_fw_info),
- vdev->fw_info, vdev->fw_info_pa);
- kfree(vdev);
-@@ -1041,7 +1041,7 @@ int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
-
- ret = vnic_dev_cmd(vdev, CMD_ADD_FILTER, &a0, &a1, wait);
- *entry = (u16)a0;
-- vdev->free_consistent(vdev->pdev, tlv_size, tlv_va, tlv_pa);
-+ vdev->free_consistent(vdev->priv, tlv_size, tlv_va, tlv_pa);
- } else if (cmd == CLSF_DEL) {
- a0 = *entry;
- ret = vnic_dev_cmd(vdev, CMD_DEL_FILTER, &a0, &a1, wait);
-diff --git a/drivers/net/enic/base/vnic_dev.h b/drivers/net/enic/base/vnic_dev.h
-index 113d6ac..689442f 100644
---- a/drivers/net/enic/base/vnic_dev.h
-+++ b/drivers/net/enic/base/vnic_dev.h
-@@ -102,7 +102,7 @@ unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
- void vnic_register_cbacks(struct vnic_dev *vdev,
- void *(*alloc_consistent)(void *priv, size_t size,
- dma_addr_t *dma_handle, u8 *name),
-- void (*free_consistent)(struct rte_pci_device *hwdev,
-+ void (*free_consistent)(void *priv,
- size_t size, void *vaddr,
- dma_addr_t dma_handle));
- void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
-diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
-index d2de6ee..175adb8 100644
---- a/drivers/net/enic/enic.h
-+++ b/drivers/net/enic/enic.h
-@@ -46,6 +46,8 @@
- #include "vnic_rss.h"
- #include "enic_res.h"
- #include "cq_enet_desc.h"
-+#include <sys/queue.h>
-+#include <rte_spinlock.h>
-
- #define DRV_NAME "enic_pmd"
- #define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Poll-mode Driver"
-@@ -96,6 +98,11 @@ struct enic_soft_stats {
- rte_atomic64_t rx_packet_errors;
- };
-
-+struct enic_memzone_entry {
-+ const struct rte_memzone *rz;
-+ LIST_ENTRY(enic_memzone_entry) entries;
-+};
-+
- /* Per-instance private data structure */
- struct enic {
- struct enic *next;
-@@ -140,6 +147,10 @@ struct enic {
- unsigned int intr_count;
-
- struct enic_soft_stats soft_stats;
-+
-+ /* linked list storing memory allocations */
-+ LIST_HEAD(enic_memzone_list, enic_memzone_entry) memzone_list;
-+ rte_spinlock_t memzone_list_lock;
- };
-
- static inline unsigned int enic_sop_rq(__rte_unused struct enic *enic, unsigned int rq)
-diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
-index 43e4af1..0547f3b 100644
---- a/drivers/net/enic/enic_main.c
-+++ b/drivers/net/enic/enic_main.c
-@@ -356,12 +356,14 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
- }
-
- static void *
--enic_alloc_consistent(__rte_unused void *priv, size_t size,
-+enic_alloc_consistent(void *priv, size_t size,
- dma_addr_t *dma_handle, u8 *name)
- {
- void *vaddr;
- const struct rte_memzone *rz;
- *dma_handle = 0;
-+ struct enic *enic = (struct enic *)priv;
-+ struct enic_memzone_entry *mze;
-
- rz = rte_memzone_reserve_aligned((const char *)name,
- size, SOCKET_ID_ANY, 0, ENIC_ALIGN);
-@@ -374,16 +376,49 @@ enic_alloc_consistent(__rte_unused void *priv, size_t size,
- vaddr = rz->addr;
- *dma_handle = (dma_addr_t)rz->phys_addr;
-
-+ mze = rte_malloc("enic memzone entry",
-+ sizeof(struct enic_memzone_entry), 0);
-+
-+ if (!mze) {
-+ pr_err("%s : Failed to allocate memory for memzone list\n",
-+ __func__);
-+ rte_memzone_free(rz);
-+ }
-+
-+ mze->rz = rz;
-+
-+ rte_spinlock_lock(&enic->memzone_list_lock);
-+ LIST_INSERT_HEAD(&enic->memzone_list, mze, entries);
-+ rte_spinlock_unlock(&enic->memzone_list_lock);
-+
- return vaddr;
- }
-
- static void
--enic_free_consistent(__rte_unused struct rte_pci_device *hwdev,
-- __rte_unused size_t size,
-- __rte_unused void *vaddr,
-- __rte_unused dma_addr_t dma_handle)
-+enic_free_consistent(void *priv,
-+ __rte_unused size_t size,
-+ void *vaddr,
-+ dma_addr_t dma_handle)
- {
-- /* Nothing to be done */
-+ struct enic_memzone_entry *mze;
-+ struct enic *enic = (struct enic *)priv;
-+
-+ rte_spinlock_lock(&enic->memzone_list_lock);
-+ LIST_FOREACH(mze, &enic->memzone_list, entries) {
-+ if (mze->rz->addr == vaddr &&
-+ mze->rz->phys_addr == dma_handle)
-+ break;
-+ }
-+ if (mze == NULL) {
-+ rte_spinlock_unlock(&enic->memzone_list_lock);
-+ dev_warning(enic,
-+ "Tried to free memory, but couldn't find it in the memzone list\n");
-+ return;
-+ }
-+ LIST_REMOVE(mze, entries);
-+ rte_spinlock_unlock(&enic->memzone_list_lock);
-+ rte_memzone_free(mze->rz);
-+ rte_free(mze);
- }
-
- static void
-@@ -840,7 +875,7 @@ static int enic_set_rsskey(struct enic *enic)
- rss_key_buf_pa,
- sizeof(union vnic_rss_key));
-
-- enic_free_consistent(enic->pdev, sizeof(union vnic_rss_key),
-+ enic_free_consistent(enic, sizeof(union vnic_rss_key),
- rss_key_buf_va, rss_key_buf_pa);
-
- return err;
-@@ -867,7 +902,7 @@ static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
- rss_cpu_buf_pa,
- sizeof(union vnic_rss_cpu));
-
-- enic_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),
-+ enic_free_consistent(enic, sizeof(union vnic_rss_cpu),
- rss_cpu_buf_va, rss_cpu_buf_pa);
-
- return err;
-@@ -1049,6 +1084,9 @@ int enic_probe(struct enic *enic)
- goto err_out;
- }
-
-+ LIST_INIT(&enic->memzone_list);
-+ rte_spinlock_init(&enic->memzone_list_lock);
-+
- vnic_register_cbacks(enic->vdev,
- enic_alloc_consistent,
- enic_free_consistent);
---
-2.7.0
-
diff --git a/dpdk/dpdk-16.04_patches/0024-net-enic-fix-Rx-scatter-with-multiple-queues.patch b/dpdk/dpdk-16.04_patches/0024-net-enic-fix-Rx-scatter-with-multiple-queues.patch
deleted file mode 100644
index d581702..0000000
--- a/dpdk/dpdk-16.04_patches/0024-net-enic-fix-Rx-scatter-with-multiple-queues.patch
+++ /dev/null
@@ -1,80 +0,0 @@
-From 658069b0c5994e260cd7d0a7dfc7f03d78dd4f5a Mon Sep 17 00:00:00 2001
-From: Nelson Escobar <neescoba@cisco.com>
-Date: Tue, 28 Jun 2016 11:49:11 -0700
-Subject: [PATCH 24/25] net/enic: fix Rx scatter with multiple queues
-
-The Rx scatter patch failed to make a few changes and resulted in
-problems when using multiple receive queues (RQs) in DPDK (ie RSS)
-since the wrong adapter resources were being used.
-
-- get and use the correct completion queue index associated with a
- receive queue.
-- set the correct receive queue index when using RSS
-
-Fixes: 856d7ba7ed22 ("net/enic: support scattered Rx")
-
-Signed-off-by: Nelson Escobar <neescoba@cisco.com>
-Reviewed-by: John Daley <johndale@cisco.com>
----
- drivers/net/enic/enic.h | 6 +++++-
- drivers/net/enic/enic_main.c | 10 ++++++----
- 2 files changed, 11 insertions(+), 5 deletions(-)
-
-diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
-index 175adb8..8b0fa05 100644
---- a/drivers/net/enic/enic.h
-+++ b/drivers/net/enic/enic.h
-@@ -165,7 +165,11 @@ static inline unsigned int enic_data_rq(__rte_unused struct enic *enic, unsigned
-
- static inline unsigned int enic_cq_rq(__rte_unused struct enic *enic, unsigned int rq)
- {
-- return rq;
-+ /* Scatter rx uses two receive queues together with one
-+ * completion queue, so the completion queue number is no
-+ * longer the same as the rq number.
-+ */
-+ return rq / 2;
- }
-
- static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
-diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
-index 0547f3b..976c9da 100644
---- a/drivers/net/enic/enic_main.c
-+++ b/drivers/net/enic/enic_main.c
-@@ -252,19 +252,20 @@ void enic_init_vnic_resources(struct enic *enic)
- vnic_dev_stats_clear(enic->vdev);
-
- for (index = 0; index < enic->rq_count; index++) {
-+ cq_idx = enic_cq_rq(enic, enic_sop_rq(enic, index));
-+
- vnic_rq_init(&enic->rq[enic_sop_rq(enic, index)],
-- enic_cq_rq(enic, index),
-+ cq_idx,
- error_interrupt_enable,
- error_interrupt_offset);
-
- data_rq = &enic->rq[enic_data_rq(enic, index)];
- if (data_rq->in_use)
- vnic_rq_init(data_rq,
-- enic_cq_rq(enic, index),
-+ cq_idx,
- error_interrupt_enable,
- error_interrupt_offset);
-
-- cq_idx = enic_cq_rq(enic, index);
- vnic_cq_init(&enic->cq[cq_idx],
- 0 /* flow_control_enable */,
- 1 /* color_enable */,
-@@ -896,7 +897,8 @@ static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
- return -ENOMEM;
-
- for (i = 0; i < (1 << rss_hash_bits); i++)
-- (*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count;
-+ (*rss_cpu_buf_va).cpu[i / 4].b[i % 4] =
-+ enic_sop_rq(enic, i % enic->rq_count);
-
- err = enic_set_rss_cpu(enic,
- rss_cpu_buf_pa,
---
-2.7.0
-
diff --git a/dpdk/dpdk-16.04_patches/0025-enic-fixup-of-Rx-Scatter-patch.patch b/dpdk/dpdk-16.04_patches/0025-enic-fixup-of-Rx-Scatter-patch.patch
deleted file mode 100644
index e4e9f43..0000000
--- a/dpdk/dpdk-16.04_patches/0025-enic-fixup-of-Rx-Scatter-patch.patch
+++ /dev/null
@@ -1,169 +0,0 @@
-From 3131adb7f4195771bf54b294b2ee496055c3e65d Mon Sep 17 00:00:00 2001
-From: Nelson Escobar <neescoba@cisco.com>
-Date: Tue, 14 Jun 2016 11:54:01 -0700
-Subject: [PATCH 25/25] enic: fixup of Rx Scatter patch
-
-A version of the Rx Scatter patch was used by VPP before the
-patch was accepted in dpdk.org. This patch contains the change
-made to the patch before it was accepted.
-
-Composed of internal dpdk devel patches:
-enic: fixup rq count usage in wake of rx scatter
-enic: update checks since RX scatter uses 2 VIC RQs per app RQ.
-enic: fix packet type and flags when doing scatter Rx
-
-fixes: ENIC scatter RX
-
-Signed-off-by: Nelson Escobar <neescoba@cisco.com>
----
- drivers/net/enic/enic.h | 12 ++++++++++--
- drivers/net/enic/enic_ethdev.c | 7 +++++--
- drivers/net/enic/enic_main.c | 19 +++++++++++--------
- drivers/net/enic/enic_res.c | 5 +++--
- drivers/net/enic/enic_rxtx.c | 7 +++++--
- 5 files changed, 34 insertions(+), 16 deletions(-)
-
-diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
-index 8b0fa05..9cc9f0b 100644
---- a/drivers/net/enic/enic.h
-+++ b/drivers/net/enic/enic.h
-@@ -55,8 +55,11 @@
- #define DRV_COPYRIGHT "Copyright 2008-2015 Cisco Systems, Inc"
-
- #define ENIC_WQ_MAX 8
--#define ENIC_RQ_MAX 8
--#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX)
-+/* With Rx scatter support, we use two RQs on VIC per RQ used by app. Both
-+ * RQs use the same CQ.
-+ */
-+#define ENIC_RQ_MAX 16
-+#define ENIC_CQ_MAX (ENIC_WQ_MAX + (ENIC_RQ_MAX / 2))
- #define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
-
- #define VLAN_ETH_HLEN 18
-@@ -163,6 +166,11 @@ static inline unsigned int enic_data_rq(__rte_unused struct enic *enic, unsigned
- return rq * 2 + 1;
- }
-
-+static inline unsigned int enic_vnic_rq_count(struct enic *enic)
-+{
-+ return (enic->rq_count * 2);
-+}
-+
- static inline unsigned int enic_cq_rq(__rte_unused struct enic *enic, unsigned int rq)
- {
- /* Scatter rx uses two receive queues together with one
-diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
-index 697ff82..e5b84e1 100644
---- a/drivers/net/enic/enic_ethdev.c
-+++ b/drivers/net/enic/enic_ethdev.c
-@@ -269,9 +269,12 @@ static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
- struct enic *enic = pmd_priv(eth_dev);
-
- ENICPMD_FUNC_TRACE();
-- if (queue_idx >= ENIC_RQ_MAX) {
-+ /* With Rx scatter support, two RQs are now used on VIC per RQ used
-+ * by the application.
-+ */
-+ if (queue_idx * 2 >= ENIC_RQ_MAX) {
- dev_err(enic,
-- "Max number of RX queues exceeded. Max is %d\n",
-+ "Max number of RX queues exceeded. Max is %d. This PMD uses 2 RQs on VIC per RQ used by DPDK.\n",
- ENIC_RQ_MAX);
- return -EINVAL;
- }
-diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
-index 976c9da..ff94ee2 100644
---- a/drivers/net/enic/enic_main.c
-+++ b/drivers/net/enic/enic_main.c
-@@ -133,7 +133,7 @@ static void enic_log_q_error(struct enic *enic)
- error_status);
- }
-
-- for (i = 0; i < enic->rq_count; i++) {
-+ for (i = 0; i < enic_vnic_rq_count(enic); i++) {
- error_status = vnic_rq_error_status(&enic->rq[i]);
- if (error_status)
- dev_err(enic, "RQ[%d] error_status %d\n", i,
-@@ -486,7 +486,7 @@ int enic_alloc_intr_resources(struct enic *enic)
-
- dev_info(enic, "vNIC resources used: "\
- "wq %d rq %d cq %d intr %d\n",
-- enic->wq_count, enic->rq_count,
-+ enic->wq_count, enic_vnic_rq_count(enic),
- enic->cq_count, enic->intr_count);
-
- err = vnic_intr_alloc(enic->vdev, &enic->intr, 0);
-@@ -790,10 +790,12 @@ int enic_disable(struct enic *enic)
- if (err)
- return err;
- }
-- for (i = 0; i < enic->rq_count; i++) {
-- err = vnic_rq_disable(&enic->rq[i]);
-- if (err)
-- return err;
-+ for (i = 0; i < enic_vnic_rq_count(enic); i++) {
-+ if (enic->rq[i].in_use) {
-+ err = vnic_rq_disable(&enic->rq[i]);
-+ if (err)
-+ return err;
-+ }
- }
-
- vnic_dev_set_reset_flag(enic->vdev, 1);
-@@ -802,8 +804,9 @@ int enic_disable(struct enic *enic)
- for (i = 0; i < enic->wq_count; i++)
- vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
-
-- for (i = 0; i < enic->rq_count; i++)
-- vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
-+ for (i = 0; i < enic_vnic_rq_count(enic); i++)
-+ if (enic->rq[i].in_use)
-+ vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
- for (i = 0; i < enic->cq_count; i++)
- vnic_cq_clean(&enic->cq[i]);
- vnic_intr_clean(&enic->intr);
-diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
-index ebe379d..42edd84 100644
---- a/drivers/net/enic/enic_res.c
-+++ b/drivers/net/enic/enic_res.c
-@@ -196,8 +196,9 @@ void enic_free_vnic_resources(struct enic *enic)
-
- for (i = 0; i < enic->wq_count; i++)
- vnic_wq_free(&enic->wq[i]);
-- for (i = 0; i < enic->rq_count; i++)
-- vnic_rq_free(&enic->rq[i]);
-+ for (i = 0; i < enic_vnic_rq_count(enic); i++)
-+ if (enic->rq[i].in_use)
-+ vnic_rq_free(&enic->rq[i]);
- for (i = 0; i < enic->cq_count; i++)
- vnic_cq_free(&enic->cq[i]);
- vnic_intr_free(&enic->intr);
-diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
-index 463b954..c68bbfb 100644
---- a/drivers/net/enic/enic_rxtx.c
-+++ b/drivers/net/enic/enic_rxtx.c
-@@ -326,8 +326,7 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
-
- /* Fill in the rest of the mbuf */
- seg_length = enic_cq_rx_desc_n_bytes(&cqd);
-- rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
-- enic_cq_rx_to_pkt_flags(&cqd, rxmb);
-+
- if (rq->is_sop) {
- first_seg = rxmb;
- first_seg->nb_segs = 1;
-@@ -350,6 +349,10 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- continue;
- }
-
-+ /* cq rx flags are only valid if eop bit is set */
-+ first_seg->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
-+ enic_cq_rx_to_pkt_flags(&cqd, first_seg);
-+
- if (unlikely(packet_error)) {
- rte_pktmbuf_free(first_seg);
- rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
---
-2.7.0
-
diff --git a/dpdk/dpdk-16.04_patches/0026-net-enic-fix-setting-MAC-address-when-a-port-is-rest.patch b/dpdk/dpdk-16.04_patches/0026-net-enic-fix-setting-MAC-address-when-a-port-is-rest.patch
deleted file mode 100644
index 334e9be..0000000
--- a/dpdk/dpdk-16.04_patches/0026-net-enic-fix-setting-MAC-address-when-a-port-is-rest.patch
+++ /dev/null
@@ -1,45 +0,0 @@
-From e5b60cf1199c51ee51c287988bdda3522fee748c Mon Sep 17 00:00:00 2001
-From: Nelson Escobar <neescoba@cisco.com>
-Date: Thu, 7 Jul 2016 18:10:21 -0700
-Subject: [PATCH 1/2] net/enic: fix setting MAC address when a port is
- restarted
-
-enic_disable() removed the MAC address when a port was shut down but
-enic_enable() didn't add the MAC address back when the port was
-started again. Move where we set the MAC address for the adapter from
-enic_setup_finish() to enic_enable() so that port restarting works
-properly.
-
-Fixes: fefed3d1e62c ("enic: new driver")
-
-Signed-off-by: Nelson Escobar <neescoba@cisco.com>
-Reviewed-by: John Daley <johndale@cisco.com>
----
- drivers/net/enic/enic_main.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
-index d4e43b5..5d47c01 100644
---- a/drivers/net/enic/enic_main.c
-+++ b/drivers/net/enic/enic_main.c
-@@ -468,6 +468,8 @@ int enic_enable(struct enic *enic)
- for (index = 0; index < enic->rq_count; index++)
- enic_start_rq(enic, index);
-
-+ vnic_dev_add_addr(enic->vdev, enic->mac_addr);
-+
- vnic_dev_enable_wait(enic->vdev);
-
- /* Register and enable error interrupt */
-@@ -971,8 +973,6 @@ int enic_setup_finish(struct enic *enic)
- return -1;
- }
-
-- vnic_dev_add_addr(enic->vdev, enic->mac_addr);
--
- /* Default conf */
- vnic_dev_packet_filter(enic->vdev,
- 1 /* directed */,
---
-2.7.0
-
diff --git a/dpdk/dpdk-16.04_patches/0027-net-enic-fix-removing-old-MAC-address-when-setting-n.patch b/dpdk/dpdk-16.04_patches/0027-net-enic-fix-removing-old-MAC-address-when-setting-n.patch
deleted file mode 100644
index 1e58db9..0000000
--- a/dpdk/dpdk-16.04_patches/0027-net-enic-fix-removing-old-MAC-address-when-setting-n.patch
+++ /dev/null
@@ -1,34 +0,0 @@
-From 620b173ae0f77c1a5af2592a27b5db8a6ce88bb6 Mon Sep 17 00:00:00 2001
-From: Nelson Escobar <neescoba@cisco.com>
-Date: Thu, 7 Jul 2016 18:11:08 -0700
-Subject: [PATCH 2/2] net/enic: fix removing old MAC address when setting new
- one
-
-enic_set_mac_address() meant to remove the old MAC address before
-setting the new one, but accidentally tried removing the new MAC
-address before setting the new MAC address.
-
-Fixes: fefed3d1e62c ("enic: new driver")
-
-Signed-off-by: Nelson Escobar <neescoba@cisco.com>
-Reviewed-by: John Daley <johndale@cisco.com>
----
- drivers/net/enic/enic_main.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
-index 5d47c01..d8669cc 100644
---- a/drivers/net/enic/enic_main.c
-+++ b/drivers/net/enic/enic_main.c
-@@ -215,7 +215,7 @@ void enic_set_mac_address(struct enic *enic, uint8_t *mac_addr)
- return;
- }
-
-- err = vnic_dev_del_addr(enic->vdev, mac_addr);
-+ err = vnic_dev_del_addr(enic->vdev, enic->mac_addr);
- if (err) {
- dev_err(enic, "del mac addr failed\n");
- return;
---
-2.7.0
-
diff --git a/dpdk/dpdk-16.04_patches/0028-i40e-Add-packet_type-metadata-in-the-i40e-vPMD.patch b/dpdk/dpdk-16.04_patches/0028-i40e-Add-packet_type-metadata-in-the-i40e-vPMD.patch
deleted file mode 100644
index 5cd32cf..0000000
--- a/dpdk/dpdk-16.04_patches/0028-i40e-Add-packet_type-metadata-in-the-i40e-vPMD.patch
+++ /dev/null
@@ -1,1184 +0,0 @@
-From e462b3f07bcbd807f7f3c8e6077e886a92f46ff0 Mon Sep 17 00:00:00 2001
-From: Damjan Marion <damarion@cisco.com>
-Date: Thu, 14 Jul 2016 09:59:01 -0700
-Subject: [PATCH 28/29] i40e: Add packet_type metadata in the i40e vPMD
-
-The ptype is decoded from the rx descriptor and stored
-in the packet type field in the mbuf using the same function
-as the non-vector driver.
-
-Signed-off-by: Damjan Marion <damarion@cisco.com>
-Signed-off-by: Jeff Shaw <jeffrey.b.shaw@intel.com>
----
- drivers/net/i40e/i40e_rxtx.c | 539 +------------------------------------
- drivers/net/i40e/i40e_rxtx.h | 564 +++++++++++++++++++++++++++++++++++++++
- drivers/net/i40e/i40e_rxtx_vec.c | 16 ++
- 3 files changed, 582 insertions(+), 537 deletions(-)
-
-diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
-index 4d35d83..511f016 100644
---- a/drivers/net/i40e/i40e_rxtx.c
-+++ b/drivers/net/i40e/i40e_rxtx.c
-@@ -189,542 +189,6 @@ i40e_get_iee15888_flags(struct rte_mbuf *mb, uint64_t qword)
- }
- #endif
-
--/* For each value it means, datasheet of hardware can tell more details
-- *
-- * @note: fix i40e_dev_supported_ptypes_get() if any change here.
-- */
--static inline uint32_t
--i40e_rxd_pkt_type_mapping(uint8_t ptype)
--{
-- static const uint32_t type_table[UINT8_MAX + 1] __rte_cache_aligned = {
-- /* L2 types */
-- /* [0] reserved */
-- [1] = RTE_PTYPE_L2_ETHER,
-- [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
-- /* [3] - [5] reserved */
-- [6] = RTE_PTYPE_L2_ETHER_LLDP,
-- /* [7] - [10] reserved */
-- [11] = RTE_PTYPE_L2_ETHER_ARP,
-- /* [12] - [21] reserved */
--
-- /* Non tunneled IPv4 */
-- [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_L4_FRAG,
-- [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_L4_NONFRAG,
-- [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_L4_UDP,
-- /* [25] reserved */
-- [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_L4_TCP,
-- [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_L4_SCTP,
-- [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_L4_ICMP,
--
-- /* IPv4 --> IPv4 */
-- [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_IP |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_FRAG,
-- [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_IP |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_NONFRAG,
-- [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_IP |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_UDP,
-- /* [32] reserved */
-- [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_IP |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_TCP,
-- [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_IP |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_SCTP,
-- [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_IP |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_ICMP,
--
-- /* IPv4 --> IPv6 */
-- [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_IP |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_FRAG,
-- [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_IP |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_NONFRAG,
-- [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_IP |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_UDP,
-- /* [39] reserved */
-- [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_IP |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_TCP,
-- [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_IP |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_SCTP,
-- [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_IP |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_ICMP,
--
-- /* IPv4 --> GRE/Teredo/VXLAN */
-- [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT,
--
-- /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
-- [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_FRAG,
-- [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_NONFRAG,
-- [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_UDP,
-- /* [47] reserved */
-- [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_TCP,
-- [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_SCTP,
-- [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_ICMP,
--
-- /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
-- [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_FRAG,
-- [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_NONFRAG,
-- [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_UDP,
-- /* [54] reserved */
-- [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_TCP,
-- [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_SCTP,
-- [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_ICMP,
--
-- /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
-- [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
--
-- /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
-- [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_FRAG,
-- [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_NONFRAG,
-- [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_UDP,
-- /* [62] reserved */
-- [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_TCP,
-- [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_SCTP,
-- [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_ICMP,
--
-- /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
-- [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_FRAG,
-- [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_NONFRAG,
-- [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_UDP,
-- /* [69] reserved */
-- [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_TCP,
-- [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_SCTP,
-- [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_ICMP,
--
-- /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN */
-- [73] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L2_ETHER_VLAN,
--
-- /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */
-- [74] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L2_ETHER_VLAN |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_FRAG,
-- [75] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L2_ETHER_VLAN |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_NONFRAG,
-- [76] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L2_ETHER_VLAN |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_UDP,
-- /* [77] reserved */
-- [78] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L2_ETHER_VLAN |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_TCP,
-- [79] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L2_ETHER_VLAN |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_SCTP,
-- [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L2_ETHER_VLAN |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_ICMP,
--
-- /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */
-- [81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L2_ETHER_VLAN |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_FRAG,
-- [82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L2_ETHER_VLAN |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_NONFRAG,
-- [83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L2_ETHER_VLAN |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_UDP,
-- /* [84] reserved */
-- [85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L2_ETHER_VLAN |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_TCP,
-- [86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L2_ETHER_VLAN |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_SCTP,
-- [87] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L2_ETHER_VLAN |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_ICMP,
--
-- /* Non tunneled IPv6 */
-- [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_L4_FRAG,
-- [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_L4_NONFRAG,
-- [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_L4_UDP,
-- /* [91] reserved */
-- [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_L4_TCP,
-- [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_L4_SCTP,
-- [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_L4_ICMP,
--
-- /* IPv6 --> IPv4 */
-- [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_IP |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_FRAG,
-- [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_IP |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_NONFRAG,
-- [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_IP |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_UDP,
-- /* [98] reserved */
-- [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_IP |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_TCP,
-- [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_IP |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_SCTP,
-- [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_IP |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_ICMP,
--
-- /* IPv6 --> IPv6 */
-- [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_IP |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_FRAG,
-- [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_IP |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_NONFRAG,
-- [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_IP |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_UDP,
-- /* [105] reserved */
-- [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_IP |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_TCP,
-- [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_IP |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_SCTP,
-- [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_IP |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_ICMP,
--
-- /* IPv6 --> GRE/Teredo/VXLAN */
-- [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT,
--
-- /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
-- [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_FRAG,
-- [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_NONFRAG,
-- [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_UDP,
-- /* [113] reserved */
-- [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_TCP,
-- [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_SCTP,
-- [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_ICMP,
--
-- /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
-- [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_FRAG,
-- [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_NONFRAG,
-- [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_UDP,
-- /* [120] reserved */
-- [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_TCP,
-- [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_SCTP,
-- [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_ICMP,
--
-- /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
-- [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
--
-- /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
-- [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_FRAG,
-- [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_NONFRAG,
-- [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_UDP,
-- /* [128] reserved */
-- [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_TCP,
-- [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_SCTP,
-- [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_ICMP,
--
-- /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
-- [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_FRAG,
-- [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_NONFRAG,
-- [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_UDP,
-- /* [135] reserved */
-- [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_TCP,
-- [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_SCTP,
-- [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_ICMP,
--
-- /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN */
-- [139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L2_ETHER_VLAN,
--
-- /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */
-- [140] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L2_ETHER_VLAN |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_FRAG,
-- [141] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L2_ETHER_VLAN |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_NONFRAG,
-- [142] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L2_ETHER_VLAN |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_UDP,
-- /* [143] reserved */
-- [144] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L2_ETHER_VLAN |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_TCP,
-- [145] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L2_ETHER_VLAN |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_SCTP,
-- [146] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L2_ETHER_VLAN |
-- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_ICMP,
--
-- /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */
-- [147] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L2_ETHER_VLAN |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_FRAG,
-- [148] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L2_ETHER_VLAN |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_NONFRAG,
-- [149] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L2_ETHER_VLAN |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_UDP,
-- /* [150] reserved */
-- [151] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L2_ETHER_VLAN |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_TCP,
-- [152] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L2_ETHER_VLAN |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_SCTP,
-- [153] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_TUNNEL_GRENAT |
-- RTE_PTYPE_INNER_L2_ETHER_VLAN |
-- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-- RTE_PTYPE_INNER_L4_ICMP,
--
-- /* All others reserved */
-- };
--
-- return type_table[ptype];
--}
--
- #define I40E_RX_DESC_EXT_STATUS_FLEXBH_MASK 0x03
- #define I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID 0x01
- #define I40E_RX_DESC_EXT_STATUS_FLEXBH_FLEX 0x02
-@@ -2135,7 +1599,8 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
- #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
- dev->rx_pkt_burst == i40e_recv_pkts_bulk_alloc ||
- #endif
-- dev->rx_pkt_burst == i40e_recv_scattered_pkts)
-+ dev->rx_pkt_burst == i40e_recv_scattered_pkts ||
-+ dev->rx_pkt_burst == i40e_recv_pkts_vec)
- return ptypes;
- return NULL;
- }
-diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
-index 98179f0..c33782f 100644
---- a/drivers/net/i40e/i40e_rxtx.h
-+++ b/drivers/net/i40e/i40e_rxtx.h
-@@ -255,4 +255,568 @@ void i40e_set_tx_function_flag(struct rte_eth_dev *dev,
- struct i40e_tx_queue *txq);
- void i40e_set_tx_function(struct rte_eth_dev *dev);
-
-+/* For each value it means, datasheet of hardware can tell more details
-+ *
-+ * @note: fix i40e_dev_supported_ptypes_get() if any change here.
-+ */
-+#define RTE_PTYPE_L2_ETHER_NSH 0x00000005
-+static inline uint32_t
-+i40e_rxd_pkt_type_mapping(uint8_t ptype)
-+{
-+ static const uint32_t type_table[UINT8_MAX + 1] __rte_cache_aligned = {
-+ /* L2 types */
-+ /* [0] reserved */
-+ [1] = RTE_PTYPE_L2_ETHER,
-+ [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
-+ /* [3] - [5] reserved */
-+ [6] = RTE_PTYPE_L2_ETHER_LLDP,
-+ /* [7] - [10] reserved */
-+ [11] = RTE_PTYPE_L2_ETHER_ARP,
-+ /* [12] - [21] reserved */
-+
-+ /* Non tunneled IPv4 */
-+ [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_L4_FRAG,
-+ [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_L4_NONFRAG,
-+ [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_L4_UDP,
-+ /* [25] reserved */
-+ [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_L4_TCP,
-+ [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_L4_SCTP,
-+ [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_L4_ICMP,
-+
-+ /* IPv4 --> IPv4 */
-+ [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_IP |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_FRAG,
-+ [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_IP |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_NONFRAG,
-+ [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_IP |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_UDP,
-+ /* [32] reserved */
-+ [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_IP |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_TCP,
-+ [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_IP |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_SCTP,
-+ [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_IP |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_ICMP,
-+
-+ /* IPv4 --> IPv6 */
-+ [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_IP |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_FRAG,
-+ [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_IP |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_NONFRAG,
-+ [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_IP |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_UDP,
-+ /* [39] reserved */
-+ [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_IP |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_TCP,
-+ [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_IP |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_SCTP,
-+ [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_IP |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_ICMP,
-+
-+ /* IPv4 --> GRE/Teredo/VXLAN */
-+ [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT,
-+
-+ /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
-+ [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_FRAG,
-+ [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_NONFRAG,
-+ [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_UDP,
-+ /* [47] reserved */
-+ [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_TCP,
-+ [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_SCTP,
-+ [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_ICMP,
-+
-+ /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
-+ [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_FRAG,
-+ [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_NONFRAG,
-+ [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_UDP,
-+ /* [54] reserved */
-+ [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_TCP,
-+ [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_SCTP,
-+ [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_ICMP,
-+
-+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
-+ [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
-+
-+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
-+ [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_FRAG,
-+ [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_NONFRAG,
-+ [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_UDP,
-+ /* [62] reserved */
-+ [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_TCP,
-+ [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_SCTP,
-+ [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_ICMP,
-+
-+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
-+ [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_FRAG,
-+ [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_NONFRAG,
-+ [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_UDP,
-+ /* [69] reserved */
-+ [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_TCP,
-+ [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_SCTP,
-+ [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_ICMP,
-+
-+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN */
-+ [73] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
-+
-+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */
-+ [74] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_FRAG,
-+ [75] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_NONFRAG,
-+ [76] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_UDP,
-+ /* [77] reserved */
-+ [78] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_TCP,
-+ [79] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_SCTP,
-+ [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_ICMP,
-+
-+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */
-+ [81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_FRAG,
-+ [82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_NONFRAG,
-+ [83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_UDP,
-+ /* [84] reserved */
-+ [85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_TCP,
-+ [86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_SCTP,
-+ [87] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_ICMP,
-+
-+ /* Non tunneled IPv6 */
-+ [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_L4_FRAG,
-+ [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_L4_NONFRAG,
-+ [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_L4_UDP,
-+ /* [91] reserved */
-+ [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_L4_TCP,
-+ [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_L4_SCTP,
-+ [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_L4_ICMP,
-+
-+ /* IPv6 --> IPv4 */
-+ [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_IP |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_FRAG,
-+ [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_IP |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_NONFRAG,
-+ [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_IP |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_UDP,
-+ /* [98] reserved */
-+ [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_IP |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_TCP,
-+ [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_IP |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_SCTP,
-+ [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_IP |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_ICMP,
-+
-+ /* IPv6 --> IPv6 */
-+ [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_IP |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_FRAG,
-+ [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_IP |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_NONFRAG,
-+ [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_IP |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_UDP,
-+ /* [105] reserved */
-+ [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_IP |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_TCP,
-+ [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_IP |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_SCTP,
-+ [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_IP |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_ICMP,
-+
-+ /* IPv6 --> GRE/Teredo/VXLAN */
-+ [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT,
-+
-+ /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
-+ [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_FRAG,
-+ [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_NONFRAG,
-+ [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_UDP,
-+ /* [113] reserved */
-+ [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_TCP,
-+ [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_SCTP,
-+ [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_ICMP,
-+
-+ /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
-+ [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_FRAG,
-+ [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_NONFRAG,
-+ [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_UDP,
-+ /* [120] reserved */
-+ [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_TCP,
-+ [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_SCTP,
-+ [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_ICMP,
-+
-+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
-+ [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
-+
-+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
-+ [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_FRAG,
-+ [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_NONFRAG,
-+ [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_UDP,
-+ /* [128] reserved */
-+ [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_TCP,
-+ [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_SCTP,
-+ [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_ICMP,
-+
-+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
-+ [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_FRAG,
-+ [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_NONFRAG,
-+ [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_UDP,
-+ /* [135] reserved */
-+ [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_TCP,
-+ [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_SCTP,
-+ [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_ICMP,
-+
-+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN */
-+ [139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
-+
-+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */
-+ [140] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_FRAG,
-+ [141] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_NONFRAG,
-+ [142] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_UDP,
-+ /* [143] reserved */
-+ [144] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_TCP,
-+ [145] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_SCTP,
-+ [146] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
-+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_ICMP,
-+
-+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */
-+ [147] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_FRAG,
-+ [148] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_NONFRAG,
-+ [149] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_UDP,
-+ /* [150] reserved */
-+ [151] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_TCP,
-+ [152] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_SCTP,
-+ [153] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_TUNNEL_GRENAT |
-+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
-+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_INNER_L4_ICMP,
-+
-+ /* L2 NSH packet type */
-+ [154] = RTE_PTYPE_L2_ETHER_NSH,
-+ [155] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_L4_FRAG,
-+ [156] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_L4_NONFRAG,
-+ [157] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_L4_UDP,
-+ [158] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_L4_TCP,
-+ [159] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_L4_SCTP,
-+ [160] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-+ RTE_PTYPE_L4_ICMP,
-+ [161] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_L4_FRAG,
-+ [162] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_L4_NONFRAG,
-+ [163] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_L4_UDP,
-+ [164] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_L4_TCP,
-+ [165] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_L4_SCTP,
-+ [166] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-+ RTE_PTYPE_L4_ICMP,
-+
-+ /* All others reserved */
-+ };
-+
-+ return type_table[ptype];
-+}
-+
- #endif /* _I40E_RXTX_H_ */
-diff --git a/drivers/net/i40e/i40e_rxtx_vec.c b/drivers/net/i40e/i40e_rxtx_vec.c
-index 047aff5..defa581 100644
---- a/drivers/net/i40e/i40e_rxtx_vec.c
-+++ b/drivers/net/i40e/i40e_rxtx_vec.c
-@@ -220,6 +220,21 @@ desc_pktlen_align(__m128i descs[4])
- *((uint16_t *)&descs[3]+7) = vol.e[3];
- }
-
-+static inline void
-+desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
-+{
-+ __m128i ptype0 = _mm_unpackhi_epi64(descs[0], descs[1]);
-+ __m128i ptype1 = _mm_unpackhi_epi64(descs[2], descs[3]);
-+
-+ ptype0 = _mm_srli_epi64(ptype0, 30);
-+ ptype1 = _mm_srli_epi64(ptype1, 30);
-+
-+ rx_pkts[0]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype0, 0));
-+ rx_pkts[1]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype0, 8));
-+ rx_pkts[2]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype1, 0));
-+ rx_pkts[3]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype1, 8));
-+}
-+
- /*
- * Notice:
- * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
-@@ -413,6 +428,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
- pkt_mb2);
- _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
- pkt_mb1);
-+ desc_to_ptype_v(descs, &rx_pkts[pos]);
- 		/* C.4 calc available number of desc */
- var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
- nb_pkts_recd += var;
---
-2.7.4
-
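
For context on the hunk being removed above: the mapping table is indexed by the 8-bit hardware packet type that desc_to_ptype_v() pulls out of qword1 of each Rx descriptor, which is why the vector code shifts by 30 (the PTYPE field occupies bits 37:30). A scalar sketch of the same lookup follows; the helper name is hypothetical, and i40e_rxd_pkt_type_mapping() is the function added by the removed patch.

    #include <stdint.h>
    #include <rte_mbuf.h>

    /* Scalar counterpart of desc_to_ptype_v(): extract the 8-bit PTYPE from
     * bits 37:30 of the descriptor's qword1 and map it through the table. */
    static inline void
    desc_to_ptype_scalar(uint64_t qword1, struct rte_mbuf *mb)
    {
            uint8_t ptype = (uint8_t)((qword1 >> 30) & 0xFF);

            mb->packet_type = i40e_rxd_pkt_type_mapping(ptype);
    }
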
diff --git a/dpdk/dpdk-16.04_patches/0029-i40e-Enable-bad-checksum-flags-in-i40e-vPMD.patch b/dpdk/dpdk-16.04_patches/0029-i40e-Enable-bad-checksum-flags-in-i40e-vPMD.patch
deleted file mode 100644
index 9b0f064..0000000
--- a/dpdk/dpdk-16.04_patches/0029-i40e-Enable-bad-checksum-flags-in-i40e-vPMD.patch
+++ /dev/null
@@ -1,114 +0,0 @@
-From ff4d874754e5e420671cc78d82829cd7317542ad Mon Sep 17 00:00:00 2001
-From: Damjan Marion <damarion@cisco.com>
-Date: Thu, 14 Jul 2016 09:59:02 -0700
-Subject: [PATCH 29/29] i40e: Enable bad checksum flags in i40e vPMD
-
-Decode the checksum flags from the rx descriptor, setting
-the appropriate bit in the mbuf ol_flags field when the flag
-indicates a bad checksum.
-
-Signed-off-by: Damjan Marion <damarion@cisco.com>
-Signed-off-by: Jeff Shaw <jeffrey.b.shaw@intel.com>
----
- drivers/net/i40e/i40e_rxtx_vec.c | 55 +++++++++++++++++++++++-----------------
- 1 file changed, 32 insertions(+), 23 deletions(-)
-
-diff --git a/drivers/net/i40e/i40e_rxtx_vec.c b/drivers/net/i40e/i40e_rxtx_vec.c
-index defa581..09ec6e6 100644
---- a/drivers/net/i40e/i40e_rxtx_vec.c
-+++ b/drivers/net/i40e/i40e_rxtx_vec.c
-@@ -138,18 +138,14 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
- static inline void
- desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
- {
-- __m128i vlan0, vlan1, rss;
-- union {
-- uint16_t e[4];
-- uint64_t dword;
-- } vol;
-+ __m128i vlan0, vlan1, rss, l3_l4e;
-
-- /* mask everything except rss and vlan flags
-- *bit2 is for vlan tag, bits 13:12 for rss
-- */
-- const __m128i rss_vlan_msk = _mm_set_epi16(
-- 0x0000, 0x0000, 0x0000, 0x0000,
-- 0x3004, 0x3004, 0x3004, 0x3004);
-+ /* mask everything except RSS, flow director and VLAN flags
-+ * bit2 is for VLAN tag, bit11 for flow director indication
-+ * bit13:12 for RSS indication.
-+ */
-+ const __m128i rss_vlan_msk = _mm_set_epi32(
-+ 0x1c03004, 0x1c03004, 0x1c03004, 0x1c03004);
-
- /* map rss and vlan type to rss hash and vlan flag */
- const __m128i vlan_flags = _mm_set_epi8(0, 0, 0, 0,
-@@ -162,23 +158,36 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
- 0, 0, 0, 0,
- PKT_RX_FDIR, 0, PKT_RX_RSS_HASH, 0);
-
-- vlan0 = _mm_unpackhi_epi16(descs[0], descs[1]);
-- vlan1 = _mm_unpackhi_epi16(descs[2], descs[3]);
-- vlan0 = _mm_unpacklo_epi32(vlan0, vlan1);
-+ const __m128i l3_l4e_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
-+ PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
-+ PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
-+ PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
-+ PKT_RX_EIP_CKSUM_BAD,
-+ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
-+ PKT_RX_L4_CKSUM_BAD,
-+ PKT_RX_IP_CKSUM_BAD,
-+ 0);
-+
-+ vlan0 = _mm_unpackhi_epi32(descs[0], descs[1]);
-+ vlan1 = _mm_unpackhi_epi32(descs[2], descs[3]);
-+ vlan0 = _mm_unpacklo_epi64(vlan0, vlan1);
-
- vlan1 = _mm_and_si128(vlan0, rss_vlan_msk);
- vlan0 = _mm_shuffle_epi8(vlan_flags, vlan1);
-
-- rss = _mm_srli_epi16(vlan1, 12);
-+ rss = _mm_srli_epi32(vlan1, 12);
- rss = _mm_shuffle_epi8(rss_flags, rss);
-
-+ l3_l4e = _mm_srli_epi32(vlan1, 22);
-+ l3_l4e = _mm_shuffle_epi8(l3_l4e_flags, l3_l4e);
-+
- vlan0 = _mm_or_si128(vlan0, rss);
-- vol.dword = _mm_cvtsi128_si64(vlan0);
-+ vlan0 = _mm_or_si128(vlan0, l3_l4e);
-
-- rx_pkts[0]->ol_flags = vol.e[0];
-- rx_pkts[1]->ol_flags = vol.e[1];
-- rx_pkts[2]->ol_flags = vol.e[2];
-- rx_pkts[3]->ol_flags = vol.e[3];
-+ rx_pkts[0]->ol_flags = _mm_extract_epi16(vlan0, 0);
-+ rx_pkts[1]->ol_flags = _mm_extract_epi16(vlan0, 2);
-+ rx_pkts[2]->ol_flags = _mm_extract_epi16(vlan0, 4);
-+ rx_pkts[3]->ol_flags = _mm_extract_epi16(vlan0, 6);
- }
- #else
- #define desc_to_olflags_v(desc, rx_pkts) do {} while (0)
-@@ -770,7 +779,8 @@ i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
- #ifndef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
- 	/* without rx ol_flags, no VP flag report */
- if (rxmode->hw_vlan_strip != 0 ||
-- rxmode->hw_vlan_extend != 0)
-+ rxmode->hw_vlan_extend != 0 ||
-+ rxmode->hw_ip_checksum != 0)
- return -1;
- #endif
-
-@@ -781,8 +791,7 @@ i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
- /* - no csum error report support
- * - no header split support
- */
-- if (rxmode->hw_ip_checksum == 1 ||
-- rxmode->header_split == 1)
-+ if (rxmode->header_split == 1)
- return -1;
-
- return 0;
---
-2.7.4
-
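
The _mm_shuffle_epi8() against l3_l4e_flags in the patch above is a vectorized table lookup: the three L3/L4 error bits taken from each descriptor (IPE, L4E, EIPE in qword1 bits 24:22) select one of eight precomputed ol_flags combinations. A scalar sketch of the same mapping, using the DPDK 16.x flag names from the patch; the helper itself is illustrative, not part of the driver.

    #include <stdint.h>
    #include <rte_mbuf.h>

    /* err_bits: the descriptor's L3/L4 error field, already shifted down,
     * exactly the per-packet index the shuffle uses in the vector path. */
    static inline uint64_t
    l3_l4e_to_ol_flags(uint32_t err_bits)
    {
            static const uint64_t table[8] = {
                    0,
                    PKT_RX_IP_CKSUM_BAD,
                    PKT_RX_L4_CKSUM_BAD,
                    PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
                    PKT_RX_EIP_CKSUM_BAD,
                    PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
                    PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
                    PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
                            PKT_RX_IP_CKSUM_BAD,
            };

            return table[err_bits & 0x7];
    }
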
diff --git a/dpdk/dpdk-16.04_patches/0030-net-enic-fix-possible-Rx-corruption.patch b/dpdk/dpdk-16.04_patches/0030-net-enic-fix-possible-Rx-corruption.patch
deleted file mode 100644
index 6bb043e..0000000
--- a/dpdk/dpdk-16.04_patches/0030-net-enic-fix-possible-Rx-corruption.patch
+++ /dev/null
@@ -1,47 +0,0 @@
-From cee88bcfd49cbf142c13ee7b6d2e77166c80bb48 Mon Sep 17 00:00:00 2001
-From: John Daley <johndale@cisco.com>
-Date: Tue, 19 Jul 2016 13:41:14 -0700
-Subject: [PATCH] net/enic: fix possible Rx corruption
-
-Initialize the mbuf data offset to RTE_PKTMBUF_HEADROOM as the
-enic takes ownership of them. If allocated mbufs had some offset
-other than RTE_PKTMBUF_HEADROOM, the application would read mbuf
-data starting at the wrong place and misinterpret the packet.
-
-Fixes: 856d7ba7ed22 ("net/enic: support scattered Rx")
-
-Reviewed-by: Nelson Escobar <neescoba@cisco.com>
-Signed-off-by: John Daley <johndale@cisco.com>
----
- drivers/net/enic/enic_main.c | 1 +
- drivers/net/enic/enic_rxtx.c | 2 +-
- 2 files changed, 2 insertions(+), 1 deletion(-)
-
-diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
-index 8cedebf..774fcb1 100644
---- a/drivers/net/enic/enic_main.c
-+++ b/drivers/net/enic/enic_main.c
-@@ -328,6 +328,7 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
- return -ENOMEM;
- }
-
-+ mb->data_off = RTE_PKTMBUF_HEADROOM;
- dma_addr = (dma_addr_t)(mb->buf_physaddr + RTE_PKTMBUF_HEADROOM);
- rq_enet_desc_enc(rqd, dma_addr,
- (rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
-diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
-index c68bbfb..60f5062 100644
---- a/drivers/net/enic/enic_rxtx.c
-+++ b/drivers/net/enic/enic_rxtx.c
-@@ -317,7 +317,7 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- ciflags = enic_cq_rx_desc_ciflags((struct cq_enet_rq_desc *) &cqd);
-
- /* Push descriptor for newly allocated mbuf */
--
-+ nmb->data_off = RTE_PKTMBUF_HEADROOM;
- dma_addr = (dma_addr_t)(nmb->buf_physaddr + RTE_PKTMBUF_HEADROOM);
- rq_enet_desc_enc(rqd_ptr, dma_addr,
- (rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
---
-2.7.0
-
diff --git a/dpdk/dpdk-16.04_patches/0031-enic-fix-bug-introduced-with-scatter-rx.patch b/dpdk/dpdk-16.04_patches/0031-enic-fix-bug-introduced-with-scatter-rx.patch
deleted file mode 100644
index 88586eb..0000000
--- a/dpdk/dpdk-16.04_patches/0031-enic-fix-bug-introduced-with-scatter-rx.patch
+++ /dev/null
@@ -1,24 +0,0 @@
-commit f0ca43396ebeb85228f9737a52caa68fc06aa9ee
-Author: Nelson Escobar <neescoba@cisco.com>
-Date: Mon Jun 13 17:24:41 2016 -0700
-
- enic: fix bug introduced with scatter rx
-
- We did not properly set the rq pointers we pass up to dpdk when
- rx scatter was introduced. This resulted in segfaults whenever
- more than one rq was being used.
-
-diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
-index e5b84e1..66fddb1 100644
---- a/drivers/net/enic/enic_ethdev.c
-+++ b/drivers/net/enic/enic_ethdev.c
-@@ -279,7 +279,8 @@ static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
- return -EINVAL;
- }
-
-- eth_dev->data->rx_queues[queue_idx] = (void *)&enic->rq[queue_idx];
-+ eth_dev->data->rx_queues[queue_idx] =
-+ (void *)&enic->rq[enic_sop_rq(enic, queue_idx)];
-
- ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc);
- if (ret) {
diff --git a/dpdk/dpdk-2.1.0_patches/0001-kni-fix-igb-build-with-kernel-4.2.patch b/dpdk/dpdk-2.1.0_patches/0001-kni-fix-igb-build-with-kernel-4.2.patch
deleted file mode 100644
index 09bca06..0000000
--- a/dpdk/dpdk-2.1.0_patches/0001-kni-fix-igb-build-with-kernel-4.2.patch
+++ /dev/null
@@ -1,78 +0,0 @@
-From 2de9d1629312a32f82c43167467640bc793805a6 Mon Sep 17 00:00:00 2001
-From: Damjan Marion <damarion@cisco.com>
-Date: Mon, 12 Oct 2015 14:23:30 +0200
-Subject: [PATCH 1/9] kni: fix igb build with kernel 4.2
-
-Kernel 4.2 has introduced two new parameters in ndo_bridge_getlink,
-which breaks DPDK compilation.
-
-Linux: 7d4f8d87 ("switchdev: add VLAN support for ports bridge-getlink")
-
-This patch adds the necessary checks to fix it.
-
-Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
----
- lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c | 13 +++++++++----
- lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h | 7 ++++++-
- 2 files changed, 15 insertions(+), 5 deletions(-)
-
-diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c b/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c
-index eed8df6..b330b20 100644
---- a/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c
-+++ b/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c
-@@ -2250,14 +2250,14 @@ static int igb_ndo_bridge_setlink(struct net_device *dev,
- }
-
- #ifdef HAVE_BRIDGE_FILTER
--#ifdef HAVE_NDO_BRIDGE_GETLINK_FILTER_MASK
-+#ifdef HAVE_NDO_BRIDGE_GETLINK_NLFLAGS
- static int igb_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
- struct net_device *dev, u32 filter_mask,
- int nlflags)
- #else
- static int igb_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
- struct net_device *dev, u32 filter_mask)
--#endif /* HAVE_NDO_BRIDGE_GETLINK_FILTER_MASK */
-+#endif /* HAVE_NDO_BRIDGE_GETLINK_NLFLAGS */
- #else
- static int igb_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
- struct net_device *dev)
-@@ -2275,11 +2275,16 @@ static int igb_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
- mode = BRIDGE_MODE_VEPA;
-
- #ifdef HAVE_NDO_FDB_ADD_VID
--#ifdef HAVE_NDO_BRIDGE_GETLINK_FILTER_MASK
-+#ifdef HAVE_NDO_BRIDGE_GETLINK_NLFLAGS
-+#ifdef HAVE_NDO_BRIDGE_GETLINK_FILTER_MASK_VLAN_FILL
-+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0,
-+ nlflags, filter_mask, NULL);
-+#else
- return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, nlflags);
-+#endif /* HAVE_NDO_BRIDGE_GETLINK_FILTER_MASK_VLAN_FILL */
- #else
- return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0);
--#endif /* HAVE_NDO_BRIDGE_GETLINK_FILTER_MASK */
-+#endif /* HAVE_NDO_BRIDGE_GETLINK_NLFLAGS */
- #else
- return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode);
- #endif /* HAVE_NDO_FDB_ADD_VID */
-diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h b/lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h
-index 852f80f..5f45b8b 100644
---- a/lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h
-+++ b/lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h
-@@ -3899,6 +3899,11 @@ skb_set_hash(struct sk_buff *skb, __u32 hash, __always_unused int type)
-
- #if ( LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0) )
- /* ndo_bridge_getlink adds new nlflags parameter */
--#define HAVE_NDO_BRIDGE_GETLINK_FILTER_MASK
-+#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS
- #endif /* >= 4.1.0 */
-+
-+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0) )
-+/* ndo_bridge_getlink adds new filter_mask and vlan_fill parameters */
-+#define HAVE_NDO_BRIDGE_GETLINK_FILTER_MASK_VLAN_FILL
-+#endif /* >= 4.2.0 */
- #endif /* _KCOMPAT_H_ */
---
-2.5.0
-
diff --git a/dpdk/dpdk-2.1.0_patches/0002-mbuf-rearrange-rte_mbuf-metadata-to-suit-vpp.patch b/dpdk/dpdk-2.1.0_patches/0002-mbuf-rearrange-rte_mbuf-metadata-to-suit-vpp.patch
deleted file mode 100644
index 2ce5004..0000000
--- a/dpdk/dpdk-2.1.0_patches/0002-mbuf-rearrange-rte_mbuf-metadata-to-suit-vpp.patch
+++ /dev/null
@@ -1,107 +0,0 @@
-From 3609c4fb4d07d4285e96187598f54cb21e9e9b08 Mon Sep 17 00:00:00 2001
-From: Shesha Sreenivasamurthy <shesha@cisco.com>
-Date: Wed, 2 Sep 2015 08:57:24 -0700
-Subject: [PATCH 2/9] mbuf: rearrange rte_mbuf metadata to suit vpp
-
-Offload structure in the second cache line, next pointer in the
-first cache line. Issue reported to Intel.
----
- .../linuxapp/eal/include/exec-env/rte_kni_common.h | 10 +++++++--
- lib/librte_mbuf/rte_mbuf.h | 25 ++++++++++++++--------
- 2 files changed, 24 insertions(+), 11 deletions(-)
-
-diff --git a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
-index e9f38bd..d327f71 100644
---- a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
-+++ b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
-@@ -111,6 +111,10 @@ struct rte_kni_fifo {
- * The kernel image of the rte_mbuf struct, with only the relevant fields.
- * Padding is necessary to assure the offsets of these fields
- */
-+/*
-+ * offload in the second cache line, next in the first. Better for vpp
-+ * at least as of right now.
-+ */
- struct rte_kni_mbuf {
- void *buf_addr __attribute__((__aligned__(RTE_CACHE_LINE_SIZE)));
- char pad0[10];
-@@ -121,16 +125,18 @@ struct rte_kni_mbuf {
- char pad2[4];
- uint32_t pkt_len; /**< Total pkt len: sum of all segment data_len. */
- uint16_t data_len; /**< Amount of data in segment buffer. */
-+ char pad3[2];
- #else
- char pad2[2];
- uint16_t data_len; /**< Amount of data in segment buffer. */
- uint32_t pkt_len; /**< Total pkt len: sum of all segment data_len. */
-+ char pad3[4];
- #endif
-+ void *next;
-
- /* fields on second cache line */
-- char pad3[8] __attribute__((__aligned__(RTE_CACHE_LINE_SIZE)));
-+ char pad4[12] __attribute__((__aligned__(RTE_CACHE_LINE_SIZE)));
- void *pool;
-- void *next;
- };
-
- /*
-diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
-index 8c2db1b..61cbbd7 100644
---- a/lib/librte_mbuf/rte_mbuf.h
-+++ b/lib/librte_mbuf/rte_mbuf.h
-@@ -743,6 +743,12 @@ typedef uint64_t MARKER64[0]; /**< marker that allows us to overwrite 8 bytes
- /**
- * The generic rte_mbuf, containing a packet mbuf.
- */
-+/*
-+ * offload in the second cache line, next in the first. Better for vpp
-+ * at least as of right now.
-+ * If you change this structure, you must change the user-mode
-+ * version in rte_mbuf.h
-+ */
- struct rte_mbuf {
- MARKER cacheline0;
-
-@@ -809,6 +815,16 @@ struct rte_mbuf {
- uint16_t vlan_tci; /**< VLAN Tag Control Identifier (CPU order) */
- uint16_t vlan_tci_outer; /**< Outer VLAN Tag Control Identifier (CPU order) */
- #endif /* RTE_NEXT_ABI */
-+ struct rte_mbuf *next; /**< Next segment of scattered packet. */
-+
-+ uint32_t seqn; /**< Sequence number. See also rte_reorder_insert() */
-+#ifdef RTE_NEXT_ABI
-+ uint16_t vlan_tci_outer; /**< Outer VLAN Tag Control Identifier (CPU order) */
-+#endif /* RTE_NEXT_ABI */
-+
-+ /* second cache line - fields only used in slow path or on TX */
-+ MARKER cacheline1 __rte_cache_aligned;
-+
- union {
- uint32_t rss; /**< RSS hash result if RSS enabled */
- struct {
-@@ -828,21 +844,12 @@ struct rte_mbuf {
- uint32_t usr; /**< User defined tags. See rte_distributor_process() */
- } hash; /**< hash information */
-
-- uint32_t seqn; /**< Sequence number. See also rte_reorder_insert() */
--#ifdef RTE_NEXT_ABI
-- uint16_t vlan_tci_outer; /**< Outer VLAN Tag Control Identifier (CPU order) */
--#endif /* RTE_NEXT_ABI */
--
-- /* second cache line - fields only used in slow path or on TX */
-- MARKER cacheline1 __rte_cache_aligned;
--
- union {
- void *userdata; /**< Can be used for external metadata */
- uint64_t udata64; /**< Allow 8-byte userdata on 32-bit */
- };
-
- struct rte_mempool *pool; /**< Pool from which mbuf was allocated. */
-- struct rte_mbuf *next; /**< Next segment of scattered packet. */
-
- /* fields to support TX offloads */
- union {
---
-2.5.0
-
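
Since the whole point of the rearrangement above is to keep the Rx hot fields (including 'next') in the first cache line, a compile-time layout check is a cheap safeguard when carrying such a patch forward. The sketch below is hypothetical: it assumes the patched struct rte_mbuf and would deliberately fail to build against an unpatched tree.

    #include <stddef.h>
    #include <rte_common.h>
    #include <rte_mbuf.h>

    /* With the patch applied, 'next' lives in cache line 0 and the slow-path
     * 'pool' pointer in cache line 1 (64-byte cache lines assumed). */
    static inline void
    check_vpp_mbuf_layout(void)
    {
            RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, next) >= 64);
            RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pool) < 64);
    }
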
diff --git a/dpdk/dpdk-2.1.0_patches/0003-e1000-Set-VLAN-Rx-Offload-tag-correctly.patch b/dpdk/dpdk-2.1.0_patches/0003-e1000-Set-VLAN-Rx-Offload-tag-correctly.patch
deleted file mode 100644
index d1ea27a..0000000
--- a/dpdk/dpdk-2.1.0_patches/0003-e1000-Set-VLAN-Rx-Offload-tag-correctly.patch
+++ /dev/null
@@ -1,75 +0,0 @@
-From 699252f0b685db4cd298e90f0e1d64e4792356f2 Mon Sep 17 00:00:00 2001
-From: Damjan Marion <damarion@cisco.com>
-Date: Wed, 21 Oct 2015 14:46:12 +0200
-Subject: [PATCH 3/9] e1000: Set VLAN Rx Offload tag correctly
-
----
- drivers/net/e1000/igb_rxtx.c | 30 ++++++++++++++++++++++++++++++
- lib/librte_ether/rte_ether.h | 3 +++
- 2 files changed, 33 insertions(+)
-
-diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
-index b13930e..7fe76c8 100644
---- a/drivers/net/e1000/igb_rxtx.c
-+++ b/drivers/net/e1000/igb_rxtx.c
-@@ -885,6 +885,21 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
- pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
- pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
-+ {
-+ /*
-+ * Check packet for VLAN ethernet types and set
-+ * RX Offload flag PKT_RX_VLAN_PKT accordingly.
-+ */
-+ struct ether_hdr *eth_hdr =
-+ rte_pktmbuf_mtod(rxm, struct ether_hdr *);
-+ u16 eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
-+
-+ if ((eth_type == ETHER_TYPE_VLAN) ||
-+ (eth_type == ETHER_TYPE_VLAN_AD) ||
-+ (eth_type == ETHER_TYPE_VLAN_9100) ||
-+ (eth_type == ETHER_TYPE_VLAN_9200))
-+ pkt_flags |= PKT_RX_VLAN_PKT;
-+ }
- rxm->ol_flags = pkt_flags;
- #ifdef RTE_NEXT_ABI
- rxm->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.lower.
-@@ -1123,6 +1138,21 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
- pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
- pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
-+ {
-+ /*
-+ * Check packet for VLAN ethernet types and set
-+ * RX Offload flag PKT_RX_VLAN_PKT accordingly.
-+ */
-+ struct ether_hdr *eth_hdr =
-+ rte_pktmbuf_mtod(rxm, struct ether_hdr *);
-+ u16 eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
-+
-+ if ((eth_type == ETHER_TYPE_VLAN) ||
-+ (eth_type == ETHER_TYPE_VLAN_AD) ||
-+ (eth_type == ETHER_TYPE_VLAN_9100) ||
-+ (eth_type == ETHER_TYPE_VLAN_9200))
-+ pkt_flags |= PKT_RX_VLAN_PKT;
-+ }
- first_seg->ol_flags = pkt_flags;
- #ifdef RTE_NEXT_ABI
- first_seg->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.
-diff --git a/lib/librte_ether/rte_ether.h b/lib/librte_ether/rte_ether.h
-index 07c17d7..fd646ec 100644
---- a/lib/librte_ether/rte_ether.h
-+++ b/lib/librte_ether/rte_ether.h
-@@ -332,6 +332,9 @@ struct vxlan_hdr {
- #define ETHER_TYPE_1588 0x88F7 /**< IEEE 802.1AS 1588 Precise Time Protocol. */
- #define ETHER_TYPE_SLOW 0x8809 /**< Slow protocols (LACP and Marker). */
- #define ETHER_TYPE_TEB 0x6558 /**< Transparent Ethernet Bridging. */
-+#define ETHER_TYPE_VLAN_AD 0x88a8 /**< IEEE 802.1AD VLAN tagging. */
-+#define ETHER_TYPE_VLAN_9100 0x9100 /**< VLAN 0x9100 tagging. */
-+#define ETHER_TYPE_VLAN_9200 0x9200 /**< VLAN 0x9200 tagging. */
-
- #define ETHER_VXLAN_HLEN (sizeof(struct udp_hdr) + sizeof(struct vxlan_hdr))
- /**< VXLAN tunnel header length. */
---
-2.5.0
-
diff --git a/dpdk/dpdk-2.1.0_patches/0004-ixgbe-Wait-a-bit-longer-for-autonegotiation-to-leave.patch b/dpdk/dpdk-2.1.0_patches/0004-ixgbe-Wait-a-bit-longer-for-autonegotiation-to-leave.patch
deleted file mode 100644
index 7524117..0000000
--- a/dpdk/dpdk-2.1.0_patches/0004-ixgbe-Wait-a-bit-longer-for-autonegotiation-to-leave.patch
+++ /dev/null
@@ -1,26 +0,0 @@
-From 67d1c25af7fa16df40a8305405066ba6a40ac659 Mon Sep 17 00:00:00 2001
-From: Shesha Sreenivasamurthy <shesha@cisco.com>
-Date: Wed, 2 Sep 2015 08:46:39 -0700
-Subject: [PATCH 4/9] ixgbe: Wait a bit longer for autonegotiation to leave
- state 0
-
----
- drivers/net/ixgbe/base/ixgbe_82599.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/drivers/net/ixgbe/base/ixgbe_82599.c b/drivers/net/ixgbe/base/ixgbe_82599.c
-index f0deb59..ae66380 100644
---- a/drivers/net/ixgbe/base/ixgbe_82599.c
-+++ b/drivers/net/ixgbe/base/ixgbe_82599.c
-@@ -2442,7 +2442,7 @@ s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
- autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT));
- /* Wait for AN to leave state 0 */
-- for (i = 0; i < 10; i++) {
-+ for (i = 0; i < 50; i++) {
- msec_delay(4);
- anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
- if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
---
-2.5.0
-
diff --git a/dpdk/dpdk-2.1.0_patches/0005-eal-Temporarily-turn-off-unthrottled-RTE_LOG.patch b/dpdk/dpdk-2.1.0_patches/0005-eal-Temporarily-turn-off-unthrottled-RTE_LOG.patch
deleted file mode 100644
index 245b43c..0000000
--- a/dpdk/dpdk-2.1.0_patches/0005-eal-Temporarily-turn-off-unthrottled-RTE_LOG.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-From 9e28214eb784b9f68af6e0503f8cefe861f13440 Mon Sep 17 00:00:00 2001
-From: Shesha Sreenivasamurthy <shesha@cisco.com>
-Date: Wed, 2 Sep 2015 08:55:43 -0700
-Subject: [PATCH 5/9] eal: Temporarily turn off unthrottled RTE_LOG(...)
-
-Otherwise, /var/log/syslog eventually fills the disk. The error
-condition seems only to affect ESXi VM's. It'd be worth suggesting log
-throttling to the DPDK community. Much better to avoid making syslog
-(...) calls in the first place.
----
- lib/librte_eal/linuxapp/eal/eal_interrupts.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/lib/librte_eal/linuxapp/eal/eal_interrupts.c b/lib/librte_eal/linuxapp/eal/eal_interrupts.c
-index 3f87875..29a3539 100644
---- a/lib/librte_eal/linuxapp/eal/eal_interrupts.c
-+++ b/lib/librte_eal/linuxapp/eal/eal_interrupts.c
-@@ -709,7 +709,7 @@ eal_intr_process_interrupts(struct epoll_event *events, int nfds)
- * for epoll_wait.
- */
- bytes_read = read(events[n].data.fd, &buf, bytes_read);
-- if (bytes_read < 0) {
-+ if (0 && bytes_read < 0) {
- if (errno == EINTR || errno == EWOULDBLOCK)
- continue;
-
---
-2.5.0
-
diff --git a/dpdk/dpdk-2.1.0_patches/0006-virtio-Cleanup-virtio-pmd-debug-log-output-reset-off.patch b/dpdk/dpdk-2.1.0_patches/0006-virtio-Cleanup-virtio-pmd-debug-log-output-reset-off.patch
deleted file mode 100644
index 9306f11..0000000
--- a/dpdk/dpdk-2.1.0_patches/0006-virtio-Cleanup-virtio-pmd-debug-log-output-reset-off.patch
+++ /dev/null
@@ -1,77 +0,0 @@
-From 21a9bf50270f71ebda5acb5fc233b8279cec56a7 Mon Sep 17 00:00:00 2001
-From: Shesha Sreenivasamurthy <shesha@cisco.com>
-Date: Wed, 2 Sep 2015 08:48:09 -0700
-Subject: [PATCH 6/9] virtio: Cleanup virtio pmd debug log output, reset
- offload field
-
----
- drivers/net/virtio/virtio_ethdev.c | 10 +++++-----
- drivers/net/virtio/virtio_rxtx.c | 4 +++-
- 2 files changed, 8 insertions(+), 6 deletions(-)
-
-diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
-index 465d3cd..6a686fe 100644
---- a/drivers/net/virtio/virtio_ethdev.c
-+++ b/drivers/net/virtio/virtio_ethdev.c
-@@ -1521,24 +1521,24 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet
- link.link_speed = SPEED_10G;
-
- if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
-- PMD_INIT_LOG(DEBUG, "Get link status from hw");
- vtpci_read_dev_config(hw,
- offsetof(struct virtio_net_config, status),
- &status, sizeof(status));
- if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
- link.link_status = 0;
-- PMD_INIT_LOG(DEBUG, "Port %d is down",
-- dev->data->port_id);
- } else {
- link.link_status = 1;
-- PMD_INIT_LOG(DEBUG, "Port %d is up",
-- dev->data->port_id);
- }
- } else {
- link.link_status = 1; /* Link up */
- }
- virtio_dev_atomic_write_link_status(dev, &link);
-
-+ /* This message is far too noisy for normal use */
-+ if (0)
-+ PMD_INIT_LOG(DEBUG, "Port %d is %s\n", dev->data->port_id,
-+ link.link_status ? "up" : "down");
-+
- return (old.link_status == link.link_status) ? -1 : 0;
- }
-
-diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
-index c5b53bb..9f0b759 100644
---- a/drivers/net/virtio/virtio_rxtx.c
-+++ b/drivers/net/virtio/virtio_rxtx.c
-@@ -536,6 +536,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
- rxm->next = NULL;
- rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
- rxm->data_len = (uint16_t)(len[i] - hdr_size);
-+ rxm->ol_flags = 0;
-
- if (hw->vlan_strip)
- rte_vlan_strip(rxm);
-@@ -651,6 +652,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
- rxm->next = NULL;
- rxm->pkt_len = (uint32_t)(len[0] - hdr_size);
- rxm->data_len = (uint16_t)(len[0] - hdr_size);
-+ rxm->ol_flags = 0;
-
- rxm->port = rxvq->port_id;
- rx_pkts[nb_rx] = rxm;
-@@ -752,7 +754,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
- if (unlikely(nb_pkts < 1))
- return nb_pkts;
-
-- PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
-+ PMD_TX_LOG(DEBUG, "%d packets to xmit\n", nb_pkts);
- nb_used = VIRTQUEUE_NUSED(txvq);
-
- virtio_rmb();
---
-2.5.0
-
diff --git a/dpdk/dpdk-2.1.0_patches/0008-enic-fix-dma-addr-of-outgoing-packets.patch b/dpdk/dpdk-2.1.0_patches/0008-enic-fix-dma-addr-of-outgoing-packets.patch
deleted file mode 100644
index 31e828c..0000000
--- a/dpdk/dpdk-2.1.0_patches/0008-enic-fix-dma-addr-of-outgoing-packets.patch
+++ /dev/null
@@ -1,28 +0,0 @@
-From cbb6efb18835860f06a9e02bf63a9fbc2e19d192 Mon Sep 17 00:00:00 2001
-From: Yoann Desmouceaux <ydesmouc@cisco.com>
-Date: Fri, 19 Feb 2016 10:50:51 +0100
-Subject: [PATCH] enic: fix dma addr of outgoing packets
-
-The enic PMD driver send function uses a constant offset instead of relying on the data_off in the mbuf to find the start of the packet.
-
-Signed-off-by: Yoann Desmouceaux <ydesmouc@cisco.com>
----
- drivers/net/enic/enic_main.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
-index f47e96c..7bad59c 100644
---- a/drivers/net/enic/enic_main.c
-+++ b/drivers/net/enic/enic_main.c
-@@ -163,7 +163,7 @@ int enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
- uint8_t cq_entry = eop;
- uint8_t vlan_tag_insert = 0;
- uint64_t bus_addr = (dma_addr_t)
-- (tx_pkt->buf_physaddr + RTE_PKTMBUF_HEADROOM);
-+ (tx_pkt->buf_physaddr + tx_pkt->data_off);
-
- if (sop) {
- if (ol_flags & PKT_TX_VLAN_PKT)
---
-2.1.4
-
diff --git a/dpdk/dpdk-2.2.0_patches/0001-e1000-Set-VLAN-Rx-Offload-tag-correctly.patch b/dpdk/dpdk-2.2.0_patches/0001-e1000-Set-VLAN-Rx-Offload-tag-correctly.patch
deleted file mode 100644
index 6ed2fc6..0000000
--- a/dpdk/dpdk-2.2.0_patches/0001-e1000-Set-VLAN-Rx-Offload-tag-correctly.patch
+++ /dev/null
@@ -1,75 +0,0 @@
-From 4a599535445d16a1c55fac0bd71edc443c6c23f2 Mon Sep 17 00:00:00 2001
-From: Damjan Marion <damarion@cisco.com>
-Date: Wed, 16 Dec 2015 03:21:21 +0100
-Subject: [PATCH 1/4] e1000: Set VLAN Rx Offload tag correctly
-
----
- drivers/net/e1000/igb_rxtx.c | 30 ++++++++++++++++++++++++++++++
- lib/librte_ether/rte_ether.h | 3 +++
- 2 files changed, 33 insertions(+)
-
-diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
-index 996e7da..cbe80a1 100644
---- a/drivers/net/e1000/igb_rxtx.c
-+++ b/drivers/net/e1000/igb_rxtx.c
-@@ -910,6 +910,21 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
- pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
- pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
-+ {
-+ /*
-+ * Check packet for VLAN ethernet types and set
-+ * RX Offload flag PKT_RX_VLAN_PKT accordingly.
-+ */
-+ struct ether_hdr *eth_hdr =
-+ rte_pktmbuf_mtod(rxm, struct ether_hdr *);
-+ u16 eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
-+
-+ if ((eth_type == ETHER_TYPE_VLAN) ||
-+ (eth_type == ETHER_TYPE_VLAN_AD) ||
-+ (eth_type == ETHER_TYPE_VLAN_9100) ||
-+ (eth_type == ETHER_TYPE_VLAN_9200))
-+ pkt_flags |= PKT_RX_VLAN_PKT;
-+ }
- rxm->ol_flags = pkt_flags;
- rxm->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.lower.
- lo_dword.hs_rss.pkt_info);
-@@ -1146,6 +1161,21 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
- pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
- pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
-+ {
-+ /*
-+ * Check packet for VLAN ethernet types and set
-+ * RX Offload flag PKT_RX_VLAN_PKT accordingly.
-+ */
-+ struct ether_hdr *eth_hdr =
-+ rte_pktmbuf_mtod(rxm, struct ether_hdr *);
-+ u16 eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
-+
-+ if ((eth_type == ETHER_TYPE_VLAN) ||
-+ (eth_type == ETHER_TYPE_VLAN_AD) ||
-+ (eth_type == ETHER_TYPE_VLAN_9100) ||
-+ (eth_type == ETHER_TYPE_VLAN_9200))
-+ pkt_flags |= PKT_RX_VLAN_PKT;
-+ }
- first_seg->ol_flags = pkt_flags;
- first_seg->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.
- lower.lo_dword.hs_rss.pkt_info);
-diff --git a/lib/librte_ether/rte_ether.h b/lib/librte_ether/rte_ether.h
-index 07c17d7..fd646ec 100644
---- a/lib/librte_ether/rte_ether.h
-+++ b/lib/librte_ether/rte_ether.h
-@@ -332,6 +332,9 @@ struct vxlan_hdr {
- #define ETHER_TYPE_1588 0x88F7 /**< IEEE 802.1AS 1588 Precise Time Protocol. */
- #define ETHER_TYPE_SLOW 0x8809 /**< Slow protocols (LACP and Marker). */
- #define ETHER_TYPE_TEB 0x6558 /**< Transparent Ethernet Bridging. */
-+#define ETHER_TYPE_VLAN_AD 0x88a8 /**< IEEE 802.1AD VLAN tagging. */
-+#define ETHER_TYPE_VLAN_9100 0x9100 /**< VLAN 0x9100 tagging. */
-+#define ETHER_TYPE_VLAN_9200 0x9200 /**< VLAN 0x9200 tagging. */
-
- #define ETHER_VXLAN_HLEN (sizeof(struct udp_hdr) + sizeof(struct vxlan_hdr))
- /**< VXLAN tunnel header length. */
---
-2.5.0
-
diff --git a/dpdk/dpdk-2.2.0_patches/0002-ixgbe-Wait-a-bit-longer-for-autonegotiation-to-leave.patch b/dpdk/dpdk-2.2.0_patches/0002-ixgbe-Wait-a-bit-longer-for-autonegotiation-to-leave.patch
deleted file mode 100644
index b7a5029..0000000
--- a/dpdk/dpdk-2.2.0_patches/0002-ixgbe-Wait-a-bit-longer-for-autonegotiation-to-leave.patch
+++ /dev/null
@@ -1,25 +0,0 @@
-From 009cd67e5b1ed0592de0fb6ae2fa662ffc172dde Mon Sep 17 00:00:00 2001
-From: Damjan Marion <damarion@cisco.com>
-Date: Wed, 16 Dec 2015 03:22:11 +0100
-Subject: [PATCH 2/4] ixgbe: Wait a bit longer for autonegotiation to leave
-
----
- drivers/net/ixgbe/base/ixgbe_82599.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/drivers/net/ixgbe/base/ixgbe_82599.c b/drivers/net/ixgbe/base/ixgbe_82599.c
-index f0deb59..ae66380 100644
---- a/drivers/net/ixgbe/base/ixgbe_82599.c
-+++ b/drivers/net/ixgbe/base/ixgbe_82599.c
-@@ -2442,7 +2442,7 @@ s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
- autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT));
- /* Wait for AN to leave state 0 */
-- for (i = 0; i < 10; i++) {
-+ for (i = 0; i < 50; i++) {
- msec_delay(4);
- anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
- if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
---
-2.5.0
-
diff --git a/dpdk/dpdk-2.2.0_patches/0003-virtio-Cleanup-virtio-pmd-debug-log-output-reset.patch b/dpdk/dpdk-2.2.0_patches/0003-virtio-Cleanup-virtio-pmd-debug-log-output-reset.patch
deleted file mode 100644
index 874f666..0000000
--- a/dpdk/dpdk-2.2.0_patches/0003-virtio-Cleanup-virtio-pmd-debug-log-output-reset.patch
+++ /dev/null
@@ -1,76 +0,0 @@
-From e2592eb622c33791d8ae51153360bd8249bdd056 Mon Sep 17 00:00:00 2001
-From: Damjan Marion <damarion@cisco.com>
-Date: Wed, 16 Dec 2015 03:29:22 +0100
-Subject: [PATCH 3/4] virtio: Cleanup virtio pmd debug log output, reset
-
----
- drivers/net/virtio/virtio_ethdev.c | 10 +++++-----
- drivers/net/virtio/virtio_rxtx.c | 4 +++-
- 2 files changed, 8 insertions(+), 6 deletions(-)
-
-diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
-index d928339..2fa1587 100644
---- a/drivers/net/virtio/virtio_ethdev.c
-+++ b/drivers/net/virtio/virtio_ethdev.c
-@@ -1635,24 +1635,24 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet
- link.link_speed = SPEED_10G;
-
- if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
-- PMD_INIT_LOG(DEBUG, "Get link status from hw");
- vtpci_read_dev_config(hw,
- offsetof(struct virtio_net_config, status),
- &status, sizeof(status));
- if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
- link.link_status = 0;
-- PMD_INIT_LOG(DEBUG, "Port %d is down",
-- dev->data->port_id);
- } else {
- link.link_status = 1;
-- PMD_INIT_LOG(DEBUG, "Port %d is up",
-- dev->data->port_id);
- }
- } else {
- link.link_status = 1; /* Link up */
- }
- virtio_dev_atomic_write_link_status(dev, &link);
-
-+ /* This message is far too noisy for normal use */
-+ if (0)
-+ PMD_INIT_LOG(DEBUG, "Port %d is %s\n", dev->data->port_id,
-+ link.link_status ? "up" : "down");
-+
- return (old.link_status == link.link_status) ? -1 : 0;
- }
-
-diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
-index 74b39ef..2512bce 100644
---- a/drivers/net/virtio/virtio_rxtx.c
-+++ b/drivers/net/virtio/virtio_rxtx.c
-@@ -618,6 +618,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
- rxm->next = NULL;
- rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
- rxm->data_len = (uint16_t)(len[i] - hdr_size);
-+ rxm->ol_flags = 0;
-
- if (hw->vlan_strip)
- rte_vlan_strip(rxm);
-@@ -737,6 +738,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
- rxm->vlan_tci = 0;
- rxm->pkt_len = (uint32_t)(len[0] - hdr_size);
- rxm->data_len = (uint16_t)(len[0] - hdr_size);
-+ rxm->ol_flags = 0;
-
- rxm->port = rxvq->port_id;
- rx_pkts[nb_rx] = rxm;
-@@ -838,7 +840,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
- if (unlikely(nb_pkts < 1))
- return nb_pkts;
-
-- PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
-+ PMD_TX_LOG(DEBUG, "%d packets to xmit\n", nb_pkts);
- nb_used = VIRTQUEUE_NUSED(txvq);
-
- virtio_rmb();
---
-2.5.0
-
diff --git a/dpdk/dpdk-2.2.0_patches/0004-mbuf-rearrange-rte_mbuf-metadata-to-suit-vpp.patch b/dpdk/dpdk-2.2.0_patches/0004-mbuf-rearrange-rte_mbuf-metadata-to-suit-vpp.patch
deleted file mode 100644
index bee64df..0000000
--- a/dpdk/dpdk-2.2.0_patches/0004-mbuf-rearrange-rte_mbuf-metadata-to-suit-vpp.patch
+++ /dev/null
@@ -1,83 +0,0 @@
-From b8b575a3398c480f6e02525a0933e5e057139b78 Mon Sep 17 00:00:00 2001
-From: Damjan Marion <damarion@cisco.com>
-Date: Wed, 16 Dec 2015 04:25:23 +0100
-Subject: [PATCH 4/4] mbuf: rearrange rte_mbuf metadata to suit vpp
-
----
- .../linuxapp/eal/include/exec-env/rte_kni_common.h | 5 +++--
- lib/librte_mbuf/rte_mbuf.h | 20 ++++++++++++--------
- 2 files changed, 15 insertions(+), 10 deletions(-)
-
-diff --git a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
-index bd1cc09..a68a949 100644
---- a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
-+++ b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
-@@ -120,11 +120,12 @@ struct rte_kni_mbuf {
- char pad2[4];
- uint32_t pkt_len; /**< Total pkt len: sum of all segment data_len. */
- uint16_t data_len; /**< Amount of data in segment buffer. */
-+ char pad3[8];
-+ void *next;
-
- /* fields on second cache line */
-- char pad3[8] __attribute__((__aligned__(RTE_CACHE_LINE_SIZE)));
-+ char pad4[16] __attribute__((__aligned__(RTE_CACHE_LINE_SIZE)));
- void *pool;
-- void *next;
- };
-
- /*
-diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
-index f234ac9..a1b4405 100644
---- a/lib/librte_mbuf/rte_mbuf.h
-+++ b/lib/librte_mbuf/rte_mbuf.h
-@@ -734,6 +734,12 @@ struct rte_mbuf_offload;
- /**
- * The generic rte_mbuf, containing a packet mbuf.
- */
-+/*
-+ * offload in the second cache line, next in the first. Better for vpp
-+ * at least as of right now.
-+ * If you change this structure, you must change the user-mode
-+ * version in rte_mbuf.h
-+ */
- struct rte_mbuf {
- MARKER cacheline0;
-
-@@ -786,6 +792,12 @@ struct rte_mbuf {
- uint32_t pkt_len; /**< Total pkt len: sum of all segments. */
- uint16_t data_len; /**< Amount of data in segment buffer. */
- uint16_t vlan_tci; /**< VLAN Tag Control Identifier (CPU order) */
-+ uint32_t seqn; /**< Sequence number. See also rte_reorder_insert() */
-+ uint16_t vlan_tci_outer; /**< Outer VLAN Tag Control Identifier (CPU order) */
-+ struct rte_mbuf *next; /**< Next segment of scattered packet. */
-+
-+ /* second cache line - fields only used in slow path or on TX */
-+ MARKER cacheline1 __rte_cache_aligned;
-
- union {
- uint32_t rss; /**< RSS hash result if RSS enabled */
-@@ -809,20 +821,12 @@ struct rte_mbuf {
- uint32_t usr; /**< User defined tags. See rte_distributor_process() */
- } hash; /**< hash information */
-
-- uint32_t seqn; /**< Sequence number. See also rte_reorder_insert() */
--
-- uint16_t vlan_tci_outer; /**< Outer VLAN Tag Control Identifier (CPU order) */
--
-- /* second cache line - fields only used in slow path or on TX */
-- MARKER cacheline1 __rte_cache_aligned;
--
- union {
- void *userdata; /**< Can be used for external metadata */
- uint64_t udata64; /**< Allow 8-byte userdata on 32-bit */
- };
-
- struct rte_mempool *pool; /**< Pool from which mbuf was allocated. */
-- struct rte_mbuf *next; /**< Next segment of scattered packet. */
-
- /* fields to support TX offloads */
- union {
---
-2.5.0
-
diff --git a/dpdk/dpdk-2.2.0_patches/0005-missing-include.patch b/dpdk/dpdk-2.2.0_patches/0005-missing-include.patch
deleted file mode 100644
index c6211cd..0000000
--- a/dpdk/dpdk-2.2.0_patches/0005-missing-include.patch
+++ /dev/null
@@ -1,24 +0,0 @@
-From a8767269f3ee545141e83e5a5f62ff24c29248a9 Mon Sep 17 00:00:00 2001
-From: Damjan Marion <damarion@cisco.com>
-Date: Wed, 16 Dec 2015 04:43:40 +0100
-Subject: [PATCH 5/5] missing include
-
----
- lib/librte_eal/linuxapp/eal/eal_timer.c | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/lib/librte_eal/linuxapp/eal/eal_timer.c b/lib/librte_eal/linuxapp/eal/eal_timer.c
-index 9ceff33..d0792be 100644
---- a/lib/librte_eal/linuxapp/eal/eal_timer.c
-+++ b/lib/librte_eal/linuxapp/eal/eal_timer.c
-@@ -51,6 +51,7 @@
- #include <rte_memzone.h>
- #include <rte_eal.h>
- #include <rte_debug.h>
-+#include <rte_lcore.h>
-
- #include "eal_private.h"
- #include "eal_internal_cfg.h"
---
-2.5.0
-
diff --git a/dpdk/dpdk-2.2.0_patches/0006-Fix-a-crash-in-igb_uio-driver-when-the-device-is-rem.patch b/dpdk/dpdk-2.2.0_patches/0006-Fix-a-crash-in-igb_uio-driver-when-the-device-is-rem.patch
deleted file mode 100644
index f15e4aa..0000000
--- a/dpdk/dpdk-2.2.0_patches/0006-Fix-a-crash-in-igb_uio-driver-when-the-device-is-rem.patch
+++ /dev/null
@@ -1,33 +0,0 @@
-From 68d23609ec0c42773043383ff2939a30830e8069 Mon Sep 17 00:00:00 2001
-From: Bud Grise <griseb@cisco.com>
-Date: Tue, 2 Feb 2016 12:45:44 -0800
-Subject: [PATCH 6/8] Fix a crash in igb_uio driver when the device is removed.
-
-This crash happens because the device still has MSI configured;
-the fix is to free the IRQ.
-
-Signed-off-by: Todd Foggoa (tfoggoa) <tfoggoa@cisco.com>
----
- lib/librte_eal/linuxapp/igb_uio/igb_uio.c | 6 ++++++
- 1 file changed, 6 insertions(+)
-
-diff --git a/lib/librte_eal/linuxapp/igb_uio/igb_uio.c b/lib/librte_eal/linuxapp/igb_uio/igb_uio.c
-index f5617d2..23a5cfa 100644
---- a/lib/librte_eal/linuxapp/igb_uio/igb_uio.c
-+++ b/lib/librte_eal/linuxapp/igb_uio/igb_uio.c
-@@ -571,6 +571,12 @@ igbuio_pci_remove(struct pci_dev *dev)
- udev = info->priv;
-
- sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
-+
-+ if (info->irq && (info->irq != UIO_IRQ_CUSTOM)){
-+ free_irq(info->irq, info->uio_dev);
-+ info->irq = UIO_IRQ_NONE;
-+ }
-+
- uio_unregister_device(info);
- igbuio_pci_release_iomem(info);
- if (udev->mode == RTE_INTR_MODE_MSIX)
---
-2.2.1
-
diff --git a/dpdk/dpdk-2.2.0_patches/0007-Allow-applications-to-override-rte_delay_us.patch b/dpdk/dpdk-2.2.0_patches/0007-Allow-applications-to-override-rte_delay_us.patch
deleted file mode 100644
index 4a1494e..0000000
--- a/dpdk/dpdk-2.2.0_patches/0007-Allow-applications-to-override-rte_delay_us.patch
+++ /dev/null
@@ -1,43 +0,0 @@
-From 5d03f3ca8ddc7313de59e54d83912b1f3c049170 Mon Sep 17 00:00:00 2001
-From: "Todd Foggoa (tfoggoa)" <tfoggoa@cisco.com>
-Date: Wed, 3 Feb 2016 08:35:27 -0800
-Subject: [PATCH 7/8] Allow applications to override rte_delay_us()
-
-Some applications may wish to define their own implementation of
-usec delay other than the existing blocking one. The default
-behavior remains unchanged.
-
-Signed-off-by: Todd Foggoa (tfoggoa) <tfoggoa@cisco.com>
----
- lib/librte_eal/common/eal_common_timer.c | 12 ++++++++++++
- 1 file changed, 12 insertions(+)
-
-diff --git a/lib/librte_eal/common/eal_common_timer.c b/lib/librte_eal/common/eal_common_timer.c
-index 72371b8..5189fa5 100644
---- a/lib/librte_eal/common/eal_common_timer.c
-+++ b/lib/librte_eal/common/eal_common_timer.c
-@@ -47,9 +47,21 @@
- /* The frequency of the RDTSC timer resolution */
- static uint64_t eal_tsc_resolution_hz;
-
-+/* Allow an override of the rte_delay_us function */
-+int rte_delay_us_override (unsigned us) __attribute__((weak));
-+
-+int
-+rte_delay_us_override(__attribute__((unused)) unsigned us)
-+{
-+ return 0;
-+}
-+
- void
- rte_delay_us(unsigned us)
- {
-+ if (rte_delay_us_override(us))
-+ return;
-+
- const uint64_t start = rte_get_timer_cycles();
- const uint64_t ticks = (uint64_t)us * rte_get_timer_hz() / 1E6;
- while ((rte_get_timer_cycles() - start) < ticks)
---
-2.2.1
-
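
The weak symbol above is the entire hook: an application overrides the delay simply by providing a strong rte_delay_us_override() and returning non-zero once it has handled the delay itself. A minimal, hypothetical application-side sketch follows; the sleep-based policy is just an example of what an override might do.

    #include <time.h>

    int rte_delay_us_override(unsigned us);

    /* Strong definition shadows the weak one added by the patch above.
     * Returning non-zero makes rte_delay_us() skip its blocking busy-wait. */
    int
    rte_delay_us_override(unsigned us)
    {
            if (us >= 1000) {
                    struct timespec ts = {
                            .tv_sec = us / 1000000,
                            .tv_nsec = (us % 1000000) * 1000,
                    };
                    nanosleep(&ts, NULL);   /* sleep instead of spinning */
                    return 1;               /* handled, default path skipped */
            }
            return 0;                       /* short delays: keep the busy-wait */
    }
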
diff --git a/dpdk/dpdk-2.2.0_patches/0008-Add-missing-init-of-packet_type-field.patch b/dpdk/dpdk-2.2.0_patches/0008-Add-missing-init-of-packet_type-field.patch
deleted file mode 100644
index 04c1fb3..0000000
--- a/dpdk/dpdk-2.2.0_patches/0008-Add-missing-init-of-packet_type-field.patch
+++ /dev/null
@@ -1,70 +0,0 @@
-From 666ceb1d0c11e2ca69baacc272b7d5690d3f11a3 Mon Sep 17 00:00:00 2001
-From: Bud Grise <griseb@cisco.com>
-Date: Mon, 1 Feb 2016 14:28:01 -0500
-Subject: [PATCH 8/8] Add missing init of packet_type field.
-
-This can cause packets to be mishandled in systems with more than
-one type of driver in use.
-
-Signed-off-by: Todd Foggoa (tfoggoa) <tfoggoa@cisco.com>
----
- drivers/net/e1000/em_rxtx.c | 2 ++
- drivers/net/virtio/virtio_rxtx.c | 2 ++
- drivers/net/vmxnet3/vmxnet3_rxtx.c | 1 +
- 3 files changed, 5 insertions(+)
-
-diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
-index d8fb252..8796c8a 100644
---- a/drivers/net/e1000/em_rxtx.c
-+++ b/drivers/net/e1000/em_rxtx.c
-@@ -799,6 +799,7 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- rxm->ol_flags = rx_desc_status_to_pkt_flags(status);
- rxm->ol_flags = rxm->ol_flags |
- rx_desc_error_to_pkt_flags(rxd.errors);
-+ rxm->packet_type = RTE_PTYPE_UNKNOWN;
-
- /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
- rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
-@@ -1025,6 +1026,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- first_seg->ol_flags = rx_desc_status_to_pkt_flags(status);
- first_seg->ol_flags = first_seg->ol_flags |
- rx_desc_error_to_pkt_flags(rxd.errors);
-+ first_seg->packet_type = RTE_PTYPE_UNKNOWN;
-
- /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
- rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
-diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
-index 2512bce..a74c816 100644
---- a/drivers/net/virtio/virtio_rxtx.c
-+++ b/drivers/net/virtio/virtio_rxtx.c
-@@ -619,6 +619,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
- rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
- rxm->data_len = (uint16_t)(len[i] - hdr_size);
- rxm->ol_flags = 0;
-+ rxm->packet_type = RTE_PTYPE_UNKNOWN;
-
- if (hw->vlan_strip)
- rte_vlan_strip(rxm);
-@@ -739,6 +740,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
- rxm->pkt_len = (uint32_t)(len[0] - hdr_size);
- rxm->data_len = (uint16_t)(len[0] - hdr_size);
- rxm->ol_flags = 0;
-+ rxm->packet_type = RTE_PTYPE_UNKNOWN;
-
- rxm->port = rxvq->port_id;
- rx_pkts[nb_rx] = rxm;
-diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
-index 4de5d89..c76b230 100644
---- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
-+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
-@@ -640,6 +640,7 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
- rxm->data_off = RTE_PKTMBUF_HEADROOM;
- rxm->ol_flags = 0;
- rxm->vlan_tci = 0;
-+ rxm->packet_type = RTE_PTYPE_UNKNOWN;
-
- vmxnet3_rx_offload(rcd, rxm);
-
---
-1.9.1
-
diff --git a/dpdk/dpdk-2.2.0_patches/0009-Temporarily-disable-unthrottled-log-message.patch b/dpdk/dpdk-2.2.0_patches/0009-Temporarily-disable-unthrottled-log-message.patch
deleted file mode 100644
index 62132b1..0000000
--- a/dpdk/dpdk-2.2.0_patches/0009-Temporarily-disable-unthrottled-log-message.patch
+++ /dev/null
@@ -1,26 +0,0 @@
-From da141a8f16224a97a1a4093a3293f9bb6b15fa90 Mon Sep 17 00:00:00 2001
-From: Dave Barach <dave@barachs.net>
-Date: Tue, 9 Feb 2016 10:22:39 -0500
-Subject: [PATCH] Temporarily disable unthrottled log message.
-
-Signed-off-by: Dave Barach <dave@barachs.net>
----
- lib/librte_eal/linuxapp/eal/eal_interrupts.c | 2 ++
- 1 file changed, 2 insertions(+)
-
-diff --git a/lib/librte_eal/linuxapp/eal/eal_interrupts.c b/lib/librte_eal/linuxapp/eal/eal_interrupts.c
-index 06b26a9..8d918a4 100644
---- a/lib/librte_eal/linuxapp/eal/eal_interrupts.c
-+++ b/lib/librte_eal/linuxapp/eal/eal_interrupts.c
-@@ -711,6 +711,8 @@ eal_intr_process_interrupts(struct epoll_event *events, int nfds)
- if (errno == EINTR || errno == EWOULDBLOCK)
- continue;
-
-+ /* $$$ disable to avoid filling /var/log */
-+ if (0)
- RTE_LOG(ERR, EAL, "Error reading from file "
- "descriptor %d: %s\n",
- events[n].data.fd,
---
-1.9.1
-
diff --git a/dpdk/dpdk-2.2.0_patches/0010-enic-fix-dma-addr-of-outgoing-packets.patch b/dpdk/dpdk-2.2.0_patches/0010-enic-fix-dma-addr-of-outgoing-packets.patch
deleted file mode 100644
index a524007..0000000
--- a/dpdk/dpdk-2.2.0_patches/0010-enic-fix-dma-addr-of-outgoing-packets.patch
+++ /dev/null
@@ -1,28 +0,0 @@
-From c68ded695938b43682d4bd7dfaf40e5b267dfe3b Mon Sep 17 00:00:00 2001
-From: Yoann Desmouceaux <ydesmouc@cisco.com>
-Date: Fri, 19 Feb 2016 12:49:29 +0100
-Subject: [PATCH] enic: fix dma addr of outgoing packets
-
-The enic PMD driver send function uses a constant offset instead of relying on the data_off in the mbuf to find the start of the packet.
-
-Signed-off-by: Yoann Desmouceaux <ydesmouc@cisco.com>
----
- drivers/net/enic/enic_main.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
-index 07a9810..f818c32 100644
---- a/drivers/net/enic/enic_main.c
-+++ b/drivers/net/enic/enic_main.c
-@@ -166,7 +166,7 @@ void enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
- uint16_t mss = 0;
- uint8_t vlan_tag_insert = 0;
- uint64_t bus_addr = (dma_addr_t)
-- (tx_pkt->buf_physaddr + RTE_PKTMBUF_HEADROOM);
-+ (tx_pkt->buf_physaddr + tx_pkt->data_off);
-
- if (sop) {
- if (ol_flags & PKT_TX_VLAN_PKT)
---
-2.1.4
-
diff --git a/dpdk/dpdk-2.2.0_patches/0011-enic-improve-Rx-performance.patch b/dpdk/dpdk-2.2.0_patches/0011-enic-improve-Rx-performance.patch
deleted file mode 100644
index 2aa4840..0000000
--- a/dpdk/dpdk-2.2.0_patches/0011-enic-improve-Rx-performance.patch
+++ /dev/null
@@ -1,1349 +0,0 @@
-From 057358356e7d05f07ab2df37c12b1cce37a3cca9 Mon Sep 17 00:00:00 2001
-From: John Daley <johndale@cisco.com>
-Date: Fri, 4 Mar 2016 13:09:00 -0800
-Subject: [PATCH 11/22] enic: improve Rx performance
-
- This is a wholesale replacement of the Enic PMD receive path in order
- to improve performance and code clarity. The changes are:
- - Simplify and reduce code path length of receive function.
- - Put most of the fast-path receive functions in one file.
- - Reduce the number of posted_index updates (pay attention to
- rx_free_thresh)
- - Remove the unneeded container structure around the RQ mbuf ring
- - Prefetch next Mbuf and descriptors while processing the current one
- - Use a lookup table for converting CQ flags to mbuf flags.
-
- Signed-off-by: John Daley <johndale@cisco.com>
----
- drivers/net/enic/Makefile | 1 +
- drivers/net/enic/base/vnic_rq.c | 99 ++---------
- drivers/net/enic/base/vnic_rq.h | 147 +---------------
- drivers/net/enic/enic.h | 16 +-
- drivers/net/enic/enic_ethdev.c | 27 ++-
- drivers/net/enic/enic_main.c | 321 ++++++++++------------------------
- drivers/net/enic/enic_res.h | 16 +-
- drivers/net/enic/enic_rx.c | 370 ++++++++++++++++++++++++++++++++++++++++
- 8 files changed, 511 insertions(+), 486 deletions(-)
- create mode 100644 drivers/net/enic/enic_rx.c
-
-diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile
-index f0ee093..f316274 100644
---- a/drivers/net/enic/Makefile
-+++ b/drivers/net/enic/Makefile
-@@ -53,6 +53,7 @@ VPATH += $(SRCDIR)/src
- #
- SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_ethdev.c
- SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_main.c
-+SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_rx.c
- SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_clsf.c
- SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_res.c
- SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_cq.c
-diff --git a/drivers/net/enic/base/vnic_rq.c b/drivers/net/enic/base/vnic_rq.c
-index 1441604..cb62c5e 100644
---- a/drivers/net/enic/base/vnic_rq.c
-+++ b/drivers/net/enic/base/vnic_rq.c
-@@ -35,77 +35,21 @@
- #include "vnic_dev.h"
- #include "vnic_rq.h"
-
--static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
--{
-- struct vnic_rq_buf *buf;
-- unsigned int i, j, count = rq->ring.desc_count;
-- unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);
--
-- for (i = 0; i < blks; i++) {
-- rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_ATOMIC);
-- if (!rq->bufs[i])
-- return -ENOMEM;
-- }
--
-- for (i = 0; i < blks; i++) {
-- buf = rq->bufs[i];
-- for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES(count); j++) {
-- buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES(count) + j;
-- buf->desc = (u8 *)rq->ring.descs +
-- rq->ring.desc_size * buf->index;
-- if (buf->index + 1 == count) {
-- buf->next = rq->bufs[0];
-- break;
-- } else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES(count)) {
-- buf->next = rq->bufs[i + 1];
-- } else {
-- buf->next = buf + 1;
-- buf++;
-- }
-- }
-- }
--
-- rq->to_use = rq->to_clean = rq->bufs[0];
--
-- return 0;
--}
--
--int vnic_rq_mem_size(struct vnic_rq *rq, unsigned int desc_count,
-- unsigned int desc_size)
--{
-- int mem_size = 0;
--
-- mem_size += vnic_dev_desc_ring_size(&rq->ring, desc_count, desc_size);
--
-- mem_size += VNIC_RQ_BUF_BLKS_NEEDED(rq->ring.desc_count) *
-- VNIC_RQ_BUF_BLK_SZ(rq->ring.desc_count);
--
-- return mem_size;
--}
--
- void vnic_rq_free(struct vnic_rq *rq)
- {
- struct vnic_dev *vdev;
-- unsigned int i;
-
- vdev = rq->vdev;
-
- vnic_dev_free_desc_ring(vdev, &rq->ring);
-
-- for (i = 0; i < VNIC_RQ_BUF_BLKS_MAX; i++) {
-- if (rq->bufs[i]) {
-- kfree(rq->bufs[i]);
-- rq->bufs[i] = NULL;
-- }
-- }
--
- rq->ctrl = NULL;
- }
-
- int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
- unsigned int desc_count, unsigned int desc_size)
- {
-- int err;
-+ int rc;
- char res_name[NAME_MAX];
- static int instance;
-
-@@ -121,18 +65,9 @@ int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
- vnic_rq_disable(rq);
-
- snprintf(res_name, sizeof(res_name), "%d-rq-%d", instance++, index);
-- err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size,
-+ rc = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size,
- rq->socket_id, res_name);
-- if (err)
-- return err;
--
-- err = vnic_rq_alloc_bufs(rq);
-- if (err) {
-- vnic_rq_free(rq);
-- return err;
-- }
--
-- return 0;
-+ return rc;
- }
-
- void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
-@@ -154,9 +89,6 @@ void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
- iowrite32(fetch_index, &rq->ctrl->fetch_index);
- iowrite32(posted_index, &rq->ctrl->posted_index);
-
-- rq->to_use = rq->to_clean =
-- &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES(count)]
-- [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)];
- }
-
- void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
-@@ -176,6 +108,8 @@ void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
- fetch_index, fetch_index,
- error_interrupt_enable,
- error_interrupt_offset);
-+ rq->rxst_idx = 0;
-+ rq->tot_pkts = 0;
- }
-
- void vnic_rq_error_out(struct vnic_rq *rq, unsigned int error)
-@@ -212,21 +146,20 @@ int vnic_rq_disable(struct vnic_rq *rq)
- }
-
- void vnic_rq_clean(struct vnic_rq *rq,
-- void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf))
-+ void (*buf_clean)(struct rte_mbuf **buf))
- {
-- struct vnic_rq_buf *buf;
-- u32 fetch_index;
-+ struct rte_mbuf **buf;
-+ u32 fetch_index, i;
- unsigned int count = rq->ring.desc_count;
-
-- buf = rq->to_clean;
--
-- while (vnic_rq_desc_used(rq) > 0) {
-+ buf = &rq->mbuf_ring[0];
-
-- (*buf_clean)(rq, buf);
--
-- buf = rq->to_clean = buf->next;
-- rq->ring.desc_avail++;
-+ for (i = 0; i < count; i++) {
-+ (*buf_clean)(buf);
-+ buf++;
- }
-+ rq->ring.desc_avail = count - 1;
-+ rq->rx_nb_hold = 0;
-
- /* Use current fetch_index as the ring starting point */
- fetch_index = ioread32(&rq->ctrl->fetch_index);
-@@ -235,9 +168,7 @@ void vnic_rq_clean(struct vnic_rq *rq,
- /* Hardware surprise removal: reset fetch_index */
- fetch_index = 0;
- }
-- rq->to_use = rq->to_clean =
-- &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES(count)]
-- [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)];
-+
- iowrite32(fetch_index, &rq->ctrl->posted_index);
-
- vnic_dev_clear_desc_ring(&rq->ring);
-diff --git a/drivers/net/enic/base/vnic_rq.h b/drivers/net/enic/base/vnic_rq.h
-index 0f5c3c1..e083ccc 100644
---- a/drivers/net/enic/base/vnic_rq.h
-+++ b/drivers/net/enic/base/vnic_rq.h
-@@ -66,42 +66,22 @@ struct vnic_rq_ctrl {
- u32 pad10;
- };
-
--/* Break the vnic_rq_buf allocations into blocks of 32/64 entries */
--#define VNIC_RQ_BUF_MIN_BLK_ENTRIES 32
--#define VNIC_RQ_BUF_DFLT_BLK_ENTRIES 64
--#define VNIC_RQ_BUF_BLK_ENTRIES(entries) \
-- ((unsigned int)((entries < VNIC_RQ_BUF_DFLT_BLK_ENTRIES) ? \
-- VNIC_RQ_BUF_MIN_BLK_ENTRIES : VNIC_RQ_BUF_DFLT_BLK_ENTRIES))
--#define VNIC_RQ_BUF_BLK_SZ(entries) \
-- (VNIC_RQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_rq_buf))
--#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
-- DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES(entries))
--#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
--
--struct vnic_rq_buf {
-- struct vnic_rq_buf *next;
-- dma_addr_t dma_addr;
-- void *os_buf;
-- unsigned int os_buf_index;
-- unsigned int len;
-- unsigned int index;
-- void *desc;
-- uint64_t wr_id;
--};
--
- struct vnic_rq {
- unsigned int index;
-+ unsigned int posted_index;
- struct vnic_dev *vdev;
-- struct vnic_rq_ctrl __iomem *ctrl; /* memory-mapped */
-+ struct vnic_rq_ctrl __iomem *ctrl; /* memory-mapped */
- struct vnic_dev_ring ring;
-- struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX];
-- struct vnic_rq_buf *to_use;
-- struct vnic_rq_buf *to_clean;
-+ struct rte_mbuf **mbuf_ring; /* array of allocated mbufs */
-+ unsigned int mbuf_next_idx; /* next mb to consume */
- void *os_buf_head;
- unsigned int pkts_outstanding;
--
-+ uint16_t rx_nb_hold;
-+ uint16_t rx_free_thresh;
- unsigned int socket_id;
- struct rte_mempool *mp;
-+ uint16_t rxst_idx;
-+ uint32_t tot_pkts;
- };
-
- static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
-@@ -116,119 +96,13 @@ static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
- return rq->ring.desc_count - rq->ring.desc_avail - 1;
- }
-
--static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
--{
-- return rq->to_use->desc;
--}
--
--static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
--{
-- return rq->to_use->index;
--}
--
--static inline void vnic_rq_post(struct vnic_rq *rq,
-- void *os_buf, unsigned int os_buf_index,
-- dma_addr_t dma_addr, unsigned int len,
-- uint64_t wrid)
--{
-- struct vnic_rq_buf *buf = rq->to_use;
--
-- buf->os_buf = os_buf;
-- buf->os_buf_index = os_buf_index;
-- buf->dma_addr = dma_addr;
-- buf->len = len;
-- buf->wr_id = wrid;
--
-- buf = buf->next;
-- rq->to_use = buf;
-- rq->ring.desc_avail--;
--
-- /* Move the posted_index every nth descriptor
-- */
--
--#ifndef VNIC_RQ_RETURN_RATE
--#define VNIC_RQ_RETURN_RATE 0xf /* keep 2^n - 1 */
--#endif
--
-- if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
-- /* Adding write memory barrier prevents compiler and/or CPU
-- * reordering, thus avoiding descriptor posting before
-- * descriptor is initialized. Otherwise, hardware can read
-- * stale descriptor fields.
-- */
-- wmb();
-- iowrite32(buf->index, &rq->ctrl->posted_index);
-- }
--}
--
--static inline void vnic_rq_post_commit(struct vnic_rq *rq,
-- void *os_buf, unsigned int os_buf_index,
-- dma_addr_t dma_addr, unsigned int len)
--{
-- struct vnic_rq_buf *buf = rq->to_use;
--
-- buf->os_buf = os_buf;
-- buf->os_buf_index = os_buf_index;
-- buf->dma_addr = dma_addr;
-- buf->len = len;
--
-- buf = buf->next;
-- rq->to_use = buf;
-- rq->ring.desc_avail--;
--
-- /* Move the posted_index every descriptor
-- */
--
-- /* Adding write memory barrier prevents compiler and/or CPU
-- * reordering, thus avoiding descriptor posting before
-- * descriptor is initialized. Otherwise, hardware can read
-- * stale descriptor fields.
-- */
-- wmb();
-- iowrite32(buf->index, &rq->ctrl->posted_index);
--}
-
--static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
--{
-- rq->ring.desc_avail += count;
--}
-
- enum desc_return_options {
- VNIC_RQ_RETURN_DESC,
- VNIC_RQ_DEFER_RETURN_DESC,
- };
-
--static inline int vnic_rq_service(struct vnic_rq *rq,
-- struct cq_desc *cq_desc, u16 completed_index,
-- int desc_return, int (*buf_service)(struct vnic_rq *rq,
-- struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
-- int skipped, void *opaque), void *opaque)
--{
-- struct vnic_rq_buf *buf;
-- int skipped;
-- int eop = 0;
--
-- buf = rq->to_clean;
-- while (1) {
--
-- skipped = (buf->index != completed_index);
--
-- if ((*buf_service)(rq, cq_desc, buf, skipped, opaque))
-- eop++;
--
-- if (desc_return == VNIC_RQ_RETURN_DESC)
-- rq->ring.desc_avail++;
--
-- rq->to_clean = buf->next;
--
-- if (!skipped)
-- break;
--
-- buf = rq->to_clean;
-- }
-- return eop;
--}
--
- static inline int vnic_rq_fill(struct vnic_rq *rq,
- int (*buf_fill)(struct vnic_rq *rq))
- {
-@@ -274,8 +148,5 @@ unsigned int vnic_rq_error_status(struct vnic_rq *rq);
- void vnic_rq_enable(struct vnic_rq *rq);
- int vnic_rq_disable(struct vnic_rq *rq);
- void vnic_rq_clean(struct vnic_rq *rq,
-- void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf));
--int vnic_rq_mem_size(struct vnic_rq *rq, unsigned int desc_count,
-- unsigned int desc_size);
--
-+ void (*buf_clean)(struct rte_mbuf **buf));
- #endif /* _VNIC_RQ_H_ */
-diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
-index 9e78305..8c914f5 100644
---- a/drivers/net/enic/enic.h
-+++ b/drivers/net/enic/enic.h
-@@ -45,6 +45,7 @@
- #include "vnic_nic.h"
- #include "vnic_rss.h"
- #include "enic_res.h"
-+#include "cq_enet_desc.h"
-
- #define DRV_NAME "enic_pmd"
- #define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Poll-mode Driver"
-@@ -154,6 +155,16 @@ static inline struct enic *pmd_priv(struct rte_eth_dev *eth_dev)
- return (struct enic *)eth_dev->data->dev_private;
- }
-
-+#define RTE_LIBRTE_ENIC_ASSERT_ENABLE
-+#ifdef RTE_LIBRTE_ENIC_ASSERT_ENABLE
-+#define ASSERT(x) do { \
-+ if (!(x)) \
-+ rte_panic("ENIC: x"); \
-+} while (0)
-+#else
-+#define ASSERT(x)
-+#endif
-+
- extern void enic_fdir_stats_get(struct enic *enic,
- struct rte_eth_fdir_stats *stats);
- extern int enic_fdir_add_fltr(struct enic *enic,
-@@ -193,9 +204,10 @@ extern void enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
- uint16_t ol_flags, uint16_t vlan_tag);
-
- extern void enic_post_wq_index(struct vnic_wq *wq);
--extern int enic_poll(struct vnic_rq *rq, struct rte_mbuf **rx_pkts,
-- unsigned int budget, unsigned int *work_done);
- extern int enic_probe(struct enic *enic);
- extern int enic_clsf_init(struct enic *enic);
- extern void enic_clsf_destroy(struct enic *enic);
-+uint16_t enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
-+ uint16_t nb_pkts);
-+
- #endif /* _ENIC_H_ */
-diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
-index 2a88043..6f2ada5 100644
---- a/drivers/net/enic/enic_ethdev.c
-+++ b/drivers/net/enic/enic_ethdev.c
-@@ -255,7 +255,7 @@ static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
- uint16_t queue_idx,
- uint16_t nb_desc,
- unsigned int socket_id,
-- __rte_unused const struct rte_eth_rxconf *rx_conf,
-+ const struct rte_eth_rxconf *rx_conf,
- struct rte_mempool *mp)
- {
- int ret;
-@@ -270,6 +270,10 @@ static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
- return ret;
- }
-
-+ enic->rq[queue_idx].rx_free_thresh = rx_conf->rx_free_thresh;
-+ dev_debug(enic, "Set queue_id:%u free thresh:%u\n", queue_idx,
-+ enic->rq[queue_idx].rx_free_thresh);
-+
- return enicpmd_dev_setup_intr(enic);
- }
-
-@@ -429,6 +433,9 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM;
-+ device_info->default_rxconf = (struct rte_eth_rxconf) {
-+ .rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
-+ };
- }
-
- static void enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
-@@ -538,18 +545,6 @@ static uint16_t enicpmd_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
- return index;
- }
-
--static uint16_t enicpmd_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
-- uint16_t nb_pkts)
--{
-- struct vnic_rq *rq = (struct vnic_rq *)rx_queue;
-- unsigned int work_done;
--
-- if (enic_poll(rq, rx_pkts, (unsigned int)nb_pkts, &work_done))
-- dev_err(enic, "error in enicpmd poll\n");
--
-- return work_done;
--}
--
- static const struct eth_dev_ops enicpmd_eth_dev_ops = {
- .dev_configure = enicpmd_dev_configure,
- .dev_start = enicpmd_dev_start,
-@@ -606,7 +601,7 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
- enic->port_id = eth_dev->data->port_id;
- enic->rte_dev = eth_dev;
- eth_dev->dev_ops = &enicpmd_eth_dev_ops;
-- eth_dev->rx_pkt_burst = &enicpmd_recv_pkts;
-+ eth_dev->rx_pkt_burst = &enic_recv_pkts;
- eth_dev->tx_pkt_burst = &enicpmd_xmit_pkts;
-
- pdev = eth_dev->pci_dev;
-@@ -635,8 +630,8 @@ static struct eth_driver rte_enic_pmd = {
- * Register as the [Poll Mode] Driver of Cisco ENIC device.
- */
- static int
--rte_enic_pmd_init(const char *name __rte_unused,
-- const char *params __rte_unused)
-+rte_enic_pmd_init(__rte_unused const char *name,
-+ __rte_unused const char *params)
- {
- ENICPMD_FUNC_TRACE();
-
-diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
-index f818c32..9fff020 100644
---- a/drivers/net/enic/enic_main.c
-+++ b/drivers/net/enic/enic_main.c
-@@ -60,6 +60,17 @@
- #include "vnic_nic.h"
- #include "enic_vnic_wq.h"
-
-+static inline struct rte_mbuf *
-+rte_rxmbuf_alloc(struct rte_mempool *mp)
-+{
-+ struct rte_mbuf *m;
-+
-+ m = __rte_mbuf_raw_alloc(mp);
-+ __rte_mbuf_sanity_check_raw(m, 0);
-+ return m;
-+}
-+
-+
- static inline int enic_is_sriov_vf(struct enic *enic)
- {
- return enic->pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
-@@ -80,16 +91,25 @@ static int is_eth_addr_valid(uint8_t *addr)
- return !is_mcast_addr(addr) && !is_zero_addr(addr);
- }
-
--static inline struct rte_mbuf *
--enic_rxmbuf_alloc(struct rte_mempool *mp)
-+static void
-+enic_rxmbuf_queue_release(struct enic *enic, struct vnic_rq *rq)
- {
-- struct rte_mbuf *m;
-+ uint16_t i;
-
-- m = __rte_mbuf_raw_alloc(mp);
-- __rte_mbuf_sanity_check_raw(m, 0);
-- return m;
-+ if (!rq || !rq->mbuf_ring) {
-+ dev_debug(enic, "Pointer to rq or mbuf_ring is NULL");
-+ return;
-+ }
-+
-+ for (i = 0; i < enic->config.rq_desc_count; i++) {
-+ if (rq->mbuf_ring[i]) {
-+ rte_pktmbuf_free_seg(rq->mbuf_ring[i]);
-+ rq->mbuf_ring[i] = NULL;
-+ }
-+ }
- }
-
-+
- void enic_set_hdr_split_size(struct enic *enic, u16 split_hdr_size)
- {
- vnic_set_hdr_split_size(enic->vdev, split_hdr_size);
-@@ -262,13 +282,13 @@ void enic_set_mac_address(struct enic *enic, uint8_t *mac_addr)
- }
-
- static void
--enic_free_rq_buf(__rte_unused struct vnic_rq *rq, struct vnic_rq_buf *buf)
-+enic_free_rq_buf(struct rte_mbuf **mbuf)
- {
-- if (!buf->os_buf)
-+ if (*mbuf == NULL)
- return;
-
-- rte_pktmbuf_free((struct rte_mbuf *)buf->os_buf);
-- buf->os_buf = NULL;
-+ rte_pktmbuf_free(*mbuf);
-+ mbuf = NULL;
- }
-
- void enic_init_vnic_resources(struct enic *enic)
-@@ -314,221 +334,47 @@ void enic_init_vnic_resources(struct enic *enic)
- }
-
-
--static int enic_rq_alloc_buf(struct vnic_rq *rq)
-+static int
-+enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
- {
-- struct enic *enic = vnic_dev_priv(rq->vdev);
-+ struct rte_mbuf *mb;
-+ struct rq_enet_desc *rqd = rq->ring.descs;
-+ unsigned i;
- dma_addr_t dma_addr;
-- struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
-- uint8_t type = RQ_ENET_TYPE_ONLY_SOP;
-- u16 split_hdr_size = vnic_get_hdr_split_size(enic->vdev);
-- struct rte_mbuf *mbuf = enic_rxmbuf_alloc(rq->mp);
-- struct rte_mbuf *hdr_mbuf = NULL;
--
-- if (!mbuf) {
-- dev_err(enic, "mbuf alloc in enic_rq_alloc_buf failed\n");
-- return -1;
-- }
--
-- if (unlikely(split_hdr_size)) {
-- if (vnic_rq_desc_avail(rq) < 2) {
-- rte_mempool_put(mbuf->pool, mbuf);
-- return -1;
-- }
-- hdr_mbuf = enic_rxmbuf_alloc(rq->mp);
-- if (!hdr_mbuf) {
-- rte_mempool_put(mbuf->pool, mbuf);
-- dev_err(enic,
-- "hdr_mbuf alloc in enic_rq_alloc_buf failed\n");
-- return -1;
-- }
--
-- hdr_mbuf->data_off = RTE_PKTMBUF_HEADROOM;
--
-- hdr_mbuf->nb_segs = 2;
-- hdr_mbuf->port = enic->port_id;
-- hdr_mbuf->next = mbuf;
--
-- dma_addr = (dma_addr_t)
-- (hdr_mbuf->buf_physaddr + hdr_mbuf->data_off);
--
-- rq_enet_desc_enc(desc, dma_addr, type, split_hdr_size);
-
-- vnic_rq_post(rq, (void *)hdr_mbuf, 0 /*os_buf_index*/, dma_addr,
-- (unsigned int)split_hdr_size, 0 /*wrid*/);
-+ dev_debug(enic, "queue %u, allocating %u rx queue mbufs", rq->index,
-+ rq->ring.desc_count);
-
-- desc = vnic_rq_next_desc(rq);
-- type = RQ_ENET_TYPE_NOT_SOP;
-- } else {
-- mbuf->nb_segs = 1;
-- mbuf->port = enic->port_id;
-- }
--
-- mbuf->data_off = RTE_PKTMBUF_HEADROOM;
-- mbuf->next = NULL;
--
-- dma_addr = (dma_addr_t)
-- (mbuf->buf_physaddr + mbuf->data_off);
--
-- rq_enet_desc_enc(desc, dma_addr, type, mbuf->buf_len);
--
-- vnic_rq_post(rq, (void *)mbuf, 0 /*os_buf_index*/, dma_addr,
-- (unsigned int)mbuf->buf_len, 0 /*wrid*/);
--
-- return 0;
--}
--
--static int enic_rq_indicate_buf(struct vnic_rq *rq,
-- struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
-- int skipped, void *opaque)
--{
-- struct enic *enic = vnic_dev_priv(rq->vdev);
-- struct rte_mbuf **rx_pkt_bucket = (struct rte_mbuf **)opaque;
-- struct rte_mbuf *rx_pkt = NULL;
-- struct rte_mbuf *hdr_rx_pkt = NULL;
--
-- u8 type, color, eop, sop, ingress_port, vlan_stripped;
-- u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
-- u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
-- u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
-- u8 packet_error;
-- u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
-- u32 rss_hash;
--
-- cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
-- &type, &color, &q_number, &completed_index,
-- &ingress_port, &fcoe, &eop, &sop, &rss_type,
-- &csum_not_calc, &rss_hash, &bytes_written,
-- &packet_error, &vlan_stripped, &vlan_tci, &checksum,
-- &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
-- &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
-- &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
-- &fcs_ok);
--
-- rx_pkt = (struct rte_mbuf *)buf->os_buf;
-- buf->os_buf = NULL;
--
-- if (unlikely(packet_error)) {
-- dev_err(enic, "packet error\n");
-- rx_pkt->data_len = 0;
-- return 0;
-- }
--
-- if (unlikely(skipped)) {
-- rx_pkt->data_len = 0;
-- return 0;
-- }
--
-- if (likely(!vnic_get_hdr_split_size(enic->vdev))) {
-- /* No header split configured */
-- *rx_pkt_bucket = rx_pkt;
-- rx_pkt->pkt_len = bytes_written;
--
-- if (ipv4) {
-- rx_pkt->packet_type = RTE_PTYPE_L3_IPV4;
-- if (!csum_not_calc) {
-- if (unlikely(!ipv4_csum_ok))
-- rx_pkt->ol_flags |= PKT_RX_IP_CKSUM_BAD;
--
-- if ((tcp || udp) && (!tcp_udp_csum_ok))
-- rx_pkt->ol_flags |= PKT_RX_L4_CKSUM_BAD;
-- }
-- } else if (ipv6)
-- rx_pkt->packet_type = RTE_PTYPE_L3_IPV6;
-- } else {
-- /* Header split */
-- if (sop && !eop) {
-- /* This piece is header */
-- *rx_pkt_bucket = rx_pkt;
-- rx_pkt->pkt_len = bytes_written;
-- } else {
-- if (sop && eop) {
-- /* The packet is smaller than split_hdr_size */
-- *rx_pkt_bucket = rx_pkt;
-- rx_pkt->pkt_len = bytes_written;
-- if (ipv4) {
-- rx_pkt->packet_type = RTE_PTYPE_L3_IPV4;
-- if (!csum_not_calc) {
-- if (unlikely(!ipv4_csum_ok))
-- rx_pkt->ol_flags |=
-- PKT_RX_IP_CKSUM_BAD;
--
-- if ((tcp || udp) &&
-- (!tcp_udp_csum_ok))
-- rx_pkt->ol_flags |=
-- PKT_RX_L4_CKSUM_BAD;
-- }
-- } else if (ipv6)
-- rx_pkt->packet_type = RTE_PTYPE_L3_IPV6;
-- } else {
-- /* Payload */
-- hdr_rx_pkt = *rx_pkt_bucket;
-- hdr_rx_pkt->pkt_len += bytes_written;
-- if (ipv4) {
-- hdr_rx_pkt->packet_type =
-- RTE_PTYPE_L3_IPV4;
-- if (!csum_not_calc) {
-- if (unlikely(!ipv4_csum_ok))
-- hdr_rx_pkt->ol_flags |=
-- PKT_RX_IP_CKSUM_BAD;
--
-- if ((tcp || udp) &&
-- (!tcp_udp_csum_ok))
-- hdr_rx_pkt->ol_flags |=
-- PKT_RX_L4_CKSUM_BAD;
-- }
-- } else if (ipv6)
-- hdr_rx_pkt->packet_type =
-- RTE_PTYPE_L3_IPV6;
-- }
-+ for (i = 0; i < rq->ring.desc_count; i++, rqd++) {
-+ mb = rte_rxmbuf_alloc(rq->mp);
-+ if (mb == NULL) {
-+ dev_err(enic, "RX mbuf alloc failed queue_id=%u",
-+ (unsigned)rq->index);
-+ return -ENOMEM;
- }
-- }
-
-- rx_pkt->data_len = bytes_written;
-+ dma_addr = (dma_addr_t)(mb->buf_physaddr + mb->data_off);
-
-- if (rss_hash) {
-- rx_pkt->ol_flags |= PKT_RX_RSS_HASH;
-- rx_pkt->hash.rss = rss_hash;
-+ rq_enet_desc_enc(rqd, dma_addr, RQ_ENET_TYPE_ONLY_SOP,
-+ mb->buf_len);
-+ rq->mbuf_ring[i] = mb;
- }
-
-- if (vlan_tci) {
-- rx_pkt->ol_flags |= PKT_RX_VLAN_PKT;
-- rx_pkt->vlan_tci = vlan_tci;
-- }
-+ /* make sure all prior writes are complete before doing the PIO write */
-+ rte_rmb();
-
-- return eop;
--}
-+ /* Post all but the last 2 cache lines' worth of descriptors */
-+ rq->posted_index = rq->ring.desc_count - (2 * RTE_CACHE_LINE_SIZE
-+ / sizeof(struct rq_enet_desc));
-+ rq->rx_nb_hold = 0;
-
--static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
-- __rte_unused u8 type, u16 q_number, u16 completed_index, void *opaque)
--{
-- struct enic *enic = vnic_dev_priv(vdev);
--
-- return vnic_rq_service(&enic->rq[q_number], cq_desc,
-- completed_index, VNIC_RQ_RETURN_DESC,
-- enic_rq_indicate_buf, opaque);
--
--}
-+ dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n",
-+ enic->port_id, rq->index, rq->posted_index, rq->rx_nb_hold);
-+ iowrite32(rq->posted_index, &rq->ctrl->posted_index);
-+ rte_rmb();
-
--int enic_poll(struct vnic_rq *rq, struct rte_mbuf **rx_pkts,
-- unsigned int budget, unsigned int *work_done)
--{
-- struct enic *enic = vnic_dev_priv(rq->vdev);
-- unsigned int cq = enic_cq_rq(enic, rq->index);
-- int err = 0;
--
-- *work_done = vnic_cq_service(&enic->cq[cq],
-- budget, enic_rq_service, (void *)rx_pkts);
--
-- if (*work_done) {
-- vnic_rq_fill(rq, enic_rq_alloc_buf);
-+ return 0;
-
-- /* Need at least one buffer on ring to get going */
-- if (vnic_rq_desc_used(rq) == 0) {
-- dev_err(enic, "Unable to alloc receive buffers\n");
-- err = -1;
-- }
-- }
-- return err;
- }
-
- static void *
-@@ -576,6 +422,7 @@ enic_intr_handler(__rte_unused struct rte_intr_handle *handle,
- int enic_enable(struct enic *enic)
- {
- unsigned int index;
-+ int err;
- struct rte_eth_dev *eth_dev = enic->rte_dev;
-
- eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
-@@ -586,15 +433,11 @@ int enic_enable(struct enic *enic)
- dev_warning(enic, "Init of hash table for clsf failed."\
- "Flow director feature will not work\n");
-
-- /* Fill RQ bufs */
- for (index = 0; index < enic->rq_count; index++) {
-- vnic_rq_fill(&enic->rq[index], enic_rq_alloc_buf);
--
-- /* Need at least one buffer on ring to get going
-- */
-- if (vnic_rq_desc_used(&enic->rq[index]) == 0) {
-- dev_err(enic, "Unable to alloc receive buffers\n");
-- return -1;
-+ err = enic_alloc_rx_queue_mbufs(enic, &enic->rq[index]);
-+ if (err) {
-+ dev_err(enic, "Failed to alloc RX queue mbufs\n");
-+ return err;
- }
- }
-
-@@ -636,6 +479,9 @@ void enic_free_rq(void *rxq)
- struct vnic_rq *rq = (struct vnic_rq *)rxq;
- struct enic *enic = vnic_dev_priv(rq->vdev);
-
-+ enic_rxmbuf_queue_release(enic, rq);
-+ rte_free(rq->mbuf_ring);
-+ rq->mbuf_ring = NULL;
- vnic_rq_free(rq);
- vnic_cq_free(&enic->cq[rq->index]);
- }
-@@ -664,7 +510,7 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
- unsigned int socket_id, struct rte_mempool *mp,
- uint16_t nb_desc)
- {
-- int err;
-+ int rc;
- struct vnic_rq *rq = &enic->rq[queue_idx];
-
- rq->socket_id = socket_id;
-@@ -687,23 +533,35 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
- }
-
- /* Allocate queue resources */
-- err = vnic_rq_alloc(enic->vdev, &enic->rq[queue_idx], queue_idx,
-- enic->config.rq_desc_count,
-- sizeof(struct rq_enet_desc));
-- if (err) {
-+ rc = vnic_rq_alloc(enic->vdev, rq, queue_idx,
-+ enic->config.rq_desc_count, sizeof(struct rq_enet_desc));
-+ if (rc) {
- dev_err(enic, "error in allocation of rq\n");
-- return err;
-+ goto err_exit;
- }
-
-- err = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx,
-+ rc = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx,
- socket_id, enic->config.rq_desc_count,
- sizeof(struct cq_enet_rq_desc));
-- if (err) {
-- vnic_rq_free(rq);
-+ if (rc) {
- dev_err(enic, "error in allocation of cq for rq\n");
-+ goto err_free_rq_exit;
- }
-
-- return err;
-+ /* Allocate the mbuf ring */
-+ rq->mbuf_ring = (struct rte_mbuf **)rte_zmalloc_socket("rq->mbuf_ring",
-+ sizeof(struct rte_mbuf *) * enic->config.rq_desc_count,
-+ RTE_CACHE_LINE_SIZE, rq->socket_id);
-+
-+ if (rq->mbuf_ring != NULL)
-+ return 0;
-+
-+ /* cleanup on error */
-+ vnic_cq_free(&enic->cq[queue_idx]);
-+err_free_rq_exit:
-+ vnic_rq_free(rq);
-+err_exit:
-+ return -ENOMEM;
- }
-
- void enic_free_wq(void *txq)
-@@ -790,6 +648,7 @@ int enic_disable(struct enic *enic)
-
- for (i = 0; i < enic->wq_count; i++)
- vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
-+
- for (i = 0; i < enic->rq_count; i++)
- vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
- for (i = 0; i < enic->cq_count; i++)
-@@ -1074,7 +933,7 @@ int enic_probe(struct enic *enic)
-
- /* Set ingress vlan rewrite mode before vnic initialization */
- err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
-- IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN);
-+ IG_VLAN_REWRITE_MODE_PASS_THRU);
- if (err) {
- dev_err(enic,
- "Failed to set ingress vlan rewrite mode, aborting.\n");
-diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h
-index 49f7e22..33f2e84 100644
---- a/drivers/net/enic/enic_res.h
-+++ b/drivers/net/enic/enic_res.h
-@@ -52,6 +52,7 @@
- #define ENIC_UNICAST_PERFECT_FILTERS 32
-
- #define ENIC_NON_TSO_MAX_DESC 16
-+#define ENIC_DEFAULT_RX_FREE_THRESH 32
-
- #define ENIC_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0)
-
-@@ -133,21 +134,6 @@ static inline void enic_queue_wq_desc_tso(struct vnic_wq *wq,
- WQ_ENET_OFFLOAD_MODE_TSO,
- eop, 1 /* SOP */, eop, loopback);
- }
--static inline void enic_queue_rq_desc(struct vnic_rq *rq,
-- void *os_buf, unsigned int os_buf_index,
-- dma_addr_t dma_addr, unsigned int len)
--{
-- struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
-- u64 wrid = 0;
-- u8 type = os_buf_index ?
-- RQ_ENET_TYPE_NOT_SOP : RQ_ENET_TYPE_ONLY_SOP;
--
-- rq_enet_desc_enc(desc,
-- (u64)dma_addr | VNIC_PADDR_TARGET,
-- type, (u16)len);
--
-- vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len, wrid);
--}
-
- struct enic;
-
-diff --git a/drivers/net/enic/enic_rx.c b/drivers/net/enic/enic_rx.c
-new file mode 100644
-index 0000000..945a60f
---- /dev/null
-+++ b/drivers/net/enic/enic_rx.c
-@@ -0,0 +1,370 @@
-+/*
-+ * Copyright 2008-2014 Cisco Systems, Inc. All rights reserved.
-+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
-+ *
-+ * Copyright (c) 2014, Cisco Systems, Inc.
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions
-+ * are met:
-+ *
-+ * 1. Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ *
-+ * 2. Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in
-+ * the documentation and/or other materials provided with the
-+ * distribution.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ *
-+ */
-+
-+#include <rte_mbuf.h>
-+#include <rte_ethdev.h>
-+#include <rte_prefetch.h>
-+
-+#include "enic_compat.h"
-+#include "rq_enet_desc.h"
-+#include "enic.h"
-+
-+#define RTE_PMD_USE_PREFETCH
-+
-+#ifdef RTE_PMD_USE_PREFETCH
-+/*
-+ * Prefetch a cache line into all cache levels.
-+ */
-+#define rte_enic_prefetch(p) rte_prefetch0(p)
-+#else
-+#define rte_enic_prefetch(p) do {} while (0)
-+#endif
-+
-+#ifdef RTE_PMD_PACKET_PREFETCH
-+#define rte_packet_prefetch(p) rte_prefetch1(p)
-+#else
-+#define rte_packet_prefetch(p) do {} while (0)
-+#endif
-+
-+static inline struct rte_mbuf *
-+rte_rxmbuf_alloc(struct rte_mempool *mp)
-+{
-+ struct rte_mbuf *m;
-+
-+ m = __rte_mbuf_raw_alloc(mp);
-+ __rte_mbuf_sanity_check_raw(m, 0);
-+ return m;
-+}
-+
-+static inline uint16_t
-+enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
-+{
-+ return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;
-+}
-+
-+static inline uint16_t
-+enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
-+{
-+ return(le16_to_cpu(crd->bytes_written_flags) &
-+ ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK);
-+}
-+
-+static inline uint8_t
-+enic_cq_rx_desc_packet_error(uint16_t bwflags)
-+{
-+ return((bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
-+ CQ_ENET_RQ_DESC_FLAGS_TRUNCATED);
-+}
-+
-+static inline uint8_t
-+enic_cq_rx_desc_eop(uint16_t ciflags)
-+{
-+ return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
-+ == CQ_ENET_RQ_DESC_FLAGS_EOP;
-+}
-+
-+static inline uint8_t
-+enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
-+{
-+ return ((le16_to_cpu(cqrd->q_number_rss_type_flags) &
-+ CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
-+ CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC);
-+}
-+
-+static inline uint8_t
-+enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
-+{
-+ return ((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
-+ CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK);
-+}
-+
-+static inline uint8_t
-+enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
-+{
-+ return((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
-+ CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK);
-+}
-+
-+static inline uint8_t
-+enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
-+{
-+ return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
-+ CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
-+}
-+
-+static inline uint32_t
-+enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
-+{
-+ return le32_to_cpu(cqrd->rss_hash);
-+}
-+
-+static inline uint8_t
-+enic_cq_rx_desc_fcs_ok(struct cq_enet_rq_desc *cqrd)
-+{
-+ return ((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ==
-+ CQ_ENET_RQ_DESC_FLAGS_FCS_OK);
-+}
-+
-+static inline uint16_t
-+enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
-+{
-+ return le16_to_cpu(cqrd->vlan);
-+}
-+
-+static inline uint16_t
-+enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
-+{
-+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
-+ return le16_to_cpu(cqrd->bytes_written_flags) &
-+ CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
-+}
-+
-+static inline uint64_t
-+enic_cq_rx_to_pkt_err_flags(struct cq_desc *cqd)
-+{
-+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
-+ uint16_t bwflags;
-+ uint64_t pkt_err_flags = 0;
-+
-+ bwflags = enic_cq_rx_desc_bwflags(cqrd);
-+
-+ /* Check for packet error. Can't be more specific than MAC error */
-+ if (enic_cq_rx_desc_packet_error(bwflags)) {
-+ pkt_err_flags |= PKT_RX_MAC_ERR;
-+ }
-+
-+ /* Check for bad FCS. MAC error isn't quite, but no other choice */
-+ if (!enic_cq_rx_desc_fcs_ok(cqrd)) {
-+ pkt_err_flags |= PKT_RX_MAC_ERR;
-+ }
-+ return pkt_err_flags;
-+}
-+
-+/*
-+ * Lookup table to translate RX CQ flags to mbuf flags.
-+ */
-+static inline uint32_t
-+enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd)
-+{
-+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
-+ uint8_t cqrd_flags = cqrd->flags;
-+ static const uint32_t cq_type_table[128] __rte_cache_aligned = {
-+ [32] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
-+ [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
-+ | RTE_PTYPE_L4_UDP,
-+ [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
-+ | RTE_PTYPE_L4_TCP,
-+ [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
-+ | RTE_PTYPE_L4_FRAG,
-+ [16] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
-+ [18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
-+ | RTE_PTYPE_L4_UDP,
-+ [20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
-+ | RTE_PTYPE_L4_TCP,
-+ [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
-+ | RTE_PTYPE_L4_FRAG,
-+ /* All others reserved */
-+ };
-+ cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
-+ | CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
-+ | CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
-+ return cq_type_table[cqrd_flags];
-+}
-+
-+static inline void
-+enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
-+{
-+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
-+ uint16_t ciflags, bwflags, pkt_flags = 0;
-+ ciflags = enic_cq_rx_desc_ciflags(cqrd);
-+ bwflags = enic_cq_rx_desc_bwflags(cqrd);
-+
-+ ASSERT(mbuf->ol_flags == 0);
-+
-+ /* flags are meaningless if !EOP */
-+ if (unlikely(!enic_cq_rx_desc_eop(ciflags)))
-+ goto mbuf_flags_done;
-+
-+ /* VLAN stripping */
-+ if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
-+ pkt_flags |= PKT_RX_VLAN_PKT;
-+ mbuf->vlan_tci = enic_cq_rx_desc_vlan(cqrd);
-+ } else {
-+ mbuf->vlan_tci = 0;
-+ }
-+
-+ /* RSS flag */
-+ if (enic_cq_rx_desc_rss_type(cqrd)) {
-+ pkt_flags |= PKT_RX_RSS_HASH;
-+ mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
-+ }
-+
-+ /* checksum flags */
-+ if (!enic_cq_rx_desc_csum_not_calc(cqrd) &&
-+ (mbuf->packet_type & RTE_PTYPE_L3_IPV4)) {
-+ if (unlikely(!enic_cq_rx_desc_ipv4_csum_ok(cqrd)))
-+ pkt_flags |= PKT_RX_IP_CKSUM_BAD;
-+ if (mbuf->packet_type & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)) {
-+ if (unlikely(!enic_cq_rx_desc_tcp_udp_csum_ok(cqrd)))
-+ pkt_flags |= PKT_RX_L4_CKSUM_BAD;
-+ }
-+ }
-+
-+ mbuf_flags_done:
-+ mbuf->ol_flags = pkt_flags;
-+}
-+
-+static inline uint32_t
-+enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
-+{
-+ uint32_t d = i0 + i1;
-+ ASSERT(i0 < n_descriptors);
-+ ASSERT(i1 < n_descriptors);
-+ d -= (d >= n_descriptors) ? n_descriptors : 0;
-+ return d;
-+}
-+
-+
-+uint16_t
-+enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
-+ uint16_t nb_pkts)
-+{
-+ struct vnic_rq *rq = rx_queue;
-+ struct enic *enic = vnic_dev_priv(rq->vdev);
-+ unsigned int rx_id;
-+ struct rte_mbuf *nmb, *rxmb;
-+ uint16_t nb_rx = 0;
-+ uint16_t nb_hold;
-+ struct vnic_cq *cq;
-+ volatile struct cq_desc *cqd_ptr;
-+ uint8_t color;
-+
-+ cq = &enic->cq[enic_cq_rq(enic, rq->index)];
-+ rx_id = cq->to_clean; /* index of cqd, rqd, mbuf_table */
-+ cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;
-+
-+ nb_hold = rq->rx_nb_hold; /* mbufs held by software */
-+
-+ while (nb_rx < nb_pkts) {
-+ uint16_t rx_pkt_len;
-+ volatile struct rq_enet_desc *rqd_ptr;
-+ dma_addr_t dma_addr;
-+ struct cq_desc cqd;
-+ uint64_t ol_err_flags;
-+
-+ /* Check for pkts available */
-+ color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)
-+ & CQ_DESC_COLOR_MASK;
-+ if (color == cq->last_color)
-+ break;
-+
-+ /* Get the cq descriptor and rq pointer */
-+ cqd = *cqd_ptr;
-+ rqd_ptr = (struct rq_enet_desc *)(rq->ring.descs) + rx_id;
-+
-+ /* allocate a new mbuf */
-+ nmb = rte_rxmbuf_alloc(rq->mp);
-+ if (nmb == NULL) {
-+ dev_err(enic, "RX mbuf alloc failed port=%u qid=%u",
-+ enic->port_id, (unsigned)rq->index);
-+ rte_eth_devices[enic->port_id].
-+ data->rx_mbuf_alloc_failed++;
-+ break;
-+ }
-+
-+ /* Check for FCS or packet errors */
-+ ol_err_flags = enic_cq_rx_to_pkt_err_flags(&cqd);
-+ if (ol_err_flags == 0)
-+ rx_pkt_len = enic_cq_rx_desc_n_bytes(&cqd);
-+ else
-+ rx_pkt_len = 0;
-+
-+ /* Get the mbuf to return and replace with one just allocated */
-+ rxmb = rq->mbuf_ring[rx_id];
-+ rq->mbuf_ring[rx_id] = nmb;
-+
-+ /* Increment cqd, rqd, mbuf_table index */
-+ rx_id++;
-+ if (unlikely(rx_id == rq->ring.desc_count)) {
-+ rx_id = 0;
-+ cq->last_color = cq->last_color ? 0 : 1;
-+ }
-+
-+ /* Prefetch next mbuf & desc while processing current one */
-+ cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;
-+ rte_enic_prefetch(cqd_ptr);
-+ rte_enic_prefetch(rq->mbuf_ring[rx_id]);
-+ rte_enic_prefetch((struct rq_enet_desc *)(rq->ring.descs)
-+ + rx_id);
-+
-+ /* Push descriptor for newly allocated mbuf */
-+ dma_addr = (dma_addr_t)(nmb->buf_physaddr + nmb->data_off);
-+ rqd_ptr->address = rte_cpu_to_le_64(dma_addr);
-+ rqd_ptr->length_type = cpu_to_le16(nmb->buf_len);
-+
-+ /* Fill in the rest of the mbuf */
-+ rxmb->data_off = RTE_PKTMBUF_HEADROOM;
-+ rxmb->nb_segs = 1;
-+ rxmb->next = NULL;
-+ rxmb->pkt_len = rx_pkt_len;
-+ rxmb->data_len = rx_pkt_len;
-+ rxmb->port = enic->port_id;
-+ rxmb->ol_flags = ol_err_flags;
-+ if (!ol_err_flags)
-+ enic_cq_rx_to_pkt_flags(&cqd, rxmb);
-+ rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
-+
-+ /* prefetch mbuf data for caller */
-+ rte_packet_prefetch(RTE_PTR_ADD(rxmb->buf_addr,
-+ RTE_PKTMBUF_HEADROOM));
-+
-+ /* store the mbuf address into the next entry of the array */
-+ rx_pkts[nb_rx++] = rxmb;
-+ }
-+
-+ nb_hold += nb_rx;
-+ cq->to_clean = rx_id;
-+
-+ if (nb_hold > rq->rx_free_thresh) {
-+ rq->posted_index = enic_ring_add(rq->ring.desc_count,
-+ rq->posted_index, nb_hold);
-+ nb_hold = 0;
-+ rte_mb();
-+ iowrite32(rq->posted_index, &rq->ctrl->posted_index);
-+ }
-+
-+ rq->rx_nb_hold = nb_hold;
-+
-+ return nb_rx;
-+}
---
-1.9.1
-
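The core of the reworked receive path above is batching of descriptor returns: the posted index is written back to the NIC only once more than rx_free_thresh completions have accumulated, and ring indexes wrap without a modulo. A simplified standalone model of those two pieces (not driver code):

```c
#include <stdint.h>

/* advance a ring index by n, wrapping at desc_count (assumes n < desc_count) */
static inline uint32_t
ring_add(uint32_t desc_count, uint32_t idx, uint32_t n)
{
	uint32_t d = idx + n;

	return (d >= desc_count) ? d - desc_count : d;
}

/* return non-zero when posted_index should be written to hardware */
static inline int
should_post(uint16_t nb_hold, uint16_t rx_free_thresh)
{
	return nb_hold > rx_free_thresh;
}
```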
diff --git a/dpdk/dpdk-2.2.0_patches/0012-enic-fix-last-packet-not-being-sent.patch b/dpdk/dpdk-2.2.0_patches/0012-enic-fix-last-packet-not-being-sent.patch
deleted file mode 100644
index 218a42f..0000000
--- a/dpdk/dpdk-2.2.0_patches/0012-enic-fix-last-packet-not-being-sent.patch
+++ /dev/null
@@ -1,39 +0,0 @@
-From a31a1dbdf5e1ff46d04f50fea02e83453b84652c Mon Sep 17 00:00:00 2001
-From: John Daley <johndale@cisco.com>
-Date: Tue, 8 Mar 2016 10:49:07 -0800
-Subject: [PATCH 12/22] enic: fix last packet not being sent
-
- The last packet of the tx burst function array was not being
- emitted until the subsequent call. The nic descriptor index
- was being set to the current tx descriptor instead of one past
- the descriptor as required by the nic.
-
- Fixes: d739ba4c6abf ("enic: improve Tx packet rate")
-
- Signed-off-by: John Daley <johndale@cisco.com>
----
- drivers/net/enic/base/enic_vnic_wq.h | 6 +++---
- 1 file changed, 3 insertions(+), 3 deletions(-)
-
-diff --git a/drivers/net/enic/base/enic_vnic_wq.h b/drivers/net/enic/base/enic_vnic_wq.h
-index e3ea574..b019109 100644
---- a/drivers/net/enic/base/enic_vnic_wq.h
-+++ b/drivers/net/enic/base/enic_vnic_wq.h
-@@ -69,11 +69,11 @@ static inline void enic_vnic_post_wq(struct vnic_wq *wq,
- buf->wr_id = wrid;
-
- buf = buf->next;
-- if (cq_entry)
-- enic_vnic_post_wq_index(wq);
-+ wq->ring.desc_avail -= desc_skip_cnt;
- wq->to_use = buf;
-
-- wq->ring.desc_avail -= desc_skip_cnt;
-+ if (cq_entry)
-+ enic_vnic_post_wq_index(wq);
- }
-
- #endif /* _ENIC_VNIC_WQ_H_ */
---
-1.9.1
-
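The ordering bug fixed above comes down to when the doorbell value is taken: it must be sampled only after the software index has moved past the last filled descriptor. A toy model of that ordering, with hypothetical structure and field names:

```c
#include <stdint.h>

struct toy_wq {
	uint32_t to_use;      /* next free descriptor (software view) */
	uint32_t posted;      /* index handed to the NIC (doorbell) */
	uint32_t desc_count;
};

/* consume `used` descriptors, then ring the doorbell one past the last one */
static inline void
toy_wq_post(struct toy_wq *wq, uint32_t used)
{
	wq->to_use = (wq->to_use + used) % wq->desc_count;
	wq->posted = wq->to_use;   /* sampled only after to_use has advanced */
}
```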
diff --git a/dpdk/dpdk-2.2.0_patches/0013-enic-add-missing-newline-to-print-statements.patch b/dpdk/dpdk-2.2.0_patches/0013-enic-add-missing-newline-to-print-statements.patch
deleted file mode 100644
index 97a424f..0000000
--- a/dpdk/dpdk-2.2.0_patches/0013-enic-add-missing-newline-to-print-statements.patch
+++ /dev/null
@@ -1,47 +0,0 @@
-From a1ed99bc24f88f061d75eed0db84dc6355855dd2 Mon Sep 17 00:00:00 2001
-From: Nelson Escobar <neescoba@cisco.com>
-Date: Thu, 17 Mar 2016 15:48:13 -0700
-Subject: [PATCH 13/22] enic: add missing newline to print statements
-
- Add the missing '\n' character to the end of a few print statements.
-
- Fixes: fefed3d1e62c ("enic: new driver")
-
- Signed-off-by: Nelson Escobar <neescoba@cisco.com>
- Acked-by: John Daley <johndale@cisco.com>
----
- drivers/net/enic/enic_main.c | 6 +++---
- 1 file changed, 3 insertions(+), 3 deletions(-)
-
-diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
-index 9fff020..e30672c 100644
---- a/drivers/net/enic/enic_main.c
-+++ b/drivers/net/enic/enic_main.c
-@@ -342,13 +342,13 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
- unsigned i;
- dma_addr_t dma_addr;
-
-- dev_debug(enic, "queue %u, allocating %u rx queue mbufs", rq->index,
-+ dev_debug(enic, "queue %u, allocating %u rx queue mbufs\n", rq->index,
- rq->ring.desc_count);
-
- for (i = 0; i < rq->ring.desc_count; i++, rqd++) {
- mb = rte_rxmbuf_alloc(rq->mp);
- if (mb == NULL) {
-- dev_err(enic, "RX mbuf alloc failed queue_id=%u",
-+ dev_err(enic, "RX mbuf alloc failed queue_id=%u\n",
- (unsigned)rq->index);
- return -ENOMEM;
- }
-@@ -388,7 +388,7 @@ enic_alloc_consistent(__rte_unused void *priv, size_t size,
- rz = rte_memzone_reserve_aligned((const char *)name,
- size, SOCKET_ID_ANY, 0, ENIC_ALIGN);
- if (!rz) {
-- pr_err("%s : Failed to allocate memory requested for %s",
-+ pr_err("%s : Failed to allocate memory requested for %s\n",
- __func__, name);
- return NULL;
- }
---
-1.9.1
-
diff --git a/dpdk/dpdk-2.2.0_patches/0014-vmxnet3-support-jumbo-frames.patch b/dpdk/dpdk-2.2.0_patches/0014-vmxnet3-support-jumbo-frames.patch
deleted file mode 100644
index cae055e..0000000
--- a/dpdk/dpdk-2.2.0_patches/0014-vmxnet3-support-jumbo-frames.patch
+++ /dev/null
@@ -1,171 +0,0 @@
-From fef2b892245d5a2f3c68d2e03a6c5f2a40205cf7 Mon Sep 17 00:00:00 2001
-From: Steve Shin <jonshin@cisco.com>
-Date: Wed, 23 Mar 2016 09:54:54 -0700
-Subject: [PATCH 14/22] vmxnet3: support jumbo frames
-
----
- drivers/net/vmxnet3/vmxnet3_ethdev.c | 3 +-
- drivers/net/vmxnet3/vmxnet3_ring.h | 2 +
- drivers/net/vmxnet3/vmxnet3_rxtx.c | 77 ++++++++++++++++++++++--------------
- 3 files changed, 52 insertions(+), 30 deletions(-)
-
-diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
-index c363bf6..b78acd4 100644
---- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
-+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
-@@ -425,6 +425,7 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
- {
- struct rte_eth_conf port_conf = dev->data->dev_conf;
- struct vmxnet3_hw *hw = dev->data->dev_private;
-+ uint32_t mtu = dev->data->mtu;
- Vmxnet3_DriverShared *shared = hw->shared;
- Vmxnet3_DSDevRead *devRead = &shared->devRead;
- uint32_t *mac_ptr;
-@@ -442,7 +443,7 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
- devRead->misc.driverInfo.vmxnet3RevSpt = 1;
- devRead->misc.driverInfo.uptVerSpt = 1;
-
-- devRead->misc.mtu = rte_le_to_cpu_32(dev->data->mtu);
-+ devRead->misc.mtu = rte_le_to_cpu_32(mtu);
- devRead->misc.queueDescPA = hw->queueDescPA;
- devRead->misc.queueDescLen = hw->queue_desc_len;
- devRead->misc.numTxQueues = hw->num_tx_queues;
-diff --git a/drivers/net/vmxnet3/vmxnet3_ring.h b/drivers/net/vmxnet3/vmxnet3_ring.h
-index 612487e..b1582f8 100644
---- a/drivers/net/vmxnet3/vmxnet3_ring.h
-+++ b/drivers/net/vmxnet3/vmxnet3_ring.h
-@@ -171,6 +171,8 @@ typedef struct vmxnet3_rx_queue {
- uint32_t qid1;
- uint32_t qid2;
- Vmxnet3_RxQueueDesc *shared;
-+ struct rte_mbuf *start_seg;
-+ struct rte_mbuf *last_seg;
- struct vmxnet3_rxq_stats stats;
- bool stopped;
- uint16_t queue_id; /**< Device RX queue index. */
-diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
-index c76b230..59b6db8 100644
---- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
-+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
-@@ -547,7 +547,6 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
- vmxnet3_rx_queue_t *rxq;
- Vmxnet3_RxCompDesc *rcd;
- vmxnet3_buf_info_t *rbi;
-- Vmxnet3_RxDesc *rxd;
- struct rte_mbuf *rxm = NULL;
- struct vmxnet3_hw *hw;
-
-@@ -572,37 +571,16 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
-
- idx = rcd->rxdIdx;
- ring_idx = (uint8_t)((rcd->rqID == rxq->qid1) ? 0 : 1);
-- rxd = (Vmxnet3_RxDesc *)rxq->cmd_ring[ring_idx].base + idx;
- rbi = rxq->cmd_ring[ring_idx].buf_info + idx;
-
-- if (unlikely(rcd->sop != 1 || rcd->eop != 1)) {
-- rte_pktmbuf_free_seg(rbi->m);
-- PMD_RX_LOG(DEBUG, "Packet spread across multiple buffers\n)");
-- goto rcd_done;
-- }
--
- PMD_RX_LOG(DEBUG, "rxd idx: %d ring idx: %d.", idx, ring_idx);
-
-+ #ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
-+ Vmxnet3_RxDesc *rxd
-+ = (Vmxnet3_RxDesc *)rxq->cmd_ring[ring_idx].base + idx;
- VMXNET3_ASSERT(rcd->len <= rxd->len);
- VMXNET3_ASSERT(rbi->m);
--
-- if (unlikely(rcd->len == 0)) {
-- PMD_RX_LOG(DEBUG, "Rx buf was skipped. rxring[%d][%d]\n)",
-- ring_idx, idx);
-- VMXNET3_ASSERT(rcd->sop && rcd->eop);
-- rte_pktmbuf_free_seg(rbi->m);
-- goto rcd_done;
-- }
--
-- /* Assuming a packet is coming in a single packet buffer */
-- if (unlikely(rxd->btype != VMXNET3_RXD_BTYPE_HEAD)) {
-- PMD_RX_LOG(DEBUG,
-- "Alert : Misbehaving device, incorrect "
-- " buffer type used. iPacket dropped.");
-- rte_pktmbuf_free_seg(rbi->m);
-- goto rcd_done;
-- }
-- VMXNET3_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_HEAD);
-+ #endif
-
- /* Get the packet buffer pointer from buf_info */
- rxm = rbi->m;
-@@ -615,7 +593,7 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
- rxq->cmd_ring[ring_idx].next2comp = idx;
-
- /* For RCD with EOP set, check if there is frame error */
-- if (unlikely(rcd->err)) {
-+ if (unlikely(rcd->eop && rcd->err)) {
- rxq->stats.drop_total++;
- rxq->stats.drop_err++;
-
-@@ -642,9 +620,49 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
- rxm->vlan_tci = 0;
- rxm->packet_type = RTE_PTYPE_UNKNOWN;
-
-- vmxnet3_rx_offload(rcd, rxm);
-+ /*
-+ * If this is the first buffer of the received packet,
-+ * set the pointer to the first mbuf of the packet
-+ * Otherwise, update the total length and the number of segments
-+ * of the current scattered packet, and update the pointer to
-+ * the last mbuf of the current packet.
-+ */
-+ if (rcd->sop) {
-+#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
-+ VMXNET3_ASSERT(!rxq->start_seg);
-+ VMXNET3_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_HEAD);
-+#endif
-+
-+ if (unlikely(rcd->len == 0)) {
-+ PMD_RX_LOG(DEBUG,
-+ "Rx buf was skipped. rxring[%d][%d])",
-+ ring_idx, idx);
-+ rte_pktmbuf_free_seg(rbi->m);
-+ goto rcd_done;
-+ }
-+
-+ rxq->start_seg = rxm;
-+ vmxnet3_rx_offload(rcd, rxm);
-+ } else {
-+ struct rte_mbuf *start = rxq->start_seg;
-+
-+#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
-+ VMXNET3_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_BODY);
-+ VMXNET3_ASSERT(start != NULL);
-+#endif
-+
-+ start->pkt_len += rxm->data_len;
-+ start->nb_segs++;
-+
-+ rxq->last_seg->next = rxm;
-+ }
-+ rxq->last_seg = rxm;
-+
-+ if (rcd->eop) {
-+ rx_pkts[nb_rx++] = rxq->start_seg;
-+ rxq->start_seg = NULL;
-+ }
-
-- rx_pkts[nb_rx++] = rxm;
- rcd_done:
- rxq->cmd_ring[ring_idx].next2comp = idx;
- VMXNET3_INC_RING_IDX_ONLY(rxq->cmd_ring[ring_idx].next2comp, rxq->cmd_ring[ring_idx].size);
-@@ -945,6 +963,7 @@ vmxnet3_dev_rxtx_init(struct rte_eth_dev *dev)
- }
- }
- rxq->stopped = FALSE;
-+ rxq->start_seg = NULL;
- }
-
- for (i = 0; i < dev->data->nb_tx_queues; i++) {
---
-1.9.1
-
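The jumbo-frame support being removed here is classic scatter RX: the SOP buffer becomes the head mbuf, later buffers are chained onto it, and the packet is handed up only on EOP. A standalone sketch of that bookkeeping, assuming a DPDK build environment and touching only core rte_mbuf fields:

```c
#include <rte_mbuf.h>

struct scatter_ctx {
	struct rte_mbuf *start_seg;   /* head of the packet being assembled */
	struct rte_mbuf *last_seg;    /* tail of the current chain */
};

/* returns the completed packet on EOP, NULL while still assembling */
static struct rte_mbuf *
scatter_rx_segment(struct scatter_ctx *ctx, struct rte_mbuf *seg,
		   int sop, int eop)
{
	struct rte_mbuf *pkt;

	if (sop) {
		/* fresh head: pkt_len/data_len already describe this segment */
		ctx->start_seg = seg;
	} else {
		ctx->start_seg->pkt_len += seg->data_len;
		ctx->start_seg->nb_segs++;
		ctx->last_seg->next = seg;
	}
	ctx->last_seg = seg;

	if (!eop)
		return NULL;

	pkt = ctx->start_seg;
	ctx->start_seg = NULL;
	return pkt;
}
```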
diff --git a/dpdk/dpdk-2.2.0_patches/0015-enic-fix-crash-when-allocating-too-many-queues.patch b/dpdk/dpdk-2.2.0_patches/0015-enic-fix-crash-when-allocating-too-many-queues.patch
deleted file mode 100644
index 2c0e65d..0000000
--- a/dpdk/dpdk-2.2.0_patches/0015-enic-fix-crash-when-allocating-too-many-queues.patch
+++ /dev/null
@@ -1,51 +0,0 @@
-From 7a7fa2891df4ec4af0c34f3bbd203e1376e83951 Mon Sep 17 00:00:00 2001
-From: Nelson Escobar <neescoba@cisco.com>
-Date: Thu, 17 Mar 2016 15:49:58 -0700
-Subject: [PATCH 15/22] enic: fix crash when allocating too many queues
-
- Add checks to make sure we don't try to allocate more tx or rx queues
- than we support.
-
- Fixes: fefed3d1e62c ("enic: new driver")
-
- Signed-off-by: Nelson Escobar <neescoba@cisco.com>
- Reviewed-by: John Daley <johndale@cisco.com>
----
- drivers/net/enic/enic_ethdev.c | 14 ++++++++++++++
- 1 file changed, 14 insertions(+)
-
-diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
-index 6f2ada5..6c3c734 100644
---- a/drivers/net/enic/enic_ethdev.c
-+++ b/drivers/net/enic/enic_ethdev.c
-@@ -174,6 +174,13 @@ static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
- struct enic *enic = pmd_priv(eth_dev);
-
- ENICPMD_FUNC_TRACE();
-+ if (queue_idx >= ENIC_WQ_MAX) {
-+ dev_err(enic,
-+ "Max number of TX queues exceeded. Max is %d\n",
-+ ENIC_WQ_MAX);
-+ return -EINVAL;
-+ }
-+
- eth_dev->data->tx_queues[queue_idx] = (void *)&enic->wq[queue_idx];
-
- ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc);
-@@ -262,6 +269,13 @@ static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
- struct enic *enic = pmd_priv(eth_dev);
-
- ENICPMD_FUNC_TRACE();
-+ if (queue_idx >= ENIC_RQ_MAX) {
-+ dev_err(enic,
-+ "Max number of RX queues exceeded. Max is %d\n",
-+ ENIC_RQ_MAX);
-+ return -EINVAL;
-+ }
-+
- eth_dev->data->rx_queues[queue_idx] = (void *)&enic->rq[queue_idx];
-
- ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc);
---
-1.9.1
-
diff --git a/dpdk/dpdk-2.2.0_patches/0016-enic-fix-mbuf-flags-on-Rx.patch b/dpdk/dpdk-2.2.0_patches/0016-enic-fix-mbuf-flags-on-Rx.patch
deleted file mode 100644
index 895d571..0000000
--- a/dpdk/dpdk-2.2.0_patches/0016-enic-fix-mbuf-flags-on-Rx.patch
+++ /dev/null
@@ -1,43 +0,0 @@
-From 3ffb9431d6ba34dbcffab5cff4c060d5dca167e1 Mon Sep 17 00:00:00 2001
-From: John Daley <johndale@cisco.com>
-Date: Thu, 17 Mar 2016 15:57:05 -0700
-Subject: [PATCH 16/22] enic: fix mbuf flags on Rx
-
- In the receive path, the function to set mbuf ol_flags used the
- mbuf packet_type before it was set.
-
- Fixes: 947d860c821f ("enic: improve Rx performance")
-
- Signed-off-by: John Daley <johndale@cisco.com>
----
- drivers/net/enic/enic_rx.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/drivers/net/enic/enic_rx.c b/drivers/net/enic/enic_rx.c
-index 945a60f..59ebaa4 100644
---- a/drivers/net/enic/enic_rx.c
-+++ b/drivers/net/enic/enic_rx.c
-@@ -210,7 +210,7 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
- ciflags = enic_cq_rx_desc_ciflags(cqrd);
- bwflags = enic_cq_rx_desc_bwflags(cqrd);
-
-- ASSERT(mbuf->ol_flags == 0);
-+ mbuf->ol_flags = 0;
-
- /* flags are meaningless if !EOP */
- if (unlikely(!enic_cq_rx_desc_eop(ciflags)))
-@@ -340,10 +340,10 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- rxmb->pkt_len = rx_pkt_len;
- rxmb->data_len = rx_pkt_len;
- rxmb->port = enic->port_id;
-+ rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
- rxmb->ol_flags = ol_err_flags;
- if (!ol_err_flags)
- enic_cq_rx_to_pkt_flags(&cqd, rxmb);
-- rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
-
- /* prefetch mbuf data for caller */
- rte_packet_prefetch(RTE_PTR_ADD(rxmb->buf_addr,
---
-1.9.1
-
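The bug fixed above is purely one of ordering: the checksum flags are derived from the L3/L4 bits of packet_type, so packet_type has to be filled in first. A small sketch with hypothetical helper parameters:

```c
#include <rte_mbuf.h>

static void
fill_rx_mbuf(struct rte_mbuf *m, uint32_t ptype, uint64_t csum_bad_flags)
{
	m->packet_type = ptype;                   /* 1) classify first */
	m->ol_flags = 0;
	if (m->packet_type & RTE_PTYPE_L3_IPV4)   /* 2) then derive flags */
		m->ol_flags |= csum_bad_flags;
}
```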
diff --git a/dpdk/dpdk-2.2.0_patches/0017-enic-fix-error-packets-handling.patch b/dpdk/dpdk-2.2.0_patches/0017-enic-fix-error-packets-handling.patch
deleted file mode 100644
index 28c05e8..0000000
--- a/dpdk/dpdk-2.2.0_patches/0017-enic-fix-error-packets-handling.patch
+++ /dev/null
@@ -1,117 +0,0 @@
-From 678e5952cf49bb66c2d697581a70dc8c7d703e8f Mon Sep 17 00:00:00 2001
-From: John Daley <johndale@cisco.com>
-Date: Thu, 17 Mar 2016 15:57:06 -0700
-Subject: [PATCH 17/22] enic: fix error packets handling
-
- If the packet_error bit in the completion descriptor is set, the
- remainder of the descriptor and data are invalid. PKT_RX_MAC_ERR
- was set in the mbuf->ol_flags if packet_error was set and used
- later to indicate an error packet. But since PKT_RX_MAC_ERR is
- defined as 0, mbuf flags and packet types and length were being
- misinterpreted.
-
- Make the function enic_cq_rx_to_pkt_err_flags() return true for error
- packets and use the return value instead of mbuf->ol_flags to indicate
- error packets. Also remove warning for error packets and rely on
- rx_error stats.
-
- Fixes: 947d860c821f ("enic: improve Rx performance")
-
- Signed-off-by: John Daley <johndale@cisco.com>
----
- drivers/net/enic/enic_rx.c | 43 ++++++++++++++++++-------------------------
- 1 file changed, 18 insertions(+), 25 deletions(-)
-
-diff --git a/drivers/net/enic/enic_rx.c b/drivers/net/enic/enic_rx.c
-index 59ebaa4..817a891 100644
---- a/drivers/net/enic/enic_rx.c
-+++ b/drivers/net/enic/enic_rx.c
-@@ -129,13 +129,6 @@ enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
- return le32_to_cpu(cqrd->rss_hash);
- }
-
--static inline uint8_t
--enic_cq_rx_desc_fcs_ok(struct cq_enet_rq_desc *cqrd)
--{
-- return ((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ==
-- CQ_ENET_RQ_DESC_FLAGS_FCS_OK);
--}
--
- static inline uint16_t
- enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
- {
-@@ -150,25 +143,21 @@ enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
- CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
- }
-
--static inline uint64_t
--enic_cq_rx_to_pkt_err_flags(struct cq_desc *cqd)
-+static inline uint8_t
-+enic_cq_rx_to_pkt_err_flags(struct cq_desc *cqd, uint64_t *pkt_err_flags_out)
- {
- struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
- uint16_t bwflags;
-+ int ret = 0;
- uint64_t pkt_err_flags = 0;
-
- bwflags = enic_cq_rx_desc_bwflags(cqrd);
--
-- /* Check for packet error. Can't be more specific than MAC error */
-- if (enic_cq_rx_desc_packet_error(bwflags)) {
-- pkt_err_flags |= PKT_RX_MAC_ERR;
-- }
--
-- /* Check for bad FCS. MAC error isn't quite, but no other choice */
-- if (!enic_cq_rx_desc_fcs_ok(cqrd)) {
-- pkt_err_flags |= PKT_RX_MAC_ERR;
-+ if (unlikely(enic_cq_rx_desc_packet_error(bwflags))) {
-+ pkt_err_flags = PKT_RX_MAC_ERR;
-+ ret = 1;
- }
-- return pkt_err_flags;
-+ *pkt_err_flags_out = pkt_err_flags;
-+ return ret;
- }
-
- /*
-@@ -282,6 +271,7 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- dma_addr_t dma_addr;
- struct cq_desc cqd;
- uint64_t ol_err_flags;
-+ uint8_t packet_error;
-
- /* Check for pkts available */
- color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)
-@@ -303,9 +293,9 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- break;
- }
-
-- /* Check for FCS or packet errors */
-- ol_err_flags = enic_cq_rx_to_pkt_err_flags(&cqd);
-- if (ol_err_flags == 0)
-+ /* A packet error means descriptor and data are untrusted */
-+ packet_error = enic_cq_rx_to_pkt_err_flags(&cqd, &ol_err_flags);
-+ if (!packet_error)
- rx_pkt_len = enic_cq_rx_desc_n_bytes(&cqd);
- else
- rx_pkt_len = 0;
-@@ -340,10 +330,13 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- rxmb->pkt_len = rx_pkt_len;
- rxmb->data_len = rx_pkt_len;
- rxmb->port = enic->port_id;
-- rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
-- rxmb->ol_flags = ol_err_flags;
-- if (!ol_err_flags)
-+ if (!packet_error) {
-+ rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
- enic_cq_rx_to_pkt_flags(&cqd, rxmb);
-+ } else {
-+ rxmb->packet_type = 0;
-+ rxmb->ol_flags = 0;
-+ }
-
- /* prefetch mbuf data for caller */
- rte_packet_prefetch(RTE_PTR_ADD(rxmb->buf_addr,
---
-1.9.1
-
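The rework above exists because PKT_RX_MAC_ERR is defined as 0 in this DPDK release, so an error can never be detected by testing ol_flags; a separate return value is required. A tiny runnable demonstration of the pitfall (toy macro, not the DPDK definition):

```c
#include <stdint.h>
#include <stdio.h>

#define TOY_PKT_RX_MAC_ERR 0ULL   /* zero-valued, like PKT_RX_MAC_ERR here */

int
main(void)
{
	uint64_t flags = TOY_PKT_RX_MAC_ERR;   /* the "error" flag is set... */

	/* ...yet a zero-valued flag cannot be seen by testing the flag word */
	printf("error detected via flags: %s\n", flags ? "yes" : "no");
	return 0;
}
```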
diff --git a/dpdk/dpdk-2.2.0_patches/0018-enic-remove-packet-error-conditional.patch b/dpdk/dpdk-2.2.0_patches/0018-enic-remove-packet-error-conditional.patch
deleted file mode 100644
index 3f29f6e..0000000
--- a/dpdk/dpdk-2.2.0_patches/0018-enic-remove-packet-error-conditional.patch
+++ /dev/null
@@ -1,58 +0,0 @@
-From 2fa6a45ff9f9fb3108b09403e32393416bd0a732 Mon Sep 17 00:00:00 2001
-From: John Daley <johndale@cisco.com>
-Date: Thu, 17 Mar 2016 15:57:07 -0700
-Subject: [PATCH 18/22] enic: remove packet error conditional
-
- Small cleanup to remove the conditional.
-
- Signed-off-by: John Daley <johndale@cisco.com>
----
- drivers/net/enic/enic_rx.c | 10 +++-------
- 1 file changed, 3 insertions(+), 7 deletions(-)
-
-diff --git a/drivers/net/enic/enic_rx.c b/drivers/net/enic/enic_rx.c
-index 817a891..232987a 100644
---- a/drivers/net/enic/enic_rx.c
-+++ b/drivers/net/enic/enic_rx.c
-@@ -266,7 +266,6 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- nb_hold = rq->rx_nb_hold; /* mbufs held by software */
-
- while (nb_rx < nb_pkts) {
-- uint16_t rx_pkt_len;
- volatile struct rq_enet_desc *rqd_ptr;
- dma_addr_t dma_addr;
- struct cq_desc cqd;
-@@ -295,10 +294,6 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
-
- /* A packet error means descriptor and data are untrusted */
- packet_error = enic_cq_rx_to_pkt_err_flags(&cqd, &ol_err_flags);
-- if (!packet_error)
-- rx_pkt_len = enic_cq_rx_desc_n_bytes(&cqd);
-- else
-- rx_pkt_len = 0;
-
- /* Get the mbuf to return and replace with one just allocated */
- rxmb = rq->mbuf_ring[rx_id];
-@@ -327,16 +322,17 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- rxmb->data_off = RTE_PKTMBUF_HEADROOM;
- rxmb->nb_segs = 1;
- rxmb->next = NULL;
-- rxmb->pkt_len = rx_pkt_len;
-- rxmb->data_len = rx_pkt_len;
- rxmb->port = enic->port_id;
- if (!packet_error) {
-+ rxmb->pkt_len = enic_cq_rx_desc_n_bytes(&cqd);
- rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
- enic_cq_rx_to_pkt_flags(&cqd, rxmb);
- } else {
-+ rxmb->pkt_len = 0;
- rxmb->packet_type = 0;
- rxmb->ol_flags = 0;
- }
-+ rxmb->data_len = rxmb->pkt_len;
-
- /* prefetch mbuf data for caller */
- rte_packet_prefetch(RTE_PTR_ADD(rxmb->buf_addr,
---
-1.9.1
-
diff --git a/dpdk/dpdk-2.2.0_patches/0019-enic-update-maintainers.patch b/dpdk/dpdk-2.2.0_patches/0019-enic-update-maintainers.patch
deleted file mode 100644
index 7415177..0000000
--- a/dpdk/dpdk-2.2.0_patches/0019-enic-update-maintainers.patch
+++ /dev/null
@@ -1,42 +0,0 @@
-From 8ad252ab40b8f95db8413220146d54bf8a7d7be8 Mon Sep 17 00:00:00 2001
-From: John Daley <johndale@cisco.com>
-Date: Fri, 18 Mar 2016 11:27:07 -0700
-Subject: [PATCH 19/22] enic: update maintainers
-
- Change maintainers for ENIC PMD and fix pointer to enic
- documentation in MAINTAINERS.
-
- Signed-off-by: John Daley <johndale@cisco.com>
----
- MAINTAINERS | 3 ++-
- doc/guides/nics/enic.rst | 2 +-
- 2 files changed, 3 insertions(+), 2 deletions(-)
-
-diff --git a/MAINTAINERS b/MAINTAINERS
-index b90aeea..f5b8bb4 100644
---- a/MAINTAINERS
-+++ b/MAINTAINERS
-@@ -264,8 +264,9 @@ F: doc/guides/nics/cxgbe.rst
-
- Cisco enic
- M: John Daley <johndale@cisco.com>
--M: Sujith Sankar <ssujith@cisco.com>
-+M: Nelson Escobar <neescoba@cisco.com>
- F: drivers/net/enic/
-+F: doc/guides/nics/enic.rst
-
- Combo szedata2
- M: Matej Vido <matejvido@gmail.com>
-diff --git a/doc/guides/nics/enic.rst b/doc/guides/nics/enic.rst
-index 2a228fd..e67c3db 100644
---- a/doc/guides/nics/enic.rst
-+++ b/doc/guides/nics/enic.rst
-@@ -218,4 +218,4 @@ Any questions or bugs should be reported to DPDK community and to the ENIC PMD
- maintainers:
-
- - John Daley <johndale@cisco.com>
--- Sujith Sankar <ssujith@cisco.com>
-+- Nelson Escobar <neescoba@cisco.com>
---
-1.9.1
-
diff --git a/dpdk/dpdk-2.2.0_patches/0020-enic-fix-Rx-descriptor-limit.patch b/dpdk/dpdk-2.2.0_patches/0020-enic-fix-Rx-descriptor-limit.patch
deleted file mode 100644
index db2ac64..0000000
--- a/dpdk/dpdk-2.2.0_patches/0020-enic-fix-Rx-descriptor-limit.patch
+++ /dev/null
@@ -1,66 +0,0 @@
-From ce6badc60736f5e78a295f30fe84c3e40ad0c330 Mon Sep 17 00:00:00 2001
-From: Nelson Escobar <neescoba@cisco.com>
-Date: Fri, 18 Mar 2016 11:33:34 -0700
-Subject: [PATCH 20/22] enic: fix Rx descriptor limit
-
- On initialization, the rq descriptor count was set to the limit
- of the VIC. When the requested number of Rx descriptors was
- less than this count, enic_alloc_rq() was incorrectly setting
- the count to the lower value. This resulted in later calls to
- enic_alloc_rq() incorrectly using the lower value as the adapter
- limit.
-
- Fixes: fefed3d1e62c ("enic: new driver")
-
- Signed-off-by: Nelson Escobar <neescoba@cisco.com>
- Reviewed-by: John Daley <johndale@cisco.com>
----
- drivers/net/enic/enic_main.c | 14 ++++++--------
- 1 file changed, 6 insertions(+), 8 deletions(-)
-
-diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
-index e30672c..2f79cf0 100644
---- a/drivers/net/enic/enic_main.c
-+++ b/drivers/net/enic/enic_main.c
-@@ -524,24 +524,22 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
- "policy. Applying the value in the adapter "\
- "policy (%d).\n",
- queue_idx, nb_desc, enic->config.rq_desc_count);
-- } else if (nb_desc != enic->config.rq_desc_count) {
-- enic->config.rq_desc_count = nb_desc;
-- dev_info(enic,
-- "RX Queues - effective number of descs:%d\n",
-- nb_desc);
-+ nb_desc = enic->config.rq_desc_count;
- }
-+ dev_info(enic, "RX Queues - effective number of descs:%d\n",
-+ nb_desc);
- }
-
- /* Allocate queue resources */
- rc = vnic_rq_alloc(enic->vdev, rq, queue_idx,
-- enic->config.rq_desc_count, sizeof(struct rq_enet_desc));
-+ nb_desc, sizeof(struct rq_enet_desc));
- if (rc) {
- dev_err(enic, "error in allocation of rq\n");
- goto err_exit;
- }
-
- rc = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx,
-- socket_id, enic->config.rq_desc_count,
-+ socket_id, nb_desc,
- sizeof(struct cq_enet_rq_desc));
- if (rc) {
- dev_err(enic, "error in allocation of cq for rq\n");
-@@ -550,7 +548,7 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
-
- /* Allocate the mbuf ring */
- rq->mbuf_ring = (struct rte_mbuf **)rte_zmalloc_socket("rq->mbuf_ring",
-- sizeof(struct rte_mbuf *) * enic->config.rq_desc_count,
-+ sizeof(struct rte_mbuf *) * nb_desc,
- RTE_CACHE_LINE_SIZE, rq->socket_id);
-
- if (rq->mbuf_ring != NULL)
---
-1.9.1
-
diff --git a/dpdk/dpdk-2.2.0_patches/0021-enic-fix-TX-hang-when-number-of-packets-queue-size.patch b/dpdk/dpdk-2.2.0_patches/0021-enic-fix-TX-hang-when-number-of-packets-queue-size.patch
deleted file mode 100644
index 154e6f1..0000000
--- a/dpdk/dpdk-2.2.0_patches/0021-enic-fix-TX-hang-when-number-of-packets-queue-size.patch
+++ /dev/null
@@ -1,89 +0,0 @@
-From e89ea2a038987102d9eb0a7ea217d7a301b484cb Mon Sep 17 00:00:00 2001
-From: John Daley <johndale@cisco.com>
-Date: Thu, 24 Mar 2016 14:00:39 -0700
-Subject: [PATCH 21/22] enic: fix TX hang when number of packets > queue
- size
-
- If the nb_pkts parameter to rte_eth_tx_burst() was greater than
- the TX descriptor count, a completion was not being requested
- from the NIC, so descriptors would not be released back to the
- host, causing a lock-up.
-
- Introduce a limit on how many TX descriptors can be used in a single
- call to the enic PMD burst TX function before requesting a completion.
-
- Fixes: d739ba4c6abf ("enic: improve Tx packet rate")
-
- Signed-off-by: John Daley <johndale@cisco.com>
----
- drivers/net/enic/enic_ethdev.c | 20 ++++++++++++++++----
- drivers/net/enic/enic_res.h | 1 +
- 2 files changed, 17 insertions(+), 4 deletions(-)
-
-diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
-index 6c3c734..61bb83c 100644
---- a/drivers/net/enic/enic_ethdev.c
-+++ b/drivers/net/enic/enic_ethdev.c
-@@ -510,7 +510,7 @@ static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, __rte_unused ui
- static uint16_t enicpmd_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
- {
-- unsigned int index;
-+ uint16_t index;
- unsigned int frags;
- unsigned int pkt_len;
- unsigned int seg_len;
-@@ -522,6 +522,7 @@ static uint16_t enicpmd_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
- unsigned short vlan_id;
- unsigned short ol_flags;
- uint8_t last_seg, eop;
-+ unsigned int host_tx_descs = 0;
-
- for (index = 0; index < nb_pkts; index++) {
- tx_pkt = *tx_pkts++;
-@@ -537,6 +538,7 @@ static uint16_t enicpmd_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
- return index;
- }
- }
-+
- pkt_len = tx_pkt->pkt_len;
- vlan_id = tx_pkt->vlan_tci;
- ol_flags = tx_pkt->ol_flags;
-@@ -546,9 +548,19 @@ static uint16_t enicpmd_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
- next_tx_pkt = tx_pkt->next;
- seg_len = tx_pkt->data_len;
- inc_len += seg_len;
-- eop = (pkt_len == inc_len) || (!next_tx_pkt);
-- last_seg = eop &&
-- (index == ((unsigned int)nb_pkts - 1));
-+
-+ host_tx_descs++;
-+ last_seg = 0;
-+ eop = 0;
-+ if ((pkt_len == inc_len) || !next_tx_pkt) {
-+ eop = 1;
-+ /* post if last packet in batch or > thresh */
-+ if ((index == (nb_pkts - 1)) ||
-+ (host_tx_descs > ENIC_TX_POST_THRESH)) {
-+ last_seg = 1;
-+ host_tx_descs = 0;
-+ }
-+ }
- enic_send_pkt(enic, wq, tx_pkt, (unsigned short)seg_len,
- !frags, eop, last_seg, ol_flags, vlan_id);
- tx_pkt = next_tx_pkt;
-diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h
-index 33f2e84..00fa71d 100644
---- a/drivers/net/enic/enic_res.h
-+++ b/drivers/net/enic/enic_res.h
-@@ -53,6 +53,7 @@
-
- #define ENIC_NON_TSO_MAX_DESC 16
- #define ENIC_DEFAULT_RX_FREE_THRESH 32
-+#define ENIC_TX_POST_THRESH (ENIC_MIN_WQ_DESCS / 2)
-
- #define ENIC_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0)
-
---
-1.9.1
-
diff --git a/dpdk/dpdk-2.2.0_patches/0022-bonding-fix-bond-link-detect-in-non-interrupt-mode.patch b/dpdk/dpdk-2.2.0_patches/0022-bonding-fix-bond-link-detect-in-non-interrupt-mode.patch
deleted file mode 100644
index 3e03c89..0000000
--- a/dpdk/dpdk-2.2.0_patches/0022-bonding-fix-bond-link-detect-in-non-interrupt-mode.patch
+++ /dev/null
@@ -1,76 +0,0 @@
-From a2f08a919c72af29c56b937e6c92eb104037fed5 Mon Sep 17 00:00:00 2001
-From: Nelson Escobar <neescoba@cisco.com>
-Date: Tue, 22 Mar 2016 13:42:08 -0700
-Subject: [PATCH 22/22] bonding: fix bond link detect in non-interrupt mode
-
- Stopping and then restarting a bond interface containing slaves that
- used polling for link detection caused the bond to think all slave
- links were down and inactive.
-
- Move the start of link-status polling from slave_add() to
- bond_ethdev_start(), and make sure bond_ethdev_stop() clears
- the last_link_status of each slave.
-
- Signed-off-by: Nelson Escobar <neescoba@cisco.com>
- Signed-off-by: John Daley <johndale@cisco.com>
----
- drivers/net/bonding/rte_eth_bond_pmd.c | 27 +++++++++++++++++----------
- 1 file changed, 17 insertions(+), 10 deletions(-)
-
-diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
-index b1373c6..d32c6f5 100644
---- a/drivers/net/bonding/rte_eth_bond_pmd.c
-+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
-@@ -1447,18 +1447,11 @@ slave_add(struct bond_dev_private *internals,
- slave_details->port_id = slave_eth_dev->data->port_id;
- slave_details->last_link_status = 0;
-
-- /* If slave device doesn't support interrupts then we need to enabled
-- * polling to monitor link status */
-+ /* Mark slave devices that don't support interrupts so we can
-+ * compensate when we start the bond
-+ */
- if (!(slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
- slave_details->link_status_poll_enabled = 1;
--
-- if (!internals->link_status_polling_enabled) {
-- internals->link_status_polling_enabled = 1;
--
-- rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
-- bond_ethdev_slave_link_status_change_monitor,
-- (void *)&rte_eth_devices[internals->port_id]);
-- }
- }
-
- slave_details->link_status_wait_to_complete = 0;
-@@ -1543,6 +1536,18 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
- eth_dev->data->port_id, internals->slaves[i].port_id);
- return -1;
- }
-+ /* We will need to poll for link status if any slave doesn't
-+ * support interrupts
-+ */
-+ if (internals->slaves[i].link_status_poll_enabled)
-+ internals->link_status_polling_enabled = 1;
-+ }
-+ /* start polling if needed */
-+ if (internals->link_status_polling_enabled) {
-+ rte_eal_alarm_set(
-+ internals->link_status_polling_interval_ms * 1000,
-+ bond_ethdev_slave_link_status_change_monitor,
-+ (void *)&rte_eth_devices[internals->port_id]);
- }
-
- if (internals->user_defined_primary_port)
-@@ -1615,6 +1620,8 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev)
-
- internals->active_slave_count = 0;
- internals->link_status_polling_enabled = 0;
-+ for (i = 0; i < internals->slave_count; i++)
-+ internals->slaves[i].last_link_status = 0;
-
- eth_dev->data->dev_link.link_status = 0;
- eth_dev->data->dev_started = 0;
---
-1.9.1
-
diff --git a/dpdk/dpdk-2.2.0_patches/0023-enic-expose-RX-missed-packets-counter.patch b/dpdk/dpdk-2.2.0_patches/0023-enic-expose-RX-missed-packets-counter.patch
deleted file mode 100644
index 53f3eaa..0000000
--- a/dpdk/dpdk-2.2.0_patches/0023-enic-expose-RX-missed-packets-counter.patch
+++ /dev/null
@@ -1,27 +0,0 @@
-commit 7182d3e7d17722d088322695fc09f0d3bb7f1eab
-Author: John Daley <johndale@cisco.com>
-Date: Wed Mar 30 11:07:31 2016 -0700
-
- enic: expose Rx missed packets counter
-
- Update the 'imissed' counter with the number of packets dropped
- by the NIC.
-
- Fixes: fefed3d1e62c ("enic: new driver")
-
- Signed-off-by: John Daley <johndale@cisco.com>
- Reviewed-by: Nelson Escobar <neescoba@cisco.com>
-
-diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
-index 2f79cf0..e3da51d 100644
---- a/drivers/net/enic/enic_main.c
-+++ b/drivers/net/enic/enic_main.c
-@@ -246,6 +246,8 @@ void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
- r_stats->ierrors = stats->rx.rx_errors;
- r_stats->oerrors = stats->tx.tx_errors;
-
-+ r_stats->imissed = stats->rx.rx_drop;
-+
- r_stats->imcasts = stats->rx.rx_multicast_frames_ok;
- r_stats->rx_nombuf = stats->rx.rx_no_bufs;
- }
diff --git a/dpdk/dpdk-2.2.0_patches/0024-enic-fix-imissed-rx-counter.patch b/dpdk/dpdk-2.2.0_patches/0024-enic-fix-imissed-rx-counter.patch
deleted file mode 100644
index 81e7bf3..0000000
--- a/dpdk/dpdk-2.2.0_patches/0024-enic-fix-imissed-rx-counter.patch
+++ /dev/null
@@ -1,32 +0,0 @@
-From 3433c7828ec909fccb768636ee21867030da14c9 Mon Sep 17 00:00:00 2001
-From: John Daley <johndale@cisco.com>
-Date: Tue, 26 Apr 2016 13:30:50 -0700
-Subject: [PATCH 1/3] enic: fix 'imissed' to count drops due to lack of RX
- buffers
-
-Fixes: 7182d3e7d177 ("enic: expose Rx missed packets counter")
-Signed-off-by: John Daley <johndale@cisco.com>
----
- drivers/net/enic/enic_main.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
-index 60fe765..be4e9e5 100644
---- a/drivers/net/enic/enic_main.c
-+++ b/drivers/net/enic/enic_main.c
-@@ -243,10 +243,10 @@ void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
- r_stats->ibytes = stats->rx.rx_bytes_ok;
- r_stats->obytes = stats->tx.tx_bytes_ok;
-
-- r_stats->ierrors = stats->rx.rx_errors;
-+ r_stats->ierrors = stats->rx.rx_errors + stats->rx.rx_drop;
- r_stats->oerrors = stats->tx.tx_errors;
-
-- r_stats->imissed = stats->rx.rx_drop;
-+ r_stats->imissed = stats->rx.rx_no_bufs;
-
- r_stats->rx_nombuf = stats->rx.rx_no_bufs;
- }
---
-2.7.0
-
diff --git a/dpdk/dpdk-2.2.0_patches/0025-enic-fix-misalignment-of-Rx-mbuf-data.patch b/dpdk/dpdk-2.2.0_patches/0025-enic-fix-misalignment-of-Rx-mbuf-data.patch
deleted file mode 100644
index 69ca3f3..0000000
--- a/dpdk/dpdk-2.2.0_patches/0025-enic-fix-misalignment-of-Rx-mbuf-data.patch
+++ /dev/null
@@ -1,55 +0,0 @@
-From 454eb71eca1912e32a509c738a99a340cc2488cf Mon Sep 17 00:00:00 2001
-From: John Daley <johndale@cisco.com>
-Date: Mon, 25 Apr 2016 16:24:53 -0700
-Subject: [PATCH 2/3] enic: fix misalignment of Rx mbuf data
-
-Data DMA used m->data_off of uninitialized mbufs instead of
-RTE_PKTMBUF_HEADROOM, potentially causing Rx data to be
-placed at the wrong alignment in the mbuf.
-
-Fixes: 947d860c821f ("enic: improve Rx performance")
-Signed-off-by: John Daley <johndale@cisco.com>
----
- drivers/net/enic/enic_main.c | 5 +++--
- drivers/net/enic/enic_rx.c | 6 ++++--
- 2 files changed, 7 insertions(+), 4 deletions(-)
-
-diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
-index be4e9e5..646d87f 100644
---- a/drivers/net/enic/enic_main.c
-+++ b/drivers/net/enic/enic_main.c
-@@ -354,10 +354,11 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
- return -ENOMEM;
- }
-
-- dma_addr = (dma_addr_t)(mb->buf_physaddr + mb->data_off);
-+ dma_addr = (dma_addr_t)(mb->buf_physaddr
-+ + RTE_PKTMBUF_HEADROOM);
-
- rq_enet_desc_enc(rqd, dma_addr, RQ_ENET_TYPE_ONLY_SOP,
-- mb->buf_len);
-+ mb->buf_len - RTE_PKTMBUF_HEADROOM);
- rq->mbuf_ring[i] = mb;
- }
-
-diff --git a/drivers/net/enic/enic_rx.c b/drivers/net/enic/enic_rx.c
-index 232987a..39bb55c 100644
---- a/drivers/net/enic/enic_rx.c
-+++ b/drivers/net/enic/enic_rx.c
-@@ -314,9 +314,11 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- + rx_id);
-
- /* Push descriptor for newly allocated mbuf */
-- dma_addr = (dma_addr_t)(nmb->buf_physaddr + nmb->data_off);
-+ dma_addr = (dma_addr_t)(nmb->buf_physaddr
-+ + RTE_PKTMBUF_HEADROOM);
- rqd_ptr->address = rte_cpu_to_le_64(dma_addr);
-- rqd_ptr->length_type = cpu_to_le16(nmb->buf_len);
-+ rqd_ptr->length_type = cpu_to_le16(nmb->buf_len
-+ - RTE_PKTMBUF_HEADROOM);
-
- /* Fill in the rest of the mbuf */
- rxmb->data_off = RTE_PKTMBUF_HEADROOM;
---
-2.7.0
-
diff --git a/dpdk/dpdk-2.2.0_patches/0026-enic-Optimization-of-Tx-path-to-reduce-Host-CPU-over.patch b/dpdk/dpdk-2.2.0_patches/0026-enic-Optimization-of-Tx-path-to-reduce-Host-CPU-over.patch
deleted file mode 100644
index 4858b8f..0000000
--- a/dpdk/dpdk-2.2.0_patches/0026-enic-Optimization-of-Tx-path-to-reduce-Host-CPU-over.patch
+++ /dev/null
@@ -1,1844 +0,0 @@
-From ca6bbb723880e91d006de6cc485259da988859aa Mon Sep 17 00:00:00 2001
-From: John Daley <johndale@cisco.com>
-Date: Tue, 5 Apr 2016 15:19:06 -0700
-Subject: [PATCH 3/3] enic: Optimization of Tx path to reduce Host CPU
- overhead, cleanup
-
-Optimizations and cleanup:
-- flatten packet send path
-- flatten mbuf free path
-- disable CQ entry writing and use CQ messages instead
-- use rte_mempool_put_bulk() to bulk return freed mbufs
-- remove unnecessary fields from the vnic_bufs struct; use a contiguous array
- of cache-aligned, divisible elements. No next pointers.
-- use local variables inside the per-packet loop instead of fields in structs.
-- factor bookkeeping out of the per-packet Tx loop where possible
- (removed several conditionals)
-- put Tx and Rx code in one file (enic_rxtx.c)
-
-Reviewed-by: Nelson Escobar <neescoba@cisco.com>
-Signed-off-by: John Daley <johndale@cisco.com>
----
- drivers/net/enic/Makefile | 2 +-
- drivers/net/enic/base/enic_vnic_wq.h | 79 ------
- drivers/net/enic/base/vnic_cq.h | 37 +--
- drivers/net/enic/base/vnic_rq.h | 2 +-
- drivers/net/enic/base/vnic_wq.c | 89 +++---
- drivers/net/enic/base/vnic_wq.h | 113 +-------
- drivers/net/enic/enic.h | 27 +-
- drivers/net/enic/enic_ethdev.c | 67 +----
- drivers/net/enic/enic_main.c | 132 +++------
- drivers/net/enic/enic_res.h | 81 +-----
- drivers/net/enic/enic_rx.c | 361 -------------------------
- drivers/net/enic/enic_rxtx.c | 505 +++++++++++++++++++++++++++++++++++
- 12 files changed, 635 insertions(+), 860 deletions(-)
- delete mode 100644 drivers/net/enic/base/enic_vnic_wq.h
- delete mode 100644 drivers/net/enic/enic_rx.c
- create mode 100644 drivers/net/enic/enic_rxtx.c
-
-diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile
-index f316274..3926b79 100644
---- a/drivers/net/enic/Makefile
-+++ b/drivers/net/enic/Makefile
-@@ -53,7 +53,7 @@ VPATH += $(SRCDIR)/src
- #
- SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_ethdev.c
- SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_main.c
--SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_rx.c
-+SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_rxtx.c
- SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_clsf.c
- SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_res.c
- SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_cq.c
-diff --git a/drivers/net/enic/base/enic_vnic_wq.h b/drivers/net/enic/base/enic_vnic_wq.h
-deleted file mode 100644
-index b019109..0000000
---- a/drivers/net/enic/base/enic_vnic_wq.h
-+++ /dev/null
-@@ -1,79 +0,0 @@
--/*
-- * Copyright 2008-2015 Cisco Systems, Inc. All rights reserved.
-- * Copyright 2007 Nuova Systems, Inc. All rights reserved.
-- *
-- * Copyright (c) 2015, Cisco Systems, Inc.
-- * All rights reserved.
-- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions
-- * are met:
-- *
-- * 1. Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- *
-- * 2. Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in
-- * the documentation and/or other materials provided with the
-- * distribution.
-- *
-- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-- * POSSIBILITY OF SUCH DAMAGE.
-- *
-- */
--
--#ifndef _ENIC_VNIC_WQ_H_
--#define _ENIC_VNIC_WQ_H_
--
--#include "vnic_dev.h"
--#include "vnic_cq.h"
--
--static inline void enic_vnic_post_wq_index(struct vnic_wq *wq)
--{
-- struct vnic_wq_buf *buf = wq->to_use;
--
-- /* Adding write memory barrier prevents compiler and/or CPU
-- * reordering, thus avoiding descriptor posting before
-- * descriptor is initialized. Otherwise, hardware can read
-- * stale descriptor fields.
-- */
-- wmb();
-- iowrite32(buf->index, &wq->ctrl->posted_index);
--}
--
--static inline void enic_vnic_post_wq(struct vnic_wq *wq,
-- void *os_buf, dma_addr_t dma_addr,
-- unsigned int len, int sop,
-- uint8_t desc_skip_cnt, uint8_t cq_entry,
-- uint8_t compressed_send, uint64_t wrid)
--{
-- struct vnic_wq_buf *buf = wq->to_use;
--
-- buf->sop = sop;
-- buf->cq_entry = cq_entry;
-- buf->compressed_send = compressed_send;
-- buf->desc_skip_cnt = desc_skip_cnt;
-- buf->os_buf = os_buf;
-- buf->dma_addr = dma_addr;
-- buf->len = len;
-- buf->wr_id = wrid;
--
-- buf = buf->next;
-- wq->ring.desc_avail -= desc_skip_cnt;
-- wq->to_use = buf;
--
-- if (cq_entry)
-- enic_vnic_post_wq_index(wq);
--}
--
--#endif /* _ENIC_VNIC_WQ_H_ */
-diff --git a/drivers/net/enic/base/vnic_cq.h b/drivers/net/enic/base/vnic_cq.h
-index 922391b..ffc1aaa 100644
---- a/drivers/net/enic/base/vnic_cq.h
-+++ b/drivers/net/enic/base/vnic_cq.h
-@@ -96,41 +96,46 @@ static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
- u8 type, u16 q_number, u16 completed_index, void *opaque),
- void *opaque)
- {
-- struct cq_desc *cq_desc;
-+ struct cq_desc *cq_desc, *cq_desc_last;
- unsigned int work_done = 0;
- u16 q_number, completed_index;
-- u8 type, color;
-- struct rte_mbuf **rx_pkts = opaque;
-- unsigned int ret;
-+ u8 type, color, type_color;
-
- cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
- cq->ring.desc_size * cq->to_clean);
-- cq_desc_dec(cq_desc, &type, &color,
-- &q_number, &completed_index);
-+
-+ type_color = cq_desc->type_color;
-+ color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
-+ if (color == cq->last_color)
-+ return 0;
-
- while (color != cq->last_color) {
-- if (opaque)
-- opaque = (void *)&(rx_pkts[work_done]);
-+ cq_desc_last = cq_desc;
-
-- ret = (*q_service)(cq->vdev, cq_desc, type,
-- q_number, completed_index, opaque);
- cq->to_clean++;
- if (cq->to_clean == cq->ring.desc_count) {
- cq->to_clean = 0;
- cq->last_color = cq->last_color ? 0 : 1;
- }
-
-+ work_done++;
-+ if (work_done >= work_to_do)
-+ break;
-+
- cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
- cq->ring.desc_size * cq->to_clean);
-- cq_desc_dec(cq_desc, &type, &color,
-- &q_number, &completed_index);
-
-- if (ret)
-- work_done++;
-- if (work_done >= work_to_do)
-- break;
-+ type_color = cq_desc->type_color;
-+ color = (type_color >> CQ_DESC_COLOR_SHIFT)
-+ & CQ_DESC_COLOR_MASK;
-+
- }
-
-+ cq_desc_dec(cq_desc_last, &type, &color,
-+ &q_number, &completed_index);
-+
-+ (*q_service)(cq->vdev, cq_desc, type,
-+ q_number, completed_index, opaque);
- return work_done;
- }
-
-diff --git a/drivers/net/enic/base/vnic_rq.h b/drivers/net/enic/base/vnic_rq.h
-index e083ccc..424415c 100644
---- a/drivers/net/enic/base/vnic_rq.h
-+++ b/drivers/net/enic/base/vnic_rq.h
-@@ -74,7 +74,7 @@ struct vnic_rq {
- struct vnic_dev_ring ring;
- struct rte_mbuf **mbuf_ring; /* array of allocated mbufs */
- unsigned int mbuf_next_idx; /* next mb to consume */
-- void *os_buf_head;
-+ void *mb_head;
- unsigned int pkts_outstanding;
- uint16_t rx_nb_hold;
- uint16_t rx_free_thresh;
-diff --git a/drivers/net/enic/base/vnic_wq.c b/drivers/net/enic/base/vnic_wq.c
-index a3ef417..ccbbd61 100644
---- a/drivers/net/enic/base/vnic_wq.c
-+++ b/drivers/net/enic/base/vnic_wq.c
-@@ -59,71 +59,30 @@ int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq,
-
- static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
- {
-- struct vnic_wq_buf *buf;
-- unsigned int i, j, count = wq->ring.desc_count;
-- unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
--
-- for (i = 0; i < blks; i++) {
-- wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC);
-- if (!wq->bufs[i])
-- return -ENOMEM;
-- }
--
-- for (i = 0; i < blks; i++) {
-- buf = wq->bufs[i];
-- for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES(count); j++) {
-- buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES(count) + j;
-- buf->desc = (u8 *)wq->ring.descs +
-- wq->ring.desc_size * buf->index;
-- if (buf->index + 1 == count) {
-- buf->next = wq->bufs[0];
-- break;
-- } else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES(count)) {
-- buf->next = wq->bufs[i + 1];
-- } else {
-- buf->next = buf + 1;
-- buf++;
-- }
-- }
-- }
--
-- wq->to_use = wq->to_clean = wq->bufs[0];
--
-+ unsigned int count = wq->ring.desc_count;
-+ /* Allocate the mbuf ring */
-+ wq->bufs = (struct vnic_wq_buf *)rte_zmalloc_socket("wq->bufs",
-+ sizeof(struct vnic_wq_buf) * count,
-+ RTE_CACHE_LINE_SIZE, wq->socket_id);
-+ wq->head_idx = 0;
-+ wq->tail_idx = 0;
-+ if (wq->bufs == NULL)
-+ return -ENOMEM;
- return 0;
- }
-
- void vnic_wq_free(struct vnic_wq *wq)
- {
- struct vnic_dev *vdev;
-- unsigned int i;
-
- vdev = wq->vdev;
-
- vnic_dev_free_desc_ring(vdev, &wq->ring);
-
-- for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) {
-- if (wq->bufs[i]) {
-- kfree(wq->bufs[i]);
-- wq->bufs[i] = NULL;
-- }
-- }
--
-+ rte_free(wq->bufs);
- wq->ctrl = NULL;
- }
-
--int vnic_wq_mem_size(struct vnic_wq *wq, unsigned int desc_count,
-- unsigned int desc_size)
--{
-- int mem_size = 0;
--
-- mem_size += vnic_dev_desc_ring_size(&wq->ring, desc_count, desc_size);
--
-- mem_size += VNIC_WQ_BUF_BLKS_NEEDED(wq->ring.desc_count) *
-- VNIC_WQ_BUF_BLK_SZ(wq->ring.desc_count);
--
-- return mem_size;
--}
--
-
- int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
- unsigned int desc_count, unsigned int desc_size)
-@@ -172,9 +131,8 @@ void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
- iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
- iowrite32(0, &wq->ctrl->error_status);
-
-- wq->to_use = wq->to_clean =
-- &wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES(count)]
-- [fetch_index % VNIC_WQ_BUF_BLK_ENTRIES(count)];
-+ wq->head_idx = fetch_index;
-+ wq->tail_idx = wq->head_idx;
- }
-
- void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
-@@ -184,6 +142,7 @@ void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
- vnic_wq_init_start(wq, cq_index, 0, 0,
- error_interrupt_enable,
- error_interrupt_offset);
-+ wq->last_completed_index = 0;
- }
-
- void vnic_wq_error_out(struct vnic_wq *wq, unsigned int error)
-@@ -219,22 +178,34 @@ int vnic_wq_disable(struct vnic_wq *wq)
- return -ETIMEDOUT;
- }
-
-+static inline uint32_t
-+buf_idx_incr(uint32_t n_descriptors, uint32_t idx)
-+{
-+ idx++;
-+ if (unlikely(idx == n_descriptors))
-+ idx = 0;
-+ return idx;
-+}
-+
- void vnic_wq_clean(struct vnic_wq *wq,
-- void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
-+ void (*buf_clean)(struct vnic_wq_buf *buf))
- {
- struct vnic_wq_buf *buf;
-+ unsigned int to_clean = wq->tail_idx;
-
-- buf = wq->to_clean;
-+ buf = &wq->bufs[to_clean];
-
- while (vnic_wq_desc_used(wq) > 0) {
-
-- (*buf_clean)(wq, buf);
-+ (*buf_clean)(buf);
-+ to_clean = buf_idx_incr(wq->ring.desc_count, to_clean);
-
-- buf = wq->to_clean = buf->next;
-+ buf = &wq->bufs[to_clean];
- wq->ring.desc_avail++;
- }
-
-- wq->to_use = wq->to_clean = wq->bufs[0];
-+ wq->head_idx = 0;
-+ wq->tail_idx = 0;
-
- iowrite32(0, &wq->ctrl->fetch_index);
- iowrite32(0, &wq->ctrl->posted_index);
-diff --git a/drivers/net/enic/base/vnic_wq.h b/drivers/net/enic/base/vnic_wq.h
-index c23de62..37c3ff9 100644
---- a/drivers/net/enic/base/vnic_wq.h
-+++ b/drivers/net/enic/base/vnic_wq.h
-@@ -64,42 +64,23 @@ struct vnic_wq_ctrl {
- u32 pad9;
- };
-
-+/* 16 bytes */
- struct vnic_wq_buf {
-- struct vnic_wq_buf *next;
-- dma_addr_t dma_addr;
-- void *os_buf;
-- unsigned int len;
-- unsigned int index;
-- int sop;
-- void *desc;
-- uint64_t wr_id; /* Cookie */
-- uint8_t cq_entry; /* Gets completion event from hw */
-- uint8_t desc_skip_cnt; /* Num descs to occupy */
-- uint8_t compressed_send; /* Both hdr and payload in one desc */
-+ struct rte_mempool *pool;
-+ void *mb;
- };
-
--/* Break the vnic_wq_buf allocations into blocks of 32/64 entries */
--#define VNIC_WQ_BUF_MIN_BLK_ENTRIES 32
--#define VNIC_WQ_BUF_DFLT_BLK_ENTRIES 64
--#define VNIC_WQ_BUF_BLK_ENTRIES(entries) \
-- ((unsigned int)((entries < VNIC_WQ_BUF_DFLT_BLK_ENTRIES) ? \
-- VNIC_WQ_BUF_MIN_BLK_ENTRIES : VNIC_WQ_BUF_DFLT_BLK_ENTRIES))
--#define VNIC_WQ_BUF_BLK_SZ(entries) \
-- (VNIC_WQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_wq_buf))
--#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
-- DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES(entries))
--#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
--
- struct vnic_wq {
- unsigned int index;
- struct vnic_dev *vdev;
- struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */
- struct vnic_dev_ring ring;
-- struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX];
-- struct vnic_wq_buf *to_use;
-- struct vnic_wq_buf *to_clean;
-- unsigned int pkts_outstanding;
-+ struct vnic_wq_buf *bufs;
-+ unsigned int head_idx;
-+ unsigned int tail_idx;
- unsigned int socket_id;
-+ const struct rte_memzone *cqmsg_rz;
-+ uint16_t last_completed_index;
- };
-
- static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
-@@ -114,11 +95,6 @@ static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq)
- return wq->ring.desc_count - wq->ring.desc_avail - 1;
- }
-
--static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
--{
-- return wq->to_use->desc;
--}
--
- #define PI_LOG2_CACHE_LINE_SIZE 5
- #define PI_INDEX_BITS 12
- #define PI_INDEX_MASK ((1U << PI_INDEX_BITS) - 1)
-@@ -191,75 +167,6 @@ static inline u64 vnic_cached_posted_index(dma_addr_t addr, unsigned int len,
- PI_PREFETCH_ADDR_MASK) << PI_PREFETCH_ADDR_OFF);
- }
-
--static inline void vnic_wq_post(struct vnic_wq *wq,
-- void *os_buf, dma_addr_t dma_addr,
-- unsigned int len, int sop, int eop,
-- uint8_t desc_skip_cnt, uint8_t cq_entry,
-- uint8_t compressed_send, uint64_t wrid)
--{
-- struct vnic_wq_buf *buf = wq->to_use;
--
-- buf->sop = sop;
-- buf->cq_entry = cq_entry;
-- buf->compressed_send = compressed_send;
-- buf->desc_skip_cnt = desc_skip_cnt;
-- buf->os_buf = os_buf;
-- buf->dma_addr = dma_addr;
-- buf->len = len;
-- buf->wr_id = wrid;
--
-- buf = buf->next;
-- if (eop) {
--#ifdef DO_PREFETCH
-- uint64_t wr = vnic_cached_posted_index(dma_addr, len,
-- buf->index);
--#endif
-- /* Adding write memory barrier prevents compiler and/or CPU
-- * reordering, thus avoiding descriptor posting before
-- * descriptor is initialized. Otherwise, hardware can read
-- * stale descriptor fields.
-- */
-- wmb();
--#ifdef DO_PREFETCH
-- /* Intel chipsets seem to limit the rate of PIOs that we can
-- * push on the bus. Thus, it is very important to do a single
-- * 64 bit write here. With two 32-bit writes, my maximum
-- * pkt/sec rate was cut almost in half. -AJF
-- */
-- iowrite64((uint64_t)wr, &wq->ctrl->posted_index);
--#else
-- iowrite32(buf->index, &wq->ctrl->posted_index);
--#endif
-- }
-- wq->to_use = buf;
--
-- wq->ring.desc_avail -= desc_skip_cnt;
--}
--
--static inline void vnic_wq_service(struct vnic_wq *wq,
-- struct cq_desc *cq_desc, u16 completed_index,
-- void (*buf_service)(struct vnic_wq *wq,
-- struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque),
-- void *opaque)
--{
-- struct vnic_wq_buf *buf;
--
-- buf = wq->to_clean;
-- while (1) {
--
-- (*buf_service)(wq, cq_desc, buf, opaque);
--
-- wq->ring.desc_avail++;
--
-- wq->to_clean = buf->next;
--
-- if (buf->index == completed_index)
-- break;
--
-- buf = wq->to_clean;
-- }
--}
--
- void vnic_wq_free(struct vnic_wq *wq);
- int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
- unsigned int desc_count, unsigned int desc_size);
-@@ -275,8 +182,6 @@ unsigned int vnic_wq_error_status(struct vnic_wq *wq);
- void vnic_wq_enable(struct vnic_wq *wq);
- int vnic_wq_disable(struct vnic_wq *wq);
- void vnic_wq_clean(struct vnic_wq *wq,
-- void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
--int vnic_wq_mem_size(struct vnic_wq *wq, unsigned int desc_count,
-- unsigned int desc_size);
-+ void (*buf_clean)(struct vnic_wq_buf *buf));
-
- #endif /* _VNIC_WQ_H_ */
-diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
-index 8c914f5..43b82a6 100644
---- a/drivers/net/enic/enic.h
-+++ b/drivers/net/enic/enic.h
-@@ -155,6 +155,30 @@ static inline struct enic *pmd_priv(struct rte_eth_dev *eth_dev)
- return (struct enic *)eth_dev->data->dev_private;
- }
-
-+static inline uint32_t
-+enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
-+{
-+ uint32_t d = i0 + i1;
-+ d -= (d >= n_descriptors) ? n_descriptors : 0;
-+ return d;
-+}
-+
-+static inline uint32_t
-+enic_ring_sub(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
-+{
-+ int32_t d = i1 - i0;
-+ return (uint32_t)((d < 0) ? ((int32_t)n_descriptors + d) : d);
-+}
-+
-+static inline uint32_t
-+enic_ring_incr(uint32_t n_descriptors, uint32_t idx)
-+{
-+ idx++;
-+ if (unlikely(idx == n_descriptors))
-+ idx = 0;
-+ return idx;
-+}
-+
- #define RTE_LIBRTE_ENIC_ASSERT_ENABLE
- #ifdef RTE_LIBRTE_ENIC_ASSERT_ENABLE
- #define ASSERT(x) do { \
-@@ -209,5 +233,6 @@ extern int enic_clsf_init(struct enic *enic);
- extern void enic_clsf_destroy(struct enic *enic);
- uint16_t enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts);
--
-+uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
-+ uint16_t nb_pkts);
- #endif /* _ENIC_H_ */
-diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
-index 6bea940..697ff82 100644
---- a/drivers/net/enic/enic_ethdev.c
-+++ b/drivers/net/enic/enic_ethdev.c
-@@ -519,71 +519,6 @@ static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, __rte_unused ui
- enic_del_mac_address(enic);
- }
-
--
--static uint16_t enicpmd_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
-- uint16_t nb_pkts)
--{
-- uint16_t index;
-- unsigned int frags;
-- unsigned int pkt_len;
-- unsigned int seg_len;
-- unsigned int inc_len;
-- unsigned int nb_segs;
-- struct rte_mbuf *tx_pkt, *next_tx_pkt;
-- struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
-- struct enic *enic = vnic_dev_priv(wq->vdev);
-- unsigned short vlan_id;
-- unsigned short ol_flags;
-- uint8_t last_seg, eop;
-- unsigned int host_tx_descs = 0;
--
-- for (index = 0; index < nb_pkts; index++) {
-- tx_pkt = *tx_pkts++;
-- inc_len = 0;
-- nb_segs = tx_pkt->nb_segs;
-- if (nb_segs > vnic_wq_desc_avail(wq)) {
-- if (index > 0)
-- enic_post_wq_index(wq);
--
-- /* wq cleanup and try again */
-- if (!enic_cleanup_wq(enic, wq) ||
-- (nb_segs > vnic_wq_desc_avail(wq))) {
-- return index;
-- }
-- }
--
-- pkt_len = tx_pkt->pkt_len;
-- vlan_id = tx_pkt->vlan_tci;
-- ol_flags = tx_pkt->ol_flags;
-- for (frags = 0; inc_len < pkt_len; frags++) {
-- if (!tx_pkt)
-- break;
-- next_tx_pkt = tx_pkt->next;
-- seg_len = tx_pkt->data_len;
-- inc_len += seg_len;
--
-- host_tx_descs++;
-- last_seg = 0;
-- eop = 0;
-- if ((pkt_len == inc_len) || !next_tx_pkt) {
-- eop = 1;
-- /* post if last packet in batch or > thresh */
-- if ((index == (nb_pkts - 1)) ||
-- (host_tx_descs > ENIC_TX_POST_THRESH)) {
-- last_seg = 1;
-- host_tx_descs = 0;
-- }
-- }
-- enic_send_pkt(enic, wq, tx_pkt, (unsigned short)seg_len,
-- !frags, eop, last_seg, ol_flags, vlan_id);
-- tx_pkt = next_tx_pkt;
-- }
-- }
--
-- enic_cleanup_wq(enic, wq);
-- return index;
--}
--
- static const struct eth_dev_ops enicpmd_eth_dev_ops = {
- .dev_configure = enicpmd_dev_configure,
- .dev_start = enicpmd_dev_start,
-@@ -642,7 +577,7 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
- enic->rte_dev = eth_dev;
- eth_dev->dev_ops = &enicpmd_eth_dev_ops;
- eth_dev->rx_pkt_burst = &enic_recv_pkts;
-- eth_dev->tx_pkt_burst = &enicpmd_xmit_pkts;
-+ eth_dev->tx_pkt_burst = &enic_xmit_pkts;
-
- pdev = eth_dev->pci_dev;
- rte_eth_copy_pci_info(eth_dev, pdev);
-diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
-index 646d87f..ba73604 100644
---- a/drivers/net/enic/enic_main.c
-+++ b/drivers/net/enic/enic_main.c
-@@ -40,11 +40,11 @@
- #include <libgen.h>
-
- #include <rte_pci.h>
--#include <rte_memzone.h>
- #include <rte_malloc.h>
- #include <rte_mbuf.h>
- #include <rte_string_fns.h>
- #include <rte_ethdev.h>
-+#include <rte_memzone.h>
-
- #include "enic_compat.h"
- #include "enic.h"
-@@ -58,7 +58,6 @@
- #include "vnic_cq.h"
- #include "vnic_intr.h"
- #include "vnic_nic.h"
--#include "enic_vnic_wq.h"
-
- static inline struct rte_mbuf *
- rte_rxmbuf_alloc(struct rte_mempool *mp)
-@@ -109,38 +108,17 @@ enic_rxmbuf_queue_release(struct enic *enic, struct vnic_rq *rq)
- }
- }
-
--
- void enic_set_hdr_split_size(struct enic *enic, u16 split_hdr_size)
- {
- vnic_set_hdr_split_size(enic->vdev, split_hdr_size);
- }
-
--static void enic_free_wq_buf(__rte_unused struct vnic_wq *wq, struct vnic_wq_buf *buf)
-+static void enic_free_wq_buf(struct vnic_wq_buf *buf)
- {
-- struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->os_buf;
-+ struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->mb;
-
- rte_mempool_put(mbuf->pool, mbuf);
-- buf->os_buf = NULL;
--}
--
--static void enic_wq_free_buf(struct vnic_wq *wq,
-- __rte_unused struct cq_desc *cq_desc,
-- struct vnic_wq_buf *buf,
-- __rte_unused void *opaque)
--{
-- enic_free_wq_buf(wq, buf);
--}
--
--static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
-- __rte_unused u8 type, u16 q_number, u16 completed_index, void *opaque)
--{
-- struct enic *enic = vnic_dev_priv(vdev);
--
-- vnic_wq_service(&enic->wq[q_number], cq_desc,
-- completed_index, enic_wq_free_buf,
-- opaque);
--
-- return 0;
-+ buf->mb = NULL;
- }
-
- static void enic_log_q_error(struct enic *enic)
-@@ -163,64 +141,6 @@ static void enic_log_q_error(struct enic *enic)
- }
- }
-
--unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq)
--{
-- unsigned int cq = enic_cq_wq(enic, wq->index);
--
-- /* Return the work done */
-- return vnic_cq_service(&enic->cq[cq],
-- -1 /*wq_work_to_do*/, enic_wq_service, NULL);
--}
--
--void enic_post_wq_index(struct vnic_wq *wq)
--{
-- enic_vnic_post_wq_index(wq);
--}
--
--void enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
-- struct rte_mbuf *tx_pkt, unsigned short len,
-- uint8_t sop, uint8_t eop, uint8_t cq_entry,
-- uint16_t ol_flags, uint16_t vlan_tag)
--{
-- struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
-- uint16_t mss = 0;
-- uint8_t vlan_tag_insert = 0;
-- uint64_t bus_addr = (dma_addr_t)
-- (tx_pkt->buf_physaddr + tx_pkt->data_off);
--
-- if (sop) {
-- if (ol_flags & PKT_TX_VLAN_PKT)
-- vlan_tag_insert = 1;
--
-- if (enic->hw_ip_checksum) {
-- if (ol_flags & PKT_TX_IP_CKSUM)
-- mss |= ENIC_CALC_IP_CKSUM;
--
-- if (ol_flags & PKT_TX_TCP_UDP_CKSUM)
-- mss |= ENIC_CALC_TCP_UDP_CKSUM;
-- }
-- }
--
-- wq_enet_desc_enc(desc,
-- bus_addr,
-- len,
-- mss,
-- 0 /* header_length */,
-- 0 /* offload_mode WQ_ENET_OFFLOAD_MODE_CSUM */,
-- eop,
-- cq_entry,
-- 0 /* fcoe_encap */,
-- vlan_tag_insert,
-- vlan_tag,
-- 0 /* loopback */);
--
-- enic_vnic_post_wq(wq, (void *)tx_pkt, bus_addr, len,
-- sop,
-- 1 /*desc_skip_cnt*/,
-- cq_entry,
-- 0 /*compressed send*/,
-- 0 /*wrid*/);
--}
-
- void enic_dev_stats_clear(struct enic *enic)
- {
-@@ -297,12 +217,28 @@ void enic_init_vnic_resources(struct enic *enic)
- unsigned int error_interrupt_enable = 1;
- unsigned int error_interrupt_offset = 0;
- unsigned int index = 0;
-+ unsigned int cq_idx;
-+
-+ vnic_dev_stats_clear(enic->vdev);
-
- for (index = 0; index < enic->rq_count; index++) {
- vnic_rq_init(&enic->rq[index],
- enic_cq_rq(enic, index),
- error_interrupt_enable,
- error_interrupt_offset);
-+
-+ cq_idx = enic_cq_rq(enic, index);
-+ vnic_cq_init(&enic->cq[cq_idx],
-+ 0 /* flow_control_enable */,
-+ 1 /* color_enable */,
-+ 0 /* cq_head */,
-+ 0 /* cq_tail */,
-+ 1 /* cq_tail_color */,
-+ 0 /* interrupt_enable */,
-+ 1 /* cq_entry_enable */,
-+ 0 /* cq_message_enable */,
-+ 0 /* interrupt offset */,
-+ 0 /* cq_message_addr */);
- }
-
- for (index = 0; index < enic->wq_count; index++) {
-@@ -310,22 +246,19 @@ void enic_init_vnic_resources(struct enic *enic)
- enic_cq_wq(enic, index),
- error_interrupt_enable,
- error_interrupt_offset);
-- }
--
-- vnic_dev_stats_clear(enic->vdev);
-
-- for (index = 0; index < enic->cq_count; index++) {
-- vnic_cq_init(&enic->cq[index],
-+ cq_idx = enic_cq_wq(enic, index);
-+ vnic_cq_init(&enic->cq[cq_idx],
- 0 /* flow_control_enable */,
- 1 /* color_enable */,
- 0 /* cq_head */,
- 0 /* cq_tail */,
- 1 /* cq_tail_color */,
- 0 /* interrupt_enable */,
-- 1 /* cq_entry_enable */,
-- 0 /* cq_message_enable */,
-+ 0 /* cq_entry_enable */,
-+ 1 /* cq_message_enable */,
- 0 /* interrupt offset */,
-- 0 /* cq_message_addr */);
-+ (u64)enic->wq[index].cqmsg_rz->phys_addr);
- }
-
- vnic_intr_init(&enic->intr,
-@@ -569,6 +502,7 @@ void enic_free_wq(void *txq)
- struct vnic_wq *wq = (struct vnic_wq *)txq;
- struct enic *enic = vnic_dev_priv(wq->vdev);
-
-+ rte_memzone_free(wq->cqmsg_rz);
- vnic_wq_free(wq);
- vnic_cq_free(&enic->cq[enic->rq_count + wq->index]);
- }
-@@ -579,6 +513,8 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
- int err;
- struct vnic_wq *wq = &enic->wq[queue_idx];
- unsigned int cq_index = enic_cq_wq(enic, queue_idx);
-+ char name[NAME_MAX];
-+ static int instance;
-
- wq->socket_id = socket_id;
- if (nb_desc) {
-@@ -614,6 +550,18 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
- dev_err(enic, "error in allocation of cq for wq\n");
- }
-
-+ /* setup up CQ message */
-+ snprintf((char *)name, sizeof(name),
-+ "vnic_cqmsg-%s-%d-%d", enic->bdf_name, queue_idx,
-+ instance++);
-+
-+ wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name,
-+ sizeof(uint32_t),
-+ SOCKET_ID_ANY, 0,
-+ ENIC_ALIGN);
-+ if (!wq->cqmsg_rz)
-+ return -ENOMEM;
-+
- return err;
- }
-
-diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h
-index 00fa71d..3e1bdf5 100644
---- a/drivers/net/enic/enic_res.h
-+++ b/drivers/net/enic/enic_res.h
-@@ -53,89 +53,10 @@
-
- #define ENIC_NON_TSO_MAX_DESC 16
- #define ENIC_DEFAULT_RX_FREE_THRESH 32
--#define ENIC_TX_POST_THRESH (ENIC_MIN_WQ_DESCS / 2)
-+#define ENIC_TX_XMIT_MAX 64
-
- #define ENIC_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0)
-
--static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq,
-- void *os_buf, dma_addr_t dma_addr, unsigned int len,
-- unsigned int mss_or_csum_offset, unsigned int hdr_len,
-- int vlan_tag_insert, unsigned int vlan_tag,
-- int offload_mode, int cq_entry, int sop, int eop, int loopback)
--{
-- struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
-- u8 desc_skip_cnt = 1;
-- u8 compressed_send = 0;
-- u64 wrid = 0;
--
-- wq_enet_desc_enc(desc,
-- (u64)dma_addr | VNIC_PADDR_TARGET,
-- (u16)len,
-- (u16)mss_or_csum_offset,
-- (u16)hdr_len, (u8)offload_mode,
-- (u8)eop, (u8)cq_entry,
-- 0, /* fcoe_encap */
-- (u8)vlan_tag_insert,
-- (u16)vlan_tag,
-- (u8)loopback);
--
-- vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop, desc_skip_cnt,
-- (u8)cq_entry, compressed_send, wrid);
--}
--
--static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq,
-- void *os_buf, dma_addr_t dma_addr, unsigned int len,
-- int eop, int loopback)
--{
-- enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
-- 0, 0, 0, 0, 0,
-- eop, 0 /* !SOP */, eop, loopback);
--}
--
--static inline void enic_queue_wq_desc(struct vnic_wq *wq, void *os_buf,
-- dma_addr_t dma_addr, unsigned int len, int vlan_tag_insert,
-- unsigned int vlan_tag, int eop, int loopback)
--{
-- enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
-- 0, 0, vlan_tag_insert, vlan_tag,
-- WQ_ENET_OFFLOAD_MODE_CSUM,
-- eop, 1 /* SOP */, eop, loopback);
--}
--
--static inline void enic_queue_wq_desc_csum(struct vnic_wq *wq,
-- void *os_buf, dma_addr_t dma_addr, unsigned int len,
-- int ip_csum, int tcpudp_csum, int vlan_tag_insert,
-- unsigned int vlan_tag, int eop, int loopback)
--{
-- enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
-- (ip_csum ? 1 : 0) + (tcpudp_csum ? 2 : 0),
-- 0, vlan_tag_insert, vlan_tag,
-- WQ_ENET_OFFLOAD_MODE_CSUM,
-- eop, 1 /* SOP */, eop, loopback);
--}
--
--static inline void enic_queue_wq_desc_csum_l4(struct vnic_wq *wq,
-- void *os_buf, dma_addr_t dma_addr, unsigned int len,
-- unsigned int csum_offset, unsigned int hdr_len,
-- int vlan_tag_insert, unsigned int vlan_tag, int eop, int loopback)
--{
-- enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
-- csum_offset, hdr_len, vlan_tag_insert, vlan_tag,
-- WQ_ENET_OFFLOAD_MODE_CSUM_L4,
-- eop, 1 /* SOP */, eop, loopback);
--}
--
--static inline void enic_queue_wq_desc_tso(struct vnic_wq *wq,
-- void *os_buf, dma_addr_t dma_addr, unsigned int len,
-- unsigned int mss, unsigned int hdr_len, int vlan_tag_insert,
-- unsigned int vlan_tag, int eop, int loopback)
--{
-- enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
-- mss, hdr_len, vlan_tag_insert, vlan_tag,
-- WQ_ENET_OFFLOAD_MODE_TSO,
-- eop, 1 /* SOP */, eop, loopback);
--}
--
- struct enic;
-
- int enic_get_vnic_config(struct enic *);
-diff --git a/drivers/net/enic/enic_rx.c b/drivers/net/enic/enic_rx.c
-deleted file mode 100644
-index 39bb55c..0000000
---- a/drivers/net/enic/enic_rx.c
-+++ /dev/null
-@@ -1,361 +0,0 @@
--/*
-- * Copyright 2008-2014 Cisco Systems, Inc. All rights reserved.
-- * Copyright 2007 Nuova Systems, Inc. All rights reserved.
-- *
-- * Copyright (c) 2014, Cisco Systems, Inc.
-- * All rights reserved.
-- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions
-- * are met:
-- *
-- * 1. Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- *
-- * 2. Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in
-- * the documentation and/or other materials provided with the
-- * distribution.
-- *
-- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-- * POSSIBILITY OF SUCH DAMAGE.
-- *
-- */
--
--#include <rte_mbuf.h>
--#include <rte_ethdev.h>
--#include <rte_prefetch.h>
--
--#include "enic_compat.h"
--#include "rq_enet_desc.h"
--#include "enic.h"
--
--#define RTE_PMD_USE_PREFETCH
--
--#ifdef RTE_PMD_USE_PREFETCH
--/*
-- * Prefetch a cache line into all cache levels.
-- */
--#define rte_enic_prefetch(p) rte_prefetch0(p)
--#else
--#define rte_enic_prefetch(p) do {} while (0)
--#endif
--
--#ifdef RTE_PMD_PACKET_PREFETCH
--#define rte_packet_prefetch(p) rte_prefetch1(p)
--#else
--#define rte_packet_prefetch(p) do {} while (0)
--#endif
--
--static inline struct rte_mbuf *
--rte_rxmbuf_alloc(struct rte_mempool *mp)
--{
-- struct rte_mbuf *m;
--
-- m = __rte_mbuf_raw_alloc(mp);
-- __rte_mbuf_sanity_check_raw(m, 0);
-- return m;
--}
--
--static inline uint16_t
--enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
--{
-- return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;
--}
--
--static inline uint16_t
--enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
--{
-- return(le16_to_cpu(crd->bytes_written_flags) &
-- ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK);
--}
--
--static inline uint8_t
--enic_cq_rx_desc_packet_error(uint16_t bwflags)
--{
-- return((bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
-- CQ_ENET_RQ_DESC_FLAGS_TRUNCATED);
--}
--
--static inline uint8_t
--enic_cq_rx_desc_eop(uint16_t ciflags)
--{
-- return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
-- == CQ_ENET_RQ_DESC_FLAGS_EOP;
--}
--
--static inline uint8_t
--enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
--{
-- return ((le16_to_cpu(cqrd->q_number_rss_type_flags) &
-- CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
-- CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC);
--}
--
--static inline uint8_t
--enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
--{
-- return ((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
-- CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK);
--}
--
--static inline uint8_t
--enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
--{
-- return((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
-- CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK);
--}
--
--static inline uint8_t
--enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
--{
-- return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
-- CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
--}
--
--static inline uint32_t
--enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
--{
-- return le32_to_cpu(cqrd->rss_hash);
--}
--
--static inline uint16_t
--enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
--{
-- return le16_to_cpu(cqrd->vlan);
--}
--
--static inline uint16_t
--enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
--{
-- struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
-- return le16_to_cpu(cqrd->bytes_written_flags) &
-- CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
--}
--
--static inline uint8_t
--enic_cq_rx_to_pkt_err_flags(struct cq_desc *cqd, uint64_t *pkt_err_flags_out)
--{
-- struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
-- uint16_t bwflags;
-- int ret = 0;
-- uint64_t pkt_err_flags = 0;
--
-- bwflags = enic_cq_rx_desc_bwflags(cqrd);
-- if (unlikely(enic_cq_rx_desc_packet_error(bwflags))) {
-- pkt_err_flags = PKT_RX_MAC_ERR;
-- ret = 1;
-- }
-- *pkt_err_flags_out = pkt_err_flags;
-- return ret;
--}
--
--/*
-- * Lookup table to translate RX CQ flags to mbuf flags.
-- */
--static inline uint32_t
--enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd)
--{
-- struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
-- uint8_t cqrd_flags = cqrd->flags;
-- static const uint32_t cq_type_table[128] __rte_cache_aligned = {
-- [32] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
-- [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
-- | RTE_PTYPE_L4_UDP,
-- [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
-- | RTE_PTYPE_L4_TCP,
-- [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
-- | RTE_PTYPE_L4_FRAG,
-- [16] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
-- [18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
-- | RTE_PTYPE_L4_UDP,
-- [20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
-- | RTE_PTYPE_L4_TCP,
-- [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
-- | RTE_PTYPE_L4_FRAG,
-- /* All others reserved */
-- };
-- cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
-- | CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
-- | CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
-- return cq_type_table[cqrd_flags];
--}
--
--static inline void
--enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
--{
-- struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
-- uint16_t ciflags, bwflags, pkt_flags = 0;
-- ciflags = enic_cq_rx_desc_ciflags(cqrd);
-- bwflags = enic_cq_rx_desc_bwflags(cqrd);
--
-- mbuf->ol_flags = 0;
--
-- /* flags are meaningless if !EOP */
-- if (unlikely(!enic_cq_rx_desc_eop(ciflags)))
-- goto mbuf_flags_done;
--
-- /* VLAN stripping */
-- if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
-- pkt_flags |= PKT_RX_VLAN_PKT;
-- mbuf->vlan_tci = enic_cq_rx_desc_vlan(cqrd);
-- } else {
-- mbuf->vlan_tci = 0;
-- }
--
-- /* RSS flag */
-- if (enic_cq_rx_desc_rss_type(cqrd)) {
-- pkt_flags |= PKT_RX_RSS_HASH;
-- mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
-- }
--
-- /* checksum flags */
-- if (!enic_cq_rx_desc_csum_not_calc(cqrd) &&
-- (mbuf->packet_type & RTE_PTYPE_L3_IPV4)) {
-- if (unlikely(!enic_cq_rx_desc_ipv4_csum_ok(cqrd)))
-- pkt_flags |= PKT_RX_IP_CKSUM_BAD;
-- if (mbuf->packet_type & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)) {
-- if (unlikely(!enic_cq_rx_desc_tcp_udp_csum_ok(cqrd)))
-- pkt_flags |= PKT_RX_L4_CKSUM_BAD;
-- }
-- }
--
-- mbuf_flags_done:
-- mbuf->ol_flags = pkt_flags;
--}
--
--static inline uint32_t
--enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
--{
-- uint32_t d = i0 + i1;
-- ASSERT(i0 < n_descriptors);
-- ASSERT(i1 < n_descriptors);
-- d -= (d >= n_descriptors) ? n_descriptors : 0;
-- return d;
--}
--
--
--uint16_t
--enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
-- uint16_t nb_pkts)
--{
-- struct vnic_rq *rq = rx_queue;
-- struct enic *enic = vnic_dev_priv(rq->vdev);
-- unsigned int rx_id;
-- struct rte_mbuf *nmb, *rxmb;
-- uint16_t nb_rx = 0;
-- uint16_t nb_hold;
-- struct vnic_cq *cq;
-- volatile struct cq_desc *cqd_ptr;
-- uint8_t color;
--
-- cq = &enic->cq[enic_cq_rq(enic, rq->index)];
-- rx_id = cq->to_clean; /* index of cqd, rqd, mbuf_table */
-- cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;
--
-- nb_hold = rq->rx_nb_hold; /* mbufs held by software */
--
-- while (nb_rx < nb_pkts) {
-- volatile struct rq_enet_desc *rqd_ptr;
-- dma_addr_t dma_addr;
-- struct cq_desc cqd;
-- uint64_t ol_err_flags;
-- uint8_t packet_error;
--
-- /* Check for pkts available */
-- color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)
-- & CQ_DESC_COLOR_MASK;
-- if (color == cq->last_color)
-- break;
--
-- /* Get the cq descriptor and rq pointer */
-- cqd = *cqd_ptr;
-- rqd_ptr = (struct rq_enet_desc *)(rq->ring.descs) + rx_id;
--
-- /* allocate a new mbuf */
-- nmb = rte_rxmbuf_alloc(rq->mp);
-- if (nmb == NULL) {
-- dev_err(enic, "RX mbuf alloc failed port=%u qid=%u",
-- enic->port_id, (unsigned)rq->index);
-- rte_eth_devices[enic->port_id].
-- data->rx_mbuf_alloc_failed++;
-- break;
-- }
--
-- /* A packet error means descriptor and data are untrusted */
-- packet_error = enic_cq_rx_to_pkt_err_flags(&cqd, &ol_err_flags);
--
-- /* Get the mbuf to return and replace with one just allocated */
-- rxmb = rq->mbuf_ring[rx_id];
-- rq->mbuf_ring[rx_id] = nmb;
--
-- /* Increment cqd, rqd, mbuf_table index */
-- rx_id++;
-- if (unlikely(rx_id == rq->ring.desc_count)) {
-- rx_id = 0;
-- cq->last_color = cq->last_color ? 0 : 1;
-- }
--
-- /* Prefetch next mbuf & desc while processing current one */
-- cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;
-- rte_enic_prefetch(cqd_ptr);
-- rte_enic_prefetch(rq->mbuf_ring[rx_id]);
-- rte_enic_prefetch((struct rq_enet_desc *)(rq->ring.descs)
-- + rx_id);
--
-- /* Push descriptor for newly allocated mbuf */
-- dma_addr = (dma_addr_t)(nmb->buf_physaddr
-- + RTE_PKTMBUF_HEADROOM);
-- rqd_ptr->address = rte_cpu_to_le_64(dma_addr);
-- rqd_ptr->length_type = cpu_to_le16(nmb->buf_len
-- - RTE_PKTMBUF_HEADROOM);
--
-- /* Fill in the rest of the mbuf */
-- rxmb->data_off = RTE_PKTMBUF_HEADROOM;
-- rxmb->nb_segs = 1;
-- rxmb->next = NULL;
-- rxmb->port = enic->port_id;
-- if (!packet_error) {
-- rxmb->pkt_len = enic_cq_rx_desc_n_bytes(&cqd);
-- rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
-- enic_cq_rx_to_pkt_flags(&cqd, rxmb);
-- } else {
-- rxmb->pkt_len = 0;
-- rxmb->packet_type = 0;
-- rxmb->ol_flags = 0;
-- }
-- rxmb->data_len = rxmb->pkt_len;
--
-- /* prefetch mbuf data for caller */
-- rte_packet_prefetch(RTE_PTR_ADD(rxmb->buf_addr,
-- RTE_PKTMBUF_HEADROOM));
--
-- /* store the mbuf address into the next entry of the array */
-- rx_pkts[nb_rx++] = rxmb;
-- }
--
-- nb_hold += nb_rx;
-- cq->to_clean = rx_id;
--
-- if (nb_hold > rq->rx_free_thresh) {
-- rq->posted_index = enic_ring_add(rq->ring.desc_count,
-- rq->posted_index, nb_hold);
-- nb_hold = 0;
-- rte_mb();
-- iowrite32(rq->posted_index, &rq->ctrl->posted_index);
-- }
--
-- rq->rx_nb_hold = nb_hold;
--
-- return nb_rx;
--}
-diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
-new file mode 100644
-index 0000000..71ca34e
---- /dev/null
-+++ b/drivers/net/enic/enic_rxtx.c
-@@ -0,0 +1,505 @@
-+/*
-+ * Copyright 2008-2016 Cisco Systems, Inc. All rights reserved.
-+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
-+ *
-+ * Copyright (c) 2016, Cisco Systems, Inc.
-+ * All rights reserved.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions
-+ * are met:
-+ *
-+ * 1. Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ *
-+ * 2. Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in
-+ * the documentation and/or other materials provided with the
-+ * distribution.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ *
-+ */
-+
-+#include <rte_mbuf.h>
-+#include <rte_ethdev.h>
-+#include <rte_prefetch.h>
-+#include <rte_memzone.h>
-+
-+#include "enic_compat.h"
-+#include "rq_enet_desc.h"
-+#include "enic.h"
-+
-+#define RTE_PMD_USE_PREFETCH
-+
-+#ifdef RTE_PMD_USE_PREFETCH
-+/*
-+ * Prefetch a cache line into all cache levels.
-+ */
-+#define rte_enic_prefetch(p) rte_prefetch0(p)
-+#else
-+#define rte_enic_prefetch(p) do {} while (0)
-+#endif
-+
-+#ifdef RTE_PMD_PACKET_PREFETCH
-+#define rte_packet_prefetch(p) rte_prefetch1(p)
-+#else
-+#define rte_packet_prefetch(p) do {} while (0)
-+#endif
-+
-+static inline struct rte_mbuf *
-+rte_rxmbuf_alloc(struct rte_mempool *mp)
-+{
-+ struct rte_mbuf *m;
-+
-+ m = __rte_mbuf_raw_alloc(mp);
-+ __rte_mbuf_sanity_check_raw(m, 0);
-+ return m;
-+}
-+
-+static inline uint16_t
-+enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
-+{
-+ return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;
-+}
-+
-+static inline uint16_t
-+enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
-+{
-+ return(le16_to_cpu(crd->bytes_written_flags) &
-+ ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK);
-+}
-+
-+static inline uint8_t
-+enic_cq_rx_desc_packet_error(uint16_t bwflags)
-+{
-+ return((bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
-+ CQ_ENET_RQ_DESC_FLAGS_TRUNCATED);
-+}
-+
-+static inline uint8_t
-+enic_cq_rx_desc_eop(uint16_t ciflags)
-+{
-+ return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
-+ == CQ_ENET_RQ_DESC_FLAGS_EOP;
-+}
-+
-+static inline uint8_t
-+enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
-+{
-+ return ((le16_to_cpu(cqrd->q_number_rss_type_flags) &
-+ CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
-+ CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC);
-+}
-+
-+static inline uint8_t
-+enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
-+{
-+ return ((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
-+ CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK);
-+}
-+
-+static inline uint8_t
-+enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
-+{
-+ return((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
-+ CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK);
-+}
-+
-+static inline uint8_t
-+enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
-+{
-+ return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
-+ CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
-+}
-+
-+static inline uint32_t
-+enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
-+{
-+ return le32_to_cpu(cqrd->rss_hash);
-+}
-+
-+static inline uint16_t
-+enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
-+{
-+ return le16_to_cpu(cqrd->vlan);
-+}
-+
-+static inline uint16_t
-+enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
-+{
-+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
-+ return le16_to_cpu(cqrd->bytes_written_flags) &
-+ CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
-+}
-+
-+static inline uint8_t
-+enic_cq_rx_to_pkt_err_flags(struct cq_desc *cqd, uint64_t *pkt_err_flags_out)
-+{
-+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
-+ uint16_t bwflags;
-+ int ret = 0;
-+ uint64_t pkt_err_flags = 0;
-+
-+ bwflags = enic_cq_rx_desc_bwflags(cqrd);
-+ if (unlikely(enic_cq_rx_desc_packet_error(bwflags))) {
-+ pkt_err_flags = PKT_RX_MAC_ERR;
-+ ret = 1;
-+ }
-+ *pkt_err_flags_out = pkt_err_flags;
-+ return ret;
-+}
-+
-+/*
-+ * Lookup table to translate RX CQ flags to mbuf flags.
-+ */
-+static inline uint32_t
-+enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd)
-+{
-+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
-+ uint8_t cqrd_flags = cqrd->flags;
-+ static const uint32_t cq_type_table[128] __rte_cache_aligned = {
-+ [32] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
-+ [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
-+ | RTE_PTYPE_L4_UDP,
-+ [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
-+ | RTE_PTYPE_L4_TCP,
-+ [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
-+ | RTE_PTYPE_L4_FRAG,
-+ [16] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
-+ [18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
-+ | RTE_PTYPE_L4_UDP,
-+ [20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
-+ | RTE_PTYPE_L4_TCP,
-+ [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
-+ | RTE_PTYPE_L4_FRAG,
-+ /* All others reserved */
-+ };
-+ cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
-+ | CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
-+ | CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
-+ return cq_type_table[cqrd_flags];
-+}
-+
-+static inline void
-+enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
-+{
-+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
-+ uint16_t ciflags, bwflags, pkt_flags = 0;
-+ ciflags = enic_cq_rx_desc_ciflags(cqrd);
-+ bwflags = enic_cq_rx_desc_bwflags(cqrd);
-+
-+ mbuf->ol_flags = 0;
-+
-+ /* flags are meaningless if !EOP */
-+ if (unlikely(!enic_cq_rx_desc_eop(ciflags)))
-+ goto mbuf_flags_done;
-+
-+ /* VLAN stripping */
-+ if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
-+ pkt_flags |= PKT_RX_VLAN_PKT;
-+ mbuf->vlan_tci = enic_cq_rx_desc_vlan(cqrd);
-+ } else {
-+ mbuf->vlan_tci = 0;
-+ }
-+
-+ /* RSS flag */
-+ if (enic_cq_rx_desc_rss_type(cqrd)) {
-+ pkt_flags |= PKT_RX_RSS_HASH;
-+ mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
-+ }
-+
-+ /* checksum flags */
-+ if (!enic_cq_rx_desc_csum_not_calc(cqrd) &&
-+ (mbuf->packet_type & RTE_PTYPE_L3_IPV4)) {
-+ if (unlikely(!enic_cq_rx_desc_ipv4_csum_ok(cqrd)))
-+ pkt_flags |= PKT_RX_IP_CKSUM_BAD;
-+ if (mbuf->packet_type & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)) {
-+ if (unlikely(!enic_cq_rx_desc_tcp_udp_csum_ok(cqrd)))
-+ pkt_flags |= PKT_RX_L4_CKSUM_BAD;
-+ }
-+ }
-+
-+ mbuf_flags_done:
-+ mbuf->ol_flags = pkt_flags;
-+}
-+
-+uint16_t
-+enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
-+ uint16_t nb_pkts)
-+{
-+ struct vnic_rq *rq = rx_queue;
-+ struct enic *enic = vnic_dev_priv(rq->vdev);
-+ unsigned int rx_id;
-+ struct rte_mbuf *nmb, *rxmb;
-+ uint16_t nb_rx = 0;
-+ uint16_t nb_hold;
-+ struct vnic_cq *cq;
-+ volatile struct cq_desc *cqd_ptr;
-+ uint8_t color;
-+
-+ cq = &enic->cq[enic_cq_rq(enic, rq->index)];
-+ rx_id = cq->to_clean; /* index of cqd, rqd, mbuf_table */
-+ cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;
-+
-+ nb_hold = rq->rx_nb_hold; /* mbufs held by software */
-+
-+ while (nb_rx < nb_pkts) {
-+ volatile struct rq_enet_desc *rqd_ptr;
-+ dma_addr_t dma_addr;
-+ struct cq_desc cqd;
-+ uint64_t ol_err_flags;
-+ uint8_t packet_error;
-+
-+ /* Check for pkts available */
-+ color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)
-+ & CQ_DESC_COLOR_MASK;
-+ if (color == cq->last_color)
-+ break;
-+
-+ /* Get the cq descriptor and rq pointer */
-+ cqd = *cqd_ptr;
-+ rqd_ptr = (struct rq_enet_desc *)(rq->ring.descs) + rx_id;
-+
-+ /* allocate a new mbuf */
-+ nmb = rte_rxmbuf_alloc(rq->mp);
-+ if (nmb == NULL) {
-+ dev_err(enic, "RX mbuf alloc failed port=%u qid=%u",
-+ enic->port_id, (unsigned)rq->index);
-+ rte_eth_devices[enic->port_id].
-+ data->rx_mbuf_alloc_failed++;
-+ break;
-+ }
-+
-+ /* A packet error means descriptor and data are untrusted */
-+ packet_error = enic_cq_rx_to_pkt_err_flags(&cqd, &ol_err_flags);
-+
-+ /* Get the mbuf to return and replace with one just allocated */
-+ rxmb = rq->mbuf_ring[rx_id];
-+ rq->mbuf_ring[rx_id] = nmb;
-+
-+ /* Increment cqd, rqd, mbuf_table index */
-+ rx_id++;
-+ if (unlikely(rx_id == rq->ring.desc_count)) {
-+ rx_id = 0;
-+ cq->last_color = cq->last_color ? 0 : 1;
-+ }
-+
-+ /* Prefetch next mbuf & desc while processing current one */
-+ cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;
-+ rte_enic_prefetch(cqd_ptr);
-+ rte_enic_prefetch(rq->mbuf_ring[rx_id]);
-+ rte_enic_prefetch((struct rq_enet_desc *)(rq->ring.descs)
-+ + rx_id);
-+
-+ /* Push descriptor for newly allocated mbuf */
-+ dma_addr = (dma_addr_t)(nmb->buf_physaddr
-+ + RTE_PKTMBUF_HEADROOM);
-+ rqd_ptr->address = rte_cpu_to_le_64(dma_addr);
-+ rqd_ptr->length_type = cpu_to_le16(nmb->buf_len
-+ - RTE_PKTMBUF_HEADROOM);
-+
-+ /* Fill in the rest of the mbuf */
-+ rxmb->data_off = RTE_PKTMBUF_HEADROOM;
-+ rxmb->nb_segs = 1;
-+ rxmb->next = NULL;
-+ rxmb->port = enic->port_id;
-+ if (!packet_error) {
-+ rxmb->pkt_len = enic_cq_rx_desc_n_bytes(&cqd);
-+ rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
-+ enic_cq_rx_to_pkt_flags(&cqd, rxmb);
-+ } else {
-+ rxmb->pkt_len = 0;
-+ rxmb->packet_type = 0;
-+ rxmb->ol_flags = 0;
-+ }
-+ rxmb->data_len = rxmb->pkt_len;
-+
-+ /* prefetch mbuf data for caller */
-+ rte_packet_prefetch(RTE_PTR_ADD(rxmb->buf_addr,
-+ RTE_PKTMBUF_HEADROOM));
-+
-+ /* store the mbuf address into the next entry of the array */
-+ rx_pkts[nb_rx++] = rxmb;
-+ }
-+
-+ nb_hold += nb_rx;
-+ cq->to_clean = rx_id;
-+
-+ if (nb_hold > rq->rx_free_thresh) {
-+ rq->posted_index = enic_ring_add(rq->ring.desc_count,
-+ rq->posted_index, nb_hold);
-+ nb_hold = 0;
-+ rte_mb();
-+ iowrite32(rq->posted_index, &rq->ctrl->posted_index);
-+ }
-+
-+ rq->rx_nb_hold = nb_hold;
-+
-+ return nb_rx;
-+}
-+
-+static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
-+{
-+ struct vnic_wq_buf *buf;
-+ struct rte_mbuf *m, *free[ENIC_MAX_WQ_DESCS];
-+ unsigned int nb_to_free, nb_free = 0, i;
-+ struct rte_mempool *pool;
-+ unsigned int tail_idx;
-+ unsigned int desc_count = wq->ring.desc_count;
-+
-+ nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index)
-+ + 1;
-+ tail_idx = wq->tail_idx;
-+ buf = &wq->bufs[tail_idx];
-+ pool = ((struct rte_mbuf *)buf->mb)->pool;
-+ for (i = 0; i < nb_to_free; i++) {
-+ buf = &wq->bufs[tail_idx];
-+ m = (struct rte_mbuf *)(buf->mb);
-+ if (likely(m->pool == pool)) {
-+ ASSERT(nb_free < ENIC_MAX_WQ_DESCS);
-+ free[nb_free++] = m;
-+ } else {
-+ rte_mempool_put_bulk(pool, (void *)free, nb_free);
-+ free[0] = m;
-+ nb_free = 1;
-+ pool = m->pool;
-+ }
-+ tail_idx = enic_ring_incr(desc_count, tail_idx);
-+ buf->mb = NULL;
-+ }
-+
-+ rte_mempool_put_bulk(pool, (void **)free, nb_free);
-+
-+ wq->tail_idx = tail_idx;
-+ wq->ring.desc_avail += nb_to_free;
-+}
-+
-+unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
-+{
-+ u16 completed_index;
-+
-+ completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff;
-+
-+ if (wq->last_completed_index != completed_index) {
-+ enic_free_wq_bufs(wq, completed_index);
-+ wq->last_completed_index = completed_index;
-+ }
-+ return 0;
-+}
-+
-+uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
-+ uint16_t nb_pkts)
-+{
-+ uint16_t index;
-+ unsigned int pkt_len, data_len;
-+ unsigned int nb_segs;
-+ struct rte_mbuf *tx_pkt;
-+ struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
-+ struct enic *enic = vnic_dev_priv(wq->vdev);
-+ unsigned short vlan_id;
-+ unsigned short ol_flags;
-+ unsigned int wq_desc_avail;
-+ int head_idx;
-+ struct vnic_wq_buf *buf;
-+ unsigned int hw_ip_cksum_enabled;
-+ unsigned int desc_count;
-+ struct wq_enet_desc *descs, *desc_p, desc_tmp;
-+ uint16_t mss;
-+ uint8_t vlan_tag_insert;
-+ uint8_t eop;
-+ uint64_t bus_addr;
-+
-+ enic_cleanup_wq(enic, wq);
-+ wq_desc_avail = vnic_wq_desc_avail(wq);
-+ head_idx = wq->head_idx;
-+ desc_count = wq->ring.desc_count;
-+
-+ nb_pkts = RTE_MIN(nb_pkts, ENIC_TX_XMIT_MAX);
-+
-+ hw_ip_cksum_enabled = enic->hw_ip_checksum;
-+ for (index = 0; index < nb_pkts; index++) {
-+ tx_pkt = *tx_pkts++;
-+ nb_segs = tx_pkt->nb_segs;
-+ if (nb_segs > wq_desc_avail) {
-+ if (index > 0)
-+ goto post;
-+ goto done;
-+ }
-+
-+ pkt_len = tx_pkt->pkt_len;
-+ data_len = tx_pkt->data_len;
-+ vlan_id = tx_pkt->vlan_tci;
-+ ol_flags = tx_pkt->ol_flags;
-+
-+ mss = 0;
-+ vlan_tag_insert = 0;
-+ bus_addr = (dma_addr_t)
-+ (tx_pkt->buf_physaddr + tx_pkt->data_off);
-+
-+ descs = (struct wq_enet_desc *)wq->ring.descs;
-+ desc_p = descs + head_idx;
-+
-+ eop = (data_len == pkt_len);
-+
-+ if (ol_flags & PKT_TX_VLAN_PKT)
-+ vlan_tag_insert = 1;
-+
-+ if (hw_ip_cksum_enabled && (ol_flags & PKT_TX_IP_CKSUM))
-+ mss |= ENIC_CALC_IP_CKSUM;
-+
-+ if (hw_ip_cksum_enabled && (ol_flags & PKT_TX_TCP_UDP_CKSUM))
-+ mss |= ENIC_CALC_TCP_UDP_CKSUM;
-+
-+ wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, 0, 0, eop,
-+ eop, 0, vlan_tag_insert, vlan_id, 0);
-+
-+ *desc_p = desc_tmp;
-+ buf = &wq->bufs[head_idx];
-+ buf->mb = (void *)tx_pkt;
-+ head_idx = enic_ring_incr(desc_count, head_idx);
-+ wq_desc_avail--;
-+
-+ if (!eop) {
-+ for (tx_pkt = tx_pkt->next; tx_pkt; tx_pkt =
-+ tx_pkt->next) {
-+ data_len = tx_pkt->data_len;
-+
-+ if (tx_pkt->next == NULL)
-+ eop = 1;
-+ desc_p = descs + head_idx;
-+ bus_addr = (dma_addr_t)(tx_pkt->buf_physaddr
-+ + tx_pkt->data_off);
-+ wq_enet_desc_enc((struct wq_enet_desc *)
-+ &desc_tmp, bus_addr, data_len,
-+ mss, 0, 0, eop, eop, 0,
-+ vlan_tag_insert, vlan_id, 0);
-+
-+ *desc_p = desc_tmp;
-+ buf = &wq->bufs[head_idx];
-+ buf->mb = (void *)tx_pkt;
-+ head_idx = enic_ring_incr(desc_count, head_idx);
-+ wq_desc_avail--;
-+ }
-+ }
-+ }
-+ post:
-+ rte_wmb();
-+ iowrite32(head_idx, &wq->ctrl->posted_index);
-+ done:
-+ wq->ring.desc_avail = wq_desc_avail;
-+ wq->head_idx = head_idx;
-+
-+ return index;
-+}
---
-2.7.0
-