[ipq806x] Initial commit for NSS Driver

        NSS driver supports the following features (an illustrative usage sketch of the client API appears below):
        [1] Dual concurrent NSS cores
        [2] Dual concurrent interrupts per NSS core
        [3] Crypto, Connection manager, GMAC, Profiler, IPsec
        [4] C2C
        [5] Fast path rules
        [6] Sync messages
        [7] Descriptor queue congestion cleared callback

        Note that the following parts of the code require more work:
        [1] NSS clock and reset initialization
        [2] Performance analysis (hrtimer, interrupt coalescing)
        [3] Weighted RR queueing
        [4] Virtual interface support
        [5] Debug and statistics support
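
        As an illustration only (not part of this patch), a client such as a
        connection manager is expected to use the public API declared in
        nss_api_if.h roughly as follows; my_ipv4_sync() and the rule values
        are hypothetical:

                static void my_ipv4_sync(struct nss_ipv4_sync *unis)
                {
                        /* Consume stats / keep-alive updates for the connection */
                }

                void *nss_ctx = nss_register_ipv4_mgr(my_ipv4_sync);

                struct nss_ipv4_create unic = { 0 };
                /* ... fill rule fields in host endian order ... */
                if (nss_tx_create_ipv4_rule(nss_ctx, &unic) != NSS_TX_SUCCESS) {
                        /* Queue full or NSS not ready; retry later */
                }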

Change-Id: I9d6e158a73f67a5df20c9ae4c7c6786c1b9742b0
Signed-off-by: Abhishek Rastogi <arastogi@codeaurora.org>
diff --git a/Kconfig b/Kconfig
new file mode 100755
index 0000000..5a8d3f2
--- /dev/null
+++ b/Kconfig
@@ -0,0 +1,9 @@
+#
+# QCA IPQ Network accelerator device configuration
+#
+config NSS_DEBUG_LEVEL
+	int "Debug level for NSS driver"
+	default 0
+	---help---
+	  Select between 0 and 4 (0: none, 1: assert, 2: warning, 3: info, 4: trace)
+
diff --git a/Makefile b/Makefile
new file mode 100755
index 0000000..6b9595f
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,11 @@
+obj-m += qca-nss-drv.o
+qca-nss-drv-objs := nss_init.o nss_core.o nss_tx_rx.o
+
+obj ?= .
+
+EXTRA_CFLAGS += -I$(obj)/nss_hal/include
+
+ifeq "$(CONFIG_ARCH_IPQ806X)" "y"
+qca-nss-drv-objs += nss_hal/ipq806x/nss_hal_pvt.o
+EXTRA_CFLAGS += -I$(obj)/nss_hal/ipq806x
+endif
diff --git a/nss_api_if.h b/nss_api_if.h
new file mode 100755
index 0000000..244cd78
--- /dev/null
+++ b/nss_api_if.h
@@ -0,0 +1,924 @@
+/* * Copyright (c) 2013 Qualcomm Atheros, Inc. * */
+
+/**
+ * nss_api_if.h
+ *	NSS driver APIs and Declarations.
+ */
+
+/**
+ * @addtogroup nss_drv
+ * @{
+ */
+
+/**
+  * @file
+  * This file declares all the public interfaces for the NSS driver.
+  *
+  */
+
+#ifndef __NSS_API_IF_H
+#define __NSS_API_IF_H
+
+#include <linux/if_ether.h>
+#include <linux/skbuff.h>
+
+/**
+ * This macro converts format for IPv6 address (from Linux to NSS)
+ */
+#define IN6_ADDR_TO_IPV6_ADDR(ipv6, in6) \
+	{ \
+		((uint32_t *)ipv6)[0] = in6.in6_u.u6_addr32[0]; \
+		((uint32_t *)ipv6)[1] = in6.in6_u.u6_addr32[1]; \
+		((uint32_t *)ipv6)[2] = in6.in6_u.u6_addr32[2]; \
+		((uint32_t *)ipv6)[3] = in6.in6_u.u6_addr32[3]; \
+	}
+
+/**
+ * This macro converts format for IPv6 address (from NSS to Linux)
+ */
+#define IPV6_ADDR_TO_IN6_ADDR(in6, ipv6) \
+	{ \
+		in6.in6_u.u6_addr32[0] = ((uint32_t *)ipv6)[0]; \
+		in6.in6_u.u6_addr32[1] = ((uint32_t *)ipv6)[1]; \
+		in6.in6_u.u6_addr32[2] = ((uint32_t *)ipv6)[2]; \
+		in6.in6_u.u6_addr32[3] = ((uint32_t *)ipv6)[3]; \
+	}
+
+/**
+ * Format string used to print an IPv6 address (8 * 16 bits)
+ */
+#define IPV6_ADDR_OCTAL_FMT "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x"
+
+/**
+ * This macro expands an IPv6 address into the eight 16-bit values expected by IPV6_ADDR_OCTAL_FMT
+ */
+#define IPV6_ADDR_TO_OCTAL(ipv6) ((uint16_t *)ipv6)[0], ((uint16_t *)ipv6)[1], ((uint16_t *)ipv6)[2], ((uint16_t *)ipv6)[3], ((uint16_t *)ipv6)[4], ((uint16_t *)ipv6)[5], ((uint16_t *)ipv6)[6], ((uint16_t *)ipv6)[7]
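+
+/*
+ * Illustrative usage of the two print macros above ('sync' is a hypothetical
+ * pointer to a struct nss_ipv6_sync, declared later in this file):
+ *
+ *	printk("src: " IPV6_ADDR_OCTAL_FMT "\n", IPV6_ADDR_TO_OCTAL(sync->src_addr));
+ */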
+
+/**
+ * IPv4 rule sync reasons.
+ */
+#define NSS_IPV4_RULE_SYNC_REASON_STATS 0
+					/**< Sync is to synchronize stats */
+
+#define NSS_IPV4_RULE_SYNC_REASON_FLUSH 1
+					/**< Sync is to flush a cache entry */
+
+#define NSS_IPV4_RULE_SYNC_REASON_EVICT 2
+					/**< Sync is to evict a cache entry */
+
+#define NSS_IPV4_RULE_SYNC_REASON_DESTROY 3
+					/**< Sync is to destroy a cache entry (requested by host OS) */
+
+#define NSS_IPV4_RULE_SYNC_REASON_PPPOE_DESTROY 4
+					/**< Sync is to destroy a cache entry which belongs to a particular PPPoE session */
+/**
+ * Structure to be used while sending an IPv4 flow/connection create rule.
+ *
+ * Caution: All fields must be passed in host endian order
+ */
+struct nss_ipv4_create {
+	int32_t src_interface_num;	/**< Source i/f number (virtual/physical) */
+	int32_t dest_interface_num;	/**< Destination i/f number (virtual/physical) */
+	int32_t protocol;		/**< L4 Protocol (e.g. TCP/UDP) */
+	uint32_t flags;			/**< Flags (if any) associated with this rule */
+	uint32_t from_mtu;		/**< MTU of incoming interface */
+	uint32_t to_mtu;		/**< MTU of outgoing interface */
+	uint32_t src_ip;		/**< Source IP address */
+	int32_t src_port;		/**< Source L4 port (e.g. TCP/UDP port) */
+	uint32_t src_ip_xlate;		/**< Translated Source IP address (used with SNAT) */
+	int32_t src_port_xlate;		/**< Translated Source L4 port (used with SNAT) */
+	uint32_t dest_ip;		/**< Destination IP address */
+	int32_t dest_port;		/**< Destination L4 port (e.g. TCP/UDP port) */
+	uint32_t dest_ip_xlate;		/**< Translated Destination IP address (used with DNAT) */
+	int32_t dest_port_xlate;	/**< Translated Destination L4 port (used with DNAT) */
+	uint8_t src_mac[ETH_ALEN];	/**< Source MAC address */
+	uint8_t dest_mac[ETH_ALEN];	/**< Destination MAC address */
+	uint8_t src_mac_xlate[ETH_ALEN];	/**< Translated Source MAC address (post routing) */
+	uint8_t dest_mac_xlate[ETH_ALEN];	/**< Translated Destination MAC address (post routing) */
+	uint8_t flow_window_scale;	/**< Window scaling factor (TCP) */
+	uint32_t flow_max_window;	/**< Maximum window size (TCP) */
+	uint32_t flow_end;		/**< Flow end */
+	uint32_t flow_max_end;		/**< Flow maximum end */
+	uint16_t flow_pppoe_session_id;			/**< PPPoE session associated with flow */
+	uint8_t flow_pppoe_remote_mac[ETH_ALEN];	/**< Remote PPPoE peer MAC address */
+	uint8_t return_window_scale;	/**< Window scaling factor of return direction (TCP) */
+	uint32_t return_max_window;	/**< Maximum window size of return direction */
+	uint32_t return_end;		/**< Flow end for return direction */
+	uint32_t return_max_end;	/**< Flow maximum end for return direction */
+	uint16_t return_pppoe_session_id;		/**< PPPoE session ID for return direction */
+	uint8_t return_pppoe_remote_mac[ETH_ALEN];	/**< Remote PPPoE peer MAC address for return */
+	uint8_t spo_needed;		/**< Is SPO required */
+	uint32_t param_a0;		/**< Custom extra parameter 0 */
+	uint32_t param_a1;		/**< Custom extra parameter 1 */
+	uint32_t param_a2;		/**< Custom extra parameter 2 */
+	uint32_t param_a3;		/**< Custom extra parameter 3 */
+	uint32_t param_a4;		/**< Custom extra parameter 4 */
+};
+
+/**
+ * IPv4 connection flags (to be used with flags field of nss_ipv4_create structure)
+ */
+
+/** Indicates that we should not check sequence numbers */
+#define NSS_IPV4_CREATE_FLAG_NO_SEQ_CHECK 0x1
+
+/**
+ * Structure to be used while sending an IPv4 flow/connection destroy rule.
+ */
+struct nss_ipv4_destroy {
+	int32_t protocol;		/**< L4 protocol ID */
+	uint32_t src_ip;		/**< Source IP address */
+	int32_t src_port;		/**< Source L4 port (e.g. TCP/UDP port) */
+	uint32_t dest_ip;		/**< Destination IP address */
+	int32_t dest_port;		/**< Destination L4 port (e.g. TCP/UDP port) */
+};
+
+/**
+ * IPv6 rule sync reasons.
+ */
+#define NSS_IPV6_RULE_SYNC_REASON_STATS 0
+					/**< Sync is to synchronize stats */
+
+#define NSS_IPV6_RULE_SYNC_REASON_FLUSH 1
+					/**< Sync is to flush a cache entry */
+
+#define NSS_IPV6_RULE_SYNC_REASON_EVICT 2
+					/**< Sync is to evict a cache entry */
+
+#define NSS_IPV6_RULE_SYNC_REASON_DESTROY 3
+					/**< Sync is to destroy a cache entry (requested by host OS) */
+
+#define NSS_IPV6_RULE_SYNC_REASON_PPPOE_DESTROY 4
+					/**< Sync is to destroy a cache entry which belongs to a particular PPPoE session */
+
+/**
+ * Structure to be used while sending an IPv6 flow/connection create rule.
+ *
+ * Caution: All fields must be passed in host endian order
+ */
+struct nss_ipv6_create {
+	int32_t src_interface_num;	/**< Source i/f number (virtual/physical) */
+	int32_t dest_interface_num;	/**< Destination i/f number (virtual/physical) */
+	int32_t protocol;		/**< L4 Protocol (e.g. TCP/UDP) */
+	uint32_t flags;			/**< Flags (if any) associated with this rule */
+	uint32_t from_mtu;		/**< MTU of incoming interface */
+	uint32_t to_mtu;		/**< MTU of outgoing interface */
+	uint32_t src_ip[4];		/**< Source IP address */
+	int32_t src_port;		/**< Source L4 port (e.g. TCP/UDP port) */
+	uint32_t dest_ip[4];		/**< Destination IP address */
+	int32_t dest_port;		/**< Destination L4 port (e.g. TCP/UDP port) */
+	uint8_t src_mac[ETH_ALEN];	/**< Source MAC address */
+	uint8_t dest_mac[ETH_ALEN];	/**< Destination MAC address */
+	uint8_t flow_window_scale;	/**< Window scaling factor (TCP) */
+	uint32_t flow_max_window;	/**< Maximum window size (TCP) */
+	uint32_t flow_end;		/**< Flow end */
+	uint32_t flow_max_end;		/**< Flow max end */
+	uint16_t flow_pppoe_session_id;			/**< PPPoE session associated with flow */
+	uint8_t flow_pppoe_remote_mac[ETH_ALEN];	/**< Remote PPPoE peer MAC address */
+	uint8_t return_window_scale;	/**< Window scaling factor (TCP) for return */
+	uint32_t return_max_window;	/**< Maximum window size (TCP) for return */
+	uint32_t return_end;		/**< End for return */
+	uint32_t return_max_end;	/**< Maximum end for return */
+	uint16_t return_pppoe_session_id;		/**< PPPoE session associated with return */
+	uint8_t return_pppoe_remote_mac[ETH_ALEN];	/**< Remote PPPoE peer MAC address for return */
+};
+
+/**
+ * IPv6 connection flags (to be used with the flags field of the nss_ipv6_create structure).
+ */
+#define NSS_IPV6_CREATE_FLAG_NO_SEQ_CHECK 0x1
+					/**< Indicates that we should not check sequence numbers */
+
+/**
+ * Structure to be used while sending an IPv6 flow/connection destroy rule.
+ */
+struct nss_ipv6_destroy {
+	int32_t protocol;		/**< L4 Protocol (e.g. TCP/UDP) */
+	uint32_t src_ip[4];		/**< Source IP address */
+	int32_t src_port;		/**< Source L4 port (e.g. TCP/UDP port) */
+	uint32_t dest_ip[4];		/**< Destination IP address */
+	int32_t dest_port;		/**< Destination L4 port (e.g. TCP/UDP port) */
+};
+
+/**
+ * L2 switch rule sync reasons.
+ */
+#define NSS_L2SWITCH_RULE_SYNC_REASON_STATS 0
+					/**< Sync is to synchronize stats */
+#define NSS_L2SWITCH_RULE_SYNC_REASON_FLUSH 1
+					/**< Sync is to flush a cache entry */
+#define NSS_L2SWITCH_RULE_SYNC_REASON_EVICT 2
+					/**< Sync is to evict a cache entry */
+#define NSS_L2SWITCH_RULE_SYNC_REASON_DESTROY 3
+					/**< Sync is to destroy a cache entry (requested by host OS) */
+
+/**
+ * Structure to be used while sending L2 switch flow creation rule.
+ */
+struct nss_l2switch_create {
+	uint16_t addr[3];		/**< Destination MAC address */
+	uint8_t state;			/**< State */
+	uint8_t priority;		/**< Priority of the flow */
+	int32_t interface_num;		/**< Interface number */
+	uint16_t port_no;		/**< Port number */
+	uint16_t port_id;		/**< Port ID */
+};
+
+/**
+ * Structure to be used while sending L2 switch flow destruction rule.
+ */
+struct nss_l2switch_destroy {
+	int32_t interface_num;		/**< Interface number */
+	uint16_t addr[3];		/**< Destination MAC address */
+};
+
+/**
+ * Structure to be used while sending IPsec Tx creation rule.
+ */
+struct nss_ipsec_tx_create {
+	uint32_t spi;			/**< SPI index */
+	uint32_t replay;		/**< Replay number */
+	uint32_t src_addr;		/**< Source IPv4 address */
+	uint32_t dest_addr;		/**< Destination IPv4 address */
+	uint32_t ses_idx;		/**< Session index */
+};
+
+/**
+ * Structure to be used while sending IPsec Tx destruction rule.
+ */
+struct nss_ipsec_tx_destroy {
+	uint32_t ses_idx;		/**< Session index */
+};
+
+/**
+ * Structure to be used while sending IPsec Rx rule.
+ */
+struct nss_ipsec_rx_create {
+	uint32_t spi;			/**< SPI index */
+	uint32_t replay;		/**< Replay number */
+	uint32_t src_addr;		/**< Source IPv4 address */
+	uint32_t dest_addr;		/**< Destination IPv4 address */
+	uint32_t ses_idx;		/**< Session index */
+};
+
+/**
+ * Structure to be used while sending IPsec Rx destruction rule
+ */
+struct nss_ipsec_rx_destroy {
+	uint32_t ses_idx;		/**< Session index */
+};
+
+/**
+ * Structure used to sync packet stats (bytes / packets seen over a connection) and keep the connection alive.
+ *
+ * NOTE: The addresses here are NON-NAT addresses, i.e. the true endpoint addressing.
+ * 'src' is the creator of the connection.
+ */
+struct nss_ipv4_sync {
+	int32_t protocol;		/**< IP protocol number (IPPROTO_...) */
+	uint32_t src_addr;		/**< Non-NAT source address, i.e. the creator of the connection */
+	int32_t src_port;		/**< Non-NAT source port */
+	uint32_t src_addr_xlate;	/**< NAT translated source address, i.e. the creator of the connection */
+	int32_t src_port_xlate;		/**< NAT translated source port */
+	uint32_t dest_addr;		/**< Non-NAT destination address, i.e. to whom the connection was created */
+	int32_t dest_port;		/**< Non-NAT destination port */
+	uint32_t dest_addr_xlate;	/**< NAT translated destination address, i.e. to whom the connection was created */
+	int32_t dest_port_xlate;	/**< NAT translated destination port */
+	uint32_t flow_max_window;	/**< Maximum window size (TCP) */
+	uint32_t flow_end;		/**< Flow end */
+	uint32_t flow_max_end;		/**< Flow max end */
+	uint32_t flow_packet_count;	/**< Packet count for the flow */
+	uint32_t flow_byte_count;	/**< Byte count for the flow */
+	uint32_t return_max_window;	/**< Maximum window size (TCP) for return */
+	uint32_t return_end;		/**< End for return */
+	uint32_t return_max_end;	/**< Max end for return */
+	uint32_t return_packet_count;	/**< Packet count for return direction */
+	uint32_t return_byte_count;	/**< Byte count for return */
+	unsigned long int delta_jiffies;
+					/**< Time in Linux jiffies to be added to the current timeout to keep the connection alive */
+	uint8_t reason;			/**< Reason of synchronization */
+	uint32_t param_a0;		/**< Custom extra parameter 0 */
+	uint32_t param_a1;		/**< Custom extra parameter 1 */
+	uint32_t param_a2;		/**< Custom extra parameter 2 */
+	uint32_t param_a3;		/**< Custom extra parameter 3 */
+	uint32_t param_a4;		/**< Custom extra parameter 4 */
+};
+
+/**
+ * struct nss_ipv6_sync
+ *	Update packet stats (bytes / packets seen over a connection) and also keep alive.
+ *
+ * NOTE: The addresses here are NON-NAT addresses, i.e. the true endpoint addressing.
+ * 'src' is the creator of the connection.
+ */
+struct nss_ipv6_sync {
+	int32_t protocol;		/**< IP protocol number (IPPROTO_...) */
+	uint32_t src_addr[4];		/**< Non-NAT source address, i.e. the creator of the connection */
+	int32_t src_port;		/**< Non-NAT source port */
+	uint32_t dest_addr[4];		/**< Non-NAT destination address, i.e. to whom the connection was created */
+	int32_t dest_port;		/**< Non-NAT destination port */
+	uint32_t flow_max_window;	/**< Maximum window size (TCP) */
+	uint32_t flow_end;		/**< Flow end */
+	uint32_t flow_max_end;		/**< Flow max end */
+	uint32_t flow_packet_count;	/**< Packet count for the flow */
+	uint32_t flow_byte_count;	/**< Byte count for the flow */
+	uint32_t return_max_window;	/**< Maximum window size (TCP) for return */
+	uint32_t return_end;		/**< End for return */
+	uint32_t return_max_end;	/**< Max end for return */
+	uint32_t return_packet_count;	/**< Packet count for return direction */
+	uint32_t return_byte_count;	/**< Byte count for return */
+	unsigned long int delta_jiffies;
+					/**< Time in Linux jiffies to be added to the current timeout to keep the connection alive */
+	uint8_t final_sync;		/**< Non-zero when the NA has ceased to accelerate the given connection */
+	uint8_t evicted;		/**< Non-zero if connection evicted */
+};
+
+/**
+ * struct nss_l2switch_sync
+ *	Update packet stats (bytes / packets seen over a connection) and also keep alive.
+ */
+struct nss_l2switch_sync {
+	uint16_t addr[3];		/**< MAC address */
+	uint8_t reason;			/**< Reason of synchronization */
+	void *dev;			/**< Netdevice */
+	unsigned long int delta_jiffies;
+					/**< Time in Linux jiffies to be added to the current timeout to keep the connection alive */
+};
+
+/**
+ * struct nss_gmac_sync
+ *	The NA per-GMAC statistics sync structure.
+ */
+struct nss_gmac_sync {
+	int32_t interface;		/**< Interface number */
+	uint32_t rx_bytes;		/**< Number of RX bytes */
+	uint32_t rx_packets;		/**< Number of RX packets */
+	uint32_t rx_errors;		/**< Number of RX errors */
+	uint32_t rx_receive_errors;	/**< Number of RX receive errors */
+	uint32_t rx_overflow_errors;	/**< Number of RX overflow errors */
+	uint32_t rx_descriptor_errors;	/**< Number of RX descriptor errors */
+	uint32_t rx_watchdog_timeout_errors;
+					/**< Number of RX watchdog timeout errors */
+	uint32_t rx_crc_errors;		/**< Number of RX CRC errors */
+	uint32_t rx_late_collision_errors;
+					/**< Number of RX late collision errors */
+	uint32_t rx_dribble_bit_errors;	/**< Number of RX dribble bit errors */
+	uint32_t rx_length_errors;	/**< Number of RX length errors */
+	uint32_t rx_ip_header_errors;	/**< Number of RX IP header errors */
+	uint32_t rx_ip_payload_errors;	/**< Number of RX IP payload errors */
+	uint32_t rx_no_buffer_errors;	/**< Number of RX no-buffer errors */
+	uint32_t rx_transport_csum_bypassed;
+					/**< Number of RX packets where the transport checksum was bypassed */
+	uint32_t tx_bytes;		/**< Number of TX bytes */
+	uint32_t tx_packets;		/**< Number of TX packets */
+	uint32_t tx_collisions;		/**< Number of TX collisions */
+	uint32_t tx_errors;		/**< Number of TX errors */
+	uint32_t tx_jabber_timeout_errors;
+					/**< Number of TX jabber timeout errors */
+	uint32_t tx_frame_flushed_errors;
+					/**< Number of TX frame flushed errors */
+	uint32_t tx_loss_of_carrier_errors;
+					/**< Number of TX loss of carrier errors */
+	uint32_t tx_no_carrier_errors;	/**< Number of TX no carrier errors */
+	uint32_t tx_late_collision_errors;
+					/**< Number of TX late collision errors */
+	uint32_t tx_excessive_collision_errors;
+					/**< Number of TX excessive collision errors */
+	uint32_t tx_excessive_deferral_errors;
+					/**< Number of TX excessive deferral errors */
+	uint32_t tx_underflow_errors;	/**< Number of TX underflow errors */
+	uint32_t tx_ip_header_errors;	/**< Number of TX IP header errors */
+	uint32_t tx_ip_payload_errors;	/**< Number of TX IP payload errors */
+	uint32_t tx_dropped;		/**< Number of TX dropped packets */
+	uint32_t hw_errs[10];		/**< GMAC DMA error counters */
+	uint32_t rx_missed;		/**< Number of RX packets missed by the DMA */
+	uint32_t fifo_overflows;	/**< Number of RX FIFO overflows signalled by the DMA */
+	uint32_t gmac_total_ticks;	/**< Total clock ticks spent inside the GMAC */
+	uint32_t gmac_worst_case_ticks;	/**< Worst case iteration of the GMAC in ticks */
+	uint32_t gmac_iterations;	/**< Number of iterations around the GMAC */
+};
+
+/**
+ * Tx command status
+ */
+typedef enum {
+	NSS_TX_SUCCESS = 0,	/**< Success */
+	NSS_TX_FAILURE,		/**< Command failure other than descriptor not available */
+	NSS_TX_FAILURE_QUEUE,	/**< Command failure due to descriptor not available */
+	NSS_TX_FAILURE_NOT_READY,	/**< Command failure due to NSS state uninitialized */
+} nss_tx_status_t;
+
+/**
+ * NSS state status
+ */
+typedef enum {
+	NSS_STATE_UNINITIALIZED = 0,	/**< NSS state is uninitialized */
+	NSS_STATE_INITIALIZED		/**< NSS state is initialized */
+} nss_state_t;
+
+/**
+ * NSS core id
+ */
+typedef enum {
+	NSS_CORE_0 = 0,
+	NSS_CORE_1,
+	NSS_CORE_MAX
+} nss_core_id_t;
+
+/**
+ * Callback register status
+ */
+typedef enum {
+	NSS_CB_REGISTER_SUCCESS = 0,	/**< Callback register successful */
+	NSS_CB_REGISTER_FAILED,		/**< Callback register failed */
+} nss_cb_register_status_t;
+
+/**
+ * Callback unregister status
+ */
+typedef enum {
+	NSS_CB_UNREGISTER_SUCCESS = 0,	/**< Callback unregister successful */
+	NSS_CB_UNREGISTER_FAILED,		/**< Callback unregister failed */
+} nss_cb_unregister_status_t;
+
+/**
+ * NSS GMAC event type
+ */
+typedef enum {
+	NSS_GMAC_EVENT_STATS,
+	NSS_GMAC_EVENT_OTHER,
+	NSS_GMAC_EVENT_MAX
+} nss_gmac_event_t;
+
+/**
+ * General utilities
+ */
+
+/**
+ * @brief Obtain interface number
+ *
+ * @param nss_ctx NSS context
+ * @param dev OS network device pointer
+ *
+ * @return int32_t Interface number
+ */
+extern int32_t nss_get_interface_number(void *nss_ctx, void *dev);
+
+/**
+ * @brief Obtain the NSS state
+ *
+ * @param nss_ctx NSS context
+ *
+ * @return nss_state_t NSS state
+ */
+extern nss_state_t nss_get_state(void *nss_ctx);
+
+/**
+ * Callback function for all connection expired notification
+ */
+typedef void (*nss_connection_expire_all_callback_t)(void);
+
+/**
+ * @brief Register for all connection expire notification
+ *
+ * @param event_callback Event callback
+ */
+extern void nss_register_connection_expire_all(nss_connection_expire_all_callback_t event_callback);
+
+/**
+ * @brief Unregister for all connection expire notification
+ */
+extern void nss_unregister_connection_expire_all(void);
+
+/**
+ * Callback for queue decongestion message
+ */
+typedef void (*nss_queue_decongestion_callback_t)(void *app_ctx);
+
+/**
+ * @brief Register for queue decongestion event
+ *
+ * @param nss_ctx NSS context
+ * @param event_callback Event callback
+ * @param ctx Callee context to be returned in callback
+ *
+ * @note Callback function will be called with spinlock taken
+ */
+extern nss_cb_register_status_t nss_register_queue_decongestion(void *nss_ctx, nss_queue_decongestion_callback_t event_callback, void *app_ctx);
+
+/**
+ * @brief Unregister for queue decongestion event
+ *
+ * @param nss_ctx NSS context
+ * @param event_callback Event callback
+ */
+extern nss_cb_unregister_status_t nss_unregister_queue_decongestion(void *nss_ctx, nss_queue_decongestion_callback_t event_callback);
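+
+/*
+ * Illustrative usage of the two calls above (my_decongestion_cb and my_dev are
+ * hypothetical; the callback is invoked with a spinlock held, so it must not sleep):
+ *
+ *	static void my_decongestion_cb(void *app_ctx)
+ *	{
+ *		netif_wake_queue((struct net_device *)app_ctx);
+ *	}
+ *
+ *	nss_register_queue_decongestion(nss_ctx, my_decongestion_cb, my_dev);
+ *	...
+ *	nss_unregister_queue_decongestion(nss_ctx, my_decongestion_cb);
+ */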
+
+/**
+ * Methods provided by NSS device driver for use by connection tracking logic for IPv4.
+ */
+
+/**
+ * Callback for IPv4 connection sync messages
+ */
+typedef void (*nss_ipv4_sync_callback_t)(struct nss_ipv4_sync *unis);
+
+/**
+ * @brief Register for sending/receiving IPv4 messages
+ *
+ * @param event_callback Event callback
+ *
+ * @return void* NSS context to be provided with every message
+ */
+extern void *nss_register_ipv4_mgr(nss_ipv4_sync_callback_t event_callback);
+
+/**
+ * @brief Unregister for sending/receiving IPv4 messages
+ */
+extern void nss_unregister_ipv4_mgr(void);
+
+/**
+ * @brief Send IPv4 connection setup rule
+ *
+ * @param nss_ctx NSS context
+ * @param unic Rule parameters
+ *
+ * @return nss_tx_status_t Tx status
+ */
+extern nss_tx_status_t nss_tx_create_ipv4_rule(void *nss_ctx, struct nss_ipv4_create *unic);
+
+/**
+ * @brief Send IPv4 connection destroy rule
+ *
+ * @param nss_ctx NSS context
+ * @param unid Rule parameters
+ *
+ * @return nss_tx_status_t Tx status
+ */
+extern nss_tx_status_t nss_tx_destroy_ipv4_rule(void *nss_ctx, struct nss_ipv4_destroy *unid);
+
+/**
+ * Methods provided by NSS device driver for use by connection tracking logic for IPv6.
+ */
+
+/**
+ * Callback for IPv6 sync messages
+ */
+typedef void (*nss_ipv6_sync_callback_t)(struct nss_ipv6_sync *unis);
+
+/**
+ * @brief Register for sending/receiving IPv6 messages
+ *
+ * @param event_callback Callback
+ *
+ * @return void* NSS context to be provided with every message
+ */
+extern void *nss_register_ipv6_mgr(nss_ipv6_sync_callback_t event_callback);
+
+/**
+ * @brief Unregister for sending/receiving IPv6 messages
+ */
+extern void nss_unregister_ipv6_mgr(void);
+
+/**
+ * @brief Send IPv6 connection setup rule
+ *
+ * @param nss_ctx NSS context
+ * @param unic Rule parameters
+ *
+ * @return nss_tx_status_t Tx status
+ */
+extern nss_tx_status_t nss_tx_create_ipv6_rule(void *nss_ctx, struct nss_ipv6_create *unic);
+
+/**
+ * @brief Send IPv6 connection destroy rule
+ *
+ * @param nss_ctx NSS context
+ * @param unid Rule parameters
+ *
+ * @return nss_tx_status_t Tx status
+ */
+extern nss_tx_status_t nss_tx_destroy_ipv6_rule(void *nss_ctx, struct nss_ipv6_destroy *unid);
+
+/**
+ * Methods provided by NSS device driver for use by connection tracking logic for l2 switch.
+ */
+
+/**
+ * Callback for L2switch sync messages
+ */
+typedef void (*nss_l2switch_sync_callback_t)(struct nss_l2switch_sync *unls);
+
+/**
+ * @brief Register for sending/receiving L2switch messages
+ *
+ * @param event_callback Callback
+ *
+ * @return void* NSS context to be provided with every message
+ */
+extern void *nss_register_l2switch_mgr(nss_l2switch_sync_callback_t event_callback);
+
+/**
+ * @brief Unregister for sending/receiving L2switch messages
+ */
+extern void nss_unregister_l2switch_mgr(void);
+
+/**
+ * @brief Send L2switch flow setup rule
+ *
+ * @param nss_ctx NSS context
+ * @param unlc Rule parameters
+ *
+ * @return nss_tx_status_t Tx status
+ */
+extern nss_tx_status_t nss_tx_create_l2switch_rule(void *nss_ctx, struct nss_l2switch_create *unlc);
+
+/**
+ * @brief Send L2switch flow destroy rule
+ *
+ * @param nss_ctx NSS context
+ * @param unld Rule parameters
+ *
+ * @return nss_tx_status_t Tx status
+ */
+extern nss_tx_status_t nss_tx_destroy_l2switch_rule(void *nss_ctx, struct nss_l2switch_destroy *unld);
+
+/**
+ * @brief Send L2switch destroy all flows rule
+ *
+ * @param nss_ctx NSS context
+ *
+ * @return nss_tx_status_t Tx status
+ */
+extern nss_tx_status_t nss_tx_destroy_all_l2switch_rules(void *nss_ctx);
+
+/**
+ * Methods provided by NSS device driver for use by crypto driver
+ */
+
+/**
+ * Callback to receive crypto buffers
+ */
+typedef void (*nss_crypto_callback_t)(void *ctx, void *buf, uint32_t buf_paddr, uint16_t len);
+
+/**
+ * @brief Register for sending/receiving crypto buffers
+ *
+ * @param crypto_callback Callback
+ * @param ctx Crypto context
+ *
+ * @return void* NSS context to be provided with every message
+ */
+extern void *nss_register_crypto_if(nss_crypto_callback_t crypto_callback, void *ctx);
+
+/**
+ * @brief Unregister for sending/receiving crypto buffers
+ */
+extern void nss_unregister_crypto_if(void);
+
+/**
+ * @brief Open crypto interface
+ *
+ * @param ctx NSS context
+ * @param buf Buffer to send to NSS
+ * @param len Length of buffer
+ *
+ * @return nss_tx_status_t Tx status
+ */
+extern nss_tx_status_t nss_tx_crypto_if_open(void *ctx, uint8_t *buf, uint32_t len);
+
+/**
+ * @brief Close crypto interface
+ *
+ * @param ctx NSS context
+ * @param eng Engine number
+ *
+ * @return nss_tx_status_t Tx status
+ */
+extern nss_tx_status_t nss_tx_crypto_if_close(void *ctx, uint32_t eng);
+
+/**
+ * @brief Send crypto buffer to NSS
+ *
+ * @param nss_ctx NSS context
+ * @param buf Crypto buffer
+ * @param buf_paddr Physical address of buffer
+ * @param len Length of buffer
+ *
+ * @return nss_tx_status_t Tx status
+ */
+extern nss_tx_status_t nss_tx_crypto_if_buf(void *nss_ctx, void *buf, uint32_t buf_paddr, uint16_t len);
+
+/**
+ * Methods provided by NSS device driver for use by GMAC driver
+ */
+
+/**
+ * Callback to receive GMAC events
+ */
+typedef void (*nss_phys_if_event_callback_t)(void *if_ctx, nss_gmac_event_t ev_type, void *buf, uint32_t len);
+
+/**
+ * Callback to receive GMAC packets
+ */
+typedef void (*nss_phys_if_rx_callback_t)(void *if_ctx, void *os_buf);
+
+/**
+ * @brief Register to send/receive GMAC packets/messages
+ *
+ * @param if_num GMAC i/f number
+ * @param rx_callback Receive callback for packets
+ * @param event_callback Receive callback for events
+ * @param if_ctx Interface context provided in callback
+ *
+ * @return void* NSS context
+ */
+extern void *nss_register_phys_if(uint32_t if_num, nss_phys_if_rx_callback_t rx_callback,
+					nss_phys_if_event_callback_t event_callback, void *if_ctx);
+
+/**
+ * @brief Unregister GMAC handlers with NSS driver
+ *
+ * @param if_num GMAC Interface number
+ */
+extern void nss_unregister_phys_if(uint32_t if_num);
+
+/**
+ * @brief Send GMAC packet
+ *
+ * @param nss_ctx NSS context
+ * @param os_buf OS buffer (e.g. skbuff)
+ * @param if_num GMAC i/f number
+ *
+ * @return nss_tx_status_t Tx status
+ */
+extern nss_tx_status_t nss_tx_phys_if_buf(void *nss_ctx, struct sk_buff *os_buf, uint32_t if_num);
+
+/**
+ * @brief Open GMAC interface on NSS
+ *
+ * @param nss_ctx NSS context
+ * @param tx_desc_ring Tx descriptor ring address
+ * @param rx_desc_ring Rx descriptor ring address
+ * @param if_num GMAC i/f number
+ *
+ * @return nss_tx_status_t Tx status
+ */
+extern nss_tx_status_t nss_tx_phys_if_open(void *nss_ctx, uint32_t tx_desc_ring, uint32_t rx_desc_ring, uint32_t if_num);
+
+/**
+ * @brief Close GMAC interface on NSS
+ *
+ * @param nss_ctx NSS context
+ * @param if_num GMAC i/f number
+ *
+ * @return nss_tx_status_t Tx status
+ */
+extern nss_tx_status_t nss_tx_phys_if_close(void *nss_ctx, uint32_t if_num);
+
+/**
+ * @brief Send link state message to NSS
+ *
+ * @param nss_ctx NSS context
+ * @param link_state Link state
+ * @param if_num GMAC i/f number
+ *
+ * @return nss_tx_status_t Tx status
+ */
+extern nss_tx_status_t nss_tx_phys_if_link_state(void *nss_ctx, uint32_t link_state, uint32_t if_num);
+
+/**
+ * @brief Send MAC address to NSS
+ *
+ * @param nss_ctx NSS context
+ * @param addr MAC address pointer
+ * @param if_num GMAC i/f number
+ *
+ * @return nss_tx_status_t Tx status
+ */
+extern nss_tx_status_t nss_tx_phys_if_mac_addr(void *nss_ctx, uint8_t *addr, uint32_t if_num);
+
+/**
+ * @brief Send MTU change notification to NSS
+ *
+ * @param nss_ctx NSS context
+ * @param mtu MTU
+ * @param if_num GMAC i/f number
+ *
+ * @return nss_tx_status_t Tx status
+ */
+extern nss_tx_status_t nss_tx_phys_if_change_mtu(void *nss_ctx, uint32_t mtu, uint32_t if_num);
+
+/**
+ * Methods provided by NSS driver for use by IPsec stack
+ */
+
+/**
+ * Callback to receive ipsec sync messages
+ */
+typedef void (*nss_ipsec_callback_t)(void *ctx, void *os_buf);
+
+/**
+ * @brief Register to send/receive IPsec messages to NSS
+ *
+ * @param ipsec_callback Callback
+ * @param ctx IPsec context
+ *
+ * @return void* NSS context
+ */
+extern void *nss_register_ipsec_if(nss_ipsec_callback_t ipsec_callback, void *ctx);
+
+/**
+ * @brief Unregister IPsec interface with NSS
+ */
+extern void nss_unregister_ipsec_if(void);
+
+/**
+ * @brief Send rule creation message for IPsec Tx node
+ *
+ * @param nss_ctx NSS context
+ * @param nitc Rule creation parameters
+ *
+ * @return nss_tx_status_t Tx status
+ */
+extern nss_tx_status_t nss_tx_create_ipsec_tx_rule(void *nss_ctx, struct nss_ipsec_tx_create *nitc);
+
+/**
+ * @brief Send rule destroy message for IPsec Tx node
+ *
+ * @param nss_ctx NSS context
+ * @param nitd Rule destroy parameters
+ *
+ * @return nss_tx_status_t Tx status
+ */
+extern nss_tx_status_t nss_tx_destroy_ipsec_tx_rule(void *nss_ctx, struct nss_ipsec_tx_destroy *nitd);
+
+/**
+ * @brief Send rule creation message for IPsec Rx node
+ *
+ * @param nss_ctx NSS context
+ * @param nirc Rule creation parameters
+ *
+ * @return nss_tx_status_t Tx status
+ */
+extern nss_tx_status_t nss_tx_create_ipsec_rx_rule(void *nss_ctx, struct nss_ipsec_rx_create *nirc);
+
+/**
+ * @brief Send rule destroy message for IPsec Rx node
+ *
+ * @param nss_ctx NSS context
+ * @param nird Rule destroy parameters
+ *
+ * @return nss_tx_status_t Tx status
+ */
+extern nss_tx_status_t nss_tx_destroy_ipsec_rx_rule(void *nss_ctx, struct nss_ipsec_rx_destroy *nird);
+
+/**
+ * Methods provided by NSS driver for use by NSS Profiler
+ */
+
+/**
+ * Callback to receive profiler messages
+ *
+ * @note Memory pointed to by buf is owned by the caller (i.e. the NSS driver).
+ *	The NSS driver does not interpret "buf". It is up to the profiler to make sense of it.
+ */
+typedef void (*nss_profiler_callback_t)(void *ctx, uint8_t *buf, uint16_t len);
+
+/**
+ * @brief Register to send/receive profiler messages
+ *
+ * @param profiler_callback Profiler callback
+ * @param core_id NSS core id
+ * @param ctx Profiler context
+ *
+ * @return void* NSS context
+ *
+ * @note Caller must provide valid core_id that is being profiled. This function must be called once for each core.
+ *	Context (ctx) will be provided back to caller in the registered callback function
+ */
+extern void *nss_register_profiler_if(nss_profiler_callback_t profiler_callback, nss_core_id_t core_id, void *ctx);
+
+/**
+ * @brief Unregister profiler interface
+ *
+ * @param core_id NSS core id
+ *
+ */
+extern void nss_unregister_profiler_if(nss_core_id_t core_id);
+
+/**
+ * @brief Send profiler command to NSS
+ *
+ * @param ctx NSS context
+ * @param buf Buffer to send to NSS
+ * @param len Length of buffer
+ *
+ * @return nss_tx_status_t Tx status
+ *
+ * @note Valid context must be provided (for the right core).
+ *	This context was returned during registration.
+ */
+extern nss_tx_status_t nss_tx_profiler_if_buf(void *ctx, uint8_t *buf, uint32_t len);
+
+/**@}*/
+#endif /* __NSS_API_IF_H */
diff --git a/nss_core.c b/nss_core.c
new file mode 100755
index 0000000..2e98893
--- /dev/null
+++ b/nss_core.c
@@ -0,0 +1,746 @@
+/* * Copyright (c) 2013 Qualcomm Atheros, Inc. * */
+
+/*
+ * nss_core.c
+ *	NSS driver core APIs source file.
+ */
+
+#include "nss_core.h"
+#include <nss_hal.h>
+#include <asm/barrier.h>
+
+/*
+ * nss_send_c2c_map()
+ *	Send C2C map to NSS
+ */
+static int32_t nss_send_c2c_map(struct nss_ctx_instance *nss_own, struct nss_ctx_instance *nss_other)
+{
+	struct sk_buff *nbuf;
+	int32_t status;
+	struct nss_tx_metadata_object *ntmo;
+	struct nss_c2c_tx_map *nctm;
+
+	nss_info("%p: C2C map:%x\n", nss_own, nss_other->c2c_start);
+
+	nbuf =  __dev_alloc_skb(NSS_NBUF_PAYLOAD_SIZE, GFP_ATOMIC);
+	if (unlikely(!nbuf)) {
+		spin_lock_bh(&nss_own->nss_top->stats_lock);
+		nss_own->nss_top->nbuf_alloc_err++;
+		spin_unlock_bh(&nss_own->nss_top->stats_lock);
+		nss_warning("%p: Unable to allocate memory for 'C2C tx map'", nss_own);
+		return NSS_CORE_STATUS_FAILURE;
+	}
+
+	ntmo = (struct nss_tx_metadata_object *)skb_put(nbuf, sizeof(struct nss_tx_metadata_object));
+	ntmo->type = NSS_TX_METADATA_TYPE_C2C_TX_MAP;
+
+	nctm = &ntmo->sub.c2c_tx_map;
+	nctm->c2c_start = nss_other->c2c_start;
+	nctm->c2c_int_addr = (uint32_t)(nss_other->nphys) + NSS_REGS_C2C_INTR_SET_OFFSET;
+
+	status = nss_core_send_buffer(nss_own, 0, nbuf, NSS_IF_CMD_QUEUE, H2N_BUFFER_CTRL, 0);
+	if (status != NSS_CORE_STATUS_SUCCESS) {
+		dev_kfree_skb_any(nbuf);
+		nss_warning("%p: Unable to enqueue 'c2c tx map'\n", nss_own);
+		return NSS_CORE_STATUS_FAILURE;
+	}
+
+	nss_hal_send_interrupt(nss_own->nmap, nss_own->h2n_desc_rings[NSS_IF_CMD_QUEUE].desc_ring.int_bit,
+								NSS_REGS_H2N_INTR_STATUS_DATA_COMMAND_QUEUE);
+
+	return NSS_CORE_STATUS_SUCCESS;
+}
+
+/*
+ * nss_core_cause_to_queue()
+ *	Map interrupt cause to queue id
+ */
+static inline uint16_t nss_core_cause_to_queue(uint16_t cause)
+{
+	if (likely(cause == NSS_REGS_N2H_INTR_STATUS_DATA_COMMAND_QUEUE)) {
+		return NSS_IF_DATA_QUEUE;
+	} else if (cause == NSS_REGS_N2H_INTR_STATUS_EMPTY_BUFFER_QUEUE) {
+		return NSS_IF_EMPTY_BUFFER_QUEUE;
+	}
+
+	/*
+	 * We should never reach here as the cause has already been identified as belonging to a valid queue
+	 */
+	nss_assert(0);
+	return 0;
+}
+
+/*
+ * nss_core_handle_cause_queue()
+ *	Handle interrupt cause related to N2H/H2N queues
+ */
+static int32_t nss_core_handle_cause_queue(struct nss_ctx_instance *nss_ctx, uint16_t cause, int16_t weight)
+{
+	void *ctx;
+	nss_phys_if_rx_callback_t cb;
+	int16_t count, count_temp;
+	uint16_t size, mask, qid;
+	uint32_t nss_index, hlos_index;
+	struct sk_buff *nbuf;
+	struct n2h_desc_if_instance *desc_if;
+	struct n2h_descriptor *desc;
+	struct nss_if_mem_map *if_map = (struct nss_if_mem_map *)(nss_ctx->vmap);
+
+	qid = nss_core_cause_to_queue(cause);
+
+	/*
+	 * Make sure qid < num_rings
+	 */
+	nss_assert(qid < if_map->n2h_rings);
+
+	desc_if = &nss_ctx->n2h_desc_if[qid];
+	nss_index = if_map->n2h_nss_index[qid];
+	hlos_index = if_map->n2h_hlos_index[qid];
+	size = desc_if->size;
+	mask = size - 1;
+
+	/*
+	 * Check if there is work to be done for this queue
+	 */
+	count = ((nss_index - hlos_index) + size) & (mask);
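+
+	/*
+	 * Worked example of the index arithmetic above (assuming ring sizes are
+	 * powers of two, as implied by mask = size - 1): with size = 8, mask = 7,
+	 * nss_index = 2 and hlos_index = 6, count = ((2 - 6) + 8) & 7 = 4, i.e.
+	 * four descriptors are pending for HLOS.
+	 */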
+	if (unlikely(count == 0)) {
+		return 0;
+	}
+
+	/*
+	 * Restrict ourselves to suggested weight
+	 */
+	if (count > weight) {
+		count = weight;
+	}
+
+	count_temp = count;
+	while (count_temp) {
+		desc = &(desc_if->desc[hlos_index]);
+
+		if (unlikely((desc->buffer_type == N2H_BUFFER_CRYPTO_RESP))) {
+			/*
+			 * This is a crypto buffer hence send it to crypto driver
+			 *
+			 * NOTE: Crypto buffers require special handling as they do not
+			 *	use OS network buffers (e.g. skb). Hence, OS buffer operations
+			 *	are not applicable to crypto buffers
+			 */
+			nss_rx_handle_crypto_buf(nss_ctx, desc->opaque, desc->buffer, desc->payload_len);
+		} else {
+			/*
+			* Obtain the nbuf
+			*/
+			nbuf = (struct sk_buff *)desc->opaque;
+
+			/*
+			 * Set relevant fields within nbuf (len, head, tail)
+			 */
+			nbuf->data = nbuf->head + desc->payload_offs;
+			nbuf->len = desc->payload_len;
+			nbuf->tail = nbuf->data + nbuf->len;
+
+			/*
+			 * TODO: Unmap data buffer area for scatter-gather
+			 * TODO: Check if there is any issue w.r.t. map and unmap; NSS should only play
+			 *	around with the data area and should not touch the HEADROOM area
+			 */
+			dma_unmap_single(NULL, (desc->buffer + desc->payload_offs), desc->payload_len, DMA_FROM_DEVICE);
+
+			switch (desc->buffer_type) {
+			case N2H_BUFFER_PACKET:
+				/*
+				 * Check if NSS was able to obtain checksum
+				 */
+				nbuf->ip_summed = CHECKSUM_UNNECESSARY;
+				if (unlikely(!(desc->bit_flags & N2H_BIT_FLAG_IP_TRANSPORT_CHECKSUM_VALID))) {
+					nbuf->ip_summed = CHECKSUM_NONE;
+				}
+
+				/*
+				 * TODO: Differentiate between physical and virtual interfaces
+				 */
+
+				ctx = nss_ctx->nss_top->phys_if_ctx[desc->interface_num];
+
+				/*
+				 * We need to ensure that processor/compiler do not re-order ctx
+				 * and cb reads. Note that write to ctx and cb happens in
+				 * reverse order. The idea is that we do not want a case where
+				 * cb is valid but ctx is NULL.
+				 */
+				rmb();
+				cb = nss_ctx->nss_top->phys_if_rx_callback[desc->interface_num];
+				if (likely(cb)) {
+					cb(ctx, (void *)nbuf);
+				} else {
+					dev_kfree_skb_any(nbuf);
+				}
+				break;
+
+			case N2H_BUFFER_STATUS:
+				nss_rx_handle_status_pkt(nss_ctx, nbuf);
+
+				/* Fall Through */
+
+			case N2H_BUFFER_EMPTY:
+				/*
+				 * TODO: Unmap fragments
+				 */
+				dev_kfree_skb_any(nbuf);
+				break;
+
+			default:
+				/*
+				 * ERROR:
+				 */
+				nss_warning("%p: Invalid buffer type %d received from NSS", nss_ctx, desc->buffer_type);
+			}
+		}
+
+		hlos_index = (hlos_index + 1) & (mask);
+		count_temp--;
+	}
+
+	if_map->n2h_hlos_index[qid] = hlos_index;
+	return count;
+}
+
+/*
+ * nss_core_init_nss()
+ *	Initialize NSS core state
+ */
+static void nss_core_init_nss(struct nss_ctx_instance *nss_ctx, struct nss_if_mem_map *if_map)
+{
+	int32_t i;
+
+	/*
+	 * NOTE: A commonly found error is that the size and start address of the per-core
+	 *	virtual register map do not match between the NSS and HLOS builds. This will lead
+	 *	to some hard to trace issues such as spinlock magic check failures.
+	 *	The following check verifies that a proper virtual map has been initialized
+	 */
+	nss_assert(if_map->magic == DEV_MAGIC);
+
+	/*
+	 * Copy ring addresses to cacheable locations.
+	 * We do not wish to read ring start address through NC accesses
+	 */
+	for (i = 0; i < if_map->n2h_rings; i++) {
+		nss_ctx->n2h_desc_if[i].desc =
+			(struct n2h_descriptor *)((uint32_t)if_map->n2h_desc_if[i].desc - (uint32_t)nss_ctx->vphys + (uint32_t)nss_ctx->vmap);
+		nss_ctx->n2h_desc_if[i].size = if_map->n2h_desc_if[i].size;
+		nss_ctx->n2h_desc_if[i].int_bit = if_map->n2h_desc_if[i].int_bit;
+	}
+
+	for (i = 0; i < if_map->h2n_rings; i++) {
+		nss_ctx->h2n_desc_rings[i].desc_ring.desc =
+			(struct h2n_descriptor *)((uint32_t)if_map->h2n_desc_if[i].desc - (uint32_t)nss_ctx->vphys + (uint32_t)nss_ctx->vmap);
+		nss_ctx->h2n_desc_rings[i].desc_ring.size = if_map->h2n_desc_if[i].size;
+		nss_ctx->h2n_desc_rings[i].desc_ring.int_bit = if_map->h2n_desc_if[i].int_bit;
+		spin_lock_init(&(nss_ctx->h2n_desc_rings[i].lock));
+	}
+
+	nss_ctx->c2c_start = if_map->c2c_start;
+
+	spin_lock_bh(&nss_ctx->nss_top->lock);
+	nss_ctx->state = NSS_CORE_STATE_INITIALIZED;
+	spin_unlock_bh(&nss_ctx->nss_top->lock);
+}
+
+/*
+ * nss_core_handle_cause_nonqueue()
+ *	Handle non-queue interrupt causes (e.g. empty buffer SOS, Tx unblocked)
+ */
+static int32_t nss_core_handle_cause_nonqueue (struct nss_ctx_instance *nss_ctx, uint32_t cause, int16_t weight)
+{
+	struct nss_if_mem_map *if_map = (struct nss_if_mem_map *)(nss_ctx->vmap);
+	int32_t i;
+
+	nss_assert((cause == NSS_REGS_N2H_INTR_STATUS_EMPTY_BUFFERS_SOS) || (cause == NSS_REGS_N2H_INTR_STATUS_TX_UNBLOCKED));
+
+	/*
+	 * TODO: find better mechanism to handle empty buffers
+	 */
+	if (likely(cause == NSS_REGS_N2H_INTR_STATUS_EMPTY_BUFFERS_SOS)) {
+		struct sk_buff *nbuf;
+		uint16_t count, size, mask;
+		int32_t nss_index, hlos_index;
+		struct h2n_desc_if_instance *desc_if = &(nss_ctx->h2n_desc_rings[NSS_IF_EMPTY_BUFFER_QUEUE].desc_ring);
+
+		/*
+		 * If this is the first time we are receiving this interrupt then
+		 * we need to initialize local state of NSS core. This helps us save an
+		 * interrupt cause bit. Hopefully, unlikely() and the processor's branch
+		 * prediction will prevent any excessive penalties.
+		 */
+		if (unlikely(nss_ctx->state == NSS_CORE_STATE_UNINITIALIZED)) {
+			nss_core_init_nss(nss_ctx, if_map);
+
+			/*
+			 * Pass C2C addresses of already brought up cores to the recently brought
+			 * up core. No NSS core knows the state of other other cores in system so
+			 * NSS driver needs to mediate and kick start C2C between them
+			 */
+#if (NSS_MAX_CORES > 1)
+			for (i = 0; i < NSS_MAX_CORES; i++) {
+				/*
+				 * Loop through all NSS cores and send exchange C2C addresses
+				 * TODO: Current implementation utilizes the fact that there are
+				 *	only two cores in the current design and, of course, ignores
+				 *	the core that we are trying to initialize.
+				 */
+				if (&nss_ctx->nss_top->nss[i] != nss_ctx) {
+
+					/*
+					 * Block initialization routine of any other NSS cores running on other
+					 * processors. We do not want them to mess around with their initialization
+					 * state and C2C addresses while we check their state.
+					 */
+					spin_lock_bh(&nss_ctx->nss_top->lock);
+					if (nss_ctx->nss_top->nss[i].state == NSS_CORE_STATE_INITIALIZED) {
+						spin_unlock_bh(&nss_ctx->nss_top->lock);
+						nss_send_c2c_map(&nss_ctx->nss_top->nss[i], nss_ctx);
+						nss_send_c2c_map(nss_ctx, &nss_ctx->nss_top->nss[i]);
+						continue;
+					}
+					spin_unlock_bh(&nss_ctx->nss_top->lock);
+				}
+			}
+#endif
+		}
+
+		/*
+		 * Check how many empty buffers could be filled in queue
+		 */
+		nss_index = if_map->h2n_nss_index[NSS_IF_EMPTY_BUFFER_QUEUE];
+		hlos_index = if_map->h2n_hlos_index[NSS_IF_EMPTY_BUFFER_QUEUE];
+		size = nss_ctx->h2n_desc_rings[NSS_IF_EMPTY_BUFFER_QUEUE].desc_ring.size;
+		mask = size - 1;
+		count = ((nss_index - hlos_index - 1) + size) & (mask);
+
+		nss_info("%p: Adding %d buffers to empty queue", nss_ctx, count);
+
+		/*
+		 * Fill empty buffer queue with buffers leaving one empty descriptor
+		 * Note that total number of descriptors in queue cannot be more than (size - 1)
+		 */
+		while (count) {
+			struct h2n_descriptor *desc = &(desc_if->desc[hlos_index]);
+
+			nbuf = __dev_alloc_skb(NSS_NBUF_PAYLOAD_SIZE, GFP_ATOMIC | __GFP_NOWARN);
+			if (unlikely(!nbuf)) {
+				/*
+				 * ERR:
+				 */
+				spin_lock_bh(&nss_ctx->nss_top->stats_lock);
+				nss_ctx->nss_top->nbuf_alloc_err++;
+				spin_unlock_bh(&nss_ctx->nss_top->stats_lock);
+				nss_warning("%p: Could not obtain empty buffer", nss_ctx);
+				break;
+			}
+
+			desc->opaque = (uint32_t)nbuf;
+			desc->payload_offs = (uint16_t) (nbuf->data - nbuf->head);
+			desc->buffer = dma_map_single(NULL, nbuf->head, (nbuf->end - nbuf->head), DMA_FROM_DEVICE);
+			if (unlikely(dma_mapping_error(NULL, desc->buffer))) {
+				/*
+				 * ERR:
+				 */
+				dev_kfree_skb_any(nbuf);
+				nss_warning("%p: DMA mapping failed for empty buffer", nss_ctx);
+				break;
+			}
+			desc->buffer_len = (uint16_t)(nbuf->end - nbuf->head);
+			desc->buffer_type = H2N_BUFFER_EMPTY;
+			hlos_index = (hlos_index + 1) & (mask);
+			count--;
+		}
+
+		if_map->h2n_hlos_index[NSS_IF_EMPTY_BUFFER_QUEUE] = hlos_index;
+
+		/*
+		 * Inform NSS that new buffers are available
+		 */
+		nss_hal_send_interrupt(nss_ctx->nmap, desc_if->int_bit, NSS_REGS_H2N_INTR_STATUS_EMPTY_BUFFER_QUEUE);
+	} else if (cause == NSS_REGS_N2H_INTR_STATUS_TX_UNBLOCKED) {
+		nss_trace("%p: Data queue unblocked", nss_ctx);
+
+		/*
+		 * Call callback functions of drivers that have registered with us
+		 */
+		spin_lock_bh(&nss_ctx->decongest_cb_lock);
+
+		for (i = 0; i < NSS_MAX_CLIENTS; i++) {
+			if (nss_ctx->queue_decongestion_callback[i]) {
+				nss_ctx->queue_decongestion_callback[i](nss_ctx->queue_decongestion_ctx[i]);
+			}
+		}
+
+		spin_unlock_bh(&nss_ctx->decongest_cb_lock);
+		nss_ctx->h2n_desc_rings[NSS_IF_DATA_QUEUE].flags &= ~NSS_H2N_DESC_RING_FLAGS_TX_STOPPED;
+
+		/*
+		 * Mask Tx unblocked interrupt and unmask it again when queue full condition is reached
+		 */
+		nss_hal_disable_interrupt(nss_ctx->nmap, nss_ctx->int_ctx[0].irq,
+				nss_ctx->int_ctx[0].shift_factor, NSS_REGS_N2H_INTR_STATUS_TX_UNBLOCKED);
+	}
+
+	return 0;
+}
+
+/*
+ * nss_core_get_prioritized_cause()
+ *	Obtain the prioritized cause (from multiple interrupt causes) that
+ *	must be handled by NSS driver before other causes
+ */
+static uint32_t nss_core_get_prioritized_cause(uint32_t cause, uint32_t *type, int16_t *weight)
+{
+	*type = NSS_INTR_CAUSE_INVALID;
+	*weight = 0;
+
+	/*
+	 * NOTE: This is a very simple algorithm with fixed weight and strict priority
+	 *
+	 * TODO: Modify the algorithm later with proper weights and Round Robin
+	 */
+	if (cause & NSS_REGS_N2H_INTR_STATUS_EMPTY_BUFFERS_SOS) {
+		*type = NSS_INTR_CAUSE_NON_QUEUE;
+		*weight = NSS_EMPTY_BUFFER_SOS_PROCESSING_WEIGHT;
+		return NSS_REGS_N2H_INTR_STATUS_EMPTY_BUFFERS_SOS;
+	}
+
+	if (cause & NSS_REGS_N2H_INTR_STATUS_TX_UNBLOCKED) {
+		*type = NSS_INTR_CAUSE_NON_QUEUE;
+		*weight = NSS_TX_UNBLOCKED_PROCESSING_WEIGHT;
+		return NSS_REGS_N2H_INTR_STATUS_TX_UNBLOCKED;
+	}
+
+	if (cause & NSS_REGS_N2H_INTR_STATUS_DATA_COMMAND_QUEUE) {
+		*type = NSS_INTR_CAUSE_QUEUE;
+		*weight = NSS_DATA_COMMAND_BUFFER_PROCESSING_WEIGHT;
+		return NSS_REGS_N2H_INTR_STATUS_DATA_COMMAND_QUEUE;
+	}
+
+	if (cause & NSS_REGS_N2H_INTR_STATUS_EMPTY_BUFFER_QUEUE) {
+		*type = NSS_INTR_CAUSE_QUEUE;
+		*weight = NSS_EMPTY_BUFFER_RETURN_PROCESSING_WEIGHT;
+		return NSS_REGS_N2H_INTR_STATUS_EMPTY_BUFFER_QUEUE;
+	}
+
+	return 0;
+}
+
+void nss_core_handle_bh(unsigned long ctx)
+{
+	uint32_t prio_cause, int_cause;
+	int16_t processed, weight;
+	uint32_t cause_type;
+	struct int_ctx_instance *int_ctx = (struct int_ctx_instance *)ctx;
+	struct nss_ctx_instance *nss_ctx = int_ctx->nss_ctx;
+
+	/*
+	 * Read cause of interrupt
+	 */
+	nss_hal_read_interrupt_cause(nss_ctx->nmap, int_ctx->irq, int_ctx->shift_factor, &int_cause);
+
+	do {
+		nss_hal_clear_interrupt_cause(nss_ctx->nmap, int_ctx->irq, int_ctx->shift_factor, int_cause);
+		while (int_cause) {
+
+			/*
+			 * Obtain the cause as per priority. Also obtain the weight
+			 *
+			 * NOTE: The idea is that all causes are processed as per priority and weight
+			 * so that no single cause can overwhelm the system.
+			 */
+			prio_cause = nss_core_get_prioritized_cause(int_cause, &cause_type, &weight);
+			processed = 0;
+			switch (cause_type) {
+			case NSS_INTR_CAUSE_QUEUE:
+				processed = nss_core_handle_cause_queue(nss_ctx, prio_cause, weight);
+				break;
+
+			case NSS_INTR_CAUSE_NON_QUEUE:
+				processed = nss_core_handle_cause_nonqueue(nss_ctx, prio_cause, weight);
+				break;
+
+			default:
+				nss_warning("%p: Invalid cause %x received from nss", nss_ctx, int_cause);
+				break;
+			}
+
+			if (processed <= weight) {
+				/*
+				 * If we processed no more packets than the allotted weight,
+				 * then processing for this queue/cause is complete and we
+				 * can clear the cause for this cycle
+				 */
+				int_cause &= ~prio_cause;
+			}
+		}
+
+		nss_hal_read_interrupt_cause(nss_ctx->nmap, int_ctx->irq, int_ctx->shift_factor, &int_cause);
+	} while (int_cause);
+
+	/*
+	 * Re-enable any further interrupt from this IRQ
+	 */
+	nss_hal_enable_interrupt(nss_ctx->nmap, int_ctx->irq, int_ctx->shift_factor, NSS_HAL_SUPPORTED_INTERRUPTS);
+
+	/*
+	 * WARNING: This code should be removed after UBI32 IRQ mask issue is resolved in hardware
+	 */
+	enable_irq(int_ctx->irq);
+}
+
+/*
+ * nss_core_send_crypto()
+ *	Send crypto buffer to NSS
+ */
+int32_t nss_core_send_crypto(struct nss_ctx_instance *nss_ctx, void *buf, uint32_t buf_paddr, uint16_t len)
+{
+	int16_t count, hlos_index, nss_index, size;
+	struct h2n_descriptor *desc;
+	struct h2n_desc_if_instance *desc_if = &nss_ctx->h2n_desc_rings[NSS_IF_DATA_QUEUE].desc_ring;
+	struct nss_if_mem_map *if_map = (struct nss_if_mem_map *) nss_ctx->vmap;
+
+	/*
+	 * Take a lock for queue
+	 */
+	spin_lock_bh(&nss_ctx->h2n_desc_rings[NSS_IF_DATA_QUEUE].lock);
+
+	/*
+	 * We need to work out if there's sufficient space in our transmit descriptor
+	 * ring to place the crypto packet.
+	 */
+	hlos_index = if_map->h2n_hlos_index[NSS_IF_DATA_QUEUE];
+	nss_index = if_map->h2n_nss_index[NSS_IF_DATA_QUEUE];
+
+	size = desc_if->size;
+	count = ((nss_index - hlos_index - 1) + size) & (size - 1);
+
+	if (unlikely(count < 1)) {
+		/* TODO: What is the use case of TX_STOPPED_FLAGS */
+		nss_ctx->h2n_desc_rings[NSS_IF_DATA_QUEUE].tx_q_full_cnt++;
+		nss_ctx->h2n_desc_rings[NSS_IF_DATA_QUEUE].flags |= NSS_H2N_DESC_RING_FLAGS_TX_STOPPED;
+		spin_unlock_bh(&nss_ctx->h2n_desc_rings[NSS_IF_DATA_QUEUE].lock);
+		nss_warning("%p: Data/Command Queue full reached", nss_ctx);
+
+		/*
+		 * Enable de-congestion interrupt from NSS
+		 */
+		nss_hal_enable_interrupt(nss_ctx->nmap, nss_ctx->int_ctx[0].irq,
+				nss_ctx->int_ctx[0].shift_factor, NSS_REGS_N2H_INTR_STATUS_TX_UNBLOCKED);
+
+		return NSS_CORE_STATUS_FAILURE_QUEUE;
+	}
+
+	desc = &(desc_if->desc[hlos_index]);
+	desc->opaque = (uint32_t) buf;
+	desc->buffer_type = H2N_BUFFER_CRYPTO_REQ;
+	desc->buffer = buf_paddr;
+	desc->buffer_len = len;
+	desc->payload_len = len;
+	desc->payload_offs = 0;
+	desc->bit_flags = 0;
+
+	/*
+	 * Update our host index so the NSS sees we've written a new descriptor.
+	 */
+	if_map->h2n_hlos_index[NSS_IF_DATA_QUEUE] = (hlos_index + 1) & (size - 1);
+	spin_unlock_bh(&nss_ctx->h2n_desc_rings[NSS_IF_DATA_QUEUE].lock);
+
+	/*
+	 * Memory barrier to ensure all writes have been successful
+	 * NOTE: NOCs have internal buffers, hence a race condition may occur between the NOC
+	 *	write and the register write for the interrupt
+	 * TODO: Verify and remove if not required
+	 */
+	wmb();
+	return NSS_CORE_STATUS_SUCCESS;
+}
+
+/*
+ * nss_core_send_buffer()
+ *	Send network buffer to NSS
+ */
+int32_t nss_core_send_buffer(struct nss_ctx_instance *nss_ctx, uint32_t if_num,
+					struct sk_buff *nbuf, uint16_t qid,
+					uint8_t buffer_type, uint16_t flags)
+{
+	int16_t count, hlos_index, nss_index, size, mask;
+	uint32_t nr_frags;
+	struct h2n_descriptor *desc;
+	struct h2n_desc_if_instance *desc_if = &nss_ctx->h2n_desc_rings[qid].desc_ring;
+	struct nss_if_mem_map *if_map = (struct nss_if_mem_map *) nss_ctx->vmap;
+
+	nr_frags = skb_shinfo(nbuf)->nr_frags;
+	BUG_ON(nr_frags > MAX_SKB_FRAGS);
+
+	/*
+	 * Take a lock for queue
+	 */
+	spin_lock_bh(&nss_ctx->h2n_desc_rings[qid].lock);
+
+	/*
+	 * We need to work out if there's sufficient space in our transmit descriptor
+	 * ring to place all of the segments of the nbuf.
+	 */
+	hlos_index = if_map->h2n_hlos_index[qid];
+	nss_index = if_map->h2n_nss_index[qid];
+
+	size = desc_if->size;
+	mask = size - 1;
+	count = ((nss_index - hlos_index - 1) + size) & (mask);
+
+	if (unlikely(count < (nr_frags + 1))) {
+		/*
+		 * NOTE: tx_q_full_cnt and TX_STOPPED flags will be used
+		 *	when we add support for descriptor queue congestion management
+		 *	in the future
+		 */
+		nss_ctx->h2n_desc_rings[qid].tx_q_full_cnt++;
+		nss_ctx->h2n_desc_rings[qid].flags |= NSS_H2N_DESC_RING_FLAGS_TX_STOPPED;
+		spin_unlock_bh(&nss_ctx->h2n_desc_rings[qid].lock);
+		nss_warning("%p: Data/Command Queue full reached", nss_ctx);
+
+		/*
+		 * Enable de-congestion interrupt from NSS
+		 */
+		nss_hal_enable_interrupt(nss_ctx->nmap, nss_ctx->int_ctx[0].irq,
+				nss_ctx->int_ctx[0].shift_factor, NSS_REGS_N2H_INTR_STATUS_TX_UNBLOCKED);
+
+		return NSS_CORE_STATUS_FAILURE_QUEUE;
+	}
+
+	desc = &(desc_if->desc[hlos_index]);
+
+	/*
+	 * Is this a conventional unfragmented nbuf?
+	 */
+	if (likely(nr_frags == 0)) {
+		desc->buffer_type = buffer_type;
+		desc->bit_flags = flags | H2N_BIT_FLAG_FIRST_SEGMENT | H2N_BIT_FLAG_LAST_SEGMENT;
+
+		if (likely(nbuf->ip_summed == CHECKSUM_PARTIAL)) {
+			desc->bit_flags |= H2N_BIT_FLAG_GEN_IP_TRANSPORT_CHECKSUM;
+		}
+
+		desc->interface_num = (int8_t)if_num;
+		desc->opaque = (uint32_t)nbuf;
+		desc->payload_offs = (uint16_t) (nbuf->data - nbuf->head);
+		desc->payload_len = nbuf->len;
+		desc->buffer_len = (uint16_t)(nbuf->end - nbuf->head);
+		desc->buffer = (uint32_t)dma_map_single(NULL, nbuf->head, (nbuf->tail - nbuf->head), DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(NULL, desc->buffer))) {
+			spin_unlock_bh(&nss_ctx->h2n_desc_rings[qid].lock);
+			nss_warning("%p: DMA mapping failed for virtual address = %x", nss_ctx, desc->buffer);
+			return NSS_CORE_STATUS_FAILURE;
+		}
+	} else {
+		/*
+		 * TODO: convert to BUGON/ASSERT
+		 */
+		uint32_t i = 0;
+		const skb_frag_t *frag;
+		uint16_t mss = 0;
+
+		/*
+		 * Check if segmentation enabled.
+		 * Configure descriptor bit flags accordingly
+		 */
+		if (skb_is_gso(nbuf)) {
+			mss = skb_shinfo(nbuf)->gso_size;
+			flags |= H2N_BIT_FLAG_SEGMENTATION_ENABLE;
+			if (skb_shinfo(nbuf)->gso_type & SKB_GSO_TCPV4) {
+				flags |= H2N_BIT_FLAG_SEGMENT_TSO;
+			} else if (skb_shinfo(nbuf)->gso_type & SKB_GSO_TCPV6) {
+				flags |= H2N_BIT_FLAG_SEGMENT_TSO6;
+			} else if (skb_shinfo(nbuf)->gso_type & SKB_GSO_UDP) {
+				flags |= H2N_BIT_FLAG_SEGMENT_UFO;
+			} else {
+				/*
+				 * Invalid segmentation type
+				 */
+				nss_assert(0);
+			}
+		}
+
+		/*
+		 * Handle all fragments
+		 */
+
+		/*
+		 * First fragment/descriptor is special
+		 */
+		desc->buffer_type = buffer_type;
+		desc->bit_flags = (flags | H2N_BIT_FLAG_FIRST_SEGMENT | H2N_BIT_FLAG_DISCARD);
+		if (likely(nbuf->ip_summed == CHECKSUM_PARTIAL)) {
+			desc->bit_flags |= H2N_BIT_FLAG_GEN_IP_TRANSPORT_CHECKSUM;
+		}
+
+		desc->interface_num = (int8_t)if_num;
+		desc->opaque = (uint32_t)NULL;
+		desc->payload_offs = nbuf->data - nbuf->head;
+		desc->payload_len = nbuf->len - nbuf->data_len;
+		desc->buffer_len = nbuf->end - nbuf->head;
+		desc->buffer = (uint32_t)dma_map_single(NULL, nbuf->head, (nbuf->tail - nbuf->head), DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(NULL, desc->buffer))) {
+			spin_unlock_bh(&nss_ctx->h2n_desc_rings[qid].lock);
+			nss_warning("%p: DMA mapping failed for virtual address = %x", nss_ctx, nbuf->head);
+			return NSS_CORE_STATUS_FAILURE;
+		}
+
+		desc->mss = mss;
+
+		/*
+		 * Now handle rest of the fragments.
+		 */
+		while (likely(i < (nr_frags))) {
+			frag = &skb_shinfo(nbuf)->frags[i++];
+			hlos_index = (hlos_index + 1) & (mask);
+			desc = &(desc_if->desc[hlos_index]);
+			desc->buffer_type = buffer_type;
+			desc->bit_flags = (flags | H2N_BIT_FLAG_DISCARD);
+			if (likely(nbuf->ip_summed == CHECKSUM_PARTIAL)) {
+				desc->bit_flags |= H2N_BIT_FLAG_GEN_IP_TRANSPORT_CHECKSUM;
+			}
+
+			desc->interface_num = (int8_t)if_num;
+			desc->opaque = (uint32_t)NULL;
+			desc->payload_offs = frag->page_offset;
+			desc->payload_len = skb_frag_size(frag);
+			desc->buffer_len = skb_frag_size(frag);
+			desc->buffer = skb_frag_dma_map(NULL, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE);
+			if (unlikely(dma_mapping_error(NULL, desc->buffer))) {
+				spin_unlock_bh(&nss_ctx->h2n_desc_rings[qid].lock);
+				nss_warning("%p: DMA mapping failed for fragment", nss_ctx);
+				return NSS_CORE_STATUS_FAILURE;
+			}
+			desc->mss = mss;
+		}
+
+		/*
+		 * Update bit flag for last descriptor
+		 */
+		desc->bit_flags |= H2N_BIT_FLAG_LAST_SEGMENT;
+	}
+
+	/*
+	 * Update our host index so the NSS sees we've written a new descriptor.
+	 */
+	if_map->h2n_hlos_index[qid] = (hlos_index + 1) & (mask);
+	spin_unlock_bh(&nss_ctx->h2n_desc_rings[qid].lock);
+
+	/*
+	 * Memory barrier to ensure all writes have been successful
+	 * NOTE: NOCs have internal buffers, hence a race condition may occur between the NOC
+	 *	write and the register write for the interrupt
+	 * TODO: Verify and remove if not required
+	 */
+	wmb();
+	return NSS_CORE_STATUS_SUCCESS;
+}
diff --git a/nss_core.h b/nss_core.h
new file mode 100755
index 0000000..674d8a7
--- /dev/null
+++ b/nss_core.h
@@ -0,0 +1,544 @@
+/* * Copyright (c) 2013 Qualcomm Atheros, Inc. * */
+
+/**
+ * nss_core.h
+ *	NSS driver core header file.
+ */
+
+#ifndef __NSS_CORE_H
+#define __NSS_CORE_H
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+
+#include "nss_hlos_if.h"
+#include "nss_api_if.h"
+
+/**
+ * NSS debug macros
+ */
+#if (CONFIG_NSS_DEBUG_LEVEL < 1)
+#define nss_assert(c)
+#else
+#define nss_assert(c) BUG_ON(!(c))
+#endif
+
+#if (CONFIG_NSS_DEBUG_LEVEL < 2)
+#define nss_warning(fmt, args...)
+#else
+#define nss_warning(fmt, args...) printk(KERN_WARNING "nss:"fmt, ##args)
+#endif
+
+#if (CONFIG_NSS_DEBUG_LEVEL < 3)
+#define nss_info(fmt, args...)
+#else
+#define nss_info(fmt, args...) printk(KERN_INFO "nss:"fmt, ##args)
+#endif
+
+#if (CONFIG_NSS_DEBUG_LEVEL < 4)
+#define nss_trace(fmt, args...)
+#else
+#define nss_trace(fmt, args...) printk(KERN_DEBUG "nss:"fmt, ##args)
+#endif
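+
+/*
+ * Usage example (debug levels are cumulative, e.g. level 3 enables assert,
+ * warning and info):
+ *
+ *	nss_info("%p: NSS core %u initialized", nss_ctx, nss_ctx->id);
+ */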
+
+/**
+ * NSS max values supported
+ */
+#define NSS_MAX_CORES CONFIG_IPQ_NSS_MAX_CORES
+#define NSS_MAX_PHYSICAL_INTERFACES CONFIG_IPQ_NSS_MAX_PHYSICAL_INTERFACES
+#define NSS_MAX_VIRTUAL_INTERFACES CONFIG_IPQ_NSS_MAX_VIRTUAL_INTERFACES
+#define NSS_MAX_NET_INTERFACES (NSS_MAX_PHYSICAL_INTERFACES + NSS_MAX_VIRTUAL_INTERFACES)
+
+/**
+ * Default payload size for NSS buffers
+ */
+#define NSS_NBUF_PAYLOAD_SIZE CONFIG_IPQ_NSS_EMPTY_BUFFER_SIZE
+
+/**
+ * N2H/H2N Queue IDs
+ */
+#define NSS_IF_EMPTY_BUFFER_QUEUE 0
+#define NSS_IF_DATA_QUEUE 1
+#define NSS_IF_CMD_QUEUE 1
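+					/* Note: data and command buffers share the same H2N queue */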
+
+/**
+ * NSS Interrupt Causes
+ */
+#define NSS_INTR_CAUSE_INVALID 0
+#define NSS_INTR_CAUSE_QUEUE 1
+#define NSS_INTR_CAUSE_NON_QUEUE 2
+
+/**
+ * NSS Core Status
+ */
+#define NSS_CORE_STATUS_SUCCESS 0
+#define NSS_CORE_STATUS_FAILURE 1
+#define NSS_CORE_STATUS_FAILURE_QUEUE 2
+
+/**
+ * NSS context magic
+ */
+#define NSS_CTX_MAGIC 0xDEDEDEDE
+
+/**
+ * NSS maximum clients
+ */
+#define NSS_MAX_CLIENTS 12
+
+/**
+ * Interrupt cause processing weights
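+ *	These presumably bound how much work of each cause is handled in a
+ *	single bottom-half pass.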
+ */
+#define NSS_EMPTY_BUFFER_SOS_PROCESSING_WEIGHT 64
+#define NSS_DATA_COMMAND_BUFFER_PROCESSING_WEIGHT 64
+#define NSS_EMPTY_BUFFER_RETURN_PROCESSING_WEIGHT 64
+#define NSS_TX_UNBLOCKED_PROCESSING_WEIGHT 1
+
+/**
+ * Statistics struct
+ *
+ * INFO: These numbers are based on the previous generation chip
+ *	and may change in the future
+ */
+#define NSS_IPV4_CONNECTION_ENTRIES 256
+#define NSS_IPV6_CONNECTION_ENTRIES 256
+#define NSS_L2SWITCH_CONNECTION_ENTRIES 64
+#define NSS_PPPOE_NUM_SESSION_PER_INTERFACE 8
+					/* Maximum number of simultaneous PPPoE sessions per physical interface */
+#define NSS_PPPOE_NUM_SESSION_TOTAL (NSS_MAX_PHYSICAL_INTERFACES * NSS_PPPOE_NUM_SESSION_PER_INTERFACE)
+					/* Total number of PPPoE sessions */
+
+struct nss_ipv4_statistics {
+	uint8_t protocol;			/* Protocol number */
+	int32_t flow_interface;			/* Flow interface number */
+	uint32_t flow_mtu;			/* MTU of flow interface */
+	uint32_t flow_ip;			/* Flow IP address */
+	uint32_t flow_ip_xlate;			/* Flow IP address after NAT translation */
+	uint32_t flow_ident;			/* Flow ident (e.g. port) */
+	uint32_t flow_ident_xlate;		/* Flow ident (e.g. port) after NAT translation */
+	uint16_t flow_pppoe_session_id;		/* Flow direction's PPPoE session ID. */
+	uint16_t flow_pppoe_remote_mac[3];	/* Flow direction's PPPoE Server MAC address */
+	uint64_t flow_accelerated_rx_packets;
+						/* Number of flow interface RX packets accelerated */
+	uint64_t flow_accelerated_rx_bytes;
+						/* Number of flow interface RX bytes accelerated */
+	uint64_t flow_accelerated_tx_packets;
+						/* Number of flow interface TX packets accelerated */
+	uint64_t flow_accelerated_tx_bytes;
+						/* Number of flow interface TX bytes accelerated */
+	int32_t return_interface;		/* Return interface number */
+	uint32_t return_mtu;			/* MTU of return interface */
+	uint32_t return_ip;			/* Return IP address */
+	uint32_t return_ip_xlate;		/* Return IP address after NAT translation */
+	uint32_t return_ident;			/* Return ident (e.g. port) */
+	uint32_t return_ident_xlate;		/* Return ident (e.g. port) after NAT translation */
+	uint16_t return_pppoe_session_id;	/* Return direction's PPPoE session ID. */
+	uint16_t return_pppoe_remote_mac[3];	/* Return direction's PPPoE Server MAC address */
+	uint64_t return_accelerated_rx_packets;
+						/* Number of return interface RX packets accelerated */
+	uint64_t return_accelerated_rx_bytes;
+						/* Number of return interface RX bytes accelerated */
+	uint64_t return_accelerated_tx_packets;
+						/* Number of return interface TX packets accelerated */
+	uint64_t return_accelerated_tx_bytes;
+						/* Number of return interface TX bytes accelerated */
+	uint64_t last_sync;			/* Last sync time as jiffies */
+};
+
+struct nss_ipv6_statistics {
+	uint8_t protocol;			/* Protocol number */
+	int32_t flow_interface;			/* Flow interface number */
+	uint32_t flow_mtu;			/* MTU of flow interface */
+	uint32_t flow_ip[4];			/* Flow IP address */
+	uint32_t flow_ident;			/* Flow ident (e.g. port) */
+	uint16_t flow_pppoe_session_id;		/* Flow direction's PPPoE session ID. */
+	uint16_t flow_pppoe_remote_mac[3];	/* Flow direction's PPPoE Server MAC address */
+	uint64_t flow_accelerated_rx_packets;
+						/* Number of flow interface RX packets accelerated */
+	uint64_t flow_accelerated_rx_bytes;
+						/* Number of flow interface RX bytes accelerated */
+	uint64_t flow_accelerated_tx_packets;
+						/* Number of flow interface TX packets accelerated */
+	uint64_t flow_accelerated_tx_bytes;
+						/* Number of flow interface TX bytes accelerated */
+	uint32_t return_ip[4];			/* Return IP address */
+	uint32_t return_ident;			/* Return ident (e.g. port) */
+	int32_t return_interface;		/* Return interface number */
+	uint32_t return_mtu;			/* MTU of return interface */
+	uint16_t return_pppoe_session_id;	/* Return direction's PPPoE session ID. */
+	uint16_t return_pppoe_remote_mac[3];	/* Return direction's PPPoE Server MAC address */
+	uint64_t return_accelerated_rx_packets;
+						/* Number of return interface RX packets accelerated */
+	uint64_t return_accelerated_rx_bytes;
+						/* Number of return interface RX bytes accelerated */
+	uint64_t return_accelerated_tx_packets;
+						/* Number of return interface TX packets accelerated */
+	uint64_t return_accelerated_tx_bytes;
+						/* Number of return interface TX bytes accelerated */
+	uint64_t last_sync;			/* Last sync time as jiffies */
+};
+
+struct nss_l2switch_statistics {
+	int32_t interface_num;		/* Interface number */
+	uint32_t rx_packet_count;	/* Number of packets RX'd */
+	uint32_t rx_byte_count;		/* Number of bytes RX'd */
+	uint64_t last_sync;		/* Last sync time as jiffies */
+	uint16_t addr[3];		/* MAC address */
+};
+
+struct nss_gmac_statistics {
+	uint64_t rx_bytes;		/* Number of RX bytes */
+	uint64_t rx_packets;		/* Number of RX packets */
+	uint64_t rx_errors;		/* Number of RX errors */
+	uint64_t rx_receive_errors;	/* Number of RX receive errors */
+	uint64_t rx_overflow_errors;	/* Number of RX overflow errors */
+	uint64_t rx_descriptor_errors;	/* Number of RX descriptor errors */
+	uint64_t rx_watchdog_timeout_errors;
+					/* Number of RX watchdog timeout errors */
+	uint64_t rx_crc_errors;		/* Number of RX CRC errors */
+	uint64_t rx_late_collision_errors;
+					/* Number of RX late collision errors */
+	uint64_t rx_dribble_bit_errors;	/* Number of RX dribble bit errors */
+	uint64_t rx_length_errors;	/* Number of RX length errors */
+	uint64_t rx_ip_header_errors;	/* Number of RX IP header errors */
+	uint64_t rx_ip_payload_errors;	/* Number of RX IP payload errors */
+	uint64_t rx_no_buffer_errors;	/* Number of RX no-buffer errors */
+	uint64_t rx_transport_csum_bypassed;
+					/* Number of RX packets where the transport checksum was bypassed */
+	uint64_t tx_bytes;		/* Number of TX bytes */
+	uint64_t tx_packets;		/* Number of TX packets */
+	uint64_t tx_collisions;		/* Number of TX collisions */
+	uint64_t tx_errors;		/* Number of TX errors */
+	uint64_t tx_jabber_timeout_errors;
+					/* Number of TX jabber timeout errors */
+	uint64_t tx_frame_flushed_errors;
+					/* Number of TX frame flushed errors */
+	uint64_t tx_loss_of_carrier_errors;
+					/* Number of TX loss of carrier errors */
+	uint64_t tx_no_carrier_errors;	/* Number of TX no carrier errors */
+	uint64_t tx_late_collision_errors;
+					/* Number of TX late collision errors */
+	uint64_t tx_excessive_collision_errors;
+					/* Number of TX excessive collision errors */
+	uint64_t tx_excessive_deferral_errors;
+					/* Number of TX excessive deferral errors */
+	uint64_t tx_underflow_errors;	/* Number of TX underflow errors */
+	uint64_t tx_ip_header_errors;	/* Number of TX IP header errors */
+	uint64_t tx_ip_payload_errors;	/* Number of TX IP payload errors */
+	uint64_t tx_dropped;		/* Number of TX dropped packets */
+	uint64_t hw_errs[10];		/* GMAC DMA error counters */
+	uint64_t rx_missed;		/* Number of RX packets missed by the DMA */
+	uint64_t fifo_overflows;	/* Number of RX FIFO overflows signalled by the DMA */
+	uint64_t gmac_total_ticks;	/* Total clock ticks spent inside the GMAC */
+	uint32_t gmac_worst_case_ticks;	/* Worst case iteration of the GMAC in ticks */
+	uint64_t gmac_iterations;	/* Number of iterations around the GMAC */
+};
+
+struct nss_pppoe_statistics {
+	struct nss_pppoe_statistics *next;
+					/* Next statistic structure */
+	uint16_t pppoe_session_id;	/* PPPoE session ID on which statistics based */
+	uint8_t pppoe_remote_mac[ETH_ALEN];
+					/* PPPoE server MAC address */
+	uint64_t ipv4_accelerated_rx_packets;
+					/* Number of IPv4 RX packets accelerated */
+	uint64_t ipv4_accelerated_rx_bytes;
+					/* Number of IPv4 RX bytes accelerated */
+	uint64_t ipv4_accelerated_tx_packets;
+					/* Number of IPv4 TX packets accelerated */
+	uint64_t ipv4_accelerated_tx_bytes;
+					/* Number of IPv4 TX bytes accelerated */
+	uint64_t ipv6_accelerated_rx_packets;
+					/* Number of IPv6 RX packets accelerated */
+	uint64_t ipv6_accelerated_rx_bytes;
+					/* Number of IPv6 RX bytes accelerated */
+	uint64_t ipv6_accelerated_tx_packets;
+					/* Number of IPv6 TX packets accelerated */
+	uint64_t ipv6_accelerated_tx_bytes;
+					/* Number of IPv6 TX bytes accelerated */
+	uint64_t exception_events[NSS_EXCEPTION_EVENT_PPPOE_LAST];
+					/* Exception events based on this PPPoE session */
+};
+
+struct nss_private {
+	struct nss_ctx_instance *nss_ctx;
+	uint32_t magic;			/* Used to confirm this private area is an NSS private area */
+	uint32_t status;
+	int32_t interface_num;		/* Interface number */
+	uint64_t host_rx_packets;	/* Number of RX packets received by host OS */
+	uint64_t host_rx_bytes;		/* Number of RX bytes received by host OS */
+	uint64_t host_tx_packets;	/* Number of TX packets sent by host OS */
+	uint64_t host_tx_bytes;		/* Number of TX bytes sent by host OS */
+	uint64_t ipv4_accelerated_rx_packets;
+					/* Accelerated IPv4 RX packets */
+	uint64_t ipv4_accelerated_rx_bytes;
+					/* Accelerated IPv4 RX bytes */
+	uint64_t ipv4_accelerated_tx_packets;
+					/* Accelerated IPv4 TX packets */
+	uint64_t ipv4_accelerated_tx_bytes;
+					/* Accelerated IPv4 TX bytes */
+	uint64_t ipv6_accelerated_rx_packets;
+					/* Accelerated IPv6 RX packets */
+	uint64_t ipv6_accelerated_rx_bytes;
+					/* Accelerated IPv6 RX bytes */
+	uint64_t ipv6_accelerated_tx_packets;
+					/* Accelerated IPv6 TX packets */
+	uint64_t ipv6_accelerated_tx_bytes;
+					/* Accelerated IPv6 TX bytes */
+	uint64_t exception_events_unknown[NSS_EXCEPTION_EVENT_UNKNOWN_LAST];
+					/* Unknown protocol exception events */
+	uint64_t exception_events_ipv4[NSS_EXCEPTION_EVENT_IPV4_LAST];
+					/* IPv4 protocol exception events */
+	uint64_t exception_events_ipv6[NSS_EXCEPTION_EVENT_IPV6_LAST];
+					/* IPv6 protocol exception events */
+};
+
+/*
+ * NSS core state
+ */
+enum nss_core_state {
+	NSS_CORE_STATE_UNINITIALIZED = 0,
+	NSS_CORE_STATE_INITIALIZED
+};
+
+/*
+ * Forward declarations
+ */
+struct nss_top_instance;
+struct nss_ctx_instance;
+struct int_ctx_instance;
+
+/*
+ * Interrupt context instance (one per IRQ per NSS core)
+ */
+struct int_ctx_instance {
+	struct nss_ctx_instance *nss_ctx;
+					/* Back pointer to NSS context of core that
+					owns this interrupt */
+	uint32_t irq;			/* HLOS IRQ number */
+	uint32_t shift_factor;		/* Shift factor for this IRQ number */
+	uint32_t int_cause;		/* Interrupt cause carried forward to BH */
+	struct tasklet_struct bh;	/* Bottom half handler */
+};
+
+/*
+ * H2N descriptor ring information
+ */
+struct hlos_h2n_desc_rings {
+	struct h2n_desc_if_instance desc_ring;	/* Descriptor ring */
+	spinlock_t lock;			/* Lock to save from simultaneous access */
+	uint32_t flags;				/* Flags */
+	uint64_t tx_q_full_cnt;			/* Descriptor queue full count */
+};
+
+#define NSS_H2N_DESC_RING_FLAGS_TX_STOPPED 0x1	/* Tx has been stopped for this queue */
+
+/*
+ * NSS context instance (one per NSS core)
+ */
+struct nss_ctx_instance {
+	struct nss_top_instance *nss_top;
+					/* Back pointer to NSS Top */
+	uint32_t id;			/* Core ID for this instance */
+	uint32_t nmap;			/* Virt mem pointer to CSM register map */
+	uint32_t vmap;			/* Virt mem pointer to virtual register map */
+	uint32_t nphys;			/* Phys mem pointer to CSM register map */
+	uint32_t vphys;			/* Phys mem pointer to virtual register map */
+	enum nss_core_state state;	/* State of NSS core */
+	uint32_t c2c_start;		/* C2C start address */
+	struct int_ctx_instance int_ctx[2];
+					/* Interrupt context instances */
+	struct hlos_h2n_desc_rings h2n_desc_rings[16];
+					/* Host to NSS descriptor rings */
+	struct n2h_desc_if_instance n2h_desc_if[15];
+					/* NSS to Host descriptor rings */
+	uint32_t max_buf_size;		/* Maximum buffer size */
+	nss_queue_decongestion_callback_t queue_decongestion_callback[NSS_MAX_CLIENTS];
+					/* Queue decongestion callbacks */
+	void *queue_decongestion_ctx[NSS_MAX_CLIENTS];
+					/* Queue decongestion callback contexts */
+	spinlock_t decongest_cb_lock;	/* Lock to protect queue decongestion cb table */
+	uint32_t magic;
+					/* Magic protection */
+};
+
+/*
+ * Main NSS context structure (singleton)
+ */
+struct nss_top_instance {
+	uint8_t num_nss;		/* Number of NSS cores supported */
+	uint8_t num_phys_ports;		/* Number of physical ports supported */
+	spinlock_t lock;		/* Big lock for NSS driver */
+	spinlock_t stats_lock;		/* Statistics lock */
+	struct nss_ctx_instance nss[NSS_MAX_CORES];
+					/* NSS contexts */
+
+	/*
+	 * Network processing handler core IDs (CORE0/CORE1)
+	 */
+	uint8_t ipv4_handler_id;
+	uint8_t ipv6_handler_id;
+	uint8_t l2switch_handler_id;
+	uint8_t crypto_handler_id;
+	uint8_t ipsec_handler_id;
+	uint8_t wlan_handler_id;
+	uint8_t phys_if_handler_id[4];
+	nss_ipv4_sync_callback_t ipv4_sync;
+					/* IPv4 sync callback function */
+	nss_ipv6_sync_callback_t ipv6_sync;
+					/* IPv6 sync callback function */
+	nss_l2switch_sync_callback_t l2switch_sync;
+					/* L2switch sync callback function */
+	nss_connection_expire_all_callback_t conn_expire;
+					/* Expire-all-connections callback function */
+	nss_crypto_callback_t crypto_callback;
+					/* crypto interface callback function */
+	nss_phys_if_rx_callback_t phys_if_rx_callback[NSS_MAX_PHYSICAL_INTERFACES];
+					/* Physical interface packet callback functions */
+	nss_phys_if_event_callback_t phys_if_event_callback[NSS_MAX_PHYSICAL_INTERFACES];
+					/* Physical interface event callback functions */
+	nss_profiler_callback_t profiler_callback[NSS_MAX_CORES];
+					/* Profiler interface callback function */
+	void *crypto_ctx;		/* Crypto interface context */
+	void *phys_if_ctx[NSS_MAX_PHYSICAL_INTERFACES];
+					/* Physical interface context */
+	void *profiler_ctx[NSS_MAX_CORES];
+					/* Profiler interface context */
+	uint64_t nbuf_alloc_err;	/* NBUF allocation errors */
+	bool napi_active;		/* Flag indicating if NAPI is currently active or not */
+	bool netdevice_notifier;	/* Flag indicating if netdevice notifier is registered */
+	uint32_t cache_dev_major;	/* Major number of char device */
+	uint64_t last_rx_jiffies;	/* Time of the last RX message from the NSS in jiffies */
+	uint64_t ipv4_accelerated_rx_packets;
+					/* Accelerated IPv4 RX packets */
+	uint64_t ipv4_accelerated_rx_bytes;
+					/* Accelerated IPv4 RX bytes */
+	uint64_t ipv4_accelerated_tx_packets;
+					/* Accelerated IPv4 TX packets */
+	uint64_t ipv4_accelerated_tx_bytes;
+					/* Accelerated IPv4 TX bytes */
+	uint64_t ipv4_connection_create_requests;
+					/* Number of IPv4 connection create requests */
+	uint64_t ipv4_connection_create_collisions;
+					/* Number of IPv4 connection create requests that collided with existing entries */
+	uint64_t ipv4_connection_create_invalid_interface;
+					/* Number of IPv4 connection create requests that had invalid interface */
+	uint64_t ipv4_connection_destroy_requests;
+					/* Number of IPv4 connection destroy requests */
+	uint64_t ipv4_connection_destroy_misses;
+					/* Number of IPv4 connection destroy requests that missed the cache */
+	uint64_t ipv4_connection_hash_hits;
+					/* Number of IPv4 connection hash hits */
+	uint64_t ipv4_connection_hash_reorders;
+					/* Number of IPv4 connection hash reorders */
+	uint64_t ipv4_connection_flushes;
+					/* Number of IPv4 connection flushes */
+	uint64_t ipv4_connection_evictions;
+					/* Number of IPv4 connection evictions */
+	uint64_t ipv6_accelerated_rx_packets;
+					/* Accelerated IPv6 RX packets */
+	uint64_t ipv6_accelerated_rx_bytes;
+					/* Accelerated IPv6 RX bytes */
+	uint64_t ipv6_accelerated_tx_packets;
+					/* Accelerated IPv6 TX packets */
+	uint64_t ipv6_accelerated_tx_bytes;
+					/* Accelerated IPv6 TX bytes */
+	uint64_t ipv6_connection_create_requests;
+					/* Number of IPv6 connection create requests */
+	uint64_t ipv6_connection_create_collisions;
+					/* Number of IPv6 connection create requests that collided with existing entries */
+	uint64_t ipv6_connection_create_invalid_interface;
+					/* Number of IPv6 connection create requests that had invalid interface */
+	uint64_t ipv6_connection_destroy_requests;
+					/* Number of IPv6 connection destroy requests */
+	uint64_t ipv6_connection_destroy_misses;
+					/* Number of IPv6 connection destroy requests that missed the cache */
+	uint64_t ipv6_connection_hash_hits;
+					/* Number of IPv6 connection hash hits */
+	uint64_t ipv6_connection_hash_reorders;
+					/* Number of IPv6 connection hash reorders */
+	uint64_t ipv6_connection_flushes;
+					/* Number of IPv6 connection flushes */
+	uint64_t ipv6_connection_evictions;
+					/* Number of IPv6 connection evictions */
+	uint32_t l2switch_rx_packet_count;
+					/* Number of packets RX'd */
+	uint32_t l2switch_rx_byte_count;
+					/* Number of bytes RX'd */
+	uint32_t l2switch_virtual_rx_packet_count;
+					/* Number of packets RX'd from virtual hosts */
+	uint32_t l2switch_virtual_rx_byte_count;
+					/* Number of bytes RX'd from virtual hosts */
+	uint32_t l2switch_physical_rx_packet_count;
+					/* Number of packets RX'd from physical hosts */
+	uint32_t l2switch_physical_rx_byte_count;
+					/* Number of bytes RX'd from physical hosts */
+	uint32_t l2switch_create_requests;
+					/* Number of l2 switch entry create requests */
+	uint32_t l2switch_create_collisions;
+					/* Number of l2 switch entry create requests that collided with existing entries */
+	uint32_t l2switch_create_invalid_interface;
+					/* Number of l2 switch entry create requests that had invalid interface */
+	uint32_t l2switch_destroy_requests;
+					/* Number of l2 switch entry destroy requests */
+	uint32_t l2switch_destroy_misses;
+					/* Number of l2 switch entry destroy requests that missed the cache */
+	uint32_t l2switch_hash_hits;
+					/* Number of l2 switch entry hash hits */
+	uint32_t l2switch_hash_reorders;
+					/* Number of l2 switch entry hash reorders */
+	uint32_t l2switch_flushes;
+					/* Number of l2 switch entry flushes */
+	uint64_t l2switch_evictions;
+					/* Number of l2 switch entry evictions */
+	uint32_t pppoe_session_create_requests;
+					/* Number of PPPoE session create requests */
+	uint32_t pppoe_session_create_failures;
+					/* Number of PPPoE session create failures */
+	uint32_t pppoe_session_destroy_requests;
+					/* Number of PPPoE session destroy requests */
+	uint32_t pppoe_session_destroy_misses;
+					/* Number of PPPoE session destroy requests that missed the cache */
+	uint64_t pe_queue_dropped;	/* Number of packets dropped because the PE queue is too full */
+	uint64_t pe_total_ticks;	/* Total clock ticks spent inside the PE */
+	uint32_t pe_worst_case_ticks;	/* Worst case iteration of the PE in ticks */
+	uint64_t pe_iterations;		/* Number of iterations around the PE */
+	uint64_t except_queue_dropped;	/* Number of packets dropped because the exception queue is too full */
+	uint64_t except_total_ticks;	/* Total clock ticks spent inside the exception path */
+	uint32_t except_worst_case_ticks;
+					/* Worst case iteration of the exception path in ticks */
+	uint64_t except_iterations;	/* Number of iterations around the exception path */
+	uint32_t l2switch_queue_dropped;
+					/* Number of packets dropped because the L2 switch queue is too full */
+	uint64_t l2switch_total_ticks;	/* Total clock ticks spent inside the L2 switch */
+	uint32_t l2switch_worst_case_ticks;
+					/* Worst case iteration of the L2 switch in ticks */
+	uint64_t l2switch_iterations;	/* Number of iterations around the L2 switch */
+	uint64_t pbuf_alloc_fails;	/* Number of pbuf allocations that have failed */
+	uint64_t pbuf_payload_alloc_fails;
+					/* Number of pbuf allocations that have failed because there were no free payloads */
+	struct nss_gmac_statistics nss_gmac_statistics[NSS_MAX_PHYSICAL_INTERFACES];
+	struct nss_ipv4_statistics nss_ipv4_statistics[NSS_IPV4_CONNECTION_ENTRIES];
+	struct nss_ipv6_statistics nss_ipv6_statistics[NSS_IPV6_CONNECTION_ENTRIES];
+	struct nss_l2switch_statistics nss_l2switch_statistics[NSS_L2SWITCH_CONNECTION_ENTRIES];
+	struct nss_pppoe_statistics nss_pppoe_statistics[NSS_PPPOE_NUM_SESSION_TOTAL];
+					/* PPPoE interface statistics array */
+	struct nss_pppoe_statistics *nss_pppoe_statistics_head;
+					/* Head of PPPoE interface statistics */
+};
+
+extern void nss_core_handle_bh (unsigned long ctx);
+extern int32_t nss_core_send_buffer (struct nss_ctx_instance *nss_ctx, uint32_t if_num,
+					struct sk_buff *nbuf, uint16_t qid,
+					uint8_t buffer_type, uint16_t flags);
+extern int32_t nss_core_send_crypto(struct nss_ctx_instance *nss_ctx, void *buf, uint32_t buf_paddr, uint16_t len);
+extern void nss_rx_handle_status_pkt(struct nss_ctx_instance *nss_ctx, struct sk_buff *nbuf);
+extern void nss_rx_handle_crypto_buf(struct nss_ctx_instance *nss_ctx, uint32_t buf, uint32_t paddr, uint32_t len);
+
+#endif /* __NSS_CORE_H */
diff --git a/nss_hal/include/nss_hal.h b/nss_hal/include/nss_hal.h
new file mode 100755
index 0000000..e1ac3dd
--- /dev/null
+++ b/nss_hal/include/nss_hal.h
@@ -0,0 +1,69 @@
+/* * Copyright (c) 2013 Qualcomm Atheros, Inc. * */
+
+/**
+ * nss_hal.h
+ *	NSS HAL public declarations.
+ */
+
+#ifndef __NSS_HAL_H
+#define __NSS_HAL_H
+
+#include <nss_hal_pvt.h>
+
+/*
+ * nss_hal_common_reset()
+ */
+static inline void nss_hal_common_reset(void)
+{
+	__nss_hal_common_reset();
+}
+
+/*
+ * nss_hal_core_reset()
+ */
+static inline void nss_hal_core_reset(uint32_t core_id, uint32_t map, uint32_t addr)
+{
+	__nss_hal_core_reset(core_id, map, addr);
+}
+
+/*
+ * nss_hal_read_interrupt_cause()
+ */
+static inline void nss_hal_read_interrupt_cause(uint32_t map, uint32_t irq, uint32_t shift_factor, uint32_t *cause)
+{
+	__nss_hal_read_interrupt_cause(map, irq, shift_factor, cause);
+}
+
+/*
+ * nss_hal_clear_interrupt_cause()
+ */
+static inline void nss_hal_clear_interrupt_cause(uint32_t map, uint32_t irq, uint32_t shift_factor, uint32_t cause)
+{
+	__nss_hal_clear_interrupt_cause(map, irq, shift_factor, cause);
+}
+
+/*
+ * nss_hal_disable_interrupt()
+ */
+static inline void nss_hal_disable_interrupt(uint32_t map, uint32_t irq, uint32_t shift_factor, uint32_t cause)
+{
+	__nss_hal_disable_interrupt(map, irq, shift_factor, cause);
+}
+
+/*
+ * nss_hal_enable_interrupt()
+ */
+static inline void nss_hal_enable_interrupt(uint32_t map, uint32_t irq, uint32_t shift_factor, uint32_t cause)
+{
+	__nss_hal_enable_interrupt(map, irq, shift_factor, cause);
+}
+
+/*
+ * nss_hal_send_interrupt()
+ */
+static inline void nss_hal_send_interrupt(uint32_t map, uint32_t irq, uint32_t cause)
+{
+	__nss_hal_send_interrupt(map, irq, cause);
+}
+
+#endif /* __NSS_HAL_H */
diff --git a/nss_hal/ipq806x/nss_clocks.h b/nss_hal/ipq806x/nss_clocks.h
new file mode 100755
index 0000000..8ccf75f
--- /dev/null
+++ b/nss_hal/ipq806x/nss_clocks.h
@@ -0,0 +1,739 @@
+/* Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* * Copyright (c) 2013 Qualcomm Atheros, Inc. * */
+
+/*
+ * NOTE: This file is a placeholder for now and needs to be replaced
+ *	by proper clock initialization code.
+ */
+
+#ifndef __NSS_CLOCKS_H
+#define __NSS_CLOCKS_H
+
+#include <asm/io.h>
+#include <mach/msm_iomap.h>
+
+#define REG(off)	(MSM_CLK_CTL_BASE + (off))
+#define REG_GCC(off)	(MSM_APCS_GCC_BASE + (off))
+
+/* Peripheral clock registers. */
+#define GCC_RPU_CR				REG(0x0F80)
+#define GCC_RPU_EAR				REG(0x0F84)
+#define GCC_RPU_ESR				REG(0x0F88)
+#define GCC_RPU_ESRRESTORE			REG(0x0F8C)
+#define GCC_RPU_ESYNR0				REG(0x0F90)
+#define GCC_RPU_ESYNR1				REG(0x0F94)
+#define GCC_RPU_REV				REG(0x0FF4)
+#define GCC_RPU_IDR				REG(0x0FF8)
+#define GCC_RPU_RPU_ACR				REG(0x0FFC)
+#define AFAB_CLK_SRC_ACR			REG(0x1000)
+#define QDSS_STM_CLK_ACR			REG(0x100C)
+#define AFAB_CORE_CLK_ACR			REG(0x1010)
+#define SCSS_ACR				REG(0x1014)
+#define AFAB_EBI1_S_ACLK_ACR			REG(0x1018)
+#define AFAB_AXI_S_FCLK_ACR			REG(0x1020)
+#define SFAB_CORE_CLK_ACR			REG(0x1024)
+#define SFAB_AXI_S_FCLK_ACR			REG(0x1028)
+#define SFAB_AHB_S_FCLK_ACR			REG(0x102C)
+#define QDSS_AT_CLK_ACR				REG(0x1030)
+#define QDSS_TRACECLKIN_CLK_ACR			REG(0x1034)
+#define QDSS_TSCTR_CLK_ACR			REG(0x1038)
+#define SFAB_ADM0_M_ACLK_ACR			REG(0x103C)
+#define ADM0_ACR				REG(0x1040)
+#define QDSS_RESETS_ACR				REG(0x104C)
+#define IMEM0_ACLK_ACR				REG(0x1050)
+#define QDSS_HCLK_ACR				REG(0x1054)
+#define PCIE_ACR				REG(0x1058)
+#define SFAB_CLK_SRC_ACR			REG(0x105C)
+#define SFAB_LPASS_Q6_ACLK_ACR			REG(0x1074)
+#define SFAB_AFAB_M_ACLK_ACR			REG(0x107C)
+#define AFAB_SFAB_M_ACLK_ACR			REG(0x1084)
+#define SFAB_SATA_S_HCLK_ACR			REG(0x1090)
+#define DFAB_ACR				REG(0x1094)
+#define SFAB_DFAB_M_ACLK_ACR			REG(0x10A0)
+#define DFAB_SFAB_M_ACLK_ACR			REG(0x10A4)
+#define DFAB_SWAY_ACR				REG(0x10A8)
+#define DFAB_ARB_ACR				REG(0x10AC)
+#define PPSS_ACR				REG(0x10B0)
+#define PMEM_ACR				REG(0x10B4)
+#define DMA_BAM_ACR				REG(0x10B8)
+#define SIC_HCLK_ACR				REG(0x10BC)
+#define SPS_TIC_HCLK_ACR			REG(0x10C0)
+#define CFPB_ACR				REG(0x10C8)
+#define SFAB_CFPB_M_HCLK_ACR			REG(0x10D0)
+#define CFPB_MASTER_HCLK_ACR			REG(0x10D4)
+#define SFAB_CFPB_S_HCLK_ACR			REG(0x10D8)
+#define CFPB_SPLITTER_HCLK_ACR			REG(0x10DC)
+#define TSIF_ACR				REG(0x10E0)
+#define CE1_HCLK_ACR				REG(0x10E4)
+#define CE2_HCLK_ACR				REG(0x10E8)
+#define SFPB_HCLK_ACR				REG(0x10EC)
+#define SFAB_SFPB_M_HCLK_ACR			REG(0x10F0)
+#define SFAB_SFPB_S_HCLK_ACR			REG(0x10F4)
+#define RPM_ACR					REG(0x10F8)
+#define RPM_MSG_RAM_ACR				REG(0x10FC)
+#define PMIC_ACR				REG(0x1100)
+#define SDCn_ACR(n)				REG(0x1104+4*(n-1))
+#define SDC1_ACR				REG(0x1104)
+#define ACC_ACR					REG(0x111C)
+#define USB_HS1_ACR				REG(0x1120)
+#define USB_HSIC_ACR				REG(0x1124)
+#define DIM_BUS_ACR				REG(0x1128)
+#define USB_FS1_ACR				REG(0x112C)
+#define GSBI_COMMON_SIM_CLK_ACR			REG(0x1134)
+#define GSBIn_ACR(n)				REG(0x1138+4*(n-1))
+#define GSBI1_ACR				REG(0x1138)
+#define USB_HSIC_HSIC_CLK_CTL_ACR		REG(0x1168)
+#define SPDM_ACR				REG(0x116C)
+#define SEC_CTRL_ACR				REG(0x1170)
+#define TLMM_ACR				REG(0x1174)
+#define SATA_ACR				REG(0x1180)
+#define SATA_ACLK_ACR				REG(0x1184)
+#define SATA_PHY_CFG_ACR			REG(0x1188)
+#define GSS_ACR					REG(0x118C)
+#define TSSC_CLK_ACR				REG(0x1194)
+#define PDM_ACR					REG(0x1198)
+#define GPn_ACR(n)				REG(0x11A0+4*(n))
+#define GP0_ACR					REG(0x11A0)
+#define MPM_ACR					REG(0x11B4)
+#define RINGOSC_ACR				REG(0x11B8)
+#define EBI1_ACR				REG(0x11BC)
+#define SFAB_SMPSS_S_HCLK_ACR			REG(0x11C0)
+#define SCSS_DBG_STATUS_ACR			REG(0x11CC)
+#define PRNG_ACR				REG(0x11D0)
+#define PXO_SRC_CLK_CTL_ACR			REG(0x11D4)
+#define LPASS_XO_SRC_CLK_CTL_ACR		REG(0x11D8)
+#define GLOBAL_BUS_NS_ACR			REG(0x11DC)
+#define PLL11_DIV_SRC_ACR			REG(0x11E4)
+#define SPDM_CY_CLK_CTL_ACR			REG(0x11EC)
+#define RESET_ACR				REG(0x11F0)
+#define CLK_DBG_ACR				REG(0x11F4)
+#define CLK_HALT_ACR				REG(0x11F8)
+#define RPM_CLK_VOTE_ACR			REG(0x11FC)
+#define LPA_Q6_CLK_VOTE_ACR			REG(0x1204)
+#define APCS_CLK_VOTE_ACR			REG(0x1208)
+#define SPARE_CLK_VOTE_ACR			REG(0x120C)
+#define APCS_U_CLK_VOTE_ACR			REG(0x1210)
+#define PLL0_ACR				REG(0x1218)
+#define PLL5_ACR				REG(0x121C)
+#define PLL8_ACR				REG(0x1228)
+#define GPLL1_ACR				REG(0x122C)
+#define EBI1_PLL_ACR				REG(0x1230)
+#define PLL18_ACR				REG(0x1234)
+#define PLL14_ACR				REG(0x1238)
+#define SC_PLL0_ACR				REG(0x1240)
+#define SC_PLL1_ACR				REG(0x1248)
+#define SC_PLL2_ACR				REG(0x1250)
+#define SC_PLL3_ACR				REG(0x1258)
+#define SC_L2_PLL_ACR				REG(0x1260)
+#define PLL_LOCK_DET_ACR			REG(0x1284)
+#define PLL_ENA_SPARE_ACR			REG(0x128C)
+#define PLL_ENA_GSS_ACR				REG(0x1290)
+#define PLL_ENA_RPM_ACR				REG(0x1294)
+#define PLL_ENA_APCS_ACR			REG(0x1298)
+#define PLL_ENA_APCS_U_ACR			REG(0x129C)
+#define PLL_ENA_RIVA_ACR			REG(0x12A0)
+#define PLL_ENA_LPASS_DSP_ACR			REG(0x12A4)
+#define PLL_ENA_SPS_ACR				REG(0x12A8)
+#define FABS_RESET_ACR				REG(0x12AC)
+#define RIVA_ACR				REG(0x12BC)
+#define XPU_ACR					REG(0x12C0)
+#define TSENS_ACR				REG(0x12C4)
+#define TSENS_CONFIG_ACR			REG(0x12C8)
+#define TSENS_STATUS_ACR			REG(0x12CC)
+#define CE5_CORE_CLK_ACR			REG(0x12D8)
+#define SFAB_AHB_S_FCLK2_ACR			REG(0x12DC)
+#define USB_HS3_ACR				REG(0x12E0)
+#define USB_HS4_ACR				REG(0x12E4)
+#define GSS_CLK_VOTE_ACR			REG(0x12E8)
+#define RIVA_CLK_VOTE_ACR			REG(0x12FC)
+#define SPDM_CY_CLK_CTL2_ACR			REG(0x1300)
+#define APCS_WDOG_EXPIRED_ENA_ACR		REG(0x1320)
+#define RPM_WDOG_EXPIRED_ENA_ACR		REG(0x1340)
+#define PCIE_ALT_REF_CLK_ACR			REG(0x1344)
+#define SPARE0_ACR				REG(0x1348)
+#define SPARE1_ACR				REG(0x134C)
+#define PCIE_1_ACR				REG(0x1350)
+#define PCIE_1_ALT_REF_CLK_ACR			REG(0x1354)
+#define PCIE_2_ACR				REG(0x1358)
+#define PCIE_2_ALT_REF_CLK_ACR			REG(0x135C)
+#define EBI2_CLK_ACR				REG(0x1360)
+#define USB30_CLK_ACR				REG(0x1364)
+#define USB30_1_CLK_ACR				REG(0x1368)
+#define NSS_RESET_ACR				REG(0x136C)
+#define NSSFAB0_CLK_SRC_ACR			REG(0x1370)
+#define NSSFB0_CLK_CTL_ACR			REG(0x1374)
+#define NSSFAB_GLOBAL_BUS_NS_ACR		REG(0x1378)
+#define NSSFAB1_CLK_SRC_ACR			REG(0x137C)
+#define NSSFB1_CLK_CTL_ACR			REG(0x1380)
+#define CLK_HALT_1_ACR				REG(0x1384)
+#define UBI32_MPT_CLK_CTL_ACR			REG(0x1388)
+#define CE5_HCLK_ACR				REG(0x138C)
+#define NSSFPB_CLK_CTL_ACR			REG(0x1390)
+#define GMAC_COREn_ACR(n)			REG(0x1394+4*(n-1))
+#define GMAC_CORE1_ACR				REG(0x1394)
+#define AFAB_CLK_SRC0_NS			REG(0x2000)
+#define AFAB_CLK_SRC1_NS			REG(0x2004)
+#define AFAB_CLK_SRC_CTL			REG(0x2008)
+#define QDSS_STM_CLK_CTL			REG(0x2060)
+#define QDSS_REQ_STATUS				REG(0x2064)
+#define AFAB_CORE_CLK_CTL			REG(0x2080)
+#define SCSS_ACLK_CTL				REG(0x20A0)
+#define SCSS_HCLK_CTL				REG(0x20A4)
+#define SCSS_XO_SRC_CLK_CTL			REG(0x20AC)
+#define SCSS_AFAB_PORT_RESET			REG(0x20B8)
+#define AFAB_EBI1_CH0_ACLK_CTL			REG(0x20C0)
+#define AFAB_EBI1_CH1_ACLK_CTL			REG(0x20C4)
+#define AFAB_AXI_S0_FCLK_CTL			REG(0x2100)
+#define AFAB_AXI_S1_FCLK_CTL			REG(0x2104)
+#define AFAB_AXI_S2_FCLK_CTL			REG(0x2108)
+#define AFAB_AXI_S3_FCLK_CTL			REG(0x210C)
+#define AFAB_AXI_S4_FCLK_CTL			REG(0x2110)
+#define SFAB_CORE_CLK_CTL			REG(0x2120)
+#define SFAB_AXI_S0_FCLK_CTL			REG(0x2140)
+#define SFAB_AXI_S1_FCLK_CTL			REG(0x2144)
+#define SFAB_AXI_S2_FCLK_CTL			REG(0x2148)
+#define SFAB_AXI_S3_FCLK_CTL			REG(0x214C)
+#define SFAB_AXI_S4_FCLK_CTL			REG(0x2150)
+#define SFAB_AXI_S5_FCLK_CTL			REG(0x2154)
+#define SFAB_AHB_S0_FCLK_CTL			REG(0x2160)
+#define SFAB_AHB_S1_FCLK_CTL			REG(0x2164)
+#define SFAB_AHB_S2_FCLK_CTL			REG(0x2168)
+#define SFAB_AHB_S3_FCLK_CTL			REG(0x216C)
+#define SFAB_AHB_S4_FCLK_CTL			REG(0x2170)
+#define SFAB_AHB_S5_FCLK_CTL			REG(0x2174)
+#define SFAB_AHB_S6_FCLK_CTL			REG(0x2178)
+#define SFAB_AHB_S7_FCLK_CTL			REG(0x217C)
+#define QDSS_AT_CLK_SRC0_NS			REG(0x2180)
+#define QDSS_AT_CLK_SRC1_NS			REG(0x2184)
+#define QDSS_AT_CLK_SRC_CTL			REG(0x2188)
+#define QDSS_AT_CLK_NS				REG(0x218C)
+#define QDSS_AT_CLK_FS				REG(0x2190)
+#define QDSS_TRACECLKIN_CLK_SRC0_NS		REG(0x21A0)
+#define QDSS_TRACECLKIN_CLK_SRC1_NS		REG(0x21A4)
+#define QDSS_TRACECLKIN_CLK_SRC_CTL		REG(0x21A8)
+#define QDSS_TRACECLKIN_CTL			REG(0x21AC)
+#define QDSS_TSCTR_CLK_SRC0_NS			REG(0x21C0)
+#define QDSS_TSCTR_CLK_SRC1_NS			REG(0x21C4)
+#define QDSS_TSCTR_CLK_SRC_CTL			REG(0x21C8)
+#define QDSS_TSCTR_CTL				REG(0x21CC)
+#define SFAB_ADM0_M0_ACLK_CTL			REG(0x21E0)
+#define SFAB_ADM0_M1_ACLK_CTL			REG(0x21E4)
+#define SFAB_ADM0_M2_HCLK_CTL			REG(0x21E8)
+#define ADM0_CLK_CTL				REG(0x2200)
+#define ADM0_CLK_FS				REG(0x2204)
+#define ADM0_PBUS_CLK_CTL_REG			REG(0x2208)
+#define ADM0_RESET				REG(0x220C)
+#define QDSS_RESETS				REG(0x2260)
+#define IMEM0_ACLK_CTL				REG(0x2280)
+#define IMEM0_ACLK_FS				REG(0x2284)
+#define QDSS_HCLK_CTL				REG(0x22A0)
+#define PCIE_ACLK_CTL_REG			REG(0x22C0)
+#define PCIE_ACLK_FS				REG(0x22C4)
+#define PCIE_AUX_CLK_CTL			REG(0x22C8)
+#define PCIE_HCLK_CTL_REG			REG(0x22CC)
+#define PCIE_PCLK_CTL_REG			REG(0x22D0)
+#define PCIE_PCLK_FS				REG(0x22D4)
+#define PCIE_SFAB_PORT_RESET			REG(0x22D8)
+#define PCIE_RESET				REG(0x22DC)
+#define SFAB_CLK_SRC0_NS			REG(0x22E0)
+#define SFAB_CLK_SRC1_NS			REG(0x22E4)
+#define SFAB_CLK_SRC_CTL			REG(0x22E8)
+#define SFAB_LPASS_Q6_ACLK_CTL			REG(0x23A0)
+#define SFAB_AFAB_M_ACLK_CTL			REG(0x23E0)
+#define AFAB_SFAB_M0_ACLK_CTL			REG(0x2420)
+#define AFAB_SFAB_M1_ACLK_CTL			REG(0x2424)
+#define SFAB_SATA_S_HCLK_CTL_REG		REG(0x2480)
+#define DFAB_CLK_SRC0_NS			REG(0x24A0)
+#define DFAB_CLK_SRC1_NS			REG(0x24A4)
+#define DFAB_CLK_SRC_CTL			REG(0x24A8)
+#define DFAB_CORE_CLK_CTL			REG(0x24AC)
+#define SFAB_DFAB_M_ACLK_CTL			REG(0x2500)
+#define DFAB_SFAB_M_ACLK_CTL			REG(0x2520)
+#define DFAB_SWAY0_HCLK_CTL			REG(0x2540)
+#define DFAB_SWAY1_HCLK_CTL			REG(0x2544)
+#define DFAB_ARB0_HCLK_CTL			REG(0x2560)
+#define DFAB_ARB1_HCLK_CTL			REG(0x2564)
+#define PPSS_HCLK_CTL				REG(0x2580)
+#define PPSS_HCLK_FS				REG(0x2584)
+#define PPSS_PROC_CLK_CTL			REG(0x2588)
+#define PPSS_TIMER0_CLK_CTL			REG(0x258C)
+#define PPSS_TIMER1_CLK_CTL			REG(0x2590)
+#define PPSS_RESET				REG(0x2594)
+#define PMEM_ACLK_CTL_REG			REG(0x25A0)
+#define PMEM_ACLK_FS				REG(0x25A4)
+#define DMA_BAM_HCLK_CTL			REG(0x25C0)
+#define DMA_BAM_HCLK_FS				REG(0x25C4)
+#define SIC_HCLK_CTL				REG(0x25E0)
+#define SPS_TIC_HCLK_CTL			REG(0x2600)
+#define CFPB_2X_CLK_SRC0_NS			REG(0x2640)
+#define CFPB_2X_CLK_SRC1_NS			REG(0x2644)
+#define CFPB_2X_CLK_SRC_CTL			REG(0x2648)
+#define CFPB_CLK_NS				REG(0x264C)
+#define CFPB0_HCLK_CTL				REG(0x2650)
+#define CFPB1_HCLK_CTL				REG(0x2654)
+#define CFPB2_HCLK_CTL				REG(0x2658)
+#define SFAB_CFPB_M_HCLK_CTL			REG(0x2680)
+#define CFPB_MASTER_HCLK_CTL			REG(0x26A0)
+#define SFAB_CFPB_S_HCLK_CTL			REG(0x26C0)
+#define CFPB_SPLITTER_HCLK_CTL			REG(0x26E0)
+#define TSIF_HCLK_CTL_REG			REG(0x2700)
+#define TSIF_HCLK_FS				REG(0x2704)
+#define TSIF_INACTIVITY_TIMERS_CLK_CTL		REG(0x2708)
+#define TSIF_REF_CLK_MD_REG			REG(0x270C)
+#define TSIF_REF_CLK_NS_REG			REG(0x2710)
+#define CE1_HCLK_CTL_REG			REG(0x2720)
+#define CE1_CORE_CLK_CTL_REG			REG(0x2724)
+#define CE1_SLEEP_CLK_CTL			REG(0x2728)
+#define CE2_HCLK_CTL				REG(0x2740)
+#define CE2_CORE_CLK_CTL			REG(0x2744)
+#define SFPB_HCLK_SRC0_NS			REG(0x2760)
+#define SFPB_HCLK_SRC1_NS			REG(0x2764)
+#define SFPB_HCLK_SRC_CTL			REG(0x2768)
+#define SFPB_HCLK_CTL				REG(0x276C)
+#define SFAB_SFPB_M_HCLK_CTL			REG(0x2780)
+#define SFAB_SFPB_S_HCLK_CTL			REG(0x27A0)
+#define RPM_PROC_CLK_CTL			REG(0x27C0)
+#define RPM_BUS_HCLK_CTL			REG(0x27C4)
+#define RPM_BUS_HCLK_FS				REG(0x27C8)
+#define RPM_SLEEP_CLK_CTL			REG(0x27CC)
+#define RPM_TIMER_CLK_CTL			REG(0x27D0)
+#define RPM_MSG_RAM_HCLK_CTL_REG		REG(0x27E0)
+#define RPM_MSG_RAM_HCLK_FS			REG(0x27E4)
+#define PMIC_ARB0_HCLK_CTL			REG(0x2800)
+#define PMIC_ARB1_HCLK_CTL			REG(0x2804)
+#define PMIC_ARB1_HCLK_FS			REG(0x2808)
+#define PMIC_SSBI2_NS				REG(0x280C)
+#define PMIC_SSBI2_CLK_FS			REG(0x280D)
+#define SDCn_HCLK_CTL_REG(n)			REG(0x2820+32*(n-1))
+#define SDC1_HCLK_CTL				REG(0x2820)
+#define SDCn_HCLK_FS(n)				REG(0x2824+32*(n-1))
+#define SDC1_HCLK_FS				REG(0x2824)
+#define SDCn_APPS_CLK_MD_REG(n)			REG(0x2828+32*(n-1))
+#define SDC1_APPS_CLK_MD			REG(0x2828)
+#define SDCn_APPS_CLK_NS_REG(n)			REG(0x282C+32*(n-1))
+#define SDC1_APPS_CLK_NS			REG(0x282C)
+#define SDCn_RESET_REG(n)			REG(0x2830+32*(n-1))
+#define SDC1_RESET				REG(0x2830)
+#define SDCn_APPS_CLK_FS(n)			REG(0x2834+32*(n-1))
+#define SDC1_APPS_CLK_FS			REG(0x2834)
+#define QMC_ACC					REG(0x28E0)
+#define ACC_HPIMEM_RF8441			REG(0x28E4)
+#define ARR_STBY_N				REG(0x28E8)
+#define NSS_ACC_REG				REG(0x28EC)
+#define USB_HS1_HCLK_CTL_REG			REG(0x2900)
+#define USB_HS1_HCLK_FS_REG			REG(0x2904)
+#define USB_HS1_XCVR_FS_CLK_MD_REG		REG(0x2908)
+#define USB_HS1_XCVR_FS_CLK_NS_REG		REG(0x290C)
+#define USB_HS1_RESET_REG			REG(0x2910)
+#define USB_HSIC_HCLK_CTL_REG			REG(0x2920)
+#define USB_HSIC_XCVR_FS_CLK_MD_REG		REG(0x2924)
+#define USB_HSIC_XCVR_FS_CLK_NS_REG		REG(0x2928)
+#define USB_HSIC_SYSTEM_CLK_CTL_REG		REG(0x292C)
+#define USB_HSIC_SYSTEM_CLK_FS			REG(0x2930)
+#define USB_HSIC_RESET_REG			REG(0x2934)
+#define VDD_USB_HSIC_GFS_CTL			REG(0x2938)
+#define VDD_USB_HSIC_GFS_CTL_STATUS		REG(0x293C)
+#define CFPB0_C0_HCLK_CTL			REG(0x2940)
+#define CFPB0_D0_HCLK_CTL			REG(0x2944)
+#define CFPB0_C1_HCLK_CTL			REG(0x2948)
+#define CFPB0_D1_HCLK_CTL			REG(0x294C)
+#define USB_FS1_HCLK_CTL_REG			REG(0x2960)
+#define USB_FS1_XCVR_FS_CLK_MD_REG		REG(0x2964)
+#define USB_FS1_XCVR_FS_CLK_NS_REG		REG(0x2968)
+#define USB_FS1_SYSTEM_CLK_CTL_REG		REG(0x296C)
+#define USB_FS1_SYSTEM_CLK_FS_REG		REG(0x2970)
+#define USB_FS1_RESET_REG			REG(0x2974)
+#define GSBI_COMMON_SIM_CLK_NS			REG(0x29A0)
+#define GSBIn_HCLK_CTL_REG(n)			REG(0x29C0+32*(n-1))
+#define GSBI1_HCLK_CTL				REG(0x29C0)
+#define GSBIn_HCLK_FS(n)			REG(0x29C4+32*(n-1))
+#define GSBI1_HCLK_FS				REG(0x29C4)
+#define GSBIn_QUP_APPS_MD_REG(n)		REG(0x29C8+32*(n-1))
+#define GSBI1_QUP_APPS_MD			REG(0x29C8)
+#define GSBIn_QUP_APPS_NS_REG(n)		REG(0x29CC+32*(n-1))
+#define GSBI1_QUP_APPS_NS			REG(0x29CC)
+#define GSBIn_UART_APPS_MD_REG(n)		REG(0x29D0+32*(n-1))
+#define GSBI1_UART_APPS_MD			REG(0x29D0)
+#define GSBIn_UART_APPS_NS_REG(n)		REG(0x29D4+32*(n-1))
+#define GSBI1_UART_APPS_NS			REG(0x29D4)
+#define GSBIn_SIM_CLK_CTL(n)			REG(0x29D8+32*(n-1))
+#define GSBI1_SIM_CLK_CTL			REG(0x29D8)
+#define GSBIn_RESET_REG(n)			REG(0x29DC+32*(n-1))
+#define GSBI1_RESET				REG(0x29DC)
+#define USB_HSIC_HSIC_CLK_SRC_CTL_REG		REG(0x2B40)
+#define USB_HSIC_HSIC_CLK_CTL_REG		REG(0x2B44)
+#define USB_HSIC_HSIO_CAL_CLK_CTL_REG		REG(0x2B48)
+#define SPDM_CFG_HCLK_CTL			REG(0x2B60)
+#define SPDM_MSTR_HCLK_CTL			REG(0x2B64)
+#define SPDM_FF_CLK_CTL				REG(0x2B68)
+#define SPDM_RESET				REG(0x2B6C)
+#define SEC_CTRL_CLK_CTL			REG(0x2B80)
+#define SEC_CTRL_CLK_FS				REG(0x2B84)
+#define SEC_CTRL_ACC_CLK_SRC0_NS		REG(0x2B88)
+#define SEC_CTRL_ACC_CLK_SRC1_NS		REG(0x2B8C)
+#define SEC_CTRL_ACC_CLK_SRC_CTL		REG(0x2B90)
+#define SEC_CTRL_ACC_CLK_CTL			REG(0x2B94)
+#define SEC_CTRL_ACC_CLK_FS			REG(0x2B98)
+#define TLMM_HCLK_CTL				REG(0x2BA0)
+#define TLMM_CLK_CTL				REG(0x2BA4)
+#define SATA_HCLK_CTL_REG			REG(0x2C00)
+#define SATA_HCLK_FS				REG(0x2C04)
+#define SATA_CLK_SRC_NS_REG			REG(0x2C08)
+#define SATA_RXOOB_CLK_CTL_REG			REG(0x2C0C)
+#define SATA_PMALIVE_CLK_CTL_REG		REG(0x2C10)
+#define SATA_PHY_REF_CLK_CTL_REG		REG(0x2C14)
+#define SATA_SFAB_M_PORT_RESET			REG(0x2C18)
+#define SATA_RESET				REG(0x2C1C)
+#define SATA_ACLK_CTL_REG			REG(0x2C20)
+#define SATA_ACLK_FS				REG(0x2C24)
+#define SATA_PHY_CFG_CLK_CTL_REG		REG(0x2C40)
+#define GSS_SLP_CLK_CTL				REG(0x2C60)
+#define GSS_RESET				REG(0x2C64)
+#define GSS_CLAMP_ENA				REG(0x2C68)
+#define GSS_CXO_SRC_CTL				REG(0x2C74)
+#define TSSC_CLK_CTL_REG			REG(0x2CA0)
+#define PDM_CLK_NS_REG				REG(0x2CC0)
+#define GPn_MD_REG(n)				REG(0x2D00+32*(n))
+#define GP0_MD					REG(0x2D00)
+#define GPn_NS_REG(n)				REG(0x2D24+32*(n))
+#define GP0_NS					REG(0x2D24)
+#define MPM_CLK_CTL				REG(0x2DA0)
+#define MPM_RESET				REG(0x2DA4)
+#define RINGOSC_NS_REG				REG(0x2DC0)
+#define RINGOSC_TCXO_CTL_REG			REG(0x2DC4)
+#define RINGOSC_STATUS_REG			REG(0x2DCC)
+#define EBI1_CLK_SRC0_NS			REG(0x2DE0)
+#define EBI1_CLK_SRC1_NS			REG(0x2DE4)
+#define EBI1_CLK_SRC_CTL			REG(0x2DE8)
+#define EBI1_CLK_CTL				REG(0x2DEC)
+#define EBI1_FRQSW_CTL				REG(0x2DF0)
+#define EBI1_FRQSW_STATUS			REG(0x2DF4)
+#define EBI1_FRQSW_REQ_ACK_TIMER		REG(0x2DF8)
+#define EBI1_XO_SRC_CTL				REG(0x2DFC)
+#define SFAB_SMPSS_S_HCLK_CTL			REG(0x2E00)
+#define SCSS_DBG_STATUS_REQ			REG(0x2E60)
+#define SCSS_DBG_STATUS_CORE_PWRDUP		REG(0x2E64)
+#define USB_PHY0_RESET_REG			REG(0x2E20)	// This is not there in the flat file??
+#define PRNG_CLK_NS_REG				REG(0x2E80)
+#define PXO_SRC_CLK_CTL_REG			REG(0x2EA0)
+#define LPASS_XO_SRC_CLK_CTL			REG(0x2EC0)
+#define GLOBAL_BUS_NS				REG(0x2EE0)
+#define PLL11_DIV_SRC_CTL			REG(0x2F20)
+#define SPDM_CY_PORT0_CLK_CTL			REG(0x2F60)
+#define SPDM_CY_PORT1_CLK_CTL			REG(0x2F64)
+#define SPDM_CY_PORT2_CLK_CTL			REG(0x2F68)
+#define SPDM_CY_PORT3_CLK_CTL			REG(0x2F6C)
+#define SPDM_CY_PORT4_CLK_CTL			REG(0x2F70)
+#define SPDM_CY_PORT5_CLK_CTL			REG(0x2F74)
+#define SPDM_CY_PORT6_CLK_CTL			REG(0x2F78)
+#define SPDM_CY_PORT7_CLK_CTL			REG(0x2F7C)
+#define RESET_ALL				REG(0x2F80)
+#define RESET_STATUS				REG(0x2F84)
+#define PLLTEST_PAD_CFG_REG			REG(0x2FA4)
+#define CLKTEST_PAD_CFG				REG(0x2FA8)
+#define JITTER_PROBE				REG(0x2FAC)
+#define JITTER_PROBE_VAL			REG(0x2FB0)
+#define CLK_HALT_AFAB_SFAB_STATEA_REG		REG(0x2FC0)
+#define CLK_HALT_AFAB_SFAB_STATEB_REG		REG(0x2FC4)
+#define CLK_HALT_DFAB_STATE_REG			REG(0x2FC8)
+#define CLK_HALT_CFPB_STATEA_REG		REG(0x2FCC)
+#define CLK_HALT_CFPB_STATEB_REG		REG(0x2FD0)
+#define CLK_HALT_CFPB_STATEC_REG		REG(0x2FD4)
+#define CLK_HALT_SFPB_MISC_STATE_REG		REG(0x2FD8)
+#define CLK_HALT_GSS_KPSS_MISC_STATE		REG(0x2FDC)
+#define CLK_HALT_MSS_SMPSS_MISC_STATE_REG	CLK_HALT_GSS_KPSS_MISC_STATE
+#define RPM_CLK_BRANCH_ENA_VOTE			REG(0x2FE0)
+#define RPM_CLK_SLEEP_ENA_VOTE			REG(0x2FE4)
+#define LPA_Q6_CLK_BRANCH_ENA_VOTE		REG(0x3020)
+#define LPA_Q6_CLK_SLEEP_ENA_VOTE		REG(0x3024)
+#define APCS_CLK_BRANCH_ENA_VOTE		REG(0x3040)
+#define APCS_CLK_SLEEP_ENA_VOTE			REG(0x3044)
+#define SPARE_CLK_BRANCH_ENA_VOTE		REG(0x3060)
+#define SC0_U_CLK_BRANCH_ENA_VOTE_REG		REG(0x3080)
+#define APCS_U_CLK_SLEEP_ENA_VOTE		REG(0x3084)
+#define PLL0_MODE				REG(0x30C0)
+#define PLL0_L_VAL				REG(0x30C4)
+#define PLL0_M_VAL				REG(0x30C8)
+#define PLL0_N_VAL				REG(0x30CC)
+#define PLL0_TEST_CTL				REG(0x30D0)
+#define PLL0_CONFIG				REG(0x30D4)
+#define PLL0_STATUS				REG(0x30D8)
+#define PLL5_MODE				REG(0x30E0)
+#define PLL5_L_VAL				REG(0x30E4)
+#define PLL5_M_VAL				REG(0x30E8)
+#define PLL5_N_VAL				REG(0x30EC)
+#define PLL5_TEST_CTL				REG(0x30F0)
+#define PLL5_CONFIG				REG(0x30F4)
+#define PLL5_STATUS				REG(0x30F8)
+#define PLL8_MODE				REG(0x3140)
+#define PLL8_L_VAL				REG(0x3144)
+#define PLL8_M_VAL				REG(0x3148)
+#define PLL8_N_VAL				REG(0x314C)
+#define PLL8_TEST_CTL				REG(0x3150)
+#define PLL8_CONFIG				REG(0x3154)
+#define BB_PLL8_STATUS_REG			REG(0x3158)
+#define GPLL1_MODE				REG(0x3160)
+#define GPLL1_L_VAL				REG(0x3164)
+#define GPLL1_M_VAL				REG(0x3168)
+#define GPLL1_N_VAL				REG(0x316C)
+#define GPLL1_TEST_CTL				REG(0x3170)
+#define GPLL1_CONFIG				REG(0x3174)
+#define GPLL1_STATUS				REG(0x3178)
+#define EBI1_PLL_MODE				REG(0x3180)
+#define EBI1_PLL_L_VAL				REG(0x3184)
+#define EBI1_PLL_M_VAL				REG(0x3188)
+#define EBI1_PLL_N_VAL				REG(0x318C)
+#define EBI1_PLL_TEST_CTL			REG(0x3190)
+#define EBI1_PLL_CONFIG				REG(0x3194)
+#define EBI1_PLL_STATUS				REG(0x3198)
+#define PLL18_MODE				REG(0x31A0)
+#define PLL18_L_VAL				REG(0x31A4)
+#define PLL18_M_VAL				REG(0x31A8)
+#define PLL18_N_VAL				REG(0x31AC)
+#define PLL18_TEST_CTL				REG(0x31B0)
+#define PLL18_CONFIG				REG(0x31B4)
+#define PLL18_STATUS				REG(0x31B8)
+#define BB_PLL14_MODE_REG			REG(0x31C0)
+#define BB_PLL14_L_VAL_REG			REG(0x31C4)
+#define BB_PLL14_M_VAL_REG			REG(0x31C8)
+#define BB_PLL14_N_VAL_REG			REG(0x31CC)
+#define PLL14_TEST_CTL				REG(0x31D0)
+#define BB_PLL14_CONFIG_REG			REG(0x31D4)
+#define BB_PLL14_STATUS_REG			REG(0x31D8)
+#define SC_PLL0_MODE				REG(0x3200)
+#define SC_PLL0_CONFIG_CTL			REG(0x3204)
+#define SC_PLL0_L_VAL				REG(0x3208)
+#define SC_PLL0_M_VAL				REG(0x320C)
+#define SC_PLL0_N_VAL				REG(0x3210)
+#define SC_PLL0_DROOP_CTL			REG(0x3214)
+#define SC_PLL0_TEST_CTL			REG(0x3218)
+#define SC_PLL0_STATUS				REG(0x321C)
+#define SC_PLL1_MODE				REG(0x3240)
+#define SC_PLL1_CONFIG_CTL			REG(0x3244)
+#define SC_PLL1_L_VAL				REG(0x3248)
+#define SC_PLL1_M_VAL				REG(0x324C)
+#define SC_PLL1_N_VAL				REG(0x3250)
+#define SC_PLL1_DROOP_CTL			REG(0x3254)
+#define SC_PLL1_TEST_CTL			REG(0x3258)
+#define SC_PLL1_STATUS				REG(0x325C)
+#define SC_PLL2_MODE				REG(0x3280)
+#define SC_PLL2_CONFIG_CTL			REG(0x3284)
+#define SC_PLL2_L_VAL				REG(0x3288)
+#define SC_PLL2_M_VAL				REG(0x328C)
+#define SC_PLL2_N_VAL				REG(0x3290)
+#define SC_PLL2_DROOP_CTL			REG(0x3294)
+#define SC_PLL2_TEST_CTL			REG(0x3298)
+#define SC_PLL2_STATUS				REG(0x329C)
+#define SC_PLL3_MODE				REG(0x32C0)
+#define SC_PLL3_CONFIG_CTL			REG(0x32C4)
+#define SC_PLL3_L_VAL				REG(0x32C8)
+#define SC_PLL3_M_VAL				REG(0x32CC)
+#define SC_PLL3_N_VAL				REG(0x32D0)
+#define SC_PLL3_DROOP_CTL			REG(0x32D4)
+#define SC_PLL3_TEST_CTL			REG(0x32D8)
+#define SC_PLL3_STATUS				REG(0x32DC)
+#define SC_L2_PLL_MODE				REG(0x3300)
+#define SC_L2_PLL_CONFIG_CTL			REG(0x3304)
+#define SC_L2_PLL_L_VAL				REG(0x3308)
+#define SC_L2_PLL_M_VAL				REG(0x330C)
+#define SC_L2_PLL_N_VAL				REG(0x3310)
+#define SC_L2_PLL_DROOP_CTL			REG(0x3314)
+#define SC_L2_PLL_TEST_CTL			REG(0x3318)
+#define SC_L2_PLL_STATUS			REG(0x331C)
+#define PLL_LOCK_DET_STATUS			REG(0x3420)
+#define PLL_LOCK_DET_MASK			REG(0x3424)
+#define PLL_ENA_SPARE				REG(0x3460)
+#define PLL_ENA_GSS				REG(0x3480)
+#define PLL_ENA_RPM				REG(0x34A0)
+#define BB_PLL_ENA_SC0_REG			REG(0x34C0)
+#define PLL_ENA_APCS_U				REG(0x34E0)
+#define PLL_ENA_RIVA				REG(0x3500)
+#define PLL_ENA_LPASS_DSP			REG(0x3520)
+#define PLL_ENA_SPS				REG(0x3540)
+#define FABS_RESET				REG(0x3560)
+#define RIVA_RESET				REG(0x35E0)
+#define XPU_RESET				REG(0x3600)
+#define TSENS_CNTL				REG(0x3620)
+#define TSENS_THRESHOLD				REG(0x3624)
+#define TSENS_S0_STATUS				REG(0x3628)
+#define TSENS_S1_STATUS				REG(0x362C)
+#define TSENS_S2_STATUS				REG(0x3630)
+#define TSENS_S3_STATUS				REG(0x3634)
+#define TSENS_S4_STATUS				REG(0x3638)
+#define TSENS_INT_STATUS			REG(0x363C)
+#define TSENS_CONFIG				REG(0x3640)
+#define TSENS_TEST_CNTL				REG(0x3644)
+#define TSENS_STATUS_CNTL			REG(0x3660)
+#define TSENS_S5_STATUS				REG(0x3664)
+#define TSENS_S6_STATUS				REG(0x3668)
+#define TSENS_S7_STATUS				REG(0x366C)
+#define TSENS_S8_STATUS				REG(0x3670)
+#define TSENS_S9_STATUS				REG(0x3674)
+#define TSENS_S10_STATUS			REG(0x3678)
+#define CE5_CORE_CLK_SRC_CTL			REG(0x36C0)
+#define CE5_CORE_CLK_SRC0_NS			REG(0x36C4)
+#define CE5_CLK_SRC_NS_REG			CE5_CORE_CLK_SRC0_NS
+#define CE5_CORE_CLK_SRC1_NS			REG(0x36C8)
+#define CE5_CORE_CLK_CTL_REG			REG(0x36CC)
+#define CE5_CORE_CLK_FS				REG(0x36D0)
+#define CE3_SLEEP_CLK_CTL			REG(0x36D0)
+#define SFAB_AHB_S8_FCLK_CTL			REG(0x36E0)
+#define USB_HS3_HCLK_CTL_REG			REG(0x3700)
+#define USB_HS3_HCLK_FS_REG			REG(0x3704)
+#define USB_HS3_XCVR_FS_CLK_MD_REG		REG(0x3708)
+#define USB_HS3_XCVR_FS_CLK_NS_REG		REG(0x370C)
+#define USB_HS3_RESET_REG			REG(0x3710)
+#define USB_HS4_HCLK_CTL_REG			REG(0x3720)
+#define USB_HS4_HCLK_FS_REG			REG(0x3724)
+#define USB_HS4_XCVR_FS_CLK_MD_REG		REG(0x3728)
+#define USB_HS4_XCVR_FS_CLK_NS_REG		REG(0x372C)
+#define USB_HS4_RESET_REG			REG(0x3730)
+#define GSS_CLK_BRANCH_ENA_VOTE			REG(0x3740)
+#define GSS_CLK_SLEEP_ENA_VOTE			REG(0x3744)
+#define RIVA_CLK_BRANCH_ENA_VOTE		REG(0x37E4)
+#define SPDM_CY_PORT8_CLK_CTL			REG(0x3800)
+#define APCS_WDT0_CPU0_WDOG_EXPIRED_ENABLE	REG(0x3820)
+#define APCS_WDT1_CPU0_WDOG_EXPIRED_ENABLE	REG(0x3824)
+#define APCS_WDT0_CPU1_WDOG_EXPIRED_ENABLE	REG(0x3828)
+#define APCS_WDT1_CPU1_WDOG_EXPIRED_ENABLE	REG(0x382C)
+#define APCS_WDT0_CPU2_WDOG_EXPIRED_ENABLE	REG(0x3830)
+#define APCS_WDT1_CPU2_WDOG_EXPIRED_ENABLE	REG(0x3834)
+#define APCS_WDT0_CPU3_WDOG_EXPIRED_ENABLE	REG(0x3838)
+#define APCS_WDT1_CPU3_WDOG_EXPIRED_ENABLE	REG(0x383C)
+#define RPM_WDOG_EXPIRED_ENABLE			REG(0x3840)
+#define PCIE_ALT_REF_CLK_NS			REG(0x3860)
+#define SPARE0					REG(0x3880)
+#define SPARE1					REG(0x3884)
+#define SPARE2					REG(0x3888)
+#define SPARE3					REG(0x388C)
+#define SPARE4					REG(0x38A0)
+#define SPARE5					REG(0x38A4)
+#define SPARE6					REG(0x38A8)
+#define SPARE7					REG(0x38AC)
+#define PCIE_1_ACLK_CTL_REG			REG(0x3A80)
+#define PCIE_1_ACLK_FS				REG(0x3A84)
+#define PCIE_1_AUX_CLK_CTL			REG(0x3A88)
+#define PCIE_1_HCLK_CTL_REG			REG(0x3A8C)
+#define PCIE_1_PCLK_CTL_REG			REG(0x3A90)
+#define PCIE_1_PCLK_FS				REG(0x3A94)
+#define PCIE_1_SFAB_PORT_RESET			REG(0x3A98)
+#define PCIE_1_RESET				REG(0x3A9C)
+#define PCIE_1_ALT_REF_CLK_NS			REG(0x3AA0)
+#define PCIE_2_ACLK_CTL_REG			REG(0x3AC0)
+#define PCIE_2_ACLK_FS				REG(0x3AC4)
+#define PCIE_2_AUX_CLK_CTL			REG(0x3AC8)
+#define PCIE_2_HCLK_CTL_REG			REG(0x3ACC)
+#define PCIE_2_PCLK_CTL_REG			REG(0x3AD0)
+#define PCIE_2_PCLK_FS				REG(0x3AD4)
+#define PCIE_2_SFAB_PORT_RESET			REG(0x3AD8)
+#define PCIE_2_RESET				REG(0x3ADC)
+#define PCIE_2_ALT_REF_CLK_NS			REG(0x3AE0)
+#define EBI2_CLK_CTL				REG(0x3B00)
+#define EBI2_CLK_FS				REG(0x3B04)
+#define USB30_ACLK_FS				REG(0x3B20)
+#define USB30_RESET				REG(0x3B24)
+#define USB30_SFAB_PORT_RESET			REG(0x3B28)
+#define USB30_SLEEP_CLK_CTL			REG(0x3B2C)
+#define USB30_MOC_UTMI_CLK_MD			REG(0x3B30)
+#define USB30_MOC_UTMI_CLK_NS			REG(0x3B34)
+#define USB30_MASTER_CLK_MD			REG(0x3B38)
+#define USB30_MASTER_CLK_NS			REG(0x3B3C)
+#define USB30_1_ACLK_FS				REG(0x3B40)
+#define USB30_1_RESET				REG(0x3B44)
+#define USB30_1_SFAB_PORT_RESET			REG(0x3B48)
+#define NSS_RESET_SPARE				REG(0x3B60)
+#define NSSFB0_CLK_SRC_CTL			REG(0x3B80)
+#define NSSFB0_CLK_SRC0_NS			REG(0x3B84)
+#define NSSFB0_CLK_SRC1_NS			REG(0x3B88)
+#define NSSFB0_CLK_CTL				REG(0x3BA0)
+#define NSSFAB_GLOBAL_BUS_NS			REG(0x3BC0)
+#define NSSFB1_CLK_SRC_CTL			REG(0x3BE0)
+#define NSSFB1_CLK_SRC0_NS			REG(0x3BE4)
+#define NSSFB1_CLK_SRC1_NS			REG(0x3BE8)
+#define NSSFB1_CLK_CTL				REG(0x3C00)
+#define CLK_HALT_NSSFAB0_NSSFAB1_STATEA		REG(0x3C20)
+#define UBI32_MPT0_CLK_CTL			REG(0x3C40)
+#define UBI32_MPT1_CLK_CTL			REG(0x3C44)
+#define CE5_HCLK_SRC_CTL			REG(0x3C60)
+#define CE5_HCLK_SRC0_NS			REG(0x3C64)
+#define CE5_HCLK_SRC1_NS			REG(0x3C68)
+#define CE5_HCLK_CTL				REG(0x3C6C)
+#define NSSFPB_CLK_CTL				REG(0x3C80)
+#define NSSFPB_CLK_SRC_CTL			REG(0x3C84)
+#define NSSFPB_CLK_SRC0_NS			REG(0x3C88)
+#define NSSFPB_CLK_SRC1_NS			REG(0x3C8C)
+#define GMAC_COREn_CLK_SRC_CTL(n)		REG(0x3CA0+32*(n-1))
+#define GMAC_CORE1_CLK_SRC_CTL			REG(0x3CA0)
+#define GMAC_COREn_CLK_SRC0_MD(n)		REG(0x3CA4+32*(n-1))
+#define GMAC_CORE1_CLK_SRC0_MD			REG(0x3CA4)
+#define GMAC_COREn_CLK_SRC1_MD(n)		REG(0x3CA8+32*(n-1))
+#define GMAC_CORE1_CLK_SRC1_MD			REG(0x3CA8)
+#define GMAC_COREn_CLK_SRC0_NS(n)		REG(0x3CAC+32*(n-1))
+#define GMAC_CORE1_CLK_SRC0_NS			REG(0x3CAC)
+#define GMAC_COREn_CLK_SRC1_NS(n)		REG(0x3CB0+32*(n-1))
+#define GMAC_CORE1_CLK_SRC1_NS			REG(0x3CB0)
+#define GMAC_COREn_CLK_CTL(n)			REG(0x3CB4+32*(n-1))
+#define GMAC_CORE1_CLK_CTL			REG(0x3CB4)
+#define GMAC_COREn_CLK_FS(n)			REG(0x3CB8+32*(n-1))
+#define GMAC_CORE1_CLK_FS			REG(0x3CB8)
+#define GMAC_COREn_RESET(n)			REG(0x3CBC+32*(n-1))
+#define GMAC_CORE1_RESET			REG(0x3CBC)
+#define UBI32_COREn_CLK_SRC_CTL(n)		REG(0x3D20+32*(n-1))
+#define UBI32_CORE1_CLK_SRC_CTL			REG(0x3D20)
+#define UBI32_COREn_CLK_SRC0_MD(n)		REG(0x3D24+32*(n-1))
+#define UBI32_CORE1_CLK_SRC0_MD			REG(0x3D24)
+#define UBI32_COREn_CLK_SRC1_MD(n)		REG(0x3D28+32*(n-1))
+#define UBI32_CORE1_CLK_SRC1_MD			REG(0x3D28)
+#define UBI32_COREn_CLK_SRC0_NS(n)		REG(0x3D2C+32*(n-1))
+#define UBI32_CORE1_CLK_SRC0_NS			REG(0x3D2C)
+#define UBI32_COREn_CLK_SRC1_NS(n)		REG(0x3D30+32*(n-1))
+#define UBI32_CORE1_CLK_SRC1_NS			REG(0x3D30)
+#define UBI32_COREn_CLK_CTL(n)			REG(0x3D34+32*(n-1))
+#define UBI32_CORE1_CLK_CTL			REG(0x3D34)
+#define UBI32_COREn_CLK_FS(n)			REG(0x3D38+32*(n-1))
+#define UBI32_CORE1_CLK_FS			REG(0x3D38)
+#define UBI32_COREn_RESET_CLAMP(n)		REG(0x3D3C+32*(n-1))
+#define UBI32_CORE1_RESET_CLAMP			REG(0x3D3C)
+#define NSS_250MHZ_CLK_SRC_CTL			REG(0x3D60)
+#define NSS_250MHZ_CLK_SRC0_NS			REG(0x3D64)
+#define NSS_250MHZ_CLK_SRC1_NS			REG(0x3D68)
+#define NSS_250MHZ_CLK_SRC0_MD			REG(0x3D6C)
+#define NSS_250MHZ_CLK_SRC1_MD			REG(0x3D70)
+#define NSS_250MHZ_CLK_CTL			REG(0x3D74)
+#define CE5_ACLK_SRC_CTL			REG(0x3D80)
+#define CE5_ACLK_SRC0_NS			REG(0x3D84)
+#define CE5_ACLK_SRC1_NS			REG(0x3D88)
+#define CE5_ACLK_CTL				REG(0x3D8C)
+#define PLL_ENA_NSS				REG(0x3DA0)
+#define NSSTCM_CLK_SRC_CTL			REG(0x3DC0)
+#define NSSTCM_CLK_SRC0_NS			REG(0x3DC4)
+#define NSSTCM_CLK_SRC1_NS			REG(0x3DC8)
+#define NSSTCM_CLK_FS				REG(0x3DCC)
+#define NSSTCM_CLK_CTL				REG(0x3DD0)
+#define CE5_CORE_0_RESET			REG(0x3E00)
+#define CE5_CORE_1_RESET			REG(0x3E04)
+#define CE5_CORE_2_RESET			REG(0x3E08)
+#define CE5_CORE_3_RESET			REG(0x3E0C)
+#define CE5_AHB_RESET				REG(0x3E10)
+#define NSS_RESET				REG(0x3E20)
+#define GMAC_AHB_RESET				REG(0x3E24)
+#define MACSEC_CORE1_RESET			REG(0x3E28)
+#define MACSEC_CORE2_RESET			REG(0x3E2C)
+#define MACSEC_CORE3_RESET			REG(0x3E30)
+#define NSS_TCM_RESET				REG(0x3E40)
+#define MVS_CNTRL				REG(0x3DF0)
+#define CLK_TEST_REG				REG(0x2FA0)
+#define GCC_APCS_CLK_DIAG			REG_GCC(0x001C)
+
+#endif /* __NSS_CLOCKS_H */
diff --git a/nss_hal/ipq806x/nss_hal_pvt.c b/nss_hal/ipq806x/nss_hal_pvt.c
new file mode 100755
index 0000000..5a808d7
--- /dev/null
+++ b/nss_hal/ipq806x/nss_hal_pvt.c
@@ -0,0 +1,159 @@
+/* * Copyright (c) 2013 Qualcomm Atheros, Inc. * */
+
+/**
+ * nss_hal_pvt.c
+ *	NSS HAL private APIs.
+ */
+
+#include "nss_hal_pvt.h"
+#include "nss_clocks.h"
+
+/*
+ * clk_reg_write_32()
+ *	Write clock register
+ */
+static inline void clk_reg_write_32(void *addr, uint32_t val)
+{
+	writel(val, addr);
+}
+
+/*
+ * __nss_hal_common_reset
+ *	Do reset/clock configuration common to all cores
+ *
+ * WARNING: This function is a placeholder. It will be updated soon.
+ */
+void __nss_hal_common_reset(void)
+{
+	/*
+	 * Enable NSS Fabric1 clock
+	 * PLL0 (800 MHz) with divider set to 4 (effective clock frequency is 200 MHz).
+	 */
+	clk_reg_write_32(NSSFB1_CLK_SRC0_NS, 0x1a);
+	clk_reg_write_32(NSSFB1_CLK_SRC1_NS, 0x1a);
+
+	/*
+	 * NSS Fabric1 Branch enable and fabric clock gating enabled.
+	 */
+	clk_reg_write_32(NSSFB1_CLK_CTL, 0x50);
+
+	/*
+	 * Enable NSS Fabric0 clock
+	 * PLL0 (800 MHz) with divider set to 2 (effective clock frequency is 400 MHz).
+	 */
+	clk_reg_write_32(NSSFB0_CLK_SRC0_NS, 0x0a);
+	clk_reg_write_32(NSSFB0_CLK_SRC1_NS, 0x0a);
+
+	/*
+	 * NSS Fabric0 Branch enable and fabric clock gating enabled.
+	 */
+	clk_reg_write_32(NSSFB0_CLK_CTL, 0x50);
+
+	/*
+	 * Enable NSS TCM clock
+	 * Enable TCM clock root source.
+	 */
+	clk_reg_write_32(NSSTCM_CLK_SRC_CTL, 0x2);
+
+	/*
+	 * PLL0 (800 MHz) with divider set to 2 (effective clock frequency is 400 MHz).
+	 */
+	clk_reg_write_32(NSSTCM_CLK_SRC0_NS, 0xa);
+	clk_reg_write_32(NSSTCM_CLK_SRC1_NS, 0xa);
+
+	/*
+	 * NSS TCM Branch enable and fabric clock gating enabled.
+	 */
+	clk_reg_write_32(NSSTCM_CLK_CTL, 0x50);
+
+	/*
+	 * Enable global NSS clock branches.
+	 * NSS global Fab Branch enable and fabric clock gating enabled.
+	 */
+	clk_reg_write_32(NSSFAB_GLOBAL_BUS_NS, 0xf);
+
+	/*
+	 * Clock source is pll0_out_main (800 MHz). SRC_SEL is 2 (3'b010) and
+	 * the selected src_div is Div-6 (4'b0101).
+	 */
+	clk_reg_write_32(NSSFPB_CLK_SRC0_NS, 0x2a);
+	clk_reg_write_32(NSSFPB_CLK_SRC1_NS, 0x2a);
+
+	/*
+	 * NSS FPB block branch and clock gating enabled.
+	 */
+	clk_reg_write_32(NSSFPB_CLK_CTL, 0x50);
+
+	/*
+	 * Send reset interrupt to NSS
+	 */
+	clk_reg_write_32(NSS_RESET, 0x0);
+}
+
+/*
+ * __nss_hal_core_reset
+ *
+ * WARNING: This function is a placeholder. It will be updated soon.
+ */
+void __nss_hal_core_reset(uint32_t core_id, uint32_t map, uint32_t addr)
+{
+	/*
+	 * UBI coren clock branch enable.
+	 */
+	clk_reg_write_32(UBI32_COREn_CLK_SRC_CTL(core_id), 0x02);
+
+	/*
+	 * M val is 0x01 and NOT_2D value is 0xfd.
+	 */
+	clk_reg_write_32(UBI32_COREn_CLK_SRC0_MD(core_id), 0x100fd);
+	clk_reg_write_32(UBI32_COREn_CLK_SRC1_MD(core_id), 0x100fd);
+
+	/*
+	 * Dual edge, pll0, NOT(N_M) = 0xfe.
+	 */
+	clk_reg_write_32(UBI32_COREn_CLK_SRC0_NS(core_id), 0x00fe0142);
+	clk_reg_write_32(UBI32_COREn_CLK_SRC1_NS(core_id), 0x00fe0142);
+
+	/*
+	 * UBI32 coren clock control branch.
+	 */
+	clk_reg_write_32(UBI32_COREn_CLK_FS(core_id), 0x4f);
+
+	/*
+	 * UBI32 coren clock control branch.
+	 */
+	clk_reg_write_32(UBI32_COREn_CLK_CTL(core_id), 0x10);
+
+	/*
+	 * Enable mpt clock
+	 */
+	clk_reg_write_32(UBI32_MPT0_CLK_CTL, 0x10);
+
+	/*
+	 * Remove ubi32 clamp
+	 */
+	clk_reg_write_32(UBI32_COREn_RESET_CLAMP(core_id), 0x0);
+
+	/*
+	* Apply ubi32 core reset
+	*/
+	nss_write_32(map, NSS_REGS_RESET_CTRL_OFFSET, 1);
+
+	/*
+	 * Program address configuration
+	 */
+	nss_write_32(map, NSS_REGS_CORE_AMC_OFFSET, 1);
+	nss_write_32(map, NSS_REGS_CORE_BAR_OFFSET, 0x3c000000);
+	nss_write_32(map, NSS_REGS_CORE_BOOT_ADDR_OFFSET, addr);
+
+	/*
+	 * Crypto, GMAC and C2C interrupts are level sensitive
+	 */
+	nss_write_32(map, NSS_REGS_CORE_INT_STAT2_TYPE_OFFSET, 0xFFFF);
+	nss_write_32(map, NSS_REGS_CORE_INT_STAT3_TYPE_OFFSET, 0x3FC000);
+
+	/*
+	 * De-assert ubi32 core reset
+	 */
+	nss_write_32(map, NSS_REGS_RESET_CTRL_OFFSET, 0);
+}
diff --git a/nss_hal/ipq806x/nss_hal_pvt.h b/nss_hal/ipq806x/nss_hal_pvt.h
new file mode 100755
index 0000000..eb573be
--- /dev/null
+++ b/nss_hal/ipq806x/nss_hal_pvt.h
@@ -0,0 +1,62 @@
+/* * Copyright (c) 2013 Qualcomm Atheros, Inc. * */
+
+/**
+ * nss_hal_pvt.h
+ *	NSS HAL private declarations for the IPQ806x platform
+ */
+
+#ifndef __NSS_HAL_PVT_H
+#define __NSS_HAL_PVT_H
+
+#include "nss_regs.h"
+#include <linux/types.h>
+
+#define NSS_HAL_SUPPORTED_INTERRUPTS (NSS_REGS_N2H_INTR_STATUS_EMPTY_BUFFER_QUEUE | \
+					NSS_REGS_N2H_INTR_STATUS_DATA_COMMAND_QUEUE | \
+					NSS_REGS_N2H_INTR_STATUS_EMPTY_BUFFERS_SOS)
+
+/*
+ * __nss_hal_read_interrupt_cause()
+ */
+static inline void __nss_hal_read_interrupt_cause(uint32_t map, uint32_t irq __attribute__ ((unused)), uint32_t shift_factor, uint32_t *cause)
+{
+	uint32_t value = nss_read_32(map, NSS_REGS_N2H_INTR_STATUS_OFFSET);
+	*cause = ((value >> shift_factor) & 0x7FFF);
+}
+
+/*
+ * __nss_hal_clear_interrupt_cause()
+ */
+static inline void __nss_hal_clear_interrupt_cause(uint32_t map, uint32_t irq __attribute__ ((unused)), uint32_t shift_factor, uint32_t cause)
+{
+	nss_write_32(map, NSS_REGS_N2H_INTR_CLR_OFFSET, (cause << shift_factor));
+}
+
+/*
+ * __nss_hal_disable_interrupt()
+ */
+static inline void __nss_hal_disable_interrupt(uint32_t map, uint32_t irq __attribute__ ((unused)), uint32_t shift_factor, uint32_t cause)
+{
+	nss_write_32(map, NSS_REGS_N2H_INTR_MASK_CLR_OFFSET, (cause << shift_factor));
+}
+
+/*
+ * __nss_hal_enable_interrupt()
+ */
+static inline void __nss_hal_enable_interrupt(uint32_t map, uint32_t irq __attribute__ ((unused)), uint32_t shift_factor, uint32_t cause)
+{
+	nss_write_32(map, NSS_REGS_N2H_INTR_MASK_SET_OFFSET, (cause << shift_factor));
+}
+
+/*
+ * __nss_hal_send_interrupt()
+ */
+static inline void __nss_hal_send_interrupt(uint32_t map, uint32_t irq __attribute__ ((unused)), uint32_t cause)
+{
+	nss_write_32(map, NSS_REGS_C2C_INTR_SET_OFFSET, cause);
+}
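+
+/*
+ * Illustrative servicing sequence (not part of this patch): a bottom half
+ * would typically use the helpers above roughly as follows, with nmap, irq
+ * and shift_factor taken from the per-interrupt context set up in
+ * nss_init.c. The real handler lives in nss_core.c and may differ.
+ *
+ *	uint32_t cause;
+ *
+ *	__nss_hal_read_interrupt_cause(nmap, irq, shift_factor, &cause);
+ *	__nss_hal_clear_interrupt_cause(nmap, irq, shift_factor, cause);
+ *	... process the NSS_REGS_N2H_INTR_STATUS_* bits set in cause ...
+ *	enable_irq(irq);
+ */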
+
+extern void __nss_hal_core_reset(uint32_t core_id, uint32_t map, uint32_t addr);
+extern void __nss_hal_common_reset(void);
+
+#endif /* __NSS_HAL_PVT_H */
diff --git a/nss_hal/ipq806x/nss_regs.h b/nss_hal/ipq806x/nss_regs.h
new file mode 100755
index 0000000..a8a8c3b
--- /dev/null
+++ b/nss_hal/ipq806x/nss_regs.h
@@ -0,0 +1,71 @@
+/* * Copyright (c) 2013 Qualcomm Atheros, Inc. * */
+
+/**
+ * nss_regs.h
+ *	NSS register definitions.
+ */
+
+#ifndef __NSS_REGS_H
+#define __NSS_REGS_H
+
+#include <linux/types.h>
+#include <asm/io.h>
+
+/*
+ * Hardware register offsets
+ */
+#define NSS_REGS_CORE_ID_OFFSET			0x0000
+#define NSS_REGS_RESET_CTRL_OFFSET		0x0004
+#define NSS_REGS_CORE_BAR_OFFSET		0x0008
+#define NSS_REGS_CORE_AMC_OFFSET		0x000c
+#define NSS_REGS_CORE_BOOT_ADDR_OFFSET		0x0010
+#define NSS_REGS_C2C_INTR_STATUS_OFFSET		0x0014
+#define NSS_REGS_C2C_INTR_SET_OFFSET		0x0018
+#define NSS_REGS_C2C_INTR_CLR_OFFSET		0x001c
+#define NSS_REGS_N2H_INTR_STATUS_OFFSET		0x0020
+#define NSS_REGS_N2H_INTR_SET_OFFSET		0x0024
+#define NSS_REGS_N2H_INTR_CLR_OFFSET		0x0028
+#define NSS_REGS_N2H_INTR_MASK_OFFSET		0x002c
+#define NSS_REGS_N2H_INTR_MASK_SET_OFFSET	0x0030
+#define NSS_REGS_N2H_INTR_MASK_CLR_OFFSET	0x0034
+#define NSS_REGS_CORE_INT_STAT0_TYPE_OFFSET	0x0038
+#define NSS_REGS_CORE_INT_STAT1_TYPE_OFFSET	0x003c
+#define NSS_REGS_CORE_INT_STAT2_TYPE_OFFSET	0x0040
+#define NSS_REGS_CORE_INT_STAT3_TYPE_OFFSET	0x0044
+#define NSS_REGS_CORE_IFETCH_RANGE_OFFSET	0x0048
+
+/*
+ * Defines for N2H interrupts
+ */
+#define NSS_REGS_N2H_INTR_STATUS_EMPTY_BUFFER_QUEUE	0x0001
+#define NSS_REGS_N2H_INTR_STATUS_DATA_COMMAND_QUEUE	0x0002
+#define NSS_REGS_N2H_INTR_STATUS_EMPTY_BUFFERS_SOS	0x0400
+#define NSS_REGS_N2H_INTR_STATUS_TX_UNBLOCKED		0x0800
+
+/*
+ * Defines for H2N interrupts
+ */
+#define NSS_REGS_H2N_INTR_STATUS_EMPTY_BUFFER_QUEUE	0x0001
+#define NSS_REGS_H2N_INTR_STATUS_DATA_COMMAND_QUEUE	0x0002
+#define NSS_REGS_H2N_INTR_STATUS_RESET			0x0400	/** Unused */
+#define NSS_REGS_H2N_INTR_STATUS_TX_UNBLOCKED		0x0800
+
+/*
+ * nss_read_32()
+ *	Read NSS register
+ */
+static inline uint32_t nss_read_32(uint32_t addr, uint32_t offs)
+{
+	return readl((addr + offs));
+}
+
+/*
+ * nss_write_32()
+ *	Write NSS register
+ */
+static inline void nss_write_32(uint32_t addr, uint32_t offs, uint32_t val)
+{
+	writel(val, (addr + offs));
+}
+
+#endif /* __NSS_REGS_H */
diff --git a/nss_hlos_if.h b/nss_hlos_if.h
new file mode 100644
index 0000000..989185f
--- /dev/null
+++ b/nss_hlos_if.h
@@ -0,0 +1,941 @@
+/* * Copyright (c) 2013 Qualcomm Atheros, Inc. * */
+
+/*
+ * nss_hlos_if.h
+ *	NSS to HLOS interface definitions.
+ */
+
+#ifndef __NSS_HLOS_IF_H
+#define __NSS_HLOS_IF_H
+
+/*
+ * LRO modes
+ */
+enum nss_lro_modes {
+	NSS_LRO_MODE_DISABLED, /* Indicates that LRO is not enabled on either direction of this connection */
+	NSS_LRO_MODE_ORIG, /* Indicates that LRO is enabled on original direction of this connection */
+	NSS_LRO_MODE_REPLY /* Indicates that LRO is enabled on reply direction of this connection */
+};
+
+/*
+ * NSS IPv4 rule creation flags.
+ */
+#define NSS_IPV4_RULE_CREATE_FLAG_NO_SEQ_CHECK 0x01
+					/* Do not perform sequence number checks */
+
+/*
+ * The NSS IPv4 rule creation structure.
+ */
+struct nss_ipv4_rule_create {
+	uint8_t protocol;			/* Protocol number */
+	int32_t flow_interface_num;		/* Flow interface number */
+	uint32_t flow_ip;			/* Flow IP address */
+	uint32_t flow_ip_xlate;			/* Translated flow IP address */
+	uint32_t flow_ident;			/* Flow ident (e.g. port) */
+	uint32_t flow_ident_xlate;		/* Translated flow ident (e.g. port) */
+	uint16_t flow_mac[3];			/* Flow MAC address */
+	uint8_t flow_window_scale;		/* Flow direction's window scaling factor */
+	uint32_t flow_max_window;		/* Flow direction's largest seen window */
+	uint32_t flow_end;			/* Flow direction's largest seen sequence + segment length */
+	uint32_t flow_max_end;			/* Flow direction's largest seen ack + max(1, win) */
+	uint32_t flow_mtu;			/* Flow interface's MTU */
+	uint16_t flow_pppoe_session_id;		/* PPPoE session ID. */
+	uint16_t flow_pppoe_remote_mac[3];	/* PPPoE Server MAC address */
+	int32_t return_interface_num;		/* Return interface number */
+	uint32_t return_ip;			/* Return IP address */
+	uint32_t return_ip_xlate;		/* Translated return IP address */
+	uint32_t return_ident;			/* Return ident (e.g. port) */
+	uint32_t return_ident_xlate;		/* Translated return ident (e.g. port) */
+	uint16_t return_mac[3];			/* Return MAC address */
+	uint8_t return_window_scale;		/* Return direction's window scaling factor */
+	uint32_t return_max_window;		/* Return direction's largest seen window */
+	uint32_t return_end;			/* Return direction's largest seen sequence + segment length */
+	uint32_t return_max_end;		/* Return direction's largest seen ack + max(1, win) */
+	uint32_t return_mtu;			/* Return interface's MTU */
+	uint16_t return_pppoe_session_id;	/* PPPoE session ID. */
+	uint16_t return_pppoe_remote_mac[3];	/* PPPoE Server MAC address */
+	uint8_t flags;				/* Bit flags associated with the rule */
+	enum nss_lro_modes lro_mode;	/* LRO mode for this connection */
+};
+
+/*
+ * The NSS IPv4 rule destruction structure.
+ */
+struct nss_ipv4_rule_destroy {
+	uint8_t protocol;		/* Protocol number */
+	uint32_t flow_ip;		/* Flow IP address */
+	uint32_t flow_ident;		/* Flow ident (e.g. port) */
+	uint32_t return_ip;		/* Return IP address */
+	uint32_t return_ident;		/* Return ident (e.g. port) */
+};
+
+/*
+ * NSS IPv6 rule creation flags.
+ */
+#define NSS_IPV6_RULE_CREATE_FLAG_NO_SEQ_CHECK 0x01
+					/* Do not perform sequence number checks */
+
+/*
+ * The NSS IPv6 rule creation structure.
+ */
+struct nss_ipv6_rule_create {
+	uint8_t protocol;			/* Protocol number */
+	int32_t flow_interface_num;		/* Flow interface number */
+	uint32_t flow_ip[4];			/* Flow IP address */
+	uint32_t flow_ident;			/* Flow ident (e.g. port) */
+	uint16_t flow_mac[3];			/* Flow MAC address */
+	uint8_t flow_window_scale;		/* Flow direction's window scaling factor */
+	uint32_t flow_max_window;		/* Flow direction's largest seen window */
+	uint32_t flow_end;			/* Flow direction's largest seen sequence + segment length */
+	uint32_t flow_max_end;			/* Flow direction's largest seen ack + max(1, win) */
+	uint32_t flow_mtu;			/* Flow interface's MTU */
+	uint16_t flow_pppoe_session_id;		/* PPPoE session ID. */
+	uint16_t flow_pppoe_remote_mac[3];	/* PPPoE Server MAC address */
+	int32_t return_interface_num;		/* Return interface number */
+	uint32_t return_ip[4];			/* Return IP address */
+	uint32_t return_ident;			/* Return ident (e.g. port) */
+	uint16_t return_mac[3];			/* Return MAC address */
+	uint8_t return_window_scale;		/* Return direction's window scaling factor */
+	uint32_t return_max_window;		/* Return direction's largest seen window */
+	uint32_t return_end;			/* Return direction's largest seen sequence + segment length */
+	uint32_t return_max_end;		/* Return direction's largest seen ack + max(1, win) */
+	uint32_t return_mtu;			/* Return interface's MTU */
+	uint16_t return_pppoe_session_id;	/* PPPoE session ID. */
+	uint16_t return_pppoe_remote_mac[3];	/* PPPoE Server MAC address */
+	uint8_t flags;				/* Bit flags associated with the rule */
+};
+
+/*
+ * The NSS IPv6 rule destruction structure.
+ */
+struct nss_ipv6_rule_destroy {
+	uint8_t protocol;		/* Protocol number */
+	uint32_t flow_ip[4];		/* Flow IP address */
+	uint32_t flow_ident;		/* Flow ident (e.g. port) */
+	uint32_t return_ip[4];		/* Return IP address */
+	uint32_t return_ident;		/* Return ident (e.g. port) */
+};
+
+/*
+ * L2 switch entry creation structure.
+ */
+struct nss_l2switch_rule_create {
+	int32_t interface_num;		/* Interface number */
+	uint16_t addr[3];		/* Destination MAC address */
+	uint8_t state;			/* State of interface */
+	uint8_t priority;		/* Priority of interface */
+};
+
+/*
+ * L2 switch entry destruction structure.
+ */
+struct nss_l2switch_rule_destroy {
+	int32_t interface_num;		/* Interface number */
+	uint16_t mac_addr[3];		/* Destination MAC address */
+};
+
+/*
+ * The NSS MAC address structure.
+ */
+struct nss_mac_address_set {
+	int32_t interface_num;		/* physical interface number */
+	uint8_t mac_addr[ETH_ALEN];	/* MAC address */
+};
+
+/*
+ * The NSS virtual interface creation structure.
+ */
+struct nss_virtual_interface_create {
+	int32_t interface_num;		/* Virtual interface number */
+	uint32_t flags;			/* Interface flags */
+	uint8_t mac_addr[ETH_ALEN];	/* MAC address */
+};
+
+/*
+ * The NSS virtual interface destruction structure.
+ */
+struct nss_virtual_interface_destroy {
+	int32_t interface_num;		/* Virtual interface number */
+};
+
+/*
+ * The NSS PPPoE rule destruction structure.
+ */
+struct nss_pppoe_rule_destroy {
+	uint16_t pppoe_session_id;	/* PPPoE session ID */
+	uint16_t pppoe_remote_mac[3];	/* PPPoE server MAC address */
+};
+
+/*
+ * Link state notification to NSS
+ */
+struct nss_if_link_state_notify {
+	uint32_t state;			/* Link State (UP/DOWN), speed/duplex settings */
+	uint32_t interface_num;		/* Interface for which link state will be sent */
+};
+
+/*
+ * Interface open command
+ */
+struct nss_if_open {
+	uint32_t tx_desc_ring;		/* Tx descriptor ring address */
+	uint32_t rx_desc_ring;		/* Rx descriptor ring address */
+	uint32_t interface_num;		/* Interface to open */
+};
+
+/*
+ * Interface close command
+ */
+struct nss_if_close {
+	uint32_t interface_num;		/* Interface to close */
+};
+
+/*
+ * Crypto open command
+ */
+struct nss_crypto_open {
+	uint32_t len;			/* Valid information length */
+	uint8_t buf[1];			/* Buffer */
+};
+
+/*
+ * Crypto close command
+ */
+struct nss_crypto_close {
+	uint32_t eng;			/* Engine number */
+};
+
+/*
+ * The MSS (Maximum Segment Size) structure.
+ */
+struct nss_mss_set {
+	uint16_t mss;			/* MSS value */
+	int32_t interface_num;		/* Interface for which MSS will be set */
+};
+
+/*
+ * NSS Tx Map
+ */
+struct nss_c2c_tx_map {
+	uint32_t c2c_start;		/* Peer core C2C Rx queue start address */
+	uint32_t c2c_int_addr;		/* Peer core C2C interrupt register address */
+};
+
+/*
+ * IPsec Tx rule create
+ */
+struct nss_ipsec_tx_rule_create {
+	uint32_t spi;			/* SPI index */
+	uint32_t replay;		/* Replay value */
+	uint32_t src_addr;		/* Src IP address */
+	uint32_t dest_addr;		/* Dst IP address */
+	uint32_t ses_idx;		/* Session index */
+};
+
+/*
+ * IPsec Tx rule destroy
+ */
+struct nss_ipsec_tx_rule_destroy {
+	uint32_t src_addr;		/* Src IP address */
+	uint32_t dest_addr;		/* Dst IP address */
+	uint32_t ses_idx;		/* Session index */
+};
+
+/*
+ * IPsec Rx rule create
+ */
+struct nss_ipsec_rx_rule_create {
+	uint32_t spi;			/* SPI index */
+	uint32_t replay;		/* Replay value */
+	uint32_t src_addr;		/* Src IP address */
+	uint32_t dest_addr;		/* Dst IP address */
+	uint32_t ses_idx;		/* Session index */
+};
+
+/*
+ * IPsec Rx rule destroy
+ */
+struct nss_ipsec_rx_rule_destroy {
+	uint32_t src_addr;		/* Src IP address */
+	uint32_t dest_addr;		/* Dst IP address */
+	uint32_t ses_idx;		/* Session index */
+};
+
+/*
+ * Profiler Tx command
+ */
+struct nss_profiler_tx {
+	uint32_t len;		/* Valid information length */
+	uint8_t buf[1];		/* Buffer */
+};
+
+/*
+ * Types of TX metadata.
+ */
+enum nss_tx_metadata_types {
+	NSS_TX_METADATA_TYPE_IPV4_RULE_CREATE,
+	NSS_TX_METADATA_TYPE_IPV4_RULE_DESTROY,
+	NSS_TX_METADATA_TYPE_IPV6_RULE_CREATE,
+	NSS_TX_METADATA_TYPE_IPV6_RULE_DESTROY,
+	NSS_TX_METADATA_TYPE_L2SWITCH_RULE_CREATE,
+	NSS_TX_METADATA_TYPE_L2SWITCH_RULE_DESTROY,
+	NSS_TX_METADATA_TYPE_MAC_ADDR_SET,
+	NSS_TX_METADATA_TYPE_VIRTUAL_INTERFACE_CREATE,
+	NSS_TX_METADATA_TYPE_VIRTUAL_INTERFACE_DESTROY,
+	NSS_TX_METADATA_TYPE_DESTROY_ALL_L3_RULES,
+	NSS_TX_METADATA_TYPE_DESTROY_ALL_L2SWITCH_RULES,
+	NSS_TX_METADATA_TYPE_DESTROY_ALL_RULES_BY_PPPOE_SESSION,
+	NSS_TX_METADATA_TYPE_INTERFACE_OPEN,
+	NSS_TX_METADATA_TYPE_INTERFACE_CLOSE,
+	NSS_TX_METADATA_TYPE_INTERFACE_LINK_STATE_NOTIFY,
+	NSS_TX_METADATA_TYPE_CRYPTO_OPEN,
+	NSS_TX_METADATA_TYPE_CRYPTO_CLOSE,
+	NSS_TX_METADATA_TYPE_MSS_SET,
+	NSS_TX_METADATA_TYPE_C2C_TX_MAP,
+	NSS_TX_METADATA_TYPE_IPSEC_TX_RULE_CREATE,
+	NSS_TX_METADATA_TYPE_IPSEC_TX_RULE_DESTROY,
+	NSS_TX_METADATA_TYPE_IPSEC_RX_RULE_CREATE,
+	NSS_TX_METADATA_TYPE_IPSEC_RX_RULE_DESTROY,
+	NSS_TX_METADATA_TYPE_PROFILER_TX,
+};
+
+/*
+ * Structure that describes all TX metadata objects.
+ */
+struct nss_tx_metadata_object {
+	enum nss_tx_metadata_types type;	/* Object type */
+	union {				/* Sub-message type */
+		struct nss_ipv4_rule_create ipv4_rule_create;
+		struct nss_ipv4_rule_destroy ipv4_rule_destroy;
+		struct nss_ipv6_rule_create ipv6_rule_create;
+		struct nss_ipv6_rule_destroy ipv6_rule_destroy;
+		struct nss_l2switch_rule_create l2switch_rule_create;
+		struct nss_l2switch_rule_destroy l2switch_rule_destroy;
+		struct nss_mac_address_set mac_address_set;
+		struct nss_virtual_interface_create virtual_interface_create;
+		struct nss_virtual_interface_destroy virtual_interface_destroy;
+		struct nss_pppoe_rule_destroy pppoe_rule_destroy;
+		struct nss_if_open if_open;
+		struct nss_if_close if_close;
+		struct nss_if_link_state_notify if_link_state_notify;
+		struct nss_crypto_open crypto_open;
+		struct nss_crypto_close crypto_close;
+		struct nss_mss_set mss_set;
+		struct nss_c2c_tx_map c2c_tx_map;
+		struct nss_ipsec_tx_rule_create ipsec_tx_rule_create;
+		struct nss_ipsec_tx_rule_destroy ipsec_tx_rule_destroy;
+		struct nss_ipsec_rx_rule_create ipsec_rx_rule_create;
+		struct nss_ipsec_rx_rule_destroy ipsec_rx_rule_destroy;
+		struct nss_profiler_tx profiler_tx;
+	} sub;
+};
+
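+/*
+ * The NSS port information structure.
+ */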
+struct nss_port_info {
+	uint8_t num_phys_ports;
+};
+
+/*
+ * The NSS IPv4 rule establish structure.
+ */
+struct nss_ipv4_rule_establish {
+	uint32_t index;				/* Slot ID for cache stats to host OS */
+	uint8_t protocol;			/* Protocol number */
+	int32_t flow_interface;			/* Flow interface number */
+	uint32_t flow_mtu;			/* MTU for flow interface */
+	uint32_t flow_ip;			/* Flow IP address */
+	uint32_t flow_ip_xlate;			/* Translated flow IP address */
+	uint32_t flow_ident;			/* Flow ident (e.g. port) */
+	uint32_t flow_ident_xlate;		/* Translated flow ident (e.g. port) */
+	uint16_t flow_pppoe_session_id;		/* Flow direction's PPPoE session ID. */
+	uint16_t flow_pppoe_remote_mac[3];	/* Flow direction's PPPoE Server MAC address */
+	int32_t return_interface;		/* Return interface number */
+	uint32_t return_mtu;			/* MTU for return interface */
+	uint32_t return_ip;			/* Return IP address */
+	uint32_t return_ip_xlate;		/* Translated return IP address */
+	uint32_t return_ident;			/* Return ident (e.g. port) */
+	uint32_t return_ident_xlate;		/* Translated return ident (e.g. port) */
+	uint16_t return_pppoe_session_id;	/* Return direction's PPPoE session ID. */
+	uint16_t return_pppoe_remote_mac[3];	/* Return direction's PPPoE Server MAC address */
+};
+
+/*
+ * IPv4 rule sync reasons.
+ */
+#define NSS_IPV4_RULE_SYNC_REASON_STATS 0
+					/* Sync is to synchronize stats */
+#define NSS_IPV4_RULE_SYNC_REASON_FLUSH 1
+					/* Sync is to flush a cache entry */
+#define NSS_IPV4_RULE_SYNC_REASON_EVICT 2
+					/* Sync is to evict a cache entry */
+#define NSS_IPV4_RULE_SYNC_REASON_DESTROY 3
+					/* Sync is to destroy a cache entry (requested by host OS) */
+#define NSS_IPV4_RULE_SYNC_REASON_PPPOE_DESTROY 4
+					/* Sync is to destroy a cache entry which belongs to a particular PPPoE session */
+
+/*
+ * The NSS IPv4 rule sync structure.
+ */
+struct nss_ipv4_rule_sync {
+	uint32_t index;			/* Slot ID for cache stats to host OS */
+	uint32_t flow_max_window;	/* Flow direction's largest seen window */
+	uint32_t flow_end;		/* Flow direction's largest seen sequence + segment length */
+	uint32_t flow_max_end;		/* Flow direction's largest seen ack + max(1, win) */
+	uint32_t flow_rx_packet_count;	/* Flow interface's RX packet count */
+	uint32_t flow_rx_byte_count;	/* Flow interface's RX byte count */
+	uint32_t flow_tx_packet_count;	/* Flow interface's TX packet count */
+	uint32_t flow_tx_byte_count;	/* Flow interface's TX byte count */
+	uint32_t return_max_window;	/* Return direction's largest seen window */
+	uint32_t return_end;		/* Return direction's largest seen sequence + segment length */
+	uint32_t return_max_end;	/* Return direction's largest seen ack + max(1, win) */
+	uint32_t return_rx_packet_count;
+					/* Return interface's RX packet count */
+	uint32_t return_rx_byte_count;	/* Return interface's RX byte count */
+	uint32_t return_tx_packet_count;
+					/* Return interface's TX packet count */
+	uint32_t return_tx_byte_count;	/* Return interface's TX byte count */
+	uint32_t inc_ticks;		/* Number of ticks since the last sync */
+	uint32_t reason;		/* Reason for the sync */
+};
+
+/*
+ * The NSS IPv6 rule establish structure.
+ */
+struct nss_ipv6_rule_establish {
+	uint32_t index;				/* Slot ID for cache stats to host OS */
+	uint8_t protocol;			/* Protocol number */
+	int32_t flow_interface;			/* Flow interface number */
+	uint32_t flow_mtu;			/* MTU for flow interface */
+	uint32_t flow_ip[4];			/* Flow IP address */
+	uint32_t flow_ident;			/* Flow ident (e.g. port) */
+	uint16_t flow_pppoe_session_id;		/* Flow direction's PPPoE session ID. */
+	uint16_t flow_pppoe_remote_mac[3];	/* Flow direction's PPPoE Server MAC address */
+	int32_t return_interface;		/* Return interface number */
+	uint32_t return_mtu;			/* MTU for return interface */
+	uint32_t return_ip[4];			/* Return IP address */
+	uint32_t return_ident;			/* Return ident (e.g. port) */
+	uint16_t return_pppoe_session_id;	/* Return direction's PPPoE session ID. */
+	uint16_t return_pppoe_remote_mac[3];	/* Return direction's PPPoE Server MAC address */
+};
+
+/*
+ * IPv6 rule sync reasons.
+ */
+#define NSS_IPV6_RULE_SYNC_REASON_STATS 0
+					/* Sync is to synchronize stats */
+#define NSS_IPV6_RULE_SYNC_REASON_FLUSH 1
+					/* Sync is to flush a cache entry */
+#define NSS_IPV6_RULE_SYNC_REASON_EVICT 2
+					/* Sync is to evict a cache entry */
+#define NSS_IPV6_RULE_SYNC_REASON_DESTROY 3
+					/* Sync is to destroy a cache entry (requested by host OS) */
+#define NSS_IPV6_RULE_SYNC_REASON_PPPOE_DESTROY 4
+					/* Sync is to destroy a cache entry which belongs to a particular PPPoE session */
+
+/*
+ * The NSS IPv6 rule sync structure.
+ */
+struct nss_ipv6_rule_sync {
+	uint32_t index;			/* Slot ID for cache stats to host OS */
+	uint32_t flow_max_window;	/* Flow direction's largest seen window */
+	uint32_t flow_end;		/* Flow direction's largest seen sequence + segment length */
+	uint32_t flow_max_end;		/* Flow direction's largest seen ack + max(1, win) */
+	uint32_t flow_rx_packet_count;	/* Flow interface's RX packet count */
+	uint32_t flow_rx_byte_count;	/* Flow interface's RX byte count */
+	uint32_t flow_tx_packet_count;	/* Flow interface's TX packet count */
+	uint32_t flow_tx_byte_count;	/* Flow interface's TX byte count */
+	uint32_t return_max_window;	/* Return direction's largest seen window */
+	uint32_t return_end;		/* Return direction's largest seen sequence + segment length */
+	uint32_t return_max_end;	/* Return direction's largest seen ack + max(1, win) */
+	uint32_t return_rx_packet_count;
+					/* Return interface's RX packet count */
+	uint32_t return_rx_byte_count;	/* Return interface's RX byte count */
+	uint32_t return_tx_packet_count;
+					/* Return interface's TX packet count */
+	uint32_t return_tx_byte_count;	/* Return interface's TX byte count */
+	uint32_t inc_ticks;		/* Number of ticks since the last sync */
+	uint32_t reason;		/* Reason for the sync */
+};
+
+/*
+ * The NSS L2 switch rule establish structure.
+ */
+struct nss_l2switch_rule_establish {
+	uint32_t index;			/* Slot ID for cache stats to host OS */
+	int32_t interface_num;		/* Interface number */
+	uint16_t mac_addr[3];		/* MAC Address */
+};
+
+/*
+ * Rule sync reasons.
+ */
+#define NSS_L2SWITCH_RULE_SYNC_REASON_STATS 0
+					/*  Sync is to synchronize stats */
+#define NSS_L2SWITCH_RULE_SYNC_REASON_FLUSH 1
+					/*  Sync is to flush a cache entry */
+#define NSS_L2SWITCH_RULE_SYNC_REASON_EVICT 2
+					/*  Sync is to evict a cache entry */
+#define NSS_L2SWITCH_RULE_SYNC_REASON_DESTROY 3
+					/*  Sync is to destroy a cache entry (requested by host OS) */
+
+/*
+ * The NSS L2 switch rule sync structure.
+ */
+struct nss_l2switch_rule_sync {
+	uint32_t index;			/* Slot ID for cache stats to host OS */
+	uint32_t rx_packet_count;	/* Number of packets RX'd */
+	uint32_t rx_byte_count;		/* Number of bytes RX'd */
+	uint32_t inc_ticks;		/* Number of ticks since the last sync */
+	uint32_t reason;		/* Reason for the sync */
+};
+
+/*
+ * The NSS per-GMAC statistics sync structure.
+ */
+struct nss_gmac_stats_sync {
+	int32_t interface;		/* Interface number */
+	uint32_t rx_bytes;		/* Number of RX bytes */
+	uint32_t rx_packets;		/* Number of RX packets */
+	uint32_t rx_errors;		/* Number of RX errors */
+	uint32_t rx_receive_errors;	/* Number of RX receive errors */
+	uint32_t rx_overflow_errors;	/* Number of RX overflow errors */
+	uint32_t rx_descriptor_errors;	/* Number of RX descriptor errors */
+	uint32_t rx_watchdog_timeout_errors;
+					/* Number of RX watchdog timeout errors */
+	uint32_t rx_crc_errors;		/* Number of RX CRC errors */
+	uint32_t rx_late_collision_errors;
+					/* Number of RX late collision errors */
+	uint32_t rx_dribble_bit_errors;	/* Number of RX dribble bit errors */
+	uint32_t rx_length_errors;	/* Number of RX length errors */
+	uint32_t rx_ip_header_errors;	/* Number of RX IP header errors */
+	uint32_t rx_ip_payload_errors;	/* Number of RX IP payload errors */
+	uint32_t rx_no_buffer_errors;	/* Number of RX no-buffer errors */
+	uint32_t rx_transport_csum_bypassed;
+					/* Number of RX packets where the transport checksum was bypassed */
+	uint32_t tx_bytes;		/* Number of TX bytes */
+	uint32_t tx_packets;		/* Number of TX packets */
+	uint32_t tx_collisions;		/* Number of TX collisions */
+	uint32_t tx_errors;		/* Number of TX errors */
+	uint32_t tx_jabber_timeout_errors;
+					/* Number of TX jabber timeout errors */
+	uint32_t tx_frame_flushed_errors;
+					/* Number of TX frame flushed errors */
+	uint32_t tx_loss_of_carrier_errors;
+					/* Number of TX loss of carrier errors */
+	uint32_t tx_no_carrier_errors;	/* Number of TX no carrier errors */
+	uint32_t tx_late_collision_errors;
+					/* Number of TX late collision errors */
+	uint32_t tx_excessive_collision_errors;
+					/* Number of TX excessive collision errors */
+	uint32_t tx_excessive_deferral_errors;
+					/* Number of TX excessive deferral errors */
+	uint32_t tx_underflow_errors;	/* Number of TX underflow errors */
+	uint32_t tx_ip_header_errors;	/* Number of TX IP header errors */
+	uint32_t tx_ip_payload_errors;	/* Number of TX IP payload errors */
+	uint32_t tx_dropped;		/* Number of TX dropped packets */
+	uint32_t hw_errs[10];		/* GMAC DMA error counters */
+	uint32_t rx_missed;		/* Number of RX packets missed by the DMA */
+	uint32_t fifo_overflows;	/* Number of RX FIFO overflows signalled by the DMA */
+	uint32_t gmac_total_ticks;	/* Total clock ticks spent inside the GMAC */
+	uint32_t gmac_worst_case_ticks;	/* Worst case iteration of the GMAC in ticks */
+	uint32_t gmac_iterations;	/* Number of iterations around the GMAC */
+};
+
+/*
+ * Exception events from PE
+ */
+enum exception_events_unknown {
+	NSS_EXCEPTION_EVENT_UNKNOWN_L2_PROTOCOL,
+	NSS_EXCEPTION_EVENT_UNKNOWN_LAST
+};
+
+/*
+ * Exception events from PE
+ */
+enum exception_events_ipv4 {
+	NSS_EXCEPTION_EVENT_IPV4_ICMP_HEADER_INCOMPLETE,
+	NSS_EXCEPTION_EVENT_IPV4_ICMP_UNHANDLED_TYPE,
+	NSS_EXCEPTION_EVENT_IPV4_ICMP_IPV4_HEADER_INCOMPLETE,
+	NSS_EXCEPTION_EVENT_IPV4_ICMP_IPV4_UDP_HEADER_INCOMPLETE,
+	NSS_EXCEPTION_EVENT_IPV4_ICMP_IPV4_TCP_HEADER_INCOMPLETE,
+	NSS_EXCEPTION_EVENT_IPV4_ICMP_IPV4_UNKNOWN_PROTOCOL,
+	NSS_EXCEPTION_EVENT_IPV4_ICMP_NO_ICME,
+	NSS_EXCEPTION_EVENT_IPV4_ICMP_FLUSH_TO_HOST,
+	NSS_EXCEPTION_EVENT_IPV4_TCP_HEADER_INCOMPLETE,
+	NSS_EXCEPTION_EVENT_IPV4_TCP_NO_ICME,
+	NSS_EXCEPTION_EVENT_IPV4_TCP_IP_OPTION,
+	NSS_EXCEPTION_EVENT_IPV4_TCP_IP_FRAGMENT,
+	NSS_EXCEPTION_EVENT_IPV4_TCP_SMALL_TTL,
+	NSS_EXCEPTION_EVENT_IPV4_TCP_NEEDS_FRAGMENTATION,
+	NSS_EXCEPTION_EVENT_IPV4_TCP_FLAGS,
+	NSS_EXCEPTION_EVENT_IPV4_TCP_SEQ_EXCEEDS_RIGHT_EDGE,
+	NSS_EXCEPTION_EVENT_IPV4_TCP_SMALL_DATA_OFFS,
+	NSS_EXCEPTION_EVENT_IPV4_TCP_BAD_SACK,
+	NSS_EXCEPTION_EVENT_IPV4_TCP_BIG_DATA_OFFS,
+	NSS_EXCEPTION_EVENT_IPV4_TCP_SEQ_BEFORE_LEFT_EDGE,
+	NSS_EXCEPTION_EVENT_IPV4_TCP_ACK_EXCEEDS_RIGHT_EDGE,
+	NSS_EXCEPTION_EVENT_IPV4_TCP_ACK_BEFORE_LEFT_EDGE,
+	NSS_EXCEPTION_EVENT_IPV4_UDP_HEADER_INCOMPLETE,
+	NSS_EXCEPTION_EVENT_IPV4_UDP_NO_ICME,
+	NSS_EXCEPTION_EVENT_IPV4_UDP_IP_OPTION,
+	NSS_EXCEPTION_EVENT_IPV4_UDP_IP_FRAGMENT,
+	NSS_EXCEPTION_EVENT_IPV4_UDP_SMALL_TTL,
+	NSS_EXCEPTION_EVENT_IPV4_UDP_NEEDS_FRAGMENTATION,
+	NSS_EXCEPTION_EVENT_IPV4_WRONG_TARGET_MAC,
+	NSS_EXCEPTION_EVENT_IPV4_HEADER_INCOMPLETE,
+	NSS_EXCEPTION_EVENT_IPV4_BAD_TOTAL_LENGTH,
+	NSS_EXCEPTION_EVENT_IPV4_BAD_CHECKSUM,
+	NSS_EXCEPTION_EVENT_IPV4_NON_INITIAL_FRAGMENT,
+	NSS_EXCEPTION_EVENT_IPV4_DATAGRAM_INCOMPLETE,
+	NSS_EXCEPTION_EVENT_IPV4_OPTIONS_INCOMPLETE,
+	NSS_EXCEPTION_EVENT_IPV4_UNKNOWN_PROTOCOL,
+	NSS_EXCEPTION_EVENT_IPV4_LAST
+};
+
+/*
+ * Exception events from PE
+ */
+enum exception_events_ipv6 {
+	NSS_EXCEPTION_EVENT_IPV6_ICMP_HEADER_INCOMPLETE,
+	NSS_EXCEPTION_EVENT_IPV6_ICMP_UNHANDLED_TYPE,
+	NSS_EXCEPTION_EVENT_IPV6_ICMP_IPV6_HEADER_INCOMPLETE,
+	NSS_EXCEPTION_EVENT_IPV6_ICMP_IPV6_UDP_HEADER_INCOMPLETE,
+	NSS_EXCEPTION_EVENT_IPV6_ICMP_IPV6_TCP_HEADER_INCOMPLETE,
+	NSS_EXCEPTION_EVENT_IPV6_ICMP_IPV6_UNKNOWN_PROTOCOL,
+	NSS_EXCEPTION_EVENT_IPV6_ICMP_NO_ICME,
+	NSS_EXCEPTION_EVENT_IPV6_ICMP_FLUSH_TO_HOST,
+	NSS_EXCEPTION_EVENT_IPV6_TCP_HEADER_INCOMPLETE,
+	NSS_EXCEPTION_EVENT_IPV6_TCP_NO_ICME,
+	NSS_EXCEPTION_EVENT_IPV6_TCP_SMALL_HOP_LIMIT,
+	NSS_EXCEPTION_EVENT_IPV6_TCP_NEEDS_FRAGMENTATION,
+	NSS_EXCEPTION_EVENT_IPV6_TCP_FLAGS,
+	NSS_EXCEPTION_EVENT_IPV6_TCP_SEQ_EXCEEDS_RIGHT_EDGE,
+	NSS_EXCEPTION_EVENT_IPV6_TCP_SMALL_DATA_OFFS,
+	NSS_EXCEPTION_EVENT_IPV6_TCP_BAD_SACK,
+	NSS_EXCEPTION_EVENT_IPV6_TCP_BIG_DATA_OFFS,
+	NSS_EXCEPTION_EVENT_IPV6_TCP_SEQ_BEFORE_LEFT_EDGE,
+	NSS_EXCEPTION_EVENT_IPV6_TCP_ACK_EXCEEDS_RIGHT_EDGE,
+	NSS_EXCEPTION_EVENT_IPV6_TCP_ACK_BEFORE_LEFT_EDGE,
+	NSS_EXCEPTION_EVENT_IPV6_UDP_HEADER_INCOMPLETE,
+	NSS_EXCEPTION_EVENT_IPV6_UDP_NO_ICME,
+	NSS_EXCEPTION_EVENT_IPV6_UDP_SMALL_HOP_LIMIT,
+	NSS_EXCEPTION_EVENT_IPV6_UDP_NEEDS_FRAGMENTATION,
+	NSS_EXCEPTION_EVENT_IPV6_WRONG_TARGET_MAC,
+	NSS_EXCEPTION_EVENT_IPV6_HEADER_INCOMPLETE,
+	NSS_EXCEPTION_EVENT_IPV6_UNKNOWN_PROTOCOL,
+	NSS_EXCEPTION_EVENT_IPV6_LAST
+};
+
+/*
+ * Exception events from PE
+ */
+enum exception_events_pppoe {
+	NSS_EXCEPTION_EVENT_PPPOE_WRONG_VERSION_OR_TYPE,
+	NSS_EXCEPTION_EVENT_PPPOE_WRONG_CODE,
+	NSS_EXCEPTION_EVENT_PPPOE_HEADER_INCOMPLETE,
+	NSS_EXCEPTION_EVENT_PPPOE_UNSUPPORTED_PPP_PROTOCOL,
+	NSS_EXCEPTION_EVENT_PPPOE_LAST
+};
+
+/*
+ * The NSS per-interface statistics sync structure.
+ */
+struct nss_interface_stats_sync {
+	int32_t interface;		/* Interface number */
+	uint32_t rx_packets;		/* Number of packets received */
+	uint32_t rx_bytes;		/* Number of bytes received */
+	uint32_t tx_packets;		/* Number of packets transmitted */
+	uint32_t tx_bytes;		/* Number of bytes transmitted */
+	uint32_t rx_errors;		/* Number of receive errors */
+	uint32_t tx_errors;		/* Number of transmit errors */
+	uint32_t tx_dropped;		/* Number of TX dropped packets */
+	uint32_t collisions;		/* Number of TX and RX collisions */
+	uint32_t host_rx_packets;	/* Number of RX packets received by host OS */
+	uint32_t host_rx_bytes;		/* Number of RX bytes received by host OS */
+	uint32_t host_tx_packets;	/* Number of TX packets sent by host OS */
+	uint32_t host_tx_bytes;		/* Number of TX bytes sent by host OS */
+	uint32_t rx_length_errors;	/* Number of RX length errors */
+	uint32_t rx_overflow_errors;	/* Number of RX overflow errors */
+	uint32_t rx_crc_errors;		/* Number of RX CRC errors */
+	uint32_t exception_events_unknown[NSS_EXCEPTION_EVENT_UNKNOWN_LAST];
+					/* Number of unknown protocol exception events */
+	uint32_t exception_events_ipv4[NSS_EXCEPTION_EVENT_IPV4_LAST];
+					/* Number of IPv4 exception events */
+	uint32_t exception_events_ipv6[NSS_EXCEPTION_EVENT_IPV6_LAST];
+					/* Number of IPv6 exception events */
+};
+
+/*
+ * The NSS statistics sync structure.
+ */
+struct nss_nss_stats_sync {
+	uint32_t ipv4_connection_create_requests;
+					/* Number of IPv4 connection create requests */
+	uint32_t ipv4_connection_create_collisions;
+					/* Number of IPv4 connection create requests that collided with existing entries */
+	uint32_t ipv4_connection_create_invalid_interface;
+					/* Number of IPv4 connection create requests that had invalid interface */
+	uint32_t ipv4_connection_destroy_requests;
+					/* Number of IPv4 connection destroy requests */
+	uint32_t ipv4_connection_destroy_misses;
+					/* Number of IPv4 connection destroy requests that missed the cache */
+	uint32_t ipv4_connection_hash_hits;
+					/* Number of IPv4 connection hash hits */
+	uint32_t ipv4_connection_hash_reorders;
+					/* Number of IPv4 connection hash reorders */
+	uint32_t ipv4_connection_flushes;
+					/* Number of IPv4 connection flushes */
+	uint32_t ipv4_connection_evictions;
+					/* Number of IPv4 connection evictions */
+	uint32_t ipv6_connection_create_requests;
+					/* Number of IPv6 connection create requests */
+	uint32_t ipv6_connection_create_collisions;
+					/* Number of IPv6 connection create requests that collided with existing entries */
+	uint32_t ipv6_connection_create_invalid_interface;
+					/* Number of IPv6 connection create requests that had invalid interface */
+	uint32_t ipv6_connection_destroy_requests;
+					/* Number of IPv6 connection destroy requests */
+	uint32_t ipv6_connection_destroy_misses;
+					/* Number of IPv6 connection destroy requests that missed the cache */
+	uint32_t ipv6_connection_hash_hits;
+					/* Number of IPv6 connection hash hits */
+	uint32_t ipv6_connection_hash_reorders;
+					/* Number of IPv6 connection hash reorders */
+	uint32_t ipv6_connection_flushes;
+					/* Number of IPv6 connection flushes */
+	uint32_t ipv6_connection_evictions;
+					/* Number of IPv6 connection evictions */
+	uint32_t l2switch_create_requests;
+					/* Number of l2 switch entry create requests */
+	uint32_t l2switch_create_collisions;
+					/* Number of l2 switch entry create requests that collided with existing entries */
+	uint32_t l2switch_create_invalid_interface;
+					/* Number of l2 switch entry create requests that had invalid interface */
+	uint32_t l2switch_destroy_requests;
+					/* Number of l2 switch entry destroy requests */
+	uint32_t l2switch_destroy_misses;
+					/* Number of l2 switch entry destroy requests that missed the cache */
+	uint32_t l2switch_hash_hits;	/* Number of l2 switch entry hash hits */
+	uint32_t l2switch_hash_reorders;/* Number of l2 switch entry hash reorders */
+	uint32_t l2switch_flushes;	/* Number of l2 switch entry flushes */
+	uint32_t l2switch_evictions;	/* Number of l2 switch entry evictions */
+	uint32_t pppoe_session_create_requests;
+					/* Number of PPPoE session create requests */
+	uint32_t pppoe_session_create_failures;
+					/* Number of PPPoE session create failures */
+	uint32_t pppoe_session_destroy_requests;
+					/* Number of PPPoE session destroy requests */
+	uint32_t pppoe_session_destroy_misses;
+					/* Number of PPPoE session destroy requests that missed the cache */
+	uint32_t pe_queue_dropped;	/* Number of packets dropped because the PE queue is too full */
+	uint32_t pe_total_ticks;	/* Total clock ticks spent inside the PE */
+	uint32_t pe_worst_case_ticks;	/* Worst case iteration of the PE in ticks */
+	uint32_t pe_iterations;		/* Number of iterations around the PE */
+	uint32_t except_queue_dropped;	/* Number of packets dropped because the exception queue is too full */
+	uint32_t except_total_ticks;	/* Total clock ticks spent inside the exception path */
+	uint32_t except_worst_case_ticks;
+					/* Worst case iteration of the exception path in ticks */
+	uint32_t except_iterations;	/* Number of iterations around the exception path */
+	uint32_t l2switch_queue_dropped;/* Number of packets dropped because the L2 switch queue is too full */
+	uint32_t l2switch_total_ticks;	/* Total clock ticks spent inside the L2 switch */
+	uint32_t l2switch_worst_case_ticks;
+					/* Worst case iteration of the L2 switch in ticks */
+	uint32_t l2switch_iterations;	/* Number of iterations around the L2 switch */
+	uint32_t pbuf_alloc_fails;	/* Number of pbuf allocations that have failed */
+	uint32_t pbuf_payload_alloc_fails;
+					/* Number of pbuf allocations that have failed because there were no free payloads */
+};
+
+/*
+ * The NSS PPPoE exception statistics sync structure.
+ */
+struct nss_pppoe_exception_stats_sync {
+	uint16_t pppoe_session_id;	/* PPPoE session ID on which stats are based */
+	uint8_t pppoe_remote_mac[ETH_ALEN];
+					/* PPPoE server MAC address */
+	uint32_t exception_events_pppoe[NSS_EXCEPTION_EVENT_PPPOE_LAST];
+					/* PPPoE exception events */
+};
+
+/*
+ * Profiler sync
+ */
+struct nss_profiler_sync {
+	uint32_t len;		/* Valid information length */
+	uint8_t buf[1];		/* Buffer */
+};
+
+/*
+ * Types of RX metadata.
+ */
+enum nss_rx_metadata_types {
+	NSS_RX_METADATA_TYPE_IPV4_RULE_ESTABLISH,
+	NSS_RX_METADATA_TYPE_IPV4_RULE_SYNC,
+	NSS_RX_METADATA_TYPE_IPV6_RULE_ESTABLISH,
+	NSS_RX_METADATA_TYPE_IPV6_RULE_SYNC,
+	NSS_RX_METADATA_TYPE_L2SWITCH_RULE_ESTABLISH,
+	NSS_RX_METADATA_TYPE_L2SWITCH_RULE_SYNC,
+	NSS_RX_METADATA_TYPE_GMAC_STATS_SYNC,
+	NSS_RX_METADATA_TYPE_INTERFACE_STATS_SYNC,
+	NSS_RX_METADATA_TYPE_NSS_STATS_SYNC,
+	NSS_RX_METADATA_TYPE_PPPOE_STATS_SYNC,
+	NSS_RX_METADATA_TYPE_PROFILER_SYNC,
+};
+
+/*
+ * Structure that describes all RX metadata objects.
+ */
+struct nss_rx_metadata_object {
+	enum nss_rx_metadata_types type;	/* Object type */
+	union {				/* Sub-message type */
+		struct nss_ipv4_rule_establish ipv4_rule_establish;
+		struct nss_ipv4_rule_sync ipv4_rule_sync;
+		struct nss_ipv6_rule_establish ipv6_rule_establish;
+		struct nss_ipv6_rule_sync ipv6_rule_sync;
+		struct nss_l2switch_rule_establish l2switch_rule_establish;
+		struct nss_l2switch_rule_sync l2switch_rule_sync;
+		struct nss_gmac_stats_sync gmac_stats_sync;
+		struct nss_interface_stats_sync interface_stats_sync;
+		struct nss_nss_stats_sync nss_stats_sync;
+		struct nss_pppoe_exception_stats_sync pppoe_exception_stats_sync;
+		struct nss_profiler_sync profiler_sync;
+	} sub;
+};
+
+
+/*
+ * H2N Buffer Types
+ */
+#define H2N_BUFFER_EMPTY	0
+#define H2N_BUFFER_PACKET	2
+#define H2N_BUFFER_CTRL		4
+#define H2N_BUFFER_CRYPTO_REQ	7
+
+/*
+ * H2N Bit Flag Definitions
+ */
+#define H2N_BIT_FLAG_GEN_IPV4_IP_CHECKSUM	0x0001
+#define H2N_BIT_FLAG_GEN_IP_TRANSPORT_CHECKSUM	0x0002
+#define H2N_BIT_FLAG_FIRST_SEGMENT		0x0004
+#define H2N_BIT_FLAG_LAST_SEGMENT		0x0008
+#define H2N_BIT_FLAG_DISCARD                    0x0080
+#define H2N_BIT_FLAG_SEGMENTATION_ENABLE	0x0100
+#define H2N_BIT_FLAG_SEGMENT_TSO		0x0200
+#define H2N_BIT_FLAG_SEGMENT_UFO		0x0400
+#define H2N_BIT_FLAG_SEGMENT_TSO6		0x0800
+
+/*
+ * HLOS to NSS descriptor structure.
+ */
+struct h2n_descriptor {
+	uint32_t opaque;
+			/* 32-bit value provided by the HLOS to associate with the buffer. The cookie has no meaning to the NSS */
+	uint32_t buffer;
+			/* Physical buffer address. This is the address of the start of the usable buffer being provided by the HLOS */
+	uint16_t buffer_len;	/* Length of the buffer (in bytes) */
+	uint16_t metadata_off;	/* Reserved for future use */
+	uint16_t payload_len;	/* Length of the active payload of the buffer (in bytes) */
+	uint16_t mss;		/* MSS to be used with TSO/UFO */
+	uint16_t payload_offs;	/* Offset from the start of the buffer to the start of the payload (in bytes) */
+	uint16_t interface_num;	/* Interface number to which the buffer is to be sent (where appropriate) */
+	uint8_t buffer_type;	/* Type of buffer */
+	uint8_t reserved3;	/* Reserved for future use */
+	uint16_t bit_flags;	/* Bit flags associated with the buffer */
+	uint8_t qos_class;	/* QoS class of the buffer (where appropriate) */
+	uint8_t qos_priority;	/* QoS priority of the buffer (where appropriate) */
+	uint16_t qos_flow_id;	/* QoS flow ID of the buffer (where appropriate) */
+	uint32_t reserved4;	/* Reserved for future use */
+};
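+
+/*
+ * Illustrative H2N descriptor fill (not part of this patch): a sketch of how
+ * the HLOS side might hand a packet buffer to the NSS using the fields and
+ * flags defined above. The variable names (ring, hlos_index, nbuf, buf_paddr,
+ * if_num) are hypothetical; the real producer lives in nss_core.c.
+ *
+ *	struct h2n_descriptor *desc = &ring[hlos_index];
+ *
+ *	desc->opaque = (uint32_t)nbuf;		(host cookie, echoed back on completion)
+ *	desc->buffer = buf_paddr;		(physical address of the buffer)
+ *	desc->buffer_len = buf_len;
+ *	desc->payload_offs = payload_offs;
+ *	desc->payload_len = payload_len;
+ *	desc->interface_num = if_num;
+ *	desc->buffer_type = H2N_BUFFER_PACKET;
+ *	desc->bit_flags = H2N_BIT_FLAG_FIRST_SEGMENT | H2N_BIT_FLAG_LAST_SEGMENT;
+ */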
+
+/*
+ * N2H Buffer Types
+ */
+#define N2H_BUFFER_EMPTY		1
+#define N2H_BUFFER_PACKET		3
+#define N2H_BUFFER_COMMAND_RESP		5
+#define N2H_BUFFER_STATUS		6
+#define N2H_BUFFER_CRYPTO_RESP		8
+
+/*
+ * Command Response Types
+ */
+#define N2H_COMMAND_RESP_OK			0
+#define N2H_COMMAND_RESP_BUFFER_TOO_SMALL	1
+#define N2H_COMMAND_RESP_BUFFER_NOT_WRITEABLE	2
+#define N2H_COMMAND_RESP_UNSUPPORTED_COMMAND	3
+#define N2H_COMMAND_RESP_INVALID_PARAMETERS	4
+#define N2H_COMMAND_RESP_INACTIVE_SUBSYSTEM	5
+
+/*
+ * N2H Bit Flag Definitions
+ */
+#define N2H_BIT_FLAG_IPV4_IP_CHECKSUM_VALID		0x0001
+#define N2H_BIT_FLAG_IP_TRANSPORT_CHECKSUM_VALID	0x0002
+#define N2H_BIT_FLAG_FIRST_SEGMENT			0x0004
+#define N2H_BIT_FLAG_LAST_SEGMENT			0x0008
+
+/*
+ * NSS to HLOS descriptor structure
+ */
+struct n2h_descriptor {
+	uint32_t opaque;
+			/* 32-bit value provided by the HLOS to associate with the buffer. The cookie has no meaning to the NSS */
+	uint32_t buffer;
+			/* Physical buffer address. This is the address of the start of the usable buffer being provided by the HLOS */
+	uint16_t buffer_len;	/* Length of the buffer (in bytes) */
+	uint16_t reserved1;	/* Reserved for future use */
+	uint16_t payload_len;	/* Length of the active payload of the buffer (in bytes) */
+	uint16_t reserved2;	/* Reserved for future use */
+	uint16_t payload_offs;	/* Offset from the start of the buffer to the start of the payload (in bytes) */
+	uint16_t interface_num;	/* Interface number to which the buffer is to be sent (where appropriate) */
+	uint8_t buffer_type;	/* Type of buffer */
+	uint8_t response_type;	/* Response type if the buffer is a command response */
+	uint16_t bit_flags;	/* Bit flags associated with the buffer */
+	uint32_t timestamp_lo;	/* Low 32 bits of any timestamp associated with the buffer */
+	uint32_t timestamp_hi;	/* High 32 bits of any timestamp associated with the buffer */
+};
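+
+/*
+ * Illustrative N2H descriptor handling (not part of this patch): a sketch of
+ * how the HLOS side might dispatch a completed descriptor based on its
+ * buffer type; the real consumer lives in nss_core.c and may differ.
+ *
+ *	switch (desc->buffer_type) {
+ *	case N2H_BUFFER_PACKET:
+ *		... hand payload_len bytes at buffer + payload_offs to the host stack ...
+ *		break;
+ *	case N2H_BUFFER_COMMAND_RESP:
+ *		... inspect desc->response_type against the N2H_COMMAND_RESP_* codes ...
+ *		break;
+ *	case N2H_BUFFER_EMPTY:
+ *		... the buffer was returned unused; recycle it ...
+ *		break;
+ *	}
+ */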
+
+/*
+ * Device Memory Map Definitions
+ */
+#define DEV_MAGIC		0x4e52522e
+#define DEV_INTERFACE_VERSION	1
+#define DEV_DESCRIPTORS		256 /* Do we need it here? */
+
+/**
+ * H2N descriptor ring
+ */
+struct h2n_desc_if_instance {
+	struct h2n_descriptor *desc;
+	uint16_t size;			/* Size in entries of the H2N0 descriptor ring */
+	uint16_t int_bit;		/* H2N0 descriptor ring interrupt */
+};
+
+/**
+ * N2H descriptor ring
+ */
+struct n2h_desc_if_instance {
+	struct n2h_descriptor *desc;
+	uint16_t size;			/* Size in entries of the N2H0 descriptor ring */
+	uint16_t int_bit;		/* N2H0 descriptor ring interrupt */
+};
+
+/**
+ * NSS virtual interface map
+ */
+struct nss_if_mem_map {
+	struct h2n_desc_if_instance h2n_desc_if[16];	/* Base address of H2N0 descriptor ring */
+	struct n2h_desc_if_instance n2h_desc_if[15];	/* Base address of N2H0 descriptor ring */
+	uint32_t magic;				/* Magic value used to identify NSS implementations (must be 0x4e52522e) */
+	uint16_t if_version;			/* Interface version number (must be 1 for this version) */
+	uint8_t h2n_rings;			/* Number of descriptor rings in the H2N direction */
+	uint8_t n2h_rings;			/* Number of descriptor rings in the N2H direction */
+	uint32_t h2n_nss_index[16];
+				/* Index number for the next descriptor that will be read by the NSS in the H2N0 descriptor ring (NSS owned) */
+	uint32_t n2h_nss_index[15];
+				/* Index number for the next descriptor that will be written by the NSS in the N2H0 descriptor ring (NSS owned) */
+	uint8_t num_phys_ports;
+	uint8_t reserved1[3];	/* Reserved for future use */
+	uint32_t h2n_hlos_index[16];
+				/* Index number for the next descriptor that will be written by the HLOS in the H2N0 descriptor ring (HLOS owned) */
+	uint32_t n2h_hlos_index[15];
+				/* Index number for the next descriptor that will be read by the HLOS in the N2H0 descriptor ring (HLOS owned) */
+	uint32_t c2c_start;	/* Reserved for future use */
+};
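+
+/*
+ * Ring index usage (inferred from the field descriptions above, not
+ * normative): for H2N ring i the HLOS produces at h2n_hlos_index[i] and the
+ * NSS consumes at h2n_nss_index[i]; the ring is empty when the two indices
+ * are equal and full when advancing the producer index would make them
+ * equal, e.g.
+ *
+ *	next = (h2n_hlos_index[i] + 1) % h2n_desc_if[i].size;
+ *	ring_is_full = (next == h2n_nss_index[i]);
+ *
+ * The N2H rings work the same way with the producer/consumer roles swapped.
+ */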
+#endif /* __NSS_HLOS_IF_H */
diff --git a/nss_init.c b/nss_init.c
new file mode 100755
index 0000000..3a71cac
--- /dev/null
+++ b/nss_init.c
@@ -0,0 +1,307 @@
+/* * Copyright (c) 2013 Qualcomm Atheros, Inc. * */
+
+/*
+ * nss_init.c
+ *	NSS init APIs
+ *
+ */
+
+#include "nss_core.h"
+#include <nss_hal.h>
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <mach/msm_nss.h>
+
+/*
+ * Global declarations
+ */
+
+/*
+ * Top level nss context structure
+ */
+struct nss_top_instance nss_top_main;
+
+/*
+ * File local/Static variables/functions
+ */
+
+/*
+ * nss_handle_irq()
+ *	HLOS interrupt handler for nss interrupts
+ */
+static irqreturn_t nss_handle_irq (int irq, void *ctx)
+{
+	struct int_ctx_instance *int_ctx = (struct int_ctx_instance *) ctx;
+
+	/*
+	 * Disable IRQ until our bottom half re-enables it
+	 */
+	disable_irq_nosync(irq);
+
+	/*
+	 * Schedule tasklet to process interrupt cause
+	 */
+	tasklet_schedule(&int_ctx->bh);
+	return IRQ_HANDLED;
+}
+
+/*
+ * nss_probe()
+ *	HLOS device probe callback
+ */
+static int __devinit nss_probe (struct platform_device *nss_dev)
+{
+	struct nss_top_instance *nss_top = &nss_top_main;
+	struct nss_ctx_instance *nss_ctx = &nss_top->nss[nss_dev->id];
+	struct nss_platform_data *npd = (struct nss_platform_data *) nss_dev->dev.platform_data;
+	int err, i;
+
+	nss_ctx->nss_top = nss_top;
+	nss_ctx->id = nss_dev->id;
+
+	/*
+	 * Get virtual and physical memory addresses for nss logical/hardware address maps
+	 */
+
+	/*
+	 * Virtual address of CSM space
+	 */
+	nss_ctx->nmap = npd->nmap;
+	nss_assert(nss_ctx->nmap);
+
+	/*
+	 * Physical address of CSM space
+	 */
+	nss_ctx->nphys = npd->nphys;
+	nss_assert(nss_ctx->nphys);
+
+	/*
+	 * Virtual address of logical registers space
+	 */
+	nss_ctx->vmap = npd->vmap;
+	nss_assert(nss_ctx->vmap);
+
+	/*
+	 * Physical address of logical registers space
+	 */
+	nss_ctx->vphys = npd->vphys;
+	nss_assert(nss_ctx->vphys);
+	nss_info("nss:%d: vphys=%x, vmap=%x, nphys=%x, nmap=%x", nss_dev->id, nss_ctx->vphys, nss_ctx->vmap, nss_ctx->nphys, nss_ctx->nmap);
+
+	/*
+	 * request for IRQs
+	 *
+	 * WARNING: CPU affinities should be set using OS supported methods
+	 */
+	nss_ctx->int_ctx[0].nss_ctx = nss_ctx;
+	nss_ctx->int_ctx[0].shift_factor = 0;
+	nss_ctx->int_ctx[0].irq = npd->irq[0];
+	err = request_irq(npd->irq[0], nss_handle_irq, IRQF_DISABLED, "nss", &nss_ctx->int_ctx[0]);
+	if (err) {
+		nss_warning("%d: IRQ0 request failed", nss_dev->id);
+		return err;
+	}
+
+	/*
+	 * Register bottom half for NSS core interrupt
+	 */
+	tasklet_init(&nss_ctx->int_ctx[0].bh, nss_core_handle_bh, (unsigned long)&nss_ctx->int_ctx[0]);
+
+	/*
+	 * Enable interrupts for NSS core
+	 */
+	nss_hal_enable_interrupt(nss_ctx->nmap, nss_ctx->int_ctx[0].irq,
+					nss_ctx->int_ctx[0].shift_factor, NSS_HAL_SUPPORTED_INTERRUPTS);
+
+	/*
+	 * Check if second interrupt is supported on this nss core
+	 */
+	if (npd->num_irq > 1) {
+		nss_info("%d: This NSS core supports two interrupts", nss_dev->id);
+		nss_ctx->int_ctx[1].nss_ctx = nss_ctx;
+		nss_ctx->int_ctx[1].shift_factor = 15;
+		nss_ctx->int_ctx[1].irq = npd->irq[1];
+		err = request_irq(npd->irq[1], nss_handle_irq, IRQF_DISABLED, "nss", &nss_ctx->int_ctx[1]);
+		if (err) {
+			nss_warning("%d: IRQ1 request failed for nss", nss_dev->id);
+			nss_hal_disable_interrupt(nss_ctx->nmap, nss_ctx->int_ctx[0].irq,
+					nss_ctx->int_ctx[0].shift_factor, NSS_HAL_SUPPORTED_INTERRUPTS);
+			tasklet_kill(&nss_ctx->int_ctx[0].bh);
+			free_irq(nss_ctx->int_ctx[0].irq, &nss_ctx->int_ctx[0]);
+			return err;
+		}
+
+		/*
+		 * Register bottom half for the second NSS core interrupt
+		 */
+		tasklet_init(&nss_ctx->int_ctx[1].bh, nss_core_handle_bh, (unsigned long)&nss_ctx->int_ctx[1]);
+
+		nss_hal_enable_interrupt(nss_ctx->nmap, nss_ctx->int_ctx[1].irq,
+				nss_ctx->int_ctx[1].shift_factor, NSS_HAL_SUPPORTED_INTERRUPTS);
+	}
+
+	spin_lock_bh(&(nss_top->lock));
+
+	/*
+	 * Check which functionalities are supported by this NSS core
+	 */
+	if (npd->ipv4_enabled == NSS_FEATURE_ENABLED) {
+		nss_top->ipv4_handler_id = nss_dev->id;
+	}
+
+	if (npd->ipv6_enabled == NSS_FEATURE_ENABLED) {
+		nss_top->ipv6_handler_id = nss_dev->id;
+	}
+
+	if (npd->l2switch_enabled == NSS_FEATURE_ENABLED) {
+		nss_top->l2switch_handler_id = nss_dev->id;
+	}
+
+	if (npd->crypto_enabled == NSS_FEATURE_ENABLED) {
+		nss_top->crypto_handler_id = nss_dev->id;
+	}
+
+	if (npd->ipsec_enabled == NSS_FEATURE_ENABLED) {
+		nss_top->ipsec_handler_id = nss_dev->id;
+	}
+
+	if (npd->wlan_enabled == NSS_FEATURE_ENABLED) {
+		nss_top->wlan_handler_id = nss_dev->id;
+	}
+
+	if (npd->gmac_enabled[0] == NSS_FEATURE_ENABLED) {
+		nss_top->phys_if_handler_id[0] = nss_dev->id;
+	}
+
+	if (npd->gmac_enabled[1] == NSS_FEATURE_ENABLED) {
+		nss_top->phys_if_handler_id[1] = nss_dev->id;
+	}
+
+	if (npd->gmac_enabled[2] == NSS_FEATURE_ENABLED) {
+		nss_top->phys_if_handler_id[2] = nss_dev->id;
+	}
+
+	if (npd->gmac_enabled[3] == NSS_FEATURE_ENABLED) {
+		nss_top->phys_if_handler_id[3] = nss_dev->id;
+	}
+
+	spin_unlock_bh(&(nss_top->lock));
+
+#ifdef CONFIG_MACH_IPQ806X_RUMI3
+	/*
+	 * Clear the whole TCM
+	 * NOTE: This is required on RUMI as TCM does not seem to
+	 * reset properly on RUMI
+	 */
+	for (i = 0; i < (16 * 1024); i++) {
+		*((uint32_t *)nss_ctx->vmap + i) = 0;
+	}
+#endif
+
+	/*
+	 * Initialize decongestion callbacks to NULL
+	 */
+	for (i = 0; i < NSS_MAX_CLIENTS; i++) {
+		nss_ctx->queue_decongestion_callback[i] = NULL;
+		nss_ctx->queue_decongestion_ctx[i] = NULL;
+	}
+
+	spin_lock_init(&(nss_ctx->decongest_cb_lock));
+	nss_ctx->magic = NSS_CTX_MAGIC;
+
+	/*
+	 * Enable clocks and bring NSS core out of reset
+	 */
+	nss_hal_core_reset(nss_dev->id, nss_ctx->nmap, npd->rst_addr);
+	nss_info("%p: All resources initialized and nss core%d has been brought out of reset", nss_ctx, nss_dev->id);
+	return 0;
+}
+
+/*
+ * nss_remove()
+ *	HLOS device remove callback
+ */
+static int __devexit nss_remove (struct platform_device *nss_dev)
+{
+	struct nss_top_instance *nss_top = &nss_top_main;
+	struct nss_ctx_instance *nss_ctx = &nss_top->nss[nss_dev->id];
+
+	/*
+	 * Disable interrupts and bottom halves in HLOS
+	 * Disable interrupts from NSS to HLOS
+	 */
+	nss_hal_disable_interrupt(nss_ctx->nmap, nss_ctx->int_ctx[0].irq,
+					nss_ctx->int_ctx[0].shift_factor, NSS_HAL_SUPPORTED_INTERRUPTS);
+	tasklet_kill(&nss_ctx->int_ctx[0].bh);
+	free_irq(nss_ctx->int_ctx[0].irq, &nss_ctx->int_ctx[0]);
+
+	/*
+	 * Check if second interrupt is supported
+	 * If so then clear resources for second interrupt as well
+	 */
+	if (nss_ctx->int_ctx[1].irq) {
+		nss_hal_disable_interrupt(nss_ctx->nmap, nss_ctx->int_ctx[1].irq,
+					nss_ctx->int_ctx[1].shift_factor, NSS_HAL_SUPPORTED_INTERRUPTS);
+		tasklet_kill(&nss_ctx->int_ctx[1].bh);
+		free_irq(nss_ctx->int_ctx[1].irq, &nss_ctx->int_ctx[1]);
+	}
+
+	nss_info("%p: All resources freed for nss core%d", nss_ctx, nss_dev->id);
+	return 0;
+}
+
+/*
+ * nss_driver
+ *	Platform driver structure for NSS
+ */
+struct platform_driver nss_driver = {
+	.probe	= nss_probe,
+	.remove	= __devexit_p(nss_remove),
+	.driver	= {
+		.name	= "qca-nss",
+		.owner	= THIS_MODULE,
+	},
+};
+
+/*
+ * nss_init()
+ *	Registers nss driver
+ */
+static int __init nss_init(void)
+{
+	nss_info("Init NSS driver");
+
+	/*
+	 * Perform clock init common to all NSS cores
+	 */
+	nss_hal_common_reset();
+
+	/*
+	 * Initialize spin locks
+	 */
+	spin_lock_init(&(nss_top_main.lock));
+	spin_lock_init(&(nss_top_main.stats_lock));
+
+	/*
+	 * Register platform_driver
+	 */
+	return platform_driver_register(&nss_driver);
+}
+
+/*
+ * nss_cleanup()
+ *	Unregisters nss driver
+ */
+static void __exit nss_cleanup(void)
+{
+	nss_info("Exit NSS driver");
+	platform_driver_unregister(&nss_driver);
+}
+
+module_init(nss_init);
+module_exit(nss_cleanup);
+
+MODULE_DESCRIPTION("QCA NSS Driver");
+MODULE_AUTHOR("Qualcomm Atheros Inc");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/nss_tx_rx.c b/nss_tx_rx.c
new file mode 100755
index 0000000..915ce72
--- /dev/null
+++ b/nss_tx_rx.c
@@ -0,0 +1,1817 @@
+/* * Copyright (c) 2013 Qualcomm Atheros, Inc. * */
+
+/*
+ * nss_tx_rx.c
+ *	NSS Tx and Rx APIs
+ */
+
+#include "nss_core.h"
+#include <nss_hal.h>
+#include <linux/module.h>
+
+/*
+ * Global variables/extern declarations
+ */
+extern struct nss_top_instance nss_top_main;
+
+#if (CONFIG_NSS_DEBUG_LEVEL > 0)
+#define NSS_VERIFY_CTX_MAGIC(x) nss_verify_ctx_magic(x)
+#define NSS_VERIFY_INIT_DONE(x) nss_verify_init_done(x)
+
+/*
+ * nss_verify_ctx_magic()
+ */
+static inline void nss_verify_ctx_magic(struct nss_ctx_instance *nss_ctx)
+{
+	nss_assert(nss_ctx->magic == NSS_CTX_MAGIC);
+}
+
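+/*
+ * nss_verify_init_done()
+ */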
+static inline void nss_verify_init_done(struct nss_ctx_instance *nss_ctx)
+{
+	nss_assert(nss_ctx->state == NSS_CORE_STATE_INITIALIZED);
+}
+
+#else
+#define NSS_VERIFY_CTX_MAGIC(x)
+#define NSS_VERIFY_INIT_DONE(x)
+#endif
+
+/*
+ * nss_rx_metadata_ipv4_rule_establish()
+ *	Handle the establishment of an IPv4 rule.
+ */
+static void nss_rx_metadata_ipv4_rule_establish(struct nss_ctx_instance *nss_ctx, struct nss_ipv4_rule_establish *nire)
+{
+	struct nss_ipv4_statistics *nis;
+	struct nss_top_instance *nss_top = nss_ctx->nss_top;
+
+	if (unlikely(nire->index >= NSS_IPV4_CONNECTION_ENTRIES)) {
+		nss_warning("%p: Bad connection entry index: %d\n", nss_ctx, nire->index);
+		return;
+	}
+
+	nis = &nss_top->nss_ipv4_statistics[nire->index];
+
+	spin_lock_bh(&nss_top->stats_lock);
+	nis->protocol = nire->protocol;
+	nis->flow_interface = nire->flow_interface;
+	nis->flow_mtu = nire->flow_mtu;
+	nis->flow_ip = nire->flow_ip;
+	nis->flow_ip_xlate = nire->flow_ip_xlate;
+	nis->flow_ident = nire->flow_ident;
+	nis->flow_ident_xlate = nire->flow_ident_xlate;
+	nis->flow_accelerated_rx_packets = 0;
+	nis->flow_accelerated_rx_bytes = 0;
+	nis->flow_accelerated_tx_packets = 0;
+	nis->flow_accelerated_tx_bytes = 0;
+	nis->flow_pppoe_session_id = nire->flow_pppoe_session_id;
+	memcpy(nis->flow_pppoe_remote_mac, nire->flow_pppoe_remote_mac, ETH_ALEN);
+	nis->return_interface = nire->return_interface;
+	nis->return_mtu = nire->return_mtu;
+	nis->return_ip = nire->return_ip;
+	nis->return_ip_xlate = nire->return_ip_xlate;
+	nis->return_ident = nire->return_ident;
+	nis->return_ident_xlate = nire->return_ident_xlate;
+	nis->return_pppoe_session_id = nire->return_pppoe_session_id;
+	memcpy(nis->return_pppoe_remote_mac, nire->return_pppoe_remote_mac, ETH_ALEN);
+	nis->return_accelerated_rx_packets = 0;
+	nis->return_accelerated_rx_bytes = 0;
+	nis->return_accelerated_tx_packets = 0;
+	nis->return_accelerated_tx_bytes = 0;
+	nis->last_sync = nss_top->last_rx_jiffies;
+	spin_unlock_bh(&nss_top->stats_lock);
+}
+
+/*
+ * nss_rx_metadata_ipv4_rule_sync()
+ *	Handle the syncing of an IPv4 rule.
+ */
+static void nss_rx_metadata_ipv4_rule_sync(struct nss_ctx_instance *nss_ctx, struct nss_ipv4_rule_sync *nirs)
+{
+	/* Placeholder */
+}
+
+/*
+ * nss_rx_metadata_ipv6_rule_establish()
+ *	Handle the establishment of an IPv6 rule.
+ */
+static void nss_rx_metadata_ipv6_rule_establish(struct nss_ctx_instance *nss_ctx, struct nss_ipv6_rule_establish *nire)
+{
+	struct nss_ipv6_statistics *nis;
+	struct nss_top_instance *nss_top = nss_ctx->nss_top;
+
+	if (unlikely(nire->index >= NSS_IPV6_CONNECTION_ENTRIES)) {
+		nss_warning("%p: Bad connection entry index: %d\n", nss_ctx, nire->index);
+		return;
+	}
+
+	nis = &nss_top->nss_ipv6_statistics[nire->index];
+
+	spin_lock_bh(&nss_top->stats_lock);
+	nis->protocol = nire->protocol;
+	nis->flow_interface = nire->flow_interface;
+	nis->flow_mtu = nire->flow_mtu;
+	nis->flow_ip[0] = nire->flow_ip[0];
+	nis->flow_ip[1] = nire->flow_ip[1];
+	nis->flow_ip[2] = nire->flow_ip[2];
+	nis->flow_ip[3] = nire->flow_ip[3];
+	nis->flow_ident = nire->flow_ident;
+	nis->flow_pppoe_session_id = nire->flow_pppoe_session_id;
+	memcpy(nis->flow_pppoe_remote_mac, nire->flow_pppoe_remote_mac, ETH_ALEN);
+	nis->flow_accelerated_rx_packets = 0;
+	nis->flow_accelerated_rx_bytes = 0;
+	nis->flow_accelerated_tx_packets = 0;
+	nis->flow_accelerated_tx_bytes = 0;
+	nis->return_interface = nire->return_interface;
+	nis->return_mtu = nire->return_mtu;
+	nis->return_ip[0] = nire->return_ip[0];
+	nis->return_ip[1] = nire->return_ip[1];
+	nis->return_ip[2] = nire->return_ip[2];
+	nis->return_ip[3] = nire->return_ip[3];
+	nis->return_ident = nire->return_ident;
+	nis->return_pppoe_session_id = nire->return_pppoe_session_id;
+	memcpy(nis->return_pppoe_remote_mac, nire->return_pppoe_remote_mac, ETH_ALEN);
+	nis->return_accelerated_rx_packets = 0;
+	nis->return_accelerated_rx_bytes = 0;
+	nis->return_accelerated_tx_packets = 0;
+	nis->return_accelerated_tx_bytes = 0;
+	nis->last_sync = nss_top->last_rx_jiffies;
+	spin_unlock_bh(&nss_top->stats_lock);
+}
+
+/*
+ * nss_rx_metadata_ipv6_rule_sync()
+ *	Handle the syncing of an IPv6 rule.
+ */
+static void nss_rx_metadata_ipv6_rule_sync(struct nss_ctx_instance *nss_ctx, struct nss_ipv6_rule_sync *nirs)
+{
+	/* Place holder */
+}
+
+/*
+ * nss_rx_metadata_gmac_stats_sync()
+ *	Handle the syncing of GMAC stats.
+ */
+static void nss_rx_metadata_gmac_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_gmac_stats_sync *ngss)
+{
+	void *ctx;
+	nss_phys_if_event_callback_t cb;
+
+	if (ngss->interface >= NSS_MAX_PHYSICAL_INTERFACES) {
+		nss_warning("%p: Callback received for invalid interface %d", nss_ctx, ngss->interface);
+		return;
+	}
+
+	ctx = nss_ctx->nss_top->phys_if_ctx[ngss->interface];
+
+	/**
+	 * We need to ensure that processor/compiler do not re-order ctx
+	 * and cb reads. Note that write to ctx and cb happens in
+	 * reverse order. The idea is that we do not want a case where
+	 * cb is valid but ctx is NULL.
+	 */
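+	/* This rmb() pairs with the wmb() in nss_register_phys_if(). */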
+	rmb();
+	cb = nss_ctx->nss_top->phys_if_event_callback[ngss->interface];
+
+	/*
+	 * Call GMAC driver callback
+	 */
+	if (cb) {
+		cb(ctx, NSS_GMAC_EVENT_STATS, (void *)ngss, sizeof(struct nss_gmac_stats_sync));
+	} else {
+		nss_warning("%p: Event received for GMAC interface %d before registration", nss_ctx, ngss->interface);
+	}
+}
+
+/*
+ * nss_rx_metadata_interface_stats_sync()
+ *	Handle the syncing of interface statistics.
+ */
+static void nss_rx_metadata_interface_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_interface_stats_sync *niss)
+{
+	/* Place holder */
+}
+
+/*
+ * nss_rx_metadata_nss_stats_sync()
+ *	Handle the syncing of NSS statistics.
+ */
+static void nss_rx_metadata_nss_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_nss_stats_sync *nnss)
+{
+	struct nss_top_instance *nss_top = nss_ctx->nss_top;
+
+	spin_lock_bh(&nss_top->stats_lock);
+	nss_top->ipv4_connection_create_requests += nnss->ipv4_connection_create_requests;
+	nss_top->ipv4_connection_create_collisions += nnss->ipv4_connection_create_collisions;
+	nss_top->ipv4_connection_create_invalid_interface += nnss->ipv4_connection_create_invalid_interface;
+	nss_top->ipv4_connection_destroy_requests += nnss->ipv4_connection_destroy_requests;
+	nss_top->ipv4_connection_destroy_misses += nnss->ipv4_connection_destroy_misses;
+	nss_top->ipv4_connection_hash_hits += nnss->ipv4_connection_hash_hits;
+	nss_top->ipv4_connection_hash_reorders += nnss->ipv4_connection_hash_reorders;
+	nss_top->ipv4_connection_flushes += nnss->ipv4_connection_flushes;
+	nss_top->ipv4_connection_evictions += nnss->ipv4_connection_evictions;
+	nss_top->ipv6_connection_create_requests += nnss->ipv6_connection_create_requests;
+	nss_top->ipv6_connection_create_collisions += nnss->ipv6_connection_create_collisions;
+	nss_top->ipv6_connection_create_invalid_interface += nnss->ipv6_connection_create_invalid_interface;
+	nss_top->ipv6_connection_destroy_requests += nnss->ipv6_connection_destroy_requests;
+	nss_top->ipv6_connection_destroy_misses += nnss->ipv6_connection_destroy_misses;
+	nss_top->ipv6_connection_hash_hits += nnss->ipv6_connection_hash_hits;
+	nss_top->ipv6_connection_hash_reorders += nnss->ipv6_connection_hash_reorders;
+	nss_top->ipv6_connection_flushes += nnss->ipv6_connection_flushes;
+	nss_top->ipv6_connection_evictions += nnss->ipv6_connection_evictions;
+	nss_top->l2switch_create_requests += nnss->l2switch_create_requests;
+	nss_top->l2switch_create_collisions += nnss->l2switch_create_collisions;
+	nss_top->l2switch_create_invalid_interface += nnss->l2switch_create_invalid_interface;
+	nss_top->l2switch_destroy_requests += nnss->l2switch_destroy_requests;
+	nss_top->l2switch_destroy_misses += nnss->l2switch_destroy_misses;
+	nss_top->l2switch_hash_hits += nnss->l2switch_hash_hits;
+	nss_top->l2switch_hash_reorders += nnss->l2switch_hash_reorders;
+	nss_top->l2switch_flushes += nnss->l2switch_flushes;
+	nss_top->l2switch_evictions += nnss->l2switch_evictions;
+	nss_top->pppoe_session_create_requests += nnss->pppoe_session_create_requests;
+	nss_top->pppoe_session_create_failures += nnss->pppoe_session_create_failures;
+	nss_top->pppoe_session_destroy_requests += nnss->pppoe_session_destroy_requests;
+	nss_top->pppoe_session_destroy_misses += nnss->pppoe_session_destroy_misses;
+	nss_top->pe_queue_dropped += nnss->pe_queue_dropped;
+	nss_top->pe_total_ticks += nnss->pe_total_ticks;
+	if (unlikely(nss_top->pe_worst_case_ticks < nnss->pe_worst_case_ticks)) {
+		nss_top->pe_worst_case_ticks = nnss->pe_worst_case_ticks;
+	}
+	nss_top->pe_iterations += nnss->pe_iterations;
+
+	nss_top->except_queue_dropped += nnss->except_queue_dropped;
+	nss_top->except_total_ticks += nnss->except_total_ticks;
+	if (unlikely(nss_top->except_worst_case_ticks < nnss->except_worst_case_ticks)) {
+		nss_top->except_worst_case_ticks = nnss->except_worst_case_ticks;
+	}
+	nss_top->except_iterations += nnss->except_iterations;
+
+	nss_top->l2switch_queue_dropped += nnss->l2switch_queue_dropped;
+	nss_top->l2switch_total_ticks += nnss->l2switch_total_ticks;
+	if (unlikely(nss_top->l2switch_worst_case_ticks < nnss->l2switch_worst_case_ticks)) {
+		nss_top->l2switch_worst_case_ticks = nnss->l2switch_worst_case_ticks;
+	}
+	nss_top->l2switch_iterations += nnss->l2switch_iterations;
+
+	nss_top->pbuf_alloc_fails += nnss->pbuf_alloc_fails;
+	nss_top->pbuf_payload_alloc_fails += nnss->pbuf_payload_alloc_fails;
+	spin_unlock_bh(&nss_top->stats_lock);
+}
+
+/*
+ * nss_rx_metadata_pppoe_exception_stats_sync()
+ *	Handle the syncing of PPPoE exception statistics.
+ */
+static void nss_rx_metadata_pppoe_exception_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_pppoe_exception_stats_sync *npess)
+{
+	/* Place holder */
+}
+
+/*
+ * nss_rx_metadata_profiler_sync()
+ *	Handle the syncing of profiler information.
+ */
+static void nss_rx_metadata_profiler_sync(struct nss_ctx_instance *nss_ctx, struct nss_profiler_sync *profiler_sync)
+{
+	void *ctx;
+	nss_profiler_callback_t cb;
+
+	ctx = nss_ctx->nss_top->profiler_ctx[nss_ctx->id];
+
+	/**
+	 * We need to ensure that processor/compiler do not re-order ctx
+	 * and cb reads. Note that write to ctx and cb happens in
+	 * reverse order. The idea is that we do not want a case where
+	 * cb is valid but ctx is NULL.
+	 */
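+	/* This rmb() pairs with the wmb() in nss_register_profiler_if(). */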
+	rmb();
+	cb = nss_ctx->nss_top->profiler_callback[nss_ctx->id];
+
+	/*
+	 * Call profiler callback
+	 */
+	if (cb) {
+		cb(ctx, profiler_sync->buf, profiler_sync->len);
+	} else {
+		nss_warning("%p: Event received for profiler interface before registration", nss_ctx);
+	}
+}
+
+/*
+ * nss_rx_handle_status_pkt()
+ *	Handle the metadata/status packet.
+ */
+void nss_rx_handle_status_pkt(struct nss_ctx_instance *nss_ctx, struct sk_buff *nbuf)
+{
+	struct nss_rx_metadata_object *nrmo;
+
+	nrmo = (struct nss_rx_metadata_object *)nbuf->data;
+	switch (nrmo->type) {
+	case NSS_RX_METADATA_TYPE_IPV4_RULE_ESTABLISH:
+		nss_rx_metadata_ipv4_rule_establish(nss_ctx, &nrmo->sub.ipv4_rule_establish);
+		break;
+
+	case NSS_RX_METADATA_TYPE_IPV4_RULE_SYNC:
+		nss_rx_metadata_ipv4_rule_sync(nss_ctx, &nrmo->sub.ipv4_rule_sync);
+		break;
+
+	case NSS_RX_METADATA_TYPE_IPV6_RULE_ESTABLISH:
+		nss_rx_metadata_ipv6_rule_establish(nss_ctx, &nrmo->sub.ipv6_rule_establish);
+		break;
+
+	case NSS_RX_METADATA_TYPE_IPV6_RULE_SYNC:
+		nss_rx_metadata_ipv6_rule_sync(nss_ctx, &nrmo->sub.ipv6_rule_sync);
+		break;
+
+	case NSS_RX_METADATA_TYPE_GMAC_STATS_SYNC:
+		nss_rx_metadata_gmac_stats_sync(nss_ctx, &nrmo->sub.gmac_stats_sync);
+		break;
+
+	case NSS_RX_METADATA_TYPE_INTERFACE_STATS_SYNC:
+		nss_rx_metadata_interface_stats_sync(nss_ctx, &nrmo->sub.interface_stats_sync);
+		break;
+
+	case NSS_RX_METADATA_TYPE_NSS_STATS_SYNC:
+		nss_rx_metadata_nss_stats_sync(nss_ctx, &nrmo->sub.nss_stats_sync);
+		break;
+
+	case NSS_RX_METADATA_TYPE_PPPOE_STATS_SYNC:
+		nss_rx_metadata_pppoe_exception_stats_sync(nss_ctx, &nrmo->sub.pppoe_exception_stats_sync);
+		break;
+
+	case NSS_RX_METADATA_TYPE_PROFILER_SYNC:
+		nss_rx_metadata_profiler_sync(nss_ctx, &nrmo->sub.profiler_sync);
+		break;
+
+	default:
+		/*
+		 * WARN: Unknown metadata type
+		 */
+		nss_warning("%p: Unknown NRMO %d received from NSS, nbuf->data=%p", nss_ctx, nrmo->type, nbuf->data);
+	}
+}
+
+/*
+ * nss_rx_handle_crypto_buf()
+ *	Pass a crypto buffer returned by the NSS to the registered crypto driver
+ */
+void nss_rx_handle_crypto_buf(struct nss_ctx_instance *nss_ctx, uint32_t buf, uint32_t paddr, uint32_t len)
+{
+	void *ctx;
+	nss_crypto_callback_t cb;
+
+	ctx = nss_ctx->nss_top->crypto_ctx;
+
+	/**
+	 * We need to ensure that processor/compiler do not re-order ctx
+	 * and cb reads. Note that write to ctx and cb happens in
+	 * reverse order. The idea is that we do not want a case where
+	 * cb is valid but ctx is NULL.
+	 */
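+	/* This rmb() pairs with the wmb() in nss_register_crypto_if(). */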
+	rmb();
+	cb = nss_ctx->nss_top->crypto_callback;
+
+	/*
+	 * We do not know what to do with this buffer if crypto driver has not
+	 * registered with us
+	 */
+	nss_assert(nss_ctx->nss_top->crypto_callback != NULL);
+
+	if (likely(cb)) {
+		cb(ctx, (void *)buf, paddr, len);
+	}
+}
+
+/*
+ * nss_tx_create_ipv4_rule()
+ *	Create a nss entry to accelerate the given connection
+ */
+nss_tx_status_t nss_tx_create_ipv4_rule(void *ctx, struct nss_ipv4_create *unic)
+{
+	struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *) ctx;
+	struct sk_buff *nbuf;
+	int32_t status;
+	struct nss_tx_metadata_object *ntmo;
+	struct nss_ipv4_rule_create *nirc;
+
+	nss_info("%p: Create IPv4: %pI4:%d (%pI4:%d), %pI4:%d (%pI4:%d), p: %d\n", nss_ctx,
+		&unic->src_ip, unic->src_port, &unic->src_ip_xlate, unic->src_port_xlate,
+		&unic->dest_ip, unic->dest_port, &unic->dest_ip_xlate, unic->dest_port_xlate, unic->protocol);
+
+	NSS_VERIFY_CTX_MAGIC(nss_ctx);
+	if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) {
+		nss_warning("%p: 'Create IPv4' rule dropped as core not ready", nss_ctx);
+		return NSS_TX_FAILURE_NOT_READY;
+	}
+
+	nbuf = __dev_alloc_skb(NSS_NBUF_PAYLOAD_SIZE, GFP_ATOMIC | __GFP_NOWARN);
+	if (unlikely(!nbuf)) {
+		spin_lock_bh(&nss_ctx->nss_top->stats_lock);
+		nss_ctx->nss_top->nbuf_alloc_err++;
+		spin_unlock_bh(&nss_ctx->nss_top->stats_lock);
+		nss_warning("%p: 'Create IPv4' rule dropped as command allocation failed", nss_ctx);
+		return NSS_TX_FAILURE;
+	}
+
+	ntmo = (struct nss_tx_metadata_object *)skb_put(nbuf, sizeof(struct nss_tx_metadata_object));
+	ntmo->type = NSS_TX_METADATA_TYPE_IPV4_RULE_CREATE;
+
+	nirc = &ntmo->sub.ipv4_rule_create;
+	nirc->protocol = (uint8_t)unic->protocol;
+
+	nirc->flow_pppoe_session_id = unic->flow_pppoe_session_id;
+	memcpy(nirc->flow_pppoe_remote_mac, unic->flow_pppoe_remote_mac, ETH_ALEN);
+	nirc->flow_interface_num = unic->src_interface_num;
+	nirc->flow_ip = unic->src_ip;
+	nirc->flow_ip_xlate = unic->src_ip_xlate;
+	nirc->flow_ident = (uint32_t)unic->src_port;
+	nirc->flow_ident_xlate = (uint32_t)unic->src_port_xlate;
+	nirc->flow_window_scale = unic->flow_window_scale;
+	nirc->flow_max_window = unic->flow_max_window;
+	nirc->flow_end = unic->flow_end;
+	nirc->flow_max_end = unic->flow_max_end;
+	nirc->flow_mtu = unic->from_mtu;
+	memcpy(nirc->flow_mac, unic->src_mac, ETH_ALEN);
+
+	nirc->return_pppoe_session_id = unic->return_pppoe_session_id;
+	memcpy(nirc->return_pppoe_remote_mac, unic->return_pppoe_remote_mac, ETH_ALEN);
+	nirc->return_interface_num = unic->dest_interface_num;
+	nirc->return_ip = unic->dest_ip;
+	nirc->return_ip_xlate = unic->dest_ip_xlate;
+	nirc->return_ident = (uint32_t)unic->dest_port;
+	nirc->return_ident_xlate = (uint32_t)unic->dest_port_xlate;
+	nirc->return_window_scale = unic->return_window_scale;
+	nirc->return_max_window = unic->return_max_window;
+	nirc->return_end = unic->return_end;
+	nirc->return_max_end = unic->return_max_end;
+	nirc->return_mtu = unic->to_mtu;
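+	/*
+	 * If the return direction is translated (IP or port differs after
+	 * NAT), the frame must reach the translated host, so use the
+	 * translated destination MAC; otherwise use the original one.
+	 */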
+	if (nirc->return_ip != nirc->return_ip_xlate || nirc->return_ident != nirc->return_ident_xlate) {
+		memcpy(nirc->return_mac, unic->dest_mac_xlate, ETH_ALEN);
+	} else {
+		memcpy(nirc->return_mac, unic->dest_mac, ETH_ALEN);
+	}
+
+	nirc->flags = 0;
+	if (unic->flags & NSS_IPV4_CREATE_FLAG_NO_SEQ_CHECK) {
+		nirc->flags |= NSS_IPV4_RULE_CREATE_FLAG_NO_SEQ_CHECK;
+	}
+
+	status = nss_core_send_buffer(nss_ctx, 0, nbuf, NSS_IF_CMD_QUEUE, H2N_BUFFER_CTRL, 0);
+	if (status != NSS_CORE_STATUS_SUCCESS) {
+		dev_kfree_skb_any(nbuf);
+		nss_warning("%p: Unable to enqueue 'Create IPv4' rule\n", nss_ctx);
+		return NSS_TX_FAILURE;
+	}
+
+	nss_hal_send_interrupt(nss_ctx->nmap, nss_ctx->h2n_desc_rings[NSS_IF_CMD_QUEUE].desc_ring.int_bit,
+								NSS_REGS_H2N_INTR_STATUS_DATA_COMMAND_QUEUE);
+
+	return NSS_TX_SUCCESS;
+}
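+
+/*
+ * Example (illustrative only): a connection manager typically fills a
+ * struct nss_ipv4_create from its own flow state and pushes it down with
+ * the context returned by nss_register_ipv4_mgr(), e.g.:
+ *
+ *	struct nss_ipv4_create unic = {0};
+ *
+ *	unic.protocol = IPPROTO_UDP;
+ *	unic.src_interface_num = 1;
+ *	unic.dest_interface_num = 2;
+ *	unic.src_ip = unic.src_ip_xlate = flow_ip;	(no NAT in this example)
+ *	unic.src_port = unic.src_port_xlate = flow_port;
+ *	... fill dest_*, MTUs and MAC addresses likewise ...
+ *	unic.flags = NSS_IPV4_CREATE_FLAG_NO_SEQ_CHECK;
+ *
+ *	if (nss_tx_create_ipv4_rule(nss_ctx, &unic) != NSS_TX_SUCCESS) {
+ *		... connection is not accelerated; keep it on the host path ...
+ *	}
+ */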
+
+/*
+ * nss_tx_destroy_ipv4_rule()
+ *	Destroy the given connection in the NSS
+ */
+nss_tx_status_t nss_tx_destroy_ipv4_rule(void *ctx, struct nss_ipv4_destroy *unid)
+{
+	struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *) ctx;
+	struct sk_buff *nbuf;
+	int32_t status;
+	struct nss_tx_metadata_object *ntmo;
+	struct nss_ipv4_rule_destroy *nird;
+
+	nss_info("%p: Destroy IPv4: %pI4:%d, %pI4:%d, p: %d\n", nss_ctx,
+		&unid->src_ip, unid->src_port, &unid->dest_ip, unid->dest_port, unid->protocol);
+
+	NSS_VERIFY_CTX_MAGIC(nss_ctx);
+	if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) {
+		nss_warning("%p: 'Destroy IPv4' rule dropped as core not ready", nss_ctx);
+		return NSS_TX_FAILURE_NOT_READY;
+	}
+
+	nbuf = __dev_alloc_skb(NSS_NBUF_PAYLOAD_SIZE, GFP_ATOMIC | __GFP_NOWARN);
+	if (unlikely(!nbuf)) {
+		spin_lock_bh(&nss_ctx->nss_top->stats_lock);
+		nss_ctx->nss_top->nbuf_alloc_err++;
+		spin_unlock_bh(&nss_ctx->nss_top->stats_lock);
+		nss_warning("%p: 'Destroy IPv4' rule dropped as command allocation failed", nss_ctx);
+		return NSS_TX_FAILURE;
+	}
+
+	ntmo = (struct nss_tx_metadata_object *)skb_put(nbuf, sizeof(struct nss_tx_metadata_object));
+	ntmo->type = NSS_TX_METADATA_TYPE_IPV4_RULE_DESTROY;
+
+	nird = &ntmo->sub.ipv4_rule_destroy;
+	nird->protocol = (uint8_t)unid->protocol;
+	nird->flow_ip = unid->src_ip;
+	nird->flow_ident = (uint32_t)unid->src_port;
+	nird->return_ip = unid->dest_ip;
+	nird->return_ident = (uint32_t)unid->dest_port;
+
+	status = nss_core_send_buffer(nss_ctx, 0, nbuf, NSS_IF_CMD_QUEUE, H2N_BUFFER_CTRL, 0);
+	if (status != NSS_CORE_STATUS_SUCCESS) {
+		dev_kfree_skb_any(nbuf);
+		nss_warning("%p: Unable to enqueue 'Destroy IPv4' rule\n", nss_ctx);
+		return NSS_TX_FAILURE;
+	}
+
+	nss_hal_send_interrupt(nss_ctx->nmap, nss_ctx->h2n_desc_rings[NSS_IF_CMD_QUEUE].desc_ring.int_bit,
+								NSS_REGS_H2N_INTR_STATUS_DATA_COMMAND_QUEUE);
+
+	return NSS_TX_SUCCESS;
+}
+
+/*
+ * nss_tx_create_ipv6_rule()
+ *	Create a NSS entry to accelerate the given connection
+ */
+nss_tx_status_t nss_tx_create_ipv6_rule(void *ctx, struct nss_ipv6_create *unic)
+{
+	struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *) ctx;
+	struct sk_buff *nbuf;
+	int32_t status;
+	struct nss_tx_metadata_object *ntmo;
+	struct nss_ipv6_rule_create *nirc;
+
+	nss_info("%p: Create IPv6: %pI6:%d, %pI6:%d, p: %d\n", nss_ctx,
+		unic->src_ip, unic->src_port, unic->dest_ip, unic->dest_port, unic->protocol);
+
+	NSS_VERIFY_CTX_MAGIC(nss_ctx);
+	if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) {
+		nss_warning("%p: 'Create IPv6' rule dropped as core not ready", nss_ctx);
+		return NSS_TX_FAILURE_NOT_READY;
+	}
+
+	nbuf = __dev_alloc_skb(NSS_NBUF_PAYLOAD_SIZE, GFP_ATOMIC | __GFP_NOWARN);
+	if (unlikely(!nbuf)) {
+		spin_lock_bh(&nss_ctx->nss_top->stats_lock);
+		nss_ctx->nss_top->nbuf_alloc_err++;
+		spin_unlock_bh(&nss_ctx->nss_top->stats_lock);
+		nss_warning("%p: 'Create IPv6' rule dropped as command allocation failed", nss_ctx);
+		return NSS_TX_FAILURE;
+	}
+
+	ntmo = (struct nss_tx_metadata_object *)skb_put(nbuf, sizeof(struct nss_tx_metadata_object));
+	ntmo->type = NSS_TX_METADATA_TYPE_IPV6_RULE_CREATE;
+
+	nirc = &ntmo->sub.ipv6_rule_create;
+	nirc->protocol = (uint8_t)unic->protocol;
+
+	nirc->flow_pppoe_session_id = unic->flow_pppoe_session_id;
+	memcpy(nirc->flow_pppoe_remote_mac, unic->flow_pppoe_remote_mac, ETH_ALEN);
+	nirc->flow_interface_num = unic->src_interface_num;
+	nirc->flow_ip[0] = unic->src_ip[0];
+	nirc->flow_ip[1] = unic->src_ip[1];
+	nirc->flow_ip[2] = unic->src_ip[2];
+	nirc->flow_ip[3] = unic->src_ip[3];
+	nirc->flow_ident = (uint32_t)unic->src_port;
+	nirc->flow_window_scale = unic->flow_window_scale;
+	nirc->flow_max_window = unic->flow_max_window;
+	nirc->flow_end = unic->flow_end;
+	nirc->flow_max_end = unic->flow_max_end;
+	nirc->flow_mtu = unic->from_mtu;
+	memcpy(nirc->flow_mac, unic->src_mac, ETH_ALEN);
+
+	nirc->return_pppoe_session_id = unic->return_pppoe_session_id;
+	memcpy(nirc->return_pppoe_remote_mac, unic->return_pppoe_remote_mac, ETH_ALEN);
+	nirc->return_interface_num = unic->dest_interface_num;
+	nirc->return_ip[0] = unic->dest_ip[0];
+	nirc->return_ip[1] = unic->dest_ip[1];
+	nirc->return_ip[2] = unic->dest_ip[2];
+	nirc->return_ip[3] = unic->dest_ip[3];
+	nirc->return_ident = (uint32_t)unic->dest_port;
+	nirc->return_window_scale = unic->return_window_scale;
+	nirc->return_max_window = unic->return_max_window;
+	nirc->return_end = unic->return_end;
+	nirc->return_max_end = unic->return_max_end;
+	nirc->return_mtu = unic->to_mtu;
+	memcpy(nirc->return_mac, unic->dest_mac, ETH_ALEN);
+
+	nirc->flags = 0;
+	if (unic->flags & NSS_IPV6_CREATE_FLAG_NO_SEQ_CHECK) {
+		nirc->flags |= NSS_IPV6_RULE_CREATE_FLAG_NO_SEQ_CHECK;
+	}
+
+	status = nss_core_send_buffer(nss_ctx, 0, nbuf, NSS_IF_CMD_QUEUE, H2N_BUFFER_CTRL, 0);
+	if (status != NSS_CORE_STATUS_SUCCESS) {
+		dev_kfree_skb_any(nbuf);
+		nss_warning("%p: Unable to enqueue 'Create IPv6' rule\n", nss_ctx);
+		return NSS_TX_FAILURE;
+	}
+
+	nss_hal_send_interrupt(nss_ctx->nmap, nss_ctx->h2n_desc_rings[NSS_IF_CMD_QUEUE].desc_ring.int_bit,
+								NSS_REGS_H2N_INTR_STATUS_DATA_COMMAND_QUEUE);
+
+	return NSS_TX_SUCCESS;
+}
+
+/*
+ * nss_tx_destroy_ipv6_rule()
+ *	Destroy the given connection in the NSS
+ */
+nss_tx_status_t nss_tx_destroy_ipv6_rule(void *ctx, struct nss_ipv6_destroy *unid)
+{
+	struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *) ctx;
+	struct sk_buff *nbuf;
+	int32_t status;
+	struct nss_tx_metadata_object *ntmo;
+	struct nss_ipv6_rule_destroy *nird;
+
+	nss_info("%p: Destroy IPv6: %pI6:%d, %pI6:%d, p: %d\n", nss_ctx,
+		unid->src_ip, unid->src_port, unid->dest_ip, unid->dest_port, unid->protocol);
+
+	NSS_VERIFY_CTX_MAGIC(nss_ctx);
+	if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) {
+		nss_warning("%p: 'Destroy IPv6' rule dropped as core not ready", nss_ctx);
+		return NSS_TX_FAILURE_NOT_READY;
+	}
+
+	nbuf = __dev_alloc_skb(NSS_NBUF_PAYLOAD_SIZE, GFP_ATOMIC | __GFP_NOWARN);
+	if (unlikely(!nbuf)) {
+		spin_lock_bh(&nss_ctx->nss_top->stats_lock);
+		nss_ctx->nss_top->nbuf_alloc_err++;
+		spin_unlock_bh(&nss_ctx->nss_top->stats_lock);
+		nss_warning("%p: 'Destroy IPv6' rule dropped as command allocation failed", nss_ctx);
+		return NSS_TX_FAILURE;
+	}
+
+	ntmo = (struct nss_tx_metadata_object *)skb_put(nbuf, sizeof(struct nss_tx_metadata_object));
+	ntmo->type = NSS_TX_METADATA_TYPE_IPV6_RULE_DESTROY;
+
+	nird = &ntmo->sub.ipv6_rule_destroy;
+	nird->protocol = (uint8_t)unid->protocol;
+	nird->flow_ip[0] = unid->src_ip[0];
+	nird->flow_ip[1] = unid->src_ip[1];
+	nird->flow_ip[2] = unid->src_ip[2];
+	nird->flow_ip[3] = unid->src_ip[3];
+	nird->flow_ident = (uint32_t)unid->src_port;
+	nird->return_ip[0] = unid->dest_ip[0];
+	nird->return_ip[1] = unid->dest_ip[1];
+	nird->return_ip[2] = unid->dest_ip[2];
+	nird->return_ip[3] = unid->dest_ip[3];
+	nird->return_ident = (uint32_t)unid->dest_port;
+
+	status = nss_core_send_buffer(nss_ctx, 0, nbuf, NSS_IF_CMD_QUEUE, H2N_BUFFER_CTRL, 0);
+	if (status != NSS_CORE_STATUS_SUCCESS) {
+		dev_kfree_skb_any(nbuf);
+		nss_warning("%p: Unable to enqueue 'Destroy IPv6' rule\n", nss_ctx);
+		return NSS_TX_FAILURE;
+	}
+
+	nss_hal_send_interrupt(nss_ctx->nmap, nss_ctx->h2n_desc_rings[NSS_IF_CMD_QUEUE].desc_ring.int_bit,
+								NSS_REGS_H2N_INTR_STATUS_DATA_COMMAND_QUEUE);
+
+	return NSS_TX_SUCCESS;
+}
+
+/*
+ * nss_tx_create_l2switch_rule()
+ *	Create an L2 switch forwarding entry in the NSS
+ */
+nss_tx_status_t nss_tx_create_l2switch_rule(void *ctx, struct nss_l2switch_create *unlc)
+{
+	struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *) ctx;
+	struct sk_buff *nbuf;
+	int32_t status;
+	struct nss_tx_metadata_object *ntmo;
+	struct nss_l2switch_rule_create *nlrc;
+
+	nss_info("%p: Create L2switch rule, addr=%p\n", nss_ctx, unlc->addr);
+
+	NSS_VERIFY_CTX_MAGIC(nss_ctx);
+	if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) {
+		nss_warning("%p: 'Create L2switch' rule dropped as core not ready", nss_ctx);
+		return NSS_TX_FAILURE_NOT_READY;
+	}
+
+	nbuf = __dev_alloc_skb(NSS_NBUF_PAYLOAD_SIZE, GFP_ATOMIC | __GFP_NOWARN);
+	if (unlikely(!nbuf)) {
+		spin_lock_bh(&nss_ctx->nss_top->stats_lock);
+		nss_ctx->nss_top->nbuf_alloc_err++;
+		spin_unlock_bh(&nss_ctx->nss_top->stats_lock);
+		nss_warning("%p: 'Create L2switch' rule dropped as command allocation failed", nss_ctx);
+		return NSS_TX_FAILURE;
+	}
+
+	ntmo = (struct nss_tx_metadata_object *)skb_put(nbuf, sizeof(struct nss_tx_metadata_object));
+	ntmo->type = NSS_TX_METADATA_TYPE_L2SWITCH_RULE_CREATE;
+
+	nlrc = &ntmo->sub.l2switch_rule_create;
+	nlrc->addr[0] = unlc->addr[0];
+	nlrc->addr[1] = unlc->addr[1];
+	nlrc->addr[2] = unlc->addr[2];
+	nlrc->interface_num = unlc->interface_num;
+	nlrc->state = unlc->state;
+	nlrc->priority = unlc->priority;
+
+	status = nss_core_send_buffer(nss_ctx, 0, nbuf, NSS_IF_CMD_QUEUE, H2N_BUFFER_CTRL, 0);
+	if (status != NSS_CORE_STATUS_SUCCESS) {
+		dev_kfree_skb_any(nbuf);
+		nss_warning("%p: Unable to enqueue 'Create L2switch' rule\n", nss_ctx);
+		return NSS_TX_FAILURE;
+	}
+
+	nss_hal_send_interrupt(nss_ctx->nmap, nss_ctx->h2n_desc_rings[NSS_IF_CMD_QUEUE].desc_ring.int_bit,
+								NSS_REGS_H2N_INTR_STATUS_DATA_COMMAND_QUEUE);
+
+	return NSS_TX_SUCCESS;
+}
+
+/*
+ * nss_tx_destroy_l2switch_rule()
+ *	Destroy the given L2 switch forwarding entry in the NSS
+ */
+nss_tx_status_t nss_tx_destroy_l2switch_rule(void *ctx, struct nss_l2switch_destroy *unld)
+{
+	struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *) ctx;
+	struct sk_buff *nbuf;
+	int32_t status;
+	struct nss_tx_metadata_object *ntmo;
+	struct nss_l2switch_rule_destroy *nlrd;
+
+	nss_info("%p: L2switch destroy rule, addr=%p\n", nss_ctx, unld->addr);
+
+	NSS_VERIFY_CTX_MAGIC(nss_ctx);
+	if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) {
+		nss_warning("%p: 'Destroy L2switch' rule dropped as core not ready", nss_ctx);
+		return NSS_TX_FAILURE_NOT_READY;
+	}
+
+	nbuf = __dev_alloc_skb(NSS_NBUF_PAYLOAD_SIZE, GFP_ATOMIC | __GFP_NOWARN);
+	if (unlikely(!nbuf)) {
+		spin_lock_bh(&nss_ctx->nss_top->stats_lock);
+		nss_ctx->nss_top->nbuf_alloc_err++;
+		spin_unlock_bh(&nss_ctx->nss_top->stats_lock);
+		nss_warning("%p: 'Destroy L2switch' rule dropped as command allocation failed", nss_ctx);
+		return NSS_TX_FAILURE;
+	}
+
+	ntmo = (struct nss_tx_metadata_object *)skb_put(nbuf, sizeof(struct nss_tx_metadata_object));
+	ntmo->type = NSS_TX_METADATA_TYPE_L2SWITCH_RULE_DESTROY;
+
+	nlrd = &ntmo->sub.l2switch_rule_destroy;
+	nlrd->mac_addr[0] = unld->addr[0];
+	nlrd->mac_addr[1] = unld->addr[1];
+	nlrd->mac_addr[2] = unld->addr[2];
+	nlrd->interface_num = unld->interface_num;
+
+	status = nss_core_send_buffer(nss_ctx, 0, nbuf, NSS_IF_CMD_QUEUE, H2N_BUFFER_CTRL, 0);
+	if (status != NSS_CORE_STATUS_SUCCESS) {
+		dev_kfree_skb_any(nbuf);
+		nss_warning("%p: Unable to enqueue 'Destroy L2switch'\n", nss_ctx);
+		return NSS_TX_FAILURE;
+	}
+
+	nss_hal_send_interrupt(nss_ctx->nmap, nss_ctx->h2n_desc_rings[NSS_IF_CMD_QUEUE].desc_ring.int_bit,
+									NSS_REGS_H2N_INTR_STATUS_DATA_COMMAND_QUEUE);
+
+	return NSS_TX_SUCCESS;
+}
+
+/*
+ * nss_tx_destroy_all_l2switch_rules()
+ *	Destroy all L2 switch rules in NSS.
+ */
+nss_tx_status_t nss_tx_destroy_all_l2switch_rules(void *ctx)
+{
+	struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *) ctx;
+	struct sk_buff *nbuf;
+	int32_t status;
+	struct nss_tx_metadata_object *ntmo;
+
+	nss_info("%p: L2switch destroy all rules", nss_ctx);
+
+	NSS_VERIFY_CTX_MAGIC(nss_ctx);
+	if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) {
+		nss_warning("%p: 'Destroy all L2switch' rule dropped as core not ready", nss_ctx);
+		return NSS_TX_FAILURE_NOT_READY;
+	}
+
+	nbuf = __dev_alloc_skb(NSS_NBUF_PAYLOAD_SIZE, GFP_ATOMIC | __GFP_NOWARN);
+	if (unlikely(!nbuf)) {
+		spin_lock_bh(&nss_ctx->nss_top->stats_lock);
+		nss_ctx->nss_top->nbuf_alloc_err++;
+		spin_unlock_bh(&nss_ctx->nss_top->stats_lock);
+		nss_warning("%p: 'Destroy all L2switch' rule dropped as command allocation failed", nss_ctx);
+		return NSS_TX_FAILURE;
+	}
+
+	ntmo = (struct nss_tx_metadata_object *)skb_put(nbuf, sizeof(struct nss_tx_metadata_object));
+	ntmo->type = NSS_TX_METADATA_TYPE_DESTROY_ALL_L2SWITCH_RULES;
+
+	status = nss_core_send_buffer(nss_ctx, 0, nbuf, NSS_IF_CMD_QUEUE, H2N_BUFFER_CTRL, 0);
+	if (status != NSS_CORE_STATUS_SUCCESS) {
+		dev_kfree_skb_any(nbuf);
+		nss_warning("%p: Unable to enqueue 'Destroy all L2switch' rule\n", nss_ctx);
+		return NSS_TX_FAILURE;
+	}
+
+	nss_hal_send_interrupt(nss_ctx->nmap, nss_ctx->h2n_desc_rings[NSS_IF_CMD_QUEUE].desc_ring.int_bit,
+									NSS_REGS_H2N_INTR_STATUS_DATA_COMMAND_QUEUE);
+
+	return NSS_TX_SUCCESS;
+}
+
+/*
+ * nss_tx_create_ipsec_tx_rule()
+ *	Create ipsec_tx rule in NSS.
+ */
+nss_tx_status_t nss_tx_create_ipsec_tx_rule(void *ctx, struct nss_ipsec_tx_create *nitc)
+{
+	struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *) ctx;
+	struct sk_buff *nbuf;
+	int32_t status;
+	struct nss_tx_metadata_object *ntmo;
+	struct nss_ipsec_tx_rule_create *nitrc;
+
+	nss_info("%p: Create IPsec Tx rule\n", nss_ctx);
+
+	NSS_VERIFY_CTX_MAGIC(nss_ctx);
+	if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) {
+		nss_warning("%p: 'Create IPsec Tx' rule dropped as core not ready", nss_ctx);
+		return NSS_TX_FAILURE_NOT_READY;
+	}
+
+	nbuf = __dev_alloc_skb(NSS_NBUF_PAYLOAD_SIZE, GFP_ATOMIC | __GFP_NOWARN);
+	if (unlikely(!nbuf)) {
+		spin_lock_bh(&nss_ctx->nss_top->stats_lock);
+		nss_ctx->nss_top->nbuf_alloc_err++;
+		spin_unlock_bh(&nss_ctx->nss_top->stats_lock);
+		nss_warning("%p: 'Create IPsec Tx' rule dropped as command allocation failed", nss_ctx);
+		return NSS_TX_FAILURE;
+	}
+
+	ntmo = (struct nss_tx_metadata_object *)skb_put(nbuf, sizeof(struct nss_tx_metadata_object));
+	ntmo->type = NSS_TX_METADATA_TYPE_IPSEC_TX_RULE_CREATE;
+
+	nitrc = &ntmo->sub.ipsec_tx_rule_create;
+	nitrc->spi = nitc->spi;
+	nitrc->replay = nitc->replay;
+	nitrc->src_addr = nitc->src_addr;
+	nitrc->dest_addr = nitc->dest_addr;
+	nitrc->ses_idx = nitc->ses_idx;
+
+	status = nss_core_send_buffer(nss_ctx, 0, nbuf, NSS_IF_CMD_QUEUE, H2N_BUFFER_CTRL, 0);
+	if (status != NSS_CORE_STATUS_SUCCESS) {
+		dev_kfree_skb_any(nbuf);
+		nss_warning("%p: Unable to enqueue 'Create IPsec Tx' rule\n", nss_ctx);
+		return NSS_TX_FAILURE;
+	}
+
+	nss_hal_send_interrupt(nss_ctx->nmap, nss_ctx->h2n_desc_rings[NSS_IF_CMD_QUEUE].desc_ring.int_bit,
+									NSS_REGS_H2N_INTR_STATUS_DATA_COMMAND_QUEUE);
+
+	return NSS_TX_SUCCESS;
+}
+
+/*
+ * nss_tx_destroy_ipsec_tx_rule()
+ *	Destroy ipsec_tx rule in NSS.
+ */
+nss_tx_status_t nss_tx_destroy_ipsec_tx_rule(void *ctx, struct nss_ipsec_tx_destroy *nitd)
+{
+	struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *) ctx;
+	struct sk_buff *nbuf;
+	int32_t status;
+	struct nss_tx_metadata_object *ntmo;
+	struct nss_ipsec_tx_rule_destroy *nitrd;
+
+	nss_info("%p: Destroy IPsec Tx rule\n", nss_ctx);
+
+	NSS_VERIFY_CTX_MAGIC(nss_ctx);
+	if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) {
+		nss_warning("%p: 'Destroy IPsec Tx' rule dropped as core not ready", nss_ctx);
+		return NSS_TX_FAILURE_NOT_READY;
+	}
+
+	nbuf = __dev_alloc_skb(NSS_NBUF_PAYLOAD_SIZE, GFP_ATOMIC | __GFP_NOWARN);
+	if (unlikely(!nbuf)) {
+		spin_lock_bh(&nss_ctx->nss_top->stats_lock);
+		nss_ctx->nss_top->nbuf_alloc_err++;
+		spin_unlock_bh(&nss_ctx->nss_top->stats_lock);
+		nss_warning("%p: 'Destroy IPsec Tx' rule dropped as command allocation failed", nss_ctx);
+		return NSS_TX_FAILURE;
+	}
+
+	ntmo = (struct nss_tx_metadata_object *)skb_put(nbuf, sizeof(struct nss_tx_metadata_object));
+	ntmo->type = NSS_TX_METADATA_TYPE_IPSEC_TX_RULE_DESTROY;
+
+	nitrd = &ntmo->sub.ipsec_tx_rule_destroy;
+	nitrd->ses_idx = nitd->ses_idx;
+
+	status = nss_core_send_buffer(nss_ctx, 0, nbuf, NSS_IF_CMD_QUEUE, H2N_BUFFER_CTRL, 0);
+	if (status != NSS_CORE_STATUS_SUCCESS) {
+		dev_kfree_skb_any(nbuf);
+		nss_warning("%p: Unable to enqueue 'Destroy IPsec Tx' rule\n", nss_ctx);
+		return NSS_TX_FAILURE;
+	}
+
+	nss_hal_send_interrupt(nss_ctx->nmap, nss_ctx->h2n_desc_rings[NSS_IF_CMD_QUEUE].desc_ring.int_bit,
+									NSS_REGS_H2N_INTR_STATUS_DATA_COMMAND_QUEUE);
+
+	return NSS_TX_SUCCESS;
+}
+
+/*
+ * nss_tx_create_ipsec_rx_rule()
+ *	Create ipsec_rx rule in NSS.
+ */
+nss_tx_status_t nss_tx_create_ipsec_rx_rule(void *ctx, struct nss_ipsec_rx_create *nirc)
+{
+	struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *) ctx;
+	struct sk_buff *nbuf;
+	int32_t status;
+	struct nss_tx_metadata_object *ntmo;
+	struct nss_ipsec_rx_rule_create *nirrc;
+
+	nss_info("%p: Create IPsec Rx rule\n", nss_ctx);
+
+	NSS_VERIFY_CTX_MAGIC(nss_ctx);
+	if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) {
+		nss_warning("%p: 'Create IPsec Rx' rule dropped as core not ready", nss_ctx);
+		return NSS_TX_FAILURE_NOT_READY;
+	}
+
+	nbuf = __dev_alloc_skb(NSS_NBUF_PAYLOAD_SIZE, GFP_ATOMIC | __GFP_NOWARN);
+	if (unlikely(!nbuf)) {
+		spin_lock_bh(&nss_ctx->nss_top->stats_lock);
+		nss_ctx->nss_top->nbuf_alloc_err++;
+		spin_unlock_bh(&nss_ctx->nss_top->stats_lock);
+		nss_warning("%p: 'Create IPsec Rx' rule dropped as command allocation failed", nss_ctx);
+		return NSS_TX_FAILURE;
+	}
+
+	ntmo = (struct nss_tx_metadata_object *)skb_put(nbuf, sizeof(struct nss_tx_metadata_object));
+	ntmo->type = NSS_TX_METADATA_TYPE_IPSEC_RX_RULE_CREATE;
+
+	nirrc = &ntmo->sub.ipsec_rx_rule_create;
+	nirrc->spi = nirc->spi;
+	nirrc->replay = nirc->replay;
+	nirrc->src_addr = nirc->src_addr;
+	nirrc->dest_addr = nirc->dest_addr;
+	nirrc->ses_idx = nirc->ses_idx;
+
+	status = nss_core_send_buffer(nss_ctx, 0, nbuf, NSS_IF_CMD_QUEUE, H2N_BUFFER_CTRL, 0);
+	if (status != NSS_CORE_STATUS_SUCCESS) {
+		dev_kfree_skb_any(nbuf);
+		nss_warning("%p: Unable to enqueue 'Create IPsec Rx' rule\n", nss_ctx);
+		return NSS_TX_FAILURE;
+	}
+
+	nss_hal_send_interrupt(nss_ctx->nmap, nss_ctx->h2n_desc_rings[NSS_IF_CMD_QUEUE].desc_ring.int_bit,
+									NSS_REGS_H2N_INTR_STATUS_DATA_COMMAND_QUEUE);
+
+	return NSS_TX_SUCCESS;
+}
+
+/*
+ * nss_tx_destroy_ipsec_rx_rule()
+ *	Destroy ipsec_rx rule in NSS
+ */
+nss_tx_status_t nss_tx_destroy_ipsec_rx_rule(void *ctx, struct nss_ipsec_rx_destroy *nird)
+{
+	struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *) ctx;
+	struct sk_buff *nbuf;
+	int32_t status;
+	struct nss_tx_metadata_object *ntmo;
+	struct nss_ipsec_rx_rule_destroy *nirrd;
+
+	nss_info("%p: Destroy IPsec Rx rule\n", nss_ctx);
+
+	NSS_VERIFY_CTX_MAGIC(nss_ctx);
+	if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) {
+		nss_warning("%p: 'Destroy IPsec Rx' rule dropped as core not ready", nss_ctx);
+		return NSS_TX_FAILURE_NOT_READY;
+	}
+
+	nbuf = __dev_alloc_skb(NSS_NBUF_PAYLOAD_SIZE, GFP_ATOMIC | __GFP_NOWARN);
+	if (unlikely(!nbuf)) {
+		spin_lock_bh(&nss_ctx->nss_top->stats_lock);
+		nss_ctx->nss_top->nbuf_alloc_err++;
+		spin_unlock_bh(&nss_ctx->nss_top->stats_lock);
+		nss_warning("%p: 'Destroy IPsec Rx' rule dropped as command allocation failed", nss_ctx);
+		return NSS_TX_FAILURE;
+	}
+
+	ntmo = (struct nss_tx_metadata_object *)skb_put(nbuf, sizeof(struct nss_tx_metadata_object));
+	ntmo->type = NSS_TX_METADATA_TYPE_IPSEC_RX_RULE_DESTROY;
+
+	nirrd = &ntmo->sub.ipsec_rx_rule_destroy;
+	nirrd->ses_idx = nird->ses_idx;
+
+	status = nss_core_send_buffer(nss_ctx, 0, nbuf, NSS_IF_CMD_QUEUE, H2N_BUFFER_CTRL, 0);
+	if (status != NSS_CORE_STATUS_SUCCESS) {
+		dev_kfree_skb_any(nbuf);
+		nss_warning("%p: Unable to enqueue 'Destroy IPsec Rx' rule\n", nss_ctx);
+		return NSS_TX_FAILURE;
+	}
+
+	nss_hal_send_interrupt(nss_ctx->nmap, nss_ctx->h2n_desc_rings[NSS_IF_CMD_QUEUE].desc_ring.int_bit,
+									NSS_REGS_H2N_INTR_STATUS_DATA_COMMAND_QUEUE);
+
+	return NSS_TX_SUCCESS;
+}
+
+/*
+ * nss_tx_phys_if_buf()
+ *	Send packet to physical interface owned by NSS
+ */
+nss_tx_status_t nss_tx_phys_if_buf(void *ctx, struct sk_buff *os_buf, uint32_t if_num)
+{
+	struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)ctx;
+	int32_t status;
+
+	nss_trace("%p: Phys If Tx packet, id:%d, data=%p", nss_ctx, if_num, os_buf->data);
+
+	NSS_VERIFY_CTX_MAGIC(nss_ctx);
+	if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) {
+		nss_warning("%p: 'Phys If Tx' packet dropped as core not ready", nss_ctx);
+		return NSS_TX_FAILURE_NOT_READY;
+	}
+
+	status = nss_core_send_buffer(nss_ctx, if_num, os_buf, NSS_IF_DATA_QUEUE, H2N_BUFFER_PACKET, 0);
+	if (unlikely(status != NSS_CORE_STATUS_SUCCESS)) {
+		nss_warning("%p: Unable to enqueue 'Phys If Tx' packet\n", nss_ctx);
+		if (status == NSS_CORE_STATUS_FAILURE_QUEUE) {
+			return NSS_TX_FAILURE_QUEUE;
+		}
+
+		return NSS_TX_FAILURE;
+	}
+
+	/*
+	 * Kick the NSS awake so it can process our new entry.
+	 */
+	nss_hal_send_interrupt(nss_ctx->nmap, nss_ctx->h2n_desc_rings[NSS_IF_DATA_QUEUE].desc_ring.int_bit,
+									NSS_REGS_H2N_INTR_STATUS_DATA_COMMAND_QUEUE);
+	return NSS_TX_SUCCESS;
+}
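+
+/*
+ * Illustrative caller sketch (hypothetical GMAC transmit path):
+ *
+ *	status = nss_tx_phys_if_buf(nss_ctx, skb, if_num);
+ *	if (status == NSS_TX_FAILURE_QUEUE) {
+ *		... descriptor queue is congested: stop the Tx queue and wait
+ *		    for the registered queue decongestion callback before
+ *		    retrying ...
+ *	}
+ */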
+
+/*
+ * nss_tx_phys_if_open()
+ *	Send open command to physical interface
+ */
+nss_tx_status_t nss_tx_phys_if_open(void *ctx, uint32_t tx_desc_ring, uint32_t rx_desc_ring, uint32_t if_num)
+{
+	struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *) ctx;
+	struct sk_buff *nbuf;
+	int32_t status;
+	struct nss_tx_metadata_object *ntmo;
+	struct nss_if_open *nio;
+
+	nss_info("%p: Phys If Open, id:%d, TxDesc: %x, RxDesc: %x\n", nss_ctx, if_num, tx_desc_ring, rx_desc_ring);
+
+	NSS_VERIFY_CTX_MAGIC(nss_ctx);
+	if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) {
+		nss_warning("%p: 'Phys If Open' rule dropped as core not ready", nss_ctx);
+		return NSS_TX_FAILURE_NOT_READY;
+	}
+
+	nbuf = __dev_alloc_skb(NSS_NBUF_PAYLOAD_SIZE, GFP_ATOMIC | __GFP_NOWARN);
+	if (unlikely(!nbuf)) {
+		spin_lock_bh(&nss_ctx->nss_top->stats_lock);
+		nss_ctx->nss_top->nbuf_alloc_err++;
+		spin_unlock_bh(&nss_ctx->nss_top->stats_lock);
+		nss_warning("%p: 'Phys If Open' rule dropped as command allocation failed", nss_ctx);
+		return NSS_TX_FAILURE;
+	}
+
+	ntmo = (struct nss_tx_metadata_object *)skb_put(nbuf, sizeof(struct nss_tx_metadata_object));
+	ntmo->type = NSS_TX_METADATA_TYPE_INTERFACE_OPEN;
+
+	nio = &ntmo->sub.if_open;
+	nio->interface_num = if_num;
+	nio->tx_desc_ring = tx_desc_ring;
+	nio->rx_desc_ring = rx_desc_ring;
+
+	status = nss_core_send_buffer(nss_ctx, 0, nbuf, NSS_IF_CMD_QUEUE, H2N_BUFFER_CTRL, 0);
+	if (status != NSS_CORE_STATUS_SUCCESS) {
+		dev_kfree_skb_any(nbuf);
+		nss_warning("%p: Unable to enqueue 'Phys If Open' rule\n", nss_ctx);
+		return NSS_TX_FAILURE;
+	}
+
+	nss_hal_send_interrupt(nss_ctx->nmap, nss_ctx->h2n_desc_rings[NSS_IF_CMD_QUEUE].desc_ring.int_bit,
+								NSS_REGS_H2N_INTR_STATUS_DATA_COMMAND_QUEUE);
+	return NSS_TX_SUCCESS;
+}
+
+/*
+ * nss_tx_phys_if_close()
+ *	Send close command to physical interface
+ */
+nss_tx_status_t nss_tx_phys_if_close(void *ctx, uint32_t if_num)
+{
+	struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *) ctx;
+	struct sk_buff *nbuf;
+	int32_t status;
+	struct nss_tx_metadata_object *ntmo;
+	struct nss_if_close *nic;
+
+	nss_info("%p: Phys If Close, id:%d \n", nss_ctx, if_num);
+
+	NSS_VERIFY_CTX_MAGIC(nss_ctx);
+	if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) {
+		nss_warning("%p: 'Phys If Close' rule dropped as core not ready", nss_ctx);
+		return NSS_TX_FAILURE_NOT_READY;
+	}
+
+	nbuf = __dev_alloc_skb(NSS_NBUF_PAYLOAD_SIZE, GFP_ATOMIC | __GFP_NOWARN);
+	if (unlikely(!nbuf)) {
+		spin_lock_bh(&nss_ctx->nss_top->stats_lock);
+		nss_ctx->nss_top->nbuf_alloc_err++;
+		spin_unlock_bh(&nss_ctx->nss_top->stats_lock);
+		nss_warning("%p: 'Phys If Close' rule dropped as command allocation failed", nss_ctx);
+		return NSS_TX_FAILURE;
+	}
+
+	ntmo = (struct nss_tx_metadata_object *)skb_put(nbuf, sizeof(struct nss_tx_metadata_object));
+	ntmo->type = NSS_TX_METADATA_TYPE_INTERFACE_CLOSE;
+
+	nic = &ntmo->sub.if_close;
+	nic->interface_num = if_num;
+
+	status = nss_core_send_buffer(nss_ctx, 0, nbuf, NSS_IF_CMD_QUEUE, H2N_BUFFER_CTRL, 0);
+	if (status != NSS_CORE_STATUS_SUCCESS) {
+		dev_kfree_skb_any(nbuf);
+		nss_info("%p: Unable to enqueue 'Phys If Close' rule\n", nss_ctx);
+		return NSS_TX_FAILURE;
+	}
+
+	nss_hal_send_interrupt(nss_ctx->nmap, nss_ctx->h2n_desc_rings[NSS_IF_CMD_QUEUE].desc_ring.int_bit,
+								NSS_REGS_H2N_INTR_STATUS_DATA_COMMAND_QUEUE);
+	return NSS_TX_SUCCESS;
+}
+
+/*
+ * nss_tx_phys_if_link_state()
+ *	Send link state to physical interface
+ */
+nss_tx_status_t nss_tx_phys_if_link_state(void *ctx, uint32_t link_state, uint32_t if_num)
+{
+	struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *) ctx;
+	struct sk_buff *nbuf;
+	int32_t status;
+	struct nss_tx_metadata_object *ntmo;
+	struct nss_if_link_state_notify *nils;
+
+	nss_info("%p: Phys If Link State, id:%d, State: %x\n", nss_ctx, if_num, link_state);
+
+	NSS_VERIFY_CTX_MAGIC(nss_ctx);
+	if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) {
+		nss_warning("%p: 'Phys If Link State' rule dropped as core not ready", nss_ctx);
+		return NSS_TX_FAILURE_NOT_READY;
+	}
+
+	nbuf = __dev_alloc_skb(NSS_NBUF_PAYLOAD_SIZE, GFP_ATOMIC | __GFP_NOWARN);
+	if (unlikely(!nbuf)) {
+		spin_lock_bh(&nss_ctx->nss_top->stats_lock);
+		nss_ctx->nss_top->nbuf_alloc_err++;
+		spin_unlock_bh(&nss_ctx->nss_top->stats_lock);
+		nss_warning("%p: 'Phys If Link State' rule dropped as command allocation failed", nss_ctx);
+		return NSS_TX_FAILURE;
+	}
+
+	ntmo = (struct nss_tx_metadata_object *)skb_put(nbuf, sizeof(struct nss_tx_metadata_object));
+	ntmo->type = NSS_TX_METADATA_TYPE_INTERFACE_LINK_STATE_NOTIFY;
+
+	nils = &ntmo->sub.if_link_state_notify;
+	nils->interface_num = if_num;
+	nils->state = link_state;
+
+	status = nss_core_send_buffer(nss_ctx, 0, nbuf, NSS_IF_CMD_QUEUE, H2N_BUFFER_CTRL, 0);
+	if (status != NSS_CORE_STATUS_SUCCESS) {
+		dev_kfree_skb_any(nbuf);
+		nss_warning("%p: Unable to enqueue 'Phys If Link State' rule\n", nss_ctx);
+		return NSS_TX_FAILURE;
+	}
+
+	nss_hal_send_interrupt(nss_ctx->nmap, nss_ctx->h2n_desc_rings[NSS_IF_CMD_QUEUE].desc_ring.int_bit,
+								NSS_REGS_H2N_INTR_STATUS_DATA_COMMAND_QUEUE);
+	return NSS_TX_SUCCESS;
+}
+
+/*
+ * nss_tx_phys_if_mac_addr()
+ *	Send a MAC address to physical interface
+ */
+nss_tx_status_t nss_tx_phys_if_mac_addr(void *ctx, uint8_t *addr, uint32_t if_num)
+{
+	struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *) ctx;
+	struct sk_buff *nbuf;
+	int32_t status;
+	struct nss_tx_metadata_object *ntmo;
+	struct nss_mac_address_set *nmas;
+
+	nss_info("%p: Phys If MAC Address, id:%d\n", nss_ctx, if_num);
+	nss_assert(addr != NULL);
+
+	NSS_VERIFY_CTX_MAGIC(nss_ctx);
+	if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) {
+		nss_warning("%p: 'Phys If MAC Address' rule dropped as core not ready", nss_ctx);
+		return NSS_TX_FAILURE_NOT_READY;
+	}
+
+	nbuf = __dev_alloc_skb(NSS_NBUF_PAYLOAD_SIZE, GFP_ATOMIC | __GFP_NOWARN);
+	if (unlikely(!nbuf)) {
+		spin_lock_bh(&nss_ctx->nss_top->stats_lock);
+		nss_ctx->nss_top->nbuf_alloc_err++;
+		spin_unlock_bh(&nss_ctx->nss_top->stats_lock);
+		nss_warning("%p: 'Phys If MAC Address' rule dropped as command allocation failed", nss_ctx);
+		return NSS_TX_FAILURE;
+	}
+
+	ntmo = (struct nss_tx_metadata_object *)skb_put(nbuf, sizeof(struct nss_tx_metadata_object));
+	ntmo->type = NSS_TX_METADATA_TYPE_MAC_ADDR_SET;
+
+	nmas = &ntmo->sub.mac_address_set;
+	nmas->interface_num = if_num;
+	memcpy(nmas->mac_addr, addr, ETH_ALEN);
+
+	status = nss_core_send_buffer(nss_ctx, 0, nbuf, NSS_IF_CMD_QUEUE, H2N_BUFFER_CTRL, 0);
+	if (status != NSS_CORE_STATUS_SUCCESS) {
+		dev_kfree_skb_any(nbuf);
+		nss_warning("%p: Unable to enqueue 'Phys If Mac Address' rule\n", nss_ctx);
+		return NSS_TX_FAILURE;
+	}
+
+	nss_hal_send_interrupt(nss_ctx->nmap, nss_ctx->h2n_desc_rings[NSS_IF_CMD_QUEUE].desc_ring.int_bit,
+								NSS_REGS_H2N_INTR_STATUS_DATA_COMMAND_QUEUE);
+	return NSS_TX_SUCCESS;
+}
+
+/*
+ * nss_tx_phys_if_change_mtu()
+ *	Send a MTU change command
+ */
+nss_tx_status_t nss_tx_phys_if_change_mtu(void *ctx, uint32_t mtu, uint32_t if_num)
+{
+	struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *) ctx;
+	struct sk_buff *nbuf;
+	int32_t status;
+	struct nss_tx_metadata_object *ntmo;
+
+	nss_info("%p: Phys If Change MTU, id:%d, mtu=%d\n", nss_ctx, if_num, mtu);
+
+	NSS_VERIFY_CTX_MAGIC(nss_ctx);
+	if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) {
+		nss_warning("%p: 'Phys If Change MTU' rule dropped as core not ready", nss_ctx);
+		return NSS_TX_FAILURE_NOT_READY;
+	}
+
+	nbuf = __dev_alloc_skb(NSS_NBUF_PAYLOAD_SIZE, GFP_ATOMIC | __GFP_NOWARN);
+	if (unlikely(!nbuf)) {
+		spin_lock_bh(&nss_ctx->nss_top->stats_lock);
+		nss_ctx->nss_top->nbuf_alloc_err++;
+		spin_unlock_bh(&nss_ctx->nss_top->stats_lock);
+		nss_warning("%p: 'Phys If Change MTU' rule dropped as command allocation failed", nss_ctx);
+		return NSS_TX_FAILURE;
+	}
+
+	ntmo = (struct nss_tx_metadata_object *)skb_put(nbuf, sizeof(struct nss_tx_metadata_object));
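+	/*
+	 * NOTE: no dedicated 'change MTU' metadata type exists in this initial
+	 * commit; the new MTU and interface number are not carried in the
+	 * metadata, and NSS_TX_METADATA_TYPE_DESTROY_ALL_L3_RULES is sent
+	 * instead (presumably to flush all L3 rules so that they are
+	 * re-created with the updated MTU).
+	 */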
+	ntmo->type = NSS_TX_METADATA_TYPE_DESTROY_ALL_L3_RULES;
+
+	status = nss_core_send_buffer(nss_ctx, 0, nbuf, NSS_IF_CMD_QUEUE, H2N_BUFFER_CTRL, 0);
+	if (status != NSS_CORE_STATUS_SUCCESS) {
+		dev_kfree_skb_any(nbuf);
+		nss_warning("%p: Unable to enqueue 'Phys If Change MTU' rule\n", nss_ctx);
+		return NSS_TX_FAILURE;
+	}
+
+	nss_hal_send_interrupt(nss_ctx->nmap, nss_ctx->h2n_desc_rings[NSS_IF_CMD_QUEUE].desc_ring.int_bit,
+								NSS_REGS_H2N_INTR_STATUS_DATA_COMMAND_QUEUE);
+	return NSS_TX_SUCCESS;
+}
+
+/*
+ * nss_tx_crypto_if_open()
+ *	NSS crypto open API. Opens a crypto session.
+ */
+nss_tx_status_t nss_tx_crypto_if_open(void *ctx, uint8_t *buf, uint32_t len)
+{
+	struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *) ctx;
+	struct sk_buff *nbuf;
+	int32_t status;
+	struct nss_tx_metadata_object *ntmo;
+	struct nss_crypto_open *nco;
+
+	nss_info("%p: Crypto If Open: buf: %p, len: %d\n", nss_ctx, buf, len);
+
+	NSS_VERIFY_CTX_MAGIC(nss_ctx);
+	if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) {
+		nss_warning("%p: 'Crypto If Open' rule dropped as core not ready", nss_ctx);
+		return NSS_TX_FAILURE_NOT_READY;
+	}
+
+	nbuf = __dev_alloc_skb(NSS_NBUF_PAYLOAD_SIZE, GFP_ATOMIC | __GFP_NOWARN);
+	if (unlikely(!nbuf)) {
+		spin_lock_bh(&nss_ctx->nss_top->stats_lock);
+		nss_ctx->nss_top->nbuf_alloc_err++;
+		spin_unlock_bh(&nss_ctx->nss_top->stats_lock);
+		nss_warning("%p: 'Crypto If Open' rule dropped as command allocation failed", nss_ctx);
+		return NSS_TX_FAILURE;
+	}
+
+	ntmo = (struct nss_tx_metadata_object *)skb_put(nbuf, sizeof(struct nss_tx_metadata_object));
+	ntmo->type = NSS_TX_METADATA_TYPE_CRYPTO_OPEN;
+
+	nco = &ntmo->sub.crypto_open;
+	nco->len = len;
+	memcpy(nco->buf, buf, len);
+
+	status = nss_core_send_buffer(nss_ctx, 0, nbuf, NSS_IF_CMD_QUEUE, H2N_BUFFER_CTRL, 0);
+	if (status != NSS_CORE_STATUS_SUCCESS) {
+		dev_kfree_skb_any(nbuf);
+		nss_warning("%p: Unable to enqueue 'Crypto If Open' rule\n", nss_ctx);
+		return NSS_TX_FAILURE;
+	}
+
+	nss_hal_send_interrupt(nss_ctx->nmap, nss_ctx->h2n_desc_rings[NSS_IF_CMD_QUEUE].desc_ring.int_bit,
+								NSS_REGS_H2N_INTR_STATUS_DATA_COMMAND_QUEUE);
+	return NSS_TX_SUCCESS;
+}
+
+/*
+ * nss_tx_crypto_if_close()
+ *	NSS crypto if close API. Closes a crypto session.
+ */
+nss_tx_status_t nss_tx_crypto_if_close(void *ctx, uint32_t eng)
+{
+	struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *) ctx;
+	struct sk_buff *nbuf;
+	int32_t status;
+	struct nss_tx_metadata_object *ntmo;
+	struct nss_crypto_close *ncc;
+
+	nss_info("%p: Crypto If Close:%d\n", nss_ctx, eng);
+
+	NSS_VERIFY_CTX_MAGIC(nss_ctx);
+	if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) {
+		nss_warning("%p: 'Crypto If Close' rule dropped as core not ready", nss_ctx);
+		return NSS_TX_FAILURE_NOT_READY;
+	}
+
+	nbuf = __dev_alloc_skb(NSS_NBUF_PAYLOAD_SIZE, GFP_ATOMIC | __GFP_NOWARN);
+	if (unlikely(!nbuf)) {
+		spin_lock_bh(&nss_ctx->nss_top->stats_lock);
+		nss_ctx->nss_top->nbuf_alloc_err++;
+		spin_unlock_bh(&nss_ctx->nss_top->stats_lock);
+		nss_warning("%p: 'Crypto If Close' rule dropped as command allocation failed", nss_ctx);
+		return NSS_TX_FAILURE;
+	}
+
+	ntmo = (struct nss_tx_metadata_object *)skb_put(nbuf, sizeof(struct nss_tx_metadata_object));
+	ntmo->type = NSS_TX_METADATA_TYPE_CRYPTO_CLOSE;
+
+	ncc = &ntmo->sub.crypto_close;
+	ncc->eng = eng;
+
+	status = nss_core_send_buffer(nss_ctx, 0, nbuf, NSS_IF_CMD_QUEUE, H2N_BUFFER_CTRL, 0);
+	if (status != NSS_CORE_STATUS_SUCCESS) {
+		dev_kfree_skb_any(nbuf);
+		nss_warning("%p: Unable to enqueue 'Crypto If Close' rule\n", nss_ctx);
+		return NSS_TX_FAILURE;
+	}
+
+	nss_hal_send_interrupt(nss_ctx->nmap, nss_ctx->h2n_desc_rings[NSS_IF_CMD_QUEUE].desc_ring.int_bit,
+								NSS_REGS_H2N_INTR_STATUS_DATA_COMMAND_QUEUE);
+	return NSS_TX_SUCCESS;
+}
+
+/*
+ * nss_tx_crypto_if_buf()
+ *	NSS crypto Tx API. Sends a crypto buffer to NSS.
+ */
+nss_tx_status_t nss_tx_crypto_if_buf(void *ctx, void *buf, uint32_t buf_paddr, uint16_t len)
+{
+	struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)ctx;
+	int32_t status;
+
+	nss_trace("%p: Crypto If Tx, buf=%p", nss_ctx, buf);
+
+	NSS_VERIFY_CTX_MAGIC(nss_ctx);
+	if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) {
+		nss_warning("%p: 'Crypto If Tx' packet dropped as core not ready", nss_ctx);
+		return NSS_TX_FAILURE_NOT_READY;
+	}
+
+	status = nss_core_send_crypto(nss_ctx, buf, buf_paddr, len);
+	if (unlikely(status != NSS_CORE_STATUS_SUCCESS)) {
+		nss_warning("%p: Unable to enqueue 'Crypto If Tx' packet", nss_ctx);
+		if (status == NSS_CORE_STATUS_FAILURE_QUEUE) {
+			return NSS_TX_FAILURE_QUEUE;
+		}
+
+		return NSS_TX_FAILURE;
+	}
+
+	/*
+	 * Kick the NSS awake so it can process our new entry.
+	 */
+	nss_hal_send_interrupt(nss_ctx->nmap, nss_ctx->h2n_desc_rings[NSS_IF_DATA_QUEUE].desc_ring.int_bit,
+								NSS_REGS_H2N_INTR_STATUS_DATA_COMMAND_QUEUE);
+	return NSS_TX_SUCCESS;
+}
+
+/*
+ * nss_tx_profiler_if_buf()
+ *	NSS profiler Tx API
+ */
+nss_tx_status_t nss_tx_profiler_if_buf(void *ctx, uint8_t *buf, uint32_t len)
+{
+	struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)ctx;
+	struct sk_buff *nbuf;
+	int32_t status;
+	struct nss_tx_metadata_object *ntmo;
+	struct nss_profiler_tx *npt;
+
+	nss_trace("%p: Profiler If Tx, buf=%p", nss_ctx, buf);
+
+	NSS_VERIFY_CTX_MAGIC(nss_ctx);
+	if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) {
+		nss_warning("%p: 'Profiler If Tx' rule dropped as core not ready", nss_ctx);
+		return NSS_TX_FAILURE_NOT_READY;
+	}
+
+	nbuf = __dev_alloc_skb(NSS_NBUF_PAYLOAD_SIZE, GFP_ATOMIC | __GFP_NOWARN);
+	if (unlikely(!nbuf)) {
+		spin_lock_bh(&nss_ctx->nss_top->stats_lock);
+		nss_ctx->nss_top->nbuf_alloc_err++;
+		spin_unlock_bh(&nss_ctx->nss_top->stats_lock);
+		nss_warning("%p: 'Profiler If Tx' rule dropped as command allocation failed", nss_ctx);
+		return NSS_TX_FAILURE;
+	}
+
+	ntmo = (struct nss_tx_metadata_object *)skb_put(nbuf, sizeof(struct nss_tx_metadata_object));
+	ntmo->type = NSS_TX_METADATA_TYPE_PROFILER_TX;
+
+	npt = &ntmo->sub.profiler_tx;
+	npt->len = len;
+	memcpy(npt->buf, buf, len);
+
+	status = nss_core_send_buffer(nss_ctx, 0, nbuf, NSS_IF_CMD_QUEUE, H2N_BUFFER_CTRL, 0);
+	if (status != NSS_CORE_STATUS_SUCCESS) {
+		dev_kfree_skb_any(nbuf);
+		nss_warning("%p: Unable to enqueue 'Profiler If Tx' rule\n", nss_ctx);
+		return NSS_TX_FAILURE;
+	}
+
+	nss_hal_send_interrupt(nss_ctx->nmap, nss_ctx->h2n_desc_rings[NSS_IF_CMD_QUEUE].desc_ring.int_bit,
+								NSS_REGS_H2N_INTR_STATUS_DATA_COMMAND_QUEUE);
+	return NSS_TX_SUCCESS;
+}
+
+/*
+ * nss_get_interface_number()
+ *	Return the interface number of the NSS net_device.
+ *
+ * Returns -1 on failure or the interface number if dev is an NSS net_device.
+ */
+int32_t nss_get_interface_number(void *ctx, void *dev)
+{
+	int i;
+	struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)ctx;
+
+	NSS_VERIFY_CTX_MAGIC(nss_ctx);
+	if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) {
+		nss_warning("%p: Interface number could not be found as core not ready", nss_ctx);
+		return -1;
+	}
+
+	nss_assert(dev != NULL);
+	for (i = 0; i < NSS_MAX_NET_INTERFACES; i++) {
+		if (dev == nss_ctx->nss_top->phys_if_ctx[i]) {
+			return i;
+		}
+	}
+
+	/*
+	 * TODO: Take care of virtual interfaces
+	 */
+
+	nss_warning("%p: Interface number could not be found as interface has not registered yet", nss_ctx);
+	return -1;
+}
+
+/*
+ * nss_get_state()
+ *	return the NSS initialization state
+ */
+nss_state_t nss_get_state(void *ctx)
+{
+	struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)ctx;
+	nss_state_t state = NSS_STATE_UNINITIALIZED;
+
+	NSS_VERIFY_CTX_MAGIC(nss_ctx);
+	spin_lock_bh(&nss_top_main.lock);
+	if (nss_ctx->state == NSS_CORE_STATE_INITIALIZED) {
+		state = NSS_STATE_INITIALIZED;
+	}
+	spin_unlock_bh(&nss_top_main.lock);
+
+	return state;
+}
+
+/*
+ * nss_register_ipv4_mgr()
+ *	Called to register an IPv4 connection manager with this driver
+ */
+void *nss_register_ipv4_mgr(nss_ipv4_sync_callback_t event_callback)
+{
+	nss_top_main.ipv4_sync = event_callback;
+	return (void *)&nss_top_main.nss[nss_top_main.ipv4_handler_id];
+}
+
+/*
+ * nss_unregister_ipv4_mgr()
+ *	Called to unregister an IPv4 connection manager
+ */
+void nss_unregister_ipv4_mgr(void)
+{
+	nss_top_main.ipv4_sync = NULL;
+}
+
+/*
+ * nss_register_ipv6_mgr()
+ *	Called to register an IPv6 connection manager with this driver
+ */
+void *nss_register_ipv6_mgr(nss_ipv6_sync_callback_t event_callback)
+{
+	nss_top_main.ipv6_sync = event_callback;
+	return (void *)&nss_top_main.nss[nss_top_main.ipv6_handler_id];
+}
+
+/*
+ * nss_unregister_ipv6_mgr()
+ *	Called to unregister an IPv6 connection manager
+ */
+void nss_unregister_ipv6_mgr(void)
+{
+	nss_top_main.ipv6_sync = NULL;
+}
+
+/*
+ * nss_register_l2switch_mgr()
+ */
+void *nss_register_l2switch_mgr(nss_l2switch_sync_callback_t event_callback)
+{
+	nss_top_main.l2switch_sync = event_callback;
+	return (void *)&nss_top_main.nss[nss_top_main.l2switch_handler_id];
+}
+
+/*
+ * nss_unregister_l2switch_mgr()
+ */
+void nss_unregister_l2switch_mgr(void)
+{
+	nss_top_main.l2switch_sync = NULL;
+}
+
+/*
+ * nss_register_connection_expire_all()
+ */
+void nss_register_connection_expire_all(nss_connection_expire_all_callback_t event_callback)
+{
+	nss_top_main.conn_expire = event_callback;
+}
+
+/*
+ * nss_unregister_connection_expire_all()
+ */
+void nss_unregister_connection_expire_all(void)
+{
+	nss_top_main.conn_expire = NULL;
+}
+
+/*
+ * nss_register_queue_decongestion()
+ *	Register for queue decongestion event
+ */
+nss_cb_register_status_t nss_register_queue_decongestion(void *ctx, nss_queue_decongestion_callback_t event_callback, void *app_ctx)
+{
+	uint32_t i;
+	struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)ctx;
+
+	NSS_VERIFY_CTX_MAGIC(nss_ctx);
+	spin_lock_bh(&nss_ctx->decongest_cb_lock);
+
+	/*
+	 * Find vacant location in callback table
+	 */
+	for (i = 0; i < NSS_MAX_CLIENTS; i++) {
+		if (nss_ctx->queue_decongestion_callback[i] == NULL) {
+			nss_ctx->queue_decongestion_callback[i] = event_callback;
+			nss_ctx->queue_decongestion_ctx[i] = app_ctx;
+			spin_unlock_bh(&nss_ctx->decongest_cb_lock);
+			return NSS_CB_REGISTER_SUCCESS;
+		}
+	}
+
+	spin_unlock_bh(&nss_ctx->decongest_cb_lock);
+	return NSS_CB_REGISTER_FAILED;
+}
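+
+/*
+ * Illustrative usage (callback and context names hypothetical):
+ *
+ *	if (nss_register_queue_decongestion(nss_ctx, my_decongestion_cb, netdev)
+ *					!= NSS_CB_REGISTER_SUCCESS) {
+ *		... all NSS_MAX_CLIENTS callback slots are in use ...
+ *	}
+ */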
+
+/*
+ * nss_unregister_queue_decongestion()
+ *	Unregister for queue decongestion event
+ */
+nss_cb_unregister_status_t nss_unregister_queue_decongestion(void *ctx, nss_queue_decongestion_callback_t event_callback)
+{
+	uint32_t i;
+	struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)ctx;
+
+	NSS_VERIFY_CTX_MAGIC(nss_ctx);
+	spin_lock_bh(&nss_ctx->decongest_cb_lock);
+
+	/*
+	 * Find actual location in callback table
+	 */
+	for (i = 0; i < NSS_MAX_CLIENTS; i++) {
+		if (nss_ctx->queue_decongestion_callback[i] == event_callback) {
+			nss_ctx->queue_decongestion_callback[i] = NULL;
+			nss_ctx->queue_decongestion_ctx[i] = NULL;
+			spin_unlock_bh(&nss_ctx->decongest_cb_lock);
+			return NSS_CB_UNREGISTER_SUCCESS;
+		}
+	}
+
+	spin_unlock_bh(&nss_ctx->decongest_cb_lock);
+	return NSS_CB_UNREGISTER_FAILED;
+}
+
+/*
+ * nss_register_crypto_mgr()
+ */
+void *nss_register_crypto_if(nss_crypto_callback_t crypto_callback, void *ctx)
+{
+	nss_top_main.crypto_ctx = ctx;
+
+	/**
+	 * We need to ensure that processor/compiler do not re-order ctx
+	 * and cb writes
+	 */
+	wmb();
+	nss_top_main.crypto_callback = crypto_callback;
+
+	return (void *)&nss_top_main.nss[nss_top_main.crypto_handler_id];
+}
+
+/*
+ * nss_unregister_crypto_mgr()
+ */
+void nss_unregister_crypto_if(void)
+{
+	nss_top_main.crypto_callback = NULL;
+
+	/**
+	 * We need to ensure that processor/compiler do not re-order ctx
+	 * and cb writes
+	 */
+	wmb();
+	nss_top_main.crypto_ctx = NULL;
+}
+
+/*
+ * nss_register_phys_if()
+ */
+void *nss_register_phys_if(uint32_t if_num,
+				nss_phys_if_rx_callback_t rx_callback,
+				nss_phys_if_event_callback_t event_callback, void *if_ctx)
+{
+	uint8_t id;
+
+	nss_assert(if_num < NSS_MAX_PHYSICAL_INTERFACES);
+	id = nss_top_main.phys_if_handler_id[if_num];
+
+	nss_top_main.phys_if_ctx[if_num] = if_ctx;
+
+	/**
+	 * We need to ensure that processor/compiler do not re-order ctx
+	 * and cb writes
+	 */
+	wmb();
+	nss_top_main.phys_if_rx_callback[if_num] = rx_callback;
+	nss_top_main.phys_if_event_callback[if_num] = event_callback;
+
+	return (void *)&nss_top_main.nss[id];
+}
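+
+/*
+ * Illustrative usage from a GMAC driver (handler names hypothetical):
+ *
+ *	nss_ctx = nss_register_phys_if(if_num, gmac_rx_handler,
+ *					gmac_event_handler, netdev);
+ *	nss_tx_phys_if_open(nss_ctx, tx_desc_ring, rx_desc_ring, if_num);
+ */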
+
+/*
+ * nss_unregister_phys_if()
+ */
+void nss_unregister_phys_if(uint32_t if_num)
+{
+	nss_assert(if_num < NSS_MAX_PHYSICAL_INTERFACES);
+
+	nss_top_main.phys_if_rx_callback[if_num] = NULL;
+	nss_top_main.phys_if_event_callback[if_num] = NULL;
+
+	/**
+	 * We need to ensure that processor/compiler do not re-order ctx
+	 * and cb writes
+	 */
+	wmb();
+	nss_top_main.phys_if_ctx[if_num] = NULL;
+}
+
+/*
+ * nss_register_ipsec_if()
+ */
+void *nss_register_ipsec_if(nss_ipsec_callback_t crypto_callback, void *ctx)
+{
+	return (void *)&nss_top_main.nss[nss_top_main.ipsec_handler_id];
+}
+
+/*
+ * nss_unregister_ipsec_if()
+ */
+void nss_unregister_ipsec_if(void)
+{
+	/*
+	 * Place holder for now
+	 */
+}
+
+/*
+ * nss_register_profiler_if()
+ */
+void *nss_register_profiler_if(nss_profiler_callback_t profiler_callback, nss_core_id_t core_id, void *ctx)
+{
+	nss_assert(core_id < NSS_CORE_MAX);
+
+	nss_top_main.profiler_ctx[core_id] = ctx;
+
+	/**
+	 * We need to ensure that processor/compiler do not re-order ctx
+	 * and cb writes
+	 */
+	wmb();
+	nss_top_main.profiler_callback[core_id] = profiler_callback;
+
+	return (void *)&nss_top_main.nss[core_id];
+}
+
+/*
+ * nss_unregister_profiler_if()
+ */
+void nss_unregister_profiler_if(nss_core_id_t core_id)
+{
+	nss_assert(core_id < NSS_CORE_MAX);
+
+	nss_top_main.profiler_callback[core_id] = NULL;
+
+	/**
+	 * We need to ensure that processor/compiler do not re-order ctx
+	 * and cb writes
+	 */
+	wmb();
+	nss_top_main.profiler_ctx[core_id] = NULL;
+}
+
+EXPORT_SYMBOL(nss_get_interface_number);
+EXPORT_SYMBOL(nss_get_state);
+
+EXPORT_SYMBOL(nss_register_connection_expire_all);
+EXPORT_SYMBOL(nss_unregister_connection_expire_all);
+
+EXPORT_SYMBOL(nss_register_queue_decongestion);
+EXPORT_SYMBOL(nss_unregister_queue_decongestion);
+
+EXPORT_SYMBOL(nss_register_ipv4_mgr);
+EXPORT_SYMBOL(nss_unregister_ipv4_mgr);
+EXPORT_SYMBOL(nss_tx_create_ipv4_rule);
+EXPORT_SYMBOL(nss_tx_destroy_ipv4_rule);
+
+EXPORT_SYMBOL(nss_register_ipv6_mgr);
+EXPORT_SYMBOL(nss_unregister_ipv6_mgr);
+EXPORT_SYMBOL(nss_tx_create_ipv6_rule);
+EXPORT_SYMBOL(nss_tx_destroy_ipv6_rule);
+
+EXPORT_SYMBOL(nss_register_l2switch_mgr);
+EXPORT_SYMBOL(nss_unregister_l2switch_mgr);
+EXPORT_SYMBOL(nss_tx_create_l2switch_rule);
+EXPORT_SYMBOL(nss_tx_destroy_l2switch_rule);
+EXPORT_SYMBOL(nss_tx_destroy_all_l2switch_rules);
+
+EXPORT_SYMBOL(nss_register_crypto_if);
+EXPORT_SYMBOL(nss_unregister_crypto_if);
+EXPORT_SYMBOL(nss_tx_crypto_if_buf);
+EXPORT_SYMBOL(nss_tx_crypto_if_open);
+EXPORT_SYMBOL(nss_tx_crypto_if_close);
+
+EXPORT_SYMBOL(nss_register_phys_if);
+EXPORT_SYMBOL(nss_unregister_phys_if);
+EXPORT_SYMBOL(nss_tx_phys_if_buf);
+EXPORT_SYMBOL(nss_tx_phys_if_open);
+EXPORT_SYMBOL(nss_tx_phys_if_close);
+EXPORT_SYMBOL(nss_tx_phys_if_link_state);
+EXPORT_SYMBOL(nss_tx_phys_if_change_mtu);
+EXPORT_SYMBOL(nss_tx_phys_if_mac_addr);
+
+EXPORT_SYMBOL(nss_register_ipsec_if);
+EXPORT_SYMBOL(nss_unregister_ipsec_if);
+EXPORT_SYMBOL(nss_tx_create_ipsec_tx_rule);
+EXPORT_SYMBOL(nss_tx_destroy_ipsec_tx_rule);
+EXPORT_SYMBOL(nss_tx_create_ipsec_rx_rule);
+EXPORT_SYMBOL(nss_tx_destroy_ipsec_rx_rule);
+
+EXPORT_SYMBOL(nss_register_profiler_if);
+EXPORT_SYMBOL(nss_unregister_profiler_if);
+EXPORT_SYMBOL(nss_tx_profiler_if_buf);