Add QoS support to the NSS driver.

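This change exports a shaper configuration API from the NSS driver
(nss_register_shaping(), nss_shaper_config_send() and the interface/
bridge shaper bounce registration calls), adds the qca-nss-qdisc
module to the build, carries skb->priority into the H2N descriptors
and the IPv4/IPv6 rule structures as a qos_tag, marks routed
connections with a new ROUTED create flag, and adds a "need_mark"
sysfs control so the connection managers can restrict acceleration
to packets carrying a given skb->mark.

A minimal sketch of how a client is expected to drive the new shaper
configuration API (illustrative only: my_response() and
my_assign_shaper() are hypothetical names, and error handling is
elided):

	/* Assumes nss_api_if.h and linux/module.h */
	static void my_response(void *arg, struct nss_shaper_response *response)
	{
		/* response->type >= 0 indicates success, < 0 a failure */
	}

	static nss_tx_status_t my_assign_shaper(uint32_t interface_num)
	{
		struct nss_shaper_configure config;
		void *ctx;

		/* Register for basic shaping operations (NULL check elided) */
		ctx = nss_register_shaping();

		/* Request any free shaper for the interface's B shaper */
		config.interface_num = interface_num;
		config.i_shaper = false;
		config.type = NSS_SHAPER_CONFIG_TYPE_ASSIGN_SHAPER;
		config.cb = my_response;
		config.app_data = NULL;
		config.owner = THIS_MODULE;
		config.mt.assign_shaper.shaper_num = 0;

		/*
		 * A success code here only means the message was issued;
		 * the actual result arrives asynchronously via my_response().
		 */
		return nss_shaper_config_send(ctx, &config);
	}
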
- Authors of this work are:
	Gareth Williams <garethw@codeaurora.org>
	Sakthi Vignesh Radhakrishnan <sradhakr@codeaurora.org>

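The need_mark control accepts a hex value (parsed with sscanf "%x").
A usage sketch; the parent directory is left symbolic here because it
depends on where the nom_v4/nom_v6 sysfs objects are registered on
the target:

	# Accelerate only flows whose packets carry skb->mark == 0x100
	echo 100 > /sys/.../need_mark

	# Disable mark-based filtering again
	echo 0 > /sys/.../need_mark
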
CRs-Fixed: 609824
Change-Id: Iba941d8a5b5255da6dd3f4283110ee7b3d24bd07
Signed-off-by: Murat Sezgin <msezgin@codeaurora.org>
diff --git a/Makefile b/Makefile
index 10477cf..b13e9af 100755
--- a/Makefile
+++ b/Makefile
@@ -27,15 +27,18 @@
 qca-nss-tun6rd-objs := nss_tun6rd.o
 ccflags-y += -DNSS_TUN6RD_DEBUG_LEVEL=0
 endif
+obj-m += qca-nss-qdisc.o
 
 qca-nss-connmgr-ipv4-objs := nss_connmgr_ipv4.o
 qca-nss-connmgr-ipv6-objs := nss_connmgr_ipv6.o
 qca-nss-tunipip6-objs := nss_tunipip6.o
+qca-nss-qdisc-objs := nss_qdisc.o
 
 ccflags-y += -I$(obj)/nss_hal/include -DNSS_DEBUG_LEVEL=0 -DNSS_EMPTY_BUFFER_SIZE=1792 -DNSS_PKT_STATS_ENABLED=0
 ccflags-y += -DNSS_CONNMGR_DEBUG_LEVEL=0 -DNSS_CONNMGR_PPPOE_SUPPORT=0
 ccflags-y += -DNSS_TUNIPIP6_DEBUG_LEVEL=0
 ccflags-y += -DNSS_PM_DEBUG_LEVEL=0
+ccflags-y += -I$(TOPDIR)/qca/src/linux/net/bridge -DNSSQDISC_DEBUG_LEVEL=0
 
 obj ?= .
 
diff --git a/nss_api_if.h b/nss_api_if.h
index 0a0618c..9eadd19 100755
--- a/nss_api_if.h
+++ b/nss_api_if.h
@@ -224,6 +224,7 @@
 	uint32_t param_a2;		/**< Custom extra parameter 2 */
 	uint32_t param_a3;		/**< Custom extra parameter 3 */
 	uint32_t param_a4;		/**< Custom extra parameter 4 */
+	uint32_t qos_tag;		/**< QoS tag value */
 };
 
 /**
@@ -236,6 +237,9 @@
 /** Indicates that this is a pure bridge flow (no routing involved) */
 #define NSS_IPV4_CREATE_FLAG_BRIDGE_FLOW 0x02
 
+/** Rule is for a routed connection. */
+#define NSS_IPV4_CREATE_FLAG_ROUTED 0x04
+
 /**
  * Structure to be used while sending an IPv4 flow/connection destroy rule.
  */
@@ -297,6 +301,7 @@
 	uint16_t return_pppoe_session_id;		/**< PPPoE session associated with return */
 	uint8_t return_pppoe_remote_mac[ETH_ALEN];	/**< Remote PPPoE peer MAC address for return */
 	uint16_t egress_vlan_tag;	/**< Egress VLAN tag expected for this flow */
+	uint32_t qos_tag;		/**< QoS tag value */
 };
 
 /**
@@ -306,6 +311,8 @@
 					/**< Indicates that we should not check sequence numbers */
 #define NSS_IPV6_CREATE_FLAG_BRIDGE_FLOW 0x02
 					/**< Indicates that this is a pure bridge flow (no routing involved) */
+#define NSS_IPV6_CREATE_FLAG_ROUTED 0x04
+					/**< Rule is for a routed connection. */
 
 /**
  * Structure to be used while sending an IPv6 flow/connection destroy rule.
@@ -350,6 +357,9 @@
 	uint32_t param_a2;		/**< Custom extra parameter 2 */
 	uint32_t param_a3;		/**< Custom extra parameter 3 */
 	uint32_t param_a4;		/**< Custom extra parameter 4 */
+
+	uint8_t flags;			/**< Flags */
+	uint32_t qos_tag;		/**< QoS tag value */
 };
 
 /**
@@ -386,6 +396,8 @@
 	uint16_t return_pppoe_remote_mac[3];
 					/**< Return direction's PPPoE Server MAC address */
 	uint16_t egress_vlan_tag;	/**< Egress VLAN tag */
+	uint8_t flags;			/**< Flags */
+	uint32_t qos_tag;		/**< QoS tag value */
 };
 
 /**
@@ -439,6 +451,9 @@
 					/**< Time in Linux jiffies to be added to the current timeout to keep the connection alive */
 	uint8_t final_sync;		/**< Non-zero when the NA has ceased to accelerate the given connection */
 	uint8_t evicted;		/**< Non-zero if connection evicted */
+
+	uint8_t flags;			/**< Flags */
+	uint32_t qos_tag;		/**< QoS tag value */
 };
 
 /**
@@ -468,6 +483,8 @@
 	uint16_t return_pppoe_remote_mac[3];
 					/**< Return direction's PPPoE Server MAC address */
 	uint16_t egress_vlan_tag;	/**< Egress VLAN tag */
+	uint8_t flags;			/**< Flags */
+	uint32_t qos_tag;		/**< QoS tag value */
 };
 
 /**
@@ -629,6 +646,378 @@
 } nss_tunipip6_event_t;
 
 /**
+ * NSS Shaping
+ */
+
+/*
+ * struct nss_shaper_config_assign_shaper
+ */
+struct nss_shaper_config_assign_shaper {
+	uint32_t shaper_num;		/* Number of an existing shaper to assign, or 0 if any shaper will do */
+};
+
+/*
+ * struct nss_shaper_config_unassign_shaper
+ */
+struct nss_shaper_config_unassign_shaper {
+	uint32_t shaper_num;		/* Number of the shaper to unassign */
+};
+
+/*
+ * enum nss_shaper_node_types
+ *	Types of shaper node we export to the HLOS
+ */
+enum nss_shaper_node_types {
+	NSS_SHAPER_NODE_TYPE_CODEL = 1,
+	NSS_SHAPER_NODE_TYPE_PRIO = 3,
+	NSS_SHAPER_NODE_TYPE_FIFO = 4,
+	NSS_SHAPER_NODE_TYPE_TBL = 5,
+};
+typedef enum nss_shaper_node_types nss_shaper_node_type_t;
+
+/*
+ * struct nss_shaper_config_alloc_shaper_node
+ */
+struct nss_shaper_config_alloc_shaper_node {
+	nss_shaper_node_type_t node_type;
+					/* Type of shaper node */
+	uint32_t qos_tag;		/* The qos tag to give the new node */
+};
+
+/*
+ * struct nss_shaper_config_free_shaper_node
+ */
+struct nss_shaper_config_free_shaper_node {
+	uint32_t qos_tag;		/* The qos tag of the node to free */
+};
+
+/*
+ * struct nss_shaper_config_set_root_node
+ */
+struct nss_shaper_config_set_root_node {
+	uint32_t qos_tag;		/* The qos tag of the node that becomes root */
+};
+
+/*
+ * struct nss_shaper_config_set_default_node
+ */
+struct nss_shaper_config_set_default_node {
+	uint32_t qos_tag;		/* The qos tag of the node that becomes default */
+};
+
+/*
+ * struct nss_shaper_shaper_node_basic_stats_get
+ *	Obtain basic stats for a shaper node
+ */
+struct nss_shaper_shaper_node_basic_stats_get {
+	uint32_t qos_tag;		/* The qos tag of the node from which to obtain basic stats */
+};
+
+/*
+ * struct nss_shaper_config_prio_attach
+ */
+struct nss_shaper_config_prio_attach {
+	uint32_t child_qos_tag;		/* Qos tag of shaper node to add as child */
+	uint32_t priority;		/* Priority of the child */
+};
+
+/*
+ * struct nss_shaper_config_prio_detach
+ */
+struct nss_shaper_config_prio_detach {
+	uint32_t priority;		/* Priority of the child to detach */
+};
+
+/*
+ * struct nss_shaper_config_codel_alg_param
+ */
+struct nss_shaper_config_codel_alg_param {
+	uint16_t interval;		/* Buffer time to smooth state transition */
+	uint16_t target;		/* Acceptable delay associated with a queue */
+	uint16_t mtu;			/* MTU for the associated interface */
+};
+
+/*
+ * struct nss_shaper_config_codel_param
+ */
+struct nss_shaper_config_codel_param {
+	int32_t qlen_max;					/* Max no. of packets that can be enqueued */
+	struct nss_shaper_config_codel_alg_param cap;		/* Config structure for codel algorithm */
+};
+
+/*
+ * struct nss_shaper_config_limiter_alg_param
+ */
+struct nss_shaper_config_limiter_alg_param {
+	uint32_t rate;		/* Allowed traffic rate measured in bytes per second */
+	uint32_t burst;		/* Max bytes that can be sent before the next token update */
+	uint32_t max_size;	/* The maximum size of packets (in bytes) supported */
+	bool short_circuit;	/* When set, limiter will stop limiting the sending rate */
+};
+
+/*
+ * struct nss_shaper_config_tbl_attach
+ */
+struct nss_shaper_config_tbl_attach {
+	uint32_t child_qos_tag;	/* Qos tag of shaper node to add as child */
+};
+
+/*
+ * struct nss_shaper_config_tbl_param
+ */
+struct nss_shaper_config_tbl_param {
+	uint32_t qlen_bytes;						/* Max size of queue in bytes */
+	struct nss_shaper_config_limiter_alg_param lap_cir;		/* Config committed information rate */
+	struct nss_shaper_config_limiter_alg_param lap_pir;		/* Config peak information rate */
+};
+
+/*
+ * struct nss_shaper_config_bf_attach
+ */
+struct nss_shaper_config_bf_attach {
+	uint32_t child_qos_tag;		/* Qos tag of the shaper node to add as child */
+};
+
+/*
+ * struct nss_shaper_config_bf_detach
+ */
+struct nss_shaper_config_bf_detach {
+	uint32_t child_qos_tag;		/* Qos tag of the shaper node to detach */
+};
+
+/*
+ * struct nss_shaper_config_bf_group_attach
+ */
+struct nss_shaper_config_bf_group_attach {
+	uint32_t child_qos_tag;		/* Qos tag of shaper node to add as child */
+};
+
+/*
+ * struct nss_shaper_config_bf_group_param
+ */
+struct nss_shaper_config_bf_group_param {
+	uint32_t qlen_bytes;					/* Maximum size of queue in bytes */
+	uint32_t quantum;					/* Smallest increment value for the DRRs */
+	struct nss_shaper_config_limiter_alg_param lap;		/* Config structure for the limiter algorithm */
+};
+
+/*
+ * enum nss_shaper_config_fifo_drop_modes
+ */
+enum nss_shaper_config_fifo_drop_modes {
+	NSS_SHAPER_FIFO_DROP_MODE_HEAD = 0,
+	NSS_SHAPER_FIFO_DROP_MODE_TAIL,
+	NSS_SHAPER_FIFO_DROP_MODES,			/* Must be last */
+};
+typedef enum nss_shaper_config_fifo_drop_modes nss_shaper_config_fifo_drop_mode_t;
+
+/*
+ * struct nss_shaper_config_fifo_param
+ */
+struct nss_shaper_config_fifo_param {
+	uint32_t limit;					/* Queue limit in packets */
+	nss_shaper_config_fifo_drop_mode_t drop_mode;	/* FIFO drop mode when queue is full */
+};
+
+/*
+ * struct nss_shaper_node_config
+ *	Configuration messages for shaper nodes; which one applies depends on the type of configuration message
+ *
+ * This structure contains all of the different node configuration messages that can be sent, though not to all shaper node types.
+ */
+struct nss_shaper_node_config {
+	uint32_t qos_tag;		/* Identifier of the shaper node to which the config is targeted */
+
+	union {
+		struct nss_shaper_config_prio_attach prio_attach;
+		struct nss_shaper_config_prio_detach prio_detach;
+		struct nss_shaper_config_codel_param codel_param;
+		struct nss_shaper_config_tbl_attach tbl_attach;
+		struct nss_shaper_config_tbl_param tbl_param;
+		struct nss_shaper_config_bf_attach bf_attach;
+		struct nss_shaper_config_bf_detach bf_detach;
+		struct nss_shaper_config_bf_group_attach bf_group_attach;
+		struct nss_shaper_config_bf_group_param bf_group_param;
+		struct nss_shaper_config_fifo_param fifo_param;
+	} snc;
+};
+
+/*
+ * enum nss_shaper_config_types
+ *	Types of shaper configuration messages
+ */
+enum nss_shaper_config_types {
+	NSS_SHAPER_CONFIG_TYPE_ASSIGN_SHAPER,		/* Assign a shaper to an interface (B or I) */
+	NSS_SHAPER_CONFIG_TYPE_ALLOC_SHAPER_NODE,	/* Allocate a type of shaper node and give it a qos tag */
+	NSS_SHAPER_CONFIG_TYPE_FREE_SHAPER_NODE,	/* Free a shaper node */
+	NSS_SHAPER_CONFIG_TYPE_PRIO_ATTACH,		/* Configure prio to attach a node with a given priority */
+	NSS_SHAPER_CONFIG_TYPE_PRIO_DETACH,		/* Configure prio to detach a node at a given priority */
+	NSS_SHAPER_CONFIG_TYPE_SET_DEFAULT,		/* Configure shaper to have a default node */
+	NSS_SHAPER_CONFIG_TYPE_SET_ROOT,		/* Configure shaper to have a root node */
+	NSS_SHAPER_CONFIG_TYPE_UNASSIGN_SHAPER,		/* Unassign a shaper from an interface (B or I) */
+	NSS_SHAPER_CONFIG_TYPE_CODEL_CHANGE_PARAM,	/* Configure codel parameters */
+	NSS_SHAPER_CONFIG_TYPE_TBL_ATTACH,		/* Configure tbl to attach a child node */
+	NSS_SHAPER_CONFIG_TYPE_TBL_DETACH,		/* Configure tbl to detach its child node */
+	NSS_SHAPER_CONFIG_TYPE_TBL_CHANGE_PARAM,	/* Configure tbl to tune its parameters */
+	NSS_SHAPER_CONFIG_TYPE_BF_ATTACH,		/* Configure bf to attach a node to its round robin list */
+	NSS_SHAPER_CONFIG_TYPE_BF_DETACH,		/* Configure bf to detach a node with a particular QoS tag */
+	NSS_SHAPER_CONFIG_TYPE_BF_GROUP_ATTACH,	/* Configure bf group to attach a node as child */
+	NSS_SHAPER_CONFIG_TYPE_BF_GROUP_DETACH,	/* Configure bf group to detach its child */
+	NSS_SHAPER_CONFIG_TYPE_BF_GROUP_CHANGE_PARAM,	/* Configure bf group to tune its parameters */
+	NSS_SHAPER_CONFIG_TYPE_FIFO_CHANGE_PARAM,		/* Configure fifo queue limit */
+	NSS_SHAPER_CONFIG_TYPE_SHAPER_NODE_BASIC_STATS_GET,
+							/* Get shaper node basic stats */
+};
+
+typedef enum nss_shaper_config_types nss_shaper_config_type_t;
+
+struct nss_shaper_response;				/* Forward declaration */
+
+typedef void (*nss_shaper_config_response_callback_t)(void *arg, struct nss_shaper_response *response);
+							/* Function that is invoked when a shaper configuration command has been processed */
+
+/*
+ * struct nss_shaper_configure
+ *	Shaper configuration messages
+ */
+struct nss_shaper_configure {
+	uint32_t interface_num;		/* Interface (pnode) number for which the shaper config message is targeted */
+	bool i_shaper;			/* true when I shaper, false when B shaper */
+	nss_shaper_config_type_t type;
+					/* Type of configuration message (selects the mt union member) */
+	nss_shaper_config_response_callback_t cb;
+					/* Invoked when command has been processed by the NSS */
+	void *app_data;			/* Context argument returned to cb */
+	struct module *owner;		/* Module that is sending the configuration command */
+
+	union {
+		struct nss_shaper_config_assign_shaper assign_shaper;
+		struct nss_shaper_config_unassign_shaper unassign_shaper;
+		struct nss_shaper_config_alloc_shaper_node alloc_shaper_node;
+		struct nss_shaper_config_free_shaper_node free_shaper_node;
+		struct nss_shaper_config_set_default_node set_default_node;
+		struct nss_shaper_config_set_root_node set_root_node;
+		struct nss_shaper_node_config shaper_node_config;
+		struct nss_shaper_shaper_node_basic_stats_get shaper_node_basic_stats_get;
+	} mt;
+};
+
+/*
+ * enum nss_shaper_response_types
+ *	Types of shaper configuration response messages
+ */
+enum nss_shaper_response_types {
+	/*
+	 * Failure messages are < 0
+	 */
+	NSS_SHAPER_RESPONSE_TYPE_NO_SHAPERS = -65536,			/* No shaper available for a shaper assign command to succeed */
+	NSS_SHAPER_RESPONSE_TYPE_NO_SHAPER,				/* No shaper to which to issue a shaper or node configuration message */
+	NSS_SHAPER_RESPONSE_TYPE_NO_SHAPER_NODE,			/* No shaper node to which to issue a configuration message */
+	NSS_SHAPER_RESPONSE_TYPE_NO_SHAPER_NODES,			/* No shaper nodes available of the type requested */
+	NSS_SHAPER_RESPONSE_TYPE_OLD,					/* Request is old / environment changed by the time the request was processed */
+	NSS_SHAPER_RESPONSE_TYPE_UNRECOGNISED,				/* Request is not recognised by the recipient */
+	NSS_SHAPER_RESPONSE_TYPE_FIFO_QUEUE_LIMIT_INVALID,		/* Fifo queue limit is invalid */
+	NSS_SHAPER_RESPONSE_TYPE_FIFO_DROP_MODE_INVALID,		/* Fifo drop mode is invalid */
+	NSS_SHAPER_RESPONSE_TYPE_BAD_DEFAULT_CHOICE,			/* Node selected has no queue to enqueue to */
+	NSS_SHAPER_RESPONSE_TYPE_DUPLICATE_QOS_TAG,			/* Duplicate QoS Tag as another node */
+	NSS_SHAPER_RESPONSE_TYPE_TBL_CIR_RATE_AND_BURST_REQUIRED,	/* CIR rate and burst are mandatory */
+	NSS_SHAPER_RESPONSE_TYPE_TBL_CIR_BURST_LESS_THAN_MTU,		/* CIR burst size is smaller than MTU */
+	NSS_SHAPER_RESPONSE_TYPE_TBL_PIR_BURST_LESS_THAN_MTU,		/* PIR burst size is smaller than MTU */
+	NSS_SHAPER_RESPONSE_TYPE_TBL_PIR_BURST_REQUIRED,		/* PIR burst size must be provided if peakrate
+									 * limiting is required.
+									 */
+	NSS_SHAPER_RESPONSE_TYPE_CODEL_ALL_PARAMS_REQUIRED,		/* Codel requires non-zero value for target,
+									 * interval and limit.
+									 */
+	/*
+	 * Success messages are >= 0
+	 */
+	NSS_SHAPER_RESPONSE_TYPE_SHAPER_ASSIGN_SUCCESS = 0,	/* Successfully assigned a shaper */
+	NSS_SHAPER_RESPONSE_TYPE_SHAPER_NODE_ALLOC_SUCCESS,	/* Alloc shaper node request successful */
+	NSS_SHAPER_RESPONSE_TYPE_PRIO_ATTACH_SUCCESS,		/* Prio attach success */
+	NSS_SHAPER_RESPONSE_TYPE_PRIO_DETACH_SUCCESS,		/* Prio detach success */
+	NSS_SHAPER_RESPONSE_TYPE_CODEL_CHANGE_PARAM_SUCCESS,	/* Codel parameter configuration success */
+	NSS_SHAPER_RESPONSE_TYPE_TBL_ATTACH_SUCCESS,		/* Tbl attach success */
+	NSS_SHAPER_RESPONSE_TYPE_TBL_DETACH_SUCCESS,		/* Tbl detach success */
+	NSS_SHAPER_RESPONSE_TYPE_TBL_CHANGE_PARAM_SUCCESS,	/* Tbl parameter configuration success */
+	NSS_SHAPER_RESPONSE_TYPE_BF_ATTACH_SUCCESS,	/* Bigfoot attach success */
+	NSS_SHAPER_RESPONSE_TYPE_BF_DETACH_SUCCESS,	/* Bigfoot detach success */
+	NSS_SHAPER_RESPONSE_TYPE_BF_GROUP_ATTACH_SUCCESS,	/* Bigfoot group attach success */
+	NSS_SHAPER_RESPONSE_TYPE_BF_GROUP_DETACH_SUCCESS,	/* Bigfoot group detach success */
+	NSS_SHAPER_RESPONSE_TYPE_BF_GROUP_CHANGE_PARAM_SUCCESS,
+								/* Bigfoot group parameter configuration success */
+	NSS_SHAPER_RESPONSE_TYPE_SHAPER_SET_ROOT_SUCCESS,	/* Setting of root successful */
+	NSS_SHAPER_RESPONSE_TYPE_SHAPER_SET_DEFAULT_SUCCESS,	/* Setting of default successful */
+	NSS_SHAPER_RESPONSE_TYPE_SHAPER_NODE_FREE_SUCCESS,	/* Free shaper node request successful */
+	NSS_SHAPER_RESPONSE_TYPE_SHAPER_UNASSIGN_SUCCESS,	/* Successfully unassigned a shaper */
+	NSS_SHAPER_RESPONSE_TYPE_FIFO_CHANGE_PARAM_SUCCESS,	/* Fifo limit set success */
+	NSS_SHAPER_RESPONSE_TYPE_SHAPER_NODE_BASIC_STATS_GET_SUCCESS,	/* Success response for a shaper node basic stats get request */
+};
+typedef enum nss_shaper_response_types nss_shaper_response_type_t;
+
+/*
+ * struct nss_shaper_response_shaper_assign_success
+ *	Shaper successfully assigned
+ */
+struct nss_shaper_response_shaper_assign_success {
+	uint32_t shaper_num;		/* Number of the shaper assigned */
+};
+
+/*
+ * struct nss_shaper_node_basic_statistics_delta
+ *	Statistics that are sent as deltas
+ */
+struct nss_shaper_node_basic_statistics_delta {
+	uint32_t enqueued_bytes;			/* Bytes enqueued successfully */
+	uint32_t enqueued_packets;			/* Packets enqueued successfully */
+	uint32_t enqueued_bytes_dropped;		/* Bytes dropped during an enqueue operation due to node limits */
+	uint32_t enqueued_packets_dropped;		/* Packets dropped during an enqueue operation due to node limits */
+	uint32_t dequeued_bytes;			/* Bytes dequeued successfully from a shaper node */
+	uint32_t dequeued_packets;			/* Packets dequeued successfully from a shaper node */
+	uint32_t dequeued_bytes_dropped;		/* Bytes dropped by this node during dequeue (some nodes drop packets during dequeue rather than enqueue) */
+	uint32_t dequeued_packets_dropped;		/* Packets dropped by this node during dequeue (some nodes drop packets during dequeue rather than enqueue) */
+	uint32_t queue_overrun;				/* Number of times a queue limit has been overrun, possibly leading to packet drops */
+};
+
+/*
+ * struct nss_shaper_response_shaper_node_basic_stats_get_success
+ *	Response to a request for basic stats of a shaper node
+ */
+struct nss_shaper_response_shaper_node_basic_stats_get_success {
+	uint32_t qlen_bytes;				/* Total size of packets waiting in queue */
+	uint32_t qlen_packets;				/* Number of packets waiting in queue */
+	uint32_t packet_latency_peak_msec_dequeued;	/* Maximum milliseconds a packet was in this shaper node before being dequeued */
+	uint32_t packet_latency_minimum_msec_dequeued;	/* Minimum milliseconds a packet was in this shaper node before being dequeued */
+	uint32_t packet_latency_peak_msec_dropped;	/* Maximum milliseconds a packet was in this shaper node before being dropped */
+	uint32_t packet_latency_minimum_msec_dropped;	/* Minimum milliseconds a packet was in this shaper node before being dropped */
+	struct nss_shaper_node_basic_statistics_delta delta;
+							/* Statistics that are sent as deltas */
+};
+
+/*
+ * union nss_shaper_responses
+ *	Types of response message
+ */
+union nss_shaper_responses {
+	struct nss_shaper_response_shaper_assign_success shaper_assign_success;
+	struct nss_shaper_response_shaper_node_basic_stats_get_success shaper_node_basic_stats_get_success;
+};
+
+/*
+ * struct nss_shaper_response
+ *	Shaper configuration response messages
+ */
+struct nss_shaper_response {
+	struct nss_shaper_configure request;
+					/* Original request to which this response relates */
+	nss_shaper_response_type_t type;
+					/* The response type (rt) being issued to the request */
+	union nss_shaper_responses rt;
+};
+
+typedef void (*nss_shaper_bounced_callback_t)(void *app_data, struct sk_buff *skb);	/* Registrant callback to receive shaper bounced packets */
+
+/**
  * General utilities
  */
 
@@ -643,6 +1032,16 @@
 extern int32_t nss_get_interface_number(void *nss_ctx, void *dev);
 
 /**
+ * @brief Determine if the interface number is represented as a virtual interface in the NSS
+ *
+ * @param nss_ctx NSS context
+ * @param interface_num The NSS interface number
+ *
+ * @return bool true if it is a virtual interface
+ */
+extern bool nss_interface_is_virtual(void *nss_ctx, int32_t interface_num);
+
+/**
  * @brief Obtain interface device pointer
  *
  * @param nss_ctx NSS context
@@ -984,6 +1383,15 @@
 extern void *nss_create_virt_if(struct net_device *if_ctx);
 
 /**
+ * @brief Obtain NSS Interface number for a virtual interface context
+ *
+ * @param if_ctx Interface context
+ *
+ * @return int32_t The NSS interface number
+ */
+extern int32_t nss_virt_if_get_interface_num(void *if_ctx);
+
+/**
  * @brief Destroy virtual interface (VAPs)
  *
  * @param ctx Context provided by NSS driver during registration
@@ -1217,5 +1625,77 @@
  */
 extern nss_pm_interface_status_t nss_pm_set_perf_level(void *handle, nss_pm_perf_level_t lvl);
 
+/**
+ * @brief Register for basic shaping operations
+ *
+ * @return void* NSS context
+ */
+extern void *nss_register_shaping(void);
+
+/**
+ * @brief Unregister for basic shaping operations
+ * @param ctx NSS context
+ */
+extern void nss_unregister_shaping(void *ctx);
+
+/**
+ * @brief Register to receive shaper bounced packets (interface bounce)
+ * @param if_num Interface to be registered on
+ * @param cb Callback invoked when the NSS returns a sk_buff after shaping
+ * @param app_data Given to the callback along with the sk_buff to provide context to the registrant (state)
+ * @param owner Pass THIS_MODULE for this parameter - your module is held until you unregister
+ * @return void * NSS context or NULL on failure
+ */
+extern void *nss_register_shaper_bounce_interface(uint32_t if_num, nss_shaper_bounced_callback_t cb, void *app_data, struct module *owner);
+
+/**
+ * @brief Unregister for interface shaper bouncing
+ * @param if_num Interface to be unregistered
+ */
+extern void nss_unregister_shaper_bounce_interface(uint32_t if_num);
+
+/**
+ * @brief Register to receive shaper bounced packets (bridge bounce)
+ * @param if_num Interface to be registered on
+ * @param cb Callback invoked when the NSS returns a sk_buff after shaping
+ * @param app_data Given to the callback along with the sk_buff to provide context to the registrant (state)
+ * @param owner Pass THIS_MODULE for this parameter - your module is held until you unregister
+ * @return void * NSS context or NULL on failure
+ */
+extern void *nss_register_shaper_bounce_bridge(uint32_t if_num, nss_shaper_bounced_callback_t cb, void *app_data, struct module *owner);
+
+/**
+ * @brief Unregister for bridge shaper bouncing
+ * @param if_num Interface to be unregistered
+ */
+extern void nss_unregister_shaper_bounce_bridge(uint32_t if_num);
+
+/**
+ * @brief Issue a packet for shaping via a bounce operation (interface bounce)
+ * @param ctx NSS context you were given when you registered for shaper bouncing
+ * @param if_num Interface to be bounced to
+ * @param skb The packet
+ * @return nss_tx_status_t Success or failure to issue the packet to the NSS
+ */
+extern nss_tx_status_t nss_shaper_bounce_interface_packet(void *ctx, uint32_t if_num, struct sk_buff *skb);
+
+/**
+ * @brief Issue a packet for shaping via a bounce operation (bridge bounce)
+ * @param ctx NSS context you were given when you registered for shaper bouncing
+ * @param if_num Interface to be bounced to
+ * @param skb The packet
+ * @return nss_tx_status_t Success or failure to issue the packet to the NSS
+ */
+extern nss_tx_status_t nss_shaper_bounce_bridge_packet(void *ctx, uint32_t if_num, struct sk_buff *skb);
+
+/**
+ * @brief Send a shaping configuration message
+ * @param ctx NSS context
+ * @param config The config message
+ *
+ * @return nss_tx_status_t Indication of whether the configuration message was issued.  This does not mean the message was successfully processed; that is determined by the response delivered to the callback function specified in the config structure.
+ */
+extern nss_tx_status_t nss_shaper_config_send(void *ctx, struct nss_shaper_configure *config);
+
 /**@}*/
 #endif /** __NSS_API_IF_H */
diff --git a/nss_connmgr_ipv4.c b/nss_connmgr_ipv4.c
index 5418ea9..cabeef5 100755
--- a/nss_connmgr_ipv4.c
+++ b/nss_connmgr_ipv4.c
@@ -304,6 +304,7 @@
 	struct dentry *dent;		/* Debugfs directory */
 	uint32_t debug_stats[NSS_CONNMGR_IPV4_DEBUG_STATS_MAX];
 					/* Debug statistics */
+	uint32_t need_mark;		/* When 0, mark filtering is disabled.  When != 0, only packets with the given skb->mark value are processed */
 };
 
 static unsigned int nss_connmgr_ipv4_post_routing_hook(unsigned int hooknum,
@@ -335,6 +336,7 @@
 		.conntrack_notifier = {
 			.fcn = nss_connmgr_ipv4_conntrack_event,
 		},
+		.need_mark = 0,
 };
 
 /*
@@ -553,6 +555,15 @@
 	bool is_return_pppoe;
 
 	/*
+	 * If the 'need_mark' flag is set and this packet does not have the relevant mark
+	 * then we don't accelerate at all
+	 */
+	if (nss_connmgr_ipv4.need_mark && (nss_connmgr_ipv4.need_mark != skb->mark)) {
+		NSS_CONNMGR_DEBUG_TRACE("Mark %x not seen, ignoring: %p\n", nss_connmgr_ipv4.need_mark, skb);
+		return NF_ACCEPT;
+	}
+
+	/*
 	 * Only process IPV4 packets in bridge hook
 	 */
 	if(skb->protocol != htons(ETH_P_IP)){
@@ -715,6 +726,22 @@
 
 	unic.flags = 0;
 
+	/*
+	 * Store the skb->priority as the qos tag
+	 */
+	unic.qos_tag = (uint32_t)skb->priority;
+
+	/*
+	 * Only set the routed flag if the interface from which this packet came
+	 * was NOT a bridge port OR, if it was, the bridge is not the same one we are outputting onto.
+	 */
+	if (!is_bridge_port(in) || (out->master != in->master)) {
+		unic.flags |= NSS_IPV4_CREATE_FLAG_ROUTED;
+	}
+
+	/*
+	 * Reset the pppoe session info
+	 */
 	unic.return_pppoe_session_id = 0;
 	unic.flow_pppoe_session_id = 0;
 
@@ -1085,7 +1112,7 @@
 	/*
 	 * We have everything we need (hopefully :-])
 	 */
-	NSS_CONNMGR_DEBUG_TRACE("\n%p: Conntrack connection\n"
+	NSS_CONNMGR_DEBUG_TRACE("\n%p: Bridge Conntrack connection\n"
 			"skb: %p\n"
 			"dir: %s\n"
 			"Protocol: %d\n"
@@ -1101,8 +1128,9 @@
 			"dest_dev: %s\n"
 			"src_iface_num: %u\n"
 			"dest_iface_num: %u\n"
-			"ingress_vlan_tag: %u"
-			"egress_vlan_tag: %u",
+			"ingress_vlan_tag: %u\n"
+			"egress_vlan_tag: %u\n"
+			"qos_tag: %u\n",
 			ct,
 			skb,
 			(ctinfo < IP_CT_IS_REPLY)? "Original" : "Reply",
@@ -1120,7 +1148,8 @@
 			unic.src_interface_num,
 			unic.dest_interface_num,
 			unic.ingress_vlan_tag,
-			unic.egress_vlan_tag);
+			unic.egress_vlan_tag,
+			unic.qos_tag);
 
 	/*
 	 * Create the Network Accelerator connection cache entries
@@ -1449,6 +1478,14 @@
 	struct net_device *dest_slave = NULL;
 	struct net_device *src_slave = NULL;
 
+	/*
+	 * If the 'need_mark' flag is set and this packet does not have the relevant mark
+	 * then we don't accelerate at all
+	 */
+	if (nss_connmgr_ipv4.need_mark && (nss_connmgr_ipv4.need_mark != skb->mark)) {
+		NSS_CONNMGR_DEBUG_TRACE("Mark %x not seen, ignoring: %p\n", nss_connmgr_ipv4.need_mark, skb);
+		return NF_ACCEPT;
+	}
 
 	/*
 	 * Don't process broadcast or multicast
@@ -1631,6 +1668,16 @@
 	unic.flags = 0;
 
 	/*
+	 * Store the skb->priority as the qos tag
+	 */
+	unic.qos_tag = (uint32_t)skb->priority;
+
+	/*
+	 * Always a routed path
+	 */
+	unic.flags |= NSS_IPV4_CREATE_FLAG_ROUTED;
+
+	/*
 	 * Set the PPPoE values to the defaults, just in case there is not any PPPoE connection.
 	 */
 	unic.return_pppoe_session_id = 0;
@@ -2040,7 +2087,8 @@
 			"ingress_vlan_tag: %u\n"
 			"egress_vlan_tag: %u\n"
 			"flow_pppoe_session_id: %u\n"
-			"return_pppoe_session_id: %u\n",
+			"return_pppoe_session_id: %u\n"
+			"qos_tag: %u\n",
 			ct,
 			skb,
 			(ctinfo < IP_CT_IS_REPLY)? "Original" : "Reply",
@@ -2060,7 +2108,8 @@
 			unic.ingress_vlan_tag,
 			unic.egress_vlan_tag,
 			unic.flow_pppoe_session_id,
-			unic.return_pppoe_session_id);
+			unic.return_pppoe_session_id,
+			unic.qos_tag);
 
 	/*
 	 * If operations have stopped then do not proceed further
@@ -2993,12 +3042,69 @@
 }
 
 /*
+ * nss_connmgr_ipv4_get_need_mark()
+ * 	Get the value of "need_mark" operational control variable
+ */
+static ssize_t nss_connmgr_ipv4_get_need_mark(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	ssize_t count;
+	uint32_t num;
+
+	/*
+	 * Operate under our locks
+	 */
+	spin_lock_bh(&nss_connmgr_ipv4.lock);
+	num = nss_connmgr_ipv4.need_mark;
+	spin_unlock_bh(&nss_connmgr_ipv4.lock);
+
+	count = snprintf(buf, (ssize_t)PAGE_SIZE, "%x\n", num);
+	return count;
+}
+
+/*
+ * nss_connmgr_ipv4_set_need_mark()
+ * 	Set the value of "need_mark" operational control variable.
+ */
+static ssize_t nss_connmgr_ipv4_set_need_mark(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	char num_buf[12];
+	uint32_t num;
+
+
+	/*
+	 * Get the hex number from buf into a properly zero-terminated number buffer
+	 */
+	if (count > 11) {
+		return 0;
+	}
+	memcpy(num_buf, buf, count);
+	num_buf[count] = '\0';
+	sscanf(num_buf, "%x", &num);
+	NSS_CONNMGR_DEBUG_TRACE("nss_connmgr_ipv4_need_mark = %x\n", num);
+
+	/*
+	 * Operate under our locks
+	 */
+	spin_lock_bh(&nss_connmgr_ipv4.lock);
+	nss_connmgr_ipv4.need_mark = num;
+	spin_unlock_bh(&nss_connmgr_ipv4.lock);
+
+	return count;
+}
+
+/*
  * SysFS attributes for the default classifier itself.
  */
 static const struct device_attribute nss_connmgr_ipv4_terminate_attr =
 		__ATTR(terminate, S_IWUGO | S_IRUGO, nss_connmgr_ipv4_get_terminate, nss_connmgr_ipv4_set_terminate);
 static const struct device_attribute nss_connmgr_ipv4_stop_attr =
 		__ATTR(stop, S_IWUGO | S_IRUGO, nss_connmgr_ipv4_get_stop, nss_connmgr_ipv4_set_stop);
+static const struct device_attribute nss_connmgr_ipv4_need_mark_attr =
+		__ATTR(need_mark, S_IWUGO | S_IRUGO, nss_connmgr_ipv4_get_need_mark, nss_connmgr_ipv4_set_need_mark);
 
 /*
  * nss_connmgr_ipv4_thread_fn()
@@ -3066,6 +3172,12 @@
 		goto task_cleanup_6;
 	}
 
+	result = sysfs_create_file(nss_connmgr_ipv4.nom_v4, &nss_connmgr_ipv4_need_mark_attr.attr);
+	if (result) {
+		NSS_CONNMGR_DEBUG_ERROR("Failed to register need mark file %d\n", result);
+		goto task_cleanup_7;
+	}
+
 	/*
 	 * Register this module with the Linux NSS driver (net_device)
 	 */
@@ -3104,6 +3216,8 @@
 
 	nss_unregister_ipv4_mgr();
 
+	sysfs_remove_file(nss_connmgr_ipv4.nom_v4, &nss_connmgr_ipv4_need_mark_attr.attr);
+task_cleanup_7:
 	unregister_netdevice_notifier(&nss_connmgr_ipv4.netdev_notifier);
 task_cleanup_6:
 #ifdef CONFIG_NF_CONNTRACK_EVENTS
@@ -3122,6 +3236,7 @@
 	module_put(THIS_MODULE);
 	return result;
 }
+
 /*
  * nss_connmgr_ipv4_read_conn_stats
  *      Read IPV4 stats
diff --git a/nss_connmgr_ipv6.c b/nss_connmgr_ipv6.c
index c907354..c4b9561 100755
--- a/nss_connmgr_ipv6.c
+++ b/nss_connmgr_ipv6.c
@@ -312,6 +312,7 @@
 	struct dentry *dent;	/* Debugfs directory */
 	uint32_t debug_stats[NSS_CONNMGR_IPV6_DEBUG_STATS_MAX];
 				/* Debug statistics */
+	uint32_t need_mark;	/* When 0, mark filtering is disabled.  When != 0, only packets with the given skb->mark value are processed */
 };
 
 static unsigned int nss_connmgr_ipv6_post_routing_hook(unsigned int hooknum,
@@ -823,6 +824,15 @@
 	nss_tx_status_t nss_tx_status;
 
 	/*
+	 * If the 'need_mark' flag is set and this packet does not have the relevant mark
+	 * then we don't accelerate at all
+	 */
+	if (nss_connmgr_ipv6.need_mark && (nss_connmgr_ipv6.need_mark != skb->mark)) {
+		NSS_CONNMGR_DEBUG_TRACE("Mark %x not seen, ignoring: %p\n", nss_connmgr_ipv6.need_mark, skb);
+		return NF_ACCEPT;
+	}
+
+	/*
 	 * Only process IPV6 packets in bridge hook
 	 */
 	if(skb->protocol != htons(ETH_P_IPV6)){
@@ -977,6 +987,22 @@
 
 	unic.flags = 0;
 
+	/*
+	 * Store the skb->priority as the qos tag
+	 */
+	unic.qos_tag = (uint32_t)skb->priority;
+
+	/*
+	 * Only set the routed flag if the interface from which this packet came
+	 * was NOT a bridge port OR, if it was, the bridge is not the same one we are outputting onto.
+	 */
+	if (!is_bridge_port(in) || (out->master != in->master)) {
+		unic.flags |= NSS_IPV6_CREATE_FLAG_ROUTED;
+	}
+
+	/*
+	 * Set the PPPoE values to the defaults, just in case there is not any PPPoE connection.
+	 */
 	unic.return_pppoe_session_id = 0;
 	unic.flow_pppoe_session_id = 0;
 
@@ -1342,8 +1368,9 @@
 			"dest_dev: %s\n"
 			"src_iface_num: %u\n"
 			"dest_iface_num: %u\n"
-			"ingress_vlan_tag: %u"
-			"egress_vlan_tag: %u",
+			"ingress_vlan_tag: %u\n"
+			"egress_vlan_tag: %u\n"
+			"qos_tag: %u\n",
 			ct,
 			skb,
 			(ctinfo < IP_CT_IS_REPLY)? "Original" : "Reply",
@@ -1357,7 +1384,8 @@
 			unic.src_interface_num,
 			unic.dest_interface_num,
 			unic.ingress_vlan_tag,
-			unic.egress_vlan_tag);
+			unic.egress_vlan_tag,
+			unic.qos_tag);
 
 	/*
 	 * Create the Network Accelerator connection cache entries
@@ -1459,6 +1487,15 @@
 	struct net_device *src_slave = NULL;
 
 	/*
+	 * If the 'need_mark' flag is set and this packet does not have the relevant mark
+	 * then we don't accelerate at all
+	 */
+	if (nss_connmgr_ipv6.need_mark && (nss_connmgr_ipv6.need_mark != skb->mark)) {
+		NSS_CONNMGR_DEBUG_TRACE("Mark %x not seen, ignoring: %p\n", nss_connmgr_ipv6.need_mark, skb);
+		return NF_ACCEPT;
+	}
+
+	/*
 	 * Don't process broadcast or multicast
 	 */
 	if (skb->pkt_type == PACKET_BROADCAST) {
@@ -1635,6 +1672,16 @@
 	unic.flags = 0;
 
 	/*
+	 * Store the skb->priority as the qos tag
+	 */
+	unic.qos_tag = (uint32_t)skb->priority;
+
+	/*
+	 * Always a routed path
+	 */
+	unic.flags |= NSS_IPV6_CREATE_FLAG_ROUTED;
+
+	/*
 	 * Set the PPPoE values to the defaults, just in case there is not any PPPoE connection.
 	 */
 	unic.return_pppoe_session_id = 0;
@@ -1984,7 +2031,8 @@
 			"ingress_vlan_tag: %u\n"
 			"egress_vlan_tag: %u\n"
 			"flow_pppoe_session_id: %u\n"
-			"return_pppoe_session_id: %u\n",
+			"return_pppoe_session_id: %u\n"
+			"qos_tag: %u\n",
 			ct,
 			skb,
 			(ctinfo < IP_CT_IS_REPLY)? "Original" : "Reply",
@@ -2000,7 +2048,8 @@
 			unic.ingress_vlan_tag,
 			unic.egress_vlan_tag,
 			unic.flow_pppoe_session_id,
-			unic.return_pppoe_session_id);
+			unic.return_pppoe_session_id,
+			unic.qos_tag);
 
 	/*
 	 * Create the Network Accelerator connection cache entries
@@ -2707,12 +2756,69 @@
 }
 
 /*
+ * nss_connmgr_ipv6_get_need_mark()
+ * 	Get the value of "need_mark" operational control variable
+ */
+static ssize_t nss_connmgr_ipv6_get_need_mark(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	ssize_t count;
+	uint32_t num;
+
+	/*
+	 * Operate under our locks
+	 */
+	spin_lock_bh(&nss_connmgr_ipv6.lock);
+	num = nss_connmgr_ipv6.need_mark;
+	spin_unlock_bh(&nss_connmgr_ipv6.lock);
+
+	count = snprintf(buf, (ssize_t)PAGE_SIZE, "%x\n", num);
+	return count;
+}
+
+/*
+ * nss_connmgr_ipv6_set_need_mark()
+ * 	Set the value of "need_mark" operational control variable.
+ */
+static ssize_t nss_connmgr_ipv6_set_need_mark(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	char num_buf[12];
+	uint32_t num;
+
+
+	/*
+	 * Get the hex number from buf into a properly zero-terminated number buffer
+	 */
+	if (count > 11) {
+		return 0;
+	}
+	memcpy(num_buf, buf, count);
+	num_buf[count] = '\0';
+	sscanf(num_buf, "%x", &num);
+	NSS_CONNMGR_DEBUG_TRACE("nss_connmgr_ipv6_need_mark = %x\n", num);
+
+	/*
+	 * Operate under our locks
+	 */
+	spin_lock_bh(&nss_connmgr_ipv6.lock);
+	nss_connmgr_ipv6.need_mark = num;
+	spin_unlock_bh(&nss_connmgr_ipv6.lock);
+
+	return count;
+}
+
+/*
  * SysFS attributes for the default classifier itself.
  */
 static const struct device_attribute nss_connmgr_ipv6_terminate_attr =
 		__ATTR(terminate, S_IWUGO | S_IRUGO, nss_connmgr_ipv6_get_terminate, nss_connmgr_ipv6_set_terminate);
 static const struct device_attribute nss_connmgr_ipv6_stop_attr =
 		__ATTR(stop, S_IWUGO | S_IRUGO, nss_connmgr_ipv6_get_stop, nss_connmgr_ipv6_set_stop);
+static const struct device_attribute nss_connmgr_ipv6_need_mark_attr =
+		__ATTR(need_mark, S_IWUGO | S_IRUGO, nss_connmgr_ipv6_get_need_mark, nss_connmgr_ipv6_set_need_mark);
 
 /*
  * nss_connmgr_ipv6_thread_fn()
@@ -2778,6 +2884,12 @@
 		goto task_cleanup_5;
 	}
 
+	result = sysfs_create_file(nss_connmgr_ipv6.nom_v6, &nss_connmgr_ipv6_need_mark_attr.attr);
+	if (result) {
+		NSS_CONNMGR_DEBUG_ERROR("Failed to register need mark file %d\n", result);
+		goto task_cleanup_6;
+	}
+
 	nss_connmgr_ipv4_register_bond_slave_linkup_cb(nss_connmgr_bond_link_up);
 
 	/*
@@ -2812,6 +2924,8 @@
 
 	nss_unregister_ipv6_mgr();
 
+	sysfs_remove_file(nss_connmgr_ipv6.nom_v6, &nss_connmgr_ipv6_need_mark_attr.attr);
+task_cleanup_6:
 	unregister_netdevice_notifier(&nss_connmgr_ipv6.netdev_notifier);
 task_cleanup_5:
 #ifdef CONFIG_NF_CONNTRACK_EVENTS
diff --git a/nss_core.c b/nss_core.c
index 9ea5432..8550a83 100755
--- a/nss_core.c
+++ b/nss_core.c
@@ -20,6 +20,7 @@
  */
 
 #include "nss_core.h"
+#include <linux/module.h>
 #include <nss_hal.h>
 #include <net/dst.h>
 #include <linux/etherdevice.h>
@@ -164,7 +165,7 @@
 		buffer_type = desc->buffer_type;
 		opaque = desc->opaque;
 
-	if (unlikely((buffer_type == N2H_BUFFER_CRYPTO_RESP))) {
+		if (unlikely((buffer_type == N2H_BUFFER_CRYPTO_RESP))) {
 			NSS_PKT_STATS_INCREMENT(nss_ctx, &nss_top->stats_drv[NSS_STATS_DRV_RX_CRYPTO_RESP]);
 
 
@@ -223,6 +224,105 @@
 			 */
 
 			switch (buffer_type) {
+			case N2H_BUFFER_SHAPER_BOUNCED_INTERFACE:
+				{
+					/*
+					 * Bounced packet is returned from an interface bounce operation
+					 * Obtain the registrant to which to return the skb
+					 */
+					nss_shaper_bounced_callback_t bounced_callback;
+					void *app_data;
+					struct module *owner;
+					struct nss_shaper_bounce_registrant *reg = &nss_top->bounce_interface_registrants[interface_num];
+
+					spin_lock_bh(&nss_top->lock);
+
+					/*
+					 * Do we have a registrant?
+					 */
+					if (!reg->registered) {
+						spin_unlock_bh(&nss_top->lock);
+						break;
+					}
+
+					/*
+					 * Get handle to the owning registrant
+					 */
+					bounced_callback = reg->bounced_callback;
+					app_data = reg->app_data;
+					owner = reg->owner;
+					if (!try_module_get(owner)) {
+						spin_unlock_bh(&nss_top->lock);
+						break;
+					}
+
+					/*
+					 * Callback is active, unregistration is not permitted while this is in progress
+					 */
+					reg->callback_active = true;
+					spin_unlock_bh(&nss_top->lock);
+
+					/*
+					 * Pass bounced packet back to registrant
+					 */
+					bounced_callback(app_data, nbuf);
+					spin_lock_bh(&nss_top->lock);
+					reg->callback_active = false;
+					spin_unlock_bh(&nss_top->lock);
+					module_put(owner);
+				}
+				break;
+			case N2H_BUFFER_SHAPER_BOUNCED_BRIDGE:
+				{
+					/*
+					 * Bounced packet is returned from a bridge bounce operation
+					 * Obtain the registrant to which to return the skb
+					 */
+					nss_shaper_bounced_callback_t bounced_callback;
+					void *app_data;
+					struct module *owner;
+					struct nss_shaper_bounce_registrant *reg = &nss_top->bounce_bridge_registrants[interface_num];
+
+					spin_lock_bh(&nss_top->lock);
+
+					/*
+					 * Do we have a registrant?
+					 */
+					if (!reg->registered) {
+						spin_unlock_bh(&nss_top->lock);
+						break;
+					}
+
+					/*
+					 * Get handle to the owning registrant
+					 */
+					bounced_callback = reg->bounced_callback;
+					app_data = reg->app_data;
+					owner = reg->owner;
+					if (!try_module_get(owner)) {
+						spin_unlock_bh(&nss_top->lock);
+						break;
+					}
+
+					/*
+					 * Callback is active, unregistration is not permitted while this is in progress
+					 */
+					reg->callback_active = true;
+					spin_unlock_bh(&nss_top->lock);
+
+					/*
+					 * Pass bounced packet back to registrant
+					 */
+					bounced_callback(app_data, nbuf);
+					spin_lock_bh(&nss_top->lock);
+					reg->callback_active = false;
+					spin_unlock_bh(&nss_top->lock);
+					module_put(owner);
+				}
+				break;
 			case N2H_BUFFER_PACKET_VIRTUAL:
 				NSS_PKT_STATS_INCREMENT(nss_ctx, &nss_top->stats_drv[NSS_STATS_DRV_RX_VIRTUAL]);
 
@@ -249,11 +349,14 @@
 					break;
 				}
 
-				dev_hold(ndev);
+				dev_hold(ndev);		/* GGG FIXME: THIS IS BROKEN AS NDEV COULD BE DESTROYED BEFORE THE HOLD IS TAKEN!  NDEV SHOULD BE HELD WHEN THE VIRTUAL IS REGISTERED
+							 * AND THE HOLD HERE TAKEN INSIDE OF SOME KIND OF MUTEX LOCK WITH VIRTUAL UNREGISTRATION */
 				nbuf->dev = ndev;
 
 				/*
 				 * Send the packet to virtual interface
+				 * NOTE: Invoking this will BYPASS any assigned QDisc - this is OKAY
+				 * as TX packets out of the NSS will have been shaped inside the NSS.
 				 */
 				ndev->netdev_ops->ndo_start_xmit(nbuf, ndev);
 				dev_put(ndev);
@@ -817,6 +920,7 @@
 		desc->payload_offs = (uint16_t)(nbuf->data - nbuf->head);
 		desc->payload_len = nbuf->len;
 		desc->buffer_len = (uint16_t)(nbuf->end - nbuf->head);
+		desc->qos_tag = (uint32_t)nbuf->priority;
 
 		if (unlikely(!NSS_IS_IF_TYPE(VIRTUAL, if_num))) {
 			if (likely(nbuf->destructor == NULL)) {
@@ -880,6 +984,7 @@
 		desc->buffer = frag0phyaddr;
 		desc->mss = mss;
 		desc->bit_flags = bit_flags;
+		desc->qos_tag = (uint32_t)nbuf->priority;
 
 		/*
 		 * Now handle rest of the fragments.
@@ -907,6 +1012,7 @@
 			desc->payload_offs = 0;
 			desc->payload_len = skb_frag_size(frag);
 			desc->buffer_len = skb_frag_size(frag);
+			desc->qos_tag = (uint32_t)nbuf->priority;
 			desc->mss = mss;
 			desc->bit_flags = bit_flags;
 		}
diff --git a/nss_core.h b/nss_core.h
index 1ac0c68..2927884 100755
--- a/nss_core.h
+++ b/nss_core.h
@@ -426,6 +426,18 @@
 #define NSS_H2N_DESC_RING_FLAGS_TX_STOPPED 0x1	/* Tx has been stopped for this queue */
 
 /*
+ * struct nss_shaper_bounce_registrant
+ *	Registrant detail for shaper bounce operations
+ */
+struct nss_shaper_bounce_registrant {
+	nss_shaper_bounced_callback_t bounced_callback;		/* Invoked for each shaper bounced packet returned from the NSS */
+	void *app_data;						/* Argument given to the callback */
+	struct module *owner;					/* Owning module of the callback + arg */
+	bool registered;					/* True when this registrant entry is in use */
+	volatile bool callback_active;				/* true when the bounce callback is being called */
+};
+
+/*
  * NSS context instance (one per NSS core)
  */
 struct nss_ctx_instance {
@@ -481,6 +493,7 @@
 	/*
 	 * Network processing handler core ids (CORE0/CORE1)
 	 */
+	uint8_t shaping_handler_id;
 	uint8_t ipv4_handler_id;
 	uint8_t ipv6_handler_id;
 	uint8_t crypto_handler_id;
@@ -490,6 +503,7 @@
 	uint8_t tunipip6_handler_id;
 	uint8_t phys_if_handler_id[4];
 	uint8_t frequency_handler_id;
+
 	nss_ipv4_callback_t ipv4_callback;
 					/* IPv4 sync/establish callback function */
 	nss_ipv6_callback_t ipv6_callback;
@@ -510,6 +524,10 @@
 					/* 6rd tunnel interface event callback function */
 	nss_tunipip6_if_event_callback_t tunipip6_if_event_callback;
 					/* ipip6 tunnel interface event callback function */
+	struct nss_shaper_bounce_registrant bounce_interface_registrants[NSS_MAX_NET_INTERFACES];
+					/* Registrants for interface shaper bounce operations */
+	struct nss_shaper_bounce_registrant bounce_bridge_registrants[NSS_MAX_NET_INTERFACES];
+					/* Registrants for bridge shaper bounce operations */
 	void *crypto_ctx;		/* Crypto interface context */
 	void *if_ctx[NSS_MAX_NET_INTERFACES];
 					/* Phys/Virt interface context */
diff --git a/nss_hlos_if.h b/nss_hlos_if.h
index a8fcd6a..de3782f 100755
--- a/nss_hlos_if.h
+++ b/nss_hlos_if.h
@@ -38,6 +38,8 @@
 					/* Do not perform sequence number checks */
 #define NSS_IPV4_RULE_CREATE_FLAG_BRIDGE_FLOW 0x02
 					/* This is a pure bridge forwarding flow */
+#define NSS_IPV4_RULE_CREATE_FLAG_ROUTED 0x04
+					/* Rule is for a routed connection. */
 
 /*
  * The NSS IPv4 rule creation structure.
@@ -73,7 +75,7 @@
 	uint16_t return_pppoe_remote_mac[3];	/* PPPoE Server MAC address */
 	uint16_t egress_vlan_tag;		/* Egress VLAN tag expected for this flow */
 	uint8_t flags;				/* Bit flags associated with the rule */
-	enum nss_lro_modes lro_mode;	/* LRO mode for this connection */
+	uint32_t qos_tag;			/* QoS tag value */
 };
 
 /*
@@ -94,6 +96,8 @@
 					/* Do not perform sequence number checks */
 #define NSS_IPV6_RULE_CREATE_FLAG_BRIDGE_FLOW 0x02
 					/* This is a pure bridge forwarding flow */
+#define NSS_IPV6_RULE_CREATE_FLAG_ROUTED 0x04
+					/* Rule is for a routed connection. */
 
 /*
  * The NSS IPv6 rule creation structure.
@@ -125,6 +129,7 @@
 	uint16_t return_pppoe_remote_mac[3];	/* PPPoE Server MAC address */
 	uint16_t egress_vlan_tag;		/* Egress VLAN tag expected for this flow */
 	uint8_t flags;				/* Bit flags associated with the rule */
+	uint32_t qos_tag;			/* QoS tag value */
 };
 
 /*
@@ -288,6 +293,258 @@
 };
 
 /*
+ * struct nss_tx_shaper_config_assign_shaper
+ */
+struct nss_tx_shaper_config_assign_shaper {
+	uint32_t shaper_num;		/* Number of an existing shaper to assign, or 0 if any shaper will do */
+};
+
+/*
+ * struct nss_tx_shaper_config_unassign_shaper
+ */
+struct nss_tx_shaper_config_unassign_shaper {
+	uint32_t shaper_num;		/* Number of the shaper to unassign. */
+};
+
+/*
+ * enum nss_tx_shaper_node_types
+ *	Types of shaper node we export to the HLOS
+ */
+enum nss_tx_shaper_node_types {
+	NSS_TX_SHAPER_NODE_TYPE_CODEL = 1,		/* Matches SHAPER_NODE_TYPE_CODEL */
+	NSS_TX_SHAPER_NODE_TYPE_PRIO = 3,		/* Matches SHAPER_NODE_TYPE_PRIO */
+	NSS_TX_SHAPER_NODE_TYPE_FIFO = 4,		/* Matches SHAPER_NODE_TYPE_FIFO */
+	NSS_TX_SHAPER_NODE_TYPE_TBL = 5,		/* Matches SHAPER_NODE_TYPE_TBL */
+};
+typedef enum nss_tx_shaper_node_types nss_tx_shaper_node_type_t;
+
+/*
+ * struct nss_tx_shaper_config_alloc_shaper_node
+ */
+struct nss_tx_shaper_config_alloc_shaper_node {
+	nss_tx_shaper_node_type_t node_type;
+					/* Type of shaper node */
+	uint32_t qos_tag;		/* The qos tag to give the new node */
+};
+
+/*
+ * struct nss_tx_shaper_config_free_shaper_node
+ */
+struct nss_tx_shaper_config_free_shaper_node {
+	uint32_t qos_tag;		/* The qos tag of the node to free */
+};
+
+/*
+ * struct nss_tx_shaper_config_set_root_node
+ */
+struct nss_tx_shaper_config_set_root_node {
+	uint32_t qos_tag;		/* The qos tag of the node that becomes root */
+};
+
+/*
+ * struct nss_tx_shaper_config_set_default_node
+ */
+struct nss_tx_shaper_config_set_default_node {
+	uint32_t qos_tag;		/* The qos tag of the node that becomes default */
+};
+
+/*
+ * struct nss_tx_shaper_shaper_node_basic_stats_get
+ *	Obtain basic stats for a shaper node
+ */
+struct nss_tx_shaper_shaper_node_basic_stats_get {
+	uint32_t qos_tag;		/* The qos tag of the node from which to obtain basic stats */
+};
+
+/*
+ * struct nss_tx_shaper_config_prio_attach
+ */
+struct nss_tx_shaper_config_prio_attach {
+	uint32_t child_qos_tag;		/* Qos tag of shaper node to add as child */
+	uint32_t priority;		/* Priority of the child */
+};
+
+/*
+ * struct nss_tx_shaper_config_prio_detach
+ */
+struct nss_tx_shaper_config_prio_detach {
+	uint32_t priority;		/* Priority of the child to detach */
+};
+
+/*
+ * struct nss_tx_shaper_config_codel_alg_param
+ */
+struct nss_tx_shaper_config_codel_alg_param {
+	uint16_t interval;		/* Buffer time to smooth state transition */
+	uint16_t target;		/* Acceptable delay associated with a queue */
+	uint16_t mtu;			/* MTU for the associated interface */
+};
+
+/*
+ * struct nss_tx_shaper_config_codel_param
+ */
+struct nss_tx_shaper_config_codel_param {
+	int32_t qlen_max;					/* Max no. of packets that can be enqueued */
+	struct nss_tx_shaper_config_codel_alg_param cap;	/* Config structure for codel algorithm */
+};
+
+/*
+ * struct nss_tx_shaper_config_limiter_alg_param
+ */
+struct nss_tx_shaper_config_limiter_alg_param {
+	uint32_t rate;		/* Allowed traffic rate measured in bytes per second */
+	uint32_t burst;		/* Max bytes that can be sent before the next token update */
+	uint32_t max_size;	/* The maximum size of packets (in bytes) supported */
+	bool short_circuit;	/* When set, limiter will stop limiting the sending rate */
+};
+
+/*
+ * struct nss_tx_shaper_config_tbl_attach
+ */
+struct nss_tx_shaper_config_tbl_attach {
+	uint32_t child_qos_tag;						/* Qos tag of shaper node to add as child */
+};
+
+/*
+ * struct nss_tx_shaper_config_tbl_param
+ */
+struct nss_tx_shaper_config_tbl_param {
+	uint32_t qlen_bytes;						/* Max size of queue in bytes */
+	struct nss_tx_shaper_config_limiter_alg_param lap_cir;	/* Config committed information rate */
+	struct nss_tx_shaper_config_limiter_alg_param lap_pir;	/* Config peak information rate */
+};
+
+/*
+ * struct nss_tx_shaper_config_bf_attach
+ */
+struct nss_tx_shaper_config_bf_attach {
+	uint32_t child_qos_tag;		/* Qos tag of the shaper node to add as child */
+};
+
+/*
+ * struct nss_tx_shaper_config_bf_detach
+ */
+struct nss_tx_shaper_config_bf_detach {
+	uint32_t child_qos_tag;		/* Qos tag of the shaper node to detach */
+};
+
+/*
+ * struct nss_tx_shaper_config_bf_group_attach
+ */
+struct nss_tx_shaper_config_bf_group_attach {
+	uint32_t child_qos_tag;		/* Qos tag of shaper node to add as child */
+};
+
+/*
+ * struct nss_tx_shaper_config_bf_group_param
+ */
+struct nss_tx_shaper_config_bf_group_param {
+	uint32_t qlen_bytes;					/* Maximum size of queue in bytes */
+	uint32_t quantum;					/* Smallest increment value for the DRRs */
+	struct nss_tx_shaper_config_limiter_alg_param lap;	/* Config structure for the limiter algorithm */
+};
+
+/*
+ * enum nss_tx_shaper_config_fifo_drop_modes
+ *	Different drop modes for fifo
+ */
+enum nss_tx_shaper_config_fifo_drop_modes {
+	NSS_TX_SHAPER_FIFO_DROP_MODE_HEAD = 0,
+	NSS_TX_SHAPER_FIFO_DROP_MODE_TAIL,
+	NSS_TX_SHAPER_FIFO_DROP_MODES,			/* Must be last */
+};
+typedef enum nss_tx_shaper_config_fifo_drop_modes nss_tx_shaper_config_fifo_drop_mode_t;
+
+/*
+ * struct nss_tx_shaper_config_fifo_param
+ */
+struct nss_tx_shaper_config_fifo_param {
+	uint32_t limit;						/* Queue limit in packets */
+	nss_tx_shaper_config_fifo_drop_mode_t drop_mode;	/* FIFO drop mode when queue is full */
+};
+
+/*
+ * struct nss_tx_shaper_node_config
+ *	Configuration messages for shaper nodes; which one applies depends on the type of configuration message
+ *
+ * This structure contains all of the different node configuration messages that can be sent, though not to all shaper node types.
+ */
+struct nss_tx_shaper_node_config {
+	uint32_t qos_tag;		/* Identifier of the shaper node to which the config is targeted */
+
+	union {
+		struct nss_tx_shaper_config_prio_attach prio_attach;
+		struct nss_tx_shaper_config_prio_detach prio_detach;
+		struct nss_tx_shaper_config_codel_param codel_param;
+		struct nss_tx_shaper_config_tbl_attach tbl_attach;
+		struct nss_tx_shaper_config_tbl_param tbl_param;
+		struct nss_tx_shaper_config_bf_attach bf_attach;
+		struct nss_tx_shaper_config_bf_detach bf_detach;
+		struct nss_tx_shaper_config_bf_group_attach bf_group_attach;
+		struct nss_tx_shaper_config_bf_group_param bf_group_param;
+		struct nss_tx_shaper_config_fifo_param fifo_param;
+	} snc;
+};
+
+/*
+ * enum nss_tx_shaper_config_types
+ *	Types of shaper configuration messages
+ */
+enum nss_tx_shaper_config_types {
+	NSS_TX_SHAPER_CONFIG_TYPE_ASSIGN_SHAPER,	/* Assign a shaper to an interface (B or I) */
+	NSS_TX_SHAPER_CONFIG_TYPE_ALLOC_SHAPER_NODE,	/* Allocate a type of shaper node and give it a qos tag */
+	NSS_TX_SHAPER_CONFIG_TYPE_FREE_SHAPER_NODE,	/* Free a shaper node */
+	NSS_TX_SHAPER_CONFIG_TYPE_PRIO_ATTACH,		/* Configure prio to attach a node with a given priority */
+	NSS_TX_SHAPER_CONFIG_TYPE_PRIO_DETACH,		/* Configure prio to detach a node at a given priority */
+	NSS_TX_SHAPER_CONFIG_TYPE_SET_DEFAULT,		/* Configure shaper to have a default node */
+	NSS_TX_SHAPER_CONFIG_TYPE_SET_ROOT,		/* Configure shaper to have a root node */
+	NSS_TX_SHAPER_CONFIG_TYPE_UNASSIGN_SHAPER,	/* Unassign a shaper from an interface (B or I) */
+	NSS_TX_SHAPER_CONFIG_TYPE_CODEL_CHANGE_PARAM,	/* Configure codel parameters */
+	NSS_TX_SHAPER_CONFIG_TYPE_TBL_ATTACH,		/* Configure tbl to attach a node as child */
+	NSS_TX_SHAPER_CONFIG_TYPE_TBL_DETACH,		/* Configure tbl to detach its child */
+	NSS_TX_SHAPER_CONFIG_TYPE_TBL_CHANGE_PARAM,	/* Configure tbl to tune its parameters */
+	NSS_TX_SHAPER_CONFIG_TYPE_BF_ATTACH,		/* Configure bf to attach a node to its round robin list */
+	NSS_TX_SHAPER_CONFIG_TYPE_BF_DETACH,		/* Configure bf to detach a node with a particular QoS tag */
+	NSS_TX_SHAPER_CONFIG_TYPE_BF_GROUP_ATTACH,	/* Configure bf group to attach a node as child */
+	NSS_TX_SHAPER_CONFIG_TYPE_BF_GROUP_DETACH,	/* Configure bf group to detach its child */
+	NSS_TX_SHAPER_CONFIG_TYPE_BF_GROUP_CHANGE_PARAM,
+							/* Configure bf group to tune its parameters */
+	NSS_TX_SHAPER_CONFIG_TYPE_FIFO_CHANGE_PARAM,	/* Configure fifo */
+	NSS_TX_SHAPER_CONFIG_TYPE_SHAPER_NODE_BASIC_STATS_GET,
+							/* Get shaper node basic stats */
+};
+typedef enum nss_tx_shaper_config_types nss_tx_shaper_config_type_t;
+
+/*
+ * struct nss_tx_shaper_configure
+ *	Shaper configuration messages
+ */
+struct nss_tx_shaper_configure {
+	uint32_t opaque1;		/* DO NOT TOUCH, HLOS USE ONLY */
+	uint32_t reserved1;		/* DO NOT TOUCH */
+	uint32_t opaque2;		/* DO NOT TOUCH, HLOS USE ONLY */
+	uint32_t reserved2;		/* DO NOT TOUCH */
+	uint32_t opaque3;		/* DO NOT TOUCH, HLOS USE ONLY */
+	uint32_t reserved3;		/* DO NOT TOUCH */
+	uint32_t reserved4;		/* DO NOT TOUCH */
+	uint32_t reserved5;		/* DO NOT TOUCH */
+	uint32_t interface_num;		/* Interface (pnode) number for which the shaper config message is targeted */
+	bool i_shaper;			/* true when I shaper, false when B shaper */
+	nss_tx_shaper_config_type_t type;
+					/* Type of configuration message (selects the mt union member) */
+	union {
+		struct nss_tx_shaper_config_assign_shaper assign_shaper;
+		struct nss_tx_shaper_config_unassign_shaper unassign_shaper;
+		struct nss_tx_shaper_config_alloc_shaper_node alloc_shaper_node;
+		struct nss_tx_shaper_config_free_shaper_node free_shaper_node;
+		struct nss_tx_shaper_config_set_default_node set_default_node;
+		struct nss_tx_shaper_config_set_root_node set_root_node;
+		struct nss_tx_shaper_node_config shaper_node_config;
+		struct nss_tx_shaper_shaper_node_basic_stats_get shaper_node_basic_stats_get;
+	} mt;
+};
+
+/*
  * Types of TX metadata.
  */
 enum nss_tx_metadata_types {
@@ -315,6 +572,7 @@
 	NSS_TX_METADATA_TYPE_GENERIC_IF_PARAMS,
 	NSS_TX_METADATA_TYPE_NSS_FREQ_CHANGE,
 	NSS_TX_METADATA_TYPE_INTERFACE_MTU_CHANGE,
+	NSS_TX_METADATA_TYPE_SHAPER_CONFIGURE,
 };
 
 /*
@@ -345,6 +603,7 @@
 		struct nss_generic_if_params generic_if_params;
 		struct nss_freq_change freq_change;
 		struct nss_if_mtu_change if_mtu_change;
+		struct nss_tx_shaper_configure shaper_configure;
 	} sub;
 };
 
@@ -387,6 +646,8 @@
 	uint16_t return_pppoe_session_id;	/* Return direction's PPPoE session ID. */
 	uint16_t return_pppoe_remote_mac[3];	/* Return direction's PPPoE Server MAC address */
 	uint16_t egress_vlan_tag;		/* Egress VLAN tag */
+	uint8_t flags;				/* Bit flags associated with the rule */
+	uint32_t qos_tag;			/* QoS tag value */
 };
 
 /*
@@ -433,6 +694,9 @@
 					/* Return interface's PPPoE remote server MAC address if there is any */
 	uint32_t inc_ticks;		/* Number of ticks since the last sync */
 	uint32_t reason;		/* Reason for the sync */
+
+	uint8_t flags;			/* Bit flags associated with the rule */
+	uint32_t qos_tag;		/* QoS tag value */
 };
 
 /*
@@ -458,6 +722,8 @@
 	uint16_t return_pppoe_session_id;	/* Return direction's PPPoE session ID. */
 	uint16_t return_pppoe_remote_mac[3];	/* Return direction's PPPoE Server MAC address */
 	uint16_t egress_vlan_tag;		/* Egress VLAN tag */
+	uint8_t flags;				/* Bit flags associated with the rule */
+	uint32_t qos_tag;			/* QoS tag value */
 };
 
 /*
@@ -504,6 +770,9 @@
 					/* Return interface's PPPoE remote server MAC address if there is any */
 	uint32_t inc_ticks;		/* Number of ticks since the last sync */
 	uint32_t reason;		/* Reason for the sync */
+
+	uint8_t flags;			/* Bit flags associated with the rule */
+	uint32_t qos_tag;		/* QoS tag value */
 };
 
 /*
@@ -878,6 +1147,119 @@
 };
 
 /*
+ * enum nss_rx_shaper_response_types
+ *	Types of shaper configuration response messages
+ */
+enum nss_rx_shaper_response_types {
+	/*
+	 * Failure messages are < 0
+	 */
+	NSS_RX_SHAPER_RESPONSE_TYPE_NO_SHAPERS = -65536,		/* No shaper available for a shaper assign command to succeed */
+	NSS_RX_SHAPER_RESPONSE_TYPE_NO_SHAPER,				/* No shaper to which to issue a shaper or node configuration message */
+	NSS_RX_SHAPER_RESPONSE_TYPE_NO_SHAPER_NODE,			/* No shaper node to which to issue a configuration message */
+	NSS_RX_SHAPER_RESPONSE_TYPE_NO_SHAPER_NODES,			/* No shaper nodes available of the type requested */
+	NSS_RX_SHAPER_RESPONSE_TYPE_OLD,				/* Request is old / environment changed by the time the request was processed */
+	NSS_RX_SHAPER_RESPONSE_TYPE_UNRECOGNISED,			/* Request is not recognised by the recipient */
+	NSS_RX_SHAPER_RESPONSE_TYPE_FIFO_QUEUE_LIMIT_INVALID,		/* Fifo queue limit is invalid */
+	NSS_RX_SHAPER_RESPONSE_TYPE_FIFO_DROP_MODE_INVALID,		/* Fifo drop mode is invalid */
+	NSS_RX_SHAPER_RESPONSE_TYPE_BAD_DEFAULT_CHOICE,			/* Node selected has no queue to enqueue to */
+	NSS_RX_SHAPER_RESPONSE_TYPE_DUPLICATE_QOS_TAG,			/* Duplicate QoS tag as another node */
+	NSS_RX_SHAPER_RESPONSE_TYPE_TBL_CIR_RATE_AND_BURST_REQUIRED,	/* CIR rate and burst are mandatory */
+	NSS_RX_SHAPER_RESPONSE_TYPE_TBL_CIR_BURST_LESS_THAN_MTU,	/* CIR burst size is smaller than MTU */
+	NSS_RX_SHAPER_RESPONSE_TYPE_TBL_PIR_BURST_LESS_THAN_MTU,	/* PIR burst size is smaller than MTU */
+	NSS_RX_SHAPER_RESPONSE_TYPE_TBL_PIR_BURST_REQUIRED,		/* PIR burst size must be provided if peakrate
+									 * limiting is required.
+									 */
+	NSS_RX_SHAPER_RESPONSE_TYPE_CODEL_ALL_PARAMS_REQUIRED,		/* Codel requires non-zero value for target,
+									 * interval and limit.
+									 */
+	/*
+	 * Success messages are >= 0
+	 */
+	NSS_RX_SHAPER_RESPONSE_TYPE_SHAPER_ASSIGN_SUCCESS = 0,		/* Successfully assigned a shaper */
+	NSS_RX_SHAPER_RESPONSE_TYPE_SHAPER_NODE_ALLOC_SUCCESS,		/* Alloc shaper node request successful */
+	NSS_RX_SHAPER_RESPONSE_TYPE_PRIO_ATTACH_SUCCESS,		/* Prio attach success */
+	NSS_RX_SHAPER_RESPONSE_TYPE_PRIO_DETACH_SUCCESS,		/* Prio detach success */
+	NSS_RX_SHAPER_RESPONSE_TYPE_CODEL_CHANGE_PARAM_SUCCESS,		/* Codel parameter configuration success */
+	NSS_RX_SHAPER_RESPONSE_TYPE_TBL_ATTACH_SUCCESS,			/* Tbl attach success */
+	NSS_RX_SHAPER_RESPONSE_TYPE_TBL_DETACH_SUCCESS,			/* Tbl detach success */
+	NSS_RX_SHAPER_RESPONSE_TYPE_TBL_CHANGE_PARAM_SUCCESS,		/* Tbl parameter configuration success */
+	NSS_RX_SHAPER_RESPONSE_TYPE_BF_ATTACH_SUCCESS,			/* Bigfoot attach success */
+	NSS_RX_SHAPER_RESPONSE_TYPE_BF_DETACH_SUCCESS,			/* Bigfoot detach success */
+	NSS_RX_SHAPER_RESPONSE_TYPE_BF_GROUP_ATTACH_SUCCESS,		/* Bigfoot group attach success */
+	NSS_RX_SHAPER_RESPONSE_TYPE_BF_GROUP_DETACH_SUCCESS,		/* Bigfoot group detach success */
+	NSS_RX_SHAPER_RESPONSE_TYPE_BF_GROUP_CHANGE_PARAM_SUCCESS,	/* Bigfoot group parameter configuration success */
+	NSS_RX_SHAPER_RESPONSE_TYPE_SHAPER_SET_ROOT_SUCCESS,		/* Setting of root successful */
+	NSS_RX_SHAPER_RESPONSE_TYPE_SHAPER_SET_DEFAULT_SUCCESS,		/* Setting of default successful */
+	NSS_RX_SHAPER_RESPONSE_TYPE_SHAPER_NODE_FREE_SUCCESS,		/* Free shaper node request successful */
+	NSS_RX_SHAPER_RESPONSE_TYPE_SHAPER_UNASSIGN_SUCCESS,		/* Successfully unassigned a shaper */
+	NSS_RX_SHAPER_RESPONSE_TYPE_FIFO_CHANGE_PARAM_SUCCESS,		/* Fifo limit set success */
+	NSS_RX_SHAPER_RESPONSE_TYPE_SHAPER_NODE_BASIC_STATS_GET_SUCCESS,
+									/* Success response for a shaper node basic stats get request */
+};
+typedef enum nss_rx_shaper_response_types nss_rx_shaper_response_type_t;
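+
+/*
+ * Note: failure codes are negative and success codes are non-negative, so
+ * response handlers can branch on sign alone. A minimal illustrative check:
+ *
+ *	if (response->type < 0)
+ *		handle_failure();	(all error codes are < 0)
+ *	else
+ *		handle_success();	(all success codes are >= 0)
+ */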
+
+/*
+ * struct nss_rx_shaper_response_shaper_assign_success
+ *	Shaper successfully assigned
+ */
+struct nss_rx_shaper_response_shaper_assign_success {
+	uint32_t shaper_num;		/* Number of the shaper assigned */
+};
+
+/*
+ * struct nss_rx_shaper_node_basic_statistics_delta
+ *	Statistics that are sent as deltas
+ */
+struct nss_rx_shaper_node_basic_statistics_delta {
+	uint32_t enqueued_bytes;			/* Bytes enqueued successfully */
+	uint32_t enqueued_packets;			/* Packets enqueued successfully */
+	uint32_t enqueued_bytes_dropped;		/* Bytes dropped during an enqueue operation due to node limits */
+	uint32_t enqueued_packets_dropped;		/* Packets dropped during an enqueue operation due to node limits */
+	uint32_t dequeued_bytes;			/* Bytes dequeued successfully from a shaper node */
+	uint32_t dequeued_packets;			/* Packets dequeued successfully from a shaper node */
+	uint32_t dequeued_bytes_dropped;		/* Bytes dropped by this node during dequeue (some nodes drop packets during dequeue rather than enqueue) */
+	uint32_t dequeued_packets_dropped;		/* Packets dropped by this node during dequeue (some nodes drop packets during dequeue rather than enqueue) */
+	uint32_t queue_overrun;				/* Number of times a queue limit was overrun, possibly resulting in dropped packet(s) */
+};
+
+/*
+ * struct nss_rx_shaper_response_shaper_node_basic_stats_get_success
+ *	Response to a request for basic stats of a shaper node
+ */
+struct nss_rx_shaper_response_shaper_node_basic_stats_get_success {
+	uint32_t qlen_bytes;				/* Total size of all packets in queue */
+	uint32_t qlen_packets;				/* Number of packets waiting in queue */
+	uint32_t packet_latency_peak_msec_dequeued;	/* Maximum milliseconds a packet was in this shaper node before being dequeued */
+	uint32_t packet_latency_minimum_msec_dequeued;	/* Minimum milliseconds a packet was in this shaper node before being dequeued */
+	uint32_t packet_latency_peak_msec_dropped;	/* Maximum milliseconds a packet was in this shaper node before being dropped */
+	uint32_t packet_latency_minimum_msec_dropped;	/* Minimum milliseconds a packet was in this shaper node before being dropped */
+	struct nss_rx_shaper_node_basic_statistics_delta delta;
+							/* Statistics that are sent as deltas */
+};
+
+/*
+ * union nss_rx_shaper_responses
+ *	Types of response message
+ */
+union nss_rx_shaper_responses {
+	struct nss_rx_shaper_response_shaper_assign_success shaper_assign_success;
+	struct nss_rx_shaper_response_shaper_node_basic_stats_get_success shaper_node_basic_stats_get_success;
+};
+
+/*
+ * struct nss_rx_shaper_response
+ *	Shaper configuration response messages
+ */
+struct nss_rx_shaper_response {
+	struct nss_tx_shaper_configure request;
+					/* Original request to which this response relates */
+	nss_rx_shaper_response_type_t type;
+					/* The response type (rt) being issued to the request */
+	union nss_rx_shaper_responses rt;
+};
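+
+/*
+ * Which member of 'rt' is valid depends on 'type'. For example, on
+ * NSS_RX_SHAPER_RESPONSE_TYPE_SHAPER_ASSIGN_SUCCESS the assigned shaper
+ * number is read as (illustrative):
+ *
+ *	uint32_t shaper_num = response->rt.shaper_assign_success.shaper_num;
+ */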
+
+/*
  * Types of RX metadata.
  */
 enum nss_rx_metadata_types {
@@ -898,6 +1280,7 @@
 	NSS_RX_METADATA_TYPE_TUN6RD_STATS_SYNC,
 	NSS_RX_METADATA_TYPE_TUNIPIP6_STATS_SYNC,
 	NSS_RX_METADATA_TYPE_IPSEC_EVENTS_SYNC,
+	NSS_RX_METADATA_TYPE_SHAPER_RESPONSE,
 };
 
 /*
@@ -923,19 +1306,21 @@
 		struct nss_tun6rd_stats_sync tun6rd_stats_sync;
 		struct nss_tunipip6_stats_sync tunipip6_stats_sync;
 		struct nss_ipsec_events_sync ipsec_events_sync;
+		struct nss_rx_shaper_response shaper_response;
 	} sub;
 };
 
-
 /*
  * H2N Buffer Types
  */
-#define H2N_BUFFER_EMPTY	0
-#define H2N_BUFFER_PACKET	2
-#define H2N_BUFFER_CTRL		4
-#define H2N_BUFFER_CRYPTO_REQ	7
-#define H2N_BUFFER_NATIVE_WIFI	8
-#define H2N_BUFFER_MAX		16
+#define H2N_BUFFER_EMPTY			0
+#define H2N_BUFFER_PACKET			2
+#define H2N_BUFFER_CTRL				4
+#define H2N_BUFFER_CRYPTO_REQ			7
+#define H2N_BUFFER_SHAPER_BOUNCE_INTERFACE	8
+#define H2N_BUFFER_SHAPER_BOUNCE_BRIDGE		9
+#define H2N_BUFFER_NATIVE_WIFI			10
+#define H2N_BUFFER_MAX				16
 
 /*
  * H2N Bit Flag Definitions
@@ -944,12 +1329,15 @@
 #define H2N_BIT_FLAG_GEN_IP_TRANSPORT_CHECKSUM	0x0002
 #define H2N_BIT_FLAG_FIRST_SEGMENT		0x0004
 #define H2N_BIT_FLAG_LAST_SEGMENT		0x0008
+
 #define H2N_BIT_FLAG_DISCARD			0x0080
 #define H2N_BIT_FLAG_SEGMENTATION_ENABLE	0x0100
 #define H2N_BIT_FLAG_SEGMENT_TSO		0x0200
 #define H2N_BIT_FLAG_SEGMENT_UFO		0x0400
 #define H2N_BIT_FLAG_SEGMENT_TSO6		0x0800
+
 #define H2N_BIT_FLAG_VIRTUAL_BUFFER		0x2000
+
 #define H2N_BIT_BUFFER_REUSE			0x8000
 
 /*
@@ -977,26 +1365,23 @@
 				/* Reserved for future use */
 	uint16_t bit_flags;
 				/* Bit flags associated with the buffer */
-	uint8_t qos_class;
-				/* QoS class of the buffer (where appropriate) */
-	uint8_t qos_priority;
-				/* QoS priority of the buffer (where appropriate) */
-	uint16_t qos_flow_id;
-				/* QoS flow ID of the buffer (where appropriate) */
+	uint32_t qos_tag;
+				/* QoS tag information of the buffer (where appropriate) */
 	uint32_t reserved4;	/* Reserved for future use */
-
 };
 
 /*
  * N2H Buffer Types
  */
-#define N2H_BUFFER_EMPTY		1
-#define N2H_BUFFER_PACKET		3
-#define N2H_BUFFER_COMMAND_RESP		5
-#define N2H_BUFFER_STATUS		6
-#define N2H_BUFFER_CRYPTO_RESP		8
-#define N2H_BUFFER_PACKET_VIRTUAL	10
-#define N2H_BUFFER_MAX			16
+#define N2H_BUFFER_EMPTY			1
+#define N2H_BUFFER_PACKET			3
+#define N2H_BUFFER_COMMAND_RESP			5
+#define N2H_BUFFER_STATUS			6
+#define N2H_BUFFER_CRYPTO_RESP			8
+#define N2H_BUFFER_PACKET_VIRTUAL		10
+#define N2H_BUFFER_SHAPER_BOUNCED_INTERFACE	11
+#define N2H_BUFFER_SHAPER_BOUNCED_BRIDGE	12
+#define N2H_BUFFER_MAX				16
 
 /*
  * Command Response Types
diff --git a/nss_init.c b/nss_init.c
index 085a181..07e81fe 100755
--- a/nss_init.c
+++ b/nss_init.c
@@ -279,6 +279,11 @@
 	/*
 	 * Check functionalities are supported by this NSS core
 	 */
+	if (npd->shaping_enabled == NSS_FEATURE_ENABLED) {
+		nss_top->shaping_handler_id = nss_dev->id;
+		printk(KERN_INFO "%p: NSS Shaping is enabled, handler id: %u", __func__, nss_top->shaping_handler_id);
+	}
+
 	if (npd->ipv4_enabled == NSS_FEATURE_ENABLED) {
 		nss_top->ipv4_handler_id = nss_dev->id;
 	}
@@ -806,6 +811,9 @@
 	 */
 	nss_pm_init();
 
+	/* GGG EARLY DEBUG ENABLE HACK - TODO REMOVE */
+	nss_hal_debug_enable();
+
 	/*
 	 * Register with Bus driver
 	 */
diff --git a/nss_qdisc.c b/nss_qdisc.c
new file mode 100755
index 0000000..d3e9b68
--- /dev/null
+++ b/nss_qdisc.c
@@ -0,0 +1,3006 @@
+/*
+ **************************************************************************
+ * Copyright (c) 2014, Qualcomm Atheros, Inc.
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ **************************************************************************
+ */
+
+/*
+ * Note: This file will be moved into the nss-qdisc directory once the driver
+ * is re-organized.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <net/pkt_sched.h>
+#include <net/inet_ecn.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <linux/if_bridge.h>
+#include <linux/list.h>
+#include <nss_api_if.h>
+#include <linux/version.h>
+#include <br_private.h>
+
+/*
+ * NSS QDisc debug macros
+ */
+#if (NSSQDISC_DEBUG_LEVEL < 1)
+#define nssqdisc_assert(c)
+#else
+#define nssqdisc_assert(c) BUG_ON(!(c))
+#endif
+
+#if (NSSQDISC_DEBUG_LEVEL < 2)
+#define nssqdisc_error(fmt, args...)
+#else
+#define nssqdisc_error(fmt, args...) printk(KERN_ERR "%d:ERROR:"fmt, __LINE__, ##args)
+#endif
+
+#if (NSSQDISC_DEBUG_LEVEL < 3)
+#define nssqdisc_warning(fmt, args...)
+#else
+#define nssqdisc_warning(fmt, args...) printk(KERN_WARNING "%d:WARN:"fmt, __LINE__, ##args)
+#endif
+
+#if (NSSQDISC_DEBUG_LEVEL < 4)
+#define nssqdisc_info(fmt, args...)
+#else
+#define nssqdisc_info(fmt, args...) printk(KERN_INFO "%d:INFO:"fmt, __LINE__, ##args)
+#endif
+
+#if (NSSQDISC_DEBUG_LEVEL < 5)
+#define nssqdisc_trace(fmt, args...)
+#else
+#define nssqdisc_trace(fmt, args...) printk(KERN_DEBUG "%d:TRACE:"fmt, __LINE__, ##args)
+#endif
+
+/*
+ * State values
+ */
+#define NSSQDISC_STATE_IDLE 0
+#define NSSQDISC_STATE_READY 1
+#define NSSQDISC_STATE_BUSY 2
+
+#define NSSQDISC_STATE_INIT_FAILED -1
+#define NSSQDISC_STATE_ASSIGN_SHAPER_SEND_FAIL -2
+#define NSSQDISC_STATE_SHAPER_ASSIGN_FAILED -3
+#define NSSQDISC_STATE_NODE_ALLOC_SEND_FAIL -4
+#define NSSQDISC_STATE_NODE_ALLOC_FAIL -5
+#define NSSQDISC_STATE_ROOT_SET_SEND_FAIL -6
+#define NSSQDISC_STATE_ROOT_SET_FAIL -7
+#define NSSQDISC_STATE_DEFAULT_SET_SEND_FAIL -8
+#define NSSQDISC_STATE_DEFAULT_SET_FAIL -9
+#define NSSQDISC_STATE_CHILD_ALLOC_SEND_FAIL -10
+#define NSSQDISC_STATE_NODE_ALLOC_FAIL_CHILD -11
+#define NSSQDISC_STATE_FAILED_RESPONSE -12
+
+#define NSSQDISC_BRIDGE_PORT_MAX 100
+
+void *nssqdisc_ctx;				/* Shaping context for nssqdisc */
+
+struct nssqdisc_qdisc {
+	struct Qdisc *qdisc;			/* Handy pointer back to containing qdisc */
+	void *nss_shaping_ctx;			/* NSS context for general operations */
+	int32_t nss_interface_number;		/* NSS Interface number we are shaping on */
+	nss_shaper_node_type_t type;		/* Type of shaper node */
+	bool is_root;				/* True if root qdisc on a net device */
+	bool is_bridge;				/* True when qdisc is a bridge */
+	bool is_virtual;			/* True when this is a non-bridge qdisc BUT
+						 * the device is represented as a virtual in
+						 * the NSS e.g. perhaps operating on a wifi interface.
+						 */
+	bool destroy_virtual_interface;		/* Set if the interface is first registered in NSS by
+						 * us. This means it needs to be unregistered when the
+						 * module goes down.
+						 */
+	volatile atomic_t state;		/* < 0: Signal that qdisc has 'failed'. 0
+						 * indicates 'pending' setup.  > 0 is READY.
+						 * NOTE: volatile AND atomic - this is polled
+						 * AND is used for synchronisation.
+						 */
+	uint32_t shaper_id;			/* Used when is_root. Child qdiscs use this
+						 * information to know what shaper under
+						 * which to create shaper nodes
+						 */
+	uint32_t qos_tag;			/* QoS tag of this node */
+	volatile int32_t pending_final_state;	/* Used to let the callback cycle know what
+						 * state to set the qdisc in on successful
+						 * completion.
+						 */
+	void *virtual_interface_context;	/* Context provided by the NSS driver for
+						 * new interfaces that are registered.
+						 */
+	void *bounce_context;			/* Context for bounce registration. Bounce
+						 * enables packets to be sent to NSS for
+						 * shaping purposes, and is returned to
+						 * Linux for transmit.
+						 */
+	void (*stats_update_callback)(void *, struct nss_shaper_response *);
+						/* Stats update callback function for qdisc specific
+						 * stats update
+						 */
+	struct timer_list stats_get_timer;	/* Timer used to poll for stats */
+	atomic_t pending_stat_requests;		/* Number of pending stats responses */
+	struct nss_shaper_response_shaper_node_basic_stats_get_success basic_stats_latest;
+						/* Latest stats obtained */
+};
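+
+/*
+ * Informational: a qdisc starts in NSSQDISC_STATE_IDLE while a configuration
+ * message is in flight, moves to NSSQDISC_STATE_READY on a successful
+ * response, and to one of the negative failure states otherwise. The
+ * configuration paths below poll the state with atomic_read()/yield() until
+ * it leaves IDLE, which is why kernel preemption is required.
+ */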
+
+/*
+ * nssqdisc bridge update structure
+ */
+struct nssqdisc_bridge_update {
+	int port_list[NSSQDISC_BRIDGE_PORT_MAX];
+	int port_list_count;
+	int unassign_count;
+};
+
+/*
+ * nssqdisc bridge task types
+ */
+enum nssqdisc_bshaper_tasks {
+	NSSQDISC_ASSIGN_BSHAPER,
+	NSSQDISC_UNASSIGN_BSHAPER,
+};
+
+/*
+ * nssqdisc_get_br_port()
+ *	Returns the bridge port structure of the bridge to which the device is attached.
+ */
+static inline struct net_bridge_port *nssqdisc_get_br_port(const struct net_device *dev)
+{
+	struct net_bridge_port *br_port;
+
+	if (!dev)
+		return NULL;
+
+	rcu_read_lock();
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
+	br_port = br_port_get_rcu(dev);
+#else
+	br_port = rcu_dereference(dev->br_port);
+#endif
+	rcu_read_unlock();
+
+	return br_port;
+}
+
+/*
+ * nssqdisc_attach_bshaper_callback()
+ *	Callback function for bridge shaper attach to an interface.
+ */
+static void nssqdisc_attach_bshaper_callback(void *app_data, struct nss_shaper_response *response)
+{
+	struct Qdisc *sch = (struct Qdisc *)app_data;
+	struct nssqdisc_qdisc *nq = qdisc_priv(sch);
+
+	if (response->type < 0) {
+		nssqdisc_info("%s: B-shaper attach FAILED - response: %d\n", __func__, response->type);
+		atomic_set(&nq->state, NSSQDISC_STATE_FAILED_RESPONSE);
+		return;
+	}
+
+	nssqdisc_info("%s: B-shaper attach SUCCESS - response %d\n", __func__, response->type);
+	atomic_set(&nq->state, NSSQDISC_STATE_READY);
+}
+
+/*
+ * nssqdisc_attach_bshaper()
+ *	Attaches a given bridge shaper to a given interface.
+ */
+static int nssqdisc_attach_bshaper(struct Qdisc *sch, uint32_t if_num)
+{
+	struct nss_shaper_configure shaper_assign;
+	struct nssqdisc_qdisc *nq = (struct nssqdisc_qdisc *)qdisc_priv(sch);
+	int32_t state, rc;
+
+	nssqdisc_info("%s: Attaching B-shaper %u to interface %u\n", __func__,
+			nq->shaper_id, if_num);
+
+	state = atomic_read(&nq->state);
+	if (state != NSSQDISC_STATE_READY) {
+		nssqdisc_error("%s: qdisc %p (type %d) is not ready: State - %d\n",
+				__func__, sch, nq->type, state);
+		BUG();
+	}
+
+	/*
+	 * Set shaper node state to IDLE
+	 */
+	atomic_set(&nq->state, NSSQDISC_STATE_IDLE);
+
+	shaper_assign.interface_num = if_num;
+	shaper_assign.i_shaper = false;
+	shaper_assign.cb = nssqdisc_attach_bshaper_callback;
+	shaper_assign.app_data = sch;
+	shaper_assign.owner = THIS_MODULE;
+	shaper_assign.type = NSS_SHAPER_CONFIG_TYPE_ASSIGN_SHAPER;
+	shaper_assign.mt.assign_shaper.shaper_num = nq->shaper_id;
+
+	rc = nss_shaper_config_send(nq->nss_shaping_ctx, &shaper_assign);
+	if (rc != NSS_TX_SUCCESS) {
+		nssqdisc_warning("%s: Failed to send bshaper (id: %u) attach for "
+				"interface(if_num: %u)\n", __func__, nq->shaper_id, if_num);
+		return -1;
+	}
+
+	while (NSSQDISC_STATE_IDLE == (state = atomic_read(&nq->state))) {
+		yield();
+	}
+
+	if (state == NSSQDISC_STATE_FAILED_RESPONSE) {
+		nssqdisc_error("%s: Failed to attach B-shaper %u to interface %u\n",
+				__func__, nq->shaper_id, if_num);
+		return -1;
+	}
+
+	nssqdisc_info("%s: Attach of B-shaper %u to interface %u is complete\n",
+			__func__, nq->shaper_id, if_num);
+	return 0;
+}
+
+/*
+ * nssqdisc_detach_bshaper_callback()
+ *	Call back function for bridge shaper detach
+ */
+static void nssqdisc_detach_bshaper_callback(void *app_data, struct nss_shaper_response *response)
+{
+	struct Qdisc *sch = (struct Qdisc *)app_data;
+	struct nssqdisc_qdisc *nq = qdisc_priv(sch);
+
+	if (response->type < 0) {
+		nssqdisc_info("%s: B-shaper detach FAILED - response: %d\n",
+				__func__, response->type);
+		atomic_set(&nq->state, NSSQDISC_STATE_FAILED_RESPONSE);
+		return;
+	}
+
+	nssqdisc_info("%s: B-shaper detach SUCCESS\n", __func__);
+	atomic_set(&nq->state, NSSQDISC_STATE_READY);
+}
+
+/*
+ * nssqdisc_detach_bshaper()
+ *	Detaches a given bridge shaper from a given interface
+ */
+static int nssqdisc_detach_bshaper(struct Qdisc *sch, uint32_t if_num)
+{
+	struct nss_shaper_configure shaper_assign;
+	struct nssqdisc_qdisc *nq = (struct nssqdisc_qdisc *)qdisc_priv(sch);
+	int32_t state, rc;
+
+	nssqdisc_info("%s: Detaching B-shaper %u from interface %u\n",
+			__func__, nq->shaper_id, if_num);
+
+	state = atomic_read(&nq->state);
+	if (state != NSSQDISC_STATE_READY) {
+		nssqdisc_error("%s: qdisc %p (type %d) is not ready: %d\n",
+				__func__, sch, nq->type, state);
+		BUG();
+	}
+
+	/*
+	 * Set shaper node state to IDLE
+	 */
+	atomic_set(&nq->state, NSSQDISC_STATE_IDLE);
+
+	shaper_assign.interface_num = if_num;
+	shaper_assign.i_shaper = false;
+	shaper_assign.cb = nssqdisc_detach_bshaper_callback;
+	shaper_assign.app_data = sch;
+	shaper_assign.owner = THIS_MODULE;
+	shaper_assign.type = NSS_SHAPER_CONFIG_TYPE_UNASSIGN_SHAPER;
+	shaper_assign.mt.unassign_shaper.shaper_num = nq->shaper_id;
+
+	rc = nss_shaper_config_send(nq->nss_shaping_ctx, &shaper_assign);
+	if (rc != NSS_TX_SUCCESS) {
+		nssqdisc_warning("%s: Failed to send B-shaper (id: %u) detach "
+			"for interface(if_num: %u)\n", __func__, nq->shaper_id, if_num);
+		return -1;
+	}
+
+	nssqdisc_info("%s: Detach of B-shaper %u to interface %u is complete.",
+			__func__, nq->shaper_id, if_num);
+	atomic_set(&nq->state, NSSQDISC_STATE_READY);
+	return 0;
+}
+
+/*
+ * nssqdisc_refresh_bshaper_assignment()
+ *	Performs assign or unassign of B-shapers for the interfaces on the bridge.
+ */
+static int nssqdisc_refresh_bshaper_assignment(struct Qdisc *br_qdisc,
+					enum nssqdisc_bshaper_tasks task)
+{
+	struct net_device *dev;
+	struct net_device *br_dev = qdisc_dev(br_qdisc);
+	struct nssqdisc_qdisc *nq;
+	struct nssqdisc_bridge_update br_update;
+	int i;
+
+	if ((br_qdisc->parent != TC_H_ROOT) && (br_qdisc->parent != TC_H_UNSPEC)) {
+		nssqdisc_error("%s: Qdisc not root qdisc for the bridge interface: "
+				"Handle - %x", __func__, br_qdisc->parent);
+		return -1;
+	}
+
+	nq = qdisc_priv(br_qdisc);
+
+	/*
+	 * Initialize the bridge update structure.
+	 */
+	br_update.port_list_count = 0;
+	br_update.unassign_count = 0;
+
+	read_lock(&dev_base_lock);
+	dev = first_net_device(&init_net);
+	while (dev) {
+		struct net_bridge_port *br_port = nssqdisc_get_br_port(dev);
+		int nss_if_num;
+
+		nssqdisc_info("%s: Scanning device %s", __func__, dev->name);
+		if (!br_port || !br_port->br) {
+			goto nextdev;
+		}
+
+		/*
+		 * Skip this device if it is not on the
+		 * bridge we are concerned with.
+		 */
+		if (br_port->br->dev != br_dev) {
+			goto nextdev;
+		}
+
+		/*
+		 * If the interface is known to the NSS then we will have to shape it,
+		 * irrespective of whether it has an interface qdisc or not.
+		 */
+		nss_if_num = nss_get_interface_number(nq->nss_shaping_ctx, dev);
+		if (nss_if_num < 0) {
+			goto nextdev;
+		}
+
+		nssqdisc_info("%s: Will be linking %s to bridge %s\n", __func__,
+				dev->name, br_dev->name);
+		br_update.port_list[br_update.port_list_count++] = nss_if_num;
+nextdev:
+		dev = next_net_device(dev);
+	}
+	read_unlock(&dev_base_lock);
+
+	nssqdisc_info("%s: List count %d\n", __func__, br_update.port_list_count);
+
+	if (task == NSSQDISC_ASSIGN_BSHAPER) {
+		/*
+		 * Loop through the ports and assign B-shapers to them.
+		 */
+		for (i = 0; i < br_update.port_list_count; i++) {
+			if (nssqdisc_attach_bshaper(br_qdisc, br_update.port_list[i]) >= 0) {
+				nssqdisc_info("%s: Interface %u added to bridge %s\n",
+					__func__, br_update.port_list[i], br_dev->name);
+				continue;
+			}
+			nssqdisc_error("%s: Unable to attach bshaper with shaper-id: %u, "
+				"to interface if_num: %d\n", __func__, nq->shaper_id,
+				br_update.port_list[i]);
+			br_update.unassign_count = i;
+			break;
+		}
+		nssqdisc_info("%s: Unassign count %d\n", __func__, br_update.unassign_count);
+		if (br_update.unassign_count == 0) {
+			return 0;
+		}
+
+		/*
+		 * In case of a failure, unassign the B-shapers that were assigned above
+		 */
+		for (i = 0; i < br_update.unassign_count; i++) {
+			if (nssqdisc_detach_bshaper(br_qdisc, br_update.port_list[i]) >= 0) {
+				continue;
+			}
+			nssqdisc_error("%s: Unable to detach bshaper with shaper-id: %u, "
+				"from interface if_num: %d\n", __func__, nq->shaper_id,
+				br_update.port_list[i]);
+			BUG();
+		}
+
+		nssqdisc_info("%s: Failed to link interfaces to bridge\n", __func__);
+		return -1;
+	} else if (task == NSSQDISC_UNASSIGN_BSHAPER) {
+		/*
+		 * Loop through the ports and detach their B-shapers.
+		 */
+		for (i = 0; i < br_update.port_list_count; i++) {
+			if (nssqdisc_detach_bshaper(br_qdisc, br_update.port_list[i]) >= 0) {
+				nssqdisc_info("%s: Interface %u removed from bridge %s\n",
+					__func__, br_update.port_list[i], br_dev->name);
+				continue;
+			}
+			nssqdisc_error("%s: Unable to detach bshaper with shaper-id: %u, "
+				"from interface if_num: %d\n", __func__, nq->shaper_id,
+				br_update.port_list[i]);
+			BUG();
+		}
+	}
+
+	return 0;
+}
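+
+/*
+ * Informational: this refresh runs on the bridge root qdisc; for example,
+ * nssqdisc_destroy() below invokes it with NSSQDISC_UNASSIGN_BSHAPER to
+ * strip B-shapers from all bridge ports before freeing the root shaper node.
+ */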
+
+/*
+ * nssqdisc_root_cleanup_final()
+ *	Performs final cleanup of a root shaper node after all other
+ *	shaper node cleanup is complete.
+ */
+static void nssqdisc_root_cleanup_final(struct Qdisc *sch)
+{
+	struct nssqdisc_qdisc *nq = qdisc_priv(sch);
+
+	nssqdisc_info("%s: Root qdisc %p (type %d) final cleanup\n", __func__,
+				nq->qdisc, nq->type);
+
+	/*
+	 * If we are a bridge then we have to unregister for bridge bouncing
+	 * AND destroy the virtual interface that provides bridge shaping.
+	 */
+	if (nq->is_bridge) {
+		/*
+		 * Unregister for bouncing to the NSS for bridge shaping
+		 */
+		nssqdisc_info("%s: Unregister for bridge bouncing: %p\n", __func__,
+				nq->bounce_context);
+		nss_unregister_shaper_bounce_bridge(nq->nss_interface_number);
+
+		/*
+		 * Unregister the virtual interface we use to act as shaper
+		 * for bridge shaping.
+		 */
+		nssqdisc_info("%s: Release root bridge virtual interface: %p\n",
+				__func__, nq->virtual_interface_context);
+		nss_destroy_virt_if(nq->virtual_interface_context);
+	}
+
+	/*
+	 * If we are a virtual interface then we have to unregister for interface
+	 * bouncing.
+	 */
+	if (nq->is_virtual) {
+		/*
+		 * Unregister for interface bouncing of packets
+		 */
+		nssqdisc_info("%s: Unregister for interface bouncing: %p\n",
+				__func__, nq->bounce_context);
+		nss_unregister_shaper_bounce_interface(nq->nss_interface_number);
+	}
+
+	/*
+	 * Finally unregister for shaping
+	 */
+	nssqdisc_info("%s: Unregister for shaping\n", __func__);
+	nss_unregister_shaping(nq->nss_shaping_ctx);
+
+	/*
+	 * Now set our final state
+	 */
+	atomic_set(&nq->state, nq->pending_final_state);
+}
+
+/*
+ * nssqdisc_root_cleanup_shaper_unassign_callback()
+ *	Invoked on the response to a shaper unassign config command issued
+ */
+static void nssqdisc_root_cleanup_shaper_unassign_callback(void *app_data,
+					struct nss_shaper_response *response)
+{
+	struct Qdisc *sch = (struct Qdisc *)app_data;
+	struct nssqdisc_qdisc *nq __attribute__ ((unused)) = qdisc_priv(sch);
+	nssqdisc_info("%s: Root qdisc %p (type %d) shaper unsassign "
+		"response: %d\n", __func__, sch, nq->type, response->type);
+	nssqdisc_root_cleanup_final(sch);
+}
+
+/*
+ * nssqdisc_root_cleanup_shaper_unassign()
+ *	Issue command to unassign the shaper
+ */
+static void nssqdisc_root_cleanup_shaper_unassign(struct Qdisc *sch)
+{
+	struct nssqdisc_qdisc *nq = qdisc_priv(sch);
+	struct nss_shaper_configure shaper_unassign;
+	nss_tx_status_t rc;
+
+	nssqdisc_info("%s: Root qdisc %p (type %d): shaper unassign: %d\n",
+			__func__, sch, nq->type, nq->shaper_id);
+
+	shaper_unassign.interface_num = nq->nss_interface_number;
+	shaper_unassign.i_shaper = (nq->is_bridge)? false : true;
+	shaper_unassign.cb = nssqdisc_root_cleanup_shaper_unassign_callback;
+	shaper_unassign.app_data = sch;
+	shaper_unassign.owner = THIS_MODULE;
+	shaper_unassign.type = NSS_SHAPER_CONFIG_TYPE_UNASSIGN_SHAPER;
+	shaper_unassign.mt.unassign_shaper.shaper_num = nq->shaper_id;
+
+	rc = nss_shaper_config_send(nq->nss_shaping_ctx, &shaper_unassign);
+	if (rc == NSS_TX_SUCCESS) {
+		return;
+	}
+
+	nssqdisc_error("%s: Root qdisc %p (type %d): unassign command send failed: "
+		"%d, shaper id: %d\n", __func__, sch, nq->type, rc, nq->shaper_id);
+
+	nssqdisc_root_cleanup_final(sch);
+}
+
+/*
+ * nssqdisc_root_cleanup_free_node_callback()
+ *	Invoked on the response to freeing a shaper node
+ */
+static void nssqdisc_root_cleanup_free_node_callback(void *app_data,
+				struct nss_shaper_response *response)
+{
+	struct Qdisc *sch = (struct Qdisc *)app_data;
+	struct nssqdisc_qdisc *nq __attribute__ ((unused)) = qdisc_priv(sch);
+	nssqdisc_info("%s: Root qdisc %p (type %d) free response "
+		"type: %d\n", __func__, sch, nq->type, response->type);
+
+	nssqdisc_root_cleanup_shaper_unassign(sch);
+}
+
+/*
+ * nssqdisc_root_cleanup_free_node()
+ *	Free the shaper node, issue command to do so.
+ */
+static void nssqdisc_root_cleanup_free_node(struct Qdisc *sch)
+{
+	struct nssqdisc_qdisc *nq = qdisc_priv(sch);
+	struct nss_shaper_configure shaper_node_free;
+	nss_tx_status_t rc;
+
+	nssqdisc_info("%s: Root qdisc %p (type %d): freeing shaper node\n",
+			__func__, sch, nq->type);
+
+	shaper_node_free.interface_num = nq->nss_interface_number;
+	shaper_node_free.i_shaper = (nq->is_bridge)? false : true;
+	shaper_node_free.cb = nssqdisc_root_cleanup_free_node_callback;
+	shaper_node_free.app_data = sch;
+	shaper_node_free.owner = THIS_MODULE;
+	shaper_node_free.type = NSS_SHAPER_CONFIG_TYPE_FREE_SHAPER_NODE;
+	shaper_node_free.mt.free_shaper_node.qos_tag = nq->qos_tag;
+
+	rc = nss_shaper_config_send(nq->nss_shaping_ctx, &shaper_node_free);
+	if (rc == NSS_TX_SUCCESS) {
+		return;
+	}
+
+	nssqdisc_error("%s: Qdisc %p (type %d): free command send "
+		"failed: %d, qos tag: %x\n", __func__, sch, nq->type,
+		rc, nq->qos_tag);
+
+	/*
+	 * Move onto unassigning the shaper instead
+	 */
+	nssqdisc_root_cleanup_shaper_unassign(sch);
+}
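+
+/*
+ * Informational: root teardown runs as a callback chain:
+ * nssqdisc_root_cleanup_free_node() -> free-node callback ->
+ * nssqdisc_root_cleanup_shaper_unassign() -> unassign callback ->
+ * nssqdisc_root_cleanup_final(), which releases the bounce registrations,
+ * any bridge virtual interface and the shaping context.
+ */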
+
+/*
+ * nssqdisc_root_init_root_assign_callback()
+ *	Invoked on the response to assigning shaper node as root
+ */
+static void nssqdisc_root_init_root_assign_callback(void *app_data,
+				struct nss_shaper_response *response)
+{
+	struct Qdisc *sch = (struct Qdisc *)app_data;
+	struct nssqdisc_qdisc *nq = qdisc_priv(sch);
+
+	nssqdisc_info("%s: Root assign response for qdisc %p (type %d), "
+		"response type: %d\n", __func__, sch, nq->type, response->type);
+
+	if (response->type < 0) {
+		nq->pending_final_state = NSSQDISC_STATE_ROOT_SET_FAIL;
+		nssqdisc_root_cleanup_free_node(sch);
+		return;
+	}
+
+	/*
+	 * If we are not a root upon a bridge then we are ready
+	 */
+	if (!nq->is_bridge) {
+		nssqdisc_info("%s: Qdisc %p (type %d): set as root and "
+			"default, and is READY\n", __func__, sch, nq->type);
+		atomic_set(&nq->state, NSSQDISC_STATE_READY);
+		return;
+	}
+
+	/*
+	 * We need to scan the bridge for ports that must have shapers
+	 * assigned to them
+	 */
+	nssqdisc_info("%s: Qdisc %p (type %d): set as root is done. "
+		"Bridge update..\n", __func__, sch, nq->type);
+
+	atomic_set(&nq->state, NSSQDISC_STATE_READY);
+}
+
+/*
+ * nssqdisc_root_init_alloc_node_callback()
+ *	Invoked on the response to creating a shaper node as root
+ */
+static void nssqdisc_root_init_alloc_node_callback(void *app_data,
+				struct nss_shaper_response *response)
+{
+	struct Qdisc *sch = (struct Qdisc *)app_data;
+	struct nssqdisc_qdisc *nq = qdisc_priv(sch);
+	struct nss_shaper_configure root_assign;
+	nss_tx_status_t rc;
+
+	nssqdisc_info("%s: Qdisc %p (type %d) root alloc node "
+		"response type: %d\n", __func__, sch, nq->type,
+		response->type);
+
+	if (response->type < 0) {
+		nq->pending_final_state = NSSQDISC_STATE_NODE_ALLOC_FAIL;
+
+		/*
+		 * No shaper node created, clean up by unassigning the shaper
+		 */
+		nssqdisc_root_cleanup_shaper_unassign(sch);
+		return;
+	}
+
+	/*
+	 * Shaper node has been allocated. Next step is to assign
+	 * the shaper node as the root node of our shaper.
+	 */
+	root_assign.interface_num = nq->nss_interface_number;
+	root_assign.i_shaper = (nq->is_bridge)? false : true;
+	root_assign.cb = nssqdisc_root_init_root_assign_callback;
+	root_assign.app_data = sch;
+	root_assign.owner = THIS_MODULE;
+	root_assign.type = NSS_SHAPER_CONFIG_TYPE_SET_ROOT;
+	root_assign.mt.set_root_node.qos_tag = nq->qos_tag;
+
+	rc = nss_shaper_config_send(nq->nss_shaping_ctx, &root_assign);
+	if (rc == NSS_TX_SUCCESS) {
+		return;
+	}
+
+	nssqdisc_error("%s: Root assign send command failed: %d\n",
+			__func__, rc);
+
+	nq->pending_final_state = NSSQDISC_STATE_ROOT_SET_SEND_FAIL;
+	nssqdisc_root_cleanup_free_node(sch);
+}
+
+/*
+ * nssqdisc_root_init_shaper_assign_callback()
+ *	Invoked on the response to a shaper assign config command issued
+ */
+static void nssqdisc_root_init_shaper_assign_callback(void *app_data,
+				struct nss_shaper_response *response)
+{
+	struct Qdisc *sch = (struct Qdisc *)app_data;
+	struct nssqdisc_qdisc *nq = qdisc_priv(sch);
+	struct nss_shaper_configure shaper_node_create;
+	nss_tx_status_t rc;
+
+	nssqdisc_info("%s: Qdisc %p (type %d): shaper assign response type: %d\n",
+					__func__, sch, nq->type, response->type);
+
+	if (response->type < 0) {
+		/*
+		 * Unable to assign a shaper, perform cleanup from final stage
+		 */
+		nq->pending_final_state = NSSQDISC_STATE_SHAPER_ASSIGN_FAILED;
+		nssqdisc_root_cleanup_final(sch);
+		return;
+	}
+
+	/*
+	 * Shaper has been allocated and assigned
+	 */
+	nq->shaper_id = response->rt.shaper_assign_success.shaper_num;
+	nssqdisc_info("%s: Qdisc %p (type %d), shaper assigned: %u\n",
+				__func__, sch, nq->type, nq->shaper_id);
+
+	/*
+	 * Next step is to allocate our actual shaper node
+	 * qos_tag will be the handle we have been given
+	 */
+	shaper_node_create.interface_num = nq->nss_interface_number;
+	shaper_node_create.i_shaper = (nq->is_bridge)? false : true;
+	shaper_node_create.cb = nssqdisc_root_init_alloc_node_callback;
+	shaper_node_create.app_data = sch;
+	shaper_node_create.owner = THIS_MODULE;
+	shaper_node_create.type = NSS_SHAPER_CONFIG_TYPE_ALLOC_SHAPER_NODE;
+	shaper_node_create.mt.alloc_shaper_node.node_type = nq->type;
+	shaper_node_create.mt.alloc_shaper_node.qos_tag = nq->qos_tag;
+
+	rc = nss_shaper_config_send(nq->nss_shaping_ctx, &shaper_node_create);
+	if (rc == NSS_TX_SUCCESS) {
+		return;
+	}
+
+	/*
+	 * Unable to send alloc node command, cleanup from unassigning the shaper
+	 */
+	nssqdisc_error("%s: Qdisc %p (type %d) create command failed: %d\n",
+			__func__, sch, nq->type, rc);
+
+	nq->pending_final_state = NSSQDISC_STATE_NODE_ALLOC_SEND_FAIL;
+	nssqdisc_root_cleanup_shaper_unassign(sch);
+}
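+
+/*
+ * Informational: root bring-up mirrors the teardown chain in reverse:
+ * shaper assign -> root shaper node alloc -> set as root, each step issued
+ * from the previous step's response callback. A failure at any step unwinds
+ * through the cleanup functions above.
+ */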
+
+/*
+ * nssqdisc_child_cleanup_final()
+ *	Perform final cleanup of a shaper node after all shaper node
+ *	cleanup is complete.
+ */
+static void nssqdisc_child_cleanup_final(struct Qdisc *sch)
+{
+	struct nssqdisc_qdisc *nq = qdisc_priv(sch);
+
+	nssqdisc_info("%s: Final cleanup type %d: %p\n", __func__,
+			nq->type, sch);
+
+	/*
+	 * Finally unregister for shaping
+	 */
+	nssqdisc_info("%s: Unregister for shaping\n", __func__);
+	nss_unregister_shaping(nq->nss_shaping_ctx);
+
+	/*
+	 * Now set our final state
+	 */
+	atomic_set(&nq->state, nq->pending_final_state);
+}
+
+/*
+ * nssqdisc_child_cleanup_free_node_callback()
+ *	Invoked on the response to freeing a child shaper node
+ */
+static void nssqdisc_child_cleanup_free_node_callback(void *app_data,
+				struct nss_shaper_response *response)
+{
+	struct Qdisc *sch = (struct Qdisc *)app_data;
+	struct nssqdisc_qdisc *nq __attribute__((unused)) = qdisc_priv(sch);
+
+	nssqdisc_info("%s: Qdisc %p (type %d): child free response type: %d\n",
+			__func__, sch, nq->type, response->type);
+
+	if (response->type < 0) {
+		nssqdisc_error("%s: Qdisc %p (type %d): free shaper node failed\n",
+				__func__, sch, nq->type);
+	} else {
+		nssqdisc_info("%s: Qdisc %p (type %d): child shaper node "
+				"free complete\n", __func__, sch, nq->type);
+	}
+
+	/*
+	 * Perform final cleanup
+	 */
+	nssqdisc_child_cleanup_final(sch);
+}
+
+/*
+ * nssqdisc_child_cleanup_free_node()
+ *	Free the child shaper node, issue command to do so.
+ */
+static void nssqdisc_child_cleanup_free_node(struct Qdisc *sch)
+{
+	struct nssqdisc_qdisc *nq = qdisc_priv(sch);
+	struct nss_shaper_configure shaper_node_free;
+	nss_tx_status_t rc;
+
+	nssqdisc_info("%s: Qdisc %p (type %d): free shaper node command\n",
+			__func__, sch, nq->type);
+
+	shaper_node_free.interface_num = nq->nss_interface_number;
+	shaper_node_free.i_shaper = (nq->is_bridge)? false : true;
+	shaper_node_free.cb = nssqdisc_child_cleanup_free_node_callback;
+	shaper_node_free.app_data = sch;
+	shaper_node_free.owner = THIS_MODULE;
+	shaper_node_free.type = NSS_SHAPER_CONFIG_TYPE_FREE_SHAPER_NODE;
+	shaper_node_free.mt.free_shaper_node.qos_tag = nq->qos_tag;
+
+	rc = nss_shaper_config_send(nq->nss_shaping_ctx, &shaper_node_free);
+	if (rc == NSS_TX_SUCCESS) {
+		return;
+	}
+
+	nssqdisc_error("%s: Qdisc %p (type %d): child free node command send "
+			"failed: %d, qos tag: %x\n", __func__, sch, nq->type,
+			rc, nq->qos_tag);
+
+	/*
+	 * Perform final cleanup
+	 */
+	nssqdisc_child_cleanup_final(sch);
+}
+
+/*
+ * nssqdisc_child_init_alloc_node_callback()
+ *	Invoked on the response to creating a child shaper node
+ */
+static void nssqdisc_child_init_alloc_node_callback(void *app_data,
+				struct nss_shaper_response *response)
+{
+	struct Qdisc *sch = (struct Qdisc *)app_data;
+	struct nssqdisc_qdisc *nq = qdisc_priv(sch);
+
+	nssqdisc_info("%s: Qdisc %p (type %d): child alloc node, response "
+		"type: %d\n", __func__, sch, nq->type, response->type);
+
+	if (response->type < 0) {
+		/*
+		 * Cleanup from final stage
+		 */
+		nq->pending_final_state = NSSQDISC_STATE_NODE_ALLOC_FAIL_CHILD;
+		nssqdisc_child_cleanup_final(sch);
+		return;
+	}
+
+	/*
+	 * Shaper node has been allocated
+	 */
+	nssqdisc_info("%s: Qdisc %p (type %d): shaper node successfully "
+			"created as a child node\n",__func__, sch, nq->type);
+
+	atomic_set(&nq->state, NSSQDISC_STATE_READY);
+}
+
+/*
+ * nssqdisc_bounce_callback()
+ *	Enqueues packets bounced back from NSS firmware.
+ */
+static void nssqdisc_bounce_callback(void *app_data, struct sk_buff *skb)
+{
+	struct Qdisc *sch = (struct Qdisc *)app_data;
+
+	/*
+	 * All we have to do is enqueue for transmit and schedule a dequeue
+	 */
+	__qdisc_enqueue_tail(skb, sch, &sch->q);
+	__netif_schedule(sch);
+}
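+
+/*
+ * Informational: this is the return path for packets handed to the NSS via
+ * nss_shaper_bounce_interface_packet()/nss_shaper_bounce_bridge_packet() in
+ * nssqdisc_enqueue() below; once shaped, packets are queued here for the
+ * normal Linux transmit path.
+ */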
+
+/*
+ * nssqdisc_peek()
+ *	Called to peek at the head of an nss qdisc
+ */
+static struct sk_buff *nssqdisc_peek(struct Qdisc *sch)
+{
+	return skb_peek(&sch->q);
+}
+
+/*
+ * nssqdisc_drop()
+ *	Called to drop the packet at the head of queue
+ */
+static unsigned int nssqdisc_drop(struct Qdisc *sch)
+{
+	return __qdisc_queue_drop_head(sch, &sch->q);
+}
+
+/*
+ * nssqdisc_reset()
+ *	Called when a qdisc is reset
+ */
+static void nssqdisc_reset(struct Qdisc *sch)
+{
+	struct nssqdisc_qdisc *nq __attribute__ ((unused)) = qdisc_priv(sch);
+
+	nssqdisc_info("%s: Qdisc %p (type %d) resetting\n",
+			__func__, sch, nq->type);
+
+	/*
+	 * Delete all packets pending in the output queue and reset stats
+	 */
+	qdisc_reset_queue(sch);
+}
+
+/*
+ * nssqdisc_enqueue()
+ *	Generic enqueue call for enqueuing packets into NSS for shaping
+ */
+static int nssqdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+	struct nssqdisc_qdisc *nq = qdisc_priv(sch);
+	nss_tx_status_t status;
+
+	/*
+	 * If we are not the root qdisc then we should not be getting packets!!
+	 */
+	if (!nq->is_root) {
+		nssqdisc_warning("%s: Qdisc %p (type %d): unexpected packet "
+			"for child qdisc - skb: %p\n", __func__, sch, nq->type, skb);
+		__qdisc_enqueue_tail(skb, sch, &sch->q);
+		__netif_schedule(sch);
+		return NET_XMIT_SUCCESS;
+	}
+
+	/*
+	 * Packet enqueued in Linux for transmit.
+	 *
+	 * What we do here depends upon whether we are a bridge or not. If not a
+	 * bridge then it depends on if we are a physical or virtual interface
+	 * The decision we are trying to reach is whether to bounce a packet to
+	 * the NSS to be shaped or not.
+	 *
+	 * is_bridge		is_virtual	Meaning
+	 * ---------------------------------------------------------------------------
+	 * false		false		Physical interface in NSS
+	 *
+	 * Action: Simply allow the packet to be dequeued. The packet will be
+	 * shaped by the interface shaper in the NSS by the usual transmit path.
+	 *
+	 *
+	 * false		true		Physical interface in Linux.
+	 * 					NSS still responsible for shaping
+	 *
+	 * Action: Bounce the packet to the NSS virtual interface that represents
+	 * this Linux physical interface for INTERFACE shaping. When the packet is
+	 * returned from being shaped we allow it to be dequeued for transmit.
+	 *
+	 * true			n/a		Logical Linux interface.
+	 *					Root qdisc created a virtual interface
+	 *					to represent it in the NSS for shaping
+	 *					purposes.
+	 *
+	 * Action: Bounce the packet to the NSS virtual interface (for BRIDGE shaping)
+	 * the bridge root qdisc created for it. When the packet is returned from being
+	 * shaped we allow it to be dequeued for transmit.
+	 */
+
+	if (!nq->is_bridge && !nq->is_virtual) {
+		/*
+		 * TX to an NSS physical - the shaping will occur as part of normal
+		 * transmit path.
+		 */
+		__qdisc_enqueue_tail(skb, sch, &sch->q);
+		__netif_schedule(sch);
+		return NET_XMIT_SUCCESS;
+	}
+
+	if (!nq->is_bridge && nq->is_virtual) {
+		/*
+		 * TX to a physical Linux (NSS virtual).  Bounce packet to NSS for
+		 * interface shaping.
+		 */
+		status = nss_shaper_bounce_interface_packet(nq->bounce_context,
+							nq->nss_interface_number, skb);
+		if (status != NSS_TX_SUCCESS) {
+			/*
+			 * Just transmit anyway, we don't want to lose the packet
+			 */
+			nssqdisc_warning("%s: Qdisc %p (type %d): failed to bounce for "
+				"interface: %d, skb: %p\n", __func__, sch, nq->type,
+				nq->nss_interface_number, skb);
+
+			__qdisc_enqueue_tail(skb, sch, &sch->q);
+			__netif_schedule(sch);
+		}
+		return NET_XMIT_SUCCESS;
+	}
+
+	/*
+	 * TX to a bridge; this is to be shaped by the B-shaper on the virtual interface created
+	 * to represent the bridge interface.
+	 */
+	status = nss_shaper_bounce_bridge_packet(nq->bounce_context, nq->nss_interface_number, skb);
+	if (status != NSS_TX_SUCCESS) {
+		/*
+		 * Just transmit anyway, we don't want to lose the packet
+		 */
+		nssqdisc_warning("%s: Qdisc %p (type %d): failed to bounce for bridge %d, skb: %p\n",
+					__func__, sch, nq->type, nq->nss_interface_number, skb);
+		__qdisc_enqueue_tail(skb, sch, &sch->q);
+		__netif_schedule(sch);
+	}
+	return NET_XMIT_SUCCESS;
+}
+
+/*
+ * nssqdisc_dequeue()
+ *	Generic dequeue call for dequeuing bounced packets.
+ */
+static struct sk_buff *nssqdisc_dequeue(struct Qdisc *sch)
+{
+	struct sk_buff *skb;
+
+	/*
+	 * We use __skb_dequeue() to ensure that
+	 * stats don't get updated twice.
+	 */
+	skb = __skb_dequeue(&sch->q);
+
+	return skb;
+}
+
+/*
+ * nssqdisc_set_default_callback()
+ *	The callback function for a shaper node set default
+ */
+static void nssqdisc_set_default_callback(void *app_data,
+			struct nss_shaper_response *response)
+{
+	struct Qdisc *sch = (struct Qdisc *)app_data;
+	struct nssqdisc_qdisc *nq = qdisc_priv(sch);
+
+	nssqdisc_info("%s: Qdisc %p (type %d): shaper node set default, response type: %d\n",
+			__func__, sch, nq->type, response->type);
+
+	if (response->type < 0) {
+		atomic_set(&nq->state, NSSQDISC_STATE_FAILED_RESPONSE);
+		return;
+	}
+
+	nssqdisc_info("%s: Qdisc %p (type %d): attach complete\n", __func__, sch, nq->type);
+	atomic_set(&nq->state, NSSQDISC_STATE_READY);
+}
+
+/*
+ * nssqdisc_node_set_default()
+ *	Configuration function that sets shaper node as default for packet enqueue
+ */
+static int nssqdisc_set_default(struct Qdisc *sch)
+{
+	struct nssqdisc_qdisc *nq = qdisc_priv(sch);
+	struct nss_shaper_configure shaper_node_default;
+	int32_t state, rc;
+
+	nssqdisc_info("%s: Setting qdisc %p (type %d) as default\n", __func__,
+			sch, nq->type);
+
+	state = atomic_read(&nq->state);
+	if (state != NSSQDISC_STATE_READY) {
+		nssqdisc_error("%s: Qdisc %p (type %d): not ready: %d\n", __func__,
+				sch, nq->type, state);
+		BUG();
+	}
+
+	/*
+	 * Set shaper node state to IDLE
+	 */
+	atomic_set(&nq->state, NSSQDISC_STATE_IDLE);
+
+	shaper_node_default.interface_num = nq->nss_interface_number;
+	shaper_node_default.i_shaper = (nq->is_bridge)? false : true;
+	shaper_node_default.cb = nssqdisc_set_default_callback;
+	shaper_node_default.app_data = sch;
+	shaper_node_default.owner = THIS_MODULE;
+	shaper_node_default.type = NSS_SHAPER_CONFIG_TYPE_SET_DEFAULT;
+	shaper_node_default.mt.set_default_node.qos_tag = nq->qos_tag;
+
+	rc = nss_shaper_config_send(nq->nss_shaping_ctx, &shaper_node_default);
+	if (rc != NSS_TX_SUCCESS) {
+		nssqdisc_warning("%s: Failed to send set default message for "
+					"qdisc type %d\n", __func__, nq->type);
+		return -1;
+	}
+
+	/*
+	 * Wait until the set-default operation is complete, at which point the
+	 * state will leave IDLE. NOTE: This relies on the NSS driver being able
+	 * to operate asynchronously, which means kernel preemption is required.
+	 */
+	while (NSSQDISC_STATE_IDLE == (state = atomic_read(&nq->state))) {
+		yield();
+	}
+
+	if (state == NSSQDISC_STATE_FAILED_RESPONSE) {
+		nssqdisc_error("%s: Qdisc %p (type %d): failed to default "
+			"State: %d\n", __func__, sch, nq->type, state);
+		return -1;
+	}
+
+	nssqdisc_info("%s: Qdisc %p (type %d): shaper node default complete\n",
+			__func__, sch, nq->type);
+	return 0;
+}
+
+/*
+ * nssqdisc_node_attach_callback()
+ *	The callback function for a shaper node attach message
+ */
+static void nssqdisc_node_attach_callback(void *app_data,
+			struct nss_shaper_response *response)
+{
+	struct Qdisc *sch = (struct Qdisc *)app_data;
+	struct nssqdisc_qdisc *nq = qdisc_priv(sch);
+
+	nssqdisc_info("%s: Qdisc %p (type %d) shaper node attach response "
+			"type: %d\n", __func__, sch, nq->type, response->type);
+	if (response->type < 0) {
+		atomic_set(&nq->state, NSSQDISC_STATE_FAILED_RESPONSE);
+		return;
+	}
+
+	nssqdisc_info("%s: qdisc type %d: %p, attach complete\n", __func__,
+			nq->type, sch);
+
+	atomic_set(&nq->state, NSSQDISC_STATE_READY);
+}
+
+/*
+ * nssqdisc_node_attach()
+ *	Configuration function that helps attach a child shaper node to a parent.
+ */
+static int nssqdisc_node_attach(struct Qdisc *sch,
+	struct nss_shaper_configure *shaper_node_attach, int32_t attach_type)
+{
+	struct nssqdisc_qdisc *nq = qdisc_priv(sch);
+	int32_t state, rc;
+
+	nssqdisc_info("%s: Qdisc %p (type %d) attaching\n",
+			__func__, sch, nq->type);
+
+	state = atomic_read(&nq->state);
+	if (state != NSSQDISC_STATE_READY) {
+		nssqdisc_error("%s: Qdisc %p (type %d): not ready, state: %d\n",
+				__func__, sch, nq->type, state);
+		BUG();
+	}
+
+	/*
+	 * Set shaper node state to IDLE
+	 */
+	atomic_set(&nq->state, NSSQDISC_STATE_IDLE);
+
+	shaper_node_attach->interface_num = nq->nss_interface_number;
+	shaper_node_attach->i_shaper = (nq->is_bridge)? false : true;
+	shaper_node_attach->cb = nssqdisc_node_attach_callback;
+	shaper_node_attach->app_data = sch;
+	shaper_node_attach->owner = THIS_MODULE;
+	shaper_node_attach->type = attach_type;
+
+	rc = nss_shaper_config_send(nq->nss_shaping_ctx, shaper_node_attach);
+	if (rc != NSS_TX_SUCCESS) {
+		nssqdisc_warning("%s: Failed to send configure message for "
+					"qdisc type %d\n", __func__, nq->type);
+		return -1;
+	}
+
+	/*
+	 * Wait until the attach operation is complete, at which point the state
+	 * will leave IDLE. NOTE: This relies on the NSS driver being able to
+	 * operate asynchronously, which means kernel preemption is required.
+	 */
+	while (NSSQDISC_STATE_IDLE == (state = atomic_read(&nq->state))) {
+		yield();
+	}
+
+	if (state == NSSQDISC_STATE_FAILED_RESPONSE) {
+		nssqdisc_error("%s: Qdisc %p (type %d) failed to attach child "
+			"node, State: %d\n", __func__, sch, nq->type, state);
+		return -1;
+	}
+
+	nssqdisc_info("%s: Qdisc %p (type %d): shaper node attach complete\n",
+			__func__, sch, nq->type);
+	return 0;
+}
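+
+/*
+ * Illustrative caller sketch (structure-member usage assumed, not verified
+ * against the qdisc-specific sub-structures): a classful nss qdisc fills in
+ * the type-specific 'mt' fields before delegating here, e.g.
+ *
+ *	struct nss_shaper_configure attach;
+ *	... fill attach.mt with the parent and child qos_tags ...
+ *	if (nssqdisc_node_attach(sch, &attach, attach_type) < 0)
+ *		return -EINVAL;
+ *
+ * where attach_type is the qdisc-specific NSS_SHAPER_CONFIG_TYPE_*_ATTACH value.
+ */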
+
+/*
+ * nssqdisc_node_detach_callback()
+ *	The callback function for a shaper node detach message
+ */
+static void nssqdisc_node_detach_callback(void *app_data,
+			struct nss_shaper_response *response)
+{
+	struct Qdisc *sch = (struct Qdisc *)app_data;
+	struct nssqdisc_qdisc *nq = qdisc_priv(sch);
+
+	nssqdisc_info("%s: Qdisc %p (type %d): shaper node detach response "
+			"type: %d\n", __func__, sch, nq->type, response->type);
+
+	if (response->type < 0) {
+		atomic_set(&nq->state, NSSQDISC_STATE_FAILED_RESPONSE);
+		return;
+	}
+
+	nssqdisc_info("%s: Qdisc %p (type %d): detach complete\n",
+			__func__, sch, nq->type);
+
+	atomic_set(&nq->state, NSSQDISC_STATE_READY);
+}
+
+/*
+ * nssqdisc_node_detach()
+ *	Configuration function that helps detach a child shaper node from a parent.
+ */
+static int nssqdisc_node_detach(struct Qdisc *sch,
+	struct nss_shaper_configure *shaper_node_detach, int32_t detach_type)
+{
+	struct nssqdisc_qdisc *nq = qdisc_priv(sch);
+	int32_t state, rc;
+
+	nssqdisc_info("%s: Qdisc %p (type %d) detaching\n",
+			__func__, sch, nq->type);
+
+	state = atomic_read(&nq->state);
+	if (state != NSSQDISC_STATE_READY) {
+		nssqdisc_error("%s: Qdisc %p (type %d): not ready, state: %d\n",
+				__func__, sch, nq->type, state);
+		BUG();
+	}
+
+	/*
+	 * Set shaper node state to IDLE
+	 */
+	atomic_set(&nq->state, NSSQDISC_STATE_IDLE);
+
+	shaper_node_detach->interface_num = nq->nss_interface_number;
+	shaper_node_detach->i_shaper = (nq->is_bridge)? false : true;
+	shaper_node_detach->cb = nssqdisc_node_detach_callback;
+	shaper_node_detach->app_data = sch;
+	shaper_node_detach->owner = THIS_MODULE;
+	shaper_node_detach->type = detach_type;
+
+	rc = nss_shaper_config_send(nq->nss_shaping_ctx, shaper_node_detach);
+	if (rc != NSS_TX_SUCCESS) {
+		nssqdisc_warning("%s: Qdisc %p (type %d): Failed to send configure "
+					"message.", __func__, sch, nq->type);
+		return -1;
+	}
+
+	/*
+	 * Wait until the detach operation is complete, at which point the state will leave IDLE.
+	 * NOTE: This relies on the NSS driver being able to operate asynchronously, which means
+	 * kernel preemption is required.
+	 */
+	while (NSSQDISC_STATE_IDLE == (state = atomic_read(&nq->state))) {
+		yield();
+	}
+
+	if (state == NSSQDISC_STATE_FAILED_RESPONSE) {
+		nssqdisc_error("%s: Qdisc %p (type %d): failed to attach child node, "
+				"State: %d\n", __func__, sch, nq->type, state);
+		return -1;
+	}
+
+	nssqdisc_info("%s: Qdisc %p (type %d): shaper node detach complete\n",
+			__func__, sch, nq->type);
+	return 0;
+}
+
+/*
+ * nssqdisc_configure_callback()
+ *	The call back function for a shaper node configure message
+ */
+static void nssqdisc_configure_callback(void *app_data,
+				struct nss_shaper_response *response)
+{
+	struct Qdisc *sch = (struct Qdisc *)app_data;
+	struct nssqdisc_qdisc *nq = qdisc_priv(sch);
+
+	nssqdisc_info("%s: Qdisc %p (type %d): shaper node configure "
+		"response type: %d\n", __func__, sch, nq->type, response->type);
+
+	if (response->type < 0) {
+		atomic_set(&nq->state, NSSQDISC_STATE_FAILED_RESPONSE);
+		return;
+	}
+
+	nssqdisc_info("%s: Qdisc %p (type %d): configuration complete\n",
+			__func__, sch, nq->type);
+	atomic_set(&nq->state, NSSQDISC_STATE_READY);
+}
+
+/*
+ * nssqdisc_configure()
+ *	Configuration function that aids in tuning of queuing parameters.
+ */
+static int nssqdisc_configure(struct Qdisc *sch,
+	struct nss_shaper_configure *shaper_node_configure, int32_t config_type)
+{
+	struct nssqdisc_qdisc *nq = qdisc_priv(sch);
+	int32_t state, rc;
+
+	nssqdisc_info("%s: Qdisc %p (type %d) configuring\n", __func__, sch, nq->type);
+
+	state = atomic_read(&nq->state);
+	if (state != NSSQDISC_STATE_READY) {
+		nssqdisc_error("%s: Qdisc %p (type %d): not ready for configure, "
+			"state : %d\n", __func__, sch, nq->type, state);
+		BUG();
+	}
+
+	/*
+	 * Set shaper node state to IDLE
+	 */
+	atomic_set(&nq->state, NSSQDISC_STATE_IDLE);
+
+	shaper_node_configure->interface_num = nq->nss_interface_number;
+	shaper_node_configure->i_shaper = (nq->is_bridge)? false : true;
+	shaper_node_configure->cb = nssqdisc_configure_callback;
+	shaper_node_configure->app_data = sch;
+	shaper_node_configure->owner = THIS_MODULE;
+	shaper_node_configure->type = config_type;
+
+	nssqdisc_info("Sending config type %d\n", config_type);
+	rc = nss_shaper_config_send(nq->nss_shaping_ctx, shaper_node_configure);
+	if (rc != NSS_TX_SUCCESS) {
+		nssqdisc_warning("%s: Qdisc %p (type %d): Failed to send configure "
+			"message\n", __func__, sch, nq->type);
+		return -1;
+	}
+
+	/*
+	 * Wait until the configure operation is complete, at which point the
+	 * state will leave IDLE. NOTE: This relies on the NSS driver being able
+	 * to operate asynchronously, which means kernel preemption is required.
+	 */
+	while (NSSQDISC_STATE_IDLE == (state = atomic_read(&nq->state))) {
+		yield();
+	}
+
+	if (state == NSSQDISC_STATE_FAILED_RESPONSE) {
+		nssqdisc_error("%s: Qdisc %p (type %d): failed to configure shaper "
+			"node: State: %d\n", __func__, sch, nq->type, state);
+		atomic_set(&nq->state, NSSQDISC_STATE_READY);
+		return -1;
+	}
+
+	nssqdisc_info("%s: Qdisc %p (type %d): shaper node configure complete\n",
+			__func__, sch, nq->type);
+	return 0;
+}
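+
+/*
+ * Informational: nssqdisc_set_default(), nssqdisc_node_attach(),
+ * nssqdisc_node_detach() and nssqdisc_configure() all share the same
+ * synchronous wrapper pattern: set the state to IDLE, send the message,
+ * then yield() until the response callback moves the state to READY or
+ * FAILED_RESPONSE.
+ */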
+
+/*
+ * nssqdisc_destroy()
+ *	Destroys a shaper in NSS, and the sequence is based on the position of
+ *	this qdisc (child or root) and the interface to which it is attached.
+ */
+static void nssqdisc_destroy(struct Qdisc *sch)
+{
+	struct nssqdisc_qdisc *nq = qdisc_priv(sch);
+	int32_t state;
+
+	nssqdisc_info("%s: Qdisc %p (type %d) destroy\n",
+			__func__, sch, nq->type);
+
+	state = atomic_read(&nq->state);
+	if (state != NSSQDISC_STATE_READY) {
+		nssqdisc_error("%s: Qdisc %p (type %d): destroy not ready, "
+				"state: %d\n", __func__, sch, nq->type, state);
+		BUG();
+	}
+
+	/*
+	 * How we begin to tidy up depends on whether we are root or child
+	 */
+	nq->pending_final_state = NSSQDISC_STATE_IDLE;
+	if (nq->is_root) {
+
+		/*
+		 * If this is root on a bridge interface, then unassign
+		 * the bshaper from all the attached interfaces.
+		 */
+		if (nq->is_bridge) {
+			nssqdisc_info("%s: Qdisc %p (type %d): is root on bridge. Need to "
+				"unassign bshapers from its interfaces\n", __func__, sch, nq->type);
+			nssqdisc_refresh_bshaper_assignment(sch, NSSQDISC_UNASSIGN_BSHAPER);
+		}
+
+		/*
+		 * Begin by freeing the root shaper node
+		 */
+		nssqdisc_root_cleanup_free_node(sch);
+	} else {
+		/*
+		 * Begin by freeing the child shaper node
+		 */
+		nssqdisc_child_cleanup_free_node(sch);
+	}
+
+	/*
+	 * Wait until cleanup operation is complete at which point the state
+	 * shall become idle. NOTE: This relies on the NSS driver to be able
+	 * to operate asynchronously which means kernel preemption is required.
+	 */
+	while (NSSQDISC_STATE_IDLE != (state = atomic_read(&nq->state))) {
+		yield();
+	}
+
+	if (nq->destroy_virtual_interface) {
+		nss_destroy_virt_if((void *)nq->nss_interface_number);
+	}
+
+	nssqdisc_info("%s: Qdisc %p (type %d): destroy complete\n",
+			__func__, sch, nq->type);
+}
+
+/*
+ * nssqdisc_init()
+ *	Initializes a shaper in NSS, based on the position of this qdisc (child or root)
+ *	and if its a normal interface or a bridge interface.
+ */
+static int nssqdisc_init(struct Qdisc *sch, nss_shaper_node_type_t type)
+{
+	struct nssqdisc_qdisc *nq = qdisc_priv(sch);
+	struct Qdisc *root;
+	u32 parent;
+	nss_tx_status_t rc;
+	struct net_device *dev;
+	int32_t state;
+	struct nss_shaper_configure shaper_assign;
+
+	/*
+	 * Record our qdisc and type in the private region for handy use
+	 */
+	nq->qdisc = sch;
+	nq->type = type;
+
+	/*
+	 * We don't have to destroy a virtual interface unless
+	 * we are the ones who created it. So set it to false
+	 * as default.
+	 */
+	nq->destroy_virtual_interface = false;
+
+	/*
+	 * Set shaper node state to IDLE
+	 */
+	atomic_set(&nq->state, NSSQDISC_STATE_IDLE);
+
+	nq->qos_tag = (uint32_t)sch->handle >> 16;
+
+	/*
+	 * The root qdisc must be of an nss type (unless we ourselves are going to be the root).
+	 * This is to prevent mixing NSS qdiscs with other types of qdisc.
+	 */
+	parent = sch->parent;
+	root = qdisc_root(sch);
+	nssqdisc_info("%s: Qdisc %p (type %d) init root: %p, me: %p, my handle: %x, "
+		"parent: %x rootid: %s owner: %p\n", __func__, sch, nq->type, root,
+		sch, nq->qos_tag, parent, root->ops->id, root->ops->owner);
+
+	if ((parent != TC_H_ROOT) && (root->ops->owner != THIS_MODULE)) {
+		nssqdisc_error("%s: Qdisc %p (type %d) used outside of NSS shaping "
+			"framework. Parent: %x ops: %p Our Module: %p\n", __func__,
+			sch, nq->type, parent, root->ops, THIS_MODULE);
+
+		atomic_set(&nq->state, NSSQDISC_STATE_INIT_FAILED);
+		return -1;
+	}
+
+	/*
+	 * Register for NSS shaping
+	 */
+	nq->nss_shaping_ctx = nss_register_shaping();
+	if (!nq->nss_shaping_ctx) {
+		nssqdisc_error("%s: no shaping context returned for type %d\n",
+				__func__, nq->type);
+		atomic_set(&nq->state, NSSQDISC_STATE_INIT_FAILED);
+		return -1;
+	}
+
+	/*
+	 * Are we the root qdisc?
+	 */
+	if (parent == TC_H_ROOT) {
+		nssqdisc_info("%s: Qdisc %p (type %d) is root\n", __func__, sch, nq->type);
+		nq->is_root = true;
+	} else {
+		nssqdisc_info("%s: Qdisc %p (type %d) not root\n", __func__, sch, nq->type);
+		nq->is_root = false;
+	}
+
+	/*
+	 * Get the net device as it will tell us if we are on a bridge,
+	 * or on a net device that is represented by a virtual NSS interface (e.g. WIFI)
+	 */
+	dev = qdisc_dev(sch);
+	nssqdisc_info("%s: Qdisc %p (type %d) init dev: %p\n", __func__, sch, nq->type, dev);
+
+	/*
+	 * Determine if dev is a bridge or not as this determines if we
+	 * interact with an I or B shaper
+	 */
+	if (dev->priv_flags & IFF_EBRIDGE) {
+		nssqdisc_info("%s: Qdisc %p (type %d) init qdisc: %p, is bridge\n",
+			__func__, sch, nq->type, nq->qdisc);
+		nq->is_bridge = true;
+	} else {
+		nssqdisc_info("%s: Qdisc %p (type %d) init qdisc: %p, not bridge\n",
+			__func__, sch, nq->type, nq->qdisc);
+		nq->is_bridge = false;
+	}
+
+	/*
+	 * If we are not the root qdisc then we have a simple enough job to do
+	 */
+	if (!nq->is_root) {
+		struct nss_shaper_configure shaper_node_create;
+
+		nssqdisc_info("%s: Qdisc %p (type %d) initializing non-root qdisc\n",
+				__func__, sch, nq->type);
+
+		/*
+		 * The device we are operational on MUST be recognised as an NSS interface.
+		 * NOTE: We do NOT support non-NSS known interfaces in this implementation.
+		 * NOTE: This will still work where the dev is registered as virtual, in which case
+		 * nss_interface_number shall indicate a virtual NSS interface.
+		 */
+		nq->nss_interface_number = nss_get_interface_number(nq->nss_shaping_ctx, dev);
+		if (nq->nss_interface_number < 0) {
+			nssqdisc_error("%s: Qdisc %p (type %d) net device unknown to "
+				"nss driver %s\n", __func__, sch, nq->type, dev->name);
+			nss_unregister_shaping(nq->nss_shaping_ctx);
+			atomic_set(&nq->state, NSSQDISC_STATE_INIT_FAILED);
+			return -1;
+		}
+
+		/*
+		 * As a child qdisc, all we need to do is create a shaper
+		 * node of the requested type.
+		 */
+		nssqdisc_info("%s: Qdisc %p (type %d) non-root (child) create\n",
+				__func__, sch, nq->type);
+
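+		/*
+		 * Request the I shaper for an ordinary interface; a bridge
+		 * is shaped through its B shaper instead.
+		 */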
+		shaper_node_create.interface_num = nq->nss_interface_number;
+		shaper_node_create.i_shaper = !nq->is_bridge;
+		shaper_node_create.cb = nssqdisc_child_init_alloc_node_callback;
+		shaper_node_create.app_data = sch;
+		shaper_node_create.owner = THIS_MODULE;
+		shaper_node_create.type = NSS_SHAPER_CONFIG_TYPE_ALLOC_SHAPER_NODE;
+		shaper_node_create.mt.alloc_shaper_node.node_type = nq->type;
+		shaper_node_create.mt.alloc_shaper_node.qos_tag = nq->qos_tag;
+
+		rc = nss_shaper_config_send(nq->nss_shaping_ctx, &shaper_node_create);
+		if (rc != NSS_TX_SUCCESS) {
+			nssqdisc_error("%s: Qdisc %p (type %d) create command "
+				"failed: %d\n", __func__, sch, nq->type, rc);
+			nq->pending_final_state = NSSQDISC_STATE_CHILD_ALLOC_SEND_FAIL;
+			nssqdisc_child_cleanup_final(sch);
+			return -1;
+		}
+
+		/*
+		 * Wait until init operation is complete.
+		 * NOTE: This relies on the NSS driver to be able to operate
+		 * asynchronously which means kernel preemption is required.
+		 */
+		while (NSSQDISC_STATE_IDLE == (state = atomic_read(&nq->state))) {
+			yield();
+		}
+		nssqdisc_info("%s: Qdisc %p (type %d): initialised with state: %d\n",
+					__func__, sch, nq->type, state);
+		if (state > 0) {
+			return 0;
+		}
+		return -1;
+	}
+
+	/*
+	 * Root qdisc has a lot of work to do. It is responsible for setting up
+	 * the shaper and creating the root and default shaper nodes. Also, when
+	 * operating on a bridge, a virtual NSS interface is created to represent
+	 * bridge shaping. Further, when operating on a bridge, we monitor for
+	 * bridge port changes and assign B shapers to the interfaces of the ports.
+	 */
+	nssqdisc_info("%s: init qdisc type %d : %p, ROOT\n", __func__, nq->type, sch);
+
+	/*
+	 * Detect if we are operating on a bridge or interface
+	 */
+	if (nq->is_bridge) {
+		nssqdisc_info("%s: Qdisc %p (type %d): initializing root qdisc on "
+			"bridge\n", __func__, sch, nq->type);
+
+		/*
+		 * As we are a root qdisc on this bridge then we have to create a
+		 * virtual interface to represent this bridge in the NSS. This will
+		 * allow us to bounce packets to the NSS for bridge shaping action.
+		 * Also set the destroy virtual interface flag so that it is destroyed
+		 * when the module goes down. If this is not done, the OS waits for
+		 * the interface to be released.
+		 */
+		nq->virtual_interface_context = nss_create_virt_if(dev);
+		nq->destroy_virtual_interface = true;
+		if (!nq->virtual_interface_context) {
+			nssqdisc_error("%s: Qdisc %p (type %d): cannot create virtual "
+				"interface\n", __func__, sch, nq->type);
+			nss_unregister_shaping(nq->nss_shaping_ctx);
+			atomic_set(&nq->state, NSSQDISC_STATE_INIT_FAILED);
+			return -1;
+		}
+		nssqdisc_info("%s: Qdisc %p (type %d): virtual interface registered "
+			"in NSS: %p\n", __func__, sch, nq->type, nq->virtual_interface_context);
+		nq->nss_interface_number = nss_virt_if_get_interface_num(nq->virtual_interface_context);
+		nssqdisc_info("%s: Qdisc %p (type %d) virtual interface number: %d\n",
+				__func__, sch, nq->type, nq->nss_interface_number);
+
+		/*
+		 * The root qdisc will get packets enqueued to it, so it must
+		 * register for bridge bouncing as it will be responsible for
+		 * bouncing packets to the NSS for bridge shaping.
+		 */
+		nq->bounce_context = nss_register_shaper_bounce_bridge(nq->nss_interface_number,
+							nssqdisc_bounce_callback, sch, THIS_MODULE);
+		if (!nq->bounce_context) {
+			nssqdisc_error("%s: Qdisc %p (type %d): root but cannot register "
+					"for bridge bouncing\n", __func__, sch, nq->type);
+			nss_destroy_virt_if(nq->virtual_interface_context);
+			nss_unregister_shaping(nq->nss_shaping_ctx);
+			atomic_set(&nq->state, NSSQDISC_STATE_INIT_FAILED);
+			return -1;
+		}
+
+	} else {
+		nssqdisc_info("%s: Qdisc %p (type %d): is interface\n", __func__, sch, nq->type);
+
+		/*
+		 * The device we are operational on MUST be recognised as an NSS interface.
+		 * NOTE: We do NOT support non-NSS known interfaces in this basic implementation.
+		 * NOTE: This will still work where the dev is registered as virtual, in which case
+		 * nss_interface_number shall indicate a virtual NSS interface.
+		 */
+		nq->nss_interface_number = nss_get_interface_number(nq->nss_shaping_ctx, dev);
+		if (nq->nss_interface_number < 0) {
+			nssqdisc_error("%s: Qdisc %p (type %d): interface unknown to nss driver %s\n",
+					__func__, sch, nq->type, dev->name);
+			nss_unregister_shaping(nq->nss_shaping_ctx);
+			atomic_set(&nq->state, NSSQDISC_STATE_INIT_FAILED);
+			return -1;
+		}
+
+		/*
+		 * Is the interface virtual or not?
+		 * NOTE: If this interface is virtual then we have to bounce packets to it for shaping
+		 */
+		nq->is_virtual = nss_interface_is_virtual(nq->nss_shaping_ctx, nq->nss_interface_number);
+		if (!nq->is_virtual) {
+			nssqdisc_info("%s: Qdisc %p (type %d): interface %u is physical\n",
+					__func__, sch, nq->type, nq->nss_interface_number);
+		} else {
+			nssqdisc_info("%s: Qdisc %p (type %d): interface %u is virtual\n",
+					__func__, sch, nq->type, nq->nss_interface_number);
+
+			/*
+			 * Register for interface bounce shaping.
+			 */
+			nq->bounce_context = nss_register_shaper_bounce_interface(nq->nss_interface_number,
+								nssqdisc_bounce_callback, sch, THIS_MODULE);
+			if (!nq->bounce_context) {
+				nssqdisc_error("%s: Qdisc %p (type %d): is root but failed "
+				"to register for interface bouncing\n", __func__, sch, nq->type);
+				nss_unregister_shaping(nq->nss_shaping_ctx);
+				atomic_set(&nq->state, NSSQDISC_STATE_INIT_FAILED);
+				return -1;
+			}
+		}
+	}
+
+	/*
+	 * We need to issue a command to establish a shaper on the interface.
+	 */
+	shaper_assign.interface_num = nq->nss_interface_number;
+	shaper_assign.i_shaper = !nq->is_bridge;
+	shaper_assign.cb = nssqdisc_root_init_shaper_assign_callback;
+	shaper_assign.app_data = sch;
+	shaper_assign.owner = THIS_MODULE;
+	shaper_assign.type = NSS_SHAPER_CONFIG_TYPE_ASSIGN_SHAPER;
+	shaper_assign.mt.assign_shaper.shaper_num = 0;	/* Any free shaper will do */
+	rc = nss_shaper_config_send(nq->nss_shaping_ctx, &shaper_assign);
+	if (rc != NSS_TX_SUCCESS) {
+		nssqdisc_error("%s: shaper assign command failed: %d\n", __func__, rc);
+		nq->pending_final_state = NSSQDISC_STATE_ASSIGN_SHAPER_SEND_FAIL;
+		nssqdisc_root_cleanup_final(sch);
+		if (nq->destroy_virtual_interface) {
+			nss_destroy_virt_if(nq->virtual_interface_context);
+		}
+		return -1;
+	}
+
+	/*
+	 * Wait until init operation is complete.
+	 * NOTE: This relies on the NSS driver to be able to operate asynchronously which means
+	 * kernel preemption is required.
+	 */
+	nssqdisc_info("%s: Qdisc %p (type %d): Waiting on response from NSS for "
+			"shaper assign message\n", __func__, sch, nq->type);
+	while (NSSQDISC_STATE_IDLE == (state = atomic_read(&nq->state))) {
+		yield();
+	}
+	nssqdisc_info("%s: Qdisc %p (type %d): is initialised with state: %d\n",
+			__func__, sch, nq->type, state);
+
+	if (state > 0) {
+		/*
+		 * Return if this is not a root qdisc on a bridge interface.
+		 */
+		if (!nq->is_root || !nq->is_bridge) {
+			return 0;
+		}
+
+		nssqdisc_info("%s: This is a bridge interface. Linking bridge ...\n",
+				__func__);
+		/*
+		 * This is a root qdisc added to a bridge interface. Now we go ahead
+		 * and add this B-shaper to interfaces known to the NSS
+		 */
+		if (nssqdisc_refresh_bshaper_assignment(sch, NSSQDISC_ASSIGN_BSHAPER) < 0) {
+			nssqdisc_destroy(sch);
+			nssqdisc_error("%s: Bridge linking failed\n", __func__);
+			return -1;
+		}
+		nssqdisc_info("%s: Bridge linking complete\n", __func__);
+		return 0;
+	}
+
+	/*
+	 * Destroy any virtual interfaces created by us before returning a failure.
+	 */
+	if (nq->destroy_virtual_interface) {
+		nss_destroy_virt_if(nq->virtual_interface_context);
+	}
+
+	return -1;
+}
+
+/*
+ * nssqdisc_basic_stats_callback()
+ *	Invoked after getting basic stats
+ */
+static void nssqdisc_basic_stats_callback(void *app_data,
+				struct nss_shaper_response *response)
+{
+	struct Qdisc *qdisc = (struct Qdisc *)app_data;
+	struct nssqdisc_qdisc *nq = qdisc_priv(qdisc);
+
+	if (response->type < 0) {
+		nssqdisc_info("%s: Qdisc %p (type %d): Received stats - "
+			"response: type: %d\n", __func__, qdisc, nq->type,
+			response->type);
+		atomic_sub(1, &nq->pending_stat_requests);
+		return;
+	}
+
+	/*
+	 * Record latest basic stats
+	 */
+	nq->basic_stats_latest = response->rt.shaper_node_basic_stats_get_success;
+
+	/*
+	 * Update qdisc->bstats
+	 */
+	qdisc->bstats.bytes += (__u64)nq->basic_stats_latest.delta.dequeued_bytes;
+	qdisc->bstats.packets += nq->basic_stats_latest.delta.dequeued_packets;
+
+	/*
+	 * Update qdisc->qstats
+	 */
+	qdisc->qstats.backlog = nq->basic_stats_latest.qlen_bytes;
+	qdisc->q.qlen = nq->basic_stats_latest.qlen_packets;
+
+	qdisc->qstats.drops += (nq->basic_stats_latest.delta.enqueued_packets_dropped +
+				nq->basic_stats_latest.delta.dequeued_packets_dropped);
+
+	/*
+	 * Update the remaining qdisc->qstats fields
+	 */
+	qdisc->qstats.qlen = qdisc->limit;
+	qdisc->qstats.requeues = 0;
+	qdisc->qstats.overlimits += nq->basic_stats_latest.delta.queue_overrun;
+
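+	/*
+	 * If the qdisc is going away, do not rearm the timer. Dropping
+	 * pending_stat_requests to zero releases the destroy path waiting
+	 * in nssqdisc_stop_basic_stats_polling().
+	 */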
+	if (atomic_read(&qdisc->refcnt) == 0) {
+		atomic_sub(1, &nq->pending_stat_requests);
+		return;
+	}
+
+	/*
+	 * Request stats again one second from now.
+	 */
+	nq->stats_get_timer.expires += HZ;
+	if (nq->stats_get_timer.expires <= jiffies) {
+		nssqdisc_error("losing time %lu, jiffies = %lu\n",
+				nq->stats_get_timer.expires, jiffies);
+		nq->stats_get_timer.expires = jiffies + HZ;
+	}
+	add_timer(&nq->stats_get_timer);
+}
+
+/*
+ * nssqdisc_get_stats_timer_callback()
+ *	Invoked periodically to get updated stats
+ */
+static void nssqdisc_get_stats_timer_callback(unsigned long int data)
+{
+	struct Qdisc *qdisc = (struct Qdisc *)data;
+	struct nssqdisc_qdisc *nq = qdisc_priv(qdisc);
+	nss_tx_status_t rc;
+	struct nss_shaper_configure basic_stats_get;
+
+	/*
+	 * Issue a command to fetch the latest basic stats for this
+	 * shaper node.
+	 */
+	basic_stats_get.interface_num = nq->nss_interface_number;
+	basic_stats_get.i_shaper = !nq->is_bridge;
+	basic_stats_get.cb = nssqdisc_basic_stats_callback;
+	basic_stats_get.app_data = qdisc;
+	basic_stats_get.owner = THIS_MODULE;
+	basic_stats_get.type = NSS_SHAPER_CONFIG_TYPE_SHAPER_NODE_BASIC_STATS_GET;
+	basic_stats_get.mt.shaper_node_basic_stats_get.qos_tag = nq->qos_tag;
+	rc = nss_shaper_config_send(nq->nss_shaping_ctx, &basic_stats_get);
+	if (rc != NSS_TX_SUCCESS) {
+		nssqdisc_error("%s: %p: basic stats get failed to send\n",
+				__func__, qdisc);
+		atomic_sub(1, &nq->pending_stat_requests);
+	}
+}
+
+/*
+ * nssqdisc_start_basic_stats_polling()
+ *	Call to initiate the stats polling timer
+ */
+static void nssqdisc_start_basic_stats_polling(struct Qdisc *sch)
+{
+	struct nssqdisc_qdisc *nq = qdisc_priv(sch);
+
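+	/*
+	 * A single token in pending_stat_requests represents the poll in
+	 * flight; nssqdisc_stop_basic_stats_polling() waits for it to drain.
+	 */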
+	init_timer(&nq->stats_get_timer);
+	nq->stats_get_timer.function = nssqdisc_get_stats_timer_callback;
+	nq->stats_get_timer.data = (unsigned long)sch;
+	nq->stats_get_timer.expires = jiffies + HZ;
+	atomic_set(&nq->pending_stat_requests, 1);
+	add_timer(&nq->stats_get_timer);
+}
+
+/*
+ * nssqdisc_stop_basic_stats_polling()
+ *	Call to stop polling of basic stats
+ */
+static void nssqdisc_stop_basic_stats_polling(struct Qdisc *sch)
+{
+	struct nssqdisc_qdisc *nq = qdisc_priv(sch);
+
+	/*
+	 * We wait until we have received the final stats
+	 */
+	while (atomic_read(&nq->pending_stat_requests) != 0) {
+		yield();
+	}
+}
+
+/*
+ * nssqdisc_if_event_cb()
+ *	Net device notifier callback. Tracks interfaces joining and leaving
+ *	bridges so that B shapers can be attached or detached accordingly.
+ */
+static int nssqdisc_if_event_cb(struct notifier_block *unused,
+					unsigned long event, void *ptr)
+{
+	struct net_device *dev = (struct net_device *)ptr;
+	struct net_device *br;
+	struct Qdisc *br_qdisc;
+	int if_num, br_num;
+
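+	/*
+	 * Only bridge join/leave events are of interest here.
+	 */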
+	switch (event) {
+	case NETDEV_BR_JOIN:
+	case NETDEV_BR_LEAVE:
+		nssqdisc_info("Received %s on interface %s\n",
+				(event == NETDEV_BR_JOIN) ? "NETDEV_BR_JOIN" :
+				"NETDEV_BR_LEAVE", dev->name);
+		br = dev->master;
+		if_num = nss_get_interface_number(nssqdisc_ctx, dev);
+
+		if (br == NULL || !(br->priv_flags & IFF_EBRIDGE)) {
+			nssqdisc_error("Sensed bridge activity on interface %s "
+				"that is not on any bridge\n", dev->name);
+			break;
+		}
+
+		br_num = nss_get_interface_number(nssqdisc_ctx, br);
+		br_qdisc = br->qdisc;
+		/*
+		 * TODO: Properly ensure that the interface and bridge are
+		 * shaped by us.
+		 */
+		if (if_num < 0 || br_num < 0) {
+			nssqdisc_info("No action taken since if_num is %d for %s "
+					"and br_num is %d for bridge %s\n", if_num,
+					dev->name, br_num, br->name);
+			break;
+		}
+
+		/*
+		 * Call attach or detach as per the event type.
+		 */
+		if (event == NETDEV_BR_JOIN) {
+			nssqdisc_info("Instructing interface %s to attach to bridge(%s) "
+					"shaping\n", dev->name, br->name);
+			nssqdisc_attach_bshaper(br_qdisc, if_num);
+		} else if (event == NETDEV_BR_LEAVE) {
+			nssqdisc_info("Instructing interface %s to detach from bridge(%s) "
+					"shaping\n", dev->name, br->name);
+			nssqdisc_detach_bshaper(br_qdisc, if_num);
+		}
+
+		break;
+	default:
+		nssqdisc_info("Received unhandled event %lu on interface %s\n", event, dev->name);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block nssqdisc_device_notifier = {
+		.notifier_call = nssqdisc_if_event_cb };
+
+/* =========================== NSSFIFO ========================= */
+
+struct nssfifo_sched_data {
+	struct nssqdisc_qdisc nq;	/* Common base class for all nss qdiscs */
+	u32 limit;			/* Queue length in packets */
+					/* TODO: Support for queue length in bytes */
+	u8 set_default;			/* Flag to set qdisc as default qdisc for enqueue */
+};
+
+static int nssfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+	return nssqdisc_enqueue(skb, sch);
+}
+
+static struct sk_buff *nssfifo_dequeue(struct Qdisc *sch)
+{
+	return nssqdisc_dequeue(sch);
+}
+
+static unsigned int nssfifo_drop(struct Qdisc *sch)
+{
+	nssqdisc_info("nssfifo dropping");
+	return nssqdisc_drop(sch);
+}
+
+static void nssfifo_reset(struct Qdisc *sch)
+{
+	nssqdisc_info("nssfifo resetting!");
+	nssqdisc_reset(sch);
+}
+
+static void nssfifo_destroy(struct Qdisc *sch)
+{
+	/*
+	 * Stop the polling of basic stats
+	 */
+	nssqdisc_stop_basic_stats_polling(sch);
+
+	nssqdisc_destroy(sch);
+	nssqdisc_info("nssfifo destroyed");
+}
+
+static const struct nla_policy nssfifo_policy[TCA_NSSFIFO_MAX + 1] = {
+	[TCA_NSSFIFO_PARMS] = { .len = sizeof(struct tc_nssfifo_qopt) },
+};
+
+static int nssfifo_change(struct Qdisc *sch, struct nlattr *opt)
+{
+	struct nssfifo_sched_data *q;
+	struct nlattr *na[TCA_NSSFIFO_MAX + 1];
+	struct tc_nssfifo_qopt *qopt;
+	int err;
+	struct nss_shaper_configure shaper_node_change_param;
+
+	q = qdisc_priv(sch);
+
+	if (opt == NULL) {
+		return -EINVAL;
+	}
+
+	err = nla_parse_nested(na, TCA_NSSFIFO_MAX, opt, nssfifo_policy);
+	if (err < 0)
+		return err;
+
+	if (na[TCA_NSSFIFO_PARMS] == NULL)
+		return -EINVAL;
+
+	qopt = nla_data(na[TCA_NSSFIFO_PARMS]);
+
+	if (!qopt->limit) {
+		nssqdisc_error("%s: limit must be non-zero\n", __func__);
+		return -EINVAL;
+	}
+
+	q->limit = qopt->limit;
+
+	/*
+	 * Required for basic stats display
+	 */
+	sch->limit = qopt->limit;
+
+	q->set_default = qopt->set_default;
+	nssqdisc_info("%s: limit:%u set_default:%u\n", __func__, qopt->limit, qopt->set_default);
+
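+	/*
+	 * Push the new queue limit to the FIFO shaper node in the NSS,
+	 * using tail-drop when the queue is full.
+	 */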
+	shaper_node_change_param.mt.shaper_node_config.qos_tag = q->nq.qos_tag;
+	shaper_node_change_param.mt.shaper_node_config.snc.fifo_param.limit = q->limit;
+	shaper_node_change_param.mt.shaper_node_config.snc.fifo_param.drop_mode = NSS_SHAPER_FIFO_DROP_MODE_TAIL;
+	if (nssqdisc_configure(sch, &shaper_node_change_param, NSS_SHAPER_CONFIG_TYPE_FIFO_CHANGE_PARAM) < 0)
+		return -EINVAL;
+
+	/*
+	 * There is nothing we need to do if the qdisc is not
+	 * set as default qdisc.
+	 */
+	if (q->set_default == 0)
+		return 0;
+
+	/*
+	 * Set this qdisc to be the default qdisc for enqueuing packets.
+	 */
+	if (nssqdisc_set_default(sch) < 0)
+		return -EINVAL;
+
+	nssqdisc_info("%s: nssfifo queue (qos_tag:%u) set as default\n", __func__, q->nq.qos_tag);
+	return 0;
+}
+
+static int nssfifo_init(struct Qdisc *sch, struct nlattr *opt)
+{
+	if (opt == NULL)
+		return -EINVAL;
+
+	nssqdisc_info("Initializing Fifo - type %d\n", NSS_SHAPER_NODE_TYPE_FIFO);
+	nssfifo_reset(sch);
+
+	if (nssqdisc_init(sch, NSS_SHAPER_NODE_TYPE_FIFO) < 0)
+		return -EINVAL;
+
+	nssqdisc_info("NSS fifo initialized - handle %x parent %x\n", sch->handle, sch->parent);
+	if (nssfifo_change(sch, opt) < 0) {
+		nssqdisc_destroy(sch);
+		return -EINVAL;
+	}
+
+	/*
+	 * Start the stats polling timer
+	 */
+	nssqdisc_start_basic_stats_polling(sch);
+
+	return 0;
+}
+
+static int nssfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+	struct nssfifo_sched_data *q;
+	struct nlattr *opts = NULL;
+	struct tc_nssfifo_qopt opt;
+
+	nssqdisc_info("Nssfifo Dumping!");
+
+	q = qdisc_priv(sch);
+	if (q == NULL) {
+		return -1;
+	}
+
+	opt.limit = q->limit;
+
+	opts = nla_nest_start(skb, TCA_OPTIONS);
+	if (opts == NULL) {
+		goto nla_put_failure;
+	}
+	if (nla_put(skb, TCA_NSSFIFO_PARMS, sizeof(opt), &opt))
+		goto nla_put_failure;
+
+	return nla_nest_end(skb, opts);
+
+nla_put_failure:
+	nla_nest_cancel(skb, opts);
+	return -EMSGSIZE;
+}
+
+static struct sk_buff *nssfifo_peek(struct Qdisc *sch)
+{
+	nssqdisc_info("Nssfifo Peeking");
+	return nssqdisc_peek(sch);
+}
+
+static struct Qdisc_ops nsspfifo_qdisc_ops __read_mostly = {
+	.id		=	"nsspfifo",
+	.priv_size	=	sizeof(struct nssfifo_sched_data),
+	.enqueue	=	nssfifo_enqueue,
+	.dequeue	=	nssfifo_dequeue,
+	.peek		=	nssfifo_peek,
+	.drop		=	nssfifo_drop,
+	.init		=	nssfifo_init,
+	.reset		=	nssfifo_reset,
+	.destroy	=	nssfifo_destroy,
+	.change		=	nssfifo_change,
+	.dump		=	nssfifo_dump,
+	.owner		=	THIS_MODULE,
+};
+
+static struct Qdisc_ops nssbfifo_qdisc_ops __read_mostly = {
+	.id		=	"nssbfifo",
+	.priv_size	=	sizeof(struct nssfifo_sched_data),
+	.enqueue	=	nssfifo_enqueue,
+	.dequeue	=	nssfifo_dequeue,
+	.peek		=	nssfifo_peek,
+	.drop		=	nssfifo_drop,
+	.init		=	nssfifo_init,
+	.reset		=	nssfifo_reset,
+	.destroy	=	nssfifo_destroy,
+	.change		=	nssfifo_change,
+	.dump		=	nssfifo_dump,
+	.owner		=	THIS_MODULE,
+};
+
+/* =========================== NSSCODEL ========================= */
+
+struct nsscodel_stats {
+	u32 peak_queue_delay;		/* Peak delay experienced by a dequeued packet */
+	u32 peak_drop_delay;		/* Peak delay experienced by a packet that is dropped */
+};
+
+struct nsscodel_sched_data {
+	struct nssqdisc_qdisc nq;	/* Common base class for all nss qdiscs */
+	u32 target;			/* Acceptable value of queue delay */
+	u32 limit;			/* Length of queue */
+	u32 interval;			/* Monitoring interval */
+	u8 set_default;			/* Flag to set qdisc as default qdisc for enqueue */
+	struct nsscodel_stats stats;	/* Contains nsscodel related stats */
+};
+
+static int nsscodel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+	return nssqdisc_enqueue(skb, sch);
+}
+
+static struct sk_buff *nsscodel_dequeue(struct Qdisc *sch)
+{
+	return nssqdisc_dequeue(sch);
+}
+
+static unsigned int nsscodel_drop(struct Qdisc *sch)
+{
+	return nssqdisc_drop(sch);
+}
+
+static void nsscodel_reset(struct Qdisc *sch)
+{
+	nssqdisc_info("nsscodel resetting!");
+	nssqdisc_reset(sch);
+}
+
+static void nsscodel_destroy(struct Qdisc *sch)
+{
+	/*
+	 * Stop the polling of basic stats
+	 */
+	nssqdisc_stop_basic_stats_polling(sch);
+	nssqdisc_destroy(sch);
+	nssqdisc_info("nsscodel destroyed");
+}
+
+static const struct nla_policy nsscodel_policy[TCA_NSSCODEL_MAX + 1] = {
+	[TCA_NSSCODEL_PARMS] = { .len = sizeof(struct tc_nsscodel_qopt) },
+};
+
+static int nsscodel_change(struct Qdisc *sch, struct nlattr *opt)
+{
+	struct nsscodel_sched_data *q;
+	struct nlattr *na[TCA_NSSCODEL_MAX + 1];
+	struct tc_nsscodel_qopt *qopt;
+	struct nss_shaper_configure shaper_node_change_param;
+	int err;
+	struct net_device *dev = qdisc_dev(sch);
+
+	q = qdisc_priv(sch);
+
+	if (opt == NULL)
+		return -EINVAL;
+
+	err = nla_parse_nested(na, TCA_NSSCODEL_MAX, opt, nsscodel_policy);
+	if (err < 0)
+		return err;
+
+	if (na[TCA_NSSCODEL_PARMS] == NULL)
+		return -EINVAL;
+
+	qopt = nla_data(na[TCA_NSSCODEL_PARMS]);
+
+	if (!qopt->target || !qopt->interval || !qopt->limit) {
+		nssqdisc_error("nsscodel requires a non-zero value for target, "
+				"interval and limit\n");
+		return -EINVAL;
+	}
+
+	q->target = qopt->target;
+	q->limit = qopt->limit;
+	q->interval = qopt->interval;
+	q->set_default = qopt->set_default;
+
+	/*
+	 * Required for basic stats display
+	 */
+	sch->limit = qopt->limit;
+
+	nssqdisc_info("Target:%u Limit:%u Interval:%u set_default = %u\n",
+		q->target, q->limit, q->interval, qopt->set_default);
+
+	shaper_node_change_param.mt.shaper_node_config.qos_tag = q->nq.qos_tag;
+	/*
+	 * Target and interval times must be provided in milliseconds
+	 * (tc gives us the time in microseconds, so we divide by 1000).
+	 */
+	shaper_node_change_param.mt.shaper_node_config.snc.codel_param.qlen_max = q->limit;
+	shaper_node_change_param.mt.shaper_node_config.snc.codel_param.cap.interval = q->interval/1000;
+	shaper_node_change_param.mt.shaper_node_config.snc.codel_param.cap.target = q->target/1000;
+	shaper_node_change_param.mt.shaper_node_config.snc.codel_param.cap.mtu = dev->mtu;
+	nssqdisc_info("%s: MTU size of interface %s is %u\n", __func__, dev->name, dev->mtu);
+
+	if (nssqdisc_configure(sch, &shaper_node_change_param,
+				NSS_SHAPER_CONFIG_TYPE_CODEL_CHANGE_PARAM) < 0) {
+		return -EINVAL;
+	}
+
+	/*
+	 * There is nothing we need to do if the qdisc is not
+	 * set as default qdisc.
+	 */
+	if (!q->set_default)
+		return 0;
+
+	/*
+	 * Set this qdisc to be the default qdisc for enqueuing packets.
+	 */
+	if (nssqdisc_set_default(sch) < 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int nsscodel_init(struct Qdisc *sch, struct nlattr *opt)
+{
+	if (opt == NULL)
+		return -EINVAL;
+
+	nsscodel_reset(sch);
+	if (nssqdisc_init(sch, NSS_SHAPER_NODE_TYPE_CODEL) < 0)
+		return -EINVAL;
+
+	if (nsscodel_change(sch, opt) < 0) {
+		nssqdisc_destroy(sch);
+		return -EINVAL;
+	}
+
+	/*
+	 * Start the stats polling timer
+	 */
+	nssqdisc_start_basic_stats_polling(sch);
+
+	return 0;
+}
+
+static int nsscodel_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+	struct nsscodel_sched_data *q;
+	struct nlattr *opts = NULL;
+	struct tc_nsscodel_qopt opt;
+
+	nssqdisc_info("NssCodel Dumping!");
+
+	q = qdisc_priv(sch);
+	if (q == NULL) {
+		return -1;
+	}
+
+	opt.target = q->target;
+	opt.limit = q->limit;
+	opt.interval = q->interval;
+	opts = nla_nest_start(skb, TCA_OPTIONS);
+	if (opts == NULL) {
+		goto nla_put_failure;
+	}
+	if (nla_put(skb, TCA_NSSCODEL_PARMS, sizeof(opt), &opt))
+		goto nla_put_failure;
+
+	return nla_nest_end(skb, opts);
+
+nla_put_failure:
+	nla_nest_cancel(skb, opts);
+	return -EMSGSIZE;
+}
+
+static int nsscodel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+{
+	struct nsscodel_sched_data *q = qdisc_priv(sch);
+	struct tc_nsscodel_xstats st = {
+		.peak_queue_delay = q->nq.basic_stats_latest.packet_latency_peak_msec_dequeued,
+		.peak_drop_delay = q->nq.basic_stats_latest.packet_latency_peak_msec_dropped,
+	};
+
+	return gnet_stats_copy_app(d, &st, sizeof(st));
+}
+
+static struct sk_buff *nsscodel_peek(struct Qdisc *sch)
+{
+	nssqdisc_info("Nsscodel Peeking");
+	return nssqdisc_peek(sch);
+}
+
+static struct Qdisc_ops nsscodel_qdisc_ops __read_mostly = {
+	.id		=	"nsscodel",
+	.priv_size	=	sizeof(struct nsscodel_sched_data),
+	.enqueue	=	nsscodel_enqueue,
+	.dequeue	=	nsscodel_dequeue,
+	.peek		=	nsscodel_peek,
+	.drop		=	nsscodel_drop,
+	.init		=	nsscodel_init,
+	.reset		=	nsscodel_reset,
+	.destroy	=	nsscodel_destroy,
+	.change		=	nsscodel_change,
+	.dump		=	nsscodel_dump,
+	.dump_stats	=	nsscodel_dump_stats,
+	.owner		=	THIS_MODULE,
+};
+
+/* =========================== NSSTBL ========================= */
+
+struct nsstbl_sched_data {
+	struct nssqdisc_qdisc nq;	/* Common base class for all nss qdiscs */
+	u32 rate;			/* Limiting rate of TBL */
+	u32 peakrate;			/* Maximum rate to control bursts */
+	u32 burst;			/* Maximum allowed burst size */
+	u32 mtu;			/* MTU of the interface attached to */
+	u32 mpu;			/* Minimum size of a packet (when there is
+					 * no data)
+					 */
+	struct Qdisc *qdisc;		/* Child qdisc attached below this node */
+};
+
+static int nsstbl_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+	return nssqdisc_enqueue(skb, sch);
+}
+
+static struct sk_buff *nsstbl_dequeue(struct Qdisc *sch)
+{
+	return nssqdisc_dequeue(sch);
+}
+
+static unsigned int nsstbl_drop(struct Qdisc *sch)
+{
+	return nssqdisc_drop(sch);
+}
+
+static struct sk_buff *nsstbl_peek(struct Qdisc *sch)
+{
+	return nssqdisc_peek(sch);
+}
+
+static void nsstbl_reset(struct Qdisc *sch)
+{
+	nssqdisc_reset(sch);
+}
+
+static void nsstbl_destroy(struct Qdisc *sch)
+{
+	struct nsstbl_sched_data *q = qdisc_priv(sch);
+
+	qdisc_destroy(q->qdisc);
+
+	/*
+	 * Stop the polling of basic stats
+	 */
+	nssqdisc_stop_basic_stats_polling(sch);
+	nssqdisc_destroy(sch);
+}
+
+static const struct nla_policy nsstbl_policy[TCA_NSSTBL_MAX + 1] = {
+	[TCA_NSSTBL_PARMS] = { .len = sizeof(struct tc_nsstbl_qopt) },
+};
+
+static int nsstbl_change(struct Qdisc *sch, struct nlattr *opt)
+{
+	struct nsstbl_sched_data *q;
+	struct nlattr *na[TCA_NSSTBL_MAX + 1];
+	struct tc_nsstbl_qopt *qopt;
+	struct nss_shaper_configure shaper_node_change_param;
+	int err;
+
+	q = qdisc_priv(sch);
+
+	if (opt == NULL)
+		return -EINVAL;
+
+	err = nla_parse_nested(na, TCA_NSSTBL_MAX, opt, nsstbl_policy);
+	if (err < 0)
+		return err;
+
+	if (na[TCA_NSSTBL_PARMS] == NULL)
+		return -EINVAL;
+
+	qopt = nla_data(na[TCA_NSSTBL_PARMS]);
+
+	/*
+	 * Burst size cannot be less than MTU
+	 */
+	if (qopt->burst < qopt->mtu) {
+		nssqdisc_error("Burst size: %u is less than the specified MTU: %u\n", qopt->burst, qopt->mtu);
+		return -EINVAL;
+	}
+
+	/*
+	 * For peak rate to work, MTU must be specified.
+	 */
+	if (qopt->peakrate > 0 && qopt->mtu == 0) {
+		nssqdisc_error("MTU cannot be zero if peakrate is specified\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Rate may legitimately be zero, so we do not validate it.
+	 */
+	q->rate = qopt->rate;
+	nssqdisc_info("Rate = %u", qopt->rate);
+	q->burst = qopt->burst;
+	nssqdisc_info("Burst = %u", qopt->burst);
+	q->mtu = qopt->mtu;
+	nssqdisc_info("MTU = %u", qopt->mtu);
+	q->peakrate = qopt->peakrate;
+	nssqdisc_info("Peak Rate = %u", qopt->peakrate);
+
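+	/*
+	 * Program the committed information rate (CIR) bucket. The peak
+	 * information rate (PIR) bucket is short-circuited below when no
+	 * peakrate is given.
+	 */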
+	shaper_node_change_param.mt.shaper_node_config.qos_tag = q->nq.qos_tag;
+	shaper_node_change_param.mt.shaper_node_config.snc.tbl_param.lap_cir.rate = q->rate;
+	shaper_node_change_param.mt.shaper_node_config.snc.tbl_param.lap_cir.burst = q->burst;
+	shaper_node_change_param.mt.shaper_node_config.snc.tbl_param.lap_cir.max_size = q->mtu;
+	shaper_node_change_param.mt.shaper_node_config.snc.tbl_param.lap_cir.short_circuit = false;
+	shaper_node_change_param.mt.shaper_node_config.snc.tbl_param.lap_pir.rate = q->peakrate;
+
+	/*
+	 * It is important to set these two parameters equal to the MTU.
+	 * This ensures bursts from the CIR don't exceed the specified peakrate.
+	 */
+	shaper_node_change_param.mt.shaper_node_config.snc.tbl_param.lap_pir.burst = q->mtu;
+	shaper_node_change_param.mt.shaper_node_config.snc.tbl_param.lap_pir.max_size = q->mtu;
+
+	shaper_node_change_param.mt.shaper_node_config.snc.tbl_param.lap_pir.short_circuit = (q->peakrate == 0);
+
+	if (nssqdisc_configure(sch, &shaper_node_change_param,
+			NSS_SHAPER_CONFIG_TYPE_TBL_CHANGE_PARAM) < 0) {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int nsstbl_init(struct Qdisc *sch, struct nlattr *opt)
+{
+	struct nsstbl_sched_data *q = qdisc_priv(sch);
+
+	if (opt == NULL)
+		return -EINVAL;
+
+	q->qdisc = &noop_qdisc;
+
+	if (nssqdisc_init(sch, NSS_SHAPER_NODE_TYPE_TBL) < 0)
+		return -EINVAL;
+
+	if (nsstbl_change(sch, opt) < 0) {
+		nssqdisc_info("Failed to configure tbl\n");
+		nssqdisc_destroy(sch);
+		return -EINVAL;
+	}
+
+	/*
+	 * Start the stats polling timer
+	 */
+	nssqdisc_start_basic_stats_polling(sch);
+
+	return 0;
+}
+
+static int nsstbl_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+	struct nsstbl_sched_data *q = qdisc_priv(sch);
+	struct nlattr *opts = NULL;
+	struct tc_nsstbl_qopt opt = {
+		.rate		= q->rate,
+		.peakrate	= q->peakrate,
+		.burst		= q->burst,
+		.mtu		= q->mtu,
+	};
+
+	nssqdisc_info("Nsstbl dumping");
+	opts = nla_nest_start(skb, TCA_OPTIONS);
+	if (opts == NULL)
+		goto nla_put_failure;
+	if (nla_put(skb, TCA_NSSTBL_PARMS, sizeof(opt), &opt))
+		goto nla_put_failure;
+	return nla_nest_end(skb, opts);
+
+nla_put_failure:
+	nla_nest_cancel(skb, opts);
+	return -EMSGSIZE;
+}
+
+static int nsstbl_dump_class(struct Qdisc *sch, unsigned long cl,
+			     struct sk_buff *skb, struct tcmsg *tcm)
+{
+	struct nsstbl_sched_data *q = qdisc_priv(sch);
+	nssqdisc_info("Nsstbl dumping class");
+
+	tcm->tcm_handle |= TC_H_MIN(1);
+	tcm->tcm_info = q->qdisc->handle;
+
+	return 0;
+}
+
+static int nsstbl_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+			struct Qdisc **old)
+{
+	struct nsstbl_sched_data *q = qdisc_priv(sch);
+	struct nssqdisc_qdisc *nq_new;
+	struct nss_shaper_configure shaper_node_attach, shaper_node_detach;
+
+	if (new == NULL)
+		new = &noop_qdisc;
+
+	nq_new = qdisc_priv(new);
+
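+	/*
+	 * Swap the child under the qdisc tree lock first, then instruct the
+	 * NSS to detach the old shaper node and attach the new one.
+	 */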
+	sch_tree_lock(sch);
+	*old = q->qdisc;
+	q->qdisc = new;
+	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+	qdisc_reset(*old);
+	sch_tree_unlock(sch);
+
+	nssqdisc_info("%s:Grafting old: %p with new: %p\n", __func__, *old, new);
+	if (*old != &noop_qdisc) {
+		nssqdisc_info("%s: Detaching old: %p\n", __func__, *old);
+		shaper_node_detach.mt.shaper_node_config.qos_tag = q->nq.qos_tag;
+		if (nssqdisc_node_detach(sch, &shaper_node_detach,
+				NSS_SHAPER_CONFIG_TYPE_TBL_DETACH) < 0) {
+			return -EINVAL;
+		}
+	}
+
+	if (new != &noop_qdisc) {
+		nssqdisc_info("%s: Attaching new: %p\n", __func__, new);
+		shaper_node_attach.mt.shaper_node_config.qos_tag = q->nq.qos_tag;
+		shaper_node_attach.mt.shaper_node_config.snc.tbl_attach.child_qos_tag = nq_new->qos_tag;
+		if (nssqdisc_node_attach(sch, &shaper_node_attach,
+				NSS_SHAPER_CONFIG_TYPE_TBL_ATTACH) < 0) {
+			return -EINVAL;
+		}
+	}
+
+	nssqdisc_info("Nsstbl grafted");
+
+	return 0;
+}
+
+static struct Qdisc *nsstbl_leaf(struct Qdisc *sch, unsigned long arg)
+{
+	struct nsstbl_sched_data *q = qdisc_priv(sch);
+	nssqdisc_info("Nsstbl returns leaf");
+	return q->qdisc;
+}
+
+static unsigned long nsstbl_get(struct Qdisc *sch, u32 classid)
+{
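+	/*
+	 * TBL exposes exactly one class; report a fixed, non-zero class id.
+	 */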
+	return 1;
+}
+
+static void nsstbl_put(struct Qdisc *sch, unsigned long arg)
+{
+}
+
+static void nsstbl_walk(struct Qdisc *sch, struct qdisc_walker *walker)
+{
+	nssqdisc_info("Nsstbl walk called");
+	if (!walker->stop) {
+		if (walker->count >= walker->skip)
+			if (walker->fn(sch, 1, walker) < 0) {
+				walker->stop = 1;
+				return;
+			}
+		walker->count++;
+	}
+}
+
+static const struct Qdisc_class_ops nsstbl_class_ops = {
+	.graft		=	nsstbl_graft,
+	.leaf		=	nsstbl_leaf,
+	.get		=	nsstbl_get,
+	.put		=	nsstbl_put,
+	.walk		=	nsstbl_walk,
+	.dump		=	nsstbl_dump_class,
+};
+
+static struct Qdisc_ops nsstbl_qdisc_ops __read_mostly = {
+	.next		=	NULL,
+	.id		=	"nsstbl",
+	.priv_size	=	sizeof(struct nsstbl_sched_data),
+	.cl_ops		=	&nsstbl_class_ops,
+	.enqueue	=	nsstbl_enqueue,
+	.dequeue	=	nsstbl_dequeue,
+	.peek		=	nsstbl_peek,
+	.drop		=	nsstbl_drop,
+	.init		=	nsstbl_init,
+	.reset		=	nsstbl_reset,
+	.destroy	=	nsstbl_destroy,
+	.change		=	nsstbl_change,
+	.dump		=	nsstbl_dump,
+	.owner		=	THIS_MODULE,
+};
+
+/* =========================== NSSPRIO ========================= */
+
+struct nssprio_sched_data {
+	struct nssqdisc_qdisc nq;	/* Common base class for all nss qdiscs */
+	int bands;			/* Number of priority bands to use */
+	struct Qdisc *queues[TCA_NSSPRIO_MAX_BANDS];
+					/* Array of child qdisc holder */
+};
+
+static int nssprio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+	return nssqdisc_enqueue(skb, sch);
+}
+
+static struct sk_buff *nssprio_dequeue(struct Qdisc *sch)
+{
+	return nssqdisc_dequeue(sch);
+}
+
+static unsigned int nssprio_drop(struct Qdisc *sch)
+{
+	return nssqdisc_drop(sch);
+}
+
+static struct sk_buff *nssprio_peek(struct Qdisc *sch)
+{
+	return nssqdisc_peek(sch);
+}
+
+static void nssprio_reset(struct Qdisc *sch)
+{
+	nssqdisc_reset(sch);
+}
+
+static void nssprio_destroy(struct Qdisc *sch)
+{
+	struct nssprio_sched_data *q = qdisc_priv(sch);
+	int i;
+
+	nssqdisc_info("Destroying prio");
+
+	/*
+	 * Destroy all attached child nodes before destroying prio
+	 */
+	for (i = 0; i < q->bands; i++)
+		qdisc_destroy(q->queues[i]);
+
+	/*
+	 * Stop the polling of basic stats
+	 */
+	nssqdisc_stop_basic_stats_polling(sch);
+
+	nssqdisc_destroy(sch);
+}
+
+static const struct nla_policy nssprio_policy[TCA_NSSPRIO_MAX + 1] = {
+	[TCA_NSSPRIO_PARMS] = { .len = sizeof(struct tc_nssprio_qopt) },
+};
+
+static int nssprio_change(struct Qdisc *sch, struct nlattr *opt)
+{
+	struct nssprio_sched_data *q;
+	struct nlattr *na[TCA_NSSPRIO_MAX + 1];
+	struct tc_nssprio_qopt *qopt;
+	int err;
+
+	q = qdisc_priv(sch);
+
+	if (opt == NULL) {
+		return -EINVAL;
+	}
+
+	err = nla_parse_nested(na, TCA_NSSPRIO_MAX, opt, nssprio_policy);
+	if (err < 0) {
+		return err;
+	}
+
+	if (na[TCA_NSSPRIO_PARMS] == NULL) {
+		return -EINVAL;
+	}
+
+	qopt = nla_data(na[TCA_NSSPRIO_PARMS]);
+
+	if (qopt->bands > TCA_NSSPRIO_MAX_BANDS) {
+		return -EINVAL;
+	}
+
+	q->bands = qopt->bands;
+	nssqdisc_info("Bands = %u\n", qopt->bands);
+
+	return 0;
+}
+
+static int nssprio_init(struct Qdisc *sch, struct nlattr *opt)
+{
+	struct nssprio_sched_data *q = qdisc_priv(sch);
+	int i;
+
+	if (opt == NULL)
+		return -EINVAL;
+
+	for (i = 0; i < TCA_NSSPRIO_MAX_BANDS; i++)
+		q->queues[i] = &noop_qdisc;
+
+	q->bands = 0;
+	if (nssqdisc_init(sch, NSS_SHAPER_NODE_TYPE_PRIO) < 0)
+		return -EINVAL;
+
+	nssqdisc_info("Nssprio initialized - handle %x parent %x\n",
+			sch->handle, sch->parent);
+	if (nssprio_change(sch, opt) < 0) {
+		nssqdisc_destroy(sch);
+		return -EINVAL;
+	}
+
+	/*
+	 * Start the stats polling timer
+	 */
+	nssqdisc_start_basic_stats_polling(sch);
+	return 0;
+}
+
+static int nssprio_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+	struct nssprio_sched_data *q = qdisc_priv(sch);
+	struct nlattr *opts = NULL;
+	struct tc_nssprio_qopt qopt;
+
+	nssqdisc_info("Nssprio dumping");
+	qopt.bands = q->bands;
+
+	opts = nla_nest_start(skb, TCA_OPTIONS);
+	if (opts == NULL)
+		goto nla_put_failure;
+	if (nla_put(skb, TCA_NSSPRIO_PARMS, sizeof(qopt), &qopt))
+		goto nla_put_failure;
+	return nla_nest_end(skb, opts);
+
+nla_put_failure:
+	nla_nest_cancel(skb, opts);
+	return -EMSGSIZE;
+}
+
+static int nssprio_graft(struct Qdisc *sch, unsigned long arg,
+				struct Qdisc *new, struct Qdisc **old)
+{
+	struct nssprio_sched_data *q = qdisc_priv(sch);
+	struct nssqdisc_qdisc *nq_new;
+	uint32_t band = (uint32_t)(arg - 1);
+	struct nss_shaper_configure shaper_node_attach, shaper_node_detach;
+
+	nssqdisc_info("Grafting band %u, available bands %u\n", band, q->bands);
+
+	if (new == NULL)
+		new = &noop_qdisc;
+
+	nq_new = qdisc_priv(new);
+
+	if (band >= q->bands)
+		return -EINVAL;
+
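+	/*
+	 * Swap the band's qdisc under the tree lock, then update the NSS
+	 * prio shaper node to match.
+	 */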
+	sch_tree_lock(sch);
+	*old = q->queues[band];
+	q->queues[band] = new;
+	qdisc_reset(*old);
+	sch_tree_unlock(sch);
+
+	nssqdisc_info("%s:Grafting old: %p with new: %p\n", __func__, *old, new);
+	if (*old != &noop_qdisc) {
+		nssqdisc_info("%s:Detaching old: %p\n", __func__, *old);
+		shaper_node_detach.mt.shaper_node_config.qos_tag = q->nq.qos_tag;
+		shaper_node_detach.mt.shaper_node_config.snc.prio_detach.priority = band;
+		if (nssqdisc_node_detach(sch, &shaper_node_detach,
+				NSS_SHAPER_CONFIG_TYPE_PRIO_DETACH) < 0) {
+			return -EINVAL;
+		}
+	}
+
+	if (new != &noop_qdisc) {
+		nssqdisc_info("%s:Attaching new child with qos tag: %x, priority: %u to "
+				"qos_tag: %x\n", __func__, nq_new->qos_tag, band, q->nq.qos_tag);
+		shaper_node_attach.mt.shaper_node_config.qos_tag = q->nq.qos_tag;
+		shaper_node_attach.mt.shaper_node_config.snc.prio_attach.child_qos_tag = nq_new->qos_tag;
+		shaper_node_attach.mt.shaper_node_config.snc.prio_attach.priority = band;
+		if (nssqdisc_node_attach(sch, &shaper_node_attach,
+				NSS_SHAPER_CONFIG_TYPE_PRIO_ATTACH) < 0) {
+			return -EINVAL;
+		}
+	}
+	nssqdisc_info("Nssprio grafted");
+
+	return 0;
+}
+
+static struct Qdisc *nssprio_leaf(struct Qdisc *sch, unsigned long arg)
+{
+	struct nssprio_sched_data *q = qdisc_priv(sch);
+	uint32_t band = (uint32_t)(arg - 1);
+
+	nssqdisc_info("Nssprio returns leaf");
+
+	if (band >= q->bands)
+		return NULL;
+
+	return q->queues[band];
+}
+
+static unsigned long nssprio_get(struct Qdisc *sch, u32 classid)
+{
+	struct nssprio_sched_data *q = qdisc_priv(sch);
+	unsigned long band = TC_H_MIN(classid);
+
+	nssqdisc_info("Inside get. Handle - %x Classid - %x Band %lu Available band %u", sch->handle, classid, band, q->bands);
+
+	if (band > q->bands)
+		return 0;
+
+	return band;
+}
+
+static void nssprio_put(struct Qdisc *sch, unsigned long arg)
+{
+	nssqdisc_info("Inside prio put\n");
+}
+
+static void nssprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
+{
+	struct nssprio_sched_data *q = qdisc_priv(sch);
+	int i;
+
+	if (arg->stop)
+		return;
+
+	for (i = 0; i < q->bands; i++) {
+		if (arg->count < arg->skip) {
+			arg->count++;
+			continue;
+		}
+		if (arg->fn(sch, i + 1, arg) < 0) {
+			arg->stop = 1;
+			break;
+		}
+		arg->count++;
+	}
+	nssqdisc_info("Nssprio walk called");
+}
+
+static int nssprio_dump_class(struct Qdisc *sch, unsigned long cl,
+			     struct sk_buff *skb, struct tcmsg *tcm)
+{
+	struct nssprio_sched_data *q = qdisc_priv(sch);
+
+	tcm->tcm_handle |= TC_H_MIN(cl);
+	tcm->tcm_info = q->queues[cl - 1]->handle;
+
+	nssqdisc_info("Nssprio dumping class");
+	return 0;
+}
+
+static int nssprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
+			     	    struct gnet_dump *d)
+{
+	struct nssprio_sched_data *q = qdisc_priv(sch);
+	struct Qdisc *cl_q;
+
+	cl_q = q->queues[cl - 1];
+	cl_q->qstats.qlen = cl_q->q.qlen;
+	if (gnet_stats_copy_basic(d, &cl_q->bstats) < 0 ||
+	    gnet_stats_copy_queue(d, &cl_q->qstats) < 0)
+		return -1;
+
+	nssqdisc_info("Nssprio dumping class stats");
+	return 0;
+}
+
+static const struct Qdisc_class_ops nssprio_class_ops = {
+	.graft		=	nssprio_graft,
+	.leaf		=	nssprio_leaf,
+	.get		=	nssprio_get,
+	.put		=	nssprio_put,
+	.walk		=	nssprio_walk,
+	.dump		=	nssprio_dump_class,
+	.dump_stats	=	nssprio_dump_class_stats,
+};
+
+static struct Qdisc_ops nssprio_qdisc_ops __read_mostly = {
+	.next		=	NULL,
+	.id		=	"nssprio",
+	.priv_size	=	sizeof(struct nssprio_sched_data),
+	.cl_ops		=	&nssprio_class_ops,
+	.enqueue	=	nssprio_enqueue,
+	.dequeue	=	nssprio_dequeue,
+	.peek		=	nssprio_peek,
+	.drop		=	nssprio_drop,
+	.init		=	nssprio_init,
+	.reset		=	nssprio_reset,
+	.destroy	=	nssprio_destroy,
+	.change		=	nssprio_change,
+	.dump		=	nssprio_dump,
+	.owner		=	THIS_MODULE,
+};
+
+/* ================== Module registration ================= */
+
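+/*
+ * Typical composition of these qdiscs (illustrative only; the matching tc
+ * userspace keywords are assumed here and are not part of this patch):
+ *
+ *	tc qdisc add dev eth0 root handle 1: nsstbl rate 100mbit burst 15k mtu 1500
+ *	tc qdisc add dev eth0 parent 1: handle 10: nssprio bands 3
+ *	tc qdisc add dev eth0 parent 10:1 handle 100: nsspfifo limit 100 set_default
+ *
+ * nsstbl shapes the link, nssprio splits traffic into priority bands, and
+ * each band terminates in a fifo or codel leaf.
+ */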
+static int __init nssqdisc_module_init(void)
+{
+	int ret;
+	nssqdisc_info("Module initializing");
+
+	nssqdisc_ctx = nss_register_shaping();
+	if (!nssqdisc_ctx) {
+		nssqdisc_error("Failed to register for NSS shaping\n");
+		return -ENODEV;
+	}
+
+	ret = register_qdisc(&nsspfifo_qdisc_ops);
+	if (ret != 0)
+		goto fail_pfifo;
+	nssqdisc_info("NSS pfifo registered");
+
+	ret = register_qdisc(&nssbfifo_qdisc_ops);
+	if (ret != 0)
+		goto fail_bfifo;
+	nssqdisc_info("NSS bfifo registered");
+
+	ret = register_qdisc(&nsscodel_qdisc_ops);
+	if (ret != 0)
+		goto fail_codel;
+	nssqdisc_info("NSSCodel registered");
+
+	ret = register_qdisc(&nsstbl_qdisc_ops);
+	if (ret != 0)
+		goto fail_tbl;
+	nssqdisc_info("NSSTBL registered");
+
+	ret = register_qdisc(&nssprio_qdisc_ops);
+	if (ret != 0)
+		goto fail_prio;
+	nssqdisc_info("NSSPRIO registered");
+
+	ret = register_netdevice_notifier(&nssqdisc_device_notifier);
+	if (ret != 0)
+		goto fail_notifier;
+	nssqdisc_info("NSS qdisc device notifiers registered");
+
+	return 0;
+
+fail_notifier:
+	unregister_qdisc(&nssprio_qdisc_ops);
+fail_prio:
+	unregister_qdisc(&nsstbl_qdisc_ops);
+fail_tbl:
+	unregister_qdisc(&nsscodel_qdisc_ops);
+fail_codel:
+	unregister_qdisc(&nssbfifo_qdisc_ops);
+fail_bfifo:
+	unregister_qdisc(&nsspfifo_qdisc_ops);
+fail_pfifo:
+	nss_unregister_shaping(nssqdisc_ctx);
+	return ret;
+}
+
+static void __exit nssqdisc_module_exit(void)
+{
+	unregister_qdisc(&nsspfifo_qdisc_ops);
+	nssqdisc_info("NSSPFIFO Unregistered");
+	unregister_qdisc(&nssbfifo_qdisc_ops);
+	nssqdisc_info("NSSBFIFO Unregistered");
+	unregister_qdisc(&nsscodel_qdisc_ops);
+	nssqdisc_info("NSSCODEL Unregistered");
+	unregister_qdisc(&nsstbl_qdisc_ops);
+	nssqdisc_info("NSSTBL Unregistered");
+	unregister_qdisc(&nssprio_qdisc_ops);
+	nssqdisc_info("NSSPRIO Unregistered");
+	unregister_netdevice_notifier(&nssqdisc_device_notifier);
+	nss_unregister_shaping(nssqdisc_ctx);
+}
+
+module_init(nssqdisc_module_init)
+module_exit(nssqdisc_module_exit)
+
+MODULE_LICENSE("GPL");
diff --git a/nss_tx_rx.c b/nss_tx_rx.c
index ff17ae6..029cbb7 100755
--- a/nss_tx_rx.c
+++ b/nss_tx_rx.c
@@ -24,6 +24,7 @@
 #include <linux/module.h>
 #include <linux/ppp_channel.h>
 #include <net/arp.h>
+#include <net/pkt_sched.h>
 
 /*
  * Global variables/extern declarations
@@ -98,6 +100,7 @@
 {
 	struct nss_ipv4_cb_params nicp;
 
+	/* FIXME: this should not be a memcpy */
 	nicp.reason = NSS_IPV4_CB_REASON_ESTABLISH;
 	memcpy(&nicp.params, nire, sizeof(struct nss_ipv4_establish));
 
@@ -138,6 +141,21 @@
 	nicp.params.sync.return_tx_packet_count = nirs->return_tx_packet_count;
 	nicp.params.sync.return_tx_byte_count = nirs->return_tx_byte_count;
 
+	nicp.params.sync.qos_tag = nirs->qos_tag;
+
+	nicp.params.sync.flags = 0;
+	if (nirs->flags & NSS_IPV4_RULE_CREATE_FLAG_NO_SEQ_CHECK) {
+		nicp.params.sync.flags |= NSS_IPV4_CREATE_FLAG_NO_SEQ_CHECK;
+	}
+
+	if (nirs->flags & NSS_IPV4_RULE_CREATE_FLAG_BRIDGE_FLOW) {
+		nicp.params.sync.flags |= NSS_IPV4_CREATE_FLAG_BRIDGE_FLOW;
+	}
+
+	if (nirs->flags & NSS_IPV4_RULE_CREATE_FLAG_ROUTED) {
+		nicp.params.sync.flags |= NSS_IPV4_CREATE_FLAG_ROUTED;
+	}
+
 	switch (nirs->reason) {
 	case NSS_IPV4_RULE_SYNC_REASON_STATS:
 		nicp.params.sync.reason = NSS_IPV4_SYNC_REASON_STATS;
@@ -224,6 +242,7 @@
 {
 	struct nss_ipv6_cb_params nicp;
 
+	/* FIXME: this should not be a memcpy */
 	nicp.reason = NSS_IPV6_CB_REASON_ESTABLISH;
 	memcpy(&nicp.params, nire, sizeof(struct nss_ipv6_establish));
 
@@ -264,6 +283,21 @@
 	nicp.params.sync.return_tx_packet_count = nirs->return_tx_packet_count;
 	nicp.params.sync.return_tx_byte_count = nirs->return_tx_byte_count;
 
+	nicp.params.sync.qos_tag = nirs->qos_tag;
+
+	nicp.params.sync.flags = 0;
+	if (nirs->flags & NSS_IPV6_RULE_CREATE_FLAG_NO_SEQ_CHECK) {
+		nicp.params.sync.flags |= NSS_IPV6_CREATE_FLAG_NO_SEQ_CHECK;
+	}
+
+	if (nirs->flags & NSS_IPV6_RULE_CREATE_FLAG_BRIDGE_FLOW) {
+		nicp.params.sync.flags |= NSS_IPV6_CREATE_FLAG_BRIDGE_FLOW;
+	}
+
+	if (nirs->flags & NSS_IPV6_RULE_CREATE_FLAG_ROUTED) {
+		nicp.params.sync.flags |= NSS_IPV6_CREATE_FLAG_ROUTED;
+	}
+
 	switch(nirs->reason) {
 	case NSS_IPV6_RULE_SYNC_REASON_FLUSH:
 	case NSS_IPV6_RULE_SYNC_REASON_DESTROY:
@@ -884,6 +918,350 @@
 }
 
 /*
+ * nss_rx_metadata_shaper_response()
+ *	Called to process a shaper response (to a shaper config command issued)
+ */
+static void nss_rx_metadata_shaper_response(struct nss_ctx_instance *nss_ctx, struct nss_rx_shaper_response *sr)
+{
+	struct nss_tx_shaper_configure *ntsc = &sr->request;
+	nss_shaper_config_response_callback_t cb;
+	void *cb_app_data;
+	struct module *owner;
+	struct nss_shaper_response response;
+
+	/*
+	 * Pass the response to the originator. The callback, its argument
+	 * and the owning module were carried through the NSS in the opaque
+	 * fields of the original request.
+	 */
+	cb = (nss_shaper_config_response_callback_t)ntsc->opaque1;
+	cb_app_data = (void *)ntsc->opaque2;
+	owner = (struct module *)ntsc->opaque3;
+
+	nss_info("%p: shaper response: %p, cb: %p, arg: %p, owner: %p, response type: %d, request type: %d\n",
+			nss_ctx, sr, cb, cb_app_data, owner, sr->type, ntsc->type);
+
+	/*
+	 * Create a response structure from the NSS metadata response
+	 */
+	switch (sr->type) {
+	case NSS_RX_SHAPER_RESPONSE_TYPE_SHAPER_ASSIGN_SUCCESS:
+		nss_info("%p: assign shaper success num: %u", nss_ctx, sr->rt.shaper_assign_success.shaper_num);
+		response.rt.shaper_assign_success.shaper_num = sr->rt.shaper_assign_success.shaper_num;
+		response.type = NSS_SHAPER_RESPONSE_TYPE_SHAPER_ASSIGN_SUCCESS;
+		break;
+	case NSS_RX_SHAPER_RESPONSE_TYPE_NO_SHAPERS:
+		nss_info("%p: no shapers", nss_ctx);
+		response.type = NSS_SHAPER_RESPONSE_TYPE_NO_SHAPERS;
+		break;
+	case NSS_RX_SHAPER_RESPONSE_TYPE_NO_SHAPER:
+		nss_info("%p: no shaper", nss_ctx);
+		response.type = NSS_SHAPER_RESPONSE_TYPE_NO_SHAPER;
+		break;
+	case NSS_RX_SHAPER_RESPONSE_TYPE_NO_SHAPER_NODE:
+		nss_info("%p: no shaper node", nss_ctx);
+		response.type = NSS_SHAPER_RESPONSE_TYPE_NO_SHAPER_NODE;
+		break;
+	case NSS_RX_SHAPER_RESPONSE_TYPE_NO_SHAPER_NODES:
+		nss_info("%p: no shaper nodes", nss_ctx);
+		response.type = NSS_SHAPER_RESPONSE_TYPE_NO_SHAPER_NODES;
+		break;
+	case NSS_RX_SHAPER_RESPONSE_TYPE_OLD:
+		nss_info("%p: old request", nss_ctx);
+		response.type = NSS_SHAPER_RESPONSE_TYPE_OLD;
+		break;
+	case NSS_RX_SHAPER_RESPONSE_TYPE_UNRECOGNISED:
+		nss_info("%p: unrecognised command", nss_ctx);
+		response.type = NSS_SHAPER_RESPONSE_TYPE_UNRECOGNISED;
+		break;
+	case NSS_RX_SHAPER_RESPONSE_TYPE_FIFO_QUEUE_LIMIT_INVALID:
+		nss_info("%p: fifo queue limit set fail", nss_ctx);
+		response.type = NSS_SHAPER_RESPONSE_TYPE_FIFO_QUEUE_LIMIT_INVALID;
+		break;
+	case NSS_RX_SHAPER_RESPONSE_TYPE_FIFO_DROP_MODE_INVALID:
+		nss_info("%p: fifo drop mode fail", nss_ctx);
+		response.type = NSS_SHAPER_RESPONSE_TYPE_FIFO_DROP_MODE_INVALID;
+		break;
+	case NSS_RX_SHAPER_RESPONSE_TYPE_BAD_DEFAULT_CHOICE:
+		nss_info("%p: bad default choice", nss_ctx);
+		response.type = NSS_SHAPER_RESPONSE_TYPE_BAD_DEFAULT_CHOICE;
+		break;
+	case NSS_RX_SHAPER_RESPONSE_TYPE_DUPLICATE_QOS_TAG:
+		nss_info("%p: Duplicate qos tag", nss_ctx);
+		response.type = NSS_SHAPER_RESPONSE_TYPE_DUPLICATE_QOS_TAG;
+		break;
+	case NSS_RX_SHAPER_RESPONSE_TYPE_TBL_CIR_RATE_AND_BURST_REQUIRED:
+		nss_info("%p: Burst size and rate must be provided for CIR", nss_ctx);
+		response.type = NSS_SHAPER_RESPONSE_TYPE_TBL_CIR_RATE_AND_BURST_REQUIRED;
+		break;
+	case NSS_RX_SHAPER_RESPONSE_TYPE_TBL_CIR_BURST_LESS_THAN_MTU:
+		nss_info("%p: CIR burst size cannot be smaller than mtu", nss_ctx);
+		response.type = NSS_SHAPER_RESPONSE_TYPE_TBL_CIR_BURST_LESS_THAN_MTU;
+		break;
+	case NSS_RX_SHAPER_RESPONSE_TYPE_TBL_PIR_BURST_LESS_THAN_MTU:
+		nss_info("%p: PIR burst size cannot be smaller than mtu", nss_ctx);
+		response.type = NSS_SHAPER_RESPONSE_TYPE_TBL_PIR_BURST_LESS_THAN_MTU;
+		break;
+	case NSS_RX_SHAPER_RESPONSE_TYPE_TBL_PIR_BURST_REQUIRED:
+		nss_info("%p: PIR burst size required if peakrate is specified", nss_ctx);
+		response.type = NSS_SHAPER_RESPONSE_TYPE_TBL_PIR_BURST_REQUIRED;
+		break;
+	case NSS_RX_SHAPER_RESPONSE_TYPE_CODEL_ALL_PARAMS_REQUIRED:
+		nss_info("%p: Codel requires non-zero value for target, interval and limit", nss_ctx);
+		response.type = NSS_SHAPER_RESPONSE_TYPE_CODEL_ALL_PARAMS_REQUIRED;
+		break;
+	case NSS_RX_SHAPER_RESPONSE_TYPE_SHAPER_NODE_ALLOC_SUCCESS:
+		nss_info("%p: node alloc success", nss_ctx);
+		response.type = NSS_SHAPER_RESPONSE_TYPE_SHAPER_NODE_ALLOC_SUCCESS;
+		break;
+	case NSS_RX_SHAPER_RESPONSE_TYPE_PRIO_ATTACH_SUCCESS:
+		nss_info("%p: prio attach success", nss_ctx);
+		response.type = NSS_SHAPER_RESPONSE_TYPE_PRIO_ATTACH_SUCCESS;
+		break;
+	case NSS_RX_SHAPER_RESPONSE_TYPE_PRIO_DETACH_SUCCESS:
+		nss_info("%p: prio detach success", nss_ctx);
+		response.type = NSS_SHAPER_RESPONSE_TYPE_PRIO_DETACH_SUCCESS;
+		break;
+	case NSS_RX_SHAPER_RESPONSE_TYPE_CODEL_CHANGE_PARAM_SUCCESS:
+		nss_info("%p: codel configure success", nss_ctx);
+		response.type = NSS_SHAPER_RESPONSE_TYPE_CODEL_CHANGE_PARAM_SUCCESS;
+		break;
+	case NSS_RX_SHAPER_RESPONSE_TYPE_TBL_ATTACH_SUCCESS:
+		nss_info("%p: tbl attach success", nss_ctx);
+		response.type = NSS_SHAPER_RESPONSE_TYPE_TBL_ATTACH_SUCCESS;
+		break;
+	case NSS_RX_SHAPER_RESPONSE_TYPE_TBL_DETACH_SUCCESS:
+		nss_info("%p: tbl detach success", nss_ctx);
+		response.type = NSS_SHAPER_RESPONSE_TYPE_TBL_DETACH_SUCCESS;
+		break;
+	case NSS_RX_SHAPER_RESPONSE_TYPE_TBL_CHANGE_PARAM_SUCCESS:
+		nss_info("%p: tbl configure success", nss_ctx);
+		response.type = NSS_SHAPER_RESPONSE_TYPE_TBL_CHANGE_PARAM_SUCCESS;
+		break;
+	case NSS_RX_SHAPER_RESPONSE_TYPE_BF_ATTACH_SUCCESS:
+		nss_info("%p: bf attach success", nss_ctx);
+		response.type = NSS_SHAPER_RESPONSE_TYPE_BF_ATTACH_SUCCESS;
+		break;
+	case NSS_RX_SHAPER_RESPONSE_TYPE_BF_DETACH_SUCCESS:
+		nss_info("%p: bf detach success", nss_ctx);
+		response.type = NSS_SHAPER_RESPONSE_TYPE_BF_DETACH_SUCCESS;
+		break;
+	case NSS_RX_SHAPER_RESPONSE_TYPE_BF_GROUP_ATTACH_SUCCESS:
+		nss_info("%p: bf group attach success", nss_ctx);
+		response.type = NSS_SHAPER_RESPONSE_TYPE_BF_GROUP_ATTACH_SUCCESS;
+		break;
+	case NSS_RX_SHAPER_RESPONSE_TYPE_BF_GROUP_DETACH_SUCCESS:
+		nss_info("%p: bf group detach success", nss_ctx);
+		response.type = NSS_SHAPER_RESPONSE_TYPE_BF_GROUP_DETACH_SUCCESS;
+		break;
+	case NSS_RX_SHAPER_RESPONSE_TYPE_BF_GROUP_CHANGE_PARAM_SUCCESS:
+		nss_info("%p: bf group configure success", nss_ctx);
+		response.type = NSS_SHAPER_RESPONSE_TYPE_BF_GROUP_CHANGE_PARAM_SUCCESS;
+		break;
+	case NSS_RX_SHAPER_RESPONSE_TYPE_SHAPER_SET_ROOT_SUCCESS:
+		nss_info("%p: shaper root set success", nss_ctx);
+		response.type = NSS_SHAPER_RESPONSE_TYPE_SHAPER_SET_ROOT_SUCCESS;
+		break;
+	case NSS_RX_SHAPER_RESPONSE_TYPE_SHAPER_SET_DEFAULT_SUCCESS:
+		nss_info("%p: shaper default set success", nss_ctx);
+		response.type = NSS_SHAPER_RESPONSE_TYPE_SHAPER_SET_DEFAULT_SUCCESS;
+		break;
+	case NSS_RX_SHAPER_RESPONSE_TYPE_SHAPER_NODE_FREE_SUCCESS:
+		nss_info("%p: node free success", nss_ctx);
+		response.type = NSS_SHAPER_RESPONSE_TYPE_SHAPER_NODE_FREE_SUCCESS;
+		break;
+	case NSS_RX_SHAPER_RESPONSE_TYPE_SHAPER_UNASSIGN_SUCCESS:
+		nss_info("%p: unassign shaper success", nss_ctx);
+		response.type = NSS_SHAPER_RESPONSE_TYPE_SHAPER_UNASSIGN_SUCCESS;
+		break;
+	case NSS_RX_SHAPER_RESPONSE_TYPE_FIFO_CHANGE_PARAM_SUCCESS:
+		nss_info("%p: fifo limit set success", nss_ctx);
+		response.type = NSS_SHAPER_RESPONSE_TYPE_FIFO_CHANGE_PARAM_SUCCESS;
+		break;
+	case NSS_RX_SHAPER_RESPONSE_TYPE_SHAPER_NODE_BASIC_STATS_GET_SUCCESS:
+		nss_info("%p: basic stats success", nss_ctx);
+		response.type = NSS_SHAPER_RESPONSE_TYPE_SHAPER_NODE_BASIC_STATS_GET_SUCCESS;
+		response.rt.shaper_node_basic_stats_get_success.delta.enqueued_bytes = sr->rt.shaper_node_basic_stats_get_success.delta.enqueued_bytes;
+		response.rt.shaper_node_basic_stats_get_success.delta.enqueued_packets = sr->rt.shaper_node_basic_stats_get_success.delta.enqueued_packets;
+		response.rt.shaper_node_basic_stats_get_success.delta.enqueued_bytes_dropped = sr->rt.shaper_node_basic_stats_get_success.delta.enqueued_bytes_dropped;
+		response.rt.shaper_node_basic_stats_get_success.delta.enqueued_packets_dropped = sr->rt.shaper_node_basic_stats_get_success.delta.enqueued_packets_dropped;
+		response.rt.shaper_node_basic_stats_get_success.delta.dequeued_bytes = sr->rt.shaper_node_basic_stats_get_success.delta.dequeued_bytes;
+		response.rt.shaper_node_basic_stats_get_success.delta.dequeued_packets = sr->rt.shaper_node_basic_stats_get_success.delta.dequeued_packets;
+		response.rt.shaper_node_basic_stats_get_success.delta.dequeued_bytes_dropped = sr->rt.shaper_node_basic_stats_get_success.delta.dequeued_bytes_dropped;
+		response.rt.shaper_node_basic_stats_get_success.delta.dequeued_packets_dropped = sr->rt.shaper_node_basic_stats_get_success.delta.dequeued_packets_dropped;
+		response.rt.shaper_node_basic_stats_get_success.delta.queue_overrun = sr->rt.shaper_node_basic_stats_get_success.delta.queue_overrun;
+		response.rt.shaper_node_basic_stats_get_success.qlen_bytes = sr->rt.shaper_node_basic_stats_get_success.qlen_bytes;
+		response.rt.shaper_node_basic_stats_get_success.qlen_packets = sr->rt.shaper_node_basic_stats_get_success.qlen_packets;
+		response.rt.shaper_node_basic_stats_get_success.packet_latency_peak_msec_dequeued = sr->rt.shaper_node_basic_stats_get_success.packet_latency_peak_msec_dequeued;
+		response.rt.shaper_node_basic_stats_get_success.packet_latency_minimum_msec_dequeued = sr->rt.shaper_node_basic_stats_get_success.packet_latency_minimum_msec_dequeued;
+		response.rt.shaper_node_basic_stats_get_success.packet_latency_peak_msec_dropped = sr->rt.shaper_node_basic_stats_get_success.packet_latency_peak_msec_dropped;
+		response.rt.shaper_node_basic_stats_get_success.packet_latency_minimum_msec_dropped = sr->rt.shaper_node_basic_stats_get_success.packet_latency_minimum_msec_dropped;
+		break;
+	default:
+		module_put(owner);
+		nss_warning("%p: unknown response type: %d\n", nss_ctx, sr->type);
+		return;
+	}
+
+	/*
+	 * Re-create the original request
+	 */
+	response.request.i_shaper = ntsc->i_shaper;
+	response.request.interface_num = ntsc->interface_num;
+	switch (ntsc->type) {
+	case NSS_TX_SHAPER_CONFIG_TYPE_ASSIGN_SHAPER:
+		nss_info("%p: assign shaper num: %u", nss_ctx, ntsc->mt.assign_shaper.shaper_num);
+		response.request.mt.assign_shaper.shaper_num = ntsc->mt.assign_shaper.shaper_num;
+		response.request.type = NSS_SHAPER_CONFIG_TYPE_ASSIGN_SHAPER;
+		break;
+	case NSS_TX_SHAPER_CONFIG_TYPE_ALLOC_SHAPER_NODE:
+		nss_info("%p: Alloc shaper node type: %d, qos_tag: %x",
+				nss_ctx, ntsc->mt.alloc_shaper_node.node_type, ntsc->mt.alloc_shaper_node.qos_tag);
+		response.request.mt.alloc_shaper_node.node_type = ntsc->mt.alloc_shaper_node.node_type;
+		response.request.mt.alloc_shaper_node.qos_tag = ntsc->mt.alloc_shaper_node.qos_tag;
+		response.request.type = NSS_SHAPER_CONFIG_TYPE_ALLOC_SHAPER_NODE;
+		break;
+	case NSS_TX_SHAPER_CONFIG_TYPE_FREE_SHAPER_NODE:
+		nss_info("%p: Free shaper node qos_tag: %x",
+				nss_ctx, ntsc->mt.free_shaper_node.qos_tag);
+		response.request.mt.free_shaper_node.qos_tag = ntsc->mt.free_shaper_node.qos_tag;
+		response.request.type = NSS_SHAPER_CONFIG_TYPE_FREE_SHAPER_NODE;
+		break;
+	case NSS_TX_SHAPER_CONFIG_TYPE_PRIO_ATTACH:
+		nss_info("%p: Prio node: %x, attach: %x, priority: %u",
+				nss_ctx, ntsc->mt.shaper_node_config.qos_tag,
+				ntsc->mt.shaper_node_config.snc.prio_attach.child_qos_tag, ntsc->mt.shaper_node_config.snc.prio_attach.priority);
+		response.request.mt.shaper_node_config.qos_tag = ntsc->mt.shaper_node_config.qos_tag;
+		response.request.mt.shaper_node_config.snc.prio_attach.child_qos_tag = ntsc->mt.shaper_node_config.snc.prio_attach.child_qos_tag;
+		response.request.mt.shaper_node_config.snc.prio_attach.priority = ntsc->mt.shaper_node_config.snc.prio_attach.priority;
+		response.request.type = NSS_SHAPER_CONFIG_TYPE_PRIO_ATTACH;
+		break;
+	case NSS_TX_SHAPER_CONFIG_TYPE_PRIO_DETACH:
+		nss_info("%p: Prio node: %x, detach @ priority: %u",
+				nss_ctx, ntsc->mt.shaper_node_config.qos_tag,
+				ntsc->mt.shaper_node_config.snc.prio_detach.priority);
+		response.request.mt.shaper_node_config.qos_tag = ntsc->mt.shaper_node_config.qos_tag;
+		response.request.mt.shaper_node_config.snc.prio_detach.priority = ntsc->mt.shaper_node_config.snc.prio_detach.priority;
+		response.request.type = NSS_SHAPER_CONFIG_TYPE_PRIO_DETACH;
+		break;
+	case NSS_TX_SHAPER_CONFIG_TYPE_CODEL_CHANGE_PARAM:
+		nss_info("%p: Codel node: %x, configure", nss_ctx, ntsc->mt.shaper_node_config.qos_tag);
+		response.request.mt.shaper_node_config.qos_tag = ntsc->mt.shaper_node_config.qos_tag;
+		response.request.mt.shaper_node_config.snc.codel_param.qlen_max = ntsc->mt.shaper_node_config.snc.codel_param.qlen_max;
+		response.request.mt.shaper_node_config.snc.codel_param.cap.interval = ntsc->mt.shaper_node_config.snc.codel_param.cap.interval;
+		response.request.mt.shaper_node_config.snc.codel_param.cap.target = ntsc->mt.shaper_node_config.snc.codel_param.cap.target;
+		response.request.mt.shaper_node_config.snc.codel_param.cap.mtu = ntsc->mt.shaper_node_config.snc.codel_param.cap.mtu;
+		response.request.type = NSS_SHAPER_CONFIG_TYPE_CODEL_CHANGE_PARAM;
+		break;
+	case NSS_TX_SHAPER_CONFIG_TYPE_TBL_ATTACH:
+		nss_info("%p: Tbl node: %x, attach: %x",
+				nss_ctx, ntsc->mt.shaper_node_config.qos_tag,
+				ntsc->mt.shaper_node_config.snc.tbl_attach.child_qos_tag);
+		response.request.mt.shaper_node_config.qos_tag = ntsc->mt.shaper_node_config.qos_tag;
+		response.request.mt.shaper_node_config.snc.tbl_attach.child_qos_tag = ntsc->mt.shaper_node_config.snc.tbl_attach.child_qos_tag;
+		response.request.type = NSS_SHAPER_CONFIG_TYPE_TBL_ATTACH;
+		break;
+	case NSS_TX_SHAPER_CONFIG_TYPE_TBL_DETACH:
+		nss_info("%p: Tbl node: %x, detach",
+				nss_ctx, ntsc->mt.shaper_node_config.qos_tag);
+		response.request.mt.shaper_node_config.qos_tag = ntsc->mt.shaper_node_config.qos_tag;
+		response.request.type = NSS_SHAPER_CONFIG_TYPE_TBL_DETACH;
+		break;
+	case NSS_TX_SHAPER_CONFIG_TYPE_TBL_CHANGE_PARAM:
+		nss_info("%p: Tbl node: %x, configure", nss_ctx, ntsc->mt.shaper_node_config.qos_tag);
+		response.request.mt.shaper_node_config.qos_tag = ntsc->mt.shaper_node_config.qos_tag;
+		response.request.mt.shaper_node_config.snc.tbl_param.qlen_bytes = ntsc->mt.shaper_node_config.snc.tbl_param.qlen_bytes;
+		response.request.mt.shaper_node_config.snc.tbl_param.lap_cir.rate = ntsc->mt.shaper_node_config.snc.tbl_param.lap_cir.rate;
+		response.request.mt.shaper_node_config.snc.tbl_param.lap_cir.burst = ntsc->mt.shaper_node_config.snc.tbl_param.lap_cir.burst;
+		response.request.mt.shaper_node_config.snc.tbl_param.lap_cir.max_size = ntsc->mt.shaper_node_config.snc.tbl_param.lap_cir.max_size;
+		response.request.mt.shaper_node_config.snc.tbl_param.lap_cir.short_circuit = ntsc->mt.shaper_node_config.snc.tbl_param.lap_cir.short_circuit;
+		response.request.mt.shaper_node_config.snc.tbl_param.lap_pir.rate = ntsc->mt.shaper_node_config.snc.tbl_param.lap_pir.rate;
+		response.request.mt.shaper_node_config.snc.tbl_param.lap_pir.burst = ntsc->mt.shaper_node_config.snc.tbl_param.lap_pir.burst;
+		response.request.mt.shaper_node_config.snc.tbl_param.lap_pir.max_size = ntsc->mt.shaper_node_config.snc.tbl_param.lap_pir.max_size;
+		response.request.mt.shaper_node_config.snc.tbl_param.lap_pir.short_circuit = ntsc->mt.shaper_node_config.snc.tbl_param.lap_pir.short_circuit;
+		response.request.type = NSS_SHAPER_CONFIG_TYPE_TBL_CHANGE_PARAM;
+		break;
+	case NSS_TX_SHAPER_CONFIG_TYPE_BF_ATTACH:
+		nss_info("%p: Bigfoot node: %x, attach: %x",
+				nss_ctx, ntsc->mt.shaper_node_config.qos_tag,
+				ntsc->mt.shaper_node_config.snc.bf_attach.child_qos_tag);
+		response.request.mt.shaper_node_config.qos_tag = ntsc->mt.shaper_node_config.qos_tag;
+		response.request.mt.shaper_node_config.snc.bf_attach.child_qos_tag = ntsc->mt.shaper_node_config.snc.bf_attach.child_qos_tag;
+		response.request.type = NSS_SHAPER_CONFIG_TYPE_BF_ATTACH;
+		break;
+	case NSS_TX_SHAPER_CONFIG_TYPE_BF_DETACH:
+		nss_info("%p: Bigfoot node: %x, detach: %x",
+				nss_ctx, ntsc->mt.shaper_node_config.qos_tag,
+				ntsc->mt.shaper_node_config.snc.bf_detach.child_qos_tag);
+		response.request.mt.shaper_node_config.qos_tag = ntsc->mt.shaper_node_config.qos_tag;
+		response.request.mt.shaper_node_config.snc.bf_detach.child_qos_tag = ntsc->mt.shaper_node_config.snc.bf_detach.child_qos_tag;
+		response.request.type = NSS_SHAPER_CONFIG_TYPE_BF_DETACH;
+		break;
+	case NSS_TX_SHAPER_CONFIG_TYPE_BF_GROUP_ATTACH:
+		nss_info("%p: Bigfoot group node: %x, attach: %x",
+				nss_ctx, ntsc->mt.shaper_node_config.qos_tag,
+				ntsc->mt.shaper_node_config.snc.bf_group_attach.child_qos_tag);
+		response.request.mt.shaper_node_config.qos_tag = ntsc->mt.shaper_node_config.qos_tag;
+		response.request.mt.shaper_node_config.snc.bf_group_attach.child_qos_tag = ntsc->mt.shaper_node_config.snc.bf_group_attach.child_qos_tag;
+		response.request.type = NSS_SHAPER_CONFIG_TYPE_BF_GROUP_ATTACH;
+		break;
+	case NSS_TX_SHAPER_CONFIG_TYPE_BF_GROUP_DETACH:
+		nss_info("%p: Bigfoot group node: %x, detach",
+				nss_ctx, ntsc->mt.shaper_node_config.qos_tag);
+		response.request.mt.shaper_node_config.qos_tag = ntsc->mt.shaper_node_config.qos_tag;
+		response.request.type = NSS_SHAPER_CONFIG_TYPE_BF_GROUP_DETACH;
+		break;
+	case NSS_TX_SHAPER_CONFIG_TYPE_BF_GROUP_CHANGE_PARAM:
+		nss_info("%p: Bigfoot group node: %x, configure", nss_ctx, ntsc->mt.shaper_node_config.qos_tag);
+		response.request.mt.shaper_node_config.qos_tag = ntsc->mt.shaper_node_config.qos_tag;
+		response.request.mt.shaper_node_config.snc.bf_group_param.qlen_bytes = ntsc->mt.shaper_node_config.snc.bf_group_param.qlen_bytes;
+		response.request.mt.shaper_node_config.snc.bf_group_param.quantum = ntsc->mt.shaper_node_config.snc.bf_group_param.quantum;
+		response.request.mt.shaper_node_config.snc.bf_group_param.lap.rate = ntsc->mt.shaper_node_config.snc.bf_group_param.lap.rate;
+		response.request.mt.shaper_node_config.snc.bf_group_param.lap.burst = ntsc->mt.shaper_node_config.snc.bf_group_param.lap.burst;
+		response.request.mt.shaper_node_config.snc.bf_group_param.lap.max_size = ntsc->mt.shaper_node_config.snc.bf_group_param.lap.max_size;
+		response.request.type = NSS_SHAPER_CONFIG_TYPE_BF_GROUP_CHANGE_PARAM;
+		break;
+	case NSS_TX_SHAPER_CONFIG_TYPE_SET_DEFAULT:
+		nss_info("%p: Set default node qos_tag: %x",
+				nss_ctx, ntsc->mt.set_default_node.qos_tag);
+		response.request.mt.set_default_node.qos_tag = ntsc->mt.set_default_node.qos_tag;
+		response.request.type = NSS_SHAPER_CONFIG_TYPE_SET_DEFAULT;
+		break;
+	case NSS_TX_SHAPER_CONFIG_TYPE_SET_ROOT:
+		nss_info("%p: Set root node qos_tag: %x",
+				nss_ctx, ntsc->mt.set_root_node.qos_tag);
+		response.request.mt.set_root_node.qos_tag = ntsc->mt.set_root_node.qos_tag;
+		response.request.type = NSS_SHAPER_CONFIG_TYPE_SET_ROOT;
+		break;
+	case NSS_TX_SHAPER_CONFIG_TYPE_UNASSIGN_SHAPER:
+		nss_info("%p: unassign shaper num: %u", nss_ctx, ntsc->mt.unassign_shaper.shaper_num);
+		response.request.mt.unassign_shaper.shaper_num = ntsc->mt.unassign_shaper.shaper_num;
+		response.request.type = NSS_SHAPER_CONFIG_TYPE_UNASSIGN_SHAPER;
+		break;
+	case NSS_TX_SHAPER_CONFIG_TYPE_FIFO_CHANGE_PARAM:
+		nss_info("%p: fifo param limit set: %u, drop_mode: %d", nss_ctx, ntsc->mt.shaper_node_config.snc.fifo_param.limit,
+				ntsc->mt.shaper_node_config.snc.fifo_param.drop_mode);
+		response.request.mt.shaper_node_config.qos_tag = ntsc->mt.shaper_node_config.qos_tag;
+		response.request.mt.shaper_node_config.snc.fifo_param.limit = ntsc->mt.shaper_node_config.snc.fifo_param.limit;
+		response.request.mt.shaper_node_config.snc.fifo_param.drop_mode = (nss_shaper_config_fifo_drop_mode_t)ntsc->mt.shaper_node_config.snc.fifo_param.drop_mode;
+		response.request.type = NSS_SHAPER_CONFIG_TYPE_FIFO_CHANGE_PARAM;
+		break;
+	case NSS_TX_SHAPER_CONFIG_TYPE_SHAPER_NODE_BASIC_STATS_GET:
+		nss_info("%p: basic stats get for: %u", nss_ctx, ntsc->mt.shaper_node_basic_stats_get.qos_tag);
+		response.request.mt.shaper_node_basic_stats_get.qos_tag = ntsc->mt.shaper_node_basic_stats_get.qos_tag;
+		response.request.type = NSS_SHAPER_CONFIG_TYPE_SHAPER_NODE_BASIC_STATS_GET;
+		break;
+	default:
+		module_put(owner);
+		nss_warning("%p: Unknown request type: %d", nss_ctx, ntsc->type);
+		return;
+	}
+
+	/*
+	 * Return the response
+	 */
+	cb(cb_app_data, &response);
+	module_put(owner);
+}
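+
+/*
+ * Illustrative sketch (not part of this patch): the registrant callback
+ * invoked above as cb(cb_app_data, &response) can switch on response->type
+ * and consult the reconstructed response->request. The function name, the
+ * app_data structure and the success enum/union member shown are
+ * assumptions following the naming pattern used above.
+ *
+ *	static void my_qdisc_response(void *app_data,
+ *				      struct nss_shaper_response *response)
+ *	{
+ *		struct my_qdisc *q = (struct my_qdisc *)app_data;
+ *
+ *		switch (response->type) {
+ *		case NSS_SHAPER_RESPONSE_TYPE_SHAPER_ASSIGN_SUCCESS:
+ *			q->shaper_num = response->rt.shaper_assign_success.shaper_num;
+ *			break;
+ *		default:
+ *			break;
+ *		}
+ *	}
+ */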
+
+/*
  * nss_rx_handle_status_pkt()
  *	Handle the metadata/status packet.
  */
@@ -892,6 +1272,7 @@
 	struct nss_rx_metadata_object *nrmo;
 
 	nrmo = (struct nss_rx_metadata_object *)nbuf->data;
+
 	switch (nrmo->type) {
 	case NSS_RX_METADATA_TYPE_IPV4_RULE_ESTABLISH:
 		nss_rx_metadata_ipv4_rule_establish(nss_ctx, &nrmo->sub.ipv4_rule_establish);
@@ -953,6 +1334,9 @@
 		nss_rx_metadata_ipsec_events_sync(nss_ctx, &nrmo->sub.ipsec_events_sync);
 		break;
 
+	case NSS_RX_METADATA_TYPE_SHAPER_RESPONSE:
+		nss_rx_metadata_shaper_response(nss_ctx, &nrmo->sub.shaper_response);
+		break;
 	default:
 		/*
 		 * WARN: Unknown metadata type
@@ -1012,6 +1396,7 @@
 
 	nirc = &ntmo->sub.ipv4_rule_create;
 	nirc->protocol = (uint8_t)unic->protocol;
+	nirc->qos_tag = unic->qos_tag;
 
 	nirc->flow_pppoe_session_id = unic->flow_pppoe_session_id;
 	memcpy(nirc->flow_pppoe_remote_mac, unic->flow_pppoe_remote_mac, ETH_ALEN);
@@ -1057,6 +1442,10 @@
 		nirc->flags |= NSS_IPV4_RULE_CREATE_FLAG_BRIDGE_FLOW;
 	}
 
+	if (unic->flags & NSS_IPV4_CREATE_FLAG_ROUTED) {
+		nirc->flags |= NSS_IPV4_RULE_CREATE_FLAG_ROUTED;
+	}
+
 	status = nss_core_send_buffer(nss_ctx, 0, nbuf, NSS_IF_CMD_QUEUE, H2N_BUFFER_CTRL, 0);
 	if (status != NSS_CORE_STATUS_SUCCESS) {
 		dev_kfree_skb_any(nbuf);
@@ -1160,6 +1549,7 @@
 
 	nirc = &ntmo->sub.ipv6_rule_create;
 	nirc->protocol = (uint8_t)unic->protocol;
+	nirc->qos_tag = unic->qos_tag;
 
 	nirc->flow_pppoe_session_id = unic->flow_pppoe_session_id;
 	memcpy(nirc->flow_pppoe_remote_mac, unic->flow_pppoe_remote_mac, ETH_ALEN);
@@ -1203,6 +1593,10 @@
 		nirc->flags |= NSS_IPV6_RULE_CREATE_FLAG_BRIDGE_FLOW;
 	}
 
+	if (unic->flags & NSS_IPV6_CREATE_FLAG_ROUTED) {
+		nirc->flags |= NSS_IPV6_RULE_CREATE_FLAG_ROUTED;
+	}
+
 	status = nss_core_send_buffer(nss_ctx, 0, nbuf, NSS_IF_CMD_QUEUE, H2N_BUFFER_CTRL, 0);
 	if (status != NSS_CORE_STATUS_SUCCESS) {
 		dev_kfree_skb_any(nbuf);
@@ -2263,6 +2657,17 @@
 }
 
 /*
+ * nss_virt_if_get_interface_num()
+ *	Get interface number for a virtual interface
+ */
+int32_t nss_virt_if_get_interface_num(void *if_ctx)
+{
+	int32_t if_num = (int32_t)if_ctx;
+	nss_assert(NSS_IS_IF_TYPE(VIRTUAL, if_num));
+	return if_num;
+}
+
+/*
  * nss_create_virt_if()
  */
 void *nss_create_virt_if(struct net_device *if_ctx)
@@ -2526,6 +2931,588 @@
 	nss_top_main.profiler_ctx[core_id] = NULL;
 }
 
+/*
+ * nss_register_shaping()
+ *	Register to obtain an NSS context for basic shaping operations
+ */
+void *nss_register_shaping(void)
+{
+	if (nss_top_main.shaping_handler_id == (uint8_t)-1) {
+		nss_warning("%p: SHAPING IS NOT ENABLED", __func__);
+		return NULL;
+	}
+	return (void *)&nss_top_main.nss[nss_top_main.shaping_handler_id];
+}
+
+/*
+ * nss_unregister_shaping()
+ *	Unregister an NSS shaping context
+ */
+void nss_unregister_shaping(void *nss_ctx)
+{
+}
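+
+/*
+ * Illustrative usage (not part of this patch): a shaping client such as the
+ * qca-nss-qdisc module pairs these calls around its lifetime. A minimal
+ * sketch; sctx is a hypothetical name.
+ *
+ *	void *sctx = nss_register_shaping();
+ *	if (!sctx)
+ *		return -ENODEV;
+ *	...
+ *	nss_unregister_shaping(sctx);
+ */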
+
+/*
+ * nss_shaper_config_send()
+ *	Issue a config message to the shaping subsystem of the NSS.
+ */
+nss_tx_status_t nss_shaper_config_send(void *ctx, struct nss_shaper_configure *config)
+{
+	struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *) ctx;
+	struct sk_buff *nbuf;
+	int32_t status;
+	struct nss_tx_metadata_object *ntmo;
+	struct nss_tx_shaper_configure *ntsc;
+
+	nss_info("%p:Shaper config: %p send:  if_num: %u i_shaper: %u, type: %d, owner: %p\n", nss_ctx,
+		config, config->interface_num, config->i_shaper, config->type, config->owner);
+	NSS_VERIFY_CTX_MAGIC(nss_ctx);
+
+	/*
+	 * Core should be ready
+	 */
+	if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) {
+		nss_warning("%p: Shaper config: %p core not ready", nss_ctx, config);
+		return NSS_TX_FAILURE_NOT_READY;
+	}
+
+	/*
+	 * Allocate buffer for command
+	 */
+	nbuf = dev_alloc_skb(NSS_NBUF_PAYLOAD_SIZE);
+	if (unlikely(!nbuf)) {
+		spin_lock_bh(&nss_ctx->nss_top->stats_lock);
+		nss_ctx->nss_top->stats_drv[NSS_STATS_DRV_NBUF_ALLOC_FAILS]++;
+		spin_unlock_bh(&nss_ctx->nss_top->stats_lock);
+		nss_warning("%p: Shaper config: %p alloc fail", nss_ctx, config);
+		return NSS_TX_FAILURE;
+	}
+
+	/*
+	 * Hold the module until we are done with the request
+	 */
+	if (!try_module_get(config->owner)) {
+		nss_warning("%p: Shaper config: %p module shutting down: %p", nss_ctx, config, config->owner);
+		return NSS_TX_FAILURE;
+	}
+
+	/*
+	 * Copy the HLOS API structures command into the NSS metadata object command.
+	 */
+	nss_info("%p: config type: %d", nss_ctx, config->type);
+	ntmo = (struct nss_tx_metadata_object *)skb_put(nbuf, sizeof(struct nss_tx_metadata_object));
+	ntmo->type = NSS_TX_METADATA_TYPE_SHAPER_CONFIGURE;
+	ntsc = &ntmo->sub.shaper_configure;
+
+	ntsc->opaque1 = (uint32_t)config->cb;
+	ntsc->opaque2 = (uint32_t)config->app_data;
+	ntsc->opaque3 = (uint32_t)config->owner;
+	ntsc->i_shaper = config->i_shaper;
+	ntsc->interface_num = config->interface_num;
+
+	switch(config->type) {
+	case NSS_SHAPER_CONFIG_TYPE_ASSIGN_SHAPER:
+		nss_info("%p: Assign shaper num: %u", nss_ctx, config->mt.assign_shaper.shaper_num);
+		ntsc->mt.assign_shaper.shaper_num = config->mt.assign_shaper.shaper_num;
+		ntsc->type = NSS_TX_SHAPER_CONFIG_TYPE_ASSIGN_SHAPER;
+		break;
+	case NSS_SHAPER_CONFIG_TYPE_ALLOC_SHAPER_NODE:
+		nss_info("%p: Alloc shaper node type: %d, qos_tag: %x",
+				nss_ctx, config->mt.alloc_shaper_node.node_type, config->mt.alloc_shaper_node.qos_tag);
+		ntsc->mt.alloc_shaper_node.node_type = config->mt.alloc_shaper_node.node_type;
+		ntsc->mt.alloc_shaper_node.qos_tag = config->mt.alloc_shaper_node.qos_tag;
+		ntsc->type = NSS_TX_SHAPER_CONFIG_TYPE_ALLOC_SHAPER_NODE;
+		break;
+	case NSS_SHAPER_CONFIG_TYPE_FREE_SHAPER_NODE:
+		nss_info("%p: Free shaper node qos_tag: %x",
+				nss_ctx, config->mt.alloc_shaper_node.qos_tag);
+		ntsc->mt.free_shaper_node.qos_tag = config->mt.free_shaper_node.qos_tag;
+		ntsc->type = NSS_TX_SHAPER_CONFIG_TYPE_FREE_SHAPER_NODE;
+		break;
+	case NSS_SHAPER_CONFIG_TYPE_PRIO_ATTACH:
+		nss_info("%p: Prio node: %x, attach: %x, priority: %u",
+				nss_ctx, config->mt.shaper_node_config.qos_tag,
+				config->mt.shaper_node_config.snc.prio_attach.child_qos_tag, config->mt.shaper_node_config.snc.prio_attach.priority);
+		ntsc->mt.shaper_node_config.qos_tag = config->mt.shaper_node_config.qos_tag;
+		ntsc->mt.shaper_node_config.snc.prio_attach.child_qos_tag = config->mt.shaper_node_config.snc.prio_attach.child_qos_tag;
+		ntsc->mt.shaper_node_config.snc.prio_attach.priority = config->mt.shaper_node_config.snc.prio_attach.priority;
+		ntsc->type = NSS_TX_SHAPER_CONFIG_TYPE_PRIO_ATTACH;
+		break;
+	case NSS_SHAPER_CONFIG_TYPE_PRIO_DETACH:
+		nss_info("%p: Prio node: %x, detach @ priority: %u",
+				nss_ctx, config->mt.shaper_node_config.qos_tag,
+				config->mt.shaper_node_config.snc.prio_detach.priority);
+		ntsc->mt.shaper_node_config.qos_tag = config->mt.shaper_node_config.qos_tag;
+		ntsc->mt.shaper_node_config.snc.prio_detach.priority = config->mt.shaper_node_config.snc.prio_detach.priority;
+		ntsc->type = NSS_TX_SHAPER_CONFIG_TYPE_PRIO_DETACH;
+		break;
+	case NSS_SHAPER_CONFIG_TYPE_CODEL_CHANGE_PARAM:
+		nss_info("%p: Shaper node: %x", nss_ctx, config->mt.shaper_node_config.qos_tag);
+		ntsc->mt.shaper_node_config.qos_tag = config->mt.shaper_node_config.qos_tag;
+		ntsc->mt.shaper_node_config.snc.codel_param.qlen_max = config->mt.shaper_node_config.snc.codel_param.qlen_max;
+		ntsc->mt.shaper_node_config.snc.codel_param.cap.interval = config->mt.shaper_node_config.snc.codel_param.cap.interval;
+		ntsc->mt.shaper_node_config.snc.codel_param.cap.target = config->mt.shaper_node_config.snc.codel_param.cap.target;
+		ntsc->mt.shaper_node_config.snc.codel_param.cap.mtu = config->mt.shaper_node_config.snc.codel_param.cap.mtu;
+		ntsc->type = NSS_TX_SHAPER_CONFIG_TYPE_CODEL_CHANGE_PARAM;
+		break;
+	case NSS_SHAPER_CONFIG_TYPE_TBL_ATTACH:
+		nss_info("%p: Tbl node: %x attach: %x",
+				nss_ctx, config->mt.shaper_node_config.qos_tag,
+				config->mt.shaper_node_config.snc.tbl_attach.child_qos_tag);
+		ntsc->mt.shaper_node_config.qos_tag = config->mt.shaper_node_config.qos_tag;
+		ntsc->mt.shaper_node_config.snc.tbl_attach.child_qos_tag = config->mt.shaper_node_config.snc.tbl_attach.child_qos_tag;
+		ntsc->type = NSS_TX_SHAPER_CONFIG_TYPE_TBL_ATTACH;
+		break;
+	case NSS_SHAPER_CONFIG_TYPE_TBL_DETACH:
+		nss_info("%p: Tbl node: %x, detach",
+				nss_ctx, config->mt.shaper_node_config.qos_tag);
+		ntsc->mt.shaper_node_config.qos_tag = config->mt.shaper_node_config.qos_tag;
+		ntsc->type = NSS_TX_SHAPER_CONFIG_TYPE_TBL_DETACH;
+		break;
+	case NSS_SHAPER_CONFIG_TYPE_TBL_CHANGE_PARAM:
+		nss_info("%p: Tbl node: %x configure", nss_ctx, config->mt.shaper_node_config.qos_tag);
+		ntsc->mt.shaper_node_config.qos_tag = config->mt.shaper_node_config.qos_tag;
+		ntsc->mt.shaper_node_config.snc.tbl_param.qlen_bytes = config->mt.shaper_node_config.snc.tbl_param.qlen_bytes;
+		ntsc->mt.shaper_node_config.snc.tbl_param.lap_cir.rate = config->mt.shaper_node_config.snc.tbl_param.lap_cir.rate;
+		ntsc->mt.shaper_node_config.snc.tbl_param.lap_cir.burst = config->mt.shaper_node_config.snc.tbl_param.lap_cir.burst;
+		ntsc->mt.shaper_node_config.snc.tbl_param.lap_cir.max_size = config->mt.shaper_node_config.snc.tbl_param.lap_cir.max_size;
+		ntsc->mt.shaper_node_config.snc.tbl_param.lap_cir.short_circuit = config->mt.shaper_node_config.snc.tbl_param.lap_cir.short_circuit;
+		ntsc->mt.shaper_node_config.snc.tbl_param.lap_pir.rate = config->mt.shaper_node_config.snc.tbl_param.lap_pir.rate;
+		ntsc->mt.shaper_node_config.snc.tbl_param.lap_pir.burst = config->mt.shaper_node_config.snc.tbl_param.lap_pir.burst;
+		ntsc->mt.shaper_node_config.snc.tbl_param.lap_pir.max_size = config->mt.shaper_node_config.snc.tbl_param.lap_pir.max_size;
+		ntsc->mt.shaper_node_config.snc.tbl_param.lap_pir.short_circuit = config->mt.shaper_node_config.snc.tbl_param.lap_pir.short_circuit;
+		ntsc->type = NSS_TX_SHAPER_CONFIG_TYPE_TBL_CHANGE_PARAM;
+		break;
+	case NSS_SHAPER_CONFIG_TYPE_BF_ATTACH:
+		nss_info("%p: Bigfoot node: %x attach: %x",
+				nss_ctx, config->mt.shaper_node_config.qos_tag,
+				config->mt.shaper_node_config.snc.bf_attach.child_qos_tag);
+		ntsc->mt.shaper_node_config.qos_tag = config->mt.shaper_node_config.qos_tag;
+		ntsc->mt.shaper_node_config.snc.bf_attach.child_qos_tag = config->mt.shaper_node_config.snc.bf_attach.child_qos_tag;
+		ntsc->type = NSS_TX_SHAPER_CONFIG_TYPE_BF_ATTACH;
+		break;
+	case NSS_SHAPER_CONFIG_TYPE_BF_DETACH:
+		nss_info("%p: Bigfoot node: %x, detach: %x",
+				nss_ctx, config->mt.shaper_node_config.qos_tag,
+				config->mt.shaper_node_config.snc.bf_attach.child_qos_tag);
+		ntsc->mt.shaper_node_config.qos_tag = config->mt.shaper_node_config.qos_tag;
+		ntsc->mt.shaper_node_config.snc.bf_detach.child_qos_tag = config->mt.shaper_node_config.snc.bf_detach.child_qos_tag;
+		ntsc->type = NSS_TX_SHAPER_CONFIG_TYPE_BF_DETACH;
+		break;
+	case NSS_SHAPER_CONFIG_TYPE_BF_GROUP_ATTACH:
+		nss_info("%p: Bigfoot group node: %x attach: %x",
+				nss_ctx, config->mt.shaper_node_config.qos_tag,
+				config->mt.shaper_node_config.snc.bf_group_attach.child_qos_tag);
+		ntsc->mt.shaper_node_config.qos_tag = config->mt.shaper_node_config.qos_tag;
+		ntsc->mt.shaper_node_config.snc.bf_group_attach.child_qos_tag = config->mt.shaper_node_config.snc.bf_group_attach.child_qos_tag;
+		ntsc->type = NSS_TX_SHAPER_CONFIG_TYPE_BF_GROUP_ATTACH;
+		break;
+	case NSS_SHAPER_CONFIG_TYPE_BF_GROUP_DETACH:
+		nss_info("%p: Bigfoot group node: %x, detach",
+				nss_ctx, config->mt.shaper_node_config.qos_tag);
+		ntsc->mt.shaper_node_config.qos_tag = config->mt.shaper_node_config.qos_tag;
+		ntsc->type = NSS_TX_SHAPER_CONFIG_TYPE_BF_GROUP_DETACH;
+		break;
+	case NSS_SHAPER_CONFIG_TYPE_BF_GROUP_CHANGE_PARAM:
+		nss_info("%p: Tbl node: %x configure", nss_ctx, config->mt.shaper_node_config.qos_tag);
+		ntsc->mt.shaper_node_config.qos_tag = config->mt.shaper_node_config.qos_tag;
+		ntsc->mt.shaper_node_config.snc.bf_group_param.qlen_bytes = config->mt.shaper_node_config.snc.bf_group_param.qlen_bytes;
+		ntsc->mt.shaper_node_config.snc.bf_group_param.quantum = config->mt.shaper_node_config.snc.bf_group_param.quantum;
+		ntsc->mt.shaper_node_config.snc.bf_group_param.lap.rate = config->mt.shaper_node_config.snc.bf_group_param.lap.rate;
+		ntsc->mt.shaper_node_config.snc.bf_group_param.lap.burst = config->mt.shaper_node_config.snc.bf_group_param.lap.burst;
+		ntsc->mt.shaper_node_config.snc.bf_group_param.lap.max_size = config->mt.shaper_node_config.snc.bf_group_param.lap.max_size;
+		ntsc->type = NSS_TX_SHAPER_CONFIG_TYPE_BF_GROUP_CHANGE_PARAM;
+		break;
+	case NSS_SHAPER_CONFIG_TYPE_SET_DEFAULT:
+		nss_info("%p: Set default node qos_tag: %x",
+				nss_ctx, config->mt.set_default_node.qos_tag);
+		ntsc->mt.set_default_node.qos_tag = config->mt.set_default_node.qos_tag;
+		ntsc->type = NSS_TX_SHAPER_CONFIG_TYPE_SET_DEFAULT;
+		break;
+	case NSS_SHAPER_CONFIG_TYPE_SET_ROOT:
+		nss_info("%p: Set root node qos_tag: %x",
+				nss_ctx, config->mt.set_root_node.qos_tag);
+		ntsc->mt.set_root_node.qos_tag = config->mt.set_root_node.qos_tag;
+		ntsc->type = NSS_TX_SHAPER_CONFIG_TYPE_SET_ROOT;
+		break;
+	case NSS_SHAPER_CONFIG_TYPE_UNASSIGN_SHAPER:
+		nss_info("%p: UNassign shaper num: %u", nss_ctx, config->mt.unassign_shaper.shaper_num);
+		ntsc->mt.unassign_shaper.shaper_num = config->mt.unassign_shaper.shaper_num;
+		ntsc->type = NSS_TX_SHAPER_CONFIG_TYPE_UNASSIGN_SHAPER;
+		break;
+	case NSS_SHAPER_CONFIG_TYPE_FIFO_CHANGE_PARAM:
+		nss_info("%p: fifo parameter set: %u, drop mode: %d", nss_ctx, config->mt.shaper_node_config.snc.fifo_param.limit,
+				config->mt.shaper_node_config.snc.fifo_param.drop_mode);
+		ntsc->mt.shaper_node_config.qos_tag = config->mt.shaper_node_config.qos_tag;
+		ntsc->mt.shaper_node_config.snc.fifo_param.limit = config->mt.shaper_node_config.snc.fifo_param.limit;
+		ntsc->mt.shaper_node_config.snc.fifo_param.drop_mode = (nss_tx_shaper_config_fifo_drop_mode_t)config->mt.shaper_node_config.snc.fifo_param.drop_mode;
+		ntsc->type = NSS_TX_SHAPER_CONFIG_TYPE_FIFO_CHANGE_PARAM;
+		break;
+	case NSS_SHAPER_CONFIG_TYPE_SHAPER_NODE_BASIC_STATS_GET:
+		nss_info("%p: Get basic statistics for: %u", nss_ctx, config->mt.shaper_node_basic_stats_get.qos_tag);
+		ntsc->mt.shaper_node_basic_stats_get.qos_tag = config->mt.shaper_node_basic_stats_get.qos_tag;
+		ntsc->type = NSS_TX_SHAPER_CONFIG_TYPE_SHAPER_NODE_BASIC_STATS_GET;
+		break;
+	default:
+		/*
+		 * Release module
+		 */
+		module_put(config->owner);
+		dev_kfree_skb_any(nbuf);
+		nss_warning("%p: Unknown type: %d", nss_ctx, config->type);
+		return NSS_TX_FAILURE;
+	}
+
+	status = nss_core_send_buffer(nss_ctx, 0, nbuf, NSS_IF_CMD_QUEUE, H2N_BUFFER_CTRL, 0);
+	if (status != NSS_CORE_STATUS_SUCCESS) {
+		/*
+		 * Release module
+		 */
+		module_put(config->owner);
+		dev_kfree_skb_any(nbuf);
+		nss_warning("%p: Shaper config: %p Unable to enqueue\n", nss_ctx, config);
+		return NSS_TX_FAILURE;
+	}
+
+	nss_hal_send_interrupt(nss_ctx->nmap, nss_ctx->h2n_desc_rings[NSS_IF_CMD_QUEUE].desc_ring.int_bit,
+								NSS_REGS_H2N_INTR_STATUS_DATA_COMMAND_QUEUE);
+
+	NSS_PKT_STATS_INCREMENT(nss_ctx, &nss_ctx->nss_top->stats_drv[NSS_STATS_DRV_TX_CMD_REQ]);
+	return NSS_TX_SUCCESS;
+}
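+
+/*
+ * Illustrative sketch (not part of this patch): issuing a SET_ROOT request.
+ * Only the fields consumed above for this type are populated; completion is
+ * reported asynchronously through the callback. my_qdisc_response, my_qdisc,
+ * sctx and root_qos_tag are hypothetical names.
+ *
+ *	struct nss_shaper_configure config;
+ *
+ *	config.interface_num = if_num;
+ *	config.i_shaper = i_shaper;
+ *	config.cb = my_qdisc_response;
+ *	config.app_data = my_qdisc;
+ *	config.owner = THIS_MODULE;
+ *	config.type = NSS_SHAPER_CONFIG_TYPE_SET_ROOT;
+ *	config.mt.set_root_node.qos_tag = root_qos_tag;
+ *	if (nss_shaper_config_send(sctx, &config) != NSS_TX_SUCCESS)
+ *		return -EAGAIN;
+ */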
+
+/*
+ * nss_register_shaper_bounce_interface()
+ *	Register for performing shaper bounce operations for interface shaper
+ */
+void *nss_register_shaper_bounce_interface(uint32_t if_num, nss_shaper_bounced_callback_t cb, void *app_data, struct module *owner)
+{
+	struct nss_top_instance *nss_top = &nss_top_main;
+	struct nss_shaper_bounce_registrant *reg;
+
+	nss_info("Shaper bounce interface register: %u, cb: %p, app_data: %p, owner: %p",
+			if_num, cb, app_data, owner);
+
+	/*
+	 * Must be valid interface number
+	 */
+	if (if_num >= NSS_MAX_NET_INTERFACES) {
+		nss_warning("Invalid if_num: %u", if_num);
+		BUG();
+	}
+
+	/*
+	 * Shaping enabled?
+	 */
+	if (nss_top_main.shaping_handler_id == (uint8_t)-1) {
+		nss_warning("%p: SHAPING IS NOT ENABLED", __func__);
+		return NULL;
+	}
+
+	/*
+	 * Can we hold the module?
+	 */
+	if (!try_module_get(owner)) {
+		nss_warning("%p: Unable to hold owner", __func__);
+		return NULL;
+	}
+
+	spin_lock_bh(&nss_top->lock);
+
+	/*
+	 * Must not have existing registrant
+	 */
+	reg = &nss_top->bounce_interface_registrants[if_num];
+	if (reg->registered) {
+		spin_unlock_bh(&nss_top->stats_lock);
+		module_put(owner);
+		nss_warning("Already registered: %u", if_num);
+		BUG();
+	}
+
+	/*
+	 * Register
+	 */
+	reg->bounced_callback = cb;
+	reg->app_data = app_data;
+	reg->owner = owner;
+	reg->registered = true;
+	spin_unlock_bh(&nss_top->lock);
+
+	return (void *)&nss_top->nss[nss_top->shaping_handler_id];
+}
+
+/*
+ * nss_unregister_shaper_bounce_interface()
+ *	Unregister for shaper bounce operations for interface shaper
+ */
+void nss_unregister_shaper_bounce_interface(uint32_t if_num)
+{
+	struct nss_top_instance *nss_top = &nss_top_main;
+	struct nss_shaper_bounce_registrant *reg;
+	struct module *owner;
+
+	nss_info("Shaper bounce interface unregister: %u", if_num);
+
+	/*
+	 * Must be valid interface number
+	 */
+	if (if_num >= NSS_MAX_NET_INTERFACES) {
+		nss_warning("Invalid if_num: %u", if_num);
+		BUG();
+	}
+
+	spin_lock_bh(&nss_top->lock);
+
+	/*
+	 * Must have existing registrant
+	 */
+	reg = &nss_top->bounce_interface_registrants[if_num];
+	if (!reg->registered) {
+		spin_unlock_bh(&nss_top->stats_lock);
+		nss_warning("Already unregistered: %u", if_num);
+		BUG();
+	}
+
+	/*
+	 * Unregister
+	 */
+	owner = reg->owner;
+	reg->owner = NULL;
+	reg->registered = false;
+	spin_unlock_bh(&nss_top->lock);
+
+	module_put(owner);
+}
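+
+/*
+ * Illustrative usage (not part of this patch): registering for interface
+ * bounce shaping; the bridge variants below are symmetric. The callback
+ * must match nss_shaper_bounced_callback_t; my_bounce_cb and my_qdisc are
+ * hypothetical names.
+ *
+ *	void *bctx = nss_register_shaper_bounce_interface(if_num,
+ *			my_bounce_cb, my_qdisc, THIS_MODULE);
+ *	if (!bctx)
+ *		return -ENODEV;
+ *	...
+ *	nss_unregister_shaper_bounce_interface(if_num);
+ */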
+
+/*
+ * nss_register_shaper_bounce_bridge()
+ *	Register for performing shaper bounce operations for bridge shaper
+ */
+void *nss_register_shaper_bounce_bridge(uint32_t if_num, nss_shaper_bounced_callback_t cb, void *app_data, struct module *owner)
+{
+	struct nss_top_instance *nss_top = &nss_top_main;
+	struct nss_ctx_instance *nss_ctx;
+	struct nss_shaper_bounce_registrant *reg;
+
+	nss_info("Shaper bounce bridge register: %u, cb: %p, app_data: %p, owner: %p",
+			if_num, cb, app_data, owner);
+
+	/*
+	 * Must be valid interface number
+	 */
+	if (if_num >= NSS_MAX_NET_INTERFACES) {
+		nss_warning("Invalid if_num: %u", if_num);
+		BUG();
+	}
+
+	/*
+	 * Shaping enabled?
+	 */
+	if (nss_top_main.shaping_handler_id == (uint8_t)-1) {
+		nss_warning("%p: SHAPING IS NOT ENABLED", __func__);
+		return NULL;
+	}
+
+	/*
+	 * Can we hold the module?
+	 */
+	if (!try_module_get(owner)) {
+		nss_warning("%p: Unable to hold owner", __func__);
+		return NULL;
+	}
+
+	spin_lock_bh(&nss_top->lock);
+
+	/*
+	 * Must not have existing registrant
+	 */
+	reg = &nss_top->bounce_bridge_registrants[if_num];
+	if (reg->registered) {
+		spin_unlock_bh(&nss_top->stats_lock);
+		module_put(owner);
+		nss_warning("Already registered: %u", if_num);
+		BUG();
+	}
+
+	/*
+	 * Register
+	 */
+	reg->bounced_callback = cb;
+	reg->app_data = app_data;
+	reg->owner = owner;
+	reg->registered = true;
+	spin_unlock_bh(&nss_top->lock);
+
+	nss_ctx = &nss_top->nss[nss_top->shaping_handler_id];
+	return (void *)nss_ctx;
+}
+
+/*
+ * nss_unregister_shaper_bounce_bridge()
+ *	Unregister for shaper bounce operations for bridge shaper
+ */
+void nss_unregister_shaper_bounce_bridge(uint32_t if_num)
+{
+	struct nss_top_instance *nss_top = &nss_top_main;
+	struct nss_shaper_bounce_registrant *reg;
+	struct module *owner;
+
+	nss_info("Shaper bounce bridge unregister: %u", if_num);
+
+	/*
+	 * Must be valid interface number
+	 */
+	if (if_num >= NSS_MAX_NET_INTERFACES) {
+		nss_warning("Invalid if_num: %u", if_num);
+		BUG();
+	}
+
+	spin_lock_bh(&nss_top->lock);
+
+	/*
+	 * Must have existing registrant
+	 */
+	reg = &nss_top->bounce_bridge_registrants[if_num];
+	if (!reg->registered) {
+		spin_unlock_bh(&nss_top->stats_lock);
+		nss_warning("Already unregistered: %u", if_num);
+		BUG();
+	}
+
+	/*
+	 * Wait until any bounce callback that is active is finished
+	 */
+	while (reg->callback_active) {
+		spin_unlock_bh(&nss_top->stats_lock);
+		yield();
+		spin_lock_bh(&nss_top->stats_lock);
+	}
+
+	/*
+	 * Unregister
+	 */
+	owner = reg->owner;
+	reg->owner = NULL;
+	reg->registered = false;
+	spin_unlock_bh(&nss_top->lock);
+
+	module_put(owner);
+}
+
+/*
+ * nss_shaper_bounce_interface_packet()
+ *	Bounce a packet to the NSS for interface shaping.
+ *
+ * You must have registered for interface bounce shaping to call this.
+ */
+nss_tx_status_t nss_shaper_bounce_interface_packet(void *ctx, uint32_t if_num, struct sk_buff *skb)
+{
+	struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)ctx;
+	struct nss_top_instance *nss_top = nss_ctx->nss_top;
+	struct nss_shaper_bounce_registrant *reg;
+	int32_t status;
+
+	/*
+	 * Must be valid interface number
+	 */
+	if (if_num >= NSS_MAX_NET_INTERFACES) {
+		nss_warning("Invalid if_num: %u", if_num);
+		BUG();
+	}
+
+	/*
+	 * Must have existing registrant
+	 */
+	spin_lock_bh(&nss_top->lock);
+	reg = &nss_top->bounce_interface_registrants[if_num];
+	if (!reg->registered) {
+		spin_unlock_bh(&nss_top->stats_lock);
+		nss_warning("unregistered: %u", if_num);
+		return NSS_TX_FAILURE;
+	}
+	spin_unlock_bh(&nss_top->lock);
+
+	status = nss_core_send_buffer(nss_ctx, if_num, skb, NSS_IF_CMD_QUEUE, H2N_BUFFER_SHAPER_BOUNCE_INTERFACE, 0);
+	if (status != NSS_CORE_STATUS_SUCCESS) {
+		return NSS_TX_FAILURE;
+	}
+	nss_hal_send_interrupt(nss_ctx->nmap, nss_ctx->h2n_desc_rings[NSS_IF_CMD_QUEUE].desc_ring.int_bit,
+								NSS_REGS_H2N_INTR_STATUS_DATA_COMMAND_QUEUE);
+
+	NSS_PKT_STATS_INCREMENT(nss_ctx, &nss_ctx->nss_top->stats_drv[NSS_STATS_DRV_TX_CMD_REQ]);
+	return NSS_TX_SUCCESS;
+}
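+
+/*
+ * Illustrative sketch (not part of this patch): bouncing from a qdisc
+ * enqueue path, assuming the caller retains skb ownership when the send
+ * is rejected. bctx is the context returned at registration.
+ *
+ *	if (nss_shaper_bounce_interface_packet(bctx, if_num, skb) != NSS_TX_SUCCESS) {
+ *		kfree_skb(skb);
+ *		return NET_XMIT_DROP;
+ *	}
+ */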
+
+/*
+ * nss_shaper_bounce_bridge_packet()
+ *	Bounce a packet to the NSS for bridge shaping.
+ *
+ * You must have registered for bridge bounce shaping to call this.
+ */
+nss_tx_status_t nss_shaper_bounce_bridge_packet(void *ctx, uint32_t if_num, struct sk_buff *skb)
+{
+	struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)ctx;
+	struct nss_top_instance *nss_top = nss_ctx->nss_top;
+	struct nss_shaper_bounce_registrant *reg;
+	int32_t status;
+
+	/*
+	 * Must be valid interface number
+	 */
+	if (if_num >= NSS_MAX_NET_INTERFACES) {
+		nss_warning("Invalid if_num: %u", if_num);
+		BUG();
+	}
+
+	/*
+	 * Must have existing registrant
+	 */
+	spin_lock_bh(&nss_top->lock);
+	reg = &nss_top->bounce_bridge_registrants[if_num];
+	if (!reg->registered) {
+		spin_unlock_bh(&nss_top->stats_lock);
+		nss_warning("unregistered: %u", if_num);
+		return NSS_TX_FAILURE;
+	}
+	spin_unlock_bh(&nss_top->lock);
+
+	nss_info("%s: Bridge bounce skb: %p, if_num: %u, ctx: %p", __func__, skb, if_num, nss_ctx);
+	status = nss_core_send_buffer(nss_ctx, if_num, skb, NSS_IF_CMD_QUEUE, H2N_BUFFER_SHAPER_BOUNCE_BRIDGE, 0);
+	if (status != NSS_CORE_STATUS_SUCCESS) {
+		nss_info("%s: Bridge bounce core send rejected", __func__);
+		return NSS_TX_FAILURE;
+	}
+	nss_hal_send_interrupt(nss_ctx->nmap, nss_ctx->h2n_desc_rings[NSS_IF_CMD_QUEUE].desc_ring.int_bit,
+								NSS_REGS_H2N_INTR_STATUS_DATA_COMMAND_QUEUE);
+
+	NSS_PKT_STATS_INCREMENT(nss_ctx, &nss_ctx->nss_top->stats_drv[NSS_STATS_DRV_TX_CMD_REQ]);
+	return NSS_TX_SUCCESS;
+}
+
+/*
+ * nss_interface_is_virtual()
+ *	Return true if the interface number is a virtual NSS interface
+ */
+bool nss_interface_is_virtual(void *nss_ctx, int32_t interface_num)
+{
+	return (NSS_IS_IF_TYPE(VIRTUAL, interface_num));
+}
+
+EXPORT_SYMBOL(nss_virt_if_get_interface_num);
+EXPORT_SYMBOL(nss_interface_is_virtual);
+EXPORT_SYMBOL(nss_shaper_bounce_bridge_packet);
+EXPORT_SYMBOL(nss_shaper_bounce_interface_packet);
+EXPORT_SYMBOL(nss_unregister_shaper_bounce_interface);
+EXPORT_SYMBOL(nss_register_shaper_bounce_interface);
+EXPORT_SYMBOL(nss_unregister_shaper_bounce_bridge);
+EXPORT_SYMBOL(nss_register_shaper_bounce_bridge);
+EXPORT_SYMBOL(nss_register_shaping);
+EXPORT_SYMBOL(nss_unregister_shaping);
+EXPORT_SYMBOL(nss_shaper_config_send);
+
 EXPORT_SYMBOL(nss_get_interface_number);
 EXPORT_SYMBOL(nss_get_interface_dev);
 EXPORT_SYMBOL(nss_get_state);