Merge "[qca-nss-qdisc] Fix 10Gbps rate limit issue."
diff --git a/nss_qdisc/Makefile b/nss_qdisc/Makefile
index cacc493..a31b312 100644
--- a/nss_qdisc/Makefile
+++ b/nss_qdisc/Makefile
@@ -1,5 +1,7 @@
 ccflags-y := -I$(obj)/../exports -I$(srctree)/net/bridge -DNSS_QDISC_DEBUG_LEVEL=2
 
+# If NSS_QDISC_PPE_SUPPORT is removed, also remove/disable
+# nss_ppe.o and nss_ppe_mc.o in the qca-nss-qdisc-objs list.
 ifeq ($(SoC),$(filter $(SoC),ipq807x ipq807x_64))
 ccflags-y += -DNSS_QDISC_PPE_SUPPORT -DNSS_QDISC_BRIDGE_SUPPORT
 endif
diff --git a/nss_qdisc/nss_htb.c b/nss_qdisc/nss_htb.c
index 5023ff8..c89c5fc 100644
--- a/nss_qdisc/nss_htb.c
+++ b/nss_qdisc/nss_htb.c
@@ -951,6 +951,7 @@
 	struct tc_nsshtb_qopt *qopt;
 	int err;
 	unsigned int accel_mode;
+	unsigned int r2q = 0;
 
 	nss_qdisc_trace("initializing htb qdisc %x\n", sch->handle);
 
@@ -968,9 +969,10 @@
 			return -EINVAL;
 		}
 		accel_mode = qopt->accel_mode;
+		r2q = qopt->r2q;
 	}
 
-	nss_qdisc_info("r2q = %u accel_mode = %u\n", qopt->r2q, accel_mode);
+	nss_qdisc_info("r2q = %u accel_mode = %u\n", r2q, accel_mode);
 
 	/*
 	 * Initialize the NSSHTB shaper in NSS
diff --git a/nss_qdisc/nss_ppe.c b/nss_qdisc/nss_ppe.c
index 0aea203..fe582bd 100644
--- a/nss_qdisc/nss_ppe.c
+++ b/nss_qdisc/nss_ppe.c
@@ -560,10 +560,10 @@
  * nss_ppe_all_queue_disable()
  *	Disables all queues corresponding to a port in SSDK.
  */
-static void nss_ppe_all_queue_disable(struct nss_qdisc *nq)
+static void nss_ppe_all_queue_disable(uint32_t port_num)
 {
-	uint32_t port_num = nss_ppe_port_num_get(nq);
 	uint32_t qid = nss_ppe_base_get(port_num, NSS_PPE_UCAST_QUEUE);
+	uint32_t mcast_qid = nss_ppe_base_get(port_num, NSS_PPE_MCAST_QUEUE);
 	uint32_t offset;
 
 	/*
@@ -575,6 +575,12 @@
 		fal_queue_flush(0, port_num, qid + offset);
 	}
 
+	for (offset = 0; offset < nss_ppe_max_get(port_num, NSS_PPE_MCAST_QUEUE); offset++) {
+		fal_qm_enqueue_ctrl_set(0, mcast_qid + offset, false);
+		fal_scheduler_dequeue_ctrl_set(0, mcast_qid + offset, false);
+		fal_queue_flush(0, port_num, mcast_qid + offset);
+	}
+
 	nss_qdisc_info("Disable SSDK level0 queue scheduler successful\n");
 }
 
@@ -582,10 +588,10 @@
  * nss_ppe_all_queue_enable()
  *	Enables all level L0 queues corresponding to a port in SSDK.
  */
-static void nss_ppe_all_queue_enable(struct nss_qdisc *nq)
+static void nss_ppe_all_queue_enable(uint32_t port_num)
 {
-	uint32_t port_num = nss_ppe_port_num_get(nq);
 	uint32_t qid = nss_ppe_base_get(port_num, NSS_PPE_UCAST_QUEUE);
+	uint32_t mcast_qid = nss_ppe_base_get(port_num, NSS_PPE_MCAST_QUEUE);
 	uint32_t offset;
 
 	/*
@@ -596,6 +602,11 @@
 		fal_scheduler_dequeue_ctrl_set(0, qid + offset, true);
 	}
 
+	for (offset = 0; offset < nss_ppe_max_get(port_num, NSS_PPE_MCAST_QUEUE); offset++) {
+		fal_qm_enqueue_ctrl_set(0, mcast_qid + offset, true);
+		fal_scheduler_dequeue_ctrl_set(0, mcast_qid + offset, true);
+	}
+
 	nss_qdisc_info("Enable SSDK level0 queue scheduler successful\n");
 }
 
@@ -619,7 +630,7 @@
 	 * We need to disable and flush the queues before
 	 * changing scheduler's sp_id/drr_id/priority.
 	 */
-	nss_ppe_all_queue_disable(nq);
+	nss_ppe_all_queue_disable(port_num);
 
 	memset(&l1cfg, 0, sizeof(l1cfg));
 	l1cfg.sp_id = port_num;
@@ -637,10 +648,11 @@
 			port_num, npq->l0spid, l1cfg.c_drr_id, l1cfg.c_pri, l1cfg.c_drr_wt, l1cfg.e_drr_id, l1cfg.e_pri, l1cfg.e_drr_wt, l1cfg.sp_id);
 	if (fal_queue_scheduler_set(0, npq->l0spid, NSS_PPE_FLOW_LEVEL - 1, port_num, &l1cfg) != 0) {
 		nss_qdisc_error("SSDK level1 queue scheduler configuration failed\n");
+		nss_ppe_all_queue_enable(port_num);
 		return -EINVAL;
 	}
 
-	nss_ppe_all_queue_enable(nq);
+	nss_ppe_all_queue_enable(port_num);
 
 	nss_qdisc_info("SSDK level1 queue scheduler configuration successful\n");
 	return 0;
@@ -755,7 +767,7 @@
 	uint32_t port_num = nss_ppe_port_num_get(nq);
 	struct nss_ppe_qdisc *npq = &nq->npq;
 
-	nss_ppe_queue_disable(nq);
+	nss_ppe_all_queue_disable(port_num);
 
 	/*
 	 * Reset Level 0 configuration
@@ -771,9 +783,12 @@
 			port_num, npq->q.ucast_qid, l0cfg.c_drr_id, l0cfg.c_pri, l0cfg.c_drr_wt, l0cfg.e_drr_id, l0cfg.e_pri, l0cfg.e_drr_wt, l0cfg.sp_id);
 	if (fal_queue_scheduler_set(0, npq->q.ucast_qid, NSS_PPE_QUEUE_LEVEL - 1, port_num, &l0cfg) != 0) {
 		nss_qdisc_error("SSDK level0 queue scheduler configuration failed\n");
+		nss_ppe_all_queue_enable(port_num);
 		return -EINVAL;
 	}
 
+	nss_ppe_all_queue_enable(port_num);
+
 	nss_qdisc_info("SSDK level0 queue scheduler configuration successful\n");
 	return 0;
 }
@@ -887,9 +902,11 @@
 	}
 
 	/*
-	 * Disable queue and set Level 0 SSDK configuration
+	 * Disable all queues and set Level 0 SSDK configuration
+	 * We need to disable and flush the queues before
+	 * changing scheduler's sp_id/drr_id/priority.
 	 */
-	nss_ppe_queue_disable(nq);
+	nss_ppe_all_queue_disable(port_num);
 
 	memset(&l0cfg, 0, sizeof(l0cfg));
 	l0cfg.sp_id = npq->l0spid;
@@ -906,6 +923,7 @@
 			port_num, npq->q.ucast_qid, l0cfg.c_drr_id, l0cfg.c_pri, l0cfg.c_drr_wt, l0cfg.e_drr_id, l0cfg.e_pri, l0cfg.e_drr_wt, l0cfg.sp_id);
 	if (fal_queue_scheduler_set(0, npq->q.ucast_qid, NSS_PPE_QUEUE_LEVEL - 1, port_num, &l0cfg) != 0) {
 		nss_qdisc_error("SSDK level0 queue scheduler configuration failed\n");
+		nss_ppe_all_queue_enable(port_num);
 		return -EINVAL;
 	}
 
@@ -923,11 +941,12 @@
 				port_num, npq->q.mcast_qid, l0cfg.c_drr_id, l0cfg.c_pri, l0cfg.c_drr_wt, l0cfg.e_drr_id, l0cfg.e_pri, l0cfg.e_drr_wt, l0cfg.sp_id);
 		if (fal_queue_scheduler_set(0, npq->q.mcast_qid, NSS_PPE_QUEUE_LEVEL - 1, port_num, &l0cfg) != 0) {
 			nss_qdisc_error("SSDK level0 multicast queue scheduler configuration failed\n");
+			nss_ppe_all_queue_enable(port_num);
 			return -EINVAL;
 		}
 	}
 
-	nss_ppe_queue_enable(nq);
+	nss_ppe_all_queue_enable(port_num);
 
 	nss_qdisc_info("SSDK level0 queue scheduler configuration successful\n");
 	return 0;
@@ -1382,6 +1401,13 @@
 	}
 
 	/*
+	 * Disable all queues and set SSDK configuration
+	 * We need to disable and flush the queues before
+	 * changing scheduler's sp_id/drr_id/priority.
+	 */
+	nss_ppe_all_queue_disable(port_num);
+
+	/*
 	 * Reset Level 1 Configuration
 	 */
 	memset(&l1cfg, 0, sizeof(l1cfg));
@@ -1398,6 +1424,7 @@
 			port_num, l0spid, l1cfg.c_drr_id, l1cfg.c_pri, l1cfg.c_drr_wt, l1cfg.e_drr_id, l1cfg.e_pri, l1cfg.e_drr_wt, l1cfg.sp_id);
 	if (fal_queue_scheduler_set(0, l0spid, NSS_PPE_FLOW_LEVEL - 1, port_num, &l1cfg) != 0) {
 		nss_qdisc_error("SSDK level1 queue scheduler configuration failed\n");
+		nss_ppe_all_queue_enable(port_num);
 		return -EINVAL;
 	}
 
@@ -1420,6 +1447,7 @@
 			port_num, ucast_qid, l0cfg.c_drr_id, l0cfg.c_pri, l0cfg.c_drr_wt, l0cfg.e_drr_id, l0cfg.e_pri, l0cfg.e_drr_wt, l0cfg.sp_id);
 	if (fal_queue_scheduler_set(0, ucast_qid, NSS_PPE_QUEUE_LEVEL - 1, port_num, &l0cfg) != 0) {
 		nss_qdisc_error("SSDK level0 queue scheduler configuration failed\n");
+		nss_ppe_all_queue_enable(port_num);
 		return -EINVAL;
 	}
 
@@ -1427,6 +1455,7 @@
 			port_num, mcast_qid, l0cfg.c_drr_id, l0cfg.c_pri, l0cfg.c_drr_wt, l0cfg.e_drr_id, l0cfg.e_pri, l0cfg.e_drr_wt, l0cfg.sp_id);
 	if (fal_queue_scheduler_set(0, mcast_qid, NSS_PPE_QUEUE_LEVEL - 1, port_num, &l0cfg) != 0) {
 		nss_qdisc_error("SSDK level0 queue scheduler configuration failed\n");
+		nss_ppe_all_queue_enable(port_num);
 		return -EINVAL;
 	}
 
@@ -1437,14 +1466,6 @@
 	fal_ac_prealloc_buffer_set(0, &obj, 0);
 
 	/*
-	 * Enable queue enqueue and dequeue.
-	 */
-	fal_qm_enqueue_ctrl_set(0, ucast_qid, true);
-	fal_qm_enqueue_ctrl_set(0, mcast_qid, true);
-	fal_scheduler_dequeue_ctrl_set(0, ucast_qid, true);
-	fal_scheduler_dequeue_ctrl_set(0, mcast_qid, true);
-
-	/*
 	 * Disable force drop.
 	 * Setting ac_fc_en as 0 means queue will
 	 * honor flow control.
@@ -1455,9 +1476,12 @@
 	nss_qdisc_trace("SSDK queue flow control set: ucast_qid:%d, enable:%d\n", ucast_qid, cfg.ac_fc_en);
 	if (fal_ac_ctrl_set(0, &obj, &cfg) != 0) {
 		nss_qdisc_error("SSDK queue flow control set failed\n");
+		nss_ppe_all_queue_enable(port_num);
 		return -EINVAL;
 	}
 
+	nss_ppe_all_queue_enable(port_num);
+
 	/*
 	 * Set the default queue configuration status.
 	 */
@@ -1826,6 +1850,11 @@
 		 * And the below class check is applicable only for the classful qdiscs.
 		 */
 		if ((parent_nq) && (parent_nq->npq.sub_type != NSS_SHAPER_CONFIG_PPE_SN_TYPE_PRIO) && (TC_H_MIN(parent))) {
+			if (!parent_qdisc) {
+				nss_qdisc_info("HW qdisc/class %p cannot be attached to non-existing class %x\n", nq->qdisc, parent);
+				return NSS_PPE_QDISC_PARENT_NOT_EXISTING;
+			}
+
 			parent_class = parent_qdisc->ops->cl_ops->get(parent_qdisc, parent);
 
 			if (!parent_class) {
diff --git a/nss_qdisc/nss_wrr.c b/nss_qdisc/nss_wrr.c
index e21bdd9..06a98c4 100644
--- a/nss_qdisc/nss_wrr.c
+++ b/nss_qdisc/nss_wrr.c
@@ -111,9 +111,11 @@
 {
 	struct nlattr *opt = tca[TCA_OPTIONS];
 	struct tc_nsswrr_class_qopt *qopt;
-	struct nss_wrr_sched_data *q = qdisc_priv(sch);
 	struct net_device *dev = qdisc_dev(sch);
 	bool is_wrr = (sch->ops == &nss_wrr_qdisc_ops);
+#if defined(NSS_QDISC_PPE_SUPPORT)
+	struct nss_wrr_sched_data *q = qdisc_priv(sch);
+#endif
 
 	nss_qdisc_trace("validating parameters for nsswrr class of qdisc:%x\n", sch->handle);