Merge "qca-wifi: Add Application support for tx msdu, bytes, retries"
diff --git a/configs/hif/ap_hif.config b/configs/hif/ap_hif.config
new file mode 100644
index 0000000..f49aee9
--- /dev/null
+++ b/configs/hif/ap_hif.config
@@ -0,0 +1,3 @@
+EXTRA_CFLAGS += -DQCA_NAPI_DEF_SCALE_BIN_SHIFT=1
+EXTRA_CFLAGS += -DHIF_MAX_GROUP=12
+EXTRA_CFLAGS += -DHTC_MSG_NACK_SUSPEND=7
diff --git a/configs/umac/cmn_services/mgmt_txrx/ap_mgmt_txrx.config b/configs/umac/cmn_services/mgmt_txrx/ap_mgmt_txrx.config
new file mode 100644
index 0000000..ce93ede
--- /dev/null
+++ b/configs/umac/cmn_services/mgmt_txrx/ap_mgmt_txrx.config
@@ -0,0 +1,7 @@
+ifeq ($(strip ${CONFIG_WIFI_IPQ_MEM_PROFILE}),256)
+EXTRA_CFLAGS += -DMGMT_DESC_POOL_MAX=256
+else ifeq ($(strip ${CONFIG_WIFI_IPQ_MEM_PROFILE}),512)
+EXTRA_CFLAGS += -DMGMT_DESC_POOL_MAX=384
+else
+EXTRA_CFLAGS += -DMGMT_DESC_POOL_MAX=512
+endif
diff --git a/configs/umac/scan/ap_scan.config b/configs/umac/scan/ap_scan.config
new file mode 100644
index 0000000..c5b5b47
--- /dev/null
+++ b/configs/umac/scan/ap_scan.config
@@ -0,0 +1,11 @@
+EXTRA_CFLAGS += -DMAX_SCAN_CACHE_SIZE=1024
+EXTRA_CFLAGS += -DSCAN_MAX_REST_TIME=0
+EXTRA_CFLAGS += -DSCAN_MIN_REST_TIME=50
+EXTRA_CFLAGS += -DSCAN_BURST_DURATION=0
+EXTRA_CFLAGS += -DSCAN_PROBE_SPACING_TIME=0
+EXTRA_CFLAGS += -DSCAN_PROBE_DELAY=0
+EXTRA_CFLAGS += -DSCAN_MAX_SCAN_TIME=50000
+EXTRA_CFLAGS += -DSCAN_NETWORK_IDLE_TIMEOUT=200
+EXTRA_CFLAGS += -DHIDDEN_SSID_TIME=0xFFFFFFFF
+EXTRA_CFLAGS += -DSCAN_CHAN_STATS_EVENT_ENAB=1
+EXTRA_CFLAGS += -DMAX_BCN_PROBE_IN_SCAN_QUEUE=2000
diff --git a/configs/wlan_cfg/ap_wlan_cfg.config b/configs/wlan_cfg/ap_wlan_cfg.config
new file mode 100644
index 0000000..b3751f5
--- /dev/null
+++ b/configs/wlan_cfg/ap_wlan_cfg.config
@@ -0,0 +1,30 @@
+EXTRA_CFLAGS += -DWLAN_CFG_PER_PDEV_TX_RING=0
+EXTRA_CFLAGS += -DWLAN_CFG_IPA_UC_TX_BUF_SIZE=0
+EXTRA_CFLAGS += -DWLAN_CFG_IPA_UC_TX_PARTITION_BASE=0
+EXTRA_CFLAGS += -DWLAN_CFG_IPA_UC_RX_IND_RING_COUNT=0
+EXTRA_CFLAGS += -DWLAN_CFG_PER_PDEV_RX_RING=0
+EXTRA_CFLAGS += -DWLAN_CFG_PER_PDEV_LMAC_RING=1
+EXTRA_CFLAGS += -DWLAN_LRO_ENABLE=0
+EXTRA_CFLAGS += -DWLAN_CFG_NUM_TX_EXT_DESC=0x80000
+EXTRA_CFLAGS += -DWLAN_CFG_INT_BATCH_THRESHOLD_TX=256
+EXTRA_CFLAGS += -DWLAN_CFG_INT_BATCH_THRESHOLD_RX=128
+EXTRA_CFLAGS += -DWLAN_CFG_INT_BATCH_THRESHOLD_OTHER=1
+EXTRA_CFLAGS += -DWLAN_CFG_INT_TIMER_THRESHOLD_TX=1000
+EXTRA_CFLAGS += -DWLAN_CFG_INT_TIMER_THRESHOLD_RX=500
+EXTRA_CFLAGS += -DWLAN_CFG_INT_TIMER_THRESHOLD_OTHER=1000
+EXTRA_CFLAGS += -DWLAN_CFG_TX_RING_SIZE=512
+EXTRA_CFLAGS += -DWLAN_CFG_TX_COMP_RING_SIZE=0x80000
+EXTRA_CFLAGS += -DWLAN_CFG_TX_FLOW_START_QUEUE_OFFSET=0
+EXTRA_CFLAGS += -DWLAN_CFG_TX_FLOW_STOP_QUEUE_TH=0
+EXTRA_CFLAGS += -DWLAN_CFG_RXDMA1_ENABLE=1
+EXTRA_CFLAGS += -DDP_PPDU_TXLITE_STATS_BITMASK_CFG=0xFFFF
+EXTRA_CFLAGS += -DDP_TX_NAPI_BUDGET_DIV_MASK=0xFFFF
+EXTRA_CFLAGS += -DCONFIG_PROCESS_RX_STATUS=0
+EXTRA_CFLAGS += -DCONFIG_PROCESS_TX_STATUS=0
+EXTRA_CFLAGS += -DWLAN_CFG_MAC_PER_TARGET=3
+ifeq ($(strip ${QCA_WIFI_QCA8074_VP}),1)
+EXTRA_CFLAGS += -DWLAN_CFG_NUM_TX_DESC=0x2000
+else
+EXTRA_CFLAGS += -DWLAN_CFG_NUM_TX_DESC=0x320000
+endif
+
diff --git a/dp/wifi3.0/dp_htt_logger.c b/dp/wifi3.0/dp_htt_logger.c
index 9723db5..4b40120 100644
--- a/dp/wifi3.0/dp_htt_logger.c
+++ b/dp/wifi3.0/dp_htt_logger.c
@@ -31,7 +31,7 @@
  * HTT_DISPLAY_SIZE   : Supported Number of command/event/wbm_event to be
  * read at one shot through wdf debugfs framework
  */
-#define HTT_DISPLAY_SIZE 29
+#define HTT_DISPLAY_SIZE 25
 
 /**
  * Used for stop_seq
diff --git a/dp/wifi3.0/dp_rx_mon_feature.c b/dp/wifi3.0/dp_rx_mon_feature.c
index c8b4704..71ece5c 100644
--- a/dp/wifi3.0/dp_rx_mon_feature.c
+++ b/dp/wifi3.0/dp_rx_mon_feature.c
@@ -27,6 +27,7 @@
 #include "dp_rx_mon.h"
 #include "dp_internal.h"
 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
+#include "wlan_cfg.h"
 
 #ifdef WLAN_RX_PKT_CAPTURE_ENH
 
@@ -112,10 +113,88 @@
 		cdp_mpdu_info->per_chain_rssi[i] = ppdu_info->rx_status.rssi[i];
 }
 
+#ifdef WLAN_SUPPORT_RX_FLOW_TAG
+/**
+ * dp_rx_mon_enh_capture_set_flow_tag() - Tags the actual nbuf with
+ * cached flow tag data read from TLV
+ * @pdev: pdev structure
+ * @ppdu_info: ppdu info structure from monitor status ring
+ * @user_id: user ID on which the PPDU is received
+ * @nbuf: packet buffer on which metadata have to be updated
+ *
+ * Return: None
+ */
+void dp_rx_mon_enh_capture_set_flow_tag(struct dp_pdev *pdev,
+					struct hal_rx_ppdu_info *ppdu_info,
+					uint32_t user_id, qdf_nbuf_t nbuf)
+{
+	struct dp_soc *soc = pdev->soc;
+	uint16_t fse_metadata;
+
+	if (user_id >= MAX_MU_USERS)
+		return;
+
+	if (qdf_likely(!wlan_cfg_is_rx_flow_tag_enabled(soc->wlan_cfg_ctx)))
+		return;
+
+	if (ppdu_info->rx_msdu_info[user_id].is_flow_idx_invalid)
+		return;
+
+	if (ppdu_info->rx_msdu_info[user_id].is_flow_idx_timeout)
+		return;
+
+	fse_metadata =
+	  (uint16_t)ppdu_info->rx_msdu_info[user_id].fse_metadata & 0xFFFF;
+
+	/* update the skb->cb with the user-specified tag/metadata */
+	qdf_nbuf_set_rx_flow_tag(nbuf, fse_metadata);
+
+	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
+		  "Setting flow tag %u for userID %u", fse_metadata, user_id);
+
+	ppdu_info->rx_msdu_info[user_id].fse_metadata = 0;
+	ppdu_info->rx_msdu_info[user_id].flow_idx = 0;
+	ppdu_info->rx_msdu_info[user_id].is_flow_idx_timeout = false;
+	ppdu_info->rx_msdu_info[user_id].is_flow_idx_invalid = false;
+}
+
+/**
+ * dp_rx_mon_enh_capture_set_flow_tag_in_trailer - update msdu trailer
+ *                                                 with flow tag
+ * @nbuf: packet buffer on which metadata have to be updated
+ * @trailer: pointer to rx monitor-lite trailer
+ *
+ * Return: None
+ */
+static inline void dp_rx_mon_enh_capture_set_flow_tag_in_trailer(
+					qdf_nbuf_t nbuf, void *trailer)
+{
+	uint16_t flow_tag = qdf_nbuf_get_rx_flow_tag(nbuf);
+	struct dp_rx_mon_enh_trailer_data *nbuf_trailer =
+			(struct dp_rx_mon_enh_trailer_data *)trailer;
+
+	if (!flow_tag)
+		return;
+
+	nbuf_trailer->flow_tag = flow_tag;
+}
+#else
+void dp_rx_mon_enh_capture_set_flow_tag(struct dp_pdev *pdev,
+					struct hal_rx_ppdu_info *ppdu_info,
+					uint32_t user_id, qdf_nbuf_t nbuf)
+{
+}
+
+static inline void dp_rx_mon_enh_capture_set_flow_tag_in_trailer(
+					qdf_nbuf_t nbuf, void *trailer)
+{
+}
+#endif /* WLAN_SUPPORT_RX_FLOW_TAG */
+
 #ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
 /*
  * dp_rx_mon_enh_capture_set_protocol_tag() - Tags the actual nbuf with
- * cached data read from TLV
+ * cached protocol tag data read from TLV
  * @pdev: pdev structure
  * @ppdu_info: ppdu info structure from monitor status ring
  * @user_id: user ID on which the PPDU is received
@@ -162,8 +241,8 @@
 	 * by looking up tag value for received protocol type.
 	 */
 	protocol_tag = pdev->rx_proto_tag_map[cce_metadata].tag;
-	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
-		  "%s: Setting ProtoID:%d Tag %u", __func__,
+	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
+		  "Setting ProtoID:%d Tag %u in mon nbuf",
 		  cce_metadata, protocol_tag);
 	qdf_nbuf_set_rx_protocol_tag(nbuf, protocol_tag);
 }
@@ -256,6 +335,7 @@
 {
 	uint64_t trailer;
 	uint8_t  *dest;
+	struct dp_soc *soc = pdev->soc;
 	struct dp_rx_mon_enh_trailer_data *nbuf_trailer =
 			(struct dp_rx_mon_enh_trailer_data *)&trailer;
 
@@ -264,7 +344,12 @@
 
 	trailer = RX_MON_CAP_ENH_TRAILER;
 
-	dp_rx_mon_enh_capture_set_protocol_tag_in_trailer(nbuf, nbuf_trailer);
+	if (wlan_cfg_is_rx_mon_protocol_flow_tag_enabled(soc->wlan_cfg_ctx)) {
+		dp_rx_mon_enh_capture_set_protocol_tag_in_trailer(nbuf,
+								  nbuf_trailer);
+		dp_rx_mon_enh_capture_set_flow_tag_in_trailer(nbuf,
+							      nbuf_trailer);
+	}
 
 	/**
 	 * Overwrite last 8 bytes of data with trailer. This is ok since we
@@ -424,6 +509,7 @@
 	/* Tag the MSDU/MPDU if a cce_metadata is valid */
 	if ((tlv_status == HAL_TLV_STATUS_MSDU_END) &&
 	    (pdev->rx_enh_capture_mode == CDP_RX_ENH_CAPTURE_MPDU_MSDU)) {
+		bool is_rx_mon_protocol_flow_tag_en;
 		/**
 		 * Proceed only if this is a data frame.
 		 * We could also rx probes, etc.
@@ -445,12 +531,18 @@
 		 */
 		nbuf = msdu_list->tail;
 
-		/**
-		 * Set the protocol tag value from CCE metadata.
-		 */
-		dp_rx_mon_enh_capture_tag_protocol_type(pdev, ppdu_info,
-							user_id, nbuf);
+		is_rx_mon_protocol_flow_tag_en =
+		    wlan_cfg_is_rx_mon_protocol_flow_tag_enabled(
+					pdev->soc->wlan_cfg_ctx);
 
+		if (is_rx_mon_protocol_flow_tag_en) {
+			 /* Set the protocol tag value from CCE metadata */
+			dp_rx_mon_enh_capture_tag_protocol_type(pdev, ppdu_info,
+								user_id, nbuf);
+			/* Set the flow tag from FSE metadata */
+			dp_rx_mon_enh_capture_set_flow_tag(pdev, ppdu_info,
+							   user_id, nbuf);
+		}
 		if (!pdev->is_rx_enh_capture_trailer_enabled)
 			return;
 		/**
diff --git a/dp/wifi3.0/dp_tx_capture.c b/dp/wifi3.0/dp_tx_capture.c
index 85648f3..9adcd9f 100644
--- a/dp/wifi3.0/dp_tx_capture.c
+++ b/dp/wifi3.0/dp_tx_capture.c
@@ -32,6 +32,27 @@
 #include "dp_tx_capture.h"
 
 #ifdef WLAN_TX_PKT_CAPTURE_ENH
+/**
+ * dp_peer_or_pdev_tx_cap_enabled - Returns status of tx_cap_enabled
+ * based on global per-pdev setting or per-peer setting
+ * @pdev: Datapath pdev handle
+ * @peer: Datapath peer
+ *
+ * Return: true if feature is enabled on a per-pdev basis or if
+ * enabled for the given peer when per-peer mode is set, false otherwise
+ */
+inline bool
+dp_peer_or_pdev_tx_cap_enabled(struct dp_pdev *pdev,
+			       struct dp_peer *peer)
+{
+	if ((pdev->tx_capture_enabled ==
+	     CDP_TX_ENH_CAPTURE_ENABLE_ALL_PEERS) ||
+	    ((pdev->tx_capture_enabled ==
+	      CDP_TX_ENH_CAPTURE_ENDIS_PER_PEER) &&
+	     peer->tx_cap_enabled))
+		return true;
+	return false;
+}
 
 /*
  * dp_peer_tid_queue_init() – Initialize ppdu stats queue per TID
@@ -120,11 +141,14 @@
 		dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc,
 				     nbuf, HTT_INVALID_PEER,
 				     WDI_NO_VAL, pdev->pdev_id);
-	} else if (pdev->tx_capture_enabled) {
+		return;
+	}
+	if (pdev->tx_capture_enabled == CDP_TX_ENH_CAPTURE_ENABLE_ALL_PEERS ||
+	    pdev->tx_capture_enabled == CDP_TX_ENH_CAPTURE_ENDIS_PER_PEER) {
 		/* invoke WDI event handler here send mgmt pkt here */
+
 		/* pull ppdu_id from the packet */
 		qdf_nbuf_pull_head(nbuf, sizeof(uint32_t));
-
 		tx_capture_info.frame_payload = 1;
 		tx_capture_info.mpdu_nbuf = nbuf;
 
@@ -162,6 +186,8 @@
 	qdf_spinlock_create(&pdev->tx_capture.ppdu_stats_lock);
 	pdev->tx_capture.ppdu_stats_queue_depth = 0;
 	pdev->tx_capture.ppdu_stats_next_sched = 0;
+	pdev->tx_capture.ppdu_stats_defer_queue_depth = 0;
+	pdev->tx_capture.ppdu_dropped = 0;
 }
 
 /**
@@ -298,7 +324,7 @@
 {
 	int ret = QDF_STATUS_E_FAILURE;
 
-	if (desc->pdev->tx_capture_enabled == 1 &&
+	if (desc->pdev->tx_capture_enabled != CDP_TX_ENH_CAPTURE_DISABLED &&
 	    ts->status == HAL_TX_TQM_RR_FRAME_ACKED) {
 		ret = dp_update_msdu_to_list(soc, desc->pdev,
 					     peer, ts, desc->nbuf);
@@ -449,13 +475,14 @@
  * Return: QDF_STATUS
  */
 QDF_STATUS
-dp_config_enh_tx_capture(struct cdp_pdev *pdev_handle, int val)
+dp_config_enh_tx_capture(struct cdp_pdev *pdev_handle, uint8_t val)
 {
 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
 
 	pdev->tx_capture_enabled = val;
 
-	if (pdev->tx_capture_enabled) {
+	if (pdev->tx_capture_enabled == CDP_TX_ENH_CAPTURE_ENABLE_ALL_PEERS ||
+	    pdev->tx_capture_enabled == CDP_TX_ENH_CAPTURE_ENDIS_PER_PEER) {
 		dp_soc_set_txrx_ring_map_single(pdev->soc);
 		if (!pdev->pktlog_ppdu_stats)
 			dp_h2t_cfg_stats_msg_send(pdev,
@@ -565,13 +592,15 @@
 static uint32_t dp_tx_update_80211_hdr(struct dp_pdev *pdev,
 				       struct dp_peer *peer,
 				       void *data,
-				       qdf_nbuf_t nbuf)
+				       qdf_nbuf_t nbuf,
+				       uint16_t ether_type)
 {
 	struct cdp_tx_completion_ppdu *ppdu_desc;
 	struct ieee80211_frame *ptr_wh;
 	struct ieee80211_qoscntl *ptr_qoscntl;
 	uint32_t mpdu_buf_len;
 	uint8_t *ptr_hdr;
+	uint16_t eth_type = qdf_htons(ether_type);
 
 	ppdu_desc = (struct cdp_tx_completion_ppdu *)data;
 	ptr_wh = &peer->tx_capture.tx_wifi_hdr;
@@ -625,9 +654,9 @@
 	*(ptr_hdr + 3) = 0x00;
 	*(ptr_hdr + 4) = 0x00;
 	*(ptr_hdr + 5) = 0x00;
-	/* TYPE: IPV4 ?? */
-	*(ptr_hdr + 6) = (ETHERTYPE_IPV4 & 0xFF00) >> 8;
-	*(ptr_hdr + 7) = (ETHERTYPE_IPV4 & 0xFF);
+	*(ptr_hdr + 6) = (eth_type & 0xFF00) >> 8;
+	*(ptr_hdr + 7) = (eth_type & 0xFF);
+
 
 	qdf_nbuf_trim_tail(nbuf, qdf_nbuf_len(nbuf) - mpdu_buf_len);
 	return 0;
@@ -663,6 +692,8 @@
 	uint8_t last_msdu = 0;
 	uint32_t frag_list_sum_len = 0;
 	uint8_t first_msdu_not_seen = 1;
+	uint16_t ether_type = 0;
+	qdf_ether_header_t *eh = NULL;
 
 	num_mpdu = ppdu_desc->num_mpdu;
 	mpdu = &ppdu_desc->mpdu_q;
@@ -676,6 +707,10 @@
 		first_msdu = ptr_msdu_info->first_msdu;
 		last_msdu = ptr_msdu_info->last_msdu;
 
+		eh = (qdf_ether_header_t *)(curr_nbuf->data +
+					   sizeof(struct msdu_completion_info));
+		ether_type = eh->ether_type;
+
 		/* pull msdu_completion_info added in pre header */
 		/* pull ethernet header from header */
 		qdf_nbuf_pull_head(curr_nbuf,
@@ -700,7 +735,8 @@
 			}
 
 			dp_tx_update_80211_hdr(pdev, peer,
-					       ppdu_desc, mpdu_nbuf);
+					       ppdu_desc, mpdu_nbuf,
+					       ether_type);
 
 			/* update first buffer to previous buffer */
 			prev_nbuf = curr_nbuf;
@@ -855,22 +891,23 @@
 
 		wbm_tsf = ptr_msdu_info->tsf;
 
+		if (wbm_tsf < start_tsf) {
+			/* remove the aged packet */
+			nbuf = qdf_nbuf_queue_remove(
+					&tx_tid->msdu_comp_q);
+
+			qdf_nbuf_free(nbuf);
+
+			curr_msdu = qdf_nbuf_queue_first(
+					&tx_tid->msdu_comp_q);
+			prev_msdu = NULL;
+			continue;
+		}
 		if (msdu_ppdu_id == ppdu_id) {
 			matched = 1;
 
 			if (wbm_tsf > start_tsf && wbm_tsf < end_tsf) {
 				/*packet found */
-			} else if (wbm_tsf < start_tsf) {
-				/* remove the aged packet */
-				nbuf = qdf_nbuf_queue_remove(
-						&tx_tid->msdu_comp_q);
-
-				qdf_nbuf_free(nbuf);
-
-				curr_msdu = qdf_nbuf_queue_first(
-						&tx_tid->msdu_comp_q);
-				prev_msdu = NULL;
-				continue;
 			} else if (wbm_tsf > end_tsf) {
 				/*
 				 * Do we need delta in above case.
@@ -1258,7 +1295,7 @@
  * which doesn't include BAR and other non data frame
  * ~50 is maximum scheduled ppdu count
  */
-#define SCHED_MAX_PPDU_CNT 50
+#define SCHED_MAX_PPDU_CNT 64
 /**
  * dp_tx_ppdu_stats_process - Deferred PPDU stats handler
  * @context: Opaque work context (PDEV)
@@ -1277,15 +1314,14 @@
 	uint32_t now_ms = qdf_system_ticks_to_msecs(qdf_system_ticks());
 	struct ppdu_info *sched_ppdu_list[SCHED_MAX_PPDU_CNT];
 	qdf_nbuf_t nbuf_ppdu_desc_list[SCHED_MAX_PPDU_CNT];
-	struct dp_pdev_tx_capture *ptr_tx_cap;
-	uint32_t tx_capture = pdev->tx_capture_enabled;
-
-	ptr_tx_cap = &pdev->tx_capture;
+	struct dp_pdev_tx_capture *ptr_tx_cap = &pdev->tx_capture;
 
 	/* Move the PPDU entries to defer list */
 	qdf_spin_lock_bh(&ptr_tx_cap->ppdu_stats_lock);
 	STAILQ_CONCAT(&ptr_tx_cap->ppdu_stats_defer_queue,
 		      &ptr_tx_cap->ppdu_stats_queue);
+	ptr_tx_cap->ppdu_stats_defer_queue_depth +=
+		ptr_tx_cap->ppdu_stats_queue_depth;
 	ptr_tx_cap->ppdu_stats_queue_depth = 0;
 	qdf_spin_unlock_bh(&ptr_tx_cap->ppdu_stats_lock);
 
@@ -1300,8 +1336,9 @@
 				    ppdu_info_queue_elem, tmp_ppdu_info) {
 			if (curr_sched_cmdid != ppdu_info->sched_cmdid)
 				break;
-			sched_ppdu_list[ppdu_cnt++] = ppdu_info;
-			qdf_assert_always(ppdu_cnt <= SCHED_MAX_PPDU_CNT);
+			qdf_assert_always(ppdu_cnt < SCHED_MAX_PPDU_CNT);
+			sched_ppdu_list[ppdu_cnt] = ppdu_info;
+			ppdu_cnt++;
 		}
 		if (ppdu_info && (curr_sched_cmdid == ppdu_info->sched_cmdid) &&
 		    ptr_tx_cap->ppdu_stats_next_sched < now_ms)
@@ -1311,6 +1348,7 @@
 		STAILQ_REMOVE_HEAD_UNTIL(&ptr_tx_cap->ppdu_stats_defer_queue,
 					 sched_ppdu_list[ppdu_cnt - 1],
 					 ppdu_info_queue_elem);
+		ptr_tx_cap->ppdu_stats_defer_queue_depth -= ppdu_cnt;
 
 		ppdu_desc_cnt = 0;
 		/* Process tx buffer list based on last_ppdu_id stored above */
@@ -1349,7 +1387,8 @@
 				qdf_nbuf_data(nbuf);
 
 			/* send WDI event */
-			if (!tx_capture) {
+			if (pdev->tx_capture_enabled ==
+			    CDP_TX_ENH_CAPTURE_DISABLED) {
 				/**
 				 * Deliver PPDU stats only for valid (acked)
 				 * data frames if sniffer mode is not enabled.
@@ -1389,7 +1428,7 @@
 
 			peer = dp_peer_find_by_id(pdev->soc,
 						  ppdu_desc->user[0].peer_id);
-			/*
+			/**
 			 * peer can be NULL
 			 */
 			if (!peer) {
@@ -1397,13 +1436,27 @@
 				continue;
 			}
 
-			/*
+			/**
 			 * check whether it is bss peer,
 			 * if bss_peer no need to process further
 			 */
-			if (!peer->bss_peer &&
-			    tx_capture &&
-			    (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA) &&
+			if (peer->bss_peer) {
+				dp_peer_unref_del_find_by_id(peer);
+				qdf_nbuf_free(nbuf);
+				continue;
+			}
+
+			/**
+			 * check whether tx_capture feature is enabled
+			 * for this peer or globally for all peers
+			 */
+			if (!dp_peer_or_pdev_tx_cap_enabled(pdev, peer)) {
+				dp_peer_unref_del_find_by_id(peer);
+				qdf_nbuf_free(nbuf);
+				continue;
+			}
+
+			if ((ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA) &&
 			    (!ppdu_desc->user[0].completion_status)) {
 				/* print the bit map */
 				dp_tx_print_bitmap(pdev, ppdu_desc,
@@ -1430,7 +1483,7 @@
 				/*
 				 * retrieve msdu buffer based on ppdu_id & tid
 				 * based msdu queue and store it in local queue
-				 * sometimes, wbm comes late than per ppdu
+				 * sometimes, wbm comes later than per ppdu
 				 * stats. Assumption: all packets are SU,
 				 * and packets comes in order
 				 */
@@ -1501,15 +1554,12 @@
 				nbuf->next =
 				qdf_nbuf_queue_first(&ppdu_desc->mpdu_q);
 			} else if (ppdu_desc->frame_type ==
-				   CDP_PPDU_FTYPE_CTRL &&
-				   tx_capture) {
+				   CDP_PPDU_FTYPE_CTRL) {
 				nbuf->next =
 				qdf_nbuf_queue_first(&ppdu_desc->mpdu_q);
-
 				nbuf_ppdu_desc_list[ppdu_desc_cnt++] = nbuf;
 			} else {
 				qdf_nbuf_queue_free(&ppdu_desc->mpdu_q);
-
 				qdf_nbuf_free(nbuf);
 			}
 
@@ -1549,14 +1599,24 @@
 			qdf_nbuf_data(ppdu_info->nbuf);
 
 	qdf_spin_lock_bh(&pdev->tx_capture.ppdu_stats_lock);
-	STAILQ_INSERT_TAIL(&pdev->tx_capture.ppdu_stats_queue,
-			   ppdu_info, ppdu_info_queue_elem);
-	pdev->tx_capture.ppdu_stats_queue_depth++;
+
+	if (qdf_unlikely(!pdev->tx_capture_enabled &&
+	    (pdev->tx_capture.ppdu_stats_queue_depth +
+	    pdev->tx_capture.ppdu_stats_defer_queue_depth) >
+	    DP_TX_PPDU_PROC_MAX_DEPTH)) {
+		qdf_nbuf_free(ppdu_info->nbuf);
+		qdf_mem_free(ppdu_info);
+		pdev->tx_capture.ppdu_dropped++;
+	} else {
+		STAILQ_INSERT_TAIL(&pdev->tx_capture.ppdu_stats_queue,
+				   ppdu_info, ppdu_info_queue_elem);
+		pdev->tx_capture.ppdu_stats_queue_depth++;
+	}
 	qdf_spin_unlock_bh(&pdev->tx_capture.ppdu_stats_lock);
 
 	if ((pdev->tx_capture.ppdu_stats_queue_depth >
 	    DP_TX_PPDU_PROC_THRESHOLD) ||
-	    (pdev->tx_capture.ppdu_stats_next_sched > now_ms)) {
+	    (pdev->tx_capture.ppdu_stats_next_sched <= now_ms)) {
 		qdf_queue_work(0, pdev->tx_capture.ppdu_stats_workqueue,
 			       &pdev->tx_capture.ppdu_stats_work);
 		pdev->tx_capture.ppdu_stats_next_sched =
diff --git a/dp/wifi3.0/dp_tx_capture.h b/dp/wifi3.0/dp_tx_capture.h
index d94379f..f15974e 100644
--- a/dp/wifi3.0/dp_tx_capture.h
+++ b/dp/wifi3.0/dp_tx_capture.h
@@ -21,6 +21,8 @@
 
 #ifdef WLAN_TX_PKT_CAPTURE_ENH
 
+#define DP_TX_PPDU_PROC_MAX_DEPTH 512
+
 struct dp_soc;
 struct dp_pdev;
 struct dp_vdev;
@@ -39,11 +41,13 @@
 	STAILQ_HEAD(, ppdu_info) ppdu_stats_defer_queue;
 
 	uint32_t ppdu_stats_queue_depth;
+	uint32_t ppdu_stats_defer_queue_depth;
 	uint32_t ppdu_stats_next_sched;
 	qdf_spinlock_t msdu_comp_q_list_lock;
 	uint32_t missed_ppdu_id;
 	uint32_t last_msdu_id;
 	qdf_event_t miss_ppdu_event;
+	uint32_t ppdu_dropped;
 };
 
 /* Tx TID */
@@ -161,7 +165,7 @@
  * Return: QDF_STATUS
  */
 QDF_STATUS
-dp_config_enh_tx_capture(struct cdp_pdev *pdev_handle, int val);
+dp_config_enh_tx_capture(struct cdp_pdev *pdev_handle, uint8_t val);
 
 /*
  * dp_deliver_mgmt_frm: Process
diff --git a/dp/wifi3.0/dp_txrx_wds.c b/dp/wifi3.0/dp_txrx_wds.c
index e706f87..05ecca2 100644
--- a/dp/wifi3.0/dp_txrx_wds.c
+++ b/dp/wifi3.0/dp_txrx_wds.c
@@ -15,16 +15,23 @@
  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  * PERFORMANCE OF THIS SOFTWARE.
  */
+#include "../../../cmn_dev/fw_hdr/fw/htt.h"
 #include "dp_peer.h"
 #include "hal_rx.h"
 #include "hal_api.h"
 #include "qdf_nbuf.h"
 #include "dp_types.h"
 #include "dp_internal.h"
+#include "dp_tx.h"
+#include "enet.h"
 #include "dp_txrx_wds.h"
 
 /* Generic AST entry aging timer value */
 #define DP_AST_AGING_TIMER_DEFAULT_MS	1000
+#define DP_VLAN_UNTAGGED 0
+#define DP_VLAN_TAGGED_MULTICAST 1
+#define DP_VLAN_TAGGED_UNICAST 2
+#define DP_MAX_VLAN_IDS 4096
 
 static void dp_ast_aging_timer_fn(void *soc_hdl)
 {
@@ -375,3 +382,398 @@
 	return 0;
 }
 #endif
+
+/**
+ * dp_tx_add_groupkey_metadata - Add group key in metadata
+ * @vdev: DP vdev handle
+ * @msdu_info: MSDU info to be setup in MSDU descriptor
+ * @group_key: Group key index programmed in metadata
+ *
+ * Return: void
+ */
+#ifdef QCA_MULTIPASS_SUPPORT
+static
+void dp_tx_add_groupkey_metadata(struct dp_vdev *vdev,
+		struct dp_tx_msdu_info_s *msdu_info, uint16_t group_key)
+{
+	struct htt_tx_msdu_desc_ext2_t *meta_data =
+		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
+
+	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
+
+	/*
+	 * When attempting to send a multicast packet with multi-passphrase,
+	 * host shall add HTT EXT meta data "struct htt_tx_msdu_desc_ext2_t"
+	 * ref htt.h indicating the group_id field in "key_flags" also having
+	 * "valid_key_flags" as 1. Assign "key_flags = group_key_ix".
+	 */
+	HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_SET(msdu_info->meta_data[0], 1);
+	HTT_TX_MSDU_EXT2_DESC_KEY_FLAGS_SET(msdu_info->meta_data[2], group_key);
+}
+
+/**
+ * dp_tx_remove_vlan_tag - Remove 4 bytes of vlan tag
+ * @vdev: DP vdev handle
+ * @nbuf: network buffer from which the 4-byte VLAN tag is removed
+ *
+ * Return: void
+ */
+static
+void dp_tx_remove_vlan_tag(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
+{
+	struct vlan_ethhdr veth_hdr;
+	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)nbuf->data;
+
+	/*
+	 * Extract VLAN header of 4 bytes:
+	 * Frame Format : {dst_addr[6], src_addr[6], 802.1Q header[4], EtherType[2], Payload}
+	 * Before Removal : xx xx xx xx xx xx xx xx xx xx xx xx 81 00 00 02 08 00 45 00 00...
+	 * After Removal  : xx xx xx xx xx xx xx xx xx xx xx xx 08 00 45 00 00...
+	 */
+	qdf_mem_copy(&veth_hdr, veh, sizeof(veth_hdr));
+	qdf_nbuf_pull_head(nbuf, ETHERTYPE_VLAN_LEN);
+	veh = (struct vlan_ethhdr *)nbuf->data;
+	qdf_mem_copy(veh, &veth_hdr, 2 * QDF_MAC_ADDR_SIZE);
+	return;
+}
+
+/**
+ * dp_tx_need_multipass_process - If frame needs multipass phrase processing
+ * @soc: DP soc handle; @vdev: DP vdev handle
+ * @buf: frame buffer to be classified
+ * @vlan_id: output - vlan id parsed from the frame
+ *
+ * Return: whether peer is special or classic
+ */
+static
+uint8_t dp_tx_need_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
+			   qdf_nbuf_t buf, uint16_t *vlan_id)
+{
+	struct dp_peer *peer = NULL;
+	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(buf);
+	struct vlan_ethhdr *veh = NULL;
+	bool not_vlan = ((vdev->tx_encap_type == htt_cmn_pkt_type_raw) ||
+			(htons(eh->ether_type) != ETH_P_8021Q));
+
+	if (qdf_unlikely(not_vlan))
+		return DP_VLAN_UNTAGGED;
+
+	veh = (struct vlan_ethhdr *)eh;
+	*vlan_id = (ntohs(veh->h_vlan_TCI) & VLAN_VID_MASK);
+
+	if (qdf_unlikely(DP_FRAME_IS_MULTICAST((eh)->ether_dhost))) {
+		qdf_spin_lock_bh(&vdev->mpass_peer_mutex);
+		TAILQ_FOREACH(peer, &vdev->mpass_peer_list,
+			      mpass_peer_list_elem) {
+			if (*vlan_id == peer->vlan_id) {
+				qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);
+				return DP_VLAN_TAGGED_MULTICAST;
+			}
+		}
+		qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);
+		return DP_VLAN_UNTAGGED;
+	}
+
+	peer = dp_peer_find_hash_find(soc, eh->ether_dhost, 0, DP_VDEV_ALL);
+
+	if (qdf_unlikely(peer == NULL))
+		return DP_VLAN_UNTAGGED;
+
+	/*
+	 * Do not drop the frame when vlan_id doesn't match.
+	 * Send the frame as it is.
+	 */
+	if (*vlan_id == peer->vlan_id) {
+		dp_peer_unref_delete(peer);
+		return DP_VLAN_TAGGED_UNICAST;
+	}
+
+	return DP_VLAN_UNTAGGED;
+}
+
+/**
+ * dp_tx_multipass_process - Process vlan frames in tx path
+ * @soc: dp soc handle
+ * @vdev: DP vdev handle
+ * @nbuf: skb
+ * @msdu_info: msdu descriptor
+ *
+ * Return: status whether frame needs to be dropped or transmitted
+ */
+bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
+			     qdf_nbuf_t nbuf,
+			     struct dp_tx_msdu_info_s *msdu_info)
+{
+	uint16_t vlan_id = 0;
+	uint16_t group_key = 0;
+	uint8_t is_spcl_peer = DP_VLAN_UNTAGGED;
+	qdf_nbuf_t nbuf_copy = NULL;
+
+	if (HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->meta_data[0])) {
+		return true;
+	}
+
+	is_spcl_peer = dp_tx_need_multipass_process(soc, vdev, nbuf, &vlan_id);
+
+	if ((is_spcl_peer != DP_VLAN_TAGGED_MULTICAST) &&
+	    (is_spcl_peer != DP_VLAN_TAGGED_UNICAST))
+		return true;
+
+	if (is_spcl_peer == DP_VLAN_TAGGED_UNICAST) {
+		dp_tx_remove_vlan_tag(vdev, nbuf);
+		return true;
+	}
+
+	/* AP can have classic clients, special clients &
+	 * classic repeaters.
+	 * 1. Classic clients & special client:
+	 *	Remove vlan header, find corresponding group key
+	 *	index, fill in metaheader and enqueue multicast
+	 *	frame to TCL.
+	 * 2. Classic repeater:
+	 *	Pass through to classic repeater with vlan tag
+	 *	intact without any group key index. Hardware
+	 *	will know which key to use to send frame to
+	 *	repeater.
+	 */
+	nbuf_copy = qdf_nbuf_copy(nbuf);
+
+	/*
+	 * Send multicast frame to special peers even
+	 * if pass through to classic repeater fails.
+	 */
+	if (nbuf_copy) {
+		struct dp_tx_msdu_info_s msdu_info_copy;
+		qdf_mem_zero(&msdu_info_copy, sizeof(msdu_info_copy));
+		msdu_info_copy.tid = HTT_TX_EXT_TID_INVALID;
+		HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_SET(msdu_info_copy.meta_data[0], 1);
+		nbuf_copy = dp_tx_send_msdu_single(vdev, nbuf_copy, &msdu_info_copy, HTT_INVALID_PEER, NULL);
+		if (nbuf_copy) {
+			qdf_nbuf_free(nbuf_copy);
+			qdf_err("nbuf_copy send failed");
+		}
+	}
+
+	group_key = vdev->iv_vlan_map[vlan_id];
+
+	/*
+	 * If group key is not installed, drop the frame.
+	 */
+	if (!group_key)
+		return false;
+
+	dp_tx_remove_vlan_tag(vdev, nbuf);
+	dp_tx_add_groupkey_metadata(vdev, msdu_info, group_key);
+	msdu_info->exception_fw = 1;
+	return true;
+}
+
+/**
+ * dp_rx_multipass_process - insert vlan tag on frames for traffic separation
+ * @peer: DP peer handle
+ * @nbuf: skb
+ * @tid: traffic priority
+ *
+ * Return: bool: true if tag is inserted else false
+ */
+bool dp_rx_multipass_process(struct dp_peer *peer, qdf_nbuf_t nbuf, uint8_t tid)
+{
+	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
+	struct vlan_ethhdr vethhdr;
+
+	if (qdf_unlikely(!peer->vlan_id))
+	       return false;
+
+	if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < ETHERTYPE_VLAN_LEN))
+		return false;
+
+	/*
+	 * Form the VLAN header and insert in nbuf
+	 */
+	qdf_mem_copy(vethhdr.h_dest, eh->ether_dhost, QDF_MAC_ADDR_SIZE);
+	qdf_mem_copy(vethhdr.h_source, eh->ether_shost, QDF_MAC_ADDR_SIZE);
+	vethhdr.h_vlan_proto = htons(QDF_ETH_TYPE_8021Q);
+	vethhdr.h_vlan_TCI = htons(((tid & 0x7) << VLAN_PRIO_SHIFT) |
+			      (peer->vlan_id & VLAN_VID_MASK));
+
+	/*
+	 * Packet format : DSTMAC | SRCMAC | <VLAN HEADERS TO BE INSERTED> | ETHERTYPE | IP HEADER
+	 * DSTMAC: 6 BYTES
+	 * SRCMAC: 6 BYTES
+	 * VLAN HEADER: 4 BYTES ( TPID | PCP | VLAN ID)
+	 * ETHERTYPE: 2 BYTES
+	 */
+	qdf_nbuf_push_head(nbuf, sizeof(struct vlan_hdr));
+	qdf_mem_copy(qdf_nbuf_data(nbuf), &vethhdr,
+		     sizeof(struct vlan_ethhdr)- ETHERNET_TYPE_LEN);
+
+	return true;
+}
+
+/**
+ * dp_peer_multipass_list_remove: remove peer from list
+ * @peer: pointer to peer
+ *
+ * return: void
+ */
+void dp_peer_multipass_list_remove(struct dp_peer *peer)
+{
+	struct dp_vdev *vdev = peer->vdev;
+	struct dp_peer *tpeer = NULL;
+	bool found = 0;
+
+	qdf_spin_lock_bh(&vdev->mpass_peer_mutex);
+	TAILQ_FOREACH(tpeer, &vdev->mpass_peer_list, mpass_peer_list_elem) {
+		if (tpeer == peer) {
+			found = 1;
+			TAILQ_REMOVE(&vdev->mpass_peer_list, peer, mpass_peer_list_elem);
+			break;
+		}
+	}
+
+	qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);
+
+	if (found)
+		dp_peer_unref_delete(peer);
+}
+
+/**
+ * dp_peer_multipass_list_add: add to new multipass list
+ * @dp_soc: soc handle
+ * @dp_vdev: vdev handle
+ * @peer_mac: mac address
+ *
+ * return: void
+ */
+static void dp_peer_multipass_list_add(struct dp_soc *soc, struct dp_vdev *vdev,
+					uint8_t *peer_mac)
+{
+	struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac, 0,
+						      vdev->vdev_id);
+
+	if (!peer) {
+		return;
+	}
+
+	/*
+	 * Ref_cnt is incremented inside dp_peer_find_hash_find().
+	 * Decrement it when element is deleted from the list.
+	 */
+	qdf_spin_lock_bh(&vdev->mpass_peer_mutex);
+	TAILQ_INSERT_HEAD(&vdev->mpass_peer_list, peer, mpass_peer_list_elem);
+	qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);
+}
+
+/**
+ * dp_peer_set_vlan_id: set vlan_id for this peer
+ * @cdp_soc: soc handle
+ * @vdev_handle: vdev handle; @peer_mac: mac address of peer
+ * @vlan_id: vlan id for peer
+ *
+ * return: void
+ */
+void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
+		struct cdp_vdev *vdev_handle, uint8_t *peer_mac,
+		uint16_t vlan_id)
+{
+	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
+	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
+	struct dp_peer *peer = NULL;
+
+	if (!vdev->multipass_en)
+		return;
+
+	peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev->vdev_id);
+
+	if (qdf_unlikely(!peer)) {
+		qdf_err("NULL peer");
+		return;
+	}
+
+	peer->vlan_id = vlan_id;
+
+	/* Ref_cnt is incremented inside dp_peer_find_hash_find().
+	 * Decrement it here.
+	 */
+	dp_peer_unref_delete(peer);
+	dp_peer_multipass_list_add(soc, vdev, peer_mac);
+}
+
+/**
+ * dp_set_vlan_groupkey: set vlan map for vdev
+ * @vdev_handle: pointer to vdev
+ * @vlan_id: vlan_id
+ * @group_key: group key for vlan
+ *
+ * return: set success/failure
+ */
+QDF_STATUS dp_set_vlan_groupkey(struct cdp_vdev *vdev_handle,
+		uint16_t vlan_id, uint16_t group_key)
+{
+	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
+
+	if (!vdev->multipass_en)
+		return QDF_STATUS_E_INVAL;
+
+	if (!vdev->iv_vlan_map) {
+		uint16_t vlan_map_size = (sizeof(uint16_t))*DP_MAX_VLAN_IDS;
+		vdev->iv_vlan_map = (uint16_t *)qdf_mem_malloc(vlan_map_size);
+
+		if (!vdev->iv_vlan_map) {
+			QDF_TRACE_ERROR(QDF_MODULE_ID_DP, "iv_vlan_map");
+			return QDF_STATUS_E_NOMEM;
+		}
+
+		/*
+		 * 0 is invalid group key.
+		 * Initialize array with invalid group keys.
+		 */
+		qdf_mem_zero(vdev->iv_vlan_map, vlan_map_size);
+	}
+
+	if (vlan_id >= DP_MAX_VLAN_IDS)
+		return QDF_STATUS_E_INVAL;
+
+	vdev->iv_vlan_map[vlan_id] = group_key;
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * dp_tx_vdev_multipass_deinit: deinitialize multipass support and free vlan map
+ * @vdev_handle: pointer to vdev
+ *
+ * return: void
+ */
+void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev)
+{
+	struct dp_peer *peer = NULL;
+	qdf_spin_lock_bh(&vdev->mpass_peer_mutex);
+	TAILQ_FOREACH(peer, &vdev->mpass_peer_list, mpass_peer_list_elem)
+		qdf_err("Peers present in mpass list : %llx",
+			peer->mac_addr.raw);
+	qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);
+
+	if (vdev->iv_vlan_map) {
+		qdf_mem_free(vdev->iv_vlan_map);
+		vdev->iv_vlan_map = NULL;
+	}
+
+	qdf_spinlock_destroy(&vdev->mpass_peer_mutex);
+}
+
+/**
+ * dp_peer_multipass_list_init: initialize peer multipass list
+ * @vdev_handle: pointer to vdev
+ *
+ * return: set success/failure
+ */
+void dp_peer_multipass_list_init(struct dp_vdev *vdev)
+{
+	/*
+	 * vdev->iv_vlan_map is allocated when the first configuration command
+	 * is issued to avoid unnecessary allocation for regular mode VAP.
+	 */
+	TAILQ_INIT(&vdev->mpass_peer_list);
+	qdf_spinlock_create(&vdev->mpass_peer_mutex);
+}
+#endif
diff --git a/tools/linux/peerstats.c b/tools/linux/peerstats.c
index 008c543..0ab2a17 100644
--- a/tools/linux/peerstats.c
+++ b/tools/linux/peerstats.c
@@ -92,7 +92,7 @@
 		if (rx_stats->rix != INVALID_CACHE_IDX) {
 			PRINT(" %10u | %10u | %10u | %10u | %10u | %10u |",
 			      rx_stats->rate,
-			      rx_stats->rix,
+			      GET_DP_PEER_STATS_RIX(rx_stats->rix),
 			      rx_stats->num_bytes,
 			      rx_stats->num_msdus,
 			      rx_stats->num_mpdus,
@@ -314,7 +314,7 @@
 		if (tx_stats->rix != INVALID_CACHE_IDX) {
 			PRINT("\t\t%10u | %10u | %10u | %10u | %10u\n",
 			      tx_stats->rate,
-			      tx_stats->rix,
+			      GET_DP_PEER_STATS_RIX(tx_stats->rix),
 			      tx_stats->mpdu_attempts,
 			      tx_stats->mpdu_success,
 			      tx_stats->num_ppdus);
diff --git a/umac/dfs/core/src/misc/dfs_zero_cac.c b/umac/dfs/core/src/misc/dfs_zero_cac.c
index 06b68b3..7734ca6 100644
--- a/umac/dfs/core/src/misc/dfs_zero_cac.c
+++ b/umac/dfs/core/src/misc/dfs_zero_cac.c
@@ -1288,6 +1288,12 @@
 	int nchans = 0;
 	QDF_STATUS status;
 
+	/* Right now, only ETSI domain supports preCAC. Check if current
+	 * DFS domain is ETSI and only then build the preCAC list.
+	 */
+	if (utils_get_dfsdomain(dfs->dfs_pdev_obj) != DFS_ETSI_DOMAIN)
+		return;
+
 	/*
 	 * We need to prepare list of uniq VHT80 center frequencies. But at the
 	 * beginning we do not know how many uniq frequencies are present.
@@ -2484,33 +2490,12 @@
 
 uint32_t dfs_get_precac_enable(struct wlan_dfs *dfs)
 {
-	struct wlan_objmgr_psoc *psoc;
-	struct target_psoc_info *tgt_hdl;
-	uint32_t retval = 0;
-	struct tgt_info *info;
+	return dfs->dfs_precac_enable;
+}
 
-	psoc = wlan_pdev_get_psoc(dfs->dfs_pdev_obj);
-	if (!psoc) {
-		dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS,  "psoc is NULL");
-		dfs->dfs_agile_precac_enable = 0;
-		retval = 0;
-	}
-
-	tgt_hdl = wlan_psoc_get_tgt_if_handle(psoc);
-
-	info = (struct tgt_info *)(&tgt_hdl->info);
-	if (!tgt_hdl) {
-		dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "target_psoc_info is null");
-		dfs->dfs_agile_precac_enable = 0;
-		retval = 0;
-	}
-
-	if (info->wlan_res_cfg.agile_capability == 0)
-		retval = dfs->dfs_precac_enable;
-	else
-		retval = dfs->dfs_agile_precac_enable;
-
-	return retval;
+bool dfs_get_agile_precac_enable(struct wlan_dfs *dfs)
+{
+	return dfs->dfs_agile_precac_enable;
 }
 
 #ifdef WLAN_DFS_PRECAC_AUTO_CHAN_SUPPORT
diff --git a/wmi/src/wmi_unified_ap_tlv.c b/wmi/src/wmi_unified_ap_tlv.c
index be19348..6064f3c 100644
--- a/wmi/src/wmi_unified_ap_tlv.c
+++ b/wmi/src/wmi_unified_ap_tlv.c
@@ -1437,6 +1437,9 @@
 	if (tchan_info->dfs_set)
 		WMI_SET_CHANNEL_FLAG(chan_info, WMI_CHAN_FLAG_DFS);
 
+	if (tchan_info->dfs_set_cfreq2)
+		WMI_SET_CHANNEL_FLAG(chan_info, WMI_CHAN_FLAG_DFS_CFREQ2);
+
 	if (tchan_info->allow_vht)
 		WMI_SET_CHANNEL_FLAG(chan_info,
 				     WMI_CHAN_FLAG_ALLOW_VHT);
diff --git a/wmi/src/wmi_unified_non_tlv.c b/wmi/src/wmi_unified_non_tlv.c
index 20b83e6..5a26492 100644
--- a/wmi/src/wmi_unified_non_tlv.c
+++ b/wmi/src/wmi_unified_non_tlv.c
@@ -3230,6 +3230,7 @@
 	wmi_buf_t buf;
 	int len = 0;
 	int ret;
+	uint8_t rs = 0, irs = 0;
 
 	len = sizeof(wmi_peer_sant_set_train_antenna_cmd);
 	buf = wmi_buf_alloc(wmi_handle, len);
@@ -3241,10 +3242,16 @@
 	cmd = (wmi_peer_sant_set_train_antenna_cmd *)wmi_buf_data(buf);
 	cmd->vdev_id = param->vdev_id;
 	WMI_CHAR_ARRAY_TO_MAC_ADDR(macaddr, &cmd->peer_macaddr);
-	qdf_mem_copy(&cmd->train_rate_series[0], &param->rate_array[0],
-			(sizeof(uint32_t)*SMART_ANT_MAX_RATE_SERIES));
 	qdf_mem_copy(&cmd->train_antenna_series[0], &param->antenna_array[0],
 			(sizeof(uint32_t)*SMART_ANT_MAX_RATE_SERIES));
+	for (rs = 0; rs < SMART_ANT_MAX_RATE_SERIES; rs++) {
+		cmd->train_rate_series[rs] =
+		((param->rate_array[irs] & SA_MASK_BYTE) |
+		((param->rate_array[irs] & SA_MASK_BYTE3) >> 8) |
+		((param->rate_array[irs + 1] & SA_MASK_BYTE) << 16) |
+		(param->rate_array[irs + 1] & SA_MASK_BYTE3));
+		irs += 2;
+	}
 	cmd->num_pkts = param->numpkts;
 	ret = wmi_unified_cmd_send(wmi_handle,
 				   buf,
@@ -9781,6 +9788,9 @@
 	if (tchan_info->dfs_set)
 		WMI_SET_CHANNEL_FLAG(chan_info, WMI_CHAN_FLAG_DFS);
 
+	if (tchan_info->dfs_set_cfreq2)
+		WMI_SET_CHANNEL_FLAG(chan_info, WMI_CHAN_FLAG_DFS_CFREQ2);
+
 	if (tchan_info->allow_vht)
 		WMI_SET_CHANNEL_FLAG(chan_info, WMI_CHAN_FLAG_ALLOW_VHT);
 	else  if (tchan_info->allow_ht)
@@ -10367,6 +10377,7 @@
 		WMI_SERVICE_VDEV_DELETE_ALL_PEER;
 	wmi_service[wmi_service_cfr_capture_support] =
 		WMI_SERVICE_CFR_CAPTURE_SUPPORT;
+	wmi_service[wmi_service_rx_fse_support] = WMI_SERVICE_UNAVAILABLE;
 }
 
 /**
diff --git a/wmi/src/wmi_unified_smart_ant_tlv.c b/wmi/src/wmi_unified_smart_ant_tlv.c
index 64b5c3b..cb8dd34 100644
--- a/wmi/src/wmi_unified_smart_ant_tlv.c
+++ b/wmi/src/wmi_unified_smart_ant_tlv.c
@@ -334,7 +334,7 @@
 	uint8_t *buf_ptr;
 	int32_t len = 0;
 	QDF_STATUS ret;
-	int loop;
+	uint8_t loop = 0, itr = 0;
 
 	len = sizeof(*cmd) + WMI_TLV_HDR_SIZE;
 	len += (WMI_SMART_ANT_MAX_RATE_SERIES) *
@@ -372,7 +372,13 @@
 		WMITLV_TAG_STRUC_wmi_peer_smart_ant_set_train_antenna_param,
 			    WMITLV_GET_STRUCT_TLVLEN(
 				wmi_peer_smart_ant_set_train_antenna_param));
-		train_param->train_rate_series = param->rate_array[loop];
+		train_param->train_rate_series_lo =
+			((param->rate_array[itr] & SA_MASK_RCODE) |
+			(param->rate_array[itr] & (SA_MASK_RCODE << 16)));
+		train_param->train_rate_series_hi =
+			((param->rate_array[itr + 1] & SA_MASK_RCODE) |
+			(param->rate_array[itr + 1] & (SA_MASK_RCODE << 16)));
+		itr += 2;
 		train_param->train_antenna_series = param->antenna_array[loop];
 		train_param->rc_flags = 0;
 		WMI_LOGI(FL("Series number:%d\n"), loop);
@@ -509,17 +515,18 @@
 	htindex = 0;
 	if (rate_cap->ratecount[0]) {
 		if (param_buf->num_ratecode_legacy >
-				SA_MAX_LEGACY_RATE_DWORDS) {
+				SA_MAX_LEGACY_RATE_WORDS) {
 			WMI_LOGE("Invalid Number of ratecode_legacy %d",
 					param_buf->num_ratecode_legacy);
 			return QDF_STATUS_E_FAILURE;
 		}
+		ofdm_rate = param_buf->ratecode_legacy;
 		for (i = 0; i < param_buf->num_ratecode_legacy; i++) {
-			ofdm_rate = param_buf->ratecode_legacy;
-			for (j = 0; j < SA_BYTES_IN_DWORD; j++) {
+			for (j = 0; j < SA_WORDS_IN_DWORD; j++) {
+				shift = (SA_WORD_BITS_LEN * j);
 				rate_cap->ratecode_legacy[htindex] =
-					((ofdm_rate->ratecode_legacy >> (8*j)) &
-					SA_MASK_BYTE);
+					((ofdm_rate->ratecode_legacy >> shift) &
+						SA_MASK_RCODE);
 				htindex++;
 			}
 			ofdm_rate++;
@@ -527,21 +534,21 @@
 	}
 
 	htindex = 0;
-	if (param_buf->num_ratecode_mcs > SA_MAX_HT_RATE_DWORDS) {
+	if (param_buf->num_ratecode_mcs > SA_MAX_HT_RATE_WORDS) {
 		WMI_LOGE("Invalid Number of ratecode_mcs %d",
 				param_buf->num_ratecode_mcs);
 		return QDF_STATUS_E_FAILURE;
 	}
+	mcs_rate = param_buf->ratecode_mcs;
 	for (i = 0; i < param_buf->num_ratecode_mcs; i++) {
-		mcs_rate = param_buf->ratecode_mcs;
-		for (j = 0; j < SA_BYTES_IN_DWORD; j++) {
-			shift = (8*j);
+		for (j = 0; j < SA_WORDS_IN_DWORD; j++) {
+			shift = (SA_WORD_BITS_LEN * j);
 			rate_cap->ratecode_20[htindex] =
-			    ((mcs_rate->ratecode_20 >> (shift)) & SA_MASK_BYTE);
+			((mcs_rate->ratecode_20 >> (shift)) & SA_MASK_RCODE);
 			rate_cap->ratecode_40[htindex] =
-			    ((mcs_rate->ratecode_40 >> (shift)) & SA_MASK_BYTE);
+			((mcs_rate->ratecode_40 >> (shift)) & SA_MASK_RCODE);
 			rate_cap->ratecode_80[htindex] =
-			    ((mcs_rate->ratecode_80 >> (shift)) & SA_MASK_BYTE);
+			((mcs_rate->ratecode_80 >> (shift)) & SA_MASK_RCODE);
 			htindex++;
 		}
 		mcs_rate++;