Merge "qcacmn: Remove redundant data structure for wmi response extraction"
diff --git a/dp/wifi3.0/dp_full_mon.c b/dp/wifi3.0/dp_full_mon.c
new file mode 100644
index 0000000..eda94b7
--- /dev/null
+++ b/dp/wifi3.0/dp_full_mon.c
@@ -0,0 +1,639 @@
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "dp_types.h"
+#include "hal_rx.h"
+#include "hal_api.h"
+#include "qdf_trace.h"
+#include "qdf_nbuf.h"
+#include "hal_api_mon.h"
+#include "dp_rx.h"
+#include "dp_rx_mon.h"
+#include "dp_internal.h"
+#include "dp_htt.h"
+#include "dp_full_mon.h"
+#include "qdf_mem.h"
+
+#ifdef QCA_SUPPORT_FULL_MON
+
+uint32_t
+dp_rx_mon_status_process(struct dp_soc *soc,
+			 uint32_t mac_id,
+			 uint32_t quota);
+
+/*
+ * dp_rx_mon_prepare_mon_mpdu () - API to prepare dp_mon_mpdu object
+ *
+ * @pdev: DP pdev object
+ * @head_msdu: Head msdu
+ * @tail_msdu: Tail msdu
+ *
+ */
+static inline struct dp_mon_mpdu *
+dp_rx_mon_prepare_mon_mpdu(struct dp_pdev *pdev,
+			   qdf_nbuf_t head_msdu,
+			   qdf_nbuf_t tail_msdu)
+{
+	struct dp_mon_mpdu *mon_mpdu = NULL;
+
+	mon_mpdu = qdf_mem_malloc(sizeof(struct dp_mon_mpdu));
+
+	if (!mon_mpdu) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			  FL("Monitor MPDU object allocation failed -- %pK"),
+			     pdev);
+		qdf_assert_always(0);
+	}
+
+	mon_mpdu->head = head_msdu;
+	mon_mpdu->tail = tail_msdu;
+	mon_mpdu->rs_flags = pdev->ppdu_info.rx_status.rs_flags;
+	mon_mpdu->ant_signal_db = pdev->ppdu_info.rx_status.ant_signal_db;
+	mon_mpdu->is_stbc = pdev->ppdu_info.rx_status.is_stbc;
+	mon_mpdu->sgi = pdev->ppdu_info.rx_status.sgi;
+	mon_mpdu->beamformed = pdev->ppdu_info.rx_status.beamformed;
+
+	return mon_mpdu;
+}
+
+/*
+ * dp_rx_monitor_deliver_ppdu () - API to deliver all MPDUs of a PPDU
+ * to the upper layer stack
+ *
+ * @soc: DP soc handle
+ * @mac_id: lmac id
+ */
+static inline QDF_STATUS
+dp_rx_monitor_deliver_ppdu(struct dp_soc *soc, uint32_t mac_id)
+{
+	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
+	struct dp_mon_mpdu *mpdu = NULL;
+	struct dp_mon_mpdu *temp_mpdu = NULL;
+
+	if (!TAILQ_EMPTY(&pdev->mon_mpdu_q)) {
+		TAILQ_FOREACH_SAFE(mpdu,
+				   &pdev->mon_mpdu_q,
+				   mpdu_list_elem,
+				   temp_mpdu) {
+			TAILQ_REMOVE(&pdev->mon_mpdu_q,
+				     mpdu, mpdu_list_elem);
+
+			pdev->ppdu_info.rx_status.rs_flags = mpdu->rs_flags;
+			pdev->ppdu_info.rx_status.ant_signal_db =
+				mpdu->ant_signal_db;
+			pdev->ppdu_info.rx_status.is_stbc = mpdu->is_stbc;
+			pdev->ppdu_info.rx_status.sgi = mpdu->sgi;
+			pdev->ppdu_info.rx_status.beamformed = mpdu->beamformed;
+
+			dp_rx_mon_deliver(soc, mac_id,
+					  mpdu->head, mpdu->tail);
+
+			qdf_mem_free(mpdu);
+		}
+	}
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * dp_rx_mon_reap_status_ring () - Reap status_buf_count of status buffers for
+ * status ring.
+ *
+ * @soc: DP soc handle
+ * @mac_id: mac id on which interrupt is received
+ * @quota: number of status ring entries to be reaped
+ * @desc_info: Rx ppdu desc info
+ */
+static inline uint32_t
+dp_rx_mon_reap_status_ring(struct dp_soc *soc,
+			   uint32_t mac_id,
+			   uint32_t quota,
+			   struct hal_rx_mon_desc_info *desc_info)
+{
+	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
+	uint8_t status_buf_count;
+	uint32_t work_done;
+
+	status_buf_count = desc_info->status_buf_count;
+
+	qdf_mem_copy(&pdev->mon_desc, desc_info,
+		     sizeof(struct hal_rx_mon_desc_info));
+
+	work_done = dp_rx_mon_status_process(soc, mac_id, status_buf_count);
+
+	if (desc_info->ppdu_id != pdev->ppdu_info.com_info.ppdu_id) {
+		qdf_err("DEBUG: count: %d quota: %d", status_buf_count, quota);
+		dp_print_ring_stats(pdev);
+		qdf_assert_always(0);
+	}
+
+	/* DEBUG */
+	if (work_done != status_buf_count) {
+		qdf_err("Reaped status ring buffers are not equal to "
+			"status buf count from destination ring work_done:"
+			" %d status_buf_count: %d",
+			work_done, status_buf_count);
+
+		dp_print_ring_stats(pdev);
+		qdf_assert_always(0);
+	}
+
+	return work_done;
+}
+
+/**
+ * dp_rx_mon_mpdu_reap () - This API reaps a mpdu from mon dest ring descriptor
+ * and returns link descriptor to HW (WBM)
+ *
+ * @soc: DP soc handle
+ * @mac_id: lmac id
+ * @ring_desc: SW monitor ring desc
+ * @head_msdu: nbuf pointing to first msdu in a chain
+ * @tail_msdu: nbuf pointing to last msdu in a chain
+ * @head_desc: head pointer to free desc list
+ * @tail_desc: tail pointer to free desc list
+ *
+ * Return: number of reaped buffers
+ */
+static inline uint32_t
+dp_rx_mon_mpdu_reap(struct dp_soc *soc, uint32_t mac_id, void *ring_desc,
+		    qdf_nbuf_t *head_msdu, qdf_nbuf_t *tail_msdu,
+		    union dp_rx_desc_list_elem_t **head_desc,
+		    union dp_rx_desc_list_elem_t **tail_desc)
+{
+	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
+	struct dp_rx_desc *rx_desc = NULL;
+	struct hal_rx_msdu_list msdu_list;
+	uint32_t rx_buf_reaped = 0;
+	uint16_t num_msdus = 0, msdu_index, rx_hdr_tlv_len, l3_hdr_pad;
+	uint32_t total_frag_len = 0, frag_len = 0;
+	bool drop_mpdu = false;
+	bool msdu_frag = false;
+	void *link_desc_va;
+	uint8_t *rx_tlv_hdr;
+	qdf_nbuf_t msdu = NULL, last_msdu = NULL;
+	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
+	struct hal_rx_mon_desc_info *desc_info;
+
+	desc_info = pdev->mon_desc;
+
+	qdf_mem_zero(desc_info, sizeof(struct hal_rx_mon_desc_info));
+
+	/* Read SW Mon ring descriptor */
+	hal_rx_sw_mon_desc_info_get((struct hal_soc *)soc->hal_soc,
+				    ring_desc,
+				    (void *)desc_info);
+
+	/* If end_of_ppdu is 1, return*/
+	if (desc_info->end_of_ppdu)
+		return rx_buf_reaped;
+
+	/* If there is rxdma error, drop mpdu */
+	if (qdf_unlikely(dp_rx_mon_is_rxdma_error(desc_info)
+			== QDF_STATUS_SUCCESS)) {
+		drop_mpdu = true;
+		pdev->rx_mon_stats.dest_mpdu_drop++;
+	}
+
+	/*
+	 * while loop iterates through all link descriptors and
+	 * reaps msdu_count number of msdus for one SW_MONITOR_RING descriptor
+	 * and forms nbuf queue.
+	 */
+	while (desc_info->msdu_count && desc_info->link_desc.paddr) {
+		link_desc_va = dp_rx_cookie_2_mon_link_desc(pdev,
+							    desc_info->link_desc,
+							    mac_id);
+
+		qdf_assert_always(link_desc_va);
+
+		hal_rx_msdu_list_get(soc->hal_soc,
+				     link_desc_va,
+				     &msdu_list,
+				     &num_msdus);
+
+		for (msdu_index = 0; msdu_index < num_msdus; msdu_index++) {
+			rx_desc = dp_rx_get_mon_desc(soc,
+						     msdu_list.sw_cookie[msdu_index]);
+
+			qdf_assert_always(rx_desc);
+
+			msdu = rx_desc->nbuf;
+
+			if (rx_desc->unmapped == 0) {
+				qdf_nbuf_unmap_single(soc->osdev,
+						      msdu,
+						      QDF_DMA_FROM_DEVICE);
+				rx_desc->unmapped = 1;
+			}
+
+			if (drop_mpdu) {
+				qdf_nbuf_free(msdu);
+				msdu = NULL;
+				desc_info->msdu_count--;
+				goto next_msdu;
+			}
+
+			rx_tlv_hdr = qdf_nbuf_data(msdu);
+
+			if (hal_rx_desc_is_first_msdu(soc->hal_soc,
+						      rx_tlv_hdr))
+				hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc,
+								   rx_tlv_hdr,
+								   &pdev->ppdu_info.rx_status);
+
+			/** If msdu is fragmented, spread across multiple
+			 *  buffers
+			 *   a. calculate len of each fragmented buffer
+			 *   b. calculate the number of fragmented buffers for
+			 *      a msdu and decrement one msdu_count
+			 */
+			if (msdu_list.msdu_info[msdu_index].msdu_flags
+			    & HAL_MSDU_F_MSDU_CONTINUATION) {
+				if (!msdu_frag) {
+					total_frag_len = msdu_list.msdu_info[msdu_index].msdu_len;
+					msdu_frag = true;
+				}
+				dp_mon_adjust_frag_len(&total_frag_len,
+						       &frag_len);
+			} else {
+				if (msdu_frag)
+					dp_mon_adjust_frag_len(&total_frag_len,
+							       &frag_len);
+				else
+					frag_len = msdu_list.msdu_info[msdu_index].msdu_len;
+				msdu_frag = false;
+				desc_info->msdu_count--;
+			}
+
+			rx_hdr_tlv_len = SIZE_OF_MONITOR_TLV;
+
+			/*
+			 * HW structures call this L3 header padding.
+			 * this is actually the offset
+			 * from the buffer beginning where the L2
+			 * header begins.
+			 */
+
+			l3_hdr_pad = hal_rx_msdu_end_l3_hdr_padding_get(
+								soc->hal_soc,
+								rx_tlv_hdr);
+
+			/*******************************************************
+			 *                    RX_PACKET                        *
+			 * ----------------------------------------------------*
+			 |   RX_PKT_TLVS  |   L3 Padding header  |  msdu data| |
+			 * ----------------------------------------------------*
+			 ******************************************************/
+
+			qdf_nbuf_set_pktlen(msdu,
+					    rx_hdr_tlv_len +
+					    l3_hdr_pad +
+					    frag_len);
+
+			if (head_msdu && !*head_msdu)
+				*head_msdu = msdu;
+			else if (last_msdu)
+				qdf_nbuf_set_next(last_msdu, msdu);
+
+			last_msdu = msdu;
+
+next_msdu:
+			rx_buf_reaped++;
+			dp_rx_add_to_free_desc_list(head_desc,
+						    tail_desc,
+						    rx_desc);
+
+			QDF_TRACE(QDF_MODULE_ID_DP,
+				  QDF_TRACE_LEVEL_DEBUG,
+				  FL("total_len %u frag_len %u flags %u"),
+				  total_frag_len, frag_len,
+				  msdu_list.msdu_info[msdu_index].msdu_flags);
+		}
+
+		hal_rxdma_buff_addr_info_set(rx_link_buf_info,
+					     desc_info->link_desc.paddr,
+					     desc_info->link_desc.sw_cookie,
+					     desc_info->link_desc.rbm);
+
+		/* Get next link desc VA from current link desc */
+		hal_rx_mon_next_link_desc_get(link_desc_va,
+					      &desc_info->link_desc);
+
+		/* return msdu link descriptor to WBM */
+		if (dp_rx_monitor_link_desc_return(pdev,
+						   (hal_buff_addrinfo_t)rx_link_buf_info,
+						   mac_id,
+						   HAL_BM_ACTION_PUT_IN_IDLE_LIST)
+				!= QDF_STATUS_SUCCESS) {
+			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+				  "dp_rx_monitor_link_desc_return failed");
+			dp_print_ring_stats(pdev);
+			qdf_assert_always(0);
+		}
+	}
+
+	if (last_msdu)
+		qdf_nbuf_set_next(last_msdu, NULL);
+
+	*tail_msdu = last_msdu;
+
+	return rx_buf_reaped;
+}
+
+/**
+ * dp_rx_mon_process () - Core brain processing for monitor mode
+ *
+ * This API processes monitor destination ring followed by monitor status ring
+ * Called from bottom half (tasklet/NET_RX_SOFTIRQ)
+ *
+ * @soc: datapath soc context
+ * @mac_id: mac_id on which interrupt is received
+ * @quota: Number of status ring entry that can be serviced in one shot.
+ *
+ * @Return: Number of reaped status ring entries
+ */
+uint32_t dp_rx_mon_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota)
+{
+	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
+	union dp_rx_desc_list_elem_t *head_desc = NULL;
+	union dp_rx_desc_list_elem_t *tail_desc = NULL;
+	uint32_t rx_bufs_reaped = 0;
+	struct dp_mon_mpdu *mon_mpdu;
+	struct cdp_pdev_mon_stats *rx_mon_stats = &pdev->rx_mon_stats;
+	hal_rxdma_desc_t ring_desc;
+	hal_soc_handle_t hal_soc;
+	hal_ring_handle_t mon_dest_srng;
+	qdf_nbuf_t head_msdu = NULL;
+	qdf_nbuf_t tail_msdu = NULL;
+	struct hal_rx_mon_desc_info *desc_info;
+	int mac_for_pdev = mac_id;
+	QDF_STATUS status;
+
+	if (qdf_unlikely(!dp_soc_is_full_mon_enable(pdev)))
+		return quota;
+
+	mon_dest_srng = dp_rxdma_get_mon_dst_ring(pdev, mac_for_pdev);
+
+	if (qdf_unlikely(!mon_dest_srng ||
+			 !hal_srng_initialized(mon_dest_srng))) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			  FL("HAL Monitor Destination Ring Init Failed -- %pK"),
+			  mon_dest_srng);
+		goto done;
+	}
+
+	hal_soc = soc->hal_soc;
+
+	qdf_assert_always(hal_soc && pdev);
+
+	qdf_spin_lock_bh(&pdev->mon_lock);
+
+	desc_info = pdev->mon_desc;
+
+	if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_dest_srng))) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			  FL("HAL Monitor Destination Ring access Failed -- %pK"),
+			  mon_dest_srng);
+		goto done1;
+	}
+
+	/* Each entry in mon dest ring carries mpdu data
+	 * reap all msdus for a mpdu and form skb chain
+	 */
+	while (qdf_likely(ring_desc =
+			  hal_srng_dst_peek(hal_soc, mon_dest_srng))) {
+		head_msdu = NULL;
+		tail_msdu = NULL;
+		rx_bufs_reaped = dp_rx_mon_mpdu_reap(soc, mac_id,
+						     ring_desc, &head_msdu,
+						     &tail_msdu, &head_desc,
+						     &tail_desc);
+
+		/* Assert if end_of_ppdu is zero and number of reaped buffers
+		 * are zero.
+		 */
+		if (qdf_unlikely(!desc_info->end_of_ppdu && !rx_bufs_reaped)) {
+			dp_print_ring_stats(pdev);
+			qdf_assert_always(0);
+		}
+
+		rx_mon_stats->mon_rx_bufs_reaped_dest += rx_bufs_reaped;
+
+		/* replenish rx_bufs_reaped buffers back to
+		 * RxDMA Monitor buffer ring
+		 */
+		if (rx_bufs_reaped) {
+			status = dp_rx_buffers_replenish(soc, mac_id,
+							 dp_rxdma_get_mon_buf_ring(pdev,
+										   mac_for_pdev),
+							 dp_rx_get_mon_desc_pool(soc, mac_id,
+										 pdev->pdev_id),
+										 rx_bufs_reaped,
+										 &head_desc, &tail_desc);
+			if (status != QDF_STATUS_SUCCESS)
+				qdf_assert_always(0);
+
+			rx_mon_stats->mon_rx_bufs_replenished_dest += rx_bufs_reaped;
+		}
+
+		head_desc = NULL;
+		tail_desc = NULL;
+
+		/* If end_of_ppdu is zero, it is a valid data mpdu
+		 *    a. Add head_msdu and tail_msdu to mpdu list
+		 *    b. continue reaping next SW_MONITOR_RING descriptor
+		 */
+
+		if (!desc_info->end_of_ppdu) {
+			/*
+			 * In case of rxdma error, MPDU is dropped
+			 * from sw_monitor_ring descriptor.
+			 * in this case, head_msdu remains NULL.
+			 * move srng to next and continue reaping next entry
+			 */
+			if (!head_msdu) {
+				ring_desc = hal_srng_dst_get_next(hal_soc,
+								  mon_dest_srng);
+				continue;
+			}
+
+			/*
+			 * Prepare a MPDU object which holds chain of msdus
+			 * and MPDU specific status and add this is to
+			 * monitor mpdu queue
+			 */
+			mon_mpdu = dp_rx_mon_prepare_mon_mpdu(pdev,
+							      head_msdu,
+							      tail_msdu);
+
+			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+				  FL("Dest_srng: %pK MPDU_OBJ: %pK "
+				  "head_msdu: %pK tail_msdu: %pK -- "),
+				  mon_dest_srng,
+				  mon_mpdu,
+				  head_msdu,
+				  tail_msdu);
+
+			TAILQ_INSERT_TAIL(&pdev->mon_mpdu_q,
+					  mon_mpdu,
+					  mpdu_list_elem);
+
+			head_msdu = NULL;
+			tail_msdu = NULL;
+			ring_desc = hal_srng_dst_get_next(hal_soc,
+							  mon_dest_srng);
+			continue;
+		}
+
+		/*
+		 * end_of_ppdu is one,
+		 *  a. update ppdu_done statistics
+		 *  b. Replenish buffers back to mon buffer ring
+		 *  c. reap status ring for a PPDU and deliver all mpdus
+		 *     to upper layer
+		 */
+		rx_mon_stats->dest_ppdu_done++;
+
+		if (pdev->ppdu_info.com_info.ppdu_id !=
+		    pdev->mon_desc->ppdu_id) {
+			pdev->rx_mon_stats.ppdu_id_mismatch++;
+			qdf_err("PPDU id mismatch, status_ppdu_id: %d"
+				"dest_ppdu_id: %d status_ppdu_done: %d "
+				"dest_ppdu_done: %d ppdu_id_mismatch_cnt: %u"
+				"dest_mpdu_drop: %u",
+				pdev->ppdu_info.com_info.ppdu_id,
+				pdev->mon_desc->ppdu_id,
+				pdev->rx_mon_stats.status_ppdu_done,
+				pdev->rx_mon_stats.dest_ppdu_done,
+				pdev->rx_mon_stats.ppdu_id_mismatch,
+				pdev->rx_mon_stats.dest_mpdu_drop);
+
+		/* WAR: It is observed that in some cases, status ring ppdu_id
+		 *     and destination ring ppdu_id doesn't match.
+		 *     Following WAR is added to fix it.
+		 *     a. If status ppdu_id is less than destination ppdu_id,
+		 *        hold onto destination ring until ppdu_id matches
+		 *     b. If status ppdu_id is greater than destination ring
+		 *        ppdu_Id, move tp in destination ring.
+		 */
+			if (pdev->ppdu_info.com_info.ppdu_id <
+			    pdev->mon_desc->ppdu_id) {
+				break;
+			} else {
+				ring_desc = hal_srng_dst_get_next(hal_soc,
+								  mon_dest_srng);
+				continue;
+			}
+		}
+
+		/*
+		 * At this point, end_of_ppdu is one here,
+		 * When 'end_of_ppdu' is one, status buffer_count and
+		 * status_buf_addr must be valid.
+		 *
+		 *  Assert if
+		 *  a. status_buf_count is zero
+		 *  b. status_buf.paddr is NULL
+		 */
+		if (!pdev->mon_desc->status_buf_count ||
+		    !pdev->mon_desc->status_buf.paddr) {
+			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+				  FL("Status buffer info is NULL"
+				  "status_buf_count: %d"
+				  "status_buf_addr: %pK"
+				  "ring_desc: %pK-- "),
+				  pdev->mon_desc->status_buf_count,
+				  pdev->mon_desc->status_buf.paddr,
+				  ring_desc);
+			qdf_assert_always(0);
+				goto done2;
+		}
+
+		/* Deliver all MPDUs for a PPDU */
+		dp_rx_monitor_deliver_ppdu(soc, mac_id);
+
+		hal_srng_dst_get_next(hal_soc, mon_dest_srng);
+		break;
+	}
+
+done2:
+	hal_srng_access_end(hal_soc, mon_dest_srng);
+
+done1:
+	qdf_spin_unlock_bh(&pdev->mon_lock);
+
+done:
+	return quota;
+}
+
+/**
+ * dp_full_mon_attach() - attach full monitor mode
+ *              resources
+ * @pdev: Datapath PDEV handle
+ *
+ * Return: void
+ */
+void dp_full_mon_attach(struct dp_pdev *pdev)
+{
+	struct dp_soc *soc = pdev->soc;
+
+	if (!soc->full_mon_mode) {
+		qdf_debug("Full monitor is not enabled");
+		return;
+	}
+
+	pdev->mon_desc = qdf_mem_malloc(sizeof(struct hal_rx_mon_desc_info));
+
+	if (!pdev->mon_desc) {
+		qdf_err("Memory allocation failed for hal_rx_mon_desc_info ");
+		return;
+	}
+	TAILQ_INIT(&pdev->mon_mpdu_q);
+}
+
+/**
+ * dp_full_mon_detach() - detach full monitor mode
+ *              resources
+ * @pdev: Datapath PDEV handle
+ *
+ * Return: void
+ *
+ */
+void dp_full_mon_detach(struct dp_pdev *pdev)
+{
+	struct dp_soc *soc = pdev->soc;
+	struct dp_mon_mpdu *mpdu = NULL;
+	struct dp_mon_mpdu *temp_mpdu = NULL;
+
+	if (!soc->full_mon_mode) {
+		qdf_debug("Full monitor is not enabled");
+		return;
+	}
+
+	if (pdev->mon_desc)
+		qdf_mem_free(pdev->mon_desc);
+
+	if (!TAILQ_EMPTY(&pdev->mon_mpdu_q)) {
+		TAILQ_FOREACH_SAFE(mpdu,
+				   &pdev->mon_mpdu_q,
+				   mpdu_list_elem,
+				   temp_mpdu) {
+			TAILQ_REMOVE(&pdev->mon_mpdu_q,
+				     mpdu, mpdu_list_elem);
+			qdf_mem_free(mpdu);
+		}
+	}
+}
+#endif
diff --git a/dp/wifi3.0/dp_full_mon.h b/dp/wifi3.0/dp_full_mon.h
new file mode 100644
index 0000000..defee35
--- /dev/null
+++ b/dp/wifi3.0/dp_full_mon.h
@@ -0,0 +1,60 @@
+/**
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _DP_FULL_MON_H_
+#define _DP_FULL_MON_H_
+
+/**
+ * struct dp_mon_mpdu () - DP Monitor mpdu object
+ *
+ * @head: Head msdu
+ * @tail: Tail msdu
+ * @mpdu_list_elem: mpdu list element
+ * @rs_flags: Rx status flags
+ * @ant_signal_db: RSSI in dBm
+ * @is_stbc: is stbc is enabled
+ * @sgi: SGI
+ * @beamformed: if beamformed
+ */
+struct dp_mon_mpdu {
+	qdf_nbuf_t head;
+	qdf_nbuf_t tail;
+	TAILQ_ENTRY(dp_mon_mpdu) mpdu_list_elem;
+
+	uint8_t  rs_flags;
+	uint8_t  ant_signal_db;
+	uint8_t  is_stbc;
+	uint8_t  sgi;
+	uint8_t  beamformed;
+};
+
+static inline QDF_STATUS
+dp_rx_mon_is_rxdma_error(struct hal_rx_mon_desc_info *desc_info)
+{
+	enum hal_rxdma_error_code rxdma_err = desc_info->rxdma_error_code;
+
+	if (qdf_unlikely(desc_info->rxdma_push_reason ==
+				HAL_RX_WBM_RXDMA_PSH_RSN_ERROR)) {
+		if (qdf_unlikely((rxdma_err == HAL_RXDMA_ERR_FLUSH_REQUEST) ||
+				 (rxdma_err == HAL_RXDMA_ERR_MPDU_LENGTH) ||
+				 (rxdma_err == HAL_RXDMA_ERR_OVERFLOW))) {
+			return QDF_STATUS_SUCCESS;
+		}
+	}
+	return QDF_STATUS_E_FAILURE;
+}
+
+#endif /* _DP_FULL_MON_H_ */
diff --git a/dp/wifi3.0/dp_tx_capture.c b/dp/wifi3.0/dp_tx_capture.c
index bd02318..99e7f41 100644
--- a/dp/wifi3.0/dp_tx_capture.c
+++ b/dp/wifi3.0/dp_tx_capture.c
@@ -224,7 +224,7 @@
 	DP_PRINT_STATS(" mgmt control retry queue stats:");
 	for (i = 0; i < TXCAP_MAX_TYPE; i++) {
 		for (j = 0; j < TXCAP_MAX_SUBTYPE; j++) {
-			if (ptr_tx_cap->ctl_mgmt_q[i][j].qlen)
+			if (ptr_tx_cap->retries_ctl_mgmt_q[i][j].qlen)
 				DP_PRINT_STATS(" retries_ctl_mgmt_q[%d][%d] = queue_len[%d]",
 				i, j,
 				ptr_tx_cap->retries_ctl_mgmt_q[i][j].qlen);
@@ -418,6 +418,9 @@
 			"dlvr mgmt frm(0x%08x): fc 0x%x %x, dur 0x%x%x\n",
 			ptr_mgmt_hdr->ppdu_id, wh->i_fc[1], wh->i_fc[0],
 			wh->i_dur[1], wh->i_dur[0]);
+	} else {
+		qdf_nbuf_free(nbuf);
+		return;
 	}
 }
 
@@ -1053,6 +1056,7 @@
 							continue;
 						qdf_nbuf_free(
 							ppdu_desc->mpdus[i]);
+						ppdu_desc->mpdus[i] = NULL;
 					}
 					qdf_mem_free(ppdu_desc->mpdus);
 					ppdu_desc->mpdus = NULL;
@@ -1375,21 +1379,6 @@
 			/* pull ethernet header from first MSDU alone */
 			qdf_nbuf_pull_head(curr_nbuf,
 					   sizeof(qdf_ether_header_t));
-			mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
-						   MAX_MONITOR_HEADER,
-						   MAX_MONITOR_HEADER,
-						   4, FALSE);
-
-			if (!mpdu_nbuf) {
-				QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE,
-					  QDF_TRACE_LEVEL_FATAL,
-					  "MPDU head allocation failed !!!");
-				goto free_ppdu_desc_mpdu_q;
-			}
-
-			dp_tx_update_80211_hdr(pdev, peer,
-					       ppdu_desc, mpdu_nbuf,
-					       ether_type, eh->ether_shost);
 
 			/* update first buffer to previous buffer */
 			prev_nbuf = curr_nbuf;
@@ -1432,6 +1421,23 @@
 		frag_list_sum_len += qdf_nbuf_len(curr_nbuf);
 
 		if (last_msdu) {
+
+			mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
+						   MAX_MONITOR_HEADER,
+						   MAX_MONITOR_HEADER,
+						   4, FALSE);
+
+			if (!mpdu_nbuf) {
+				QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE,
+					  QDF_TRACE_LEVEL_FATAL,
+					  "MPDU head allocation failed !!!");
+				goto free_ppdu_desc_mpdu_q;
+			}
+
+			dp_tx_update_80211_hdr(pdev, peer,
+					       ppdu_desc, mpdu_nbuf,
+					       ether_type, eh->ether_shost);
+
 			/*
 			 * first nbuf will hold list of msdu
 			 * stored in prev_nbuf
@@ -1458,6 +1464,9 @@
 			QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE,
 				  QDF_TRACE_LEVEL_FATAL,
 				  "!!!! WAITING for msdu but list empty !!!!");
+
+			/* for incomplete list, free up the queue */
+			goto free_ppdu_desc_mpdu_q;
 		}
 
 		continue;
@@ -2152,6 +2161,50 @@
 		dp_gen_ack_rx_frame(pdev, &tx_capture_info);
 }
 
+static qdf_nbuf_t dp_tx_mon_get_next_mpdu(
+	struct cdp_tx_completion_ppdu *xretry_ppdu,
+	qdf_nbuf_t mpdu_nbuf)
+{
+	qdf_nbuf_t next_nbuf = NULL;
+	qdf_nbuf_queue_t temp_xretries;
+
+	if (mpdu_nbuf != qdf_nbuf_queue_first(&xretry_ppdu->mpdu_q)) {
+		next_nbuf = qdf_nbuf_queue_next(mpdu_nbuf);
+		/* Initialize temp list */
+		qdf_nbuf_queue_init(&temp_xretries);
+		/* Move entries into temp list till the mpdu_nbuf is found */
+		while ((qdf_nbuf_queue_first(&xretry_ppdu->mpdu_q)) &&
+		       (mpdu_nbuf !=
+				qdf_nbuf_queue_first(&xretry_ppdu->mpdu_q))) {
+			qdf_nbuf_queue_add(&temp_xretries,
+				qdf_nbuf_queue_remove(&xretry_ppdu->mpdu_q));
+		}
+		if ((qdf_nbuf_queue_first(&xretry_ppdu->mpdu_q)) &&
+		    (mpdu_nbuf == qdf_nbuf_queue_first(&xretry_ppdu->mpdu_q))) {
+			/* Remove mpdu_nbuf from queue */
+			qdf_nbuf_queue_remove(&xretry_ppdu->mpdu_q);
+			/* Add remaining nbufs into temp queue */
+			qdf_nbuf_queue_append(&temp_xretries,
+					      &xretry_ppdu->mpdu_q);
+			/* Reinit xretry_ppdu->mpdu_q */
+			qdf_nbuf_queue_init(&xretry_ppdu->mpdu_q);
+			/* append all the entries into original queue */
+			qdf_nbuf_queue_append(&xretry_ppdu->mpdu_q,
+					      &temp_xretries);
+		} else {
+			QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE,
+				  QDF_TRACE_LEVEL_FATAL,
+				  "%s: This is buggy scenario, did not find nbuf in queue ",
+				  __func__);
+		}
+	} else {
+		qdf_nbuf_queue_remove(&xretry_ppdu->mpdu_q);
+		next_nbuf = qdf_nbuf_queue_first(&xretry_ppdu->mpdu_q);
+	}
+
+	return next_nbuf;
+}
+
 static void
 dp_tx_mon_proc_xretries(struct dp_pdev *pdev, struct dp_peer *peer,
 			uint16_t tid)
@@ -2187,7 +2240,8 @@
 			mpdu_tried = ppdu_desc->user[0].mpdu_tried_ucast +
 			ppdu_desc->user[0].mpdu_tried_mcast;
 			mpdu_nbuf = qdf_nbuf_queue_first(&xretry_ppdu->mpdu_q);
-			for (i = 0; (mpdu_tried > 0) && mpdu_nbuf; i++) {
+			for (i = 0; (i < ppdu_desc->user[0].ba_size) &&
+				(mpdu_tried > 0) && (mpdu_nbuf); i++) {
 				if (!(SEQ_BIT(ppdu_desc->user[0].enq_bitmap,
 				    i)))
 					continue;
@@ -2214,11 +2268,13 @@
 				ppdu_desc->pending_retries--;
 				if (ptr_msdu_info->transmit_cnt == 0) {
 					ppdu_desc->mpdus[seq_no - start_seq] =
-						mpdu_nbuf;
-					qdf_nbuf_queue_remove(
-						&xretry_ppdu->mpdu_q);
-					mpdu_nbuf = qdf_nbuf_queue_first(
-						&xretry_ppdu->mpdu_q);
+							mpdu_nbuf;
+					/*
+					 * This API removes mpdu_nbuf from q and
+					 * returns next mpdu from the queue
+					 */
+					mpdu_nbuf = dp_tx_mon_get_next_mpdu(
+							xretry_ppdu, mpdu_nbuf);
 				} else {
 					ppdu_desc->mpdus[seq_no - start_seq] =
 					qdf_nbuf_copy_expand_fraglist(
@@ -2592,6 +2648,7 @@
 	struct cdp_tx_indication_info tx_capture_info;
 	qdf_nbuf_t mgmt_ctl_nbuf;
 	uint8_t type, subtype;
+	uint8_t fc_type, fc_subtype;
 	bool is_sgen_pkt;
 	struct cdp_tx_mgmt_comp_info *ptr_comp_info;
 	qdf_nbuf_queue_t *retries_q;
@@ -2599,6 +2656,7 @@
 	uint32_t ppdu_id;
 	size_t head_size;
 	uint32_t status = 1;
+	uint32_t tsf_delta;
 
 	ppdu_desc = (struct cdp_tx_completion_ppdu *)
 		qdf_nbuf_data(nbuf_ppdu_desc);
@@ -2608,6 +2666,11 @@
 	 */
 	head_size = sizeof(struct cdp_tx_mgmt_comp_info);
 
+	fc_type = (ppdu_desc->frame_ctrl &
+		  IEEE80211_FC0_TYPE_MASK);
+	fc_subtype = (ppdu_desc->frame_ctrl &
+		     IEEE80211_FC0_SUBTYPE_MASK);
+
 	type = (ppdu_desc->frame_ctrl &
 		IEEE80211_FC0_TYPE_MASK) >>
 		IEEE80211_FC0_TYPE_SHIFT;
@@ -2630,7 +2693,11 @@
 	switch (ppdu_desc->htt_frame_type) {
 	case HTT_STATS_FTYPE_TIDQ_DATA_SU:
 	case HTT_STATS_FTYPE_TIDQ_DATA_MU:
-		is_sgen_pkt = false;
+		if ((fc_type == IEEE80211_FC0_TYPE_MGT) &&
+		    (fc_subtype == IEEE80211_FC0_SUBTYPE_BEACON))
+			is_sgen_pkt = true;
+		else
+			is_sgen_pkt = false;
 	break;
 	default:
 		is_sgen_pkt = true;
@@ -2682,8 +2749,13 @@
 			if (is_sgen_pkt) {
 				start_tsf = (ppdu_desc->ppdu_start_timestamp &
 					     LOWER_32_MASK);
-				if (ptr_comp_info->tx_tsf <
-				     (start_tsf + MAX_MGMT_ENQ_DELAY)) {
+
+				if (start_tsf > ptr_comp_info->tx_tsf)
+					tsf_delta = start_tsf - ptr_comp_info->tx_tsf;
+				else
+					tsf_delta = ptr_comp_info->tx_tsf - start_tsf;
+
+				if (tsf_delta > MAX_MGMT_ENQ_DELAY) {
 					/*
 					 * free the older mgmt buffer from
 					 * the queue and get new mgmt buffer
diff --git a/dp/wifi3.0/dp_txrx_wds.c b/dp/wifi3.0/dp_txrx_wds.c
index 562180a..4c7c82e 100644
--- a/dp/wifi3.0/dp_txrx_wds.c
+++ b/dp/wifi3.0/dp_txrx_wds.c
@@ -245,7 +245,7 @@
  * Return: status
  */
 #ifdef WDS_VENDOR_EXTENSION
-void
+QDF_STATUS
 dp_txrx_set_wds_rx_policy(struct cdp_soc_t *soc, uint8_t vdev_id, u_int32_t val)
 {
 	struct dp_peer *peer;
@@ -255,7 +255,7 @@
 	if (!vdev) {
 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
 			  FL("vdev is NULL for vdev_id %d"), vdev_id);
-		return;
+		return QDF_STATUS_E_INVAL;
 	}
 
 	if (vdev->opmode == wlan_op_mode_ap) {
@@ -280,6 +280,8 @@
 		peer->wds_ecm.wds_rx_mcast_4addr =
 			(val & WDS_POLICY_RX_MCAST_4ADDR) ? 1 : 0;
 	}
+
+	return QDF_STATUS_SUCCESS;
 }
 
 /**
@@ -293,7 +295,7 @@
  *
  * Return: void
  */
-void
+QDF_STATUS
 dp_txrx_peer_wds_tx_policy_update(struct cdp_soc_t *soc,  uint8_t vdev_id,
 				  uint8_t *peer_mac, int wds_tx_ucast,
 				  int wds_tx_mcast)
@@ -305,7 +307,7 @@
 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
 			  FL("peer is NULL for mac %pM vdev_id %d"),
 			  peer_mac, vdev_id);
-		return;
+		return QDF_STATUS_E_INVAL;
 	}
 
 	if (wds_tx_ucast || wds_tx_mcast) {
@@ -330,6 +332,7 @@
 		  peer->wds_ecm.wds_tx_mcast_4addr);
 
 	dp_peer_unref_delete(peer);
+	return QDF_STATUS_SUCCESS;
 }
 
 int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr,
@@ -339,7 +342,8 @@
 	struct dp_peer *bss_peer;
 	int fr_ds, to_ds, rx_3addr, rx_4addr;
 	int rx_policy_ucast, rx_policy_mcast;
-	int rx_mcast = hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr);
+	hal_soc_handle_t hal_soc = vdev->pdev->soc->hal_soc;
+	int rx_mcast = hal_rx_msdu_end_da_is_mcbc_get(hal_soc, rx_tlv_hdr);
 
 	if (vdev->opmode == wlan_op_mode_ap) {
 		TAILQ_FOREACH(bss_peer, &vdev->peer_list, peer_list_elem) {
@@ -390,8 +394,8 @@
 	 * ------------------------------------------------
 	 */
 
-	fr_ds = hal_rx_mpdu_get_fr_ds(rx_tlv_hdr);
-	to_ds = hal_rx_mpdu_get_to_ds(rx_tlv_hdr);
+	fr_ds = hal_rx_mpdu_get_fr_ds(hal_soc, rx_tlv_hdr);
+	to_ds = hal_rx_mpdu_get_to_ds(hal_soc, rx_tlv_hdr);
 	rx_3addr = fr_ds ^ to_ds;
 	rx_4addr = fr_ds & to_ds;
 
diff --git a/dp/wifi3.0/dp_txrx_wds.h b/dp/wifi3.0/dp_txrx_wds.h
index 99a434c..5c5b328 100644
--- a/dp/wifi3.0/dp_txrx_wds.h
+++ b/dp/wifi3.0/dp_txrx_wds.h
@@ -1,6 +1,6 @@
 
 /*
- * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -57,11 +57,11 @@
 }
 #endif
 #ifdef WDS_VENDOR_EXTENSION
-void
+QDF_STATUS
 dp_txrx_peer_wds_tx_policy_update(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
 				  uint8_t *peer_mac, int wds_tx_ucast,
-				  int wds_tx_mcast)
-void
+				  int wds_tx_mcast);
+QDF_STATUS
 dp_txrx_set_wds_rx_policy(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
 			  u_int32_t val);
 #endif
@@ -264,6 +264,9 @@
 		 */
 		if ((sa_peer->vdev->opmode == wlan_op_mode_ap) &&
 		    !sa_peer->delete_in_progress) {
+			qdf_mem_copy(wds_src_mac,
+				     (qdf_nbuf_data(nbuf) + QDF_MAC_ADDR_SIZE),
+				     QDF_MAC_ADDR_SIZE);
 			sa_peer->delete_in_progress = true;
 			if (soc->cdp_soc.ol_ops->peer_sta_kickout) {
 				soc->cdp_soc.ol_ops->peer_sta_kickout(
diff --git a/target_if/cfr/src/target_if_cfr.c b/target_if/cfr/src/target_if_cfr.c
index dd216eb..525b212 100644
--- a/target_if/cfr/src/target_if_cfr.c
+++ b/target_if/cfr/src/target_if_cfr.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -257,24 +257,25 @@
 
 	uint8_t def_mac[QDF_MAC_ADDR_SIZE] = {0xFF, 0xFF, 0xFF,
 		0xFF, 0xFF, 0xFF};
-	uint8_t null_mac[QDF_MAC_ADDR_SIZE] = {0, 0, 0, 0, 0, 0};
+	uint8_t null_mac[QDF_MAC_ADDR_SIZE] = {0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00};
 
 	for (grp_id = 0; grp_id < MAX_TA_RA_ENTRIES; grp_id++) {
 		if (qdf_test_bit(grp_id, (unsigned long *)&reset_cfg)) {
 			curr_cfg = &rcc_info->curr[grp_id];
 			qdf_mem_copy(curr_cfg->tx_addr,
-				     def_mac, QDF_MAC_ADDR_SIZE);
+				     null_mac, QDF_MAC_ADDR_SIZE);
 			qdf_mem_copy(curr_cfg->tx_addr_mask,
-				     null_mac, QDF_MAC_ADDR_SIZE);
-			qdf_mem_copy(curr_cfg->rx_addr,
 				     def_mac, QDF_MAC_ADDR_SIZE);
-			qdf_mem_copy(curr_cfg->rx_addr_mask,
+			qdf_mem_copy(curr_cfg->rx_addr,
 				     null_mac, QDF_MAC_ADDR_SIZE);
+			qdf_mem_copy(curr_cfg->rx_addr_mask,
+				     def_mac, QDF_MAC_ADDR_SIZE);
 			curr_cfg->bw = 0xf;
 			curr_cfg->nss = 0xff;
-			curr_cfg->mgmt_subtype_filter = 0xffff;
-			curr_cfg->ctrl_subtype_filter = 0xffff;
-			curr_cfg->data_subtype_filter = 0xffff;
+			curr_cfg->mgmt_subtype_filter = 0;
+			curr_cfg->ctrl_subtype_filter = 0;
+			curr_cfg->data_subtype_filter = 0;
 			if (!allvalid) {
 				curr_cfg->valid_ta = 0;
 				curr_cfg->valid_ta_mask = 0;
diff --git a/target_if/cfr/src/target_if_cfr_6018.c b/target_if/cfr/src/target_if_cfr_6018.c
index 7ffc443..826726a 100644
--- a/target_if/cfr/src/target_if_cfr_6018.c
+++ b/target_if/cfr/src/target_if_cfr_6018.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -35,6 +35,8 @@
 #include <target_if_cfr_6018.h>
 #include "cdp_txrx_ctrl.h"
 
+#define CMN_NOISE_FLOOR       (-96)
+
 static u_int32_t end_magic = 0xBEAFDEAD;
 /**
  * get_lut_entry() - Retrieve LUT entry using cookie number
@@ -107,6 +109,8 @@
 		return;
 	}
 
+	qdf_spin_lock_bh(&pcfr->lut_lock);
+
 	for (i = 0; i < NUM_LUT_ENTRIES; i++) {
 		lut = get_lut_entry(pcfr, i);
 		if (!lut)
@@ -124,6 +128,9 @@
 		}
 
 	}
+
+	qdf_spin_unlock_bh(&pcfr->lut_lock);
+
 	wlan_objmgr_pdev_release_ref(pdev, WLAN_CFR_ID);
 }
 
@@ -617,19 +624,19 @@
 						     WLAN_UMAC_COMP_CFR);
 	if (qdf_unlikely(!pcfr)) {
 		cfr_err("pdev object for CFR is NULL");
-		goto done;
+		goto relref;
 	}
 
 	cdp_rx_ppdu = (struct cdp_rx_indication_ppdu *)qdf_nbuf_data(nbuf);
 	cfr_info = &cdp_rx_ppdu->cfr_info;
 
 	if (!cfr_info->bb_captured_channel)
-		goto done;
+		goto relref;
 
 	psoc = wlan_pdev_get_psoc(pdev);
 	if (qdf_unlikely(!psoc)) {
 		cfr_err("psoc is null\n");
-		goto done;
+		goto relref;
 	}
 
 	cfr_rx_ops = &psoc->soc_cb.rx_ops.cfr_rx_ops;
@@ -642,7 +649,7 @@
 					&cookie, 0)) {
 		cfr_debug("Cookie lookup failure for addr: 0x%pK",
 			  (void *)((uintptr_t)buf_addr));
-		goto done;
+		goto relref;
 	}
 
 	cfr_debug("<RXTLV><%u>:buffer address: 0x%pK \n"
@@ -661,16 +668,18 @@
 		  cfr_info->rtt_che_buffer_pointer_high8,
 		  cfr_info->chan_capture_status);
 
+	qdf_spin_lock_bh(&pcfr->lut_lock);
+
 	lut = get_lut_entry(pcfr, cookie);
 	if (qdf_unlikely(!lut)) {
 		cfr_err("lut is NULL");
-		goto done;
+		goto unlock;
 	}
 
 	vdev = wlan_objmgr_pdev_get_first_vdev(pdev, WLAN_CFR_ID);
 	if (qdf_unlikely(!vdev)) {
 		cfr_debug("vdev is null\n");
-		goto done;
+		goto unlock;
 	}
 
 	bss_chan = wlan_vdev_mlme_get_bss_chan(vdev);
@@ -714,13 +723,10 @@
 		meta->num_mu_users = CYP_CFR_MU_USERS;
 
 	for (i = 0; i < MAX_CHAIN; i++)
-		meta->chain_rssi[i] = cdp_rx_ppdu->per_chain_rssi[i];
+		meta->chain_rssi[i] =
+			cdp_rx_ppdu->per_chain_rssi[i] + CMN_NOISE_FLOOR;
 
-	if (cdp_rx_ppdu->u.ppdu_type == CDP_RX_TYPE_SU) {
-		qdf_mem_copy(meta->peer_addr.su_peer_addr,
-			     cdp_rx_ppdu->mac_addr,
-			     QDF_MAC_ADDR_SIZE);
-	} else {
+	if (cdp_rx_ppdu->u.ppdu_type != CDP_RX_TYPE_SU) {
 		for (i = 0 ; i < meta->num_mu_users; i++) {
 			rx_stats_peruser = &cdp_rx_ppdu->user[i];
 			qdf_mem_copy(meta->peer_addr.mu_peer_addr[i],
@@ -750,7 +756,10 @@
 	} else {
 		cfr_err("Correlation returned invalid status!!");
 	}
-done:
+
+unlock:
+	qdf_spin_unlock_bh(&pcfr->lut_lock);
+relref:
 	qdf_nbuf_free(nbuf);
 	wlan_objmgr_pdev_release_ref(pdev, WLAN_CFR_ID);
 }
@@ -856,9 +865,12 @@
 	length  = dma_hdr.length * 4;
 	length += dma_hdr.total_bytes; /* size of cfr data */
 
+	qdf_spin_lock_bh(&pcfr->lut_lock);
+
 	lut = get_lut_entry(pcfr, cookie);
 	if (!lut) {
 		cfr_err("lut is NULL");
+		qdf_spin_unlock_bh(&pcfr->lut_lock);
 		return true;
 	}
 
@@ -919,6 +931,7 @@
 		status = true;
 	}
 
+	qdf_spin_unlock_bh(&pcfr->lut_lock);
 	return status;
 }
 
@@ -1123,7 +1136,7 @@
 	if (!pcfr) {
 		cfr_err("pdev object for CFR is NULL");
 		retval = -EINVAL;
-		goto end;
+		goto relref;
 	}
 
 	if ((tx_evt_param.status & PEER_CFR_CAPTURE_EVT_PS_STATUS_MASK) == 1) {
@@ -1139,14 +1152,14 @@
 						  NULL, 0, &end_magic, 4);
 
 		retval = -EINVAL;
-		goto end;
+		goto relref;
 	}
 
 	if ((tx_evt_param.status & PEER_CFR_CAPTURE_EVT_STATUS_MASK) == 0) {
 		cfr_debug("CFR capture failed for peer : %s",
 			  ether_sprintf(&tx_evt_param.peer_mac_addr.bytes[0]));
 		retval = -EINVAL;
-		goto end;
+		goto relref;
 	}
 
 	if (tx_evt_param.status & CFR_TX_EVT_STATUS_MASK) {
@@ -1154,7 +1167,7 @@
 			  tx_evt_param.status & CFR_TX_EVT_STATUS_MASK,
 			  ether_sprintf(&tx_evt_param.peer_mac_addr.bytes[0]));
 		retval = -EINVAL;
-		goto end;
+		goto relref;
 	}
 
 	buf_addr_temp = (tx_evt_param.correlation_info_2 & 0x0f);
@@ -1166,7 +1179,7 @@
 		cfr_debug("Cookie lookup failure for addr: 0x%pK status: 0x%x",
 			  (void *)((uintptr_t)buf_addr), tx_evt_param.status);
 		retval = -EINVAL;
-		goto end;
+		goto relref;
 	}
 
 	cfr_debug("buffer address: 0x%pK cookie: %u",
@@ -1174,11 +1187,13 @@
 
 	dump_cfr_peer_tx_event_enh(&tx_evt_param, cookie);
 
+	qdf_spin_lock_bh(&pcfr->lut_lock);
+
 	lut = get_lut_entry(pcfr, cookie);
 	if (!lut) {
 		cfr_err("lut is NULL\n");
 		retval = -EINVAL;
-		goto end;
+		goto unlock;
 	}
 
 	pcfr->tx_evt_cnt++;
@@ -1252,10 +1267,11 @@
 	} else {
 		cfr_err("Correlation returned invalid status!!");
 		retval = -EINVAL;
-		goto end;
 	}
 
-end:
+unlock:
+	qdf_spin_unlock_bh(&pcfr->lut_lock);
+relref:
 
 	wlan_objmgr_psoc_release_ref(psoc, WLAN_CFR_ID);
 	wlan_objmgr_vdev_release_ref(vdev, WLAN_CFR_ID);
@@ -1366,6 +1382,8 @@
 
 	cur_tstamp = qdf_ktime_to_ms(qdf_ktime_get());
 
+	qdf_spin_lock_bh(&pcfr->lut_lock);
+
 	for (i = 0; i < NUM_LUT_ENTRIES; i++) {
 		lut = get_lut_entry(pcfr, i);
 		if (!lut)
@@ -1387,6 +1405,8 @@
 		}
 	}
 
+	qdf_spin_unlock_bh(&pcfr->lut_lock);
+
 	if (pcfr->lut_timer_init)
 		qdf_timer_mod(&pcfr->lut_age_timer, LUT_AGE_TIMER);
 	wlan_objmgr_pdev_release_ref(pdev, WLAN_CFR_ID);
@@ -1563,6 +1583,8 @@
 		pcfr->lut_timer_init = 1;
 	}
 
+	qdf_spinlock_create(&pcfr->lut_lock);
+
 	return status;
 }
 
@@ -1605,6 +1627,7 @@
 	qdf_mem_zero(&pcfr->rcc_param, sizeof(struct cfr_rcc_param));
 	qdf_mem_zero(&pcfr->global, (sizeof(struct ta_ra_cfr_cfg) *
 				     MAX_TA_RA_ENTRIES));
+	pcfr->cfr_timer_enable = 0;
 
 #ifdef DIRECT_BUF_RX_ENABLE
 	status = target_if_unregister_to_dbr_enh(pdev);
@@ -1616,6 +1639,8 @@
 	if (status != QDF_STATUS_SUCCESS)
 		cfr_err("Failed to register with dbr");
 
+	qdf_spinlock_destroy(&pcfr->lut_lock);
+
 	return status;
 }
 
diff --git a/umac/cfr/dispatcher/inc/wlan_cfr_utils_api.h b/umac/cfr/dispatcher/inc/wlan_cfr_utils_api.h
index 329acd5..144af33 100644
--- a/umac/cfr/dispatcher/inc/wlan_cfr_utils_api.h
+++ b/umac/cfr/dispatcher/inc/wlan_cfr_utils_api.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -495,6 +495,7 @@
 	uint64_t cfr_dma_aborts;
 #endif
 	struct unassoc_pool_entry unassoc_pool[MAX_CFR_ENABLED_CLIENTS];
+	qdf_spinlock_t lut_lock;
 };
 
 #define PEER_CFR_CAPTURE_ENABLE   1
diff --git a/umac/cfr/dispatcher/src/wlan_cfr_ucfg_api.c b/umac/cfr/dispatcher/src/wlan_cfr_ucfg_api.c
index 4823665..f7638a9 100644
--- a/umac/cfr/dispatcher/src/wlan_cfr_ucfg_api.c
+++ b/umac/cfr/dispatcher/src/wlan_cfr_ucfg_api.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -26,6 +26,21 @@
 #include "cdp_txrx_ctrl.h"
 #endif
 
+#ifdef WLAN_ENH_CFR_ENABLE
+static bool cfr_is_filter_enabled(struct cfr_rcc_param *rcc_param)
+{
+	if (rcc_param->m_directed_ftm ||
+	    rcc_param->m_all_ftm_ack ||
+	    rcc_param->m_ndpa_ndp_directed ||
+	    rcc_param->m_ndpa_ndp_all ||
+	    rcc_param->m_ta_ra_filter ||
+	    rcc_param->m_all_packet)
+		return true;
+	else
+		return false;
+}
+#endif
+
 int ucfg_cfr_start_capture(struct wlan_objmgr_pdev *pdev,
 			   struct wlan_objmgr_peer *peer,
 			   struct cfr_capture_params *params)
@@ -69,13 +84,21 @@
 
 	if (params->period) {
 		if (pa->cfr_current_sta_count == pa->cfr_max_sta_count) {
-			qdf_info("max periodic cfr clients reached\n");
+			cfr_err("max periodic cfr clients reached");
 			return -EINVAL;
 		}
 		if (!(pe->request))
 			pa->cfr_current_sta_count++;
 	}
 
+#ifdef WLAN_ENH_CFR_ENABLE
+	if (pa->is_cfr_rcc_capable && cfr_is_filter_enabled(&pa->rcc_param)) {
+		cfr_err("This is not allowed since RCC is enabled");
+		pa->cfr_timer_enable = 0;
+		return -EINVAL;
+	}
+#endif
+
 	status = tgt_cfr_start_capture(pdev, peer, params);
 
 	if (status == 0) {
@@ -458,7 +481,11 @@
 	if (status != QDF_STATUS_SUCCESS)
 		return status;
 
-	pcfr->rcc_param.capture_interval = params->cap_intvl;
+	if (pcfr->rcc_param.capture_duration > params->cap_intvl) {
+		cfr_err("Capture intval should be more than capture duration");
+		status = QDF_STATUS_E_INVAL;
+	} else
+		pcfr->rcc_param.capture_interval = params->cap_intvl;
 
 	wlan_objmgr_pdev_release_ref(pdev, WLAN_CFR_ID);
 
@@ -482,7 +509,12 @@
 	if (status != QDF_STATUS_SUCCESS)
 		return status;
 
-	pcfr->rcc_param.capture_duration = params->cap_dur;
+	if (pcfr->rcc_param.capture_interval
+	    && (params->cap_dur > pcfr->rcc_param.capture_interval)) {
+		cfr_err("Capture duration is exceeding capture interval");
+		status = QDF_STATUS_E_INVAL;
+	} else
+		pcfr->rcc_param.capture_duration = params->cap_dur;
 
 	wlan_objmgr_pdev_release_ref(pdev, WLAN_CFR_ID);
 
@@ -604,19 +636,6 @@
 	return status;
 }
 
-static bool cfr_is_filter_enabled(struct cfr_rcc_param *rcc_param)
-{
-	if (rcc_param->m_directed_ftm ||
-	    rcc_param->m_all_ftm_ack ||
-	    rcc_param->m_ndpa_ndp_directed ||
-	    rcc_param->m_ndpa_ndp_all ||
-	    rcc_param->m_ta_ra_filter ||
-	    rcc_param->m_all_packet)
-		return true;
-	else
-		return false;
-}
-
 QDF_STATUS ucfg_cfr_get_cfg(struct wlan_objmgr_vdev *vdev)
 {
 	struct pdev_cfr *pcfr = NULL;
@@ -629,7 +648,7 @@
 	if (status != QDF_STATUS_SUCCESS)
 		return status;
 	if (!cfr_is_filter_enabled(&pcfr->rcc_param)) {
-		cfr_err(" All RCC modes are disabled.\n");
+		cfr_err(" All RCC modes are disabled");
 		wlan_objmgr_pdev_release_ref(pdev, WLAN_CFR_ID);
 		return status;
 	}
@@ -934,6 +953,7 @@
 
 	if (!psoc) {
 		cfr_err("psoc is null!");
+		wlan_objmgr_pdev_release_ref(pdev, WLAN_CFR_ID);
 		return QDF_STATUS_E_NULL_VALUE;
 	}
 	/*
@@ -950,6 +970,12 @@
 	 */
 
 	if (cfr_is_filter_enabled(&pcfr->rcc_param)) {
+		if (pcfr->cfr_timer_enable) {
+			cfr_err("Not allowed: Periodic capture is enabled.\n");
+			wlan_objmgr_pdev_release_ref(pdev, WLAN_CFR_ID);
+			return QDF_STATUS_E_NOSUPPORT;
+		}
+
 		if (pcfr->rcc_param.m_all_ftm_ack) {
 			filter_val.mode |= MON_FILTER_PASS |
 					   MON_FILTER_OTHER;
diff --git a/umac/dfs/core/src/misc/dfs_zero_cac.c b/umac/dfs/core/src/misc/dfs_zero_cac.c
index 270523e..c42223c 100644
--- a/umac/dfs/core/src/misc/dfs_zero_cac.c
+++ b/umac/dfs/core/src/misc/dfs_zero_cac.c
@@ -488,8 +488,9 @@
  *				    Return true if CAC done, else false.
  * @dfs_precac_entry: Precac entry which has the root of the precac BSTree.
  * @chan_freq:        IEEE channel freq. This is the center of a
- *                    20/40/80 MHz channel and the center channel is unique
- *                    irrespective of the bandwidth(20/40/80 MHz).
+ *                    20/40/80/160/165 MHz channel and the center channel is
+ *                    unique irrespective of the bandwidth
+ *                    (20/40/80/160/165 MHz).
  */
 #ifdef CONFIG_CHAN_FREQ_API
 static bool
@@ -497,7 +498,7 @@
 				      uint16_t chan_freq)
 {
 	struct precac_tree_node *node = precac_entry->tree_root;
-	uint8_t n_cur_lvl_subchs = N_SUBCHANS_FOR_80BW;
+	uint8_t n_cur_lvl_subchs = N_SUBCHANS_FOR_160BW;
 
 	while (node) {
 		if (node->ch_freq == chan_freq)
@@ -517,6 +518,12 @@
 
 #ifdef CONFIG_CHAN_FREQ_API
 #define VHT80_FREQ_OFFSET 30
+/* For any 160MHz channel, a frequency offset of 70MHz would have been enough
+ * to include the right edge and left edge channels. But, the restricted 80P80
+ * or the 165MHz channel is also assumed to have a 160MHz root ie channel 146,
+ * so an offset of 75MHz is chosen.
+ */
+#define VHT160_FREQ_OFFSET 75
 #endif
 
 #define IS_WITHIN_RANGE(_A, _B, _C)  \
@@ -814,7 +821,12 @@
 	}
 
 	if (ch_freq) {
-		adfs_param.precac_chan_freq = ch_freq;
+		adfs_param.precac_center_freq_1 =
+			(ch_freq == RESTRICTED_80P80_CHAN_CENTER_FREQ) ?
+			(RESTRICTED_80P80_LEFT_80_CENTER_FREQ) : ch_freq;
+		adfs_param.precac_center_freq_2 =
+			(ch_freq == RESTRICTED_80P80_CHAN_CENTER_FREQ) ?
+			(RESTRICTED_80P80_RIGHT_80_CENTER_FREQ) : 0;
 		adfs_param.precac_chan = utils_dfs_freq_to_chan(ch_freq);
 		adfs_param.precac_chwidth = temp_dfs->dfs_precac_chwidth;
 		dfs_start_agile_precac_timer(temp_dfs,
@@ -1261,8 +1273,8 @@
 				   pe_list,
 				   tmp_precac_entry) {
 			if (IS_WITHIN_RANGE(channels[i],
-					    precac_entry->vht80_ch_freq,
-					    VHT80_FREQ_OFFSET)) {
+					    precac_entry->center_ch_freq,
+					    VHT160_FREQ_OFFSET)) {
 				dfs_mark_tree_node_as_cac_done_for_freq
 					(dfs, precac_entry, channels[i]);
 				break;
@@ -1675,7 +1687,7 @@
 		 * Set precac_state_started to false to indicate preCAC is not
 		 * running and also reset the current Agile channel.
 		 */
-		if (detector_id == AGILE_DETECTOR_ID) {
+		if (detector_id == dfs_get_agile_detector_id(dfs)) {
 			dfs_prepare_agile_precac_chan(dfs);
 		} else {
 			dfs->dfs_agile_precac_freq = 0;
@@ -1735,8 +1747,8 @@
 				   pe_list,
 				   tmp_precac_entry) {
 			if (IS_WITHIN_RANGE(freq_lst[i],
-					    precac_entry->vht80_ch_freq,
-					    VHT80_FREQ_OFFSET)) {
+					    precac_entry->center_ch_freq,
+					    VHT160_FREQ_OFFSET)) {
 				dfs_mark_tree_node_as_nol_for_freq(dfs,
 								   precac_entry,
 								   freq_lst[i]);
@@ -1809,7 +1821,7 @@
 		 * Set precac_state_started to false to indicate preCAC is not
 		 * running and also reset the current Agile channel.
 		 */
-		if (detector_id == AGILE_DETECTOR_ID) {
+		if (detector_id == dfs_get_agile_detector_id(dfs)) {
 			dfs_prepare_agile_precac_chan(dfs);
 		} else {
 			dfs->dfs_agile_precac_freq_mhz = 0;
@@ -1853,7 +1865,7 @@
 		 * TRIGGER agile precac timer with 0sec timeout
 		 * with ocac_status 0 for old pdev
 		 */
-		adfs_param.precac_chan_freq = center_freq_mhz;
+		adfs_param.precac_center_freq_1 = center_freq_mhz;
 		adfs_param.precac_chwidth = dfs->dfs_precac_chwidth;
 		dfs_start_agile_precac_timer(dfs,
 					     ocac_status,
@@ -2158,10 +2170,20 @@
 		     current_time / 1000);
 	    if (dfs_soc_obj->ocac_status == OCAC_SUCCESS) {
 		dfs_soc_obj->ocac_status = OCAC_RESET;
-		dfs_mark_precac_done_for_freq(dfs,
-					      dfs->dfs_agile_precac_freq_mhz,
-					      0,
-					      dfs->dfs_precac_chwidth);
+		if (dfs->dfs_agile_precac_freq_mhz ==
+		    RESTRICTED_80P80_CHAN_CENTER_FREQ) {
+			dfs_mark_precac_done_for_freq(
+				dfs,
+				RESTRICTED_80P80_LEFT_80_CENTER_FREQ,
+				RESTRICTED_80P80_RIGHT_80_CENTER_FREQ,
+				CH_WIDTH_80P80MHZ);
+		} else {
+			dfs_mark_precac_done_for_freq(
+				dfs,
+				dfs->dfs_agile_precac_freq_mhz,
+				0,
+				dfs->dfs_precac_chwidth);
+		}
 	    }
 	    /* check if CAC done on home channel */
 	    is_cac_done_on_des_chan = dfs_precac_check_home_chan_change(dfs);
@@ -2263,6 +2285,10 @@
 {
 	dfs->dfs_precac_timeout_override = -1;
 	PRECAC_LIST_LOCK_CREATE(dfs);
+	if (dfs_is_true_160mhz_supported(dfs))
+		dfs->dfs_agile_detector_id = AGILE_DETECTOR_ID_TRUE_160MHZ;
+	else
+		dfs->dfs_agile_detector_id = AGILE_DETECTOR_ID_80P80;
 }
 
 /* dfs_init_precac_tree_node() - Initialise the preCAC BSTree node with the
@@ -2291,12 +2317,15 @@
  * @node:      Precac_tree_node to be filled.
  * @freq:      IEEE channel freq value.
  * @bandwidth: Bandwidth of the channel.
+ * @depth:     Depth of the tree. The depth of the tree when the root is 160MHz
+ *             channel is 4, 80MHz is 3, 40MHz is 2 and 20MHz is 1.
  */
 #ifdef CONFIG_CHAN_FREQ_API
 static inline void
 dfs_init_precac_tree_node_for_freq(struct precac_tree_node *node,
 				   uint16_t freq,
-				   uint8_t bandwidth)
+				   uint8_t bandwidth,
+				   uint8_t depth)
 {
 	node->left_child = NULL;
 	node->right_child = NULL;
@@ -2306,6 +2335,8 @@
 	node->n_nol_subchs = 0;
 	node->n_valid_subchs = N_SUBCHS_FOR_BANDWIDTH(bandwidth);
 	node->bandwidth = bandwidth;
+	node->depth = depth;
+
 }
 #endif
 
@@ -2360,6 +2391,8 @@
  * @root:      The preCAC BSTree root pointer.
  * @chan:      IEEE freq of the new node.
  * @bandwidth: Bandwidth of the channel.
+ * @depth:     Depth of the tree. The depth of the tree when the root is 160MHz
+ *             channel is 4, 80MHz is 3, 40MHz is 2 and 20MHz is 1.
  *
  * Return: EOK if new node is allocated, else return ENOMEM.
  */
@@ -2367,7 +2400,8 @@
 static QDF_STATUS
 dfs_insert_node_into_bstree_for_freq(struct precac_tree_node **root,
 				     uint16_t chan_freq,
-				     uint8_t bandwidth)
+				     uint8_t bandwidth,
+				     uint8_t depth)
 {
 	struct precac_tree_node *new_node = NULL;
 	struct precac_tree_node *curr_node, *prev_node = NULL;
@@ -2376,7 +2410,10 @@
 	new_node = qdf_mem_malloc(sizeof(*new_node));
 	if (!new_node)
 		return -ENOMEM;
-	dfs_init_precac_tree_node_for_freq(new_node, chan_freq, bandwidth);
+	dfs_init_precac_tree_node_for_freq(new_node,
+					   chan_freq,
+					   bandwidth,
+					   depth);
 
 	/* If root node is null, assign the newly allocated node
 	 * to this node and return.
@@ -2435,13 +2472,13 @@
 	struct precac_tree_node *root = NULL;
 	int chan, i, bandwidth = DFS_CHWIDTH_80_VAL;
 	QDF_STATUS status = EOK;
-	static const int initial_and_next_offsets[TREE_DEPTH][N_OFFSETS] = {
+	static const int initial_and_next_offsets[TREE_DEPTH_MAX][N_OFFSETS] = {
 		{INITIAL_80_CHAN_OFFSET, NEXT_80_CHAN_OFFSET},
 		{INITIAL_40_CHAN_OFFSET, NEXT_40_CHAN_OFFSET},
 		{INITIAL_20_CHAN_OFFSET, NEXT_20_CHAN_OFFSET}
 	};
 
-	for (i = 0; i < TREE_DEPTH; i++) {
+	for (i = 0; i < TREE_DEPTH_MAX; i++) {
 		/* In offset array,
 		 * column 0 is initial chan offset,
 		 * column 1 is next chan offset.
@@ -2469,89 +2506,208 @@
 }
 #endif
 
-/* dfs_create_precac_tree_for_freq() - Fill precac entry tree (level insertion).
- * @dfs:     WLAN DFS structure
- * @ch_freq: root_node freq.
- */
 #ifdef CONFIG_CHAN_FREQ_API
-static QDF_STATUS
-dfs_create_precac_tree_for_freq(struct wlan_dfs *dfs,
-				struct dfs_precac_entry *precac_entry,
-				uint16_t ch_freq)
-{
-	struct precac_tree_node *root = NULL;
-	int chan_freq, i, bandwidth = DFS_CHWIDTH_80_VAL;
-	QDF_STATUS status = EOK;
-	static const int initial_and_next_offsets[TREE_DEPTH][N_OFFSETS] = {
+/**
+ * struct precac_tree_offset_for_different_bw - Bandwidth, tree depth and
+ * channel offsets information to build the precac tree.
+ * @bandwidth:                Bandwidth of the root node.
+ * @tree_depth:               Tree depth of the precac tree.
+ * @initial_and_next_offsets: Offset to root node to find the initial and the
+ *                            next channels of the node.
+ */
+struct precac_tree_offset_for_different_bw {
+	int bandwidth;
+	int tree_depth;
+	int initial_and_next_offsets[TREE_DEPTH_MAX][N_OFFSETS];
+};
+
+static const
+struct precac_tree_offset_for_different_bw offset20 = {DFS_CHWIDTH_20_VAL,
+	TREE_DEPTH_20,
+	{
+		{0, NEXT_20_CHAN_FREQ_OFFSET}
+	}
+};
+
+static const
+struct precac_tree_offset_for_different_bw offset40 = {DFS_CHWIDTH_40_VAL,
+	TREE_DEPTH_40,
+	{
+		{0, NEXT_40_CHAN_FREQ_OFFSET},
+		{-10, NEXT_20_CHAN_FREQ_OFFSET}
+	}
+};
+
+static const
+struct precac_tree_offset_for_different_bw offset80 = {DFS_CHWIDTH_80_VAL,
+	TREE_DEPTH_80,
+	{
+		{0, NEXT_80_CHAN_FREQ_OFFSET},
+		{-20, NEXT_40_CHAN_FREQ_OFFSET},
+		{-30, NEXT_20_CHAN_FREQ_OFFSET}
+	}
+};
+
+static const
+struct precac_tree_offset_for_different_bw offset160 = {DFS_CHWIDTH_160_VAL,
+	TREE_DEPTH_160,
+	{
+		{INITIAL_160_CHAN_FREQ_OFFSET, NEXT_160_CHAN_FREQ_OFFSET},
 		{INITIAL_80_CHAN_FREQ_OFFSET, NEXT_80_CHAN_FREQ_OFFSET},
 		{INITIAL_40_CHAN_FREQ_OFFSET, NEXT_40_CHAN_FREQ_OFFSET},
 		{INITIAL_20_CHAN_FREQ_OFFSET, NEXT_20_CHAN_FREQ_OFFSET}
-	};
+	}
+};
 
-	for (i = 0; i < TREE_DEPTH; i++) {
+static const
+struct precac_tree_offset_for_different_bw default_offset = {0, 0};
+
+/* dfs_create_precac_tree_for_freq() - Fill precac entry tree (level insertion).
+ * @dfs:       WLAN DFS structure
+ * @ch_freq:   root_node freq.
+ * @root:      Pointer to the node that will be filled and inserted as tree
+ *             root.
+ * @bandwidth: Bandwidth value of the root.
+ */
+static QDF_STATUS
+dfs_create_precac_tree_for_freq(struct wlan_dfs *dfs,
+				uint16_t ch_freq,
+				struct precac_tree_node **root,
+				int bandwidth)
+{
+	int chan_freq, i;
+	QDF_STATUS status = EOK;
+	struct precac_tree_offset_for_different_bw current_mode;
+	uint8_t top_lvl_step;
+	bool is_node_part_of_165_tree = false;
+
+	if (ch_freq == RESTRICTED_80P80_LEFT_80_CENTER_FREQ ||
+	    ch_freq == RESTRICTED_80P80_RIGHT_80_CENTER_FREQ)
+		is_node_part_of_165_tree = true;
+
+	switch (bandwidth) {
+	case DFS_CHWIDTH_160_VAL:
+			current_mode = offset160;
+			break;
+	case DFS_CHWIDTH_80_VAL:
+			current_mode = offset80;
+			break;
+	case DFS_CHWIDTH_40_VAL:
+			current_mode = offset40;
+			break;
+	case DFS_CHWIDTH_20_VAL:
+			current_mode = offset20;
+			break;
+	default:
+			current_mode = default_offset;
+			break;
+	}
+	top_lvl_step = current_mode.initial_and_next_offsets[0][1];
+	for (i = 0; i < current_mode.tree_depth; i++) {
 		/* In offset array,
 		 * column 0 is initial chan offset,
 		 * column 1 is next chan offset.
 		 * Boundary offset is initial offset and next offset
 		 * of root level (since root level can have only 1 node)
 		 */
-		int offset = initial_and_next_offsets[i][START_INDEX];
-		int step = initial_and_next_offsets[i][STEP_INDEX];
-		uint8_t top_lvl_step = NEXT_80_CHAN_FREQ_OFFSET;
+		int offset =
+		    current_mode.initial_and_next_offsets[i][START_INDEX];
+		int step = current_mode.initial_and_next_offsets[i][STEP_INDEX];
 		int boundary_offset = offset + top_lvl_step;
+		uint8_t depth = is_node_part_of_165_tree ? i + 1 : i;
 
 		for (; offset < boundary_offset; offset += step) {
 			chan_freq = (int)ch_freq + offset;
 			status =
-			    dfs_insert_node_into_bstree_for_freq(&root,
+			    dfs_insert_node_into_bstree_for_freq(root,
 								 chan_freq,
-								 bandwidth);
+								 bandwidth,
+								 depth);
 			if (status)
 				return status;
 		}
 		bandwidth /= 2;
 	}
 
-	precac_entry->tree_root = root;
 	return status;
 }
 #endif
 
-/*
- * dfs_init_precac_list() - Initialize preCAC lists.
- * @dfs: Pointer to wlan_dfs.
- */
 #ifdef CONFIG_CHAN_FREQ_API
-void dfs_init_precac_list(struct wlan_dfs *dfs)
+/**
+ * struct dfs_channel_bw - Structure to store the information about precac
+ * root's primary channel frequency, maximum bandwidth and the center frequency.
+ *
+ * @dfs_pri_ch_freq:     Primary channel frequency of the root channel.
+ * @dfs_center_ch_freq:  Center frequency of the 20/40/80/160 channel. In case of
+ *                       the 165MHz channel, it is 5730MHz.
+ * @dfs_max_bw:          Maximum bandwidth of the channel available in the
+ *                       current channel list.
+ */
+struct dfs_channel_bw {
+	uint16_t dfs_pri_ch_freq;
+	uint16_t dfs_center_ch_freq;
+	uint16_t dfs_max_bw;
+};
+
+static void
+dfs_calculate_bw_for_same_pri_ch(struct wlan_dfs *dfs,
+				 struct dfs_channel_bw *dfs_max_bw_info,
+				 int index,
+				 struct dfs_channel *ichan,
+				 int *delimiter)
 {
-	u_int i;
-	uint8_t found;
-	struct dfs_precac_entry *tmp_precac_entry;
-	int nchans = 0;
-	QDF_STATUS status;
+	uint8_t temp_bw = 0;
 
-	/* Right now, only ETSI domain supports preCAC. Check if current
-	 * DFS domain is ETSI and only then build the preCAC list.
-	 */
-	if (utils_get_dfsdomain(dfs->dfs_pdev_obj) != DFS_ETSI_DOMAIN)
-		return;
+	dfs_max_bw_info[index].dfs_pri_ch_freq = ichan->dfs_ch_freq;
+	dfs_max_bw_info[index].dfs_center_ch_freq = ichan->dfs_ch_mhz_freq_seg1;
 
-	/*
-	 * We need to prepare list of uniq VHT80 center frequencies. But at the
-	 * beginning we do not know how many uniq frequencies are present.
-	 * Therefore, we calculate the MAX size and allocate a temporary
-	 * list/array. However we fill the temporary array with uniq frequencies
-	 * and copy the uniq list of frequencies to the final list with exact
-	 * size.
-	 */
-	TAILQ_INIT(&dfs->dfs_precac_list);
+	if (WLAN_IS_CHAN_MODE_20(ichan)) {
+		temp_bw = DFS_CHWIDTH_20_VAL;
+	} else if (WLAN_IS_CHAN_MODE_40(ichan)) {
+		temp_bw = DFS_CHWIDTH_40_VAL;
+	} else if (WLAN_IS_CHAN_MODE_80(ichan) ||
+		   WLAN_IS_CHAN_MODE_80_80(ichan)) {
+		temp_bw = DFS_CHWIDTH_80_VAL;
+	if (dfs_is_restricted_80p80mhz_supported(dfs) &&
+	    WLAN_IS_PRIMARY_OR_SECONDARY_CHAN_DFS(ichan) &&
+	    (ichan->dfs_ch_vhtop_ch_freq_seg1 ==
+	     RESTRICTED_80P80_LEFT_80_CENTER_CHAN) &&
+	    (ichan->dfs_ch_vhtop_ch_freq_seg2 ==
+	     RESTRICTED_80P80_RIGHT_80_CENTER_CHAN)) {
+		temp_bw = DFS_CHWIDTH_165_VAL;
+		dfs_max_bw_info[index].dfs_center_ch_freq =
+			RESTRICTED_80P80_CHAN_CENTER_FREQ;
+		}
+	} else if (WLAN_IS_CHAN_MODE_160(ichan)) {
+		temp_bw = DFS_CHWIDTH_160_VAL;
+		dfs_max_bw_info[index].dfs_center_ch_freq =
+			ichan->dfs_ch_mhz_freq_seg2;
+	}
+	if (temp_bw > dfs_max_bw_info[index].dfs_max_bw)
+		dfs_max_bw_info[index].dfs_max_bw = temp_bw;
+	*delimiter = dfs_max_bw_info[index].dfs_pri_ch_freq +
+	dfs_max_bw_info[index].dfs_max_bw;
+}
+
+/* dfs_fill_max_bw_for_chan() - Finds unique precac tree node in the channel
+ * list and stores the primary channel frequency, maximum bandwidth and the
+ * center frequency. The algorithm is based on the data structure ic_channels
+ * where the channels are organized as 36HT20, 36HT40, 36HT80,... and so on.
+ * @dfs:               WLAN DFS structure
+ * @dfs_max_bw_info:   Structure to store precac tree root channel's
+ * information.
+ * @num_precac_roots:  Number of unique precac tree root channels found.
+ */
+static void dfs_fill_max_bw_for_chan(struct wlan_dfs *dfs,
+				     struct dfs_channel_bw *dfs_max_bw_info,
+				     int *num_precac_roots)
+{
+	int nchans = 0, i, j = 0, prev_ch_freq = 0, delimiter = 0;
+
 	dfs_mlme_get_dfs_ch_nchans(dfs->dfs_pdev_obj, &nchans);
-
-	PRECAC_LIST_LOCK(dfs);
-	/* Fill the  precac_list with unique elements */
 	for (i = 0; i < nchans; i++) {
 		struct dfs_channel *ichan = NULL, lc;
-		uint16_t pri_chan_cfreq = 0;
 
 		ichan = &lc;
 		dfs_mlme_get_dfs_channels_for_freq
@@ -2565,60 +2721,242 @@
 			 &ichan->dfs_ch_mhz_freq_seg1,
 			 &ichan->dfs_ch_mhz_freq_seg2,
 			 i);
-		pri_chan_cfreq = ichan->dfs_ch_mhz_freq_seg1;
+		if (!WLAN_IS_PRIMARY_OR_SECONDARY_CHAN_DFS(ichan))
+			continue;
+		if (ichan->dfs_ch_freq == prev_ch_freq) {
+			dfs_calculate_bw_for_same_pri_ch(dfs,
+							 dfs_max_bw_info,
+							 j,
+							 ichan,
+							 &delimiter);
+		} else if (ichan->dfs_ch_freq < delimiter) {
+			continue;
+		} else {
+			prev_ch_freq = ichan->dfs_ch_freq;
+			j++;
+		}
+	}
+	*num_precac_roots = j + 1;
+}
 
-		if (WLAN_IS_CHAN_11AC_VHT80(ichan) &&
-		    WLAN_IS_CHAN_DFS(ichan)) {
-			found = 0;
-			TAILQ_FOREACH(tmp_precac_entry,
-				      &dfs->dfs_precac_list,
-				      pe_list) {
-				if (tmp_precac_entry->vht80_ch_freq ==
-				    pri_chan_cfreq) {
-					found = 1;
-					break;
-				}
+static QDF_STATUS
+dfs_precac_create_precac_entry(struct wlan_dfs *dfs,
+			       struct dfs_precac_entry *precac_entry,
+			       struct dfs_channel_bw *dfs_max_bw_info,
+			       int index)
+{
+	QDF_STATUS status;
+	uint16_t precac_center_freq =
+	    dfs_max_bw_info[index].dfs_center_ch_freq;
+
+	precac_entry->center_ch_freq = precac_center_freq;
+	precac_entry->center_ch_ieee =
+	utils_dfs_freq_to_chan(precac_center_freq);
+	precac_entry->bw = dfs_max_bw_info[index].dfs_max_bw;
+	precac_entry->dfs = dfs;
+	status =
+	    dfs_create_precac_tree_for_freq(dfs,
+					    precac_entry->center_ch_freq,
+					    &precac_entry->tree_root,
+					    precac_entry->bw);
+	if (status) {
+		dfs_debug(dfs, WLAN_DEBUG_DFS,
+			  "PreCAC entry for channel %d not created",
+			  precac_entry->center_ch_ieee);
+	} else {
+	    TAILQ_INSERT_TAIL(
+		    &dfs->dfs_precac_list,
+		    precac_entry, pe_list);
+	}
+	return status;
+}
+
+static QDF_STATUS
+dfs_precac_create_165mhz_precac_entry(struct wlan_dfs *dfs,
+				      struct dfs_precac_entry *precac_entry)
+{
+	QDF_STATUS status;
+
+	precac_entry->center_ch_freq =
+		RESTRICTED_80P80_CHAN_CENTER_FREQ;
+	precac_entry->center_ch_ieee =
+		utils_dfs_freq_to_chan(precac_entry->center_ch_freq);
+	precac_entry->bw = DFS_CHWIDTH_160_VAL;
+	precac_entry->dfs = dfs;
+	dfs_insert_node_into_bstree_for_freq(&precac_entry->tree_root,
+					     RESTRICTED_80P80_CHAN_CENTER_FREQ,
+					     DFS_CHWIDTH_160_VAL,
+					     DEPTH_160_ROOT);
+	status =
+		dfs_create_precac_tree_for_freq
+		(dfs,
+					RESTRICTED_80P80_LEFT_80_CENTER_FREQ,
+					&precac_entry->tree_root->left_child,
+					DFS_CHWIDTH_80_VAL);
+	if (!status)
+		status =
+		    dfs_create_precac_tree_for_freq(
+			    dfs,
+			    RESTRICTED_80P80_RIGHT_80_CENTER_FREQ,
+			    &precac_entry->tree_root->right_child,
+			    DFS_CHWIDTH_80_VAL);
+	TAILQ_INSERT_TAIL(
+			&dfs->dfs_precac_list,
+			precac_entry, pe_list);
+	return status;
+}
+
+static void
+dfs_mark_non_dfs_as_precac_done(struct wlan_dfs *dfs,
+				uint16_t dfs_pri_ch_freq,
+				enum wlan_phymode mode)
+{
+	struct dfs_channel *ichan, lc;
+
+	ichan = &lc;
+	dfs_mlme_find_dot11_chan_for_freq(dfs->dfs_pdev_obj,
+					  dfs_pri_ch_freq,
+					  0,
+					  mode,
+					  &ichan->dfs_ch_freq,
+					  &ichan->dfs_ch_flags,
+					  &ichan->dfs_ch_flagext,
+					  &ichan->dfs_ch_ieee,
+					  &ichan->dfs_ch_vhtop_ch_freq_seg1,
+					  &ichan->dfs_ch_vhtop_ch_freq_seg2,
+					  &ichan->dfs_ch_mhz_freq_seg1,
+					  &ichan->dfs_ch_mhz_freq_seg2);
+	if (!WLAN_IS_CHAN_DFS(ichan)) {
+		PRECAC_LIST_UNLOCK(dfs);
+		dfs_mark_precac_done_for_freq(dfs,
+					      ichan->dfs_ch_mhz_freq_seg1,
+					      0,
+					      CH_WIDTH_80MHZ);
+		PRECAC_LIST_LOCK(dfs);
+	} else if (!WLAN_IS_CHAN_DFS_CFREQ2(ichan)) {
+		PRECAC_LIST_UNLOCK(dfs);
+		dfs_mark_precac_done_for_freq(dfs,
+					      ichan->dfs_ch_mhz_freq_seg2,
+					      0,
+					      CH_WIDTH_80MHZ);
+		PRECAC_LIST_LOCK(dfs);
+	}
+}
+
+/*
+ * dfs_init_precac_list() - Initialize preCAC lists.
+ * @dfs: Pointer to wlan_dfs.
+ */
+void dfs_init_precac_list(struct wlan_dfs *dfs)
+{
+	u_int i;
+	uint8_t found;
+	struct dfs_precac_entry *tmp_precac_entry;
+	int nchans = 0;
+	QDF_STATUS status;
+	struct dfs_channel_bw *dfs_max_bw_info;
+	int num_precac_roots;
+
+	/* Right now, only ETSI domain supports preCAC. Check if current
+	 * DFS domain is ETSI and only then build the preCAC list.
+	 */
+	if (utils_get_dfsdomain(dfs->dfs_pdev_obj) != DFS_ETSI_DOMAIN)
+		return;
+
+	/*
+	 * We need to prepare a list of unique center frequencies of maximum
+	 * possible bandwidths. But at the beginning we do not know how many
+	 * unique frequencies are present. Therefore, we calculate the MAX size
+	 * and allocate a temporary list/array. However we fill the temporary
+	 * array with unique frequencies and copy the unique list of frequencies
+	 * to the final list with exact size.
+	 */
+	dfs_mlme_get_dfs_ch_nchans(dfs->dfs_pdev_obj, &nchans);
+	dfs_max_bw_info = qdf_mem_malloc(nchans *
+		sizeof(struct dfs_channel_bw));
+	if (!dfs_max_bw_info) {
+		dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS,
+			"memory allocation failed");
+		return;
+	}
+	dfs_fill_max_bw_for_chan(dfs, dfs_max_bw_info, &num_precac_roots);
+
+	TAILQ_INIT(&dfs->dfs_precac_list);
+
+	PRECAC_LIST_LOCK(dfs);
+	for (i = 0; i < num_precac_roots; i++) {
+		uint16_t pri_chan_cfreq = dfs_max_bw_info[i].dfs_center_ch_freq;
+
+		found = 0;
+		TAILQ_FOREACH(tmp_precac_entry,
+			      &dfs->dfs_precac_list,
+			      pe_list) {
+			if (tmp_precac_entry->center_ch_freq ==
+					pri_chan_cfreq) {
+				found = 1;
+				break;
 			}
-			if (!found && pri_chan_cfreq) {
-				struct dfs_precac_entry *precac_entry;
+		}
+		if (!found && pri_chan_cfreq) {
+			struct dfs_precac_entry *precac_entry;
 
-				precac_entry =
-					qdf_mem_malloc(sizeof(*precac_entry));
-				if (!precac_entry) {
-					dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS,
-						"entry alloc fail for : %d", i);
-					continue;
-				}
-				precac_entry->vht80_ch_freq =
-					pri_chan_cfreq;
-				precac_entry->vht80_ch_ieee =
-					utils_dfs_freq_to_chan(pri_chan_cfreq);
-				precac_entry->dfs = dfs;
-				status =
-					dfs_create_precac_tree_for_freq
-					(dfs, precac_entry, pri_chan_cfreq);
+			precac_entry =
+				qdf_mem_malloc(sizeof(*precac_entry));
+			if (!precac_entry) {
+				dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS,
+					"entry alloc fail for : %d", i);
+				continue;
+			}
+			if (dfs_max_bw_info[i].dfs_max_bw ==
+				DFS_CHWIDTH_165_VAL) {
+				status = dfs_precac_create_165mhz_precac_entry(
+						dfs,
+						precac_entry);
 				if (status) {
-					dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS,
-						"tree_node alloc failed");
+					dfs_debug(dfs,
+						  WLAN_DEBUG_DFS,
+						  "PreCAC entry for channel 146 not created");
 					continue;
 				}
-				TAILQ_INSERT_TAIL(
-						  &dfs->dfs_precac_list,
-						  precac_entry, pe_list);
+			    /* The restricted 80p80 or the 165MHz channel might
+			     * have a non DFS part with center frequency 5775.
+			     * Mark the non DFS portion as precac done.
+			     */
+			    dfs_mark_non_dfs_as_precac_done(
+				    dfs,
+				    dfs_max_bw_info[i].dfs_pri_ch_freq,
+				    WLAN_PHYMODE_11AC_VHT80_80);
+			} else {
+			    status =
+				dfs_precac_create_precac_entry(dfs,
+							       precac_entry,
+							       dfs_max_bw_info,
+							       i);
+			if (status)
+				continue;
+			/* Some channels like 36HT160 might have a non DFS
+			 * part. Mark the non DFS portion as precac done.
+			 */
+			dfs_mark_non_dfs_as_precac_done(
+				dfs,
+				dfs_max_bw_info[i].dfs_pri_ch_freq,
+				WLAN_PHYMODE_11AC_VHT160);
 			}
 		}
 	}
 	PRECAC_LIST_UNLOCK(dfs);
+	qdf_mem_free(dfs_max_bw_info);
 
 	dfs_debug(dfs, WLAN_DEBUG_DFS,
-		  "Print the list of VHT80 frequencies from linked list");
+		  "Print the list of PreCAC ieee chan from linked list");
 	TAILQ_FOREACH(tmp_precac_entry,
 		      &dfs->dfs_precac_list,
 		      pe_list) {
-	    uint8_t ch_ieee;
+	    uint8_t ch_ieee, bw;
 
-	    ch_ieee = utils_dfs_freq_to_chan(tmp_precac_entry->vht80_ch_freq);
-	    dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "ieee=%u", ch_ieee);
+	    ch_ieee = utils_dfs_freq_to_chan(tmp_precac_entry->center_ch_freq);
+	    bw = tmp_precac_entry->bw;
+	    dfs_info(dfs, WLAN_DEBUG_DFS_ALWAYS, "ieee=%u bw=%u", ch_ieee, bw);
 	}
 }
 #else
@@ -3388,7 +3726,7 @@
 				  uint8_t ocac_status,
 				  struct dfs_agile_cac_params *adfs_param)
 {
-	uint16_t pcacfreq = adfs_param->precac_chan_freq;
+	uint16_t pcacfreq = adfs_param->precac_center_freq_1;
 	enum phy_ch_width chwidth = adfs_param->precac_chwidth;
 	uint32_t min_precac_timeout, max_precac_timeout;
 	struct dfs_soc_priv_obj *dfs_soc_obj;
@@ -3659,7 +3997,7 @@
  * based on the level (and by our logic, bandwidth) of the current node.
  *
  */
-#define MAX_PREFIX_CHAR 20
+#define MAX_PREFIX_CHAR 28
 /*Retaining IEEE to print node data */
 static void dfs_print_node_data(struct wlan_dfs *dfs,
 				struct precac_tree_node *node)
@@ -3668,17 +4006,25 @@
 	char prev_line_prefix[MAX_PREFIX_CHAR] = "";
 	char inv[4] = "inv";
 
-	switch (node->bandwidth) {
-	case DFS_CHWIDTH_80_VAL:
+	switch (node->depth) {
+	case DEPTH_160_ROOT:
 		break;
-	case DFS_CHWIDTH_40_VAL:
+	case DEPTH_80_ROOT:
 		qdf_str_lcopy(prev_line_prefix, "|", MAX_PREFIX_CHAR);
 		qdf_str_lcopy(prefix, "|------- ", MAX_PREFIX_CHAR);
 		break;
-	case DFS_CHWIDTH_20_VAL:
+	case DEPTH_40_ROOT:
 		qdf_str_lcopy(prev_line_prefix, "|        |", MAX_PREFIX_CHAR);
 		qdf_str_lcopy(prefix, "|        |------- ", MAX_PREFIX_CHAR);
 		break;
+	case DEPTH_20_ROOT:
+		qdf_str_lcopy(prev_line_prefix,
+			      "|        |        |",
+			      MAX_PREFIX_CHAR);
+		qdf_str_lcopy(prefix,
+			      "|        |        |------- ",
+			      MAX_PREFIX_CHAR);
+		break;
 	default:
 		return;
 	}
@@ -4239,8 +4585,12 @@
 	case CH_WIDTH_40MHZ:
 		return CH_WIDTH_40MHZ;
 	case CH_WIDTH_80MHZ:
+		return CH_WIDTH_80MHZ;
 	case CH_WIDTH_80P80MHZ:
 	case CH_WIDTH_160MHZ:
+		if (dfs_is_true_160mhz_supported(dfs) ||
+		    dfs_is_restricted_80p80mhz_supported(dfs))
+			return CH_WIDTH_160MHZ;
 		return CH_WIDTH_80MHZ;
 	default:
 		dfs_err(dfs, WLAN_DEBUG_DFS_ALWAYS, "Invalid chwidth enum!");
@@ -4356,6 +4706,13 @@
 	else
 		dfs->dfs_agile_precac_freq_mhz = 0;
 
+	/* It was assumed that the bandwidth of the restricted 80p80 channel is
+	 * 160MHz to build the precac tree. But when configuring Agile the
+	 * channel width should be given as 80p80.
+	 */
+	if (ieee_chan_freq == RESTRICTED_80P80_CHAN_CENTER_FREQ)
+		dfs->dfs_precac_chwidth = CH_WIDTH_80P80MHZ;
+
 	*ch_freq = dfs->dfs_agile_precac_freq_mhz;
 }
 #endif
@@ -4697,7 +5054,7 @@
 		 * and ocac_status as 0
 		 */
 		adfs_param.precac_chan = 0;
-		adfs_param.precac_chan_freq = 0;
+		adfs_param.precac_center_freq_1 = 0;
 		adfs_param.precac_chwidth = CH_WIDTH_INVALID;
 		qdf_info("%s : %d Initiated agile precac",
 			 __func__, __LINE__);
diff --git a/wmi/inc/wmi_unified_ap_api.h b/wmi/inc/wmi_unified_ap_api.h
index 8f91a9f..a39dd7e 100644
--- a/wmi/inc/wmi_unified_ap_api.h
+++ b/wmi/inc/wmi_unified_ap_api.h
@@ -625,7 +625,7 @@
  */
 QDF_STATUS wmi_extract_vdev_start_resp(
 		wmi_unified_t wmi_handle, void *evt_buf,
-		wmi_host_vdev_start_resp *vdev_rsp);
+		struct vdev_start_response *vdev_rsp);
 
 /**
  * wmi_extract_vdev_delete_resp - api to extract vdev delete
@@ -766,4 +766,16 @@
 		uint8_t peer_addr[QDF_MAC_ADDR_SIZE],
 		struct peer_vlan_config_param *param);
 
+/**
+ * wmi_extract_muedca_params_handler() - WMI function to extract Muedca params
+ *
+ * @wmi_handle: WMI handle
+ * @evt_buf: Event data buffer
+ * @muedca_param_list: struct muedca_params
+ *
+ * Return: QDF_STATUS_SUCCESS if success, else returns proper error code.
+ */
+QDF_STATUS wmi_extract_muedca_params_handler(wmi_unified_t wmi_handle,
+		void *evt_buf, struct muedca_params *muedca_param_list);
+
 #endif /* _WMI_UNIFIED_AP_API_H_ */
diff --git a/wmi/src/wmi_unified_ap_api.c b/wmi/src/wmi_unified_ap_api.c
index 7dc68ea..d5f7b6a 100644
--- a/wmi/src/wmi_unified_ap_api.c
+++ b/wmi/src/wmi_unified_ap_api.c
@@ -605,3 +605,15 @@
 
 	return QDF_STATUS_E_FAILURE;
 }
+
+QDF_STATUS wmi_extract_muedca_params_handler(
+		wmi_unified_t wmi_handle,
+		void *evt_buf,
+		struct muedca_params *muedca_param_list)
+{
+	if (wmi_handle->ops->extract_muedca_params_handler)
+		return wmi_handle->ops->extract_muedca_params_handler(
+					wmi_handle, evt_buf, muedca_param_list);
+
+	return QDF_STATUS_E_FAILURE;
+}
diff --git a/wmi/src/wmi_unified_ap_tlv.c b/wmi/src/wmi_unified_ap_tlv.c
index cd60c68..66cc624 100644
--- a/wmi/src/wmi_unified_ap_tlv.c
+++ b/wmi/src/wmi_unified_ap_tlv.c
@@ -2534,7 +2534,10 @@
 		return QDF_STATUS_E_INVAL;
 	}
 
-	param->pdev_id = ev->pdev_id;
+
+	param->pdev_id = wmi_hdl->ops->convert_target_pdev_id_to_host(
+								wmi_hdl,
+								ev->pdev_id);
 	param->status = ev->status;
 
 	if (!param_buf->num_vdev_ids_bitmap)
diff --git a/wmi/src/wmi_unified_non_tlv.c b/wmi/src/wmi_unified_non_tlv.c
index 167a696..78bdf90 100644
--- a/wmi/src/wmi_unified_non_tlv.c
+++ b/wmi/src/wmi_unified_non_tlv.c
@@ -7439,9 +7439,10 @@
  *
  * Return: 0 for success or error code
  */
-static QDF_STATUS extract_vdev_start_resp_non_tlv(wmi_unified_t wmi_handle,
-		void *evt_buf,
-		wmi_host_vdev_start_resp *vdev_rsp)
+static QDF_STATUS extract_vdev_start_resp_non_tlv(
+					wmi_unified_t wmi_handle,
+					void *evt_buf,
+					struct vdev_start_response *vdev_rsp)
 {
 	wmi_vdev_start_response_event *ev =
 	    (wmi_vdev_start_response_event *) evt_buf;