Merge "qca-wifi: Add WMI support for dynamic Muedca"
diff --git a/configs/wlan_cfg/ap_wlan_cfg.config b/configs/wlan_cfg/ap_wlan_cfg.config
index e7cddc7..5f75ae4 100644
--- a/configs/wlan_cfg/ap_wlan_cfg.config
+++ b/configs/wlan_cfg/ap_wlan_cfg.config
@@ -34,3 +34,6 @@
EXTRA_CFLAGS += -DRX_DATA_BUFFER_SIZE=1792
EXTRA_CFLAGS += -DRX_DATA_BUFFER_ALIGNMENT=0
endif
+
+EXTRA_CFLAGS += -DWLAN_PSOC_MAX_VDEVS=60
+EXTRA_CFLAGS += -DWLAN_PDEV_MAX_VDEVS=30
diff --git a/dp/inc/cdp_txrx_extd_struct.h b/dp/inc/cdp_txrx_extd_struct.h
index 43d5945..31b835a 100644
--- a/dp/inc/cdp_txrx_extd_struct.h
+++ b/dp/inc/cdp_txrx_extd_struct.h
@@ -85,6 +85,7 @@
uint32_t length;
uint8_t per_chain_rssi[CDP_MAX_RX_CHAINS];
uint8_t channel;
+ qdf_freq_t chan_freq;
};
#ifdef __KERNEL__
diff --git a/dp/wifi3.0/dp_rx_mon_feature.c b/dp/wifi3.0/dp_rx_mon_feature.c
index 4a4c66b..b8a0960 100644
--- a/dp/wifi3.0/dp_rx_mon_feature.c
+++ b/dp/wifi3.0/dp_rx_mon_feature.c
@@ -81,6 +81,7 @@
cdp_mpdu_info->ppdu_id = ppdu_info->com_info.ppdu_id;
cdp_mpdu_info->channel = ppdu_info->rx_status.chan_num;
+ cdp_mpdu_info->chan_freq = ppdu_info->rx_status.chan_freq;
cdp_mpdu_info->duration = ppdu_info->rx_status.duration;
cdp_mpdu_info->timestamp = ppdu_info->rx_status.tsft;
cdp_mpdu_info->bw = ppdu_info->rx_status.bw;
diff --git a/dp/wifi3.0/dp_tx_capture.c b/dp/wifi3.0/dp_tx_capture.c
index e74700d..bd02318 100644
--- a/dp/wifi3.0/dp_tx_capture.c
+++ b/dp/wifi3.0/dp_tx_capture.c
@@ -22,7 +22,6 @@
#include "dp_peer.h"
#include "dp_types.h"
#include "dp_internal.h"
-#include "dp_rx_mon.h"
#include "htt_ppdu_stats.h"
#include "dp_htt.h"
#include "qdf_mem.h" /* qdf_mem_malloc,free */
@@ -42,6 +41,9 @@
#define DP_NUM_BYTES_PER_PPDU_BITMAP_64 (DP_MAX_MPDU_64 >> 3)
#define DP_NUM_BYTES_PER_PPDU_BITMAP (HAL_RX_MAX_MPDU >> 3)
#define DP_IEEE80211_BAR_CTL_TID_S 12
+#define DP_IEEE80211_BAR_CTL_TID_M 0xf
+#define DP_IEEE80211_BAR_CTL_POLICY_S 0
+#define DP_IEEE80211_BAR_CTL_POLICY_M 0x1
#define DP_IEEE80211_BA_S_SEQ_S 4
#define DP_IEEE80211_BAR_CTL_COMBA 0x0004
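The TID and policy masks added above are consumed later by dp_bar_send_ack_frm_to_stack() to decode the 16-bit BAR Control word. A minimal, self-contained sketch of that decode, assuming the standard 802.11 BAR Control layout (B0 = BAR ack policy, B2 = compressed bitmap, B12..B15 = TID); the helpers here are hypothetical and only illustrate the bit arithmetic:

#include <stdint.h>

#define DP_IEEE80211_BAR_CTL_TID_S     12
#define DP_IEEE80211_BAR_CTL_TID_M     0xf
#define DP_IEEE80211_BAR_CTL_POLICY_S  0
#define DP_IEEE80211_BAR_CTL_POLICY_M  0x1
#define DP_IEEE80211_BAR_CTL_COMBA     0x0004

/* TID carried in bits 12..15 of the BAR Control field. */
static uint8_t bar_ctl_tid(uint16_t bar_ctl)
{
	return (bar_ctl >> DP_IEEE80211_BAR_CTL_TID_S) &
		DP_IEEE80211_BAR_CTL_TID_M;
}

/* BAR ack policy bit: 0 = normal ack, 1 = no ack. */
static uint8_t bar_ctl_policy(uint16_t bar_ctl)
{
	return (bar_ctl >> DP_IEEE80211_BAR_CTL_POLICY_S) &
		DP_IEEE80211_BAR_CTL_POLICY_M;
}

/* Example: a compressed BlockAckReq for TID 5 with normal-ack policy has
 * bar_ctl = (5 << 12) | DP_IEEE80211_BAR_CTL_COMBA, so bar_ctl_tid() == 5
 * and bar_ctl_policy() == 0.
 */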
@@ -100,6 +102,46 @@
#ifdef WLAN_TX_PKT_CAPTURE_ENH
+/**
+ * dp_tx_cap_peer_find_by_id() - Returns the peer object for the given peer id
+ * if delete_in_progress is not set for the peer
+ *
+ * @soc: core DP soc context
+ * @peer_id: peer id from which the peer object is retrieved
+ *
+ * Return: Pointer to the DP peer object, or NULL if the peer is not found or
+ * is being deleted
+ */
+static inline
+struct dp_peer *dp_tx_cap_peer_find_by_id(struct dp_soc *soc,
+ uint16_t peer_id)
+{
+ struct dp_peer *peer;
+
+ qdf_spin_lock_bh(&soc->peer_ref_mutex);
+ peer = __dp_peer_find_by_id(soc, peer_id);
+ if (!peer || (peer && peer->delete_in_progress)) {
+ qdf_spin_unlock_bh(&soc->peer_ref_mutex);
+ return NULL;
+ }
+
+ qdf_atomic_inc(&peer->ref_cnt);
+ qdf_spin_unlock_bh(&soc->peer_ref_mutex);
+
+ return peer;
+}
+
+/**
+ * dp_tx_cap_peer_unref_del() - decrement the reference count taken by
+ * dp_tx_cap_peer_find_by_id() and delete the peer when it drops to zero
+ * @peer: peer context
+ *
+ * Return: none
+ */
+static inline void dp_tx_cap_peer_unref_del(struct dp_peer *peer)
+{
+ dp_peer_unref_delete(peer);
+}
+
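The two helpers above pair an atomic lookup-plus-refcount with its matching release; the call sites converted later in this patch all follow the same acquire/use/release shape. A minimal call-site sketch under that assumption (dp_tx_cap_example_user() is hypothetical; the dp_soc/dp_peer types are the driver's own):

static void dp_tx_cap_example_user(struct dp_soc *soc, uint16_t peer_id)
{
	struct dp_peer *peer;

	peer = dp_tx_cap_peer_find_by_id(soc, peer_id);
	if (!peer)
		return; /* peer not found or delete_in_progress */

	/* ... use the peer while the reference is held ... */

	dp_tx_cap_peer_unref_del(peer); /* drop the reference taken above */
}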
/*
* dp_tx_capture_htt_frame_counter: increment counter for htt_frame_type
* pdev: DP pdev handle
@@ -680,6 +722,8 @@
qdf_mem_free(pdev->tx_capture.ptr_peer_mgmt_list);
}
+#define MAX_MSDU_THRESHOLD_TSF 100000
+#define MAX_MSDU_ENQUEUE_THRESHOLD 10000
/**
* dp_update_msdu_to_list(): Function to queue msdu from wbm
* @pdev: dp_pdev
@@ -698,6 +742,11 @@
{
struct dp_tx_tid *tx_tid;
struct msdu_completion_info *msdu_comp_info;
+ struct msdu_completion_info *ptr_msdu_info = NULL;
+ qdf_nbuf_t nbuf;
+ qdf_nbuf_t head_msdu;
+ uint32_t tsf_delta;
+ uint32_t qlen;
if (!peer) {
QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE, QDF_TRACE_LEVEL_ERROR,
@@ -754,6 +803,30 @@
/* lock here */
qdf_spin_lock_bh(&tx_tid->tid_lock);
+ while ((head_msdu = qdf_nbuf_queue_first(&tx_tid->msdu_comp_q))) {
+ ptr_msdu_info =
+ (struct msdu_completion_info *)qdf_nbuf_data(head_msdu);
+
+ if (ts->tsf > ptr_msdu_info->tsf)
+ tsf_delta = ts->tsf - ptr_msdu_info->tsf;
+ else
+ tsf_delta = ptr_msdu_info->tsf - ts->tsf;
+
+ if (tsf_delta < MAX_MSDU_THRESHOLD_TSF)
+ break;
+
+ /* free head */
+ nbuf = qdf_nbuf_queue_remove(&tx_tid->msdu_comp_q);
+ qdf_nbuf_free(nbuf);
+ }
+
+ /* get queue length */
+ qlen = qdf_nbuf_queue_len(&tx_tid->msdu_comp_q);
+ if (qlen > MAX_MSDU_ENQUEUE_THRESHOLD) {
+ /* free head */
+ nbuf = qdf_nbuf_queue_remove(&tx_tid->msdu_comp_q);
+ qdf_nbuf_free(nbuf);
+ }
/* add nbuf to tail queue per peer tid */
qdf_nbuf_queue_add(&tx_tid->msdu_comp_q, netbuf);
@@ -784,8 +857,13 @@
dp_peer_or_pdev_tx_cap_enabled(pdev, peer, peer->mac_addr.raw) &&
((ts->status == HAL_TX_TQM_RR_FRAME_ACKED) ||
(ts->status == HAL_TX_TQM_RR_REM_CMD_TX) ||
- ((ts->status == HAL_TX_TQM_RR_REM_CMD_AGED) && ts->transmit_cnt)))
+ ((ts->status == HAL_TX_TQM_RR_REM_CMD_AGED) && ts->transmit_cnt))) {
+ /* skip enqueuing OFDMA frames */
+ if (ts->ofdma)
+ return ret;
+
ret = dp_update_msdu_to_list(soc, pdev, peer, ts, desc->nbuf);
+ }
return ret;
}
@@ -1452,12 +1530,14 @@
if (qdf_unlikely(!tx_tid))
return 0;
- if (qdf_nbuf_is_queue_empty(&tx_tid->msdu_comp_q))
- return 0;
-
/* lock here */
qdf_spin_lock_bh(&tx_tid->tid_lock);
+ if (qdf_nbuf_is_queue_empty(&tx_tid->msdu_comp_q)) {
+ qdf_spin_unlock_bh(&tx_tid->tid_lock);
+ return 0;
+ }
+
curr_msdu = qdf_nbuf_queue_first(&tx_tid->msdu_comp_q);
while (curr_msdu) {
@@ -1617,7 +1697,7 @@
}
}
- return qdf_nbuf_copy_expand(mpdu, MAX_MONITOR_HEADER, 0);
+ return qdf_nbuf_copy_expand_fraglist(mpdu, MAX_MONITOR_HEADER, 0);
}
/**
@@ -1824,10 +1904,10 @@
qdf_nbuf_set_pktlen(tx_capture_info.mpdu_nbuf,
sizeof(struct ieee80211_frame_min_one));
else {
- peer = dp_peer_find_by_id(pdev->soc, user->peer_id);
+ peer = dp_tx_cap_peer_find_by_id(pdev->soc, user->peer_id);
if (peer) {
vdev = peer->vdev;
- dp_peer_unref_del_find_by_id(peer);
+ dp_tx_cap_peer_unref_del(peer);
} else {
vdev =
dp_get_vdev_from_soc_vdev_id_wifi3(pdev->soc,
@@ -1914,6 +1994,9 @@
if ((rts_send && cur_ppdu_desc->rts_success) ||
cur_ppdu_desc->mprot_type == SEND_WIFICTS2SELF_E) {
+ uint16_t peer_id;
+
+ peer_id = cur_ppdu_desc->user[0].peer_id;
/* send dummy CTS frame */
ppdu_desc->htt_frame_type = HTT_STATS_FTYPE_SGEN_CTS;
ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
@@ -1923,12 +2006,11 @@
cur_ppdu_desc->ppdu_start_timestamp;
ppdu_desc->ppdu_end_timestamp =
cur_ppdu_desc->ppdu_end_timestamp;
- ppdu_desc->user[0].peer_id = cur_ppdu_desc->user[0].peer_id;
- peer = dp_peer_find_by_id(pdev->soc,
- cur_ppdu_desc->user[0].peer_id);
+ ppdu_desc->user[0].peer_id = peer_id;
+ peer = dp_tx_cap_peer_find_by_id(pdev->soc, peer_id);
if (peer) {
vdev = peer->vdev;
- dp_peer_unref_del_find_by_id(peer);
+ dp_tx_cap_peer_unref_del(peer);
} else {
uint8_t vdev_id;
@@ -2139,9 +2221,9 @@
&xretry_ppdu->mpdu_q);
} else {
ppdu_desc->mpdus[seq_no - start_seq] =
- qdf_nbuf_copy_expand(mpdu_nbuf,
- MAX_MONITOR_HEADER,
- 0);
+ qdf_nbuf_copy_expand_fraglist(
+ mpdu_nbuf,
+ MAX_MONITOR_HEADER, 0);
mpdu_nbuf =
qdf_nbuf_queue_next(mpdu_nbuf);
}
@@ -2285,8 +2367,8 @@
ppdu_desc->user[0].start_seq + i,
ppdu_desc->ppdu_id);
ppdu_desc->mpdus[i] =
- qdf_nbuf_copy_expand(mpdu_nbuf,
- MAX_MONITOR_HEADER, 0);
+ qdf_nbuf_copy_expand_fraglist(
+ mpdu_nbuf, MAX_MONITOR_HEADER, 0);
ppdu_desc->user[0].failed_bitmap[k] |=
SEQ_SEG_MSK(ppdu_desc->user[0].failed_bitmap[k],
i);
@@ -2447,7 +2529,6 @@
wh_min->i_dur[1], wh_min->i_dur[0]);
}
-
dp_wdi_event_handler(WDI_EVENT_TX_DATA, pdev->soc,
ptr_tx_cap_info, HTT_INVALID_PEER,
WDI_NO_VAL, pdev->pdev_id);
@@ -2601,10 +2682,8 @@
if (is_sgen_pkt) {
start_tsf = (ppdu_desc->ppdu_start_timestamp &
LOWER_32_MASK);
- if ((ptr_comp_info->tx_tsf <
- (start_tsf + MAX_MGMT_ENQ_DELAY)) &&
- ((ppdu_id & SCH_ID_MASK) <
- (ppdu_desc->ppdu_id & SCH_ID_MASK))) {
+ if (ptr_comp_info->tx_tsf <
+ (start_tsf + MAX_MGMT_ENQ_DELAY)) {
/*
* free the older mgmt buffer from
* the queue and get new mgmt buffer
@@ -2861,15 +2940,15 @@
{
struct dp_peer *peer;
- peer = dp_peer_find_by_id(pdev->soc,
- ppdu_desc->user[0].peer_id);
+ peer = dp_tx_cap_peer_find_by_id(pdev->soc,
+ ppdu_desc->user[0].peer_id);
if (!peer)
return;
dp_peer_tx_cap_tid_queue_flush_tlv(pdev, peer, ppdu_desc);
- dp_peer_unref_del_find_by_id(peer);
+ dp_tx_cap_peer_unref_del(peer);
return;
}
@@ -2883,6 +2962,7 @@
if (mpdu_nbuf)
qdf_nbuf_free(mpdu_nbuf);
}
+
qdf_nbuf_queue_free(&ppdu_desc->mpdu_q);
qdf_mem_free(ppdu_desc->mpdus);
ppdu_desc->mpdus = NULL;
@@ -2918,6 +2998,7 @@
qdf_nbuf_t mpdu_nbuf;
struct dp_peer *peer;
uint8_t type;
+ uint32_t mpdus_tried;
if (!nbuf_ppdu_desc_list[desc_cnt])
continue;
@@ -2937,10 +3018,10 @@
continue;
}
- if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_CTRL ||
- ppdu_desc->htt_frame_type ==
- HTT_STATS_FTYPE_SGEN_QOS_NULL ||
- type != FRAME_CTRL_TYPE_DATA) {
+ if ((ppdu_desc->frame_type == CDP_PPDU_FTYPE_CTRL) ||
+ (ppdu_desc->htt_frame_type ==
+ HTT_STATS_FTYPE_SGEN_QOS_NULL) ||
+ (type != FRAME_CTRL_TYPE_DATA)) {
qdf_nbuf_t nbuf_ppdu = nbuf_ppdu_desc_list[desc_cnt];
if (dp_check_mgmt_ctrl_ppdu(pdev, nbuf_ppdu)) {
@@ -2953,8 +3034,8 @@
continue;
}
- peer = dp_peer_find_by_id(pdev->soc,
- ppdu_desc->user[0].peer_id);
+ peer = dp_tx_cap_peer_find_by_id(pdev->soc,
+ ppdu_desc->user[0].peer_id);
if (!peer) {
tmp_nbuf = nbuf_ppdu_desc_list[desc_cnt];
nbuf_ppdu_desc_list[desc_cnt] = NULL;
@@ -3000,24 +3081,28 @@
nbuf_ppdu_desc_list[desc_cnt] = NULL;
qdf_nbuf_queue_free(&ppdu_desc->mpdu_q);
qdf_nbuf_free(tmp_nbuf);
- dp_peer_unref_del_find_by_id(peer);
+ dp_tx_cap_peer_unref_del(peer);
continue;
}
if (qdf_unlikely(ppdu_desc->user[0].ba_size >
CDP_BA_256_BIT_MAP_SIZE_DWORDS *
SEQ_SEG_SZ_BITS(ppdu_desc->user[0].failed_bitmap))) {
- dp_peer_unref_del_find_by_id(peer);
+ dp_tx_cap_peer_unref_del(peer);
qdf_assert_always(0);
return;
}
/* Fill seq holes within current schedule list */
start_seq = ppdu_desc->user[0].start_seq;
- for (i = 0; i < ppdu_desc->user[0].ba_size; i++) {
+ mpdus_tried = ppdu_desc->user[0].mpdu_tried_mcast +
+ ppdu_desc->user[0].mpdu_tried_ucast;
+ for (i = 0; (i < ppdu_desc->user[0].ba_size) && mpdus_tried;
+ i++) {
if (qdf_likely(ppdu_desc->user[0].tid !=
DP_NON_QOS_TID) &&
!(SEQ_BIT(ppdu_desc->user[0].enq_bitmap, i)))
continue;
+ mpdus_tried--;
/* missed seq number */
seq_no = start_seq + i;
@@ -3064,7 +3149,17 @@
mpdu_nbuf;
}
}
- dp_peer_unref_del_find_by_id(peer);
+
+ /* It is possible that the received enq_bitmap has more bits set than
+ * the number of MPDUs actually tried if HW was unable to send all
+ * MPDUs; last_enq_seq and ba_size should be adjusted in that case
+ */
+ if (i < ppdu_desc->user[0].ba_size) {
+ ppdu_desc->user[0].last_enq_seq = seq_no;
+ ppdu_desc->user[0].ba_size = seq_no - start_seq + 1;
+ }
+
+ dp_tx_cap_peer_unref_del(peer);
if ((ppdu_desc->pending_retries == 0) &&
qdf_nbuf_is_queue_empty(&tx_tid->pending_ppdu_q)) {
@@ -3081,6 +3176,7 @@
struct cdp_tx_completion_ppdu *cur_ppdu_desc;
struct dp_peer *peer;
qdf_nbuf_queue_t head_ppdu;
+ uint16_t peer_id;
if (!nbuf_ppdu_desc_list[i])
continue;
@@ -3089,8 +3185,8 @@
if (!cur_ppdu_desc)
continue;
- peer = dp_peer_find_by_id(pdev->soc,
- cur_ppdu_desc->user[0].peer_id);
+ peer_id = cur_ppdu_desc->user[0].peer_id;
+ peer = dp_tx_cap_peer_find_by_id(pdev->soc, peer_id);
if (!peer) {
tmp_nbuf = nbuf_ppdu_desc_list[i];
nbuf_ppdu_desc_list[i] = NULL;
@@ -3103,7 +3199,7 @@
dp_tx_mon_proc_pending_ppdus(pdev, tx_tid,
nbuf_ppdu_desc_list + i,
ppdu_desc_cnt - i, &head_ppdu,
- cur_ppdu_desc->user[0].peer_id);
+ peer_id);
if (qdf_nbuf_is_queue_empty(&tx_tid->pending_ppdu_q)) {
while ((tmp_nbuf = qdf_nbuf_queue_first(&head_ppdu))) {
@@ -3122,7 +3218,7 @@
dp_tx_mon_proc_xretries(pdev, peer, tx_tid->tid);
- dp_peer_unref_del_find_by_id(peer);
+ dp_tx_cap_peer_unref_del(peer);
pending_ppdus = qdf_nbuf_queue_len(&tx_tid->pending_ppdu_q);
if (pending_ppdus > MAX_PENDING_PPDUS) {
QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE,
@@ -3236,6 +3332,7 @@
uint32_t num_msdu = 0;
uint32_t qlen = 0;
uint16_t peer_id;
+ uint8_t type, subtype;
qdf_nbuf_queue_init(&head_msdu);
qdf_nbuf_queue_init(&head_xretries);
@@ -3258,6 +3355,18 @@
ppdu_desc = (struct cdp_tx_completion_ppdu *)
qdf_nbuf_data(nbuf);
+ type = (ppdu_desc->frame_ctrl &
+ IEEE80211_FC0_TYPE_MASK);
+ subtype = (ppdu_desc->frame_ctrl &
+ IEEE80211_FC0_SUBTYPE_MASK);
+
+ if ((type == IEEE80211_FC0_TYPE_DATA) &&
+ (subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL) &&
+ (ppdu_desc->htt_frame_type ==
+ HTT_STATS_FTYPE_TIDQ_DATA_SU)) {
+ ppdu_desc->htt_frame_type =
+ HTT_STATS_FTYPE_SGEN_QOS_NULL;
+ }
/* send WDI event */
if (pdev->tx_capture_enabled ==
@@ -3302,11 +3411,25 @@
continue;
}
- if ((ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA) ||
+ /* Drop all types of MU frames */
+ if ((ppdu_desc->htt_frame_type ==
+ HTT_STATS_FTYPE_TIDQ_DATA_MU) ||
+ ((ppdu_desc->htt_frame_type >=
+ HTT_STATS_FTYPE_SGEN_MU_BAR) &&
+ (ppdu_desc->htt_frame_type <=
+ HTT_STATS_FTYPE_SGEN_MU_BSR))) {
+ qdf_nbuf_free(nbuf);
+ continue;
+ }
+
+ if (((ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA) &&
+ (ppdu_desc->htt_frame_type !=
+ HTT_STATS_FTYPE_SGEN_QOS_NULL)) ||
(ppdu_desc->num_mpdu &&
ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR)) {
peer_id = ppdu_desc->user[0].peer_id;
- peer = dp_peer_find_by_id(pdev->soc, peer_id);
+ peer = dp_tx_cap_peer_find_by_id(pdev->soc,
+ peer_id);
/**
* peer can be NULL
*/
@@ -3324,7 +3447,7 @@
if (peer->bss_peer ||
!dp_peer_or_pdev_tx_cap_enabled(pdev,
peer, peer->mac_addr.raw)) {
- dp_peer_unref_del_find_by_id(peer);
+ dp_tx_cap_peer_unref_del(peer);
qdf_nbuf_free(nbuf);
continue;
}
@@ -3341,7 +3464,7 @@
ppdu_desc->user[0].peer_id,
ppdu_desc->user[0].tid);
- dp_peer_unref_del_find_by_id(peer);
+ dp_tx_cap_peer_unref_del(peer);
qdf_nbuf_free(nbuf);
continue;
}
@@ -3407,12 +3530,12 @@
qdf_nbuf_queue_len(&ppdu_desc->mpdu_q);
if (!qlen) {
qdf_nbuf_free(nbuf);
- dp_peer_unref_del_find_by_id(peer);
+ dp_tx_cap_peer_unref_del(peer);
continue;
}
} else {
qdf_nbuf_free(nbuf);
- dp_peer_unref_del_find_by_id(peer);
+ dp_tx_cap_peer_unref_del(peer);
continue;
}
@@ -3435,7 +3558,7 @@
ppdu_cnt,
ppdu_desc_cnt);
- dp_peer_unref_del_find_by_id(peer);
+ dp_tx_cap_peer_unref_del(peer);
} else {
/*
* other packet frame also added to
@@ -3528,12 +3651,16 @@
mpdu_info->bw = 0;
- if (mpdu_info->channel_num < 20)
+ if (rx_status->preamble_type == HAL_RX_PKT_TYPE_11B) {
mpdu_info->preamble = DOT11_B;
- else
+ mpdu_info->mcs = CDP_LEGACY_MCS3;
+ } else if (rx_status->preamble_type == HAL_RX_PKT_TYPE_11A) {
mpdu_info->preamble = DOT11_A;
-
- mpdu_info->mcs = CDP_LEGACY_MCS3;
+ mpdu_info->mcs = CDP_LEGACY_MCS3;
+ } else {
+ mpdu_info->preamble = DOT11_A;
+ mpdu_info->mcs = CDP_LEGACY_MCS1;
+ }
}
static void dp_gen_ack_frame(struct hal_rx_ppdu_info *ppdu_info,
@@ -3564,6 +3691,7 @@
}
static void dp_gen_block_ack_frame(
+ struct hal_rx_ppdu_info *ppdu_info,
struct mon_rx_user_status *rx_user_status,
struct mon_rx_user_info *rx_user_info,
struct dp_peer *peer,
@@ -3577,7 +3705,7 @@
tid = rx_user_status->tid;
tx_tid = &peer->tx_capture.tx_tid[tid];
- if (!rx_user_info->bar_frame) {
+ if (ppdu_info->sw_frame_group_id != HAL_MPDU_SW_FRAME_GROUP_CTRL_BAR) {
tx_tid->first_data_seq_ctrl =
rx_user_status->first_data_seq_ctrl;
tx_tid->mpdu_cnt = rx_user_status->mpdu_cnt_fcs_ok +
@@ -3594,7 +3722,6 @@
rx_user_status->mpdu_fcs_ok_bitmap[0]));
}
-
wh_addr2 = (struct ieee80211_ctlframe_addr2 *)
qdf_nbuf_data(mpdu_nbuf);
@@ -3738,10 +3865,137 @@
&tx_capture_info, HTT_INVALID_PEER,
WDI_NO_VAL, pdev->pdev_id);
+ if (tx_capture_info.mpdu_nbuf)
+ qdf_nbuf_free(tx_capture_info.mpdu_nbuf);
+
return QDF_STATUS_SUCCESS;
}
/**
+ * dp_send_usr_ack_frm_to_stack(): Function to generate BA or ACK frame and
+ * send to upper layer
+ * @soc: core txrx main context
+ * @pdev: DP pdev object
+ * @ppdu_info: HAL RX PPDU info retrieved from status ring TLV
+ * @rx_status: pointer to rx status
+ * @rx_user_status: pointer to per-user rx status
+ * @rx_user_info: pointer to per-user rx info
+ *
+ * Return: none
+ */
+void dp_send_usr_ack_frm_to_stack(struct dp_soc *soc,
+ struct dp_pdev *pdev,
+ struct hal_rx_ppdu_info *ppdu_info,
+ struct mon_rx_status *rx_status,
+ struct mon_rx_user_status *rx_user_status,
+ struct mon_rx_user_info *rx_user_info)
+{
+ struct cdp_tx_indication_info tx_capture_info;
+ struct dp_peer *peer;
+ struct dp_ast_entry *ast_entry;
+ uint32_t peer_id;
+ uint32_t ast_index;
+ uint8_t *ptr_mac_addr;
+
+ if (rx_user_info->qos_control_info_valid &&
+ ((rx_user_info->qos_control &
+ IEEE80211_QOS_ACKPOLICY) >> IEEE80211_QOS_ACKPOLICY_S)
+ == IEEE80211_BAR_CTL_NOACK)
+ return;
+
+ ast_index = rx_user_status->ast_index;
+
+ if (ast_index >=
+ wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
+
+ if (ppdu_info->sw_frame_group_id ==
+ HAL_MPDU_SW_FRAME_GROUP_CTRL_BAR)
+ return;
+
+ ptr_mac_addr = &ppdu_info->nac_info.mac_addr2[0];
+ if (!dp_peer_or_pdev_tx_cap_enabled(pdev,
+ NULL, ptr_mac_addr))
+ return;
+
+ set_mpdu_info(&tx_capture_info,
+ rx_status, rx_user_status);
+ tx_capture_info.mpdu_nbuf =
+ qdf_nbuf_alloc(pdev->soc->osdev,
+ MAX_MONITOR_HEADER +
+ DP_BA_ACK_FRAME_SIZE,
+ MAX_MONITOR_HEADER,
+ 4, FALSE);
+ if (!tx_capture_info.mpdu_nbuf)
+ return;
+ dp_gen_ack_frame(ppdu_info, NULL,
+ tx_capture_info.mpdu_nbuf);
+ dp_wdi_event_handler(WDI_EVENT_TX_DATA, pdev->soc,
+ &tx_capture_info, HTT_INVALID_PEER,
+ WDI_NO_VAL, pdev->pdev_id);
+ return;
+ }
+
+ qdf_spin_lock_bh(&soc->ast_lock);
+ ast_entry = soc->ast_table[ast_index];
+ if (!ast_entry) {
+ qdf_spin_unlock_bh(&soc->ast_lock);
+ return;
+ }
+
+ peer = ast_entry->peer;
+ if (!peer || peer->peer_ids[0] == HTT_INVALID_PEER) {
+ qdf_spin_unlock_bh(&soc->ast_lock);
+ return;
+ }
+ peer_id = peer->peer_ids[0];
+ qdf_spin_unlock_bh(&soc->ast_lock);
+
+ peer = dp_peer_find_by_id(soc, peer_id);
+ if (!peer)
+ return;
+
+ if (!dp_peer_or_pdev_tx_cap_enabled(pdev, peer,
+ peer->mac_addr.raw)) {
+ dp_peer_unref_del_find_by_id(peer);
+ return;
+ }
+
+ set_mpdu_info(&tx_capture_info,
+ rx_status, rx_user_status);
+
+ tx_capture_info.mpdu_nbuf =
+ qdf_nbuf_alloc(pdev->soc->osdev,
+ MAX_MONITOR_HEADER +
+ DP_BA_ACK_FRAME_SIZE,
+ MAX_MONITOR_HEADER,
+ 4, FALSE);
+
+ if (!tx_capture_info.mpdu_nbuf) {
+ dp_peer_unref_del_find_by_id(peer);
+ return;
+ }
+
+ if (peer->rx_tid[rx_user_status->tid].ba_status == DP_RX_BA_ACTIVE ||
+ ppdu_info->sw_frame_group_id == HAL_MPDU_SW_FRAME_GROUP_CTRL_BAR) {
+ dp_gen_block_ack_frame(ppdu_info,
+ rx_user_status,
+ rx_user_info,
+ peer,
+ tx_capture_info.mpdu_nbuf);
+ tx_capture_info.mpdu_info.tid = rx_user_status->tid;
+
+ } else {
+ dp_gen_ack_frame(ppdu_info, peer,
+ tx_capture_info.mpdu_nbuf);
+ }
+
+ dp_peer_unref_del_find_by_id(peer);
+ dp_wdi_event_handler(WDI_EVENT_TX_DATA, pdev->soc,
+ &tx_capture_info, HTT_INVALID_PEER,
+ WDI_NO_VAL, pdev->pdev_id);
+
+}
+/**
* dp_send_ack_frame_to_stack(): Function to generate BA or ACK frame and
* send to upper layer on received unicast frame
* @soc: core txrx main context
@@ -3754,17 +4008,10 @@
struct dp_pdev *pdev,
struct hal_rx_ppdu_info *ppdu_info)
{
- struct cdp_tx_indication_info tx_capture_info;
- struct dp_peer *peer;
- struct dp_ast_entry *ast_entry;
- uint32_t peer_id;
struct mon_rx_status *rx_status;
struct mon_rx_user_status *rx_user_status;
struct mon_rx_user_info *rx_user_info;
- uint32_t ast_index;
uint32_t i;
- bool bar_frame;
- uint8_t *ptr_mac_addr;
rx_status = &ppdu_info->rx_status;
@@ -3787,9 +4034,7 @@
return dp_send_cts_frame_to_stack(soc, pdev, ppdu_info);
if (ppdu_info->sw_frame_group_id == HAL_MPDU_SW_FRAME_GROUP_CTRL_BAR)
- bar_frame = true;
- else
- bar_frame = false;
+ return QDF_STATUS_SUCCESS;
for (i = 0; i < ppdu_info->com_info.num_users; i++) {
if (i > OFDMA_NUM_USERS)
@@ -3798,103 +4043,72 @@
rx_user_status = &ppdu_info->rx_user_status[i];
rx_user_info = &ppdu_info->rx_user_info[i];
- rx_user_info->bar_frame = bar_frame;
-
- if (rx_user_info->qos_control_info_valid &&
- ((rx_user_info->qos_control &
- IEEE80211_QOS_ACKPOLICY) >> IEEE80211_QOS_ACKPOLICY_S)
- == IEEE80211_BAR_CTL_NOACK)
- continue;
-
- ast_index = rx_user_status->ast_index;
- if (ast_index >=
- wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
- ptr_mac_addr = &ppdu_info->nac_info.mac_addr2[0];
- if (!dp_peer_or_pdev_tx_cap_enabled(pdev,
- NULL, ptr_mac_addr))
- continue;
- set_mpdu_info(&tx_capture_info,
- rx_status, rx_user_status);
- tx_capture_info.mpdu_nbuf =
- qdf_nbuf_alloc(pdev->soc->osdev,
- MAX_MONITOR_HEADER +
- DP_BA_ACK_FRAME_SIZE,
- MAX_MONITOR_HEADER,
- 4, FALSE);
- if (!tx_capture_info.mpdu_nbuf)
- continue;
- dp_gen_ack_frame(ppdu_info, NULL,
- tx_capture_info.mpdu_nbuf);
- dp_wdi_event_handler(WDI_EVENT_TX_DATA, pdev->soc,
- &tx_capture_info, HTT_INVALID_PEER,
- WDI_NO_VAL, pdev->pdev_id);
- continue;
- }
-
- qdf_spin_lock_bh(&soc->ast_lock);
- ast_entry = soc->ast_table[ast_index];
- if (!ast_entry) {
- qdf_spin_unlock_bh(&soc->ast_lock);
- continue;
- }
-
- peer = ast_entry->peer;
- if (!peer || peer->peer_ids[0] == HTT_INVALID_PEER) {
- qdf_spin_unlock_bh(&soc->ast_lock);
- continue;
- }
- peer_id = peer->peer_ids[0];
- qdf_spin_unlock_bh(&soc->ast_lock);
-
- peer = dp_peer_find_by_id(soc, peer_id);
- if (!peer)
- continue;
-
- if (!dp_peer_or_pdev_tx_cap_enabled(pdev,
- NULL,
- peer->mac_addr.raw)) {
- dp_peer_unref_del_find_by_id(peer);
- continue;
- }
-
- set_mpdu_info(&tx_capture_info,
- rx_status, rx_user_status);
-
- tx_capture_info.mpdu_nbuf =
- qdf_nbuf_alloc(pdev->soc->osdev,
- MAX_MONITOR_HEADER +
- DP_BA_ACK_FRAME_SIZE,
- MAX_MONITOR_HEADER,
- 4, FALSE);
-
- if (!tx_capture_info.mpdu_nbuf) {
- dp_peer_unref_del_find_by_id(peer);
- return QDF_STATUS_E_NOMEM;
- }
-
- if (peer->rx_tid[rx_user_status->tid].ba_status ==
- DP_RX_BA_ACTIVE) {
- dp_gen_block_ack_frame(rx_user_status,
- rx_user_info,
- peer,
- tx_capture_info.mpdu_nbuf);
- tx_capture_info.mpdu_info.tid = rx_user_status->tid;
-
- } else {
- dp_gen_ack_frame(ppdu_info, peer,
- tx_capture_info.mpdu_nbuf);
- }
- dp_peer_unref_del_find_by_id(peer);
- dp_wdi_event_handler(WDI_EVENT_TX_DATA, pdev->soc,
- &tx_capture_info, HTT_INVALID_PEER,
- WDI_NO_VAL, pdev->pdev_id);
-
+ dp_send_usr_ack_frm_to_stack(soc, pdev, ppdu_info, rx_status,
+ rx_user_status, rx_user_info);
}
return QDF_STATUS_SUCCESS;
}
/**
+ * dp_bar_send_ack_frm_to_stack(): send BA or ACK frame
+ * to upper layers on a received BAR packet for the tx capture feature
+ *
+ * @soc: soc handle
+ * @pdev: pdev handle
+ * @nbuf: received packet
+ *
+ * Return: QDF_STATUS_SUCCESS on success
+ * others on error
+ */
+QDF_STATUS
+dp_bar_send_ack_frm_to_stack(struct dp_soc *soc,
+ struct dp_pdev *pdev,
+ qdf_nbuf_t nbuf)
+{
+ struct ieee80211_ctlframe_addr2 *wh;
+ uint8_t *frm;
+ struct hal_rx_ppdu_info *ppdu_info;
+ struct mon_rx_status *rx_status;
+ struct mon_rx_user_status *rx_user_status;
+ struct mon_rx_user_info *rx_user_info;
+ uint16_t bar_ctl;
+ uint32_t user_id;
+ uint8_t tid;
+
+ if (!nbuf)
+ return QDF_STATUS_E_INVAL;
+
+ wh = (struct ieee80211_ctlframe_addr2 *)qdf_nbuf_data(nbuf);
+
+ if (wh->i_fc[0] != (IEEE80211_FC0_VERSION_0 |
+ IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR)) {
+ return QDF_STATUS_SUCCESS;
+ }
+
+ frm = (uint8_t *)&wh[1];
+
+ bar_ctl = qdf_le16_to_cpu(*(uint16_t *)frm);
+
+ if (bar_ctl & DP_IEEE80211_BAR_CTL_POLICY_M)
+ return QDF_STATUS_SUCCESS;
+
+ tid = (bar_ctl >> DP_IEEE80211_BAR_CTL_TID_S) &
+ DP_IEEE80211_BAR_CTL_TID_M;
+
+ ppdu_info = &pdev->ppdu_info;
+ user_id = ppdu_info->rx_info.user_id;
+ rx_status = &ppdu_info->rx_status;
+ rx_user_status = &ppdu_info->rx_user_status[user_id];
+ rx_user_info = &ppdu_info->rx_user_info[user_id];
+ rx_user_status->tid = tid;
+
+ dp_send_usr_ack_frm_to_stack(soc, pdev, ppdu_info, rx_status,
+ rx_user_status, rx_user_info);
+
+ return QDF_STATUS_SUCCESS;
+}
+/**
* dp_gen_noack_frame: generate noack Action frame by using parameters
* from received NDPA frame
* @ppdu_info: pointer to ppdu_info
@@ -4040,6 +4254,9 @@
&tx_capture_info, HTT_INVALID_PEER,
WDI_NO_VAL, pdev->pdev_id);
+ if (tx_capture_info.mpdu_nbuf)
+ qdf_nbuf_free(tx_capture_info.mpdu_nbuf);
+
return QDF_STATUS_SUCCESS;
}
@@ -4066,6 +4283,9 @@
case HAL_MPDU_SW_FRAME_GROUP_CTRL_NDPA:
return dp_send_noack_frame_to_stack(soc, pdev, mon_mpdu);
+ case HAL_MPDU_SW_FRAME_GROUP_CTRL_BAR:
+ return dp_bar_send_ack_frm_to_stack(soc, pdev, mon_mpdu);
+
default:
break;
}
diff --git a/dp/wifi3.0/dp_tx_capture.h b/dp/wifi3.0/dp_tx_capture.h
index 82c1005..0511bd0 100644
--- a/dp/wifi3.0/dp_tx_capture.h
+++ b/dp/wifi3.0/dp_tx_capture.h
@@ -285,6 +285,21 @@
qdf_nbuf_t mon_mpdu);
/**
+ * dp_bar_send_ack_frm_to_stack(): send BA or ACK frame
+ * to upper layers on a received BAR packet for the tx capture feature
+ *
+ * @soc: soc handle
+ * @pdev: pdev handle
+ * @nbuf: received packet
+ *
+ * Return: QDF_STATUS_SUCCESS on success
+ * others on error
+ */
+QDF_STATUS dp_bar_send_ack_frm_to_stack(struct dp_soc *soc,
+ struct dp_pdev *pdev,
+ qdf_nbuf_t nbuf);
+
+/**
* dp_peer_set_tx_capture_enabled: Set tx_cap_enabled bit in peer
* @pdev: DP PDEV handle
* @peer: Peer handle
diff --git a/dp/wifi3.0/dp_txrx_wds.c b/dp/wifi3.0/dp_txrx_wds.c
index a597228..562180a 100644
--- a/dp/wifi3.0/dp_txrx_wds.c
+++ b/dp/wifi3.0/dp_txrx_wds.c
@@ -664,16 +664,27 @@
/**
* dp_peer_multipass_list_add: add to new multipass list
* @dp_soc: soc handle
- * @peer: peer handle
+ * @peer_mac: mac address
+ * @vdev_id: vdev id for peer
+ * @vlan_id: vlan_id
*
* return: void
*/
-static void dp_peer_multipass_list_add(struct dp_soc *soc, struct dp_peer *peer)
+static void dp_peer_multipass_list_add(struct dp_soc *soc, uint8_t *peer_mac,
+ uint8_t vdev_id, uint16_t vlan_id)
{
+ struct dp_peer *peer =
+ dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id);
+
+ if (qdf_unlikely(!peer)) {
+ qdf_err("NULL peer");
+ return;
+ }
/*
* Ref_cnt is incremented inside dp_peer_find_hash_find().
* Decrement it when element is deleted from the list.
*/
+ peer->vlan_id = vlan_id;
qdf_spin_lock_bh(&peer->vdev->mpass_peer_mutex);
TAILQ_INSERT_HEAD(&peer->vdev->mpass_peer_list, peer,
mpass_peer_list_elem);
@@ -683,6 +694,7 @@
/**
* dp_peer_set_vlan_id: set vlan_id for this peer
* @cdp_soc: soc handle
+ * @vdev_id: vdev id for peer
* @peer_mac: mac address
* @vlan_id: vlan id for peer
*
@@ -693,7 +705,6 @@
uint16_t vlan_id)
{
struct dp_soc *soc = (struct dp_soc *)cdp_soc;
- struct dp_peer *peer = NULL;
struct dp_vdev *vdev =
dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
vdev_id);
@@ -701,21 +712,7 @@
if (!vdev || !vdev->multipass_en)
return;
- peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev->vdev_id);
-
- if (qdf_unlikely(!peer)) {
- qdf_err("NULL peer");
- return;
- }
-
- peer->vlan_id = vlan_id;
-
- dp_peer_multipass_list_add(soc, peer);
-
- /* Ref_cnt is incremented inside dp_peer_find_hash_find().
- * Decrement it here.
- */
- dp_peer_unref_delete(peer);
+ dp_peer_multipass_list_add(soc, peer_mac, vdev_id, vlan_id);
}
/**
diff --git a/target_if/cfr/src/target_if_cfr_6018.c b/target_if/cfr/src/target_if_cfr_6018.c
index 101810f..7ffc443 100644
--- a/target_if/cfr/src/target_if_cfr_6018.c
+++ b/target_if/cfr/src/target_if_cfr_6018.c
@@ -16,6 +16,8 @@
* PERFORMANCE OF THIS SOFTWARE.
*/
+#ifdef WLAN_ENH_CFR_ENABLE
+
#include <target_if_cfr.h>
#include <wlan_tgt_def_config.h>
#include <target_type.h>
@@ -1616,3 +1618,6 @@
return status;
}
+
+#endif /* WLAN_ENH_CFR_ENABLE */
+
diff --git a/tools/linux/peerstats.c b/tools/linux/peerstats.c
index 1689cd7..0cd1a65 100644
--- a/tools/linux/peerstats.c
+++ b/tools/linux/peerstats.c
@@ -260,7 +260,11 @@
sojourn_stats->sum_sojourn_msdu[tid],
sojourn_stats->num_msdus[tid]);
}
- PRINT("sizeof(avg): %"PRIu32,
+#ifdef __LP64__
+ PRINT("sizeof(avg) : %"PRIu64,
+#else
+ PRINT("sizeof(avg) : %"PRIu32,
+#endif
sizeof(sojourn_stats->avg_sojourn_msdu[tid]));
PRINT("\n...........................................");
PRINT("...................................");
diff --git a/umac/dfs/core/src/misc/dfs_zero_cac.c b/umac/dfs/core/src/misc/dfs_zero_cac.c
index 02dd68f..270523e 100644
--- a/umac/dfs/core/src/misc/dfs_zero_cac.c
+++ b/umac/dfs/core/src/misc/dfs_zero_cac.c
@@ -1812,7 +1812,7 @@
if (detector_id == AGILE_DETECTOR_ID) {
dfs_prepare_agile_precac_chan(dfs);
} else {
- dfs->dfs_agile_precac_freq = 0;
+ dfs->dfs_agile_precac_freq_mhz = 0;
dfs_soc_obj->precac_state_started = PRECAC_NOT_STARTED;
}
}
@@ -2863,9 +2863,146 @@
PRECAC_LIST_LOCK_DESTROY(dfs);
}
+/**
+ * dfs_is_pcac_required_for_freq() - Find whether preCAC is required for the
+ * given frequency.
+ * @node: Pointer to the preCAC tree node in which the frequency is present.
+ * @freq: Frequency to be checked.
+ *
+ * Return: False if the frequency is already fully CAC done or has NOL
+ * subchannels, else true.
+ */
+static bool
+dfs_is_pcac_required_for_freq(struct precac_tree_node *node, uint16_t freq)
+{
+ while (node) {
+ if (node->ch_freq == freq) {
+ if ((node->n_caced_subchs ==
+ N_SUBCHS_FOR_BANDWIDTH(node->bandwidth)) ||
+ (node->n_nol_subchs))
+ return false;
+ else
+ return true;
+ }
+ node = dfs_descend_precac_tree_for_freq(node, freq);
+ }
+ return false;
+}
+
+#define DFS_160MHZ_SECSEG_CHAN_FREQ_OFFSET 40
+#ifdef CONFIG_CHAN_NUM_API
+/**
+ * dfs_get_num_cur_subchans_in_node() - Get the number of excluded subchannels
+ * inside the current node.
+ * @dfs: Pointer to wlan_dfs structure.
+ * @node: Node to be checked.
+ *
+ * Return: Number of excluded subchannels (current operating subchannels in
+ * CAC) that fall within the given tree node's range.
+ */
+static uint8_t
+dfs_get_num_cur_subchans_in_node(struct wlan_dfs *dfs,
+ struct precac_tree_node *node)
+{
+ uint16_t exclude_pri_ch_freq, exclude_sec_ch_freq, n_exclude_subchs = 0;
+ uint8_t chwidth_val = DFS_CHWIDTH_80_VAL;
+ struct dfs_channel *curchan = dfs->dfs_curchan;
+
+ exclude_pri_ch_freq =
+ utils_dfs_chan_to_freq(curchan->dfs_ch_vhtop_ch_freq_seg1);
+ exclude_sec_ch_freq =
+ utils_dfs_chan_to_freq(curchan->dfs_ch_vhtop_ch_freq_seg2);
+ if (WLAN_IS_CHAN_MODE_160(curchan)) {
+ if (exclude_sec_ch_freq < exclude_pri_ch_freq)
+ exclude_sec_ch_freq -=
+ DFS_160MHZ_SECSEG_CHAN_FREQ_OFFSET;
+ else
+ exclude_sec_ch_freq +=
+ DFS_160MHZ_SECSEG_CHAN_FREQ_OFFSET;
+ }
+
+ if (WLAN_IS_CHAN_MODE_20(curchan))
+ chwidth_val = DFS_CHWIDTH_20_VAL;
+ else if (WLAN_IS_CHAN_MODE_40(curchan))
+ chwidth_val = DFS_CHWIDTH_40_VAL;
+
+ /* Check if the channel is a subset of the tree node and if it's
+ * currently in CAC period. This is to avoid excluding channels twice,
+ * one below and one in the already CACed channels exclusion (in the
+ * caller API). */
+ if (IS_WITHIN_RANGE(exclude_pri_ch_freq,
+ node->ch_freq,
+ (node->bandwidth / 2)) &&
+ dfs_is_pcac_required_for_freq(node, exclude_pri_ch_freq))
+ n_exclude_subchs += N_SUBCHS_FOR_BANDWIDTH(chwidth_val);
+ if (IS_WITHIN_RANGE(exclude_sec_ch_freq,
+ node->ch_freq,
+ (node->bandwidth / 2)) &&
+ dfs_is_pcac_required_for_freq(node, exclude_sec_ch_freq))
+ n_exclude_subchs += N_SUBCHS_FOR_BANDWIDTH(chwidth_val);
+ return n_exclude_subchs;
+}
+#endif
+
+#ifdef CONFIG_CHAN_FREQ_API
+/**
+ * dfs_get_num_cur_subchans_in_node_freq() - Get the number of excluded
+ * subchannels inside the current node.
+ * @dfs: Pointer to wlan_dfs structure.
+ * @node: Node to be checked.
+ *
+ * Return: Number of excluded subchannels (current operating subchannels in
+ * CAC) that fall within the given tree node's range.
+ */
+static uint8_t
+dfs_get_num_cur_subchans_in_node_freq(struct wlan_dfs *dfs,
+ struct precac_tree_node *node)
+{
+ uint16_t exclude_pri_ch_freq, exclude_sec_ch_freq;
+ uint8_t chwidth_val = DFS_CHWIDTH_80_VAL;
+ uint8_t n_exclude_subchs = 0;
+
+ exclude_pri_ch_freq =
+ dfs->dfs_curchan->dfs_ch_mhz_freq_seg1;
+ exclude_sec_ch_freq =
+ dfs->dfs_curchan->dfs_ch_mhz_freq_seg2;
+ if (WLAN_IS_CHAN_MODE_160(dfs->dfs_curchan)) {
+ if (exclude_sec_ch_freq < exclude_pri_ch_freq)
+ exclude_sec_ch_freq -=
+ DFS_160MHZ_SECSEG_CHAN_OFFSET;
+ else
+ exclude_sec_ch_freq +=
+ DFS_160MHZ_SECSEG_CHAN_OFFSET;
+ }
+
+ if (WLAN_IS_CHAN_MODE_20(dfs->dfs_curchan))
+ chwidth_val = DFS_CHWIDTH_20_VAL;
+ else if (WLAN_IS_CHAN_MODE_40(dfs->dfs_curchan))
+ chwidth_val = DFS_CHWIDTH_40_VAL;
+
+ /* Check if the channel is a subset of the tree node and if it's
+ * currently in CAC period. This is to avoid excluding channels twice,
+ * one below and one in the already CACed channels exclusion (in the
+ * caller API). */
+ if (IS_WITHIN_RANGE(exclude_pri_ch_freq,
+ node->ch_freq,
+ (node->bandwidth / 2)) &&
+ dfs_is_pcac_required_for_freq(node, exclude_pri_ch_freq))
+ n_exclude_subchs += N_SUBCHS_FOR_BANDWIDTH(chwidth_val);
+ if (IS_WITHIN_RANGE(exclude_sec_ch_freq,
+ node->ch_freq,
+ (node->bandwidth / 2)) &&
+ dfs_is_pcac_required_for_freq(node, exclude_sec_ch_freq))
+ n_exclude_subchs += N_SUBCHS_FOR_BANDWIDTH(chwidth_val);
+ return n_exclude_subchs;
+}
+#endif
+
+#ifdef CONFIG_CHAN_NUM_API
/* dfs_is_cac_needed_for_bst_node() - For a requested bandwidth, find
* if the current preCAC BSTree node needs
* CAC.
+ * @dfs: Pointer to wlan_dfs structure.
* @node: Node to be checked.
* @req_bandwidth: bandwidth of channel requested.
*
@@ -2874,33 +3011,84 @@
* for the node which is not CAC done, else false.
*/
static bool
-dfs_is_cac_needed_for_bst_node(struct precac_tree_node *node,
+dfs_is_cac_needed_for_bst_node(struct wlan_dfs *dfs,
+ struct precac_tree_node *node,
uint8_t req_bandwidth)
{
- uint8_t n_subchs_for_req_bw, non_nol_subchs;
+ uint8_t n_subchs_for_req_bw, n_allowed_subchs, n_excluded_subchs;
if (!node)
return false;
/* Find the number of subchannels for the requested bandwidth */
+ n_excluded_subchs = dfs_get_num_cur_subchans_in_node(dfs, node);
n_subchs_for_req_bw = N_SUBCHS_FOR_BANDWIDTH(req_bandwidth);
- non_nol_subchs = node->n_valid_subchs - node->n_nol_subchs;
+ n_allowed_subchs = node->n_valid_subchs -
+ (node->n_nol_subchs + n_excluded_subchs);
/* Return false if,
- * 1. Number of non-NOL subchannels in the current node is less than
- * the requested number of subchannels.
- * 2. All the subchannels of the node are CAC done.
- * 3. If the number CAC done subchannels + NOL subchannels in the
- * current node is equal to number of valid subchannels in the node.
+ * 1. Number of allowed subchannels (all subchannels other than
+ * current operating sub-channels and NOL sub-channels) in the
+ * current node is less than the requested number of subchannels.
+ * 2. If the number of CAC done subchannels + NOL subchannels + current
+ * operating subchannels in the current node is equal to the number of
+ * valid subchannels in the node.
* else, return true.
*/
- if ((non_nol_subchs < n_subchs_for_req_bw) ||
- (node->n_caced_subchs == node->n_valid_subchs) ||
- (node->n_caced_subchs + node->n_nol_subchs == node->n_valid_subchs))
+ if ((n_allowed_subchs < n_subchs_for_req_bw) ||
+ ((node->n_caced_subchs + node->n_nol_subchs + n_excluded_subchs) ==
+ node->n_valid_subchs))
return false;
return true;
}
+#endif
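The allowed-subchannel arithmetic in dfs_is_cac_needed_for_bst_node() is easiest to see with numbers; a small worked sketch with purely illustrative values:

#include <stdint.h>

/* Worked example of the allowed-subchannel check (illustrative only). */
static uint8_t example_allowed_subchs(void)
{
	uint8_t n_valid_subchs    = 4; /* 80 MHz node: four 20 MHz subchannels */
	uint8_t n_nol_subchs      = 1; /* one subchannel in NOL                */
	uint8_t n_excluded_subchs = 2; /* current 40 MHz operating channel     */

	/* 4 - (1 + 2) = 1 allowed subchannel: a 40 MHz request (2 subchannels)
	 * is rejected, while a 20 MHz request still passes because
	 * caced(0) + nol(1) + excluded(2) != valid(4).
	 */
	return n_valid_subchs - (n_nol_subchs + n_excluded_subchs);
}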
+
+#ifdef CONFIG_CHAN_FREQ_API
+/* dfs_is_cac_needed_for_bst_node_for_freq() - For a requested bandwidth, find
+ * if the current preCAC BSTree
+ * node needs CAC.
+ * @dfs: Pointer to wlan_dfs struct.
+ * @node: Node to be checked.
+ * @req_bandwidth: bandwidth of channel requested.
+ *
+ * Return: TRUE/FALSE.
+ * Return true if there exists a channel of the requested bandwidth
+ * for the node which is not CAC done, else false.
+ */
+static bool
+dfs_is_cac_needed_for_bst_node_for_freq(struct wlan_dfs *dfs,
+ struct precac_tree_node *node,
+ uint8_t req_bandwidth)
+{
+ uint8_t n_subchs_for_req_bw, n_allowed_subchs, n_excluded_subchs;
+
+ if (!node)
+ return false;
+
+ /* Find the number of subchannels for the requested bandwidth */
+ n_excluded_subchs = dfs_get_num_cur_subchans_in_node_freq(dfs, node);
+ n_subchs_for_req_bw = N_SUBCHS_FOR_BANDWIDTH(req_bandwidth);
+ n_allowed_subchs = node->n_valid_subchs -
+ (node->n_nol_subchs + n_excluded_subchs);
+
+ /* Return false if,
+ * 1. Number of allowed subchannels (all subchannels other than
+ * current operating sub-channels and NOL sub-channels) in the
+ * current node is less than the requested number of subchannels.
+ * 2. If the number of CAC done subchannels + NOL subchannels + current
+ * operating subchannels in the current node is equal to the number of
+ * valid subchannels in the node.
+ * else, return true.
+ */
+ if ((n_allowed_subchs < n_subchs_for_req_bw) ||
+ ((node->n_caced_subchs + node->n_nol_subchs + n_excluded_subchs) ==
+ node->n_valid_subchs))
+ return false;
+
+ return true;
+}
+#endif
/* dfs_find_ieee_ch_from_precac_tree() - from the given preCAC tree, find a IEEE
* channel of the given bandwidth which
@@ -2914,19 +3102,22 @@
*/
#ifdef CONFIG_CHAN_NUM_API
static uint8_t
-dfs_find_ieee_ch_from_precac_tree(struct precac_tree_node *root,
+dfs_find_ieee_ch_from_precac_tree(struct wlan_dfs *dfs,
+ struct precac_tree_node *root,
uint8_t req_bw)
{
struct precac_tree_node *curr_node;
- if (!dfs_is_cac_needed_for_bst_node(root, req_bw))
+ if (!dfs_is_cac_needed_for_bst_node(dfs, root, req_bw))
return 0;
curr_node = root;
while (curr_node) {
if (curr_node->bandwidth == req_bw) {
/* find if current node in valid state (req.) */
- if (dfs_is_cac_needed_for_bst_node(curr_node, req_bw))
+ if (dfs_is_cac_needed_for_bst_node(dfs,
+ curr_node,
+ req_bw))
return curr_node->ch_ieee;
else
return 0;
@@ -2935,7 +3126,8 @@
/* Find if we need to go to left or right subtree.
* Note: If both are available, go to left.
*/
- if (!dfs_is_cac_needed_for_bst_node(curr_node->left_child,
+ if (!dfs_is_cac_needed_for_bst_node(dfs,
+ curr_node->left_child,
req_bw))
curr_node = curr_node->right_child;
else
@@ -2958,19 +3150,22 @@
*/
#ifdef CONFIG_CHAN_FREQ_API
static uint16_t
-dfs_find_ieee_ch_from_precac_tree_for_freq(struct precac_tree_node *root,
+dfs_find_ieee_ch_from_precac_tree_for_freq(struct wlan_dfs *dfs,
+ struct precac_tree_node *root,
uint8_t req_bw)
{
struct precac_tree_node *curr_node;
- if (!dfs_is_cac_needed_for_bst_node(root, req_bw))
+ if (!dfs_is_cac_needed_for_bst_node_for_freq(dfs, root, req_bw))
return 0;
curr_node = root;
while (curr_node) {
if (curr_node->bandwidth == req_bw) {
/* find if current node in valid state (req.) */
- if (dfs_is_cac_needed_for_bst_node(curr_node, req_bw))
+ if (dfs_is_cac_needed_for_bst_node_for_freq(dfs,
+ curr_node,
+ req_bw))
return curr_node->ch_freq;
else
return 0;
@@ -2979,8 +3174,10 @@
/* Find if we need to go to left or right subtree.
* Note: If both are available, go to left.
*/
- if (!dfs_is_cac_needed_for_bst_node(curr_node->left_child,
- req_bw))
+ if (!dfs_is_cac_needed_for_bst_node_for_freq(
+ dfs,
+ curr_node->left_child,
+ req_bw))
curr_node = curr_node->right_child;
else
curr_node = curr_node->left_child;
@@ -3011,13 +3208,11 @@
pe_list) {
root = precac_entry->tree_root;
ieee_chan =
- dfs_find_ieee_ch_from_precac_tree(root,
+ dfs_find_ieee_ch_from_precac_tree(dfs,
+ root,
bandwidth);
- if (ieee_chan &&
- (ieee_chan != exclude_pri_ch_ieee) &&
- (ieee_chan != exclude_sec_ch_ieee))
+ if (ieee_chan)
break;
- ieee_chan = 0;
}
}
PRECAC_LIST_UNLOCK(dfs);
@@ -3056,13 +3251,11 @@
pe_list) {
root = precac_entry->tree_root;
ieee_chan_freq =
- dfs_find_ieee_ch_from_precac_tree_for_freq(root,
+ dfs_find_ieee_ch_from_precac_tree_for_freq(dfs,
+ root,
bw);
- if (ieee_chan_freq &&
- (ieee_chan_freq != exclude_pri_ch_freq) &&
- (ieee_chan_freq != exclude_sec_ch_freq))
+ if (ieee_chan_freq)
break;
- ieee_chan_freq = 0;
}
}
PRECAC_LIST_UNLOCK(dfs);
diff --git a/umac/mlme/vdev_mgr/core/src/vdev_mlme_sm_actions.c b/umac/mlme/vdev_mgr/core/src/vdev_mlme_sm_actions.c
index c58567a..fe04d5d 100644
--- a/umac/mlme/vdev_mgr/core/src/vdev_mlme_sm_actions.c
+++ b/umac/mlme/vdev_mgr/core/src/vdev_mlme_sm_actions.c
@@ -263,50 +263,6 @@
wlan_util_change_map_index(send_array, wlan_vdev_get_id(vdev), 0);
}
-static void mlme_dispatch_mvr_req_fail(struct pdev_mlme_obj *pdev_mlme)
-{
- uint32_t vdev_id;
- uint32_t max_vdevs = 0;
- struct wlan_objmgr_pdev *pdev;
- struct wlan_objmgr_vdev *vdev;
-
- if (!pdev_mlme) {
- mlme_err("PDEV_MLME is NULL");
- return;
- }
-
- pdev = pdev_mlme->pdev;
- if (!pdev) {
- mlme_err("PDEV is NULL");
- return;
- }
-
- max_vdevs = wlan_psoc_get_max_vdev_count(wlan_pdev_get_psoc(pdev));
- for (vdev_id = 0; vdev_id < max_vdevs; vdev_id++) {
- if (!wlan_util_map_index_is_set(
- pdev_mlme->restart_send_vdev_bmap,
- vdev_id)) {
- continue;
- }
-
- vdev = wlan_objmgr_get_vdev_by_id_from_pdev(
- pdev, vdev_id, WLAN_MLME_NB_ID);
- if (!vdev) {
- mlme_err("objmgr vdev not found for vdev %u", vdev_id);
- continue;
- }
-
- mlme_err("Multivdev restart request failed vdev:%u", vdev_id);
- wlan_vdev_mlme_sm_deliver_evt(vdev,
- WLAN_VDEV_SM_EV_RESTART_REQ_FAIL,
- 0, NULL);
-
- wlan_util_change_map_index(pdev_mlme->restart_send_vdev_bmap,
- vdev_id, 0);
- wlan_objmgr_vdev_release_ref(vdev, WLAN_MLME_NB_ID);
- }
-}
-
static void mlme_restart_req_timer_start(struct pdev_mlme_obj *pdev_mlme)
{
qdf_timer_mod(&pdev_mlme->restart_req_timer, 100);
@@ -317,10 +273,9 @@
qdf_timer_stop(&pdev_mlme->restart_req_timer);
}
-static QDF_STATUS mlme_multivdev_restart(struct pdev_mlme_obj *pdev_mlme)
+static void mlme_multivdev_restart(struct pdev_mlme_obj *pdev_mlme)
{
struct wlan_objmgr_pdev *pdev;
- QDF_STATUS status = QDF_STATUS_SUCCESS;
pdev = pdev_mlme->pdev;
@@ -347,8 +302,7 @@
pdev_mlme->restart_send_vdev_bmap, 0,
WLAN_MLME_NB_ID);
else
- status = mlme_vdev_ops_multivdev_restart_fw_cmd_send(
- pdev);
+ mlme_vdev_ops_multivdev_restart_fw_cmd_send(pdev);
if (pdev_mlme->start_send_vdev_arr[0] ||
pdev_mlme->start_send_vdev_arr[1]) {
@@ -361,18 +315,16 @@
} else {
mlme_restart_req_timer_start(pdev_mlme);
}
-
- return status;
}
#define MULTIVDEV_RESTART_MAX_RETRY_CNT 100
-static void mlme_restart_req_timeout(void *arg)
+static os_timer_func(mlme_restart_req_timeout)
{
unsigned long restart_pend_vdev_bmap[2];
struct wlan_objmgr_pdev *pdev;
- struct pdev_mlme_obj *pdev_mlme = (struct pdev_mlme_obj *)arg;
- QDF_STATUS status = QDF_STATUS_SUCCESS;
+ struct pdev_mlme_obj *pdev_mlme;
+ OS_GET_TIMER_ARG(pdev_mlme, struct pdev_mlme_obj *);
pdev = pdev_mlme->pdev;
@@ -398,12 +350,9 @@
pdev_mlme->restart_pend_vdev_bmap[1])))
mlme_restart_req_timer_start(pdev_mlme);
else
- status = mlme_multivdev_restart(pdev_mlme);
+ mlme_multivdev_restart(pdev_mlme);
}
qdf_spin_unlock_bh(&pdev_mlme->vdev_restart_lock);
-
- if (status != QDF_STATUS_SUCCESS)
- mlme_dispatch_mvr_req_fail(pdev_mlme);
}
static QDF_STATUS mlme_vdev_restart_is_allowed(struct wlan_objmgr_pdev *pdev,
@@ -457,12 +406,22 @@
wlan_util_change_map_index(pdev_mlme->restart_send_vdev_bmap,
wlan_vdev_get_id(vdev), 1);
- /* On timer expiry, check any pending vdev has gone
- * down, then enable thats vdev bit, if pending vdev
- * is still in valid, then restart the timer
+ /* If all vdev id bits are enabled, start vdev restart for all
+ * vdevs, otherwise, start timer and return
*/
- mlme_restart_req_timer_start(pdev_mlme);
- status = QDF_STATUS_E_FAILURE;
+ if (!pdev_mlme->restart_pend_vdev_bmap[0] &&
+ !pdev_mlme->restart_pend_vdev_bmap[1]) {
+ mlme_restart_req_timer_stop(pdev_mlme);
+ mlme_multivdev_restart(pdev_mlme);
+ status = QDF_STATUS_E_FAILURE;
+ } else {
+ /* On timer expiry, check whether any pending vdev has gone
+ * down and enable that vdev's bit; if a pending vdev is
+ * still valid, restart the timer
+ */
+ mlme_restart_req_timer_start(pdev_mlme);
+ status = QDF_STATUS_E_FAILURE;
+ }
}
qdf_spin_unlock_bh(&pdev_mlme->vdev_restart_lock);