Merge "qca-wifi: Extract msdu end TLV information into structure"
diff --git a/configs/wlan_cfg/ap_wlan_cfg.config b/configs/wlan_cfg/ap_wlan_cfg.config
index b3751f5..3c2d0d3 100644
--- a/configs/wlan_cfg/ap_wlan_cfg.config
+++ b/configs/wlan_cfg/ap_wlan_cfg.config
@@ -22,9 +22,15 @@
EXTRA_CFLAGS += -DCONFIG_PROCESS_RX_STATUS=0
EXTRA_CFLAGS += -DCONFIG_PROCESS_TX_STATUS=0
EXTRA_CFLAGS += -DWLAN_CFG_MAC_PER_TARGET=3
-ifeq ($(strip ${QCA_WIFI_QCA8074_VP}),1)
+ifeq ($(strip ${CONFIG_WIFI_EMULATION_WIFI_3_0}),1)
EXTRA_CFLAGS += -DWLAN_CFG_NUM_TX_DESC=0x2000
else
EXTRA_CFLAGS += -DWLAN_CFG_NUM_TX_DESC=0x320000
endif
+ifdef NO_RX_PKT_HDR_TLV
+#RX_DATA_BUFFER_SIZE = 1536 data bytes + 256 RX TLV bytes. We avoid the
+#128-byte RX_PKT_HEADER_TLV.
+EXTRA_CFLAGS += -DRX_DATA_BUFFER_SIZE=1792
+EXTRA_CFLAGS += -DRX_DATA_BUFFER_ALIGNMENT=0
+endif
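For context on the sizing above, a rough derivation sketch; the standalone macro names below are illustrative rather than taken from the build files, and the 384-byte full-TLV figure is an assumption implied by the comment:

    /* Illustrative only: 1536 data bytes plus the RX TLV area with the
     * 128-byte RX_PKT_HEADER_TLV skipped (assumed 384 - 128 = 256):
     *   1536 + 256 = 1792 == RX_DATA_BUFFER_SIZE
     */
    #define RX_DATA_BYTES        1536
    #define RX_TLV_BYTES          256
    #define RX_DATA_BUFFER_SIZE  (RX_DATA_BYTES + RX_TLV_BYTES)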
diff --git a/dp/inc/dp_rate_stats.h b/dp/inc/dp_rate_stats.h
index 2ad7394..b840c57 100644
--- a/dp/inc/dp_rate_stats.h
+++ b/dp/inc/dp_rate_stats.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -84,7 +84,6 @@
struct wlan_peer_rx_rate_stats rx;
uint8_t mac_addr[WLAN_MAC_ADDR_LEN];
uint64_t peer_cookie;
- struct cdp_pdev *pdev;
uint8_t pdev_id;
};
diff --git a/dp/wifi3.0/dp_rx_mon_feature.c b/dp/wifi3.0/dp_rx_mon_feature.c
index 903a003..121714e 100644
--- a/dp/wifi3.0/dp_rx_mon_feature.c
+++ b/dp/wifi3.0/dp_rx_mon_feature.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -28,6 +28,8 @@
#include "dp_internal.h"
#include "qdf_mem.h" /* qdf_mem_malloc,free */
#include "wlan_cfg.h"
+#include "dp_htt.h"
+#include "dp_mon_filter.h"
#ifdef WLAN_RX_PKT_CAPTURE_ENH
@@ -597,13 +599,13 @@
* Return: 0 for success, nonzero for failure.
*/
QDF_STATUS
-dp_config_enh_rx_capture(struct cdp_pdev *pdev_handle, uint32_t val)
+dp_config_enh_rx_capture(struct dp_pdev *pdev, uint32_t val)
{
- struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
uint8_t rx_cap_mode = (val & CDP_RX_ENH_CAPTURE_MODE_MASK);
uint32_t rx_enh_capture_peer;
bool is_mpdu_hdr = false;
uint8_t user_id;
+ enum dp_mon_filter_action action = DP_MON_FILTER_SET;
rx_enh_capture_peer =
(val & CDP_RX_ENH_CAPTURE_PEER_MASK)
@@ -620,17 +622,34 @@
return QDF_STATUS_E_INVAL;
}
+ if ((pdev->rx_enh_capture_mode == CDP_RX_ENH_CAPTURE_DISABLED) &&
+ (rx_cap_mode == CDP_RX_ENH_CAPTURE_DISABLED)) {
+ dp_err("Rx capture is already disabled %d", rx_cap_mode);
+ return QDF_STATUS_E_INVAL;
+ }
+
+ /*
+	 * Store the monitor vdev if present. It will be restored when
+	 * the Rx enhanced capture mode is disabled.
+ */
if (pdev->rx_enh_capture_mode == CDP_RX_ENH_CAPTURE_DISABLED &&
rx_cap_mode != CDP_RX_ENH_CAPTURE_DISABLED) {
pdev->rx_enh_monitor_vdev = pdev->monitor_vdev;
}
- dp_reset_monitor_mode(pdev_handle);
+ /*
+	 * Disable the monitor mode; it is re-enabled later if enhanced
+	 * capture gets enabled.
+ */
+ dp_reset_monitor_mode((struct cdp_soc_t *)pdev->soc, pdev->pdev_id, 0);
if (pdev->rx_enh_capture_mode != CDP_RX_ENH_CAPTURE_DISABLED &&
rx_cap_mode == CDP_RX_ENH_CAPTURE_DISABLED) {
pdev->monitor_vdev = pdev->rx_enh_monitor_vdev;
+ pdev->rx_enh_monitor_vdev = NULL;
+ action = DP_MON_FILTER_CLEAR;
}
+
pdev->rx_enh_capture_mode = rx_cap_mode;
pdev->rx_enh_capture_peer = rx_enh_capture_peer;
@@ -643,7 +662,31 @@
/* Use a bit from val to enable MSDU trailer for internal debug use */
pdev->is_rx_enh_capture_trailer_enabled =
(val & RX_ENH_CAPTURE_TRAILER_ENABLE_MASK) ? true : false;
- return dp_pdev_configure_monitor_rings(pdev);
+
+ /*
+	 * Restore the monitor filters if monitor mode was previously enabled.
+ */
+ if (pdev->monitor_vdev) {
+ pdev->monitor_configured = true;
+ dp_mon_filter_setup_mon_mode(pdev);
+ }
+
+ /*
+	 * Set up the Rx enhanced capture filters when the capture is being
+	 * enabled, and reset them once the Rx enhanced capture is
+	 * disabled.
+ */
+ if (action == DP_MON_FILTER_SET)
+ dp_mon_filter_setup_rx_enh_capture(pdev);
+ else
+ dp_mon_filter_reset_rx_enh_capture(pdev);
+
+ return dp_mon_filter_update(pdev);
}
+void
+dp_peer_set_rx_capture_enabled(struct dp_peer *peer, bool value)
+{
+ peer->rx_cap_enabled = value;
+}
#endif /* WLAN_RX_PKT_CAPTURE_ENH */
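For reference, a minimal sketch of how the user-provided val is decoded by dp_config_enh_rx_capture() above; CDP_RX_ENH_CAPTURE_MODE_MASK, CDP_RX_ENH_CAPTURE_PEER_MASK and RX_ENH_CAPTURE_TRAILER_ENABLE_MASK appear in the code, while the peer-field shift constant is an assumed name:

    /* Hypothetical decode of val (shift constant name assumed) */
    uint8_t mode = val & CDP_RX_ENH_CAPTURE_MODE_MASK;
    uint32_t peer_based = (val & CDP_RX_ENH_CAPTURE_PEER_MASK) >>
                          CDP_RX_ENH_CAPTURE_PEER_SHIFT;
    bool trailer = !!(val & RX_ENH_CAPTURE_TRAILER_ENABLE_MASK);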
diff --git a/dp/wifi3.0/dp_rx_mon_feature.h b/dp/wifi3.0/dp_rx_mon_feature.h
index 0d27222..f21135a 100644
--- a/dp/wifi3.0/dp_rx_mon_feature.h
+++ b/dp/wifi3.0/dp_rx_mon_feature.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -66,12 +66,22 @@
/*
* dp_config_enh_rx_capture() - API to enable/disable enhanced rx capture
- * @pdev_handle: DP_PDEV handle
+ * @pdev: DP_PDEV handle
* @val: user provided value
*
* Return: 0 for success, nonzero for failure.
*/
QDF_STATUS
-dp_config_enh_rx_capture(struct cdp_pdev *pdev_handle, uint8_t val);
+dp_config_enh_rx_capture(struct dp_pdev *pdev, uint32_t val);
+
+/**
+ * dp_peer_set_rx_capture_enabled: Set rx_cap_enabled bit in peer
+ * @peer: Peer handle
+ * @value: Enable/disable setting for rx_cap_enabled
+ *
+ * Return: None
+ */
+void
+dp_peer_set_rx_capture_enabled(struct dp_peer *peer, bool value);
#endif /* WLAN_RX_PKT_CAPTURE_ENH */
#endif /* _DP_RX_MON_FEATURE_H_ */
diff --git a/dp/wifi3.0/dp_rx_tag.c b/dp/wifi3.0/dp_rx_tag.c
index fbf2518..0b3f600 100644
--- a/dp/wifi3.0/dp_rx_tag.c
+++ b/dp/wifi3.0/dp_rx_tag.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -321,15 +321,14 @@
/**
* dp_summarize_tag_stats - sums up the given protocol type's counters
* across all the rings and dumps the same
- * @pdev_handle: cdp_pdev handle
+ * @pdev: dp_pdev handle
* @protocol_type: protocol type for which stats should be displayed
*
* Return: aggregate tag count across all rings for the given protocol type
*/
-uint64_t dp_summarize_tag_stats(struct cdp_pdev *pdev_handle,
+uint64_t dp_summarize_tag_stats(struct dp_pdev *pdev,
uint16_t protocol_type)
{
- struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
uint8_t ring_idx;
uint64_t total_tag_cnt = 0;
@@ -348,16 +347,23 @@
/**
* dp_dump_pdev_rx_protocol_tag_stats - dump the number of packets tagged for
* given protocol type (RX_PROTOCOL_TAG_ALL indicates all protocols)
- * @pdev_handle: cdp_pdev handle
+ * @soc: cdp_soc handle
+ * @pdev_id: id of cdp_pdev handle
* @protocol_type: protocol type for which stats should be displayed
*
* Return: none
*/
void
-dp_dump_pdev_rx_protocol_tag_stats(struct cdp_pdev *pdev_handle,
+dp_dump_pdev_rx_protocol_tag_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
uint16_t protocol_type)
{
uint16_t proto_idx;
+ struct dp_pdev *pdev =
+ dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
+ pdev_id);
+
+ if (!pdev)
+ return;
if (protocol_type != RX_PROTOCOL_TAG_ALL &&
protocol_type >= RX_PROTOCOL_TAG_MAX) {
@@ -367,57 +373,76 @@
/* protocol_type in [0 ... RX_PROTOCOL_TAG_MAX] */
if (protocol_type != RX_PROTOCOL_TAG_ALL) {
- dp_summarize_tag_stats(pdev_handle, protocol_type);
+ dp_summarize_tag_stats(pdev, protocol_type);
return;
}
/* protocol_type == RX_PROTOCOL_TAG_ALL */
for (proto_idx = 0; proto_idx < RX_PROTOCOL_TAG_MAX; proto_idx++)
- dp_summarize_tag_stats(pdev_handle, proto_idx);
+ dp_summarize_tag_stats(pdev, proto_idx);
}
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
#ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
-/**
- * dp_reset_pdev_rx_protocol_tag_stats - resets the stats counters for
- * given protocol type
- * @pdev_handle: cdp_pdev handle
- * @protocol_type: protocol type for which stats should be reset
- *
- * Return: none
- */
-void
-dp_reset_pdev_rx_protocol_tag_stats(struct cdp_pdev *pdev_handle,
- uint16_t protocol_type)
+static void
+__dp_reset_pdev_rx_protocol_tag_stats(struct dp_pdev *pdev,
+ uint16_t protocol_type)
{
- struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
uint8_t ring_idx;
for (ring_idx = 0; ring_idx < MAX_REO_DEST_RINGS; ring_idx++)
pdev->reo_proto_tag_stats[ring_idx][protocol_type].tag_ctr = 0;
pdev->rx_err_proto_tag_stats[protocol_type].tag_ctr = 0;
}
+
+/**
+ * dp_reset_pdev_rx_protocol_tag_stats - resets the stats counters for
+ * given protocol type
+ * @soc: soc handle
+ * @pdev_id: id of cdp_pdev handle
+ * @protocol_type: protocol type for which stats should be reset
+ *
+ * Return: none
+ */
+static void
+dp_reset_pdev_rx_protocol_tag_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
+ uint16_t protocol_type)
+{
+ struct dp_pdev *pdev =
+ dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
+ pdev_id);
+ if (!pdev)
+ return;
+
+ __dp_reset_pdev_rx_protocol_tag_stats(pdev, protocol_type);
+}
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
/**
* dp_update_pdev_rx_protocol_tag - Add/remove a protocol tag that should be
* applied to the desired protocol type packets
- * @txrx_pdev_handle: cdp_pdev handle
+ * @soc: soc handle
+ * @pdev_id: id of cdp_pdev handle
* @enable_rx_protocol_tag - bitmask that indicates which protocol types
* are enabled for tagging; zero disables the feature, non-zero enables it
* @protocol_type: new protocol type for which the tag is being added
* @tag: user configured tag for the new protocol
*
- * Return: QDF_STATUS
+ * Return: QDF_STATUS_SUCCESS on success, error code on failure
*/
QDF_STATUS
-dp_update_pdev_rx_protocol_tag(struct cdp_pdev *pdev_handle,
+dp_update_pdev_rx_protocol_tag(struct cdp_soc_t *soc, uint8_t pdev_id,
uint32_t enable_rx_protocol_tag,
uint16_t protocol_type,
uint16_t tag)
{
- struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
+ struct dp_pdev *pdev =
+ dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
+ pdev_id);
+ if (!pdev)
+ return QDF_STATUS_E_FAILURE;
+
/*
* dynamically enable/disable tagging based on enable_rx_protocol_tag
* flag.
@@ -434,7 +459,7 @@
}
/* Reset stats counter across all rings for given protocol */
- dp_reset_pdev_rx_protocol_tag_stats(pdev_handle, protocol_type);
+ __dp_reset_pdev_rx_protocol_tag_stats(pdev, protocol_type);
pdev->rx_proto_tag_map[protocol_type].tag = tag;
@@ -445,17 +470,25 @@
#ifdef WLAN_SUPPORT_RX_FLOW_TAG
/**
* dp_set_rx_flow_tag - add/delete a flow
- * @pdev_handle: cdp_pdev handle
+ * @soc: soc handle
+ * @pdev_id: id of cdp_pdev handle
* @flow_info: flow tuple that is to be added to/deleted from flow search table
*
- * Return: 0 for success, nonzero for failure
+ * Return: QDF_STATUS_SUCCESS on success, error code on failure
*/
QDF_STATUS
-dp_set_rx_flow_tag(struct cdp_pdev *pdev_handle,
+dp_set_rx_flow_tag(struct cdp_soc_t *soc, uint8_t pdev_id,
struct cdp_rx_flow_info *flow_info)
{
- struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
- struct wlan_cfg_dp_soc_ctxt *cfg = pdev->soc->wlan_cfg_ctx;
+ struct dp_pdev *pdev =
+ dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
+ pdev_id);
+ struct wlan_cfg_dp_soc_ctxt *cfg;
+
+ if (qdf_unlikely(!pdev))
+ return QDF_STATUS_E_FAILURE;
+
+ cfg = pdev->soc->wlan_cfg_ctx;
if (qdf_unlikely(!wlan_cfg_is_rx_flow_tag_enabled(cfg))) {
dp_err("RX Flow tag feature disabled");
@@ -473,20 +506,27 @@
/**
* dp_dump_rx_flow_tag_stats - dump the number of packets tagged for
* given flow 5-tuple
- * @pdev_handle: cdp_pdev handle
+ * @soc: soc handle
+ * @pdev_id: id of cdp_pdev handle
* @flow_info: flow 5-tuple for which stats should be displayed
*
- * Return: 0 for success, nonzero for failure
+ * Return: QDF_STATUS_SUCCESS on success, error code on failure
*/
QDF_STATUS
-dp_dump_rx_flow_tag_stats(struct cdp_pdev *pdev_handle,
+dp_dump_rx_flow_tag_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
struct cdp_rx_flow_info *flow_info)
{
QDF_STATUS status;
struct cdp_flow_stats stats;
- struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
- struct wlan_cfg_dp_soc_ctxt *cfg = pdev->soc->wlan_cfg_ctx;
+ struct wlan_cfg_dp_soc_ctxt *cfg;
+ struct dp_pdev *pdev =
+ dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
+ pdev_id);
+ if (!pdev)
+ return QDF_STATUS_E_FAILURE;
+
+ cfg = pdev->soc->wlan_cfg_ctx;
if (qdf_unlikely(!wlan_cfg_is_rx_flow_tag_enabled(cfg))) {
dp_err("RX Flow tag feature disabled");
return QDF_STATUS_E_NOSUPPORT;
diff --git a/dp/wifi3.0/dp_rx_tag.h b/dp/wifi3.0/dp_rx_tag.h
index 7de52fb..a3b79e8 100644
--- a/dp/wifi3.0/dp_rx_tag.h
+++ b/dp/wifi3.0/dp_rx_tag.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -26,17 +26,18 @@
/**
* dp_update_pdev_rx_protocol_tag - Add/remove a protocol tag that should be
* applied to the desired protocol type packets
- * @txrx_pdev_handle: cdp_pdev handle
+ * @soc: soc handle
+ * @pdev_id: id of cdp_pdev handle
* @enable_rx_protocol_tag - bitmask that indicates which protocol types
* are enabled for tagging; zero disables the feature, non-zero enables it
* @protocol_type: new protocol type for which the tag is being added
* @tag: user configured tag for the new protocol
*
- * Return: QDF_STATUS
+ * Return: QDF_STATUS_SUCCESS on success, error code on failure
*/
QDF_STATUS
-dp_update_pdev_rx_protocol_tag(struct cdp_pdev *pdev_handle,
+dp_update_pdev_rx_protocol_tag(struct cdp_soc_t *soc, uint8_t pdev_id,
uint32_t enable_rx_protocol_tag,
uint16_t protocol_type,
uint16_t tag);
@@ -69,7 +70,7 @@
* Return: none
*/
void
-dp_dump_pdev_rx_protocol_tag_stats(struct cdp_pdev *pdev_handle,
+dp_dump_pdev_rx_protocol_tag_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
uint16_t protocol_type);
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
@@ -92,25 +93,27 @@
/**
* dp_set_rx_flow_tag - add/delete a flow
- * @pdev_handle: cdp_pdev handle
+ * @cdp_soc: soc handle
+ * @pdev_id: id of cdp_pdev handle
* @flow_info: flow tuple that is to be added to/deleted from flow search table
*
- * Return: 0 for success, nonzero for failure
+ * Return: QDF_STATUS_SUCCESS on success, error code on failure
*/
QDF_STATUS
-dp_set_rx_flow_tag(struct cdp_pdev *pdev_handle,
+dp_set_rx_flow_tag(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
struct cdp_rx_flow_info *flow_info);
/**
* dp_dump_rx_flow_tag_stats - dump the number of packets tagged for
* given flow 5-tuple
- * @pdev_handle: cdp_pdev handle
+ * @cdp_soc: soc handle
+ * @pdev_id: id of cdp_pdev handle
* @flow_info: flow 5-tuple for which stats should be displayed
*
- * Return: 0 for success, nonzero for failure
+ * Return: QDF_STATUS_SUCCESS on success, error code on failure
*/
QDF_STATUS
-dp_dump_rx_flow_tag_stats(struct cdp_pdev *pdev_handle,
+dp_dump_rx_flow_tag_stats(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
struct cdp_rx_flow_info *flow_info);
/**
diff --git a/dp/wifi3.0/dp_tx_capture.c b/dp/wifi3.0/dp_tx_capture.c
index 53f6864..8d2ac96 100644
--- a/dp/wifi3.0/dp_tx_capture.c
+++ b/dp/wifi3.0/dp_tx_capture.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -43,6 +43,9 @@
#define DP_IEEE80211_BA_S_SEQ_S 4
#define DP_IEEE80211_BAR_CTL_COMBA 0x0004
+#define INVALID_PPDU_ID 0xFFFF
+#define MAX_END_TSF 0xFFFFFFFF
+
/* Macros to handle sequence number bitmaps */
/* HW generated rts frame flag */
@@ -119,12 +122,22 @@
ptr_tx_cap = &(pdev->tx_capture);
- DP_PRINT_STATS("tx capture stats\n");
+ DP_PRINT_STATS("tx capture stats:");
+ DP_PRINT_STATS(" mgmt control enqueue stats:");
for (i = 0; i < TXCAP_MAX_TYPE; i++) {
for (j = 0; j < TXCAP_MAX_SUBTYPE; j++) {
if (ptr_tx_cap->ctl_mgmt_q[i][j].qlen)
- DP_PRINT_STATS(" ctl_mgmt_q[%d][%d] = queue_len[%d]\n",
- i, j, ptr_tx_cap->ctl_mgmt_q[i][j].qlen);
+ DP_PRINT_STATS(" ctl_mgmt_q[%d][%d] = queue_len[%d]",
+ i, j, ptr_tx_cap->ctl_mgmt_q[i][j].qlen);
+ }
+ }
+ DP_PRINT_STATS(" mgmt control retry queue stats:");
+ for (i = 0; i < TXCAP_MAX_TYPE; i++) {
+ for (j = 0; j < TXCAP_MAX_SUBTYPE; j++) {
+			if (ptr_tx_cap->retries_ctl_mgmt_q[i][j].qlen)
+ DP_PRINT_STATS(" retries_ctl_mgmt_q[%d][%d] = queue_len[%d]",
+ i, j,
+ ptr_tx_cap->retries_ctl_mgmt_q[i][j].qlen);
}
}
@@ -180,6 +193,23 @@
}
}
+static
+void dp_peer_tx_cap_tid_queue_flush(struct dp_peer *peer)
+{
+ int tid;
+ struct dp_tx_tid *tx_tid;
+
+ for (tid = 0; tid < DP_MAX_TIDS; tid++) {
+ tx_tid = &peer->tx_capture.tx_tid[tid];
+
+ qdf_spin_lock_bh(&tx_tid->tid_lock);
+ qdf_nbuf_queue_free(&tx_tid->msdu_comp_q);
+ qdf_spin_unlock_bh(&tx_tid->tid_lock);
+
+ tx_tid->max_ppdu_id = 0;
+ }
+}
+
/*
* dp_peer_tid_queue_cleanup() – remove ppdu stats queue per TID
* @peer: Datapath peer
@@ -262,7 +292,8 @@
subtype = (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) >>
IEEE80211_FC0_SUBTYPE_SHIFT;
- if (!ptr_mgmt_hdr->ppdu_id || !ptr_mgmt_hdr->tx_tsf) {
+ if (!ptr_mgmt_hdr->ppdu_id || !ptr_mgmt_hdr->tx_tsf ||
+ (!type && !subtype)) {
/*
* if either ppdu_id or tx_tsf is zero then
* storing the payload won't be useful
@@ -468,7 +499,7 @@
{
int ret = QDF_STATUS_E_FAILURE;
- if ((desc->pdev->tx_capture_enabled != CDP_TX_ENH_CAPTURE_DISABLED) &&
+ if (peer && dp_peer_or_pdev_tx_cap_enabled(desc->pdev, peer) &&
((ts->status == HAL_TX_TQM_RR_FRAME_ACKED) ||
(ts->status == HAL_TX_TQM_RR_REM_CMD_TX) ||
((ts->status == HAL_TX_TQM_RR_REM_CMD_AGED) && ts->transmit_cnt))) {
@@ -681,9 +712,8 @@
* Return: QDF_STATUS
*/
QDF_STATUS
-dp_config_enh_tx_capture(struct cdp_pdev *pdev_handle, uint8_t val)
+dp_config_enh_tx_capture(struct dp_pdev *pdev, uint8_t val)
{
- struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
int i, j;
qdf_spin_lock(&pdev->tx_capture.config_lock);
@@ -1083,6 +1113,13 @@
if (qdf_unlikely(!peer))
return 0;
+	/* Non-QOS frames are indicated with TID 0
+	 * in the WBM completion path, and hence we should
+	 * use TID 0 to reap MSDUs from the completion path
+ */
+ if (qdf_unlikely(tid == DP_NON_QOS_TID))
+ tid = 0;
+
tx_tid = &peer->tx_capture.tx_tid[tid];
if (qdf_unlikely(!tx_tid))
@@ -1454,19 +1491,24 @@
mpdu_info->mac_address,
QDF_MAC_ADDR_SIZE);
- peer = dp_peer_find_by_id(pdev->soc, user->peer_id);
- if (peer) {
- struct dp_vdev *vdev = NULL;
- vdev = peer->vdev;
- if (vdev)
- qdf_mem_copy(wh_min->i_addr2,
- vdev->mac_addr.raw,
- QDF_MAC_ADDR_SIZE);
- dp_peer_unref_del_find_by_id(peer);
+ if (subtype == IEEE80211_FC0_SUBTYPE_ACK)
+ qdf_nbuf_set_pktlen(tx_capture_info.mpdu_nbuf,
+ sizeof(struct ieee80211_frame_min_one));
+ else {
+ peer = dp_peer_find_by_id(pdev->soc, user->peer_id);
+ if (peer) {
+ struct dp_vdev *vdev = NULL;
+
+ vdev = peer->vdev;
+ if (vdev)
+ qdf_mem_copy(wh_min->i_addr2,
+ vdev->mac_addr.raw,
+ QDF_MAC_ADDR_SIZE);
+ dp_peer_unref_del_find_by_id(peer);
+ }
+ qdf_nbuf_set_pktlen(tx_capture_info.mpdu_nbuf, sizeof(*wh_min));
}
- qdf_nbuf_set_pktlen(tx_capture_info.mpdu_nbuf,
- sizeof(*wh_min));
QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE,
QDF_TRACE_LEVEL_DEBUG,
"HTT_FTYPE[%d] frm(0x%08x): fc %x %x, dur 0x%x%x\n",
@@ -1567,6 +1609,47 @@
}
}
+static void dp_gen_ack_rx_frame(struct dp_pdev *pdev,
+ struct cdp_tx_indication_info *tx_capture_info)
+{
+ struct cdp_tx_completion_ppdu *ppdu_desc;
+ struct dp_peer *peer;
+ struct dp_pdev_tx_capture *ptr_tx_cap;
+
+ ptr_tx_cap = &pdev->tx_capture;
+ ppdu_desc = &ptr_tx_cap->dummy_ppdu_desc;
+ ppdu_desc->channel = tx_capture_info->ppdu_desc->channel;
+ ppdu_desc->num_mpdu = 1;
+ ppdu_desc->num_msdu = 1;
+ ppdu_desc->user[0].ppdu_type = HTT_PPDU_STATS_PPDU_TYPE_SU;
+ ppdu_desc->bar_num_users = 0;
+ ppdu_desc->num_users = 1;
+
+ ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
+ ppdu_desc->frame_ctrl = (IEEE80211_FC0_SUBTYPE_ACK |
+ IEEE80211_FC0_TYPE_CTL);
+ ppdu_desc->ppdu_start_timestamp =
+ tx_capture_info->ppdu_desc->ppdu_start_timestamp;
+ ppdu_desc->ppdu_end_timestamp =
+ tx_capture_info->ppdu_desc->ppdu_end_timestamp;
+ ppdu_desc->user[0].peer_id =
+ tx_capture_info->ppdu_desc->user[0].peer_id;
+ peer = dp_peer_find_by_id(pdev->soc,
+ tx_capture_info->ppdu_desc->user[0].peer_id);
+ if (peer) {
+ struct dp_vdev *vdev = NULL;
+
+ vdev = peer->vdev;
+ if (vdev)
+ qdf_mem_copy(&ppdu_desc->user[0].mac_addr,
+ vdev->mac_addr.raw,
+ QDF_MAC_ADDR_SIZE);
+ dp_peer_unref_del_find_by_id(peer);
+ }
+
+ dp_send_dummy_mpdu_info_to_stack(pdev, ppdu_desc);
+}
+
/**
* dp_send_data_to_stack(): Function to deliver mpdu info to stack
* to upper layer
@@ -1646,6 +1729,9 @@
if (tx_capture_info.mpdu_nbuf)
qdf_nbuf_free(tx_capture_info.mpdu_nbuf);
}
+
+ if (ppdu_desc->resp_type == HTT_PPDU_STATS_ACK_EXPECTED_E)
+ dp_gen_ack_rx_frame(pdev, &tx_capture_info);
}
static void
@@ -1825,6 +1911,7 @@
qdf_assert_always(0);
return;
}
+
for (i = 0; (i < ppdu_desc->user[0].ba_size) && cur_ppdu_desc;
i++) {
if (!(i & (SEQ_SEG_SZ_BITS(
@@ -1849,6 +1936,7 @@
if (!(SEQ_SEG_BIT(failed_seq, i)))
continue;
failed_seq ^= SEQ_SEG_MSK(failed_seq, i);
+
mpdu_nbuf = cur_ppdu_desc->mpdus[cur_index];
if (mpdu_nbuf) {
QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE,
@@ -1865,7 +1953,17 @@
i);
ppdu_desc->pending_retries--;
}
+
cur_index++;
+ if (cur_index >= cur_ppdu_desc->user[0].ba_size) {
+ QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE,
+ QDF_TRACE_LEVEL_INFO,
+ "%s: ba_size[%d] cur_index[%d]\n",
+ __func__,
+ cur_ppdu_desc->user[0].ba_size,
+ cur_index);
+ break;
+ }
/* Skip through empty slots in current PPDU */
while (!(SEQ_BIT(cur_ppdu_desc->user[0].enq_bitmap,
cur_index))) {
@@ -2100,6 +2198,16 @@
subtype = 0;
}
+ switch (ppdu_desc->htt_frame_type) {
+ case HTT_STATS_FTYPE_TIDQ_DATA_SU:
+ case HTT_STATS_FTYPE_TIDQ_DATA_MU:
+ is_sgen_pkt = false;
+ break;
+ default:
+ is_sgen_pkt = true;
+ break;
+ }
+
retries_q = &pdev->tx_capture.retries_ctl_mgmt_q[type][subtype];
get_mgmt_pkt_from_queue:
qdf_spin_lock_bh(
@@ -2158,6 +2266,11 @@
}
}
+ /*
+		 * Only packets sent over the air are handled;
+		 * packets dropped by firmware are not handled by
+		 * this feature
+ */
if (ppdu_desc->user[0].completion_status ==
HTT_PPDU_STATS_USER_STATUS_FILTERED) {
qdf_nbuf_free(nbuf_ppdu_desc);
@@ -2188,6 +2301,11 @@
uint16_t frame_ctrl_le;
struct ieee80211_frame *wh;
+ /*
+		 * Only packets sent over the air are handled;
+		 * packets dropped by firmware are not handled by
+		 * this feature
+ */
if (ppdu_desc->user[0].completion_status ==
HTT_PPDU_STATS_USER_STATUS_FILTERED) {
qdf_nbuf_free(nbuf_ppdu_desc);
@@ -2233,6 +2351,7 @@
wh->i_fc[1] = (frame_ctrl_le & 0xFF00) >> 8;
wh->i_fc[0] = (frame_ctrl_le & 0xFF);
+ tx_capture_info.ppdu_desc = tmp_ppdu_desc;
/*
* send MPDU to osif layer
*/
@@ -2265,6 +2384,7 @@
wh->i_fc[1] = (frame_ctrl_le & 0xFF00) >> 8;
wh->i_fc[0] = (frame_ctrl_le & 0xFF);
+ tx_capture_info.ppdu_desc = ppdu_desc;
/*
* send MPDU to osif layer
*/
@@ -2272,10 +2392,32 @@
&tx_capture_info,
mgmt_ctl_nbuf, true);
}
+ } else if (!is_sgen_pkt) {
+ /*
+			 * Only packets sent over the air are handled;
+			 * packets dropped by firmware are not handled by
+			 * this feature
+ */
+ if (ppdu_desc->user[0].completion_status ==
+ HTT_PPDU_STATS_USER_STATUS_FILTERED) {
+ qdf_nbuf_free(nbuf_ppdu_desc);
+ status = 0;
+ goto free_ppdu_desc;
+ }
+ /*
+ * add the ppdu_desc into retry queue
+ */
+ qdf_nbuf_queue_add(retries_q, nbuf_ppdu_desc);
+ status = 0;
} else if ((ppdu_desc->frame_ctrl &
IEEE80211_FC0_TYPE_MASK) ==
IEEE80211_FC0_TYPE_CTL) {
+ /*
+			 * Only packets sent over the air are handled;
+			 * packets dropped by firmware are not handled by
+			 * this feature
+ */
if (ppdu_desc->user[0].completion_status ==
HTT_PPDU_STATS_USER_STATUS_FILTERED) {
qdf_nbuf_free(nbuf_ppdu_desc);
@@ -2299,6 +2441,65 @@
}
/**
+ * dp_peer_tx_cap_tid_queue_flush_tlv(): dequeue and flush the peer's
+ * per-TID MSDU completion queue for the given PPDU
+ * @pdev: DP pdev handle
+ * @peer: DP peer handle
+ * @ppdu_desc: PPDU completion descriptor
+ *
+ * Return: void
+ */
+static void
+dp_peer_tx_cap_tid_queue_flush_tlv(struct dp_pdev *pdev,
+ struct dp_peer *peer,
+ struct cdp_tx_completion_ppdu *ppdu_desc)
+{
+ int tid;
+ struct dp_tx_tid *tx_tid;
+ qdf_nbuf_queue_t head_xretries;
+ qdf_nbuf_queue_t head_msdu;
+ uint32_t qlen = 0;
+ uint32_t qlen_curr = 0;
+
+ tid = ppdu_desc->user[0].tid;
+ tx_tid = &peer->tx_capture.tx_tid[tid];
+
+ qdf_nbuf_queue_init(&head_msdu);
+ qdf_nbuf_queue_init(&head_xretries);
+
+ qlen = qdf_nbuf_queue_len(&tx_tid->msdu_comp_q);
+
+ dp_tx_msdu_dequeue(peer, INVALID_PPDU_ID,
+ tid, ppdu_desc->num_msdu,
+ &head_msdu,
+ &head_xretries,
+ 0, MAX_END_TSF);
+
+ if (!qdf_nbuf_is_queue_empty(&head_xretries)) {
+ struct cdp_tx_completion_ppdu *xretry_ppdu =
+ &tx_tid->xretry_ppdu;
+
+ xretry_ppdu->ppdu_id = peer->tx_capture.tx_wifi_ppdu_id;
+
+ /* Restitch MPDUs from xretry MSDUs */
+ dp_tx_mon_restitch_mpdu(pdev, peer,
+ xretry_ppdu,
+ &head_xretries,
+ &xretry_ppdu->mpdu_q);
+ }
+ qdf_nbuf_queue_free(&head_msdu);
+ qdf_nbuf_queue_free(&head_xretries);
+ qlen_curr = qdf_nbuf_queue_len(&tx_tid->msdu_comp_q);
+
+ dp_tx_mon_proc_xretries(pdev, peer, tid);
+
+ QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE,
+ QDF_TRACE_LEVEL_INFO_MED,
+ "peer_id [%d 0x%x] tid[%d] qlen[%d -> %d]",
+ ppdu_desc->user[0].peer_id, peer, tid, qlen, qlen_curr);
+
+}
+
+/**
* dp_tx_ppdu_stats_flush(): Function to flush pending retried ppdu desc
* @pdev: DP pdev handle
* @nbuf: ppdu_desc
@@ -2317,11 +2518,7 @@
if (!peer)
return;
- /*
- * for all drop reason we are invoking
- * proc xretries
- */
- dp_tx_mon_proc_xretries(pdev, peer, ppdu_desc->user[0].tid);
+ dp_peer_tx_cap_tid_queue_flush_tlv(pdev, peer, ppdu_desc);
dp_peer_unref_del_find_by_id(peer);
return;
@@ -2534,6 +2731,7 @@
nbuf_ppdu_desc_list[i] = NULL;
qdf_nbuf_queue_free(&cur_ppdu_desc->mpdu_q);
qdf_mem_free(cur_ppdu_desc->mpdus);
+ cur_ppdu_desc->mpdus = NULL;
qdf_nbuf_free(tmp_nbuf);
continue;
}
@@ -2783,16 +2981,7 @@
continue;
}
- /* Non-QOS frames are being indicated with TID 0
- * in WBM completion path, an hence we should
- * TID 0 to reap MSDUs from completion path
- */
- if (qdf_unlikely(ppdu_desc->user[0].tid ==
- DP_NON_QOS_TID))
- tid = 0;
- else
- tid = ppdu_desc->user[0].tid;
-
+ tid = ppdu_desc->user[0].tid;
dequeue_msdu_again:
num_msdu = ppdu_desc->user[0].num_msdu;
start_tsf = ppdu_desc->ppdu_start_timestamp;
@@ -2806,7 +2995,8 @@
*/
ret = dp_tx_msdu_dequeue(peer,
ppdu_desc->ppdu_id,
- tid, num_msdu,
+ ppdu_desc->user[0].tid,
+ num_msdu,
&head_msdu,
&head_xretries,
start_tsf, end_tsf);
@@ -2965,6 +3155,8 @@
mpdu_info->preamble = DOT11_B;
else
mpdu_info->preamble = DOT11_A;
+
+ mpdu_info->mcs = CDP_LEGACY_MCS3;
}
static void dp_gen_ack_frame(struct hal_rx_ppdu_info *ppdu_info,
@@ -2996,13 +3188,36 @@
static void dp_gen_block_ack_frame(
struct mon_rx_user_status *rx_user_status,
+ struct mon_rx_user_info *rx_user_info,
struct dp_peer *peer,
qdf_nbuf_t mpdu_nbuf)
{
struct dp_vdev *vdev = NULL;
+ uint32_t tid;
+ struct dp_tx_tid *tx_tid;
struct ieee80211_ctlframe_addr2 *wh_addr2;
uint8_t *frm;
+ tid = rx_user_status->tid;
+ tx_tid = &peer->tx_capture.tx_tid[tid];
+ if (!rx_user_info->bar_frame) {
+ tx_tid->first_data_seq_ctrl =
+ rx_user_status->first_data_seq_ctrl;
+ tx_tid->mpdu_cnt = rx_user_status->mpdu_cnt_fcs_ok +
+ rx_user_status->mpdu_cnt_fcs_err;
+ if (tx_tid->mpdu_cnt > DP_MAX_MPDU_64)
+ qdf_mem_copy(tx_tid->mpdu_fcs_ok_bitmap,
+ rx_user_status->mpdu_fcs_ok_bitmap,
+ HAL_RX_NUM_WORDS_PER_PPDU_BITMAP * sizeof(
+ rx_user_status->mpdu_fcs_ok_bitmap[0]));
+ else
+ qdf_mem_copy(tx_tid->mpdu_fcs_ok_bitmap,
+ rx_user_status->mpdu_fcs_ok_bitmap,
+ DP_NUM_WORDS_PER_PPDU_BITMAP_64 * sizeof(
+ rx_user_status->mpdu_fcs_ok_bitmap[0]));
+ }
+
wh_addr2 = (struct ieee80211_ctlframe_addr2 *)
qdf_nbuf_data(mpdu_nbuf);
@@ -3029,19 +3244,17 @@
DP_IEEE80211_BAR_CTL_COMBA);
frm += 2;
*((uint16_t *)frm) =
- rx_user_status->first_data_seq_ctrl;
+ tx_tid->first_data_seq_ctrl;
frm += 2;
- if ((rx_user_status->mpdu_cnt_fcs_ok +
- rx_user_status->mpdu_cnt_fcs_err)
- > DP_MAX_MPDU_64) {
+ if (tx_tid->mpdu_cnt > DP_MAX_MPDU_64) {
qdf_mem_copy(frm,
- rx_user_status->mpdu_fcs_ok_bitmap,
+ tx_tid->mpdu_fcs_ok_bitmap,
HAL_RX_NUM_WORDS_PER_PPDU_BITMAP *
sizeof(rx_user_status->mpdu_fcs_ok_bitmap[0]));
frm += DP_NUM_BYTES_PER_PPDU_BITMAP;
} else {
qdf_mem_copy(frm,
- rx_user_status->mpdu_fcs_ok_bitmap,
+ tx_tid->mpdu_fcs_ok_bitmap,
DP_NUM_WORDS_PER_PPDU_BITMAP_64 *
sizeof(rx_user_status->mpdu_fcs_ok_bitmap[0]));
frm += DP_NUM_BYTES_PER_PPDU_BITMAP_64;
@@ -3069,8 +3282,10 @@
uint32_t peer_id;
struct mon_rx_status *rx_status;
struct mon_rx_user_status *rx_user_status;
+ struct mon_rx_user_info *rx_user_info;
uint32_t ast_index;
uint32_t i;
+ bool bar_frame;
rx_status = &ppdu_info->rx_status;
@@ -3081,11 +3296,30 @@
HAL_MPDU_SW_FRAME_GROUP_MGMT_BEACON)
return QDF_STATUS_SUCCESS;
+ if (ppdu_info->sw_frame_group_id == HAL_MPDU_SW_FRAME_GROUP_MGMT_PROBE_REQ &&
+ (ppdu_info->rx_info.mac_addr1[0] & 1)) {
+ return QDF_STATUS_SUCCESS;
+ }
+
+ if (ppdu_info->sw_frame_group_id == HAL_MPDU_SW_FRAME_GROUP_CTRL_BAR)
+ bar_frame = true;
+ else
+ bar_frame = false;
+
for (i = 0; i < ppdu_info->com_info.num_users; i++) {
if (i > OFDMA_NUM_USERS)
return QDF_STATUS_E_FAULT;
rx_user_status = &ppdu_info->rx_user_status[i];
+ rx_user_info = &ppdu_info->rx_user_info[i];
+
+ rx_user_info->bar_frame = bar_frame;
+
+ if (rx_user_info->qos_control_info_valid &&
+ ((rx_user_info->qos_control &
+ IEEE80211_QOS_ACKPOLICY) >> IEEE80211_QOS_ACKPOLICY_S)
+ == IEEE80211_BAR_CTL_NOACK)
+ continue;
ast_index = rx_user_status->ast_index;
if (ast_index >=
@@ -3148,9 +3382,11 @@
return QDF_STATUS_E_NOMEM;
}
- if (rx_status->rs_flags & IEEE80211_AMPDU_FLAG) {
- dp_gen_block_ack_frame(
- rx_user_status, peer,
+ if (peer->rx_tid[rx_user_status->tid].ba_status ==
+ DP_RX_BA_ACTIVE) {
+ dp_gen_block_ack_frame(rx_user_status,
+ rx_user_info,
+ peer,
tx_capture_info.mpdu_nbuf);
tx_capture_info.mpdu_info.tid = rx_user_status->tid;
@@ -3167,4 +3403,19 @@
return QDF_STATUS_SUCCESS;
}
+
+/**
+ * dp_peer_set_tx_capture_enabled: Set tx_cap_enabled bit in peer
+ * @peer: Peer handle
+ * @value: Enable/disable setting for tx_cap_enabled
+ *
+ * Return: None
+ */
+void
+dp_peer_set_tx_capture_enabled(struct dp_peer *peer, bool value)
+{
+ peer->tx_cap_enabled = value;
+ if (!value)
+ dp_peer_tx_cap_tid_queue_flush(peer);
+}
#endif
diff --git a/dp/wifi3.0/dp_tx_capture.h b/dp/wifi3.0/dp_tx_capture.h
index 637c318..6f85eaf 100644
--- a/dp/wifi3.0/dp_tx_capture.h
+++ b/dp/wifi3.0/dp_tx_capture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -73,6 +73,9 @@
qdf_nbuf_queue_t msdu_comp_q;
qdf_nbuf_queue_t pending_ppdu_q;
struct cdp_tx_completion_ppdu xretry_ppdu;
+ uint16_t first_data_seq_ctrl;
+ uint32_t mpdu_cnt;
+ uint32_t mpdu_fcs_ok_bitmap[QDF_MON_STATUS_MPDU_FCS_BMAP_NWORDS];
};
struct dp_peer_tx_capture {
@@ -179,7 +182,7 @@
* Return: QDF_STATUS
*/
QDF_STATUS
-dp_config_enh_tx_capture(struct cdp_pdev *pdev_handle, uint8_t val);
+dp_config_enh_tx_capture(struct dp_pdev *pdev_handle, uint8_t val);
/*
* dp_deliver_mgmt_frm: Process
@@ -255,5 +258,15 @@
QDF_STATUS dp_send_ack_frame_to_stack(struct dp_soc *soc,
struct dp_pdev *pdev,
struct hal_rx_ppdu_info *ppdu_info);
+
+/**
+ * dp_peer_set_tx_capture_enabled: Set tx_cap_enabled bit in peer
+ * @peer: Peer handle
+ * @value: Enable/disable setting for tx_cap_enabled
+ *
+ * Return: None
+ */
+void
+dp_peer_set_tx_capture_enabled(struct dp_peer *peer, bool value);
#endif
#endif
diff --git a/dp/wifi3.0/dp_txrx_me.c b/dp/wifi3.0/dp_txrx_me.c
index 8587ae3..b6d8b1d 100644
--- a/dp/wifi3.0/dp_txrx_me.c
+++ b/dp/wifi3.0/dp_txrx_me.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -18,6 +18,7 @@
#include "hal_hw_headers.h"
#include "dp_types.h"
+#include "dp_peer.h"
#include "qdf_nbuf.h"
#include "qdf_atomic.h"
#include "qdf_types.h"
@@ -96,14 +97,19 @@
/**
* dp_tx_me_alloc_descriptor():Allocate ME descriptor
- * @pdev_handle: DP PDEV handle
+ * @soc: DP SOC handle
+ * @pdev_id: id of DP PDEV handle
*
* Return:void
*/
-void
-dp_tx_me_alloc_descriptor(struct cdp_pdev *pdev_handle)
+void dp_tx_me_alloc_descriptor(struct cdp_soc_t *soc, uint8_t pdev_id)
{
- struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
+ struct dp_pdev *pdev =
+ dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
+ pdev_id);
+
+ if (!pdev)
+ return;
if (qdf_atomic_read(&pdev->mc_num_vap_attached) == 0) {
dp_tx_me_init(pdev);
@@ -162,20 +168,27 @@
/**
* dp_tx_me_free_descriptor():free ME descriptor
- * @pdev_handle:DP_PDEV handle
+ * @soc: DP SOC handle
+ * @pdev_id: id of DP PDEV handle
*
* Return:void
*/
void
-dp_tx_me_free_descriptor(struct cdp_pdev *pdev_handle)
+dp_tx_me_free_descriptor(struct cdp_soc_t *soc, uint8_t pdev_id)
{
- struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
+ struct dp_pdev *pdev =
+ dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
+ pdev_id);
- qdf_atomic_dec(&pdev->mc_num_vap_attached);
- if (atomic_read(&pdev->mc_num_vap_attached) == 0) {
- dp_tx_me_exit(pdev);
- QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
- "Disable MCAST_TO_UCAST");
+ if (!pdev)
+ return;
+
+	if (qdf_atomic_read(&pdev->mc_num_vap_attached)) {
+ if (qdf_atomic_dec_and_test(&pdev->mc_num_vap_attached)) {
+ dp_tx_me_exit(pdev);
+ QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
+ "Disable MCAST_TO_UCAST");
+ }
}
}
@@ -189,10 +202,10 @@
QDF_STATUS
dp_tx_prepare_send_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
- if (vdev->me_convert) {
- if (vdev->me_convert(vdev->osif_vdev, nbuf) > 0)
- return QDF_STATUS_SUCCESS;
- }
+ if (dp_me_mcast_convert((struct cdp_soc_t *)(vdev->pdev->soc),
+ vdev->vdev_id, vdev->pdev->pdev_id,
+ nbuf) > 0)
+ return QDF_STATUS_SUCCESS;
return QDF_STATUS_E_FAILURE;
}
@@ -231,7 +244,8 @@
/**
* dp_tx_me_send_convert_ucast(): function to convert multicast to unicast
- * @vdev: DP VDEV handle
+ * @soc: Datapath soc handle
+ * @vdev_id: vdev id
* @nbuf: Multicast nbuf
* @newmac: Table of the clients to which packets have to be sent
* @new_mac_cnt: No of clients
@@ -239,11 +253,12 @@
* return: no of converted packets
*/
uint16_t
-dp_tx_me_send_convert_ucast(struct cdp_vdev *vdev_handle, qdf_nbuf_t nbuf,
- uint8_t newmac[][QDF_MAC_ADDR_SIZE], uint8_t new_mac_cnt)
+dp_tx_me_send_convert_ucast(struct cdp_soc_t *soc, uint8_t vdev_id,
+ qdf_nbuf_t nbuf,
+ uint8_t newmac[][QDF_MAC_ADDR_SIZE],
+ uint8_t new_mac_cnt)
{
- struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;
- struct dp_pdev *pdev = vdev->pdev;
+ struct dp_pdev *pdev;
qdf_ether_header_t *eh;
uint8_t *data;
uint16_t len;
@@ -265,6 +280,31 @@
qdf_dma_addr_t paddr_mcbuf = 0;
uint8_t empty_entry_mac[QDF_MAC_ADDR_SIZE] = {0};
QDF_STATUS status;
+ struct dp_vdev *vdev =
+ dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
+ vdev_id);
+
+ if (!vdev) {
+ qdf_nbuf_free(nbuf);
+ return 1;
+ }
+
+ pdev = vdev->pdev;
+
+ if (!pdev) {
+ qdf_nbuf_free(nbuf);
+ return 1;
+ }
+
qdf_mem_zero(&msdu_info, sizeof(msdu_info));
diff --git a/dp/wifi3.0/dp_txrx_me.h b/dp/wifi3.0/dp_txrx_me.h
index 9c7601e..a716bdc 100644
--- a/dp/wifi3.0/dp_txrx_me.h
+++ b/dp/wifi3.0/dp_txrx_me.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -17,14 +17,19 @@
*/
#ifndef _DP_TXRX_ME_H_
#define _DP_TXRX_ME_H_
-uint16_t dp_tx_me_send_convert_ucast(struct cdp_vdev *vdev_handle,
- qdf_nbuf_t nbuf,
- uint8_t newmac[][QDF_MAC_ADDR_SIZE],
- uint8_t new_mac_cnt);
-void dp_tx_me_alloc_descriptor(struct cdp_pdev *pdev);
-
-void dp_tx_me_free_descriptor(struct cdp_pdev *pdev);
+uint16_t
+dp_tx_me_send_convert_ucast(struct cdp_soc_t *soc, uint8_t vdev_id,
+ qdf_nbuf_t nbuf,
+ uint8_t newmac[][QDF_MAC_ADDR_SIZE],
+ uint8_t new_mac_cnt);
+void dp_tx_me_alloc_descriptor(struct cdp_soc_t *soc, uint8_t pdev_id);
+void dp_tx_me_free_descriptor(struct cdp_soc_t *soc, uint8_t pdev_id);
void dp_tx_me_exit(struct dp_pdev *pdev);
QDF_STATUS
dp_tx_prepare_send_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
+extern int
+dp_me_mcast_convert(struct cdp_soc_t *soc,
+ uint8_t vdev_id,
+ uint8_t pdev_id,
+ qdf_nbuf_t wbuf);
#endif
diff --git a/dp/wifi3.0/dp_txrx_wds.c b/dp/wifi3.0/dp_txrx_wds.c
index c7bc2f3..a597228 100644
--- a/dp/wifi3.0/dp_txrx_wds.c
+++ b/dp/wifi3.0/dp_txrx_wds.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -32,6 +32,13 @@
#define DP_VLAN_TAGGED_MULTICAST 1
#define DP_VLAN_TAGGED_UNICAST 2
#define DP_MAX_VLAN_IDS 4096
+#define DP_INVALID_AST_IDX 0xffff
+#define DP_INVALID_FLOW_PRIORITY 0xff
+#define DP_PEER_AST0_FLOW_MASK 0x4
+#define DP_PEER_AST1_FLOW_MASK 0x8
+#define DP_PEER_AST2_FLOW_MASK 0x1
+#define DP_PEER_AST3_FLOW_MASK 0x2
+#define DP_MAX_AST_INDEX_PER_PEER 4
static void dp_ast_aging_timer_fn(void *soc_hdl)
{
@@ -596,39 +603,34 @@
* @nbuf: skb
* @tid: traffic priority
*
- * Return: bool: true if tag is inserted else false
+ * Return: bool: true on success, false on failure
+ * Success cases:
+ *  i. The frame carries a VLAN header.
+ *  ii. The frame comes from a peer that does not need multipass processing.
+ * Failure case:
+ *  i. The frame comes from a multipass peer but carries no VLAN header;
+ *     the caller drops such frames.
*/
bool dp_rx_multipass_process(struct dp_peer *peer, qdf_nbuf_t nbuf, uint8_t tid)
{
- qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
- struct vlan_ethhdr vethhdr;
+ struct vlan_ethhdr *vethhdrp;
if (qdf_unlikely(!peer->vlan_id))
- return false;
+ return true;
- if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < ETHERTYPE_VLAN_LEN))
+ vethhdrp = (struct vlan_ethhdr *)qdf_nbuf_data(nbuf);
+ /*
+	 * h_vlan_proto should be 0x8100 and h_vlan_TCI should be zero,
+	 * as the TCI is expected to be padded with 0.
+	 * Return false if the frame doesn't carry this tag so that the
+	 * caller drops the frame.
+ */
+ if (qdf_unlikely(vethhdrp->h_vlan_proto != htons(QDF_ETH_TYPE_8021Q)) ||
+ qdf_unlikely(vethhdrp->h_vlan_TCI != 0))
return false;
- /*
- * Form the VLAN header and insert in nbuf
- */
- qdf_mem_copy(vethhdr.h_dest, eh->ether_dhost, QDF_MAC_ADDR_SIZE);
- qdf_mem_copy(vethhdr.h_source, eh->ether_shost, QDF_MAC_ADDR_SIZE);
- vethhdr.h_vlan_proto = htons(QDF_ETH_TYPE_8021Q);
- vethhdr.h_vlan_TCI = htons(((tid & 0x7) << VLAN_PRIO_SHIFT) |
- (peer->vlan_id & VLAN_VID_MASK));
-
- /*
- * Packet format : DSTMAC | SRCMAC | <VLAN HEADERS TO BE INSERTED> | ETHERTYPE | IP HEADER
- * DSTMAC: 6 BYTES
- * SRCMAC: 6 BYTES
- * VLAN HEADER: 4 BYTES ( TPID | PCP | VLAN ID)
- * ETHERTYPE: 2 BYTES
- */
- qdf_nbuf_push_head(nbuf, sizeof(struct vlan_hdr));
- qdf_mem_copy(qdf_nbuf_data(nbuf), &vethhdr,
- sizeof(struct vlan_ethhdr)- ETHERNET_TYPE_LEN);
-
+ vethhdrp->h_vlan_TCI = htons(((tid & 0x7) << VLAN_PRIO_SHIFT) |
+ (peer->vlan_id & VLAN_VID_MASK));
return true;
}
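A worked example of the TCI value the function now writes in place, assuming the standard 802.1Q layout (VLAN_PRIO_SHIFT = 13, VLAN_VID_MASK = 0x0fff); the tid and vlan_id values are illustrative:

    /* tid = 5, vlan_id = 100 (illustrative values):
     *   PCP = (5 & 0x7) << 13 = 0xA000
     *   VID = 100 & 0x0fff    = 0x0064
     *   TCI = htons(0xA000 | 0x0064) = htons(0xA064)
     */
    vethhdrp->h_vlan_TCI = htons(((5 & 0x7) << VLAN_PRIO_SHIFT) |
                                 (100 & VLAN_VID_MASK));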
@@ -662,28 +664,20 @@
/**
* dp_peer_multipass_list_add: add to new multipass list
* @soc: soc handle
- * @dp_vdev: vdev handle
- * @peer_mac: mac address
+ * @peer: peer handle
*
* return: void
*/
-static void dp_peer_multipass_list_add(struct dp_soc *soc, struct dp_vdev *vdev,
- uint8_t *peer_mac)
+static void dp_peer_multipass_list_add(struct dp_soc *soc, struct dp_peer *peer)
{
- struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac, 0,
- vdev->vdev_id);
-
- if (!peer) {
- return;
- }
-
/*
* Ref_cnt is incremented inside dp_peer_find_hash_find().
* Decrement it when element is deleted from the list.
*/
- qdf_spin_lock_bh(&vdev->mpass_peer_mutex);
- TAILQ_INSERT_HEAD(&vdev->mpass_peer_list, peer, mpass_peer_list_elem);
- qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);
+ qdf_spin_lock_bh(&peer->vdev->mpass_peer_mutex);
+ TAILQ_INSERT_HEAD(&peer->vdev->mpass_peer_list, peer,
+ mpass_peer_list_elem);
+ qdf_spin_unlock_bh(&peer->vdev->mpass_peer_mutex);
}
/**
@@ -695,14 +689,16 @@
* return: void
*/
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
- struct cdp_vdev *vdev_handle, uint8_t *peer_mac,
+ uint8_t vdev_id, uint8_t *peer_mac,
uint16_t vlan_id)
{
struct dp_soc *soc = (struct dp_soc *)cdp_soc;
- struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
struct dp_peer *peer = NULL;
+ struct dp_vdev *vdev =
+ dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
+ vdev_id);
- if (!vdev->multipass_en)
+ if (!vdev || !vdev->multipass_en)
return;
peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev->vdev_id);
@@ -714,11 +710,12 @@
peer->vlan_id = vlan_id;
+ dp_peer_multipass_list_add(soc, peer);
+
/* Ref_cnt is incremented inside dp_peer_find_hash_find().
* Decrement it here.
*/
dp_peer_unref_delete(peer);
- dp_peer_multipass_list_add(soc, vdev, peer_mac);
}
/**
@@ -802,3 +799,263 @@
qdf_spinlock_create(&vdev->mpass_peer_mutex);
}
#endif
+
+#ifdef QCA_PEER_MULTIQ_SUPPORT
+
+/**
+ * dp_peer_reset_flowq_map() - reset peer flowq map table
+ * @peer - dp peer handle
+ *
+ * Return: none
+ */
+void dp_peer_reset_flowq_map(struct dp_peer *peer)
+{
+ int i = 0;
+
+ if (!peer)
+ return;
+
+ for (i = 0; i < DP_PEER_AST_FLOWQ_MAX; i++) {
+ peer->peer_ast_flowq_idx[i].is_valid = false;
+ peer->peer_ast_flowq_idx[i].valid_tid_mask = false;
+ peer->peer_ast_flowq_idx[i].ast_idx = DP_INVALID_AST_IDX;
+ peer->peer_ast_flowq_idx[i].flowQ = DP_INVALID_FLOW_PRIORITY;
+ }
+}
+
+/**
+ * dp_peer_get_flowid_from_flowmask() - get flow id from flow mask
+ * @peer - dp peer handle
+ * @mask - flow mask
+ *
+ * Return: flow id
+ */
+static int dp_peer_get_flowid_from_flowmask(struct dp_peer *peer,
+ uint8_t mask)
+{
+ if (!peer) {
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+ "%s: Invalid peer\n", __func__);
+ return -1;
+ }
+
+ if (mask & DP_PEER_AST0_FLOW_MASK)
+ return DP_PEER_AST_FLOWQ_UDP;
+ else if (mask & DP_PEER_AST1_FLOW_MASK)
+ return DP_PEER_AST_FLOWQ_NON_UDP;
+ else if (mask & DP_PEER_AST2_FLOW_MASK)
+ return DP_PEER_AST_FLOWQ_HI_PRIO;
+ else if (mask & DP_PEER_AST3_FLOW_MASK)
+ return DP_PEER_AST_FLOWQ_LOW_PRIO;
+
+ return DP_PEER_AST_FLOWQ_MAX;
+}
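Given the mask definitions added at the top of this file (AST0 -> 0x4, AST1 -> 0x8, AST2 -> 0x1, AST3 -> 0x2), a quick worked example: ast_flow_mask[i] = 0x8 maps ast index i to DP_PEER_AST_FLOWQ_NON_UDP, 0x1 maps it to DP_PEER_AST_FLOWQ_HI_PRIO, and a mask of 0 falls through to DP_PEER_AST_FLOWQ_MAX, i.e. no flow queue.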
+
+/**
+ * dp_peer_get_ast_valid() - get ast index valid from mask
+ * @mask - mask for ast valid bits
+ * @index - index for an ast
+ *
+ * Return: 1 if the ast index is valid in the mask, else 0
+ */
+static inline bool dp_peer_get_ast_valid(uint8_t mask, uint16_t index)
+{
+ if (index == 0)
+ return 1;
+ return ((mask) & (1 << ((index) - 1)));
+}
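Likewise, a short worked check of the validity helper above (mask value illustrative): with ast_valid_mask = 0x5 (binary 101), index 0 is implicitly valid, index 1 tests bit 0 (set, valid), index 2 tests bit 1 (clear, invalid), and index 3 tests bit 2 (set, valid).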
+
+/**
+ * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map
+ * @soc - generic soc handle
+ * @is_wds - flag to indicate if peer is wds
+ * @peer_id - peer_id from htt peer map message
+ * @peer_mac_addr - mac address of the peer
+ * @ast_info - ast flow override information from peer map
+ *
+ * Return: none
+ */
+void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
+ bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
+ struct dp_ast_flow_override_info *ast_info)
+{
+ struct dp_soc *soc = (struct dp_soc *)soc_hdl;
+ struct dp_peer *peer = NULL;
+ uint8_t i;
+
+ /*
+ * Ast flow override feature is supported
+ * only for connected client
+ */
+ if (is_wds)
+ return;
+
+ peer = dp_peer_find_by_id(soc, peer_id);
+ if (!peer) {
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+ "%s: Invalid peer\n", __func__);
+ return;
+ }
+
+ /* Valid only in AP mode */
+ if (peer->vdev->opmode != wlan_op_mode_ap) {
+ QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+ "%s: Peer ast flow map not in STA mode\n", __func__);
+ /* Release peer reference */
+ dp_peer_unref_del_find_by_id(peer);
+ return;
+ }
+
+ /* Making sure the peer is for this mac address */
+ if (!qdf_is_macaddr_equal((struct qdf_mac_addr *)peer_mac_addr,
+ (struct qdf_mac_addr *)peer->mac_addr.raw)) {
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+ "%s: Peer mac address mismatch\n", __func__);
+ dp_peer_unref_del_find_by_id(peer);
+ return;
+ }
+
+ /* Ast entry flow mapping not valid for self peer map */
+ if (qdf_is_macaddr_equal((struct qdf_mac_addr *)peer_mac_addr,
+ (struct qdf_mac_addr *)peer->vdev->mac_addr.raw)) {
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+ "%s: Ast flow mapping not valid for self peer \n", __func__);
+ dp_peer_unref_del_find_by_id(peer);
+ return;
+ }
+
+ /* Fill up ast index <---> flow id mapping table for this peer */
+ for (i = 0; i < DP_MAX_AST_INDEX_PER_PEER; i++) {
+
+ /* Check if this ast index is valid */
+ peer->peer_ast_flowq_idx[i].is_valid =
+ dp_peer_get_ast_valid(ast_info->ast_valid_mask, i);
+ if (!peer->peer_ast_flowq_idx[i].is_valid)
+ continue;
+
+ /* Get the flow queue id which is mapped to this ast index */
+ peer->peer_ast_flowq_idx[i].flowQ =
+ dp_peer_get_flowid_from_flowmask(peer,
+ ast_info->ast_flow_mask[i]);
+ /*
+ * Update tid valid mask only if flow id HIGH or
+ * Low priority
+ */
+ if (peer->peer_ast_flowq_idx[i].flowQ ==
+ DP_PEER_AST_FLOWQ_HI_PRIO) {
+ peer->peer_ast_flowq_idx[i].valid_tid_mask =
+ ast_info->tid_valid_hi_pri_mask;
+ } else if (peer->peer_ast_flowq_idx[i].flowQ ==
+ DP_PEER_AST_FLOWQ_LOW_PRIO) {
+ peer->peer_ast_flowq_idx[i].valid_tid_mask =
+ ast_info->tid_valid_low_pri_mask;
+ }
+
+ /* Save the ast index for this entry */
+ peer->peer_ast_flowq_idx[i].ast_idx = ast_info->ast_idx[i];
+ }
+
+ if (soc->cdp_soc.ol_ops->peer_ast_flowid_map) {
+ soc->cdp_soc.ol_ops->peer_ast_flowid_map(
+ soc->ctrl_psoc, peer->peer_ids[0],
+ peer->vdev->vdev_id, peer_mac_addr);
+ }
+
+ /* Release peer reference */
+ dp_peer_unref_del_find_by_id(peer);
+}
+
+/**
+ * dp_peer_find_ast_index_by_flowq_id() - API to get ast idx for a given flowid
+ * @soc - soc handle
+ * @vdev_id - vdev id of the peer
+ * @peer_mac_addr - mac address of the peer
+ * @flow_id - flow id to find ast index
+ * @tid - tid checked against the hi/low priority flow masks
+ *
+ * Return: ast index for a given flow id, -1 for fail cases
+ */
+int dp_peer_find_ast_index_by_flowq_id(struct cdp_soc_t *soc,
+ uint16_t vdev_id, uint8_t *peer_mac_addr,
+ uint8_t flow_id, uint8_t tid)
+{
+ struct dp_peer *peer = NULL;
+ uint8_t i;
+ uint16_t ast_index;
+
+ if (flow_id >= DP_PEER_AST_FLOWQ_MAX) {
+ QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+ "Invalid Flow ID %d\n", flow_id);
+ return -1;
+ }
+
+ peer = dp_peer_find_hash_find((struct dp_soc *)soc,
+ peer_mac_addr, 0, vdev_id);
+ if (!peer) {
+ QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+ "%s: Invalid peer\n", __func__);
+ return -1;
+ }
+
+ /*
+ * Loop over the ast entry <----> flow-id mapping to find
+ * which ast index entry has this flow queue id enabled.
+ */
+ for (i = 0; i < DP_PEER_AST_FLOWQ_MAX; i++) {
+ if (peer->peer_ast_flowq_idx[i].flowQ == flow_id)
+ /*
+ * Found the matching index for this flow id
+ */
+ break;
+ }
+
+ /*
+ * No match found for this flow id
+ */
+ if (i == DP_PEER_AST_FLOWQ_MAX) {
+ QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+ "%s: ast index not found for flow %d\n", __func__, flow_id);
+ dp_peer_unref_delete(peer);
+ return -1;
+ }
+
+ /* Check whether this ast entry is valid */
+ if (!peer->peer_ast_flowq_idx[i].is_valid) {
+ QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+ "%s: ast index is invalid for flow %d\n", __func__, flow_id);
+ dp_peer_unref_delete(peer);
+ return -1;
+ }
+
+ if (flow_id == DP_PEER_AST_FLOWQ_HI_PRIO ||
+ flow_id == DP_PEER_AST_FLOWQ_LOW_PRIO) {
+ /*
+ * check if this tid is valid for Hi
+ * and Low priority flow id
+ */
+ if ((peer->peer_ast_flowq_idx[i].valid_tid_mask
+ & (1 << tid))) {
+ /* Release peer reference */
+ ast_index = peer->peer_ast_flowq_idx[i].ast_idx;
+ dp_peer_unref_delete(peer);
+ return ast_index;
+ } else {
+ QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+ "%s: TID %d is not valid for flow %d\n",
+ __func__, tid, flow_id);
+ /*
+ * TID is not valid for this flow
+ * Return -1
+ */
+ dp_peer_unref_delete(peer);
+ return -1;
+ }
+ }
+
+ /*
+ * TID valid check not required for
+ * UDP/NON UDP flow id
+ */
+ ast_index = peer->peer_ast_flowq_idx[i].ast_idx;
+ dp_peer_unref_delete(peer);
+ return ast_index;
+}
+#endif
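A hedged usage sketch for the flow-queue lookup added above; the caller context, MAC address, fallback and TID below are hypothetical:

    /* Hypothetical caller: resolve the AST index for a peer's
     * high-priority flow on TID 6; a negative return means no
     * valid mapping exists.
     */
    uint8_t mac[QDF_MAC_ADDR_SIZE] = {0x00, 0x03, 0x7f, 0x12, 0x34, 0x56};
    int ast_idx = dp_peer_find_ast_index_by_flowq_id(soc, vdev_id, mac,
                        DP_PEER_AST_FLOWQ_HI_PRIO, 6);
    if (ast_idx < 0)
        ast_idx = 0; /* illustrative fallback */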
diff --git a/dp/wifi3.0/dp_txrx_wds.h b/dp/wifi3.0/dp_txrx_wds.h
index 5e2ad2d..99a434c 100644
--- a/dp/wifi3.0/dp_txrx_wds.h
+++ b/dp/wifi3.0/dp_txrx_wds.h
@@ -26,7 +26,11 @@
((DP_WDS_AST_AGING_TIMER_DEFAULT_MS / DP_AST_AGING_TIMER_DEFAULT_MS) - 1)
void dp_soc_wds_attach(struct dp_soc *soc);
void dp_soc_wds_detach(struct dp_soc *soc);
-
+#ifdef QCA_PEER_MULTIQ_SUPPORT
+int dp_peer_find_ast_index_by_flowq_id(struct cdp_soc_t *soc,
+ uint16_t vdev_id, uint8_t *peer_mac_addr,
+ uint8_t flow_id, uint8_t tid);
+#endif
void
dp_rx_da_learn(struct dp_soc *soc,
uint8_t *rx_tlv_hdr,
diff --git a/target_if/cfr/src/target_if_cfr_8074v2.c b/target_if/cfr/src/target_if_cfr_8074v2.c
index b6a2e1b..2c3eb3f 100644
--- a/target_if/cfr/src/target_if_cfr_8074v2.c
+++ b/target_if/cfr/src/target_if_cfr_8074v2.c
@@ -177,20 +177,21 @@
/*
* This condition can occur if the DBR buffer did not get
- * released or leaked either by Host / Target
+ * released, or was leaked by either Host or Target.
* We may need to add recovery here.
*
* 1. Stop all captures
* 2. Flush/release DBR buffer and LUT
* 3. Start capture again
*/
- if (pdev_cfrobj->dbr_evt_cnt - pdev_cfrobj->release_cnt > 1) {
+ if ((pdev_cfrobj->dbr_evt_cnt -
+ pdev_cfrobj->release_cnt) >= MAX_LUT_ENTRIES) {
cfr_err("cookie = %u dbr_cnt = %d, release_cnt = %d",
cookie, pdev_cfrobj->dbr_evt_cnt,
pdev_cfrobj->release_cnt);
dump_lut(pdev);
dump_dma_hdr(&lut->dma_hdr, 1);
- cfr_debug("correlation_info1: 0x%08x correlation_info2 0x%08x",
+ cfr_err("correlation_info1: 0x%08x correlation_info2 0x%08x",
lut->tx_address1, lut->tx_address2);
}
diff --git a/umac/dfs/core/src/misc/dfs_zero_cac.c b/umac/dfs/core/src/misc/dfs_zero_cac.c
index 5671466..02dd68f 100644
--- a/umac/dfs/core/src/misc/dfs_zero_cac.c
+++ b/umac/dfs/core/src/misc/dfs_zero_cac.c
@@ -4599,6 +4599,7 @@
* dfs_get_precac_intermediate_chan() - Get the intermediate channel used during preCAC.
* @dfs: Pointer to wlan_dfs.
*/
+#ifdef WLAN_DFS_PRECAC_AUTO_CHAN_SUPPORT
#ifdef CONFIG_CHAN_FREQ_API
uint32_t dfs_get_precac_intermediate_chan(struct wlan_dfs *dfs)
{
@@ -4612,6 +4613,7 @@
}
#endif
#endif
+#endif
#ifdef QCA_SUPPORT_AGILE_DFS
void dfs_reset_agile_config(struct dfs_soc_priv_obj *dfs_soc)
diff --git a/umac/mlme/vdev_mgr/core/src/vdev_mlme_sm_actions.c b/umac/mlme/vdev_mgr/core/src/vdev_mlme_sm_actions.c
index fa943d2..a9440d9 100644
--- a/umac/mlme/vdev_mgr/core/src/vdev_mlme_sm_actions.c
+++ b/umac/mlme/vdev_mgr/core/src/vdev_mlme_sm_actions.c
@@ -524,6 +524,9 @@
return QDF_STATUS_E_FAILURE;
}
+ /* if restart is pending on this VDEV, clear the bitmap */
+ mlme_stop_pending_restart(pdev, vdev);
+
error = mlme_vdev_ops_stop_fw_send(vdev);
return error;
diff --git a/wmi/src/wmi_unified_ap_tlv.c b/wmi/src/wmi_unified_ap_tlv.c
index 67fc2bc..9a1397e 100644
--- a/wmi/src/wmi_unified_ap_tlv.c
+++ b/wmi/src/wmi_unified_ap_tlv.c
@@ -1131,14 +1131,22 @@
wmi_pdev_multiple_vdev_restart_request_cmd_fixed_param *cmd;
int i;
uint8_t *buf_ptr;
- uint32_t *vdev_ids;
+ uint32_t *vdev_ids, *phymode;
wmi_channel *chan_info;
struct mlme_channel_param *tchan_info;
uint16_t len = sizeof(*cmd) + WMI_TLV_HDR_SIZE;
+ if (!param->num_vdevs) {
+ WMI_LOGE("vdev's not found for MVR cmd");
+ qdf_status = QDF_STATUS_E_FAULT;
+ goto end;
+ }
len += sizeof(wmi_channel);
- if (param->num_vdevs)
+ if (param->num_vdevs) {
+ len += sizeof(uint32_t) * param->num_vdevs + WMI_TLV_HDR_SIZE;
+		/* space for the per-vdev phymode array */
len += sizeof(uint32_t) * param->num_vdevs;
+ }
buf = wmi_buf_alloc(wmi_handle, len);
if (!buf) {
@@ -1224,6 +1232,23 @@
tchan_info->maxregpower, tchan_info->reg_class_id,
tchan_info->maxregpower);
+ buf_ptr += sizeof(*chan_info);
+ WMITLV_SET_HDR(buf_ptr,
+ WMITLV_TAG_ARRAY_UINT32,
+ sizeof(uint32_t) * param->num_vdevs);
+ phymode = (uint32_t *)(buf_ptr + WMI_TLV_HDR_SIZE);
+ for (i = 0; i < param->num_vdevs; i++)
+ WMI_MULTIPLE_VDEV_RESTART_FLAG_SET_PHYMODE(
+ phymode[i], param->mvr_param[i].phymode);
+
+ /* Target expects flag for phymode processing */
+ WMI_MULTIPLE_VDEV_RESTART_FLAG_SET_PHYMODE_PRESENT(cmd->flags, 1);
+ /*
+	 * Inform the target that the host expects an MVR response
+	 * corresponding to this request.
+ */
+ WMI_MULTIPLE_VDEV_RESTART_FLAG_SET_MVRR_EVENT_SUPPORT(cmd->flags, 1);
+
wmi_mtrace(WMI_PDEV_MULTIPLE_VDEV_RESTART_REQUEST_CMDID, NO_SESSION, 0);
qdf_status = wmi_unified_cmd_send(wmi_handle, buf, len,
WMI_PDEV_MULTIPLE_VDEV_RESTART_REQUEST_CMDID);
@@ -2488,6 +2513,48 @@
return QDF_STATUS_SUCCESS;
}
+static QDF_STATUS extract_multi_vdev_restart_resp_event_tlv(
+ wmi_unified_t wmi_hdl, void *evt_buf,
+ struct multi_vdev_restart_resp *param)
+{
+ WMI_PDEV_MULTIPLE_VDEV_RESTART_RESP_EVENTID_param_tlvs *param_buf;
+ wmi_pdev_multiple_vdev_restart_resp_event_fixed_param *ev;
+
+ param_buf =
+ (WMI_PDEV_MULTIPLE_VDEV_RESTART_RESP_EVENTID_param_tlvs *)evt_buf;
+ if (!param_buf) {
+ WMI_LOGE("Invalid buf multi_vdev restart response");
+ return QDF_STATUS_E_INVAL;
+ }
+
+ ev = (wmi_pdev_multiple_vdev_restart_resp_event_fixed_param *)
+ param_buf->fixed_param;
+ if (!ev) {
+ WMI_LOGE("Invalid ev multi_vdev restart response");
+ return QDF_STATUS_E_INVAL;
+ }
+
+ param->pdev_id = ev->pdev_id;
+ param->status = ev->status;
+
+ if (!param_buf->num_vdev_ids_bitmap)
+ return QDF_STATUS_E_FAILURE;
+
+ if (param_buf->num_vdev_ids_bitmap > sizeof(param->vdev_id_bmap)) {
+ WMI_LOGE("vdevId bitmap overflow size:%d",
+ param_buf->num_vdev_ids_bitmap);
+ return QDF_STATUS_E_FAILURE;
+ }
+
+ qdf_mem_copy(param->vdev_id_bmap, param_buf->vdev_ids_bitmap,
+ param_buf->num_vdev_ids_bitmap);
+
+ WMI_LOGD("vdev_id_bmap :0x%x%x", param->vdev_id_bmap[1],
+ param->vdev_id_bmap[0]);
+
+ return QDF_STATUS_SUCCESS;
+}
+
void wmi_ap_attach_tlv(wmi_unified_t wmi_handle)
{
struct wmi_ops *ops = wmi_handle->ops;
@@ -2557,4 +2624,6 @@
set_rx_pkt_type_routing_tag_update_tlv;
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
ops->send_peer_vlan_config_cmd = send_peer_vlan_config_cmd_tlv;
+ ops->extract_multi_vdev_restart_resp_event =
+ extract_multi_vdev_restart_resp_event_tlv;
}
diff --git a/wmi/src/wmi_unified_non_tlv.c b/wmi/src/wmi_unified_non_tlv.c
index 2b45e99..6a1c80d 100644
--- a/wmi/src/wmi_unified_non_tlv.c
+++ b/wmi/src/wmi_unified_non_tlv.c
@@ -20,7 +20,6 @@
#include "wmi_unified_priv.h"
#include "target_type.h"
#include <qdf_module.h>
-
#if defined(WMI_NON_TLV_SUPPORT) || defined(WMI_TLV_AND_NON_TLV_SUPPORT)
#include "wmi.h"
#include "wmi_unified.h"
@@ -501,6 +500,11 @@
cmd->chan.band_center_freq1 = param->channel.cfreq1;
cmd->chan.band_center_freq2 = param->channel.cfreq2;
cmd->disable_hw_ack = param->disable_hw_ack;
+ cmd->beacon_interval = param->beacon_interval;
+ cmd->dtim_period = param->dtim_period;
+ cmd->bcn_tx_rate = param->bcn_tx_rate_code;
+ if (param->bcn_tx_rate_code)
+ cmd->flags |= WMI_UNIFIED_VDEV_START_BCN_TX_RATE_PRESENT;
WMI_SET_CHANNEL_MIN_POWER(&cmd->chan, param->channel.minpower);
WMI_SET_CHANNEL_MAX_POWER(&cmd->chan, param->channel.maxpower);
@@ -7046,22 +7050,100 @@
}
/**
- * extract_host_mem_req_non_tlv() - Extract host memory request event
+ * extract_num_mem_reqs_non_tlv() - Extract number of memory entries requested
* @wmi_handle: wmi handle
- * @param evt_buf: pointer to event buffer
- * @param num_entries: pointer to hold number of entries requested
+ * @evt_buf: pointer to event buffer
*
* Return: Number of entries requested
*/
-static host_mem_req *extract_host_mem_req_non_tlv(wmi_unified_t wmi_handle,
- void *evt_buf, uint8_t *num_entries)
+static uint32_t extract_num_mem_reqs_non_tlv(wmi_unified_t wmi_handle,
+ void *evt_buf)
{
wmi_service_ready_event *ev;
ev = (wmi_service_ready_event *) evt_buf;
- *num_entries = ev->num_mem_reqs;
- return (host_mem_req *)ev->mem_reqs;
+ return ev->num_mem_reqs;
+}
+
+/**
+ * extract_host_mem_req_non_tlv() - Extract host memory required from
+ * service ready event
+ * @wmi_handle: wmi handle
+ * @evt_buf: pointer to event buffer
+ * @mem_reqs: pointer to host memory request structure
+ * @num_active_peers: number of active peers for peer cache
+ * @num_peers: number of peers
+ * @fw_prio: FW priority
+ * @idx: index for memory request
+ *
+ * Return: QDF_STATUS_SUCCESS on success
+ */
+static QDF_STATUS extract_host_mem_req_non_tlv(wmi_unified_t wmi_handle,
+ void *evt_buf, host_mem_req *mem_reqs,
+ uint32_t num_active_peers, uint32_t num_peers,
+ enum wmi_fw_mem_prio fw_prio, uint16_t idx)
+{
+ wmi_service_ready_event *ev;
+
+ ev = (wmi_service_ready_event *) evt_buf;
+
+ mem_reqs->req_id = (uint32_t)ev->mem_reqs[idx].req_id;
+ mem_reqs->unit_size = (uint32_t)ev->mem_reqs[idx].unit_size;
+ mem_reqs->num_unit_info =
+ (uint32_t)ev->mem_reqs[idx].num_unit_info;
+ mem_reqs->num_units = (uint32_t)ev->mem_reqs[idx].num_units;
+ mem_reqs->tgt_num_units = 0;
+
+ if (((fw_prio == WMI_FW_MEM_HIGH_PRIORITY) &&
+ (mem_reqs->num_unit_info &
+ REQ_TO_HOST_FOR_CONT_MEMORY)) ||
+ ((fw_prio == WMI_FW_MEM_LOW_PRIORITY) &&
+ (!(mem_reqs->num_unit_info &
+ REQ_TO_HOST_FOR_CONT_MEMORY)))) {
+ /* First allocate the memory that requires contiguous memory */
+ mem_reqs->tgt_num_units = mem_reqs->num_units;
+ if (mem_reqs->num_unit_info) {
+ if (mem_reqs->num_unit_info &
+ NUM_UNITS_IS_NUM_PEERS) {
+				/*
+				 * Number of units allocated equals the
+				 * number of peers, plus 1 extra for the
+				 * self peer on the target. This needs
+				 * to be fixed: host and target can get
+				 * out of sync.
+				 */
+ mem_reqs->tgt_num_units =
+ num_peers + 1;
+ }
+ if (mem_reqs->num_unit_info &
+ NUM_UNITS_IS_NUM_ACTIVE_PEERS) {
+				/*
+				 * Request allocation of memory using
+				 * num_active_peers in qcache. If qcache
+				 * is disabled in the host, allocate
+				 * memory for num_peers instead of
+				 * num_active_peers. (A sketch of this
+				 * arithmetic follows the function.)
+				 */
+ if (num_active_peers)
+ mem_reqs->tgt_num_units =
+ num_active_peers + 1;
+ else
+ mem_reqs->tgt_num_units =
+ num_peers + 1;
+ }
+ }
+
+		WMI_LOGI("idx %d req %d num_units %d num_unit_info %d "
+			 "unit size %d actual units %d",
+ idx, mem_reqs->req_id,
+ mem_reqs->num_units,
+ mem_reqs->num_unit_info,
+ mem_reqs->unit_size,
+ mem_reqs->tgt_num_units);
+
+ }
+
+ return QDF_STATUS_SUCCESS;
}
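Editorial aside: the branch above reduces to a small decision, with NUM_UNITS_IS_NUM_PEERS / NUM_UNITS_IS_NUM_ACTIVE_PEERS scaling the allocation to the peer count plus one target self peer. A standalone sketch of that arithmetic, with placeholder flag values (the real definitions live in the WMI headers):

#include <stdint.h>
#include <stdio.h>

/* Placeholder flag values for illustration only */
#define NUM_UNITS_IS_NUM_PEERS        0x1
#define NUM_UNITS_IS_NUM_ACTIVE_PEERS 0x2

static uint32_t target_num_units(uint32_t num_units, uint32_t num_unit_info,
				 uint32_t num_peers, uint32_t num_active_peers)
{
	uint32_t tgt = num_units;

	if (num_unit_info & NUM_UNITS_IS_NUM_PEERS)
		tgt = num_peers + 1;	/* +1 for the target self peer */

	if (num_unit_info & NUM_UNITS_IS_NUM_ACTIVE_PEERS)
		/* qcache disabled on the host => fall back to num_peers */
		tgt = num_active_peers ? num_active_peers + 1 : num_peers + 1;

	return tgt;
}

int main(void)
{
	/* 128 peers, qcache sized for 64 active peers => 65 units */
	printf("units = %u\n", (unsigned)target_num_units(
			8, NUM_UNITS_IS_NUM_ACTIVE_PEERS, 128, 64));
	return 0;
}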
/**
@@ -9788,7 +9870,7 @@
}
/**
- * send_multiple_vdev_restart_req_cmd_non_tlv() - send multi vdev restart req
+ * send_mvr_cmd() - send multi vdev restart req
* @wmi_handle: wmi handle
* @param: wmi multiple vdev restart req param
*
@@ -9796,7 +9878,7 @@
*
* Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error
*/
-QDF_STATUS send_multiple_vdev_restart_req_cmd_non_tlv(
+static QDF_STATUS send_mvr_cmd(
wmi_unified_t wmi_handle,
struct multiple_vdev_restart_params *param)
{
@@ -9808,16 +9890,13 @@
wmi_pdev_multiple_vdev_restart_request_cmd *cmd;
uint16_t len = sizeof(*cmd);
- if (param->num_vdevs)
- len += sizeof(uint32_t) * param->num_vdevs;
-
+ len += sizeof(uint32_t) * param->num_vdevs;
buf = wmi_buf_alloc(wmi_handle, len);
if (!buf) {
WMI_LOGE("Failed to allocate memory");
return QDF_STATUS_E_NOMEM;
}
-
- cmd = (wmi_pdev_multiple_vdev_restart_request_cmd *)wmi_buf_data(buf);
+ cmd = (wmi_pdev_multiple_vdev_restart_request_cmd *) wmi_buf_data(buf);
cmd->requestor_id = param->requestor_id;
cmd->disable_hw_ack = param->disable_hw_ack;
@@ -9869,14 +9948,186 @@
ret = wmi_unified_cmd_send(
wmi_handle, buf, len,
WMI_PDEV_MULTIPLE_VDEV_RESTART_REQUEST_CMDID);
+
if (QDF_IS_STATUS_ERROR(ret)) {
WMI_LOGE("Failed to send WMI_PDEV_MULTIPLE_VDEV_RESTART_REQUEST_CMDID");
wmi_buf_free(buf);
}
+
return ret;
+}
+
+/**
+ * send_mvr_ext_cmd() - send multi vdev restart req extension
+ * @wmi_handle: wmi handle
+ * @param: wmi multiple vdev restart req ext param
+ *
+ * Send WMI_PDEV_MULTIPLE_VDEV_RESTART_REQUEST_EXT_CMDID parameters to fw.
+ *
+ * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error
+ */
+static QDF_STATUS send_mvr_ext_cmd(
+ wmi_unified_t wmi_handle,
+ struct multiple_vdev_restart_params *param)
+{
+ int i;
+ wmi_buf_t buf_ptr;
+ QDF_STATUS ret;
+ wmi_channel *chan_info;
+ struct mlme_channel_param *tchan_info;
+ wmi_pdev_multiple_vdev_restart_request_ext_cmd *cmd;
+ uint8_t *buf;
+ uint16_t len = sizeof(*cmd);
+ uint16_t param_len;
+ wmi_vdev_param *vdev_param;
+
+	/*
+	 * vdev_id & phymode are expected in TAG, VALUE format.
+	 * The vdev_param structure already contains one "uint32_t"
+	 * member, so that member is excluded while calculating the
+	 * required buffer length. The requirement is to send
+	 * "num_vdevs" count of (tag, value) parameters, i.e.
+	 * vdev_id and phymode. (See the worked sketch after this
+	 * function.)
+	 */
+ param_len = ((sizeof(*vdev_param) - sizeof(uint32_t)) +
+ ((param->num_vdevs) * sizeof(uint32_t)));
+ len += 2 * param_len;
+ buf_ptr = wmi_buf_alloc(wmi_handle, len);
+ if (!buf_ptr) {
+ WMI_LOGE("Failed to allocate memory");
+ return QDF_STATUS_E_NOMEM;
+ }
+ buf = (uint8_t *)wmi_buf_data(buf_ptr);
+ cmd = (wmi_pdev_multiple_vdev_restart_request_ext_cmd *)buf;
+ cmd->requestor_id = param->requestor_id;
+ cmd->disable_hw_ack = param->disable_hw_ack;
+
+ WMI_LOGI("req_id:%d dis_hw_ack:%d",
+ cmd->requestor_id, cmd->disable_hw_ack);
+
+ chan_info = &cmd->chan;
+	tchan_info = &param->ch_param;
+ chan_info->mhz = tchan_info->mhz;
+ chan_info->band_center_freq1 = tchan_info->cfreq1;
+ chan_info->band_center_freq2 = tchan_info->cfreq2;
+
+ if (tchan_info->is_chan_passive)
+ WMI_SET_CHANNEL_FLAG(chan_info, WMI_CHAN_FLAG_PASSIVE);
+
+ if (tchan_info->dfs_set)
+ WMI_SET_CHANNEL_FLAG(chan_info, WMI_CHAN_FLAG_DFS);
+
+ if (tchan_info->dfs_set_cfreq2)
+ WMI_SET_CHANNEL_FLAG(chan_info, WMI_CHAN_FLAG_DFS_CFREQ2);
+
+ if (tchan_info->allow_vht)
+ WMI_SET_CHANNEL_FLAG(chan_info, WMI_CHAN_FLAG_ALLOW_VHT);
+ else if (tchan_info->allow_ht)
+ WMI_SET_CHANNEL_FLAG(chan_info, WMI_CHAN_FLAG_ALLOW_HT);
+
+ WMI_SET_CHANNEL_MODE(chan_info, tchan_info->phy_mode);
+ WMI_SET_CHANNEL_MIN_POWER(chan_info, tchan_info->minpower);
+ WMI_SET_CHANNEL_MAX_POWER(chan_info, tchan_info->maxpower);
+ WMI_SET_CHANNEL_REG_POWER(chan_info, tchan_info->maxregpower);
+ WMI_SET_CHANNEL_ANTENNA_MAX(chan_info, tchan_info->antennamax);
+ WMI_SET_CHANNEL_REG_CLASSID(chan_info, tchan_info->reg_class_id);
+ WMI_SET_CHANNEL_MAX_TX_POWER(chan_info, tchan_info->maxregpower);
+
+ WMI_LOGI("is_chan_passive:%d dfs_set:%d allow_vht:%d allow_ht:%d",
+ tchan_info->is_chan_passive, tchan_info->dfs_set,
+ tchan_info->allow_vht, tchan_info->allow_ht);
+ WMI_LOGI("antennamax:%d phy_mode:%d minpower:%d maxpower:%d",
+ tchan_info->antennamax, tchan_info->phy_mode,
+ tchan_info->minpower, tchan_info->maxpower);
+ WMI_LOGI("maxregpower:%d reg_class_id:%d",
+ tchan_info->maxregpower, tchan_info->reg_class_id);
+
+ /* To fill the Tag,Value pairs, move the buf accordingly */
+ buf += sizeof(*cmd);
+
+ vdev_param = (wmi_vdev_param *)buf;
+ vdev_param->tag = WMI_VDEV_PARAM_TAG_VDEV_ID;
+ vdev_param->num_param_values = param->num_vdevs;
+ for (i = 0; i < param->num_vdevs; i++)
+ vdev_param->param_value[i] = param->vdev_ids[i];
+ buf += param_len;
+ vdev_param = (wmi_vdev_param *)buf;
+ vdev_param->tag = WMI_VDEV_PARAM_TAG_PHYMODE_ID;
+ vdev_param->num_param_values = param->num_vdevs;
+ for (i = 0; i < param->num_vdevs; i++)
+ vdev_param->param_value[i] = param->mvr_param[i].phymode;
+ ret = wmi_unified_cmd_send(
+ wmi_handle, buf_ptr, len,
+ WMI_PDEV_MULTIPLE_VDEV_RESTART_REQUEST_EXT_CMDID);
+ if (QDF_IS_STATUS_ERROR(ret)) {
+		WMI_LOGE("Failed to send WMI_PDEV_MULTIPLE_VDEV_RESTART_REQUEST_EXT_CMDID");
+ wmi_buf_free(buf_ptr);
+ }
+ return ret;
+}
+
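Editorial aside: the buffer-length rule in the comment inside send_mvr_ext_cmd() can be sanity-checked standalone. The struct below is a hypothetical mirror of wmi_vdev_param, assumed to end in a one-element value array, which is why a single uint32_t is subtracted before adding num_vdevs values:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical mirror of wmi_vdev_param: (tag, count) + value array */
struct vdev_param_example {
	uint32_t tag;
	uint32_t num_param_values;
	uint32_t param_value[1];
};

int main(void)
{
	uint16_t num_vdevs = 4;
	/*
	 * One param_value is already counted inside the struct size,
	 * so subtract it before adding num_vdevs values.
	 */
	uint16_t param_len =
		(sizeof(struct vdev_param_example) - sizeof(uint32_t)) +
		num_vdevs * sizeof(uint32_t);

	/* Two (tag, value-list) blocks follow the fixed cmd: ids, phymodes */
	printf("extra bytes after the fixed cmd: %u\n",
	       (unsigned)(2 * param_len));
	return 0;
}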
+/**
+ * send_multiple_vdev_restart_req_cmd_non_tlv() - send multi vdev restart req
+ * @wmi_handle: wmi handle
+ * @param: wmi multiple vdev restart req param
+ *
+ * Send mvr or mvr_ext parameters to fw.
+ *
+ * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error
+ */
+QDF_STATUS send_multiple_vdev_restart_req_cmd_non_tlv(
+ wmi_unified_t wmi_handle,
+ struct multiple_vdev_restart_params *param)
+{
+ bool mvr_ext;
+
+ if (!param->num_vdevs) {
+		WMI_LOGE("No vdevs found for MVR cmd");
+ return QDF_STATUS_E_FAULT;
+ }
+
+ mvr_ext = is_service_enabled_non_tlv(wmi_handle,
+ WMI_SERVICE_MULTI_VDEV_RESTART_EXT_COMMAND);
+
+ if (mvr_ext)
+ return send_mvr_ext_cmd(wmi_handle, param);
+ else
+ return send_mvr_cmd(wmi_handle, param);
+}
+
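Editorial aside: the dispatch above follows a capability-gated pattern: fail fast on an empty vdev list, then pick the extended command only when the firmware advertises the service bit. A minimal stand-in sketch (all names hypothetical, not the WMI API):

#include <stdbool.h>
#include <stdio.h>

struct fake_wmi {
	bool mvr_ext_supported;	/* stand-in for the FW service bitmap */
};

static int send_mvr(struct fake_wmi *h)
{
	(void)h;
	return printf("sending MVR cmd\n") < 0;
}

static int send_mvr_ext(struct fake_wmi *h)
{
	(void)h;
	return printf("sending MVR ext cmd\n") < 0;
}

static int send_multi_vdev_restart(struct fake_wmi *h, unsigned int num_vdevs)
{
	if (!num_vdevs)
		return -1;	/* mirrors the QDF_STATUS_E_FAULT early-out */

	return h->mvr_ext_supported ? send_mvr_ext(h) : send_mvr(h);
}

int main(void)
{
	struct fake_wmi h = { .mvr_ext_supported = true };

	return send_multi_vdev_restart(&h, 2);
}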
+/**
+ * extract_multi_vdev_restart_resp_event_non_tlv() -
+ * extract multiple vdev restart response event
+ * @wmi_hdl: wmi handle
+ * @evt_buf: pointer to event buffer
+ * @param: Pointer to hold the multiple vdev restart response
+ *
+ * Return: QDF_STATUS_SUCCESS for success or QDF_STATUS_E_FAILURE on error
+ */
+static QDF_STATUS extract_multi_vdev_restart_resp_event_non_tlv(
+ wmi_unified_t wmi_hdl, void *evt_buf,
+ struct multi_vdev_restart_resp *param)
+{
+ wmi_vdev_multi_vdev_restart_response_event *ev;
+
+ ev = (wmi_vdev_multi_vdev_restart_response_event *)evt_buf;
+ if (!ev) {
+ WMI_LOGE("Invalid multi_vdev restart response");
+ return QDF_STATUS_E_FAILURE;
+ }
+
+ /* For legacy platforms, pdev_id is set to 0 by default */
+ param->pdev_id = 0;
+ param->status = ev->status;
+ qdf_mem_copy(param->vdev_id_bmap, &ev->requestor_id,
+ sizeof(uint32_t));
+
+	WMI_LOGD("vdev_id_bmap: 0x%x%08x", param->vdev_id_bmap[1],
+		 param->vdev_id_bmap[0]);
+ return QDF_STATUS_SUCCESS;
}
/**
@@ -10120,6 +10371,7 @@
.extract_fw_version = extract_fw_version_non_tlv,
.extract_fw_abi_version = extract_fw_abi_version_non_tlv,
.extract_hal_reg_cap = extract_hal_reg_cap_non_tlv,
+ .extract_num_mem_reqs = extract_num_mem_reqs_non_tlv,
.extract_host_mem_req = extract_host_mem_req_non_tlv,
.save_service_bitmap = save_service_bitmap_non_tlv,
.is_service_enabled = is_service_enabled_non_tlv,
@@ -10231,6 +10483,8 @@
.send_vdev_tidmap_prec_cmd = send_vdev_tidmap_prec_cmd_non_tlv,
.send_multiple_vdev_restart_req_cmd =
send_multiple_vdev_restart_req_cmd_non_tlv,
+ .extract_multi_vdev_restart_resp_event =
+ extract_multi_vdev_restart_resp_event_non_tlv,
};
/**
@@ -10381,6 +10635,8 @@
wmi_service[wmi_service_mawc] = WMI_SERVICE_UNAVAILABLE;
wmi_service[wmi_service_multiple_vdev_restart] =
WMI_SERVICE_MULTIPLE_VDEV_RESTART;
+ wmi_service[wmi_service_multiple_vdev_restart_ext] =
+ WMI_SERVICE_MULTI_VDEV_RESTART_EXT_COMMAND;
wmi_service[wmi_service_peer_assoc_conf] = WMI_SERVICE_UNAVAILABLE;
wmi_service[wmi_service_egap] = WMI_SERVICE_UNAVAILABLE;
wmi_service[wmi_service_sta_pmf_offload] = WMI_SERVICE_UNAVAILABLE;
@@ -10549,6 +10805,8 @@
WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID;
event_ids[wmi_peer_delete_all_response_event_id] =
WMI_VDEV_DELETE_ALL_PEER_RESP_EVENTID;
+ event_ids[wmi_pdev_multi_vdev_restart_response_event_id] =
+ WMI_PDEV_MULTIPLE_VDEV_RESTART_RESP_EVENTID;
}
#endif