File-copy from v4.4.100

This is the result of 'cp' from a linux-stable tree with the 'v4.4.100'
tag checked out (commit 26d6298789e695c9f627ce49a7bbd2286405798a) on
git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git

Please refer to that tree for all history prior to this point.

Change-Id: I8a9ee2aea93cd29c52c847d0ce33091a73ae6afe
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
new file mode 100644
index 0000000..72acb82
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -0,0 +1,47 @@
+config ATH10K
+	tristate "Atheros 802.11ac wireless cards support"
+	depends on MAC80211 && HAS_DMA
+	select ATH_COMMON
+	---help---
+	  This module adds support for wireless adapters based on
+	  Atheros IEEE 802.11ac family of chipsets.
+
+	  If you choose to build a module, it'll be called ath10k.
+
+config ATH10K_PCI
+	tristate "Atheros ath10k PCI support"
+	depends on ATH10K && PCI
+	---help---
+	  This module adds support for the PCIe bus.
+
+config ATH10K_DEBUG
+	bool "Atheros ath10k debugging"
+	depends on ATH10K
+	---help---
+	  Enables debug support
+
+	  If unsure, say Y to make it easier to debug problems.
+
+config ATH10K_DEBUGFS
+	bool "Atheros ath10k debugfs support"
+	depends on ATH10K && DEBUG_FS
+	select RELAY
+	---help---
+	  Enables debugfs support
+
+	  If unsure, say Y to make it easier to debug problems.
+
+config ATH10K_TRACING
+	bool "Atheros ath10k tracing support"
+	depends on ATH10K
+	depends on EVENT_TRACING
+	---help---
+	  Select this to enable ath10k tracing infrastructure.
+
+config ATH10K_DFS_CERTIFIED
+	bool "Atheros DFS support for certified platforms"
+	depends on ATH10K && CFG80211_CERTIFICATION_ONUS
+	default n
+	---help---
+	  This option enables DFS support for initiating radiation on
+	  ath10k.
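+
+# Example .config fragment (illustrative only): a typical build enabling
+# ath10k and its PCI support as modules together with debugfs support
+# might contain:
+#
+#   CONFIG_ATH10K=m
+#   CONFIG_ATH10K_PCI=m
+#   CONFIG_ATH10K_DEBUGFS=y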
diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile
new file mode 100644
index 0000000..c04fb00
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/Makefile
@@ -0,0 +1,29 @@
+obj-$(CONFIG_ATH10K) += ath10k_core.o
+ath10k_core-y += mac.o \
+		 debug.o \
+		 core.o \
+		 htc.o \
+		 htt.o \
+		 htt_rx.o \
+		 htt_tx.o \
+		 txrx.o \
+		 wmi.o \
+		 wmi-tlv.o \
+		 bmi.o \
+		 hw.o \
+		 p2p.o \
+		 swap.o
+
+ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o
+ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
+ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o
+ath10k_core-$(CONFIG_THERMAL) += thermal.o
+ath10k_core-$(CONFIG_MAC80211_DEBUGFS) += debugfs_sta.o
+ath10k_core-$(CONFIG_PM) += wow.o
+
+obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o
+ath10k_pci-y += pci.o \
+		ce.o
+
+# for tracing framework to find trace.h
+CFLAGS_trace.o := -I$(src)
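+
+# Example (illustrative): from the top of an already configured kernel tree,
+# just this directory can be rebuilt with:
+#
+#   make M=drivers/net/wireless/ath/ath10k modules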
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
new file mode 100644
index 0000000..3d29b08
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -0,0 +1,318 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "bmi.h"
+#include "hif.h"
+#include "debug.h"
+#include "htc.h"
+
+void ath10k_bmi_start(struct ath10k *ar)
+{
+	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi start\n");
+
+	ar->bmi.done_sent = false;
+}
+
+int ath10k_bmi_done(struct ath10k *ar)
+{
+	struct bmi_cmd cmd;
+	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
+	int ret;
+
+	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi done\n");
+
+	if (ar->bmi.done_sent) {
+		ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi skipped\n");
+		return 0;
+	}
+
+	ar->bmi.done_sent = true;
+	cmd.id = __cpu_to_le32(BMI_DONE);
+
+	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
+	if (ret) {
+		ath10k_warn(ar, "unable to write to the device: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+int ath10k_bmi_get_target_info(struct ath10k *ar,
+			       struct bmi_target_info *target_info)
+{
+	struct bmi_cmd cmd;
+	union bmi_resp resp;
+	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
+	u32 resplen = sizeof(resp.get_target_info);
+	int ret;
+
+	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info\n");
+
+	if (ar->bmi.done_sent) {
+		ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
+		return -EBUSY;
+	}
+
+	cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);
+
+	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
+	if (ret) {
+		ath10k_warn(ar, "unable to get target info from device\n");
+		return ret;
+	}
+
+	if (resplen < sizeof(resp.get_target_info)) {
+		ath10k_warn(ar, "invalid get_target_info response length (%d)\n",
+			    resplen);
+		return -EIO;
+	}
+
+	target_info->version = __le32_to_cpu(resp.get_target_info.version);
+	target_info->type    = __le32_to_cpu(resp.get_target_info.type);
+
+	return 0;
+}
+
+int ath10k_bmi_read_memory(struct ath10k *ar,
+			   u32 address, void *buffer, u32 length)
+{
+	struct bmi_cmd cmd;
+	union bmi_resp resp;
+	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_mem);
+	u32 rxlen;
+	int ret;
+
+	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
+		   address, length);
+
+	if (ar->bmi.done_sent) {
+		ath10k_warn(ar, "command disallowed\n");
+		return -EBUSY;
+	}
+
+	while (length) {
+		rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE);
+
+		cmd.id            = __cpu_to_le32(BMI_READ_MEMORY);
+		cmd.read_mem.addr = __cpu_to_le32(address);
+		cmd.read_mem.len  = __cpu_to_le32(rxlen);
+
+		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
+						  &resp, &rxlen);
+		if (ret) {
+			ath10k_warn(ar, "unable to read from the device (%d)\n",
+				    ret);
+			return ret;
+		}
+
+		memcpy(buffer, resp.read_mem.payload, rxlen);
+		address += rxlen;
+		buffer  += rxlen;
+		length  -= rxlen;
+	}
+
+	return 0;
+}
+
+int ath10k_bmi_write_memory(struct ath10k *ar,
+			    u32 address, const void *buffer, u32 length)
+{
+	struct bmi_cmd cmd;
+	u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.write_mem);
+	u32 txlen;
+	int ret;
+
+	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
+		   address, length);
+
+	if (ar->bmi.done_sent) {
+		ath10k_warn(ar, "command disallowed\n");
+		return -EBUSY;
+	}
+
+	while (length) {
+		txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);
+
+		/* copy before roundup to avoid reading beyond the buffer */
+		memcpy(cmd.write_mem.payload, buffer, txlen);
+		txlen = roundup(txlen, 4);
+
+		cmd.id             = __cpu_to_le32(BMI_WRITE_MEMORY);
+		cmd.write_mem.addr = __cpu_to_le32(address);
+		cmd.write_mem.len  = __cpu_to_le32(txlen);
+
+		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
+						  NULL, NULL);
+		if (ret) {
+			ath10k_warn(ar, "unable to write to the device (%d)\n",
+				    ret);
+			return ret;
+		}
+
+		/* fixup roundup() so `length` zeroes out for last chunk */
+		txlen = min(txlen, length);
+
+		address += txlen;
+		buffer  += txlen;
+		length  -= txlen;
+	}
+
+	return 0;
+}
+
+int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result)
+{
+	struct bmi_cmd cmd;
+	union bmi_resp resp;
+	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.execute);
+	u32 resplen = sizeof(resp.execute);
+	int ret;
+
+	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
+		   address, param);
+
+	if (ar->bmi.done_sent) {
+		ath10k_warn(ar, "command disallowed\n");
+		return -EBUSY;
+	}
+
+	cmd.id            = __cpu_to_le32(BMI_EXECUTE);
+	cmd.execute.addr  = __cpu_to_le32(address);
+	cmd.execute.param = __cpu_to_le32(param);
+
+	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
+	if (ret) {
+		ath10k_warn(ar, "unable to read from the device\n");
+		return ret;
+	}
+
+	if (resplen < sizeof(resp.execute)) {
+		ath10k_warn(ar, "invalid execute response length (%d)\n",
+			    resplen);
+		return -EIO;
+	}
+
+	*result = __le32_to_cpu(resp.execute.result);
+
+	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);
+
+	return 0;
+}
+
+int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
+{
+	struct bmi_cmd cmd;
+	u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.lz_data);
+	u32 txlen;
+	int ret;
+
+	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
+		   buffer, length);
+
+	if (ar->bmi.done_sent) {
+		ath10k_warn(ar, "command disallowed\n");
+		return -EBUSY;
+	}
+
+	while (length) {
+		txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);
+
+		WARN_ON_ONCE(txlen & 3);
+
+		cmd.id          = __cpu_to_le32(BMI_LZ_DATA);
+		cmd.lz_data.len = __cpu_to_le32(txlen);
+		memcpy(cmd.lz_data.payload, buffer, txlen);
+
+		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
+						  NULL, NULL);
+		if (ret) {
+			ath10k_warn(ar, "unable to write to the device\n");
+			return ret;
+		}
+
+		buffer += txlen;
+		length -= txlen;
+	}
+
+	return 0;
+}
+
+int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
+{
+	struct bmi_cmd cmd;
+	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
+	int ret;
+
+	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
+		   address);
+
+	if (ar->bmi.done_sent) {
+		ath10k_warn(ar, "command disallowed\n");
+		return -EBUSY;
+	}
+
+	cmd.id            = __cpu_to_le32(BMI_LZ_STREAM_START);
+	cmd.lz_start.addr = __cpu_to_le32(address);
+
+	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
+	if (ret) {
+		ath10k_warn(ar, "unable to Start LZ Stream to the device\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+int ath10k_bmi_fast_download(struct ath10k *ar,
+			     u32 address, const void *buffer, u32 length)
+{
+	u8 trailer[4] = {};
+	u32 head_len = rounddown(length, 4);
+	u32 trailer_len = length - head_len;
+	int ret;
+
+	ath10k_dbg(ar, ATH10K_DBG_BMI,
+		   "bmi fast download address 0x%x buffer 0x%p length %d\n",
+		   address, buffer, length);
+
+	ret = ath10k_bmi_lz_stream_start(ar, address);
+	if (ret)
+		return ret;
+
+	/* copy the last word into a zero padded buffer */
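+	/* (e.g. length = 10: head_len = 8, trailer_len = 2, zero-padded to 4) */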
+	if (trailer_len > 0)
+		memcpy(trailer, buffer + head_len, trailer_len);
+
+	ret = ath10k_bmi_lz_data(ar, buffer, head_len);
+	if (ret)
+		return ret;
+
+	if (trailer_len > 0)
+		ret = ath10k_bmi_lz_data(ar, trailer, 4);
+
+	if (ret != 0)
+		return ret;
+
+	/*
+	 * Close compressed stream and open a new (fake) one.
+	 * This serves mainly to flush Target caches.
+	 */
+	ret = ath10k_bmi_lz_stream_start(ar, 0x00);
+
+	return ret;
+}
diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h
new file mode 100644
index 0000000..7d3231a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/bmi.h
@@ -0,0 +1,235 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _BMI_H_
+#define _BMI_H_
+
+#include "core.h"
+
+/*
+ * Bootloader Messaging Interface (BMI)
+ *
+ * BMI is a very simple messaging interface used during initialization
+ * to read memory, write memory, execute code, and to define an
+ * application entry PC.
+ *
+ * It is used to download an application to QCA988x, to provide
+ * patches to code that is already resident on QCA988x, and generally
+ * to examine and modify state.  The Host has an opportunity to use
+ * BMI only once during bootup.  Once the Host issues a BMI_DONE
+ * command, this opportunity ends.
+ *
+ * The Host writes BMI requests to mailbox0, and reads BMI responses
+ * from mailbox0.   BMI requests all begin with a command
+ * (see below for specific commands), and are followed by
+ * command-specific data.
+ *
+ * Flow control:
+ * The Host can only issue a command once the Target gives it a
+ * "BMI Command Credit", using AR8K Counter #4.  As soon as the
+ * Target has completed a command, it issues another BMI Command
+ * Credit (so the Host can issue the next command).
+ *
+ * BMI handles all required Target-side cache flushing.
+ */
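+
+/*
+ * Illustrative bootstrap sequence using the API declared below (a sketch
+ * only, not part of the driver; the address, data and length values are
+ * placeholders and error handling is omitted):
+ *
+ *	struct bmi_target_info info;
+ *	u32 result;
+ *
+ *	ath10k_bmi_start(ar);
+ *	ath10k_bmi_get_target_info(ar, &info);
+ *	ath10k_bmi_fast_download(ar, load_addr, fw_data, fw_len);
+ *	ath10k_bmi_execute(ar, entry_addr, param, &result);
+ *	ath10k_bmi_done(ar);
+ */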
+
+/* Maximum data size used for BMI transfers */
+#define BMI_MAX_DATA_SIZE	256
+
+/* len = cmd + addr + length */
+#define BMI_MAX_CMDBUF_SIZE (BMI_MAX_DATA_SIZE + \
+			sizeof(u32) + \
+			sizeof(u32) + \
+			sizeof(u32))
+
+/* BMI Commands */
+
+enum bmi_cmd_id {
+	BMI_NO_COMMAND          = 0,
+	BMI_DONE                = 1,
+	BMI_READ_MEMORY         = 2,
+	BMI_WRITE_MEMORY        = 3,
+	BMI_EXECUTE             = 4,
+	BMI_SET_APP_START       = 5,
+	BMI_READ_SOC_REGISTER   = 6,
+	BMI_READ_SOC_WORD       = 6,
+	BMI_WRITE_SOC_REGISTER  = 7,
+	BMI_WRITE_SOC_WORD      = 7,
+	BMI_GET_TARGET_ID       = 8,
+	BMI_GET_TARGET_INFO     = 8,
+	BMI_ROMPATCH_INSTALL    = 9,
+	BMI_ROMPATCH_UNINSTALL  = 10,
+	BMI_ROMPATCH_ACTIVATE   = 11,
+	BMI_ROMPATCH_DEACTIVATE = 12,
+	BMI_LZ_STREAM_START     = 13, /* should be followed by LZ_DATA */
+	BMI_LZ_DATA             = 14,
+	BMI_NVRAM_PROCESS       = 15,
+};
+
+#define BMI_NVRAM_SEG_NAME_SZ 16
+
+#define BMI_PARAM_GET_EEPROM_BOARD_ID 0x10
+
+#define ATH10K_BMI_BOARD_ID_FROM_OTP_MASK   0x7c00
+#define ATH10K_BMI_BOARD_ID_FROM_OTP_LSB    10
+
+#define ATH10K_BMI_CHIP_ID_FROM_OTP_MASK    0x18000
+#define ATH10K_BMI_CHIP_ID_FROM_OTP_LSB     15
+
+#define ATH10K_BMI_BOARD_ID_STATUS_MASK 0xff
+
+struct bmi_cmd {
+	__le32 id; /* enum bmi_cmd_id */
+	union {
+		struct {
+		} done;
+		struct {
+			__le32 addr;
+			__le32 len;
+		} read_mem;
+		struct {
+			__le32 addr;
+			__le32 len;
+			u8 payload[0];
+		} write_mem;
+		struct {
+			__le32 addr;
+			__le32 param;
+		} execute;
+		struct {
+			__le32 addr;
+		} set_app_start;
+		struct {
+			__le32 addr;
+		} read_soc_reg;
+		struct {
+			__le32 addr;
+			__le32 value;
+		} write_soc_reg;
+		struct {
+		} get_target_info;
+		struct {
+			__le32 rom_addr;
+			__le32 ram_addr; /* or value */
+			__le32 size;
+			__le32 activate; /* 0=install, but don't activate */
+		} rompatch_install;
+		struct {
+			__le32 patch_id;
+		} rompatch_uninstall;
+		struct {
+			__le32 count;
+			__le32 patch_ids[0]; /* length of @count */
+		} rompatch_activate;
+		struct {
+			__le32 count;
+			__le32 patch_ids[0]; /* length of @count */
+		} rompatch_deactivate;
+		struct {
+			__le32 addr;
+		} lz_start;
+		struct {
+			__le32 len; /* max BMI_MAX_DATA_SIZE */
+			u8 payload[0]; /* length of @len */
+		} lz_data;
+		struct {
+			u8 name[BMI_NVRAM_SEG_NAME_SZ];
+		} nvram_process;
+		u8 payload[BMI_MAX_CMDBUF_SIZE];
+	};
+} __packed;
+
+union bmi_resp {
+	struct {
+		u8 payload[0];
+	} read_mem;
+	struct {
+		__le32 result;
+	} execute;
+	struct {
+		__le32 value;
+	} read_soc_reg;
+	struct {
+		__le32 len;
+		__le32 version;
+		__le32 type;
+	} get_target_info;
+	struct {
+		__le32 patch_id;
+	} rompatch_install;
+	struct {
+		__le32 patch_id;
+	} rompatch_uninstall;
+	struct {
+		/* 0 = nothing executed
+		 * otherwise = NVRAM segment return value */
+		__le32 result;
+	} nvram_process;
+	u8 payload[BMI_MAX_CMDBUF_SIZE];
+} __packed;
+
+struct bmi_target_info {
+	u32 version;
+	u32 type;
+};
+
+/* in msec */
+#define BMI_COMMUNICATION_TIMEOUT_HZ (2 * HZ)
+
+#define BMI_CE_NUM_TO_TARG 0
+#define BMI_CE_NUM_TO_HOST 1
+
+void ath10k_bmi_start(struct ath10k *ar);
+int ath10k_bmi_done(struct ath10k *ar);
+int ath10k_bmi_get_target_info(struct ath10k *ar,
+			       struct bmi_target_info *target_info);
+int ath10k_bmi_read_memory(struct ath10k *ar, u32 address,
+			   void *buffer, u32 length);
+int ath10k_bmi_write_memory(struct ath10k *ar, u32 address,
+			    const void *buffer, u32 length);
+
+#define ath10k_bmi_read32(ar, item, val)				\
+	({								\
+		int ret;						\
+		u32 addr;						\
+		__le32 tmp;						\
+									\
+		addr = host_interest_item_address(HI_ITEM(item));	\
+		ret = ath10k_bmi_read_memory(ar, addr, (u8 *)&tmp, 4); \
+		if (!ret)						\
+			*val = __le32_to_cpu(tmp);			\
+		ret;							\
+	 })
+
+#define ath10k_bmi_write32(ar, item, val)				\
+	({								\
+		int ret;						\
+		u32 address;						\
+		__le32 v = __cpu_to_le32(val);				\
+									\
+		address = host_interest_item_address(HI_ITEM(item));	\
+		ret = ath10k_bmi_write_memory(ar, address,		\
+					      (u8 *)&v, sizeof(v));	\
+		ret;							\
+	})
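+
+/*
+ * Example use of the helpers above (illustrative; assumes a host_interest
+ * member such as hi_board_data):
+ *
+ *	u32 board_data_addr;
+ *	int ret = ath10k_bmi_read32(ar, hi_board_data, &board_data_addr);
+ */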
+
+int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result);
+int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address);
+int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length);
+int ath10k_bmi_fast_download(struct ath10k *ar, u32 address,
+			     const void *buffer, u32 length);
+#endif /* _BMI_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
new file mode 100644
index 0000000..edf3629
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -0,0 +1,1123 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hif.h"
+#include "pci.h"
+#include "ce.h"
+#include "debug.h"
+
+/*
+ * Support for Copy Engine hardware, which is mainly used for
+ * communication between Host and Target over a PCIe interconnect.
+ */
+
+/*
+ * A single CopyEngine (CE) comprises two "rings":
+ *   a source ring
+ *   a destination ring
+ *
+ * Each ring consists of a number of descriptors which specify
+ * an address, length, and meta-data.
+ *
+ * Typically, one side of the PCIe interconnect (Host or Target)
+ * controls one ring and the other side controls the other ring.
+ * The source side chooses when to initiate a transfer and it
+ * chooses what to send (buffer address, length). The destination
+ * side keeps a supply of "anonymous receive buffers" available and
+ * it handles incoming data as it arrives (when the destination
+ * receives an interrupt).
+ *
+ * The sender may send a simple buffer (address/length) or it may
+ * send a small list of buffers.  When a small list is sent, hardware
+ * "gathers" these and they end up in a single destination buffer
+ * with a single interrupt.
+ *
+ * There are several "contexts" managed by this layer -- more, it
+ * may seem, than should be needed. These are provided mainly for
+ * maximum flexibility and especially to facilitate a simpler HIF
+ * implementation. There are per-CopyEngine recv, send, and watermark
+ * contexts. These are supplied by the caller when a recv, send,
+ * or watermark handler is established and they are echoed back to
+ * the caller when the respective callbacks are invoked. There is
+ * also a per-transfer context supplied by the caller when a buffer
+ * (or sendlist) is sent and when a buffer is enqueued for recv.
+ * These per-transfer contexts are echoed back to the caller when
+ * the buffer is sent/received.
+ */
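+
+/*
+ * Illustrative usage of the send/recv API in this file (a sketch only;
+ * pipe setup, DMA mapping and error handling are omitted and the variable
+ * names are placeholders):
+ *
+ *	ath10k_ce_send(tx_pipe, tx_ctx, paddr, nbytes, transfer_id, 0);
+ *	ath10k_ce_completed_send_next(tx_pipe, &tx_ctx);
+ *
+ *	ath10k_ce_rx_post_buf(rx_pipe, rx_ctx, rx_paddr);
+ *	ath10k_ce_completed_recv_next(rx_pipe, &rx_ctx, &rx_paddr, &nbytes,
+ *				      &transfer_id, &flags);
+ */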
+
+static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar,
+						       u32 ce_ctrl_addr,
+						       unsigned int n)
+{
+	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS, n);
+}
+
+static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar,
+						      u32 ce_ctrl_addr)
+{
+	return ath10k_pci_read32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS);
+}
+
+static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
+						      u32 ce_ctrl_addr,
+						      unsigned int n)
+{
+	ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
+}
+
+static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
+						     u32 ce_ctrl_addr)
+{
+	return ath10k_pci_read32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS);
+}
+
+static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
+						    u32 ce_ctrl_addr)
+{
+	return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_SRRI_ADDRESS);
+}
+
+static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
+						    u32 ce_ctrl_addr,
+						    unsigned int addr)
+{
+	ath10k_pci_write32(ar, ce_ctrl_addr + SR_BA_ADDRESS, addr);
+}
+
+static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
+					       u32 ce_ctrl_addr,
+					       unsigned int n)
+{
+	ath10k_pci_write32(ar, ce_ctrl_addr + SR_SIZE_ADDRESS, n);
+}
+
+static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
+					       u32 ce_ctrl_addr,
+					       unsigned int n)
+{
+	u32 ctrl1_addr = ath10k_pci_read32((ar),
+					   (ce_ctrl_addr) + CE_CTRL1_ADDRESS);
+
+	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
+			   (ctrl1_addr &  ~CE_CTRL1_DMAX_LENGTH_MASK) |
+			   CE_CTRL1_DMAX_LENGTH_SET(n));
+}
+
+static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
+						    u32 ce_ctrl_addr,
+						    unsigned int n)
+{
+	u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);
+
+	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
+			   (ctrl1_addr & ~CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) |
+			   CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(n));
+}
+
+static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
+						     u32 ce_ctrl_addr,
+						     unsigned int n)
+{
+	u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);
+
+	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
+			   (ctrl1_addr & ~CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) |
+			   CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(n));
+}
+
+static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
+						     u32 ce_ctrl_addr)
+{
+	return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_DRRI_ADDRESS);
+}
+
+static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
+						     u32 ce_ctrl_addr,
+						     u32 addr)
+{
+	ath10k_pci_write32(ar, ce_ctrl_addr + DR_BA_ADDRESS, addr);
+}
+
+static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
+						u32 ce_ctrl_addr,
+						unsigned int n)
+{
+	ath10k_pci_write32(ar, ce_ctrl_addr + DR_SIZE_ADDRESS, n);
+}
+
+static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
+						   u32 ce_ctrl_addr,
+						   unsigned int n)
+{
+	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);
+
+	ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
+			   (addr & ~SRC_WATERMARK_HIGH_MASK) |
+			   SRC_WATERMARK_HIGH_SET(n));
+}
+
+static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
+						  u32 ce_ctrl_addr,
+						  unsigned int n)
+{
+	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);
+
+	ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
+			   (addr & ~SRC_WATERMARK_LOW_MASK) |
+			   SRC_WATERMARK_LOW_SET(n));
+}
+
+static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
+						    u32 ce_ctrl_addr,
+						    unsigned int n)
+{
+	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);
+
+	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
+			   (addr & ~DST_WATERMARK_HIGH_MASK) |
+			   DST_WATERMARK_HIGH_SET(n));
+}
+
+static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
+						   u32 ce_ctrl_addr,
+						   unsigned int n)
+{
+	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);
+
+	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
+			   (addr & ~DST_WATERMARK_LOW_MASK) |
+			   DST_WATERMARK_LOW_SET(n));
+}
+
+static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar,
+							u32 ce_ctrl_addr)
+{
+	u32 host_ie_addr = ath10k_pci_read32(ar,
+					     ce_ctrl_addr + HOST_IE_ADDRESS);
+
+	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
+			   host_ie_addr | HOST_IE_COPY_COMPLETE_MASK);
+}
+
+static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
+							u32 ce_ctrl_addr)
+{
+	u32 host_ie_addr = ath10k_pci_read32(ar,
+					     ce_ctrl_addr + HOST_IE_ADDRESS);
+
+	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
+			   host_ie_addr & ~HOST_IE_COPY_COMPLETE_MASK);
+}
+
+static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
+						    u32 ce_ctrl_addr)
+{
+	u32 host_ie_addr = ath10k_pci_read32(ar,
+					     ce_ctrl_addr + HOST_IE_ADDRESS);
+
+	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
+			   host_ie_addr & ~CE_WATERMARK_MASK);
+}
+
+static inline void ath10k_ce_error_intr_enable(struct ath10k *ar,
+					       u32 ce_ctrl_addr)
+{
+	u32 misc_ie_addr = ath10k_pci_read32(ar,
+					     ce_ctrl_addr + MISC_IE_ADDRESS);
+
+	ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
+			   misc_ie_addr | CE_ERROR_MASK);
+}
+
+static inline void ath10k_ce_error_intr_disable(struct ath10k *ar,
+						u32 ce_ctrl_addr)
+{
+	u32 misc_ie_addr = ath10k_pci_read32(ar,
+					     ce_ctrl_addr + MISC_IE_ADDRESS);
+
+	ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
+			   misc_ie_addr & ~CE_ERROR_MASK);
+}
+
+static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
+						     u32 ce_ctrl_addr,
+						     unsigned int mask)
+{
+	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IS_ADDRESS, mask);
+}
+
+/*
+ * Guts of ath10k_ce_send, used by both ath10k_ce_send and
+ * ath10k_ce_sendlist_send.
+ * The caller takes responsibility for any needed locking.
+ */
+int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
+			  void *per_transfer_context,
+			  u32 buffer,
+			  unsigned int nbytes,
+			  unsigned int transfer_id,
+			  unsigned int flags)
+{
+	struct ath10k *ar = ce_state->ar;
+	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
+	struct ce_desc *desc, sdesc;
+	unsigned int nentries_mask = src_ring->nentries_mask;
+	unsigned int sw_index = src_ring->sw_index;
+	unsigned int write_index = src_ring->write_index;
+	u32 ctrl_addr = ce_state->ctrl_addr;
+	u32 desc_flags = 0;
+	int ret = 0;
+
+	if (nbytes > ce_state->src_sz_max)
+		ath10k_warn(ar, "%s: sending more than we can (nbytes: %d, max: %d)\n",
+			    __func__, nbytes, ce_state->src_sz_max);
+
+	if (unlikely(CE_RING_DELTA(nentries_mask,
+				   write_index, sw_index - 1) <= 0)) {
+		ret = -ENOSR;
+		goto exit;
+	}
+
+	desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
+				   write_index);
+
+	desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);
+
+	if (flags & CE_SEND_FLAG_GATHER)
+		desc_flags |= CE_DESC_FLAGS_GATHER;
+	if (flags & CE_SEND_FLAG_BYTE_SWAP)
+		desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;
+
+	sdesc.addr   = __cpu_to_le32(buffer);
+	sdesc.nbytes = __cpu_to_le16(nbytes);
+	sdesc.flags  = __cpu_to_le16(desc_flags);
+
+	*desc = sdesc;
+
+	src_ring->per_transfer_context[write_index] = per_transfer_context;
+
+	/* Update Source Ring Write Index */
+	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+
+	/* WORKAROUND */
+	if (!(flags & CE_SEND_FLAG_GATHER))
+		ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);
+
+	src_ring->write_index = write_index;
+exit:
+	return ret;
+}
+
+void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
+{
+	struct ath10k *ar = pipe->ar;
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct ath10k_ce_ring *src_ring = pipe->src_ring;
+	u32 ctrl_addr = pipe->ctrl_addr;
+
+	lockdep_assert_held(&ar_pci->ce_lock);
+
+	/*
+	 * This function must be called only if there is an incomplete
+	 * scatter-gather transfer (before index register is updated)
+	 * that needs to be cleaned up.
+	 */
+	if (WARN_ON_ONCE(src_ring->write_index == src_ring->sw_index))
+		return;
+
+	if (WARN_ON_ONCE(src_ring->write_index ==
+			 ath10k_ce_src_ring_write_index_get(ar, ctrl_addr)))
+		return;
+
+	src_ring->write_index--;
+	src_ring->write_index &= src_ring->nentries_mask;
+
+	src_ring->per_transfer_context[src_ring->write_index] = NULL;
+}
+
+int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
+		   void *per_transfer_context,
+		   u32 buffer,
+		   unsigned int nbytes,
+		   unsigned int transfer_id,
+		   unsigned int flags)
+{
+	struct ath10k *ar = ce_state->ar;
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int ret;
+
+	spin_lock_bh(&ar_pci->ce_lock);
+	ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
+				    buffer, nbytes, transfer_id, flags);
+	spin_unlock_bh(&ar_pci->ce_lock);
+
+	return ret;
+}
+
+int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
+{
+	struct ath10k *ar = pipe->ar;
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int delta;
+
+	spin_lock_bh(&ar_pci->ce_lock);
+	delta = CE_RING_DELTA(pipe->src_ring->nentries_mask,
+			      pipe->src_ring->write_index,
+			      pipe->src_ring->sw_index - 1);
+	spin_unlock_bh(&ar_pci->ce_lock);
+
+	return delta;
+}
+
+int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe)
+{
+	struct ath10k *ar = pipe->ar;
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
+	unsigned int nentries_mask = dest_ring->nentries_mask;
+	unsigned int write_index = dest_ring->write_index;
+	unsigned int sw_index = dest_ring->sw_index;
+
+	lockdep_assert_held(&ar_pci->ce_lock);
+
+	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
+}
+
+int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
+{
+	struct ath10k *ar = pipe->ar;
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
+	unsigned int nentries_mask = dest_ring->nentries_mask;
+	unsigned int write_index = dest_ring->write_index;
+	unsigned int sw_index = dest_ring->sw_index;
+	struct ce_desc *base = dest_ring->base_addr_owner_space;
+	struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);
+	u32 ctrl_addr = pipe->ctrl_addr;
+
+	lockdep_assert_held(&ar_pci->ce_lock);
+
+	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
+		return -ENOSPC;
+
+	desc->addr = __cpu_to_le32(paddr);
+	desc->nbytes = 0;
+
+	dest_ring->per_transfer_context[write_index] = ctx;
+	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+	ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
+	dest_ring->write_index = write_index;
+
+	return 0;
+}
+
+int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
+{
+	struct ath10k *ar = pipe->ar;
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int ret;
+
+	spin_lock_bh(&ar_pci->ce_lock);
+	ret = __ath10k_ce_rx_post_buf(pipe, ctx, paddr);
+	spin_unlock_bh(&ar_pci->ce_lock);
+
+	return ret;
+}
+
+/*
+ * Guts of ath10k_ce_completed_recv_next.
+ * The caller takes responsibility for any necessary locking.
+ */
+int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
+					 void **per_transfer_contextp,
+					 u32 *bufferp,
+					 unsigned int *nbytesp,
+					 unsigned int *transfer_idp,
+					 unsigned int *flagsp)
+{
+	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
+	unsigned int nentries_mask = dest_ring->nentries_mask;
+	struct ath10k *ar = ce_state->ar;
+	unsigned int sw_index = dest_ring->sw_index;
+
+	struct ce_desc *base = dest_ring->base_addr_owner_space;
+	struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
+	struct ce_desc sdesc;
+	u16 nbytes;
+
+	/* Copy in one go for performance reasons */
+	sdesc = *desc;
+
+	nbytes = __le16_to_cpu(sdesc.nbytes);
+	if (nbytes == 0) {
+		/*
+		 * This closes a relatively unusual race where the Host
+		 * sees the updated DRRI before the update to the
+		 * corresponding descriptor has completed. We treat this
+		 * as a descriptor that is not yet done.
+		 */
+		return -EIO;
+	}
+
+	desc->nbytes = 0;
+
+	/* Return data from completed destination descriptor */
+	*bufferp = __le32_to_cpu(sdesc.addr);
+	*nbytesp = nbytes;
+	*transfer_idp = MS(__le16_to_cpu(sdesc.flags), CE_DESC_FLAGS_META_DATA);
+
+	if (__le16_to_cpu(sdesc.flags) & CE_DESC_FLAGS_BYTE_SWAP)
+		*flagsp = CE_RECV_FLAG_SWAPPED;
+	else
+		*flagsp = 0;
+
+	if (per_transfer_contextp)
+		*per_transfer_contextp =
+			dest_ring->per_transfer_context[sw_index];
+
+	/* sanity */
+	dest_ring->per_transfer_context[sw_index] = NULL;
+
+	/* Update sw_index */
+	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+	dest_ring->sw_index = sw_index;
+
+	return 0;
+}
+
+int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
+				  void **per_transfer_contextp,
+				  u32 *bufferp,
+				  unsigned int *nbytesp,
+				  unsigned int *transfer_idp,
+				  unsigned int *flagsp)
+{
+	struct ath10k *ar = ce_state->ar;
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int ret;
+
+	spin_lock_bh(&ar_pci->ce_lock);
+	ret = ath10k_ce_completed_recv_next_nolock(ce_state,
+						   per_transfer_contextp,
+						   bufferp, nbytesp,
+						   transfer_idp, flagsp);
+	spin_unlock_bh(&ar_pci->ce_lock);
+
+	return ret;
+}
+
+int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
+			       void **per_transfer_contextp,
+			       u32 *bufferp)
+{
+	struct ath10k_ce_ring *dest_ring;
+	unsigned int nentries_mask;
+	unsigned int sw_index;
+	unsigned int write_index;
+	int ret;
+	struct ath10k *ar;
+	struct ath10k_pci *ar_pci;
+
+	dest_ring = ce_state->dest_ring;
+
+	if (!dest_ring)
+		return -EIO;
+
+	ar = ce_state->ar;
+	ar_pci = ath10k_pci_priv(ar);
+
+	spin_lock_bh(&ar_pci->ce_lock);
+
+	nentries_mask = dest_ring->nentries_mask;
+	sw_index = dest_ring->sw_index;
+	write_index = dest_ring->write_index;
+	if (write_index != sw_index) {
+		struct ce_desc *base = dest_ring->base_addr_owner_space;
+		struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
+
+		/* Return data from completed destination descriptor */
+		*bufferp = __le32_to_cpu(desc->addr);
+
+		if (per_transfer_contextp)
+			*per_transfer_contextp =
+				dest_ring->per_transfer_context[sw_index];
+
+		/* sanity */
+		dest_ring->per_transfer_context[sw_index] = NULL;
+		desc->nbytes = 0;
+
+		/* Update sw_index */
+		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+		dest_ring->sw_index = sw_index;
+		ret = 0;
+	} else {
+		ret = -EIO;
+	}
+
+	spin_unlock_bh(&ar_pci->ce_lock);
+
+	return ret;
+}
+
+/*
+ * Guts of ath10k_ce_completed_send_next.
+ * The caller takes responsibility for any necessary locking.
+ */
+int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
+					 void **per_transfer_contextp)
+{
+	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
+	u32 ctrl_addr = ce_state->ctrl_addr;
+	struct ath10k *ar = ce_state->ar;
+	unsigned int nentries_mask = src_ring->nentries_mask;
+	unsigned int sw_index = src_ring->sw_index;
+	unsigned int read_index;
+
+	if (src_ring->hw_index == sw_index) {
+		/*
+		 * The SW completion index has caught up with the cached
+		 * version of the HW completion index.
+		 * Update the cached HW completion index to see whether
+		 * the SW has really caught up to the HW, or if the cached
+		 * value of the HW index has become stale.
+		 */
+
+		read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
+		if (read_index == 0xffffffff)
+			return -ENODEV;
+
+		read_index &= nentries_mask;
+		src_ring->hw_index = read_index;
+	}
+
+	read_index = src_ring->hw_index;
+
+	if (read_index == sw_index)
+		return -EIO;
+
+	if (per_transfer_contextp)
+		*per_transfer_contextp =
+			src_ring->per_transfer_context[sw_index];
+
+	/* sanity */
+	src_ring->per_transfer_context[sw_index] = NULL;
+
+	/* Update sw_index */
+	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+	src_ring->sw_index = sw_index;
+
+	return 0;
+}
+
+/* NB: Modeled after ath10k_ce_completed_send_next */
+int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
+			       void **per_transfer_contextp,
+			       u32 *bufferp,
+			       unsigned int *nbytesp,
+			       unsigned int *transfer_idp)
+{
+	struct ath10k_ce_ring *src_ring;
+	unsigned int nentries_mask;
+	unsigned int sw_index;
+	unsigned int write_index;
+	int ret;
+	struct ath10k *ar;
+	struct ath10k_pci *ar_pci;
+
+	src_ring = ce_state->src_ring;
+
+	if (!src_ring)
+		return -EIO;
+
+	ar = ce_state->ar;
+	ar_pci = ath10k_pci_priv(ar);
+
+	spin_lock_bh(&ar_pci->ce_lock);
+
+	nentries_mask = src_ring->nentries_mask;
+	sw_index = src_ring->sw_index;
+	write_index = src_ring->write_index;
+
+	if (write_index != sw_index) {
+		struct ce_desc *base = src_ring->base_addr_owner_space;
+		struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);
+
+		/* Return data from completed source descriptor */
+		*bufferp = __le32_to_cpu(desc->addr);
+		*nbytesp = __le16_to_cpu(desc->nbytes);
+		*transfer_idp = MS(__le16_to_cpu(desc->flags),
+						CE_DESC_FLAGS_META_DATA);
+
+		if (per_transfer_contextp)
+			*per_transfer_contextp =
+				src_ring->per_transfer_context[sw_index];
+
+		/* sanity */
+		src_ring->per_transfer_context[sw_index] = NULL;
+
+		/* Update sw_index */
+		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+		src_ring->sw_index = sw_index;
+		ret = 0;
+	} else {
+		ret = -EIO;
+	}
+
+	spin_unlock_bh(&ar_pci->ce_lock);
+
+	return ret;
+}
+
+int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
+				  void **per_transfer_contextp)
+{
+	struct ath10k *ar = ce_state->ar;
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int ret;
+
+	spin_lock_bh(&ar_pci->ce_lock);
+	ret = ath10k_ce_completed_send_next_nolock(ce_state,
+						   per_transfer_contextp);
+	spin_unlock_bh(&ar_pci->ce_lock);
+
+	return ret;
+}
+
+/*
+ * Guts of interrupt handler for per-engine interrupts on a particular CE.
+ *
+ * Invokes registered callbacks for recv_complete,
+ * send_complete, and watermarks.
+ */
+void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+	u32 ctrl_addr = ce_state->ctrl_addr;
+
+	spin_lock_bh(&ar_pci->ce_lock);
+
+	/* Clear the copy-complete interrupts that will be handled here. */
+	ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
+					  HOST_IS_COPY_COMPLETE_MASK);
+
+	spin_unlock_bh(&ar_pci->ce_lock);
+
+	if (ce_state->recv_cb)
+		ce_state->recv_cb(ce_state);
+
+	if (ce_state->send_cb)
+		ce_state->send_cb(ce_state);
+
+	spin_lock_bh(&ar_pci->ce_lock);
+
+	/*
+	 * Misc CE interrupts are not being handled, but still need
+	 * to be cleared.
+	 */
+	ath10k_ce_engine_int_status_clear(ar, ctrl_addr, CE_WATERMARK_MASK);
+
+	spin_unlock_bh(&ar_pci->ce_lock);
+}
+
+/*
+ * Handler for per-engine interrupts on ALL active CEs.
+ * This is used in cases where the system is sharing a
+ * single interrupt for all CEs.
+ */
+
+void ath10k_ce_per_engine_service_any(struct ath10k *ar)
+{
+	int ce_id;
+	u32 intr_summary;
+
+	intr_summary = CE_INTERRUPT_SUMMARY(ar);
+
+	for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) {
+		if (intr_summary & (1 << ce_id))
+			intr_summary &= ~(1 << ce_id);
+		else
+			/* no intr pending on this CE */
+			continue;
+
+		ath10k_ce_per_engine_service(ar, ce_id);
+	}
+}
+
+/*
+ * Adjust interrupts for the copy complete handler.
+ * If it's needed for either send or recv, then unmask
+ * this interrupt; otherwise, mask it.
+ *
+ * Called with ce_lock held.
+ */
+static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state)
+{
+	u32 ctrl_addr = ce_state->ctrl_addr;
+	struct ath10k *ar = ce_state->ar;
+	bool disable_copy_compl_intr = ce_state->attr_flags & CE_ATTR_DIS_INTR;
+
+	if ((!disable_copy_compl_intr) &&
+	    (ce_state->send_cb || ce_state->recv_cb))
+		ath10k_ce_copy_complete_inter_enable(ar, ctrl_addr);
+	else
+		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
+
+	ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
+}
+
+int ath10k_ce_disable_interrupts(struct ath10k *ar)
+{
+	int ce_id;
+
+	for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
+		u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+
+		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
+		ath10k_ce_error_intr_disable(ar, ctrl_addr);
+		ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
+	}
+
+	return 0;
+}
+
+void ath10k_ce_enable_interrupts(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int ce_id;
+
+	/* Skip the last copy engine, CE7, the diagnostic window, as it
+	 * uses polling and isn't initialized for interrupts.
+	 */
+	for (ce_id = 0; ce_id < CE_COUNT - 1; ce_id++)
+		ath10k_ce_per_engine_handler_adjust(&ar_pci->ce_states[ce_id]);
+}
+
+static int ath10k_ce_init_src_ring(struct ath10k *ar,
+				   unsigned int ce_id,
+				   const struct ce_attr *attr)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
+	u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+
+	nentries = roundup_pow_of_two(attr->src_nentries);
+
+	memset(src_ring->base_addr_owner_space, 0,
+	       nentries * sizeof(struct ce_desc));
+
+	src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
+	src_ring->sw_index &= src_ring->nentries_mask;
+	src_ring->hw_index = src_ring->sw_index;
+
+	src_ring->write_index =
+		ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
+	src_ring->write_index &= src_ring->nentries_mask;
+
+	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
+					 src_ring->base_addr_ce_space);
+	ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
+	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
+	ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
+	ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
+	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT,
+		   "boot init ce src ring id %d entries %d base_addr %p\n",
+		   ce_id, nentries, src_ring->base_addr_owner_space);
+
+	return 0;
+}
+
+static int ath10k_ce_init_dest_ring(struct ath10k *ar,
+				    unsigned int ce_id,
+				    const struct ce_attr *attr)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
+	u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+
+	nentries = roundup_pow_of_two(attr->dest_nentries);
+
+	memset(dest_ring->base_addr_owner_space, 0,
+	       nentries * sizeof(struct ce_desc));
+
+	dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
+	dest_ring->sw_index &= dest_ring->nentries_mask;
+	dest_ring->write_index =
+		ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
+	dest_ring->write_index &= dest_ring->nentries_mask;
+
+	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
+					  dest_ring->base_addr_ce_space);
+	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
+	ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
+	ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
+	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT,
+		   "boot ce dest ring id %d entries %d base_addr %p\n",
+		   ce_id, nentries, dest_ring->base_addr_owner_space);
+
+	return 0;
+}
+
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
+			 const struct ce_attr *attr)
+{
+	struct ath10k_ce_ring *src_ring;
+	u32 nentries = attr->src_nentries;
+	dma_addr_t base_addr;
+
+	nentries = roundup_pow_of_two(nentries);
+
+	src_ring = kzalloc(sizeof(*src_ring) +
+			   (nentries *
+			    sizeof(*src_ring->per_transfer_context)),
+			   GFP_KERNEL);
+	if (src_ring == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	src_ring->nentries = nentries;
+	src_ring->nentries_mask = nentries - 1;
+
+	/*
+	 * Legacy platforms that do not support cache
+	 * coherent DMA are unsupported
+	 */
+	src_ring->base_addr_owner_space_unaligned =
+		dma_alloc_coherent(ar->dev,
+				   (nentries * sizeof(struct ce_desc) +
+				    CE_DESC_RING_ALIGN),
+				   &base_addr, GFP_KERNEL);
+	if (!src_ring->base_addr_owner_space_unaligned) {
+		kfree(src_ring);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	src_ring->base_addr_ce_space_unaligned = base_addr;
+
+	src_ring->base_addr_owner_space = PTR_ALIGN(
+			src_ring->base_addr_owner_space_unaligned,
+			CE_DESC_RING_ALIGN);
+	src_ring->base_addr_ce_space = ALIGN(
+			src_ring->base_addr_ce_space_unaligned,
+			CE_DESC_RING_ALIGN);
+
+	return src_ring;
+}
+
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
+			  const struct ce_attr *attr)
+{
+	struct ath10k_ce_ring *dest_ring;
+	u32 nentries;
+	dma_addr_t base_addr;
+
+	nentries = roundup_pow_of_two(attr->dest_nentries);
+
+	dest_ring = kzalloc(sizeof(*dest_ring) +
+			    (nentries *
+			     sizeof(*dest_ring->per_transfer_context)),
+			    GFP_KERNEL);
+	if (dest_ring == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	dest_ring->nentries = nentries;
+	dest_ring->nentries_mask = nentries - 1;
+
+	/*
+	 * Legacy platforms that do not support cache
+	 * coherent DMA are unsupported
+	 */
+	dest_ring->base_addr_owner_space_unaligned =
+		dma_alloc_coherent(ar->dev,
+				   (nentries * sizeof(struct ce_desc) +
+				    CE_DESC_RING_ALIGN),
+				   &base_addr, GFP_KERNEL);
+	if (!dest_ring->base_addr_owner_space_unaligned) {
+		kfree(dest_ring);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	dest_ring->base_addr_ce_space_unaligned = base_addr;
+
+	/*
+	 * Initialize the memory to 0 to prevent garbage data from
+	 * crashing the system when downloading firmware
+	 */
+	memset(dest_ring->base_addr_owner_space_unaligned, 0,
+	       nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN);
+
+	dest_ring->base_addr_owner_space = PTR_ALIGN(
+			dest_ring->base_addr_owner_space_unaligned,
+			CE_DESC_RING_ALIGN);
+	dest_ring->base_addr_ce_space = ALIGN(
+			dest_ring->base_addr_ce_space_unaligned,
+			CE_DESC_RING_ALIGN);
+
+	return dest_ring;
+}
+
+/*
+ * Initialize a Copy Engine based on caller-supplied attributes.
+ * This may be called once to initialize both source and destination
+ * rings or it may be called twice for separate source and destination
+ * initialization. It may be that only one side or the other is
+ * initialized by software/firmware.
+ */
+int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
+			const struct ce_attr *attr)
+{
+	int ret;
+
+	if (attr->src_nentries) {
+		ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
+		if (ret) {
+			ath10k_err(ar, "Failed to initialize CE src ring for ID: %d (%d)\n",
+				   ce_id, ret);
+			return ret;
+		}
+	}
+
+	if (attr->dest_nentries) {
+		ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
+		if (ret) {
+			ath10k_err(ar, "Failed to initialize CE dest ring for ID: %d (%d)\n",
+				   ce_id, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
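+
+/*
+ * Typical call order for the pipe helpers in this file (illustrative; the
+ * ce_attr table itself is supplied by the bus specific code):
+ *
+ *	ath10k_ce_alloc_pipe(ar, ce_id, attr);	allocate the rings once
+ *	ath10k_ce_init_pipe(ar, ce_id, attr);	program the HW on power up
+ *	...
+ *	ath10k_ce_deinit_pipe(ar, ce_id);	clear the HW registers
+ *	ath10k_ce_free_pipe(ar, ce_id);		release the rings
+ */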
+
+static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
+{
+	u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+
+	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0);
+	ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
+	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0);
+	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0);
+}
+
+static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
+{
+	u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+
+	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0);
+	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
+	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0);
+}
+
+void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
+{
+	ath10k_ce_deinit_src_ring(ar, ce_id);
+	ath10k_ce_deinit_dest_ring(ar, ce_id);
+}
+
+int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
+			 const struct ce_attr *attr)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+	int ret;
+
+	/*
+	 * Make sure there's enough CE ringbuffer entries for HTT TX to avoid
+	 * additional TX locking checks.
+	 *
+	 * For the lack of a better place do the check here.
+	 */
+	BUILD_BUG_ON(2*TARGET_NUM_MSDU_DESC >
+		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
+	BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC >
+		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
+	BUILD_BUG_ON(2*TARGET_TLV_NUM_MSDU_DESC >
+		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
+
+	ce_state->ar = ar;
+	ce_state->id = ce_id;
+	ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+	ce_state->attr_flags = attr->flags;
+	ce_state->src_sz_max = attr->src_sz_max;
+
+	if (attr->src_nentries)
+		ce_state->send_cb = attr->send_cb;
+
+	if (attr->dest_nentries)
+		ce_state->recv_cb = attr->recv_cb;
+
+	if (attr->src_nentries) {
+		ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
+		if (IS_ERR(ce_state->src_ring)) {
+			ret = PTR_ERR(ce_state->src_ring);
+			ath10k_err(ar, "failed to allocate copy engine source ring %d: %d\n",
+				   ce_id, ret);
+			ce_state->src_ring = NULL;
+			return ret;
+		}
+	}
+
+	if (attr->dest_nentries) {
+		ce_state->dest_ring = ath10k_ce_alloc_dest_ring(ar, ce_id,
+								attr);
+		if (IS_ERR(ce_state->dest_ring)) {
+			ret = PTR_ERR(ce_state->dest_ring);
+			ath10k_err(ar, "failed to allocate copy engine destination ring %d: %d\n",
+				   ce_id, ret);
+			ce_state->dest_ring = NULL;
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+
+	if (ce_state->src_ring) {
+		dma_free_coherent(ar->dev,
+				  (ce_state->src_ring->nentries *
+				   sizeof(struct ce_desc) +
+				   CE_DESC_RING_ALIGN),
+				  ce_state->src_ring->base_addr_owner_space,
+				  ce_state->src_ring->base_addr_ce_space);
+		kfree(ce_state->src_ring);
+	}
+
+	if (ce_state->dest_ring) {
+		dma_free_coherent(ar->dev,
+				  (ce_state->dest_ring->nentries *
+				   sizeof(struct ce_desc) +
+				   CE_DESC_RING_ALIGN),
+				  ce_state->dest_ring->base_addr_owner_space,
+				  ce_state->dest_ring->base_addr_ce_space);
+		kfree(ce_state->dest_ring);
+	}
+
+	ce_state->src_ring = NULL;
+	ce_state->dest_ring = NULL;
+}
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
new file mode 100644
index 0000000..47b734c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -0,0 +1,434 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _CE_H_
+#define _CE_H_
+
+#include "hif.h"
+
+/* Maximum number of Copy Engines supported */
+#define CE_COUNT_MAX 12
+#define CE_HTT_H2T_MSG_SRC_NENTRIES 4096
+
+/* Descriptor rings must be aligned to this boundary */
+#define CE_DESC_RING_ALIGN	8
+#define CE_SEND_FLAG_GATHER	0x00010000
+
+/*
+ * Copy Engine support: low-level Target-side Copy Engine API.
+ * This is a hardware access layer used by code that understands
+ * how to use copy engines.
+ */
+
+struct ath10k_ce_pipe;
+
+#define CE_DESC_FLAGS_GATHER         (1 << 0)
+#define CE_DESC_FLAGS_BYTE_SWAP      (1 << 1)
+
+/* Following desc flags are used in QCA99X0 */
+#define CE_DESC_FLAGS_HOST_INT_DIS	(1 << 2)
+#define CE_DESC_FLAGS_TGT_INT_DIS	(1 << 3)
+
+#define CE_DESC_FLAGS_META_DATA_MASK ar->hw_values->ce_desc_meta_data_mask
+#define CE_DESC_FLAGS_META_DATA_LSB  ar->hw_values->ce_desc_meta_data_lsb
+
+struct ce_desc {
+	__le32 addr;
+	__le16 nbytes;
+	__le16 flags; /* %CE_DESC_FLAGS_ */
+};
+
+struct ath10k_ce_ring {
+	/* Number of entries in this ring; must be power of 2 */
+	unsigned int nentries;
+	unsigned int nentries_mask;
+
+	/*
+	 * For dest ring, this is the next index to be processed
+	 * by software after it was/is received into.
+	 *
+	 * For src ring, this is the last descriptor that was sent
+	 * and completion processed by software.
+	 *
+	 * Regardless of src or dest ring, this is an invariant
+	 * (modulo ring size):
+	 *     write index >= read index >= sw_index
+	 */
+	unsigned int sw_index;
+	/* cached copy */
+	unsigned int write_index;
+	/*
+	 * For src ring, this is the next index not yet processed by HW.
+	 * This is a cached copy of the real HW index (read index), used
+	 * for avoiding reading the HW index register more often than
+	 * necessary.
+	 * This extends the invariant:
+	 *     write index >= read index >= hw_index >= sw_index
+	 *
+	 * For dest ring, this is currently unused.
+	 */
+	/* cached copy */
+	unsigned int hw_index;
+
+	/* Start of DMA-coherent area reserved for descriptors */
+	/* Host address space */
+	void *base_addr_owner_space_unaligned;
+	/* CE address space */
+	u32 base_addr_ce_space_unaligned;
+
+	/*
+	 * Actual start of descriptors.
+	 * Aligned to descriptor-size boundary.
+	 * Points into reserved DMA-coherent area, above.
+	 */
+	/* Host address space */
+	void *base_addr_owner_space;
+
+	/* CE address space */
+	u32 base_addr_ce_space;
+
+	/* keep last */
+	void *per_transfer_context[0];
+};
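+
+/*
+ * Worked example of the index invariant above (illustrative): with
+ * nentries = 8 (nentries_mask = 7), sw_index = 6 and write_index = 2, the
+ * source ring still has (sw_index - 1 - write_index) & nentries_mask = 3
+ * free slots; one slot is always left unused so that a full ring can be
+ * told apart from an empty one.
+ */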
+
+struct ath10k_ce_pipe {
+	struct ath10k *ar;
+	unsigned int id;
+
+	unsigned int attr_flags;
+
+	u32 ctrl_addr;
+
+	void (*send_cb)(struct ath10k_ce_pipe *);
+	void (*recv_cb)(struct ath10k_ce_pipe *);
+
+	unsigned int src_sz_max;
+	struct ath10k_ce_ring *src_ring;
+	struct ath10k_ce_ring *dest_ring;
+};
+
+/* Copy Engine settable attributes */
+struct ce_attr;
+
+/*==================Send====================*/
+
+/* ath10k_ce_send flags */
+#define CE_SEND_FLAG_BYTE_SWAP 1
+
+/*
+ * Queue a source buffer to be sent to an anonymous destination buffer.
+ *   ce         - which copy engine to use
+ *   buffer          - address of buffer
+ *   nbytes          - number of bytes to send
+ *   transfer_id     - arbitrary ID; reflected to destination
+ *   flags           - CE_SEND_FLAG_* values
+ * Returns 0 on success; otherwise an error status.
+ *
+ * Note: If no flags are specified, use CE's default data swap mode.
+ *
+ * Implementation note: pushes 1 buffer to Source ring
+ */
+int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
+		   void *per_transfer_send_context,
+		   u32 buffer,
+		   unsigned int nbytes,
+		   /* 14 bits */
+		   unsigned int transfer_id,
+		   unsigned int flags);
+
+int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
+			  void *per_transfer_context,
+			  u32 buffer,
+			  unsigned int nbytes,
+			  unsigned int transfer_id,
+			  unsigned int flags);
+
+void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe);
+
+int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe);
+
+/*==================Recv=======================*/
+
+int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe);
+int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
+int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
+
+/* recv flags */
+/* Data is byte-swapped */
+#define CE_RECV_FLAG_SWAPPED	1
+
+/*
+ * Supply data for the next completed unprocessed receive descriptor.
+ * Pops buffer from Dest ring.
+ */
+int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
+				  void **per_transfer_contextp,
+				  u32 *bufferp,
+				  unsigned int *nbytesp,
+				  unsigned int *transfer_idp,
+				  unsigned int *flagsp);
+/*
+ * Supply data for the next completed unprocessed send descriptor.
+ * Pops 1 completed send buffer from Source ring.
+ */
+int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
+				  void **per_transfer_contextp);
+
+int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
+					 void **per_transfer_contextp);
+
+/*==================CE Engine Initialization=======================*/
+
+int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
+			const struct ce_attr *attr);
+void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id);
+int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
+			 const struct ce_attr *attr);
+void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id);
+
+/*==================CE Engine Shutdown=======================*/
+/*
+ * Support clean shutdown by allowing the caller to revoke
+ * receive buffers.  Target DMA must be stopped before using
+ * this API.
+ */
+int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
+			       void **per_transfer_contextp,
+			       u32 *bufferp);
+
+int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
+					 void **per_transfer_contextp,
+					 u32 *bufferp,
+					 unsigned int *nbytesp,
+					 unsigned int *transfer_idp,
+					 unsigned int *flagsp);
+
+/*
+ * Support clean shutdown by allowing the caller to cancel
+ * pending sends.  Target DMA must be stopped before using
+ * this API.
+ */
+int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
+			       void **per_transfer_contextp,
+			       u32 *bufferp,
+			       unsigned int *nbytesp,
+			       unsigned int *transfer_idp);
+
+/*==================CE Interrupt Handlers====================*/
+void ath10k_ce_per_engine_service_any(struct ath10k *ar);
+void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
+int ath10k_ce_disable_interrupts(struct ath10k *ar);
+void ath10k_ce_enable_interrupts(struct ath10k *ar);
+
+/* ce_attr.flags values */
+/* Use NonSnooping PCIe accesses? */
+#define CE_ATTR_NO_SNOOP		1
+
+/* Byte swap data words */
+#define CE_ATTR_BYTE_SWAP_DATA		2
+
+/* Swizzle descriptors? */
+#define CE_ATTR_SWIZZLE_DESCRIPTORS	4
+
+/* no interrupt on copy completion */
+#define CE_ATTR_DIS_INTR		8
+
+/* Attributes of an instance of a Copy Engine */
+struct ce_attr {
+	/* CE_ATTR_* values */
+	unsigned int flags;
+
+	/* #entries in source ring - Must be a power of 2 */
+	unsigned int src_nentries;
+
+	/*
+	 * Max source send size for this CE.
+	 * This is also the minimum size of a destination buffer.
+	 */
+	unsigned int src_sz_max;
+
+	/* #entries in destination ring - Must be a power of 2 */
+	unsigned int dest_nentries;
+
+	void (*send_cb)(struct ath10k_ce_pipe *);
+	void (*recv_cb)(struct ath10k_ce_pipe *);
+};
+
+#define SR_BA_ADDRESS		0x0000
+#define SR_SIZE_ADDRESS		0x0004
+#define DR_BA_ADDRESS		0x0008
+#define DR_SIZE_ADDRESS		0x000c
+#define CE_CMD_ADDRESS		0x0018
+
+#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MSB	17
+#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB	17
+#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK	0x00020000
+#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(x) \
+	(((0 | (x)) << CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB) & \
+	CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK)
+
+#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MSB	16
+#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB	16
+#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK	0x00010000
+#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_GET(x) \
+	(((x) & CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) >> \
+	 CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB)
+#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(x) \
+	(((0 | (x)) << CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB) & \
+	 CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK)
+
+#define CE_CTRL1_DMAX_LENGTH_MSB		15
+#define CE_CTRL1_DMAX_LENGTH_LSB		0
+#define CE_CTRL1_DMAX_LENGTH_MASK		0x0000ffff
+#define CE_CTRL1_DMAX_LENGTH_GET(x) \
+	(((x) & CE_CTRL1_DMAX_LENGTH_MASK) >> CE_CTRL1_DMAX_LENGTH_LSB)
+#define CE_CTRL1_DMAX_LENGTH_SET(x) \
+	(((0 | (x)) << CE_CTRL1_DMAX_LENGTH_LSB) & CE_CTRL1_DMAX_LENGTH_MASK)
+
+#define CE_CTRL1_ADDRESS			0x0010
+#define CE_CTRL1_HW_MASK			0x0007ffff
+#define CE_CTRL1_SW_MASK			0x0007ffff
+#define CE_CTRL1_HW_WRITE_MASK			0x00000000
+#define CE_CTRL1_SW_WRITE_MASK			0x0007ffff
+#define CE_CTRL1_RSTMASK			0xffffffff
+#define CE_CTRL1_RESET				0x00000080
+
+#define CE_CMD_HALT_STATUS_MSB			3
+#define CE_CMD_HALT_STATUS_LSB			3
+#define CE_CMD_HALT_STATUS_MASK			0x00000008
+#define CE_CMD_HALT_STATUS_GET(x) \
+	(((x) & CE_CMD_HALT_STATUS_MASK) >> CE_CMD_HALT_STATUS_LSB)
+#define CE_CMD_HALT_STATUS_SET(x) \
+	(((0 | (x)) << CE_CMD_HALT_STATUS_LSB) & CE_CMD_HALT_STATUS_MASK)
+#define CE_CMD_HALT_STATUS_RESET		0
+#define CE_CMD_HALT_MSB				0
+#define CE_CMD_HALT_MASK			0x00000001
+
+#define HOST_IE_COPY_COMPLETE_MSB		0
+#define HOST_IE_COPY_COMPLETE_LSB		0
+#define HOST_IE_COPY_COMPLETE_MASK		0x00000001
+#define HOST_IE_COPY_COMPLETE_GET(x) \
+	(((x) & HOST_IE_COPY_COMPLETE_MASK) >> HOST_IE_COPY_COMPLETE_LSB)
+#define HOST_IE_COPY_COMPLETE_SET(x) \
+	(((0 | (x)) << HOST_IE_COPY_COMPLETE_LSB) & HOST_IE_COPY_COMPLETE_MASK)
+#define HOST_IE_COPY_COMPLETE_RESET		0
+#define HOST_IE_ADDRESS				0x002c
+
+#define HOST_IS_DST_RING_LOW_WATERMARK_MASK	0x00000010
+#define HOST_IS_DST_RING_HIGH_WATERMARK_MASK	0x00000008
+#define HOST_IS_SRC_RING_LOW_WATERMARK_MASK	0x00000004
+#define HOST_IS_SRC_RING_HIGH_WATERMARK_MASK	0x00000002
+#define HOST_IS_COPY_COMPLETE_MASK		0x00000001
+#define HOST_IS_ADDRESS				0x0030
+
+#define MISC_IE_ADDRESS				0x0034
+
+#define MISC_IS_AXI_ERR_MASK			0x00000400
+
+#define MISC_IS_DST_ADDR_ERR_MASK		0x00000200
+#define MISC_IS_SRC_LEN_ERR_MASK		0x00000100
+#define MISC_IS_DST_MAX_LEN_VIO_MASK		0x00000080
+#define MISC_IS_DST_RING_OVERFLOW_MASK		0x00000040
+#define MISC_IS_SRC_RING_OVERFLOW_MASK		0x00000020
+
+#define MISC_IS_ADDRESS				0x0038
+
+#define SR_WR_INDEX_ADDRESS			0x003c
+
+#define DST_WR_INDEX_ADDRESS			0x0040
+
+#define CURRENT_SRRI_ADDRESS			0x0044
+
+#define CURRENT_DRRI_ADDRESS			0x0048
+
+#define SRC_WATERMARK_LOW_MSB			31
+#define SRC_WATERMARK_LOW_LSB			16
+#define SRC_WATERMARK_LOW_MASK			0xffff0000
+#define SRC_WATERMARK_LOW_GET(x) \
+	(((x) & SRC_WATERMARK_LOW_MASK) >> SRC_WATERMARK_LOW_LSB)
+#define SRC_WATERMARK_LOW_SET(x) \
+	(((0 | (x)) << SRC_WATERMARK_LOW_LSB) & SRC_WATERMARK_LOW_MASK)
+#define SRC_WATERMARK_LOW_RESET			0
+#define SRC_WATERMARK_HIGH_MSB			15
+#define SRC_WATERMARK_HIGH_LSB			0
+#define SRC_WATERMARK_HIGH_MASK			0x0000ffff
+#define SRC_WATERMARK_HIGH_GET(x) \
+	(((x) & SRC_WATERMARK_HIGH_MASK) >> SRC_WATERMARK_HIGH_LSB)
+#define SRC_WATERMARK_HIGH_SET(x) \
+	(((0 | (x)) << SRC_WATERMARK_HIGH_LSB) & SRC_WATERMARK_HIGH_MASK)
+#define SRC_WATERMARK_HIGH_RESET		0
+#define SRC_WATERMARK_ADDRESS			0x004c
+
+#define DST_WATERMARK_LOW_LSB			16
+#define DST_WATERMARK_LOW_MASK			0xffff0000
+#define DST_WATERMARK_LOW_SET(x) \
+	(((0 | (x)) << DST_WATERMARK_LOW_LSB) & DST_WATERMARK_LOW_MASK)
+#define DST_WATERMARK_LOW_RESET			0
+#define DST_WATERMARK_HIGH_MSB			15
+#define DST_WATERMARK_HIGH_LSB			0
+#define DST_WATERMARK_HIGH_MASK			0x0000ffff
+#define DST_WATERMARK_HIGH_GET(x) \
+	(((x) & DST_WATERMARK_HIGH_MASK) >> DST_WATERMARK_HIGH_LSB)
+#define DST_WATERMARK_HIGH_SET(x) \
+	(((0 | (x)) << DST_WATERMARK_HIGH_LSB) & DST_WATERMARK_HIGH_MASK)
+#define DST_WATERMARK_HIGH_RESET		0
+#define DST_WATERMARK_ADDRESS			0x0050
+
+static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
+{
+	return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id;
+}
+
+#define CE_WATERMARK_MASK (HOST_IS_SRC_RING_LOW_WATERMARK_MASK  | \
+			   HOST_IS_SRC_RING_HIGH_WATERMARK_MASK | \
+			   HOST_IS_DST_RING_LOW_WATERMARK_MASK  | \
+			   HOST_IS_DST_RING_HIGH_WATERMARK_MASK)
+
+#define CE_ERROR_MASK	(MISC_IS_AXI_ERR_MASK           | \
+			 MISC_IS_DST_ADDR_ERR_MASK      | \
+			 MISC_IS_SRC_LEN_ERR_MASK       | \
+			 MISC_IS_DST_MAX_LEN_VIO_MASK   | \
+			 MISC_IS_DST_RING_OVERFLOW_MASK | \
+			 MISC_IS_SRC_RING_OVERFLOW_MASK)
+
+#define CE_SRC_RING_TO_DESC(baddr, idx) \
+	(&(((struct ce_desc *)baddr)[idx]))
+
+#define CE_DEST_RING_TO_DESC(baddr, idx) \
+	(&(((struct ce_desc *)baddr)[idx]))
+
+/* Ring arithmetic (modulus number of entries in ring, which is a pwr of 2). */
+#define CE_RING_DELTA(nentries_mask, fromidx, toidx) \
+	(((int)(toidx)-(int)(fromidx)) & (nentries_mask))
+
+#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
+
+#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB \
+				ar->regs->ce_wrap_intr_sum_host_msi_lsb
+#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK \
+				ar->regs->ce_wrap_intr_sum_host_msi_mask
+#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(x) \
+	(((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \
+		CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB)
+#define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS			0x0000
+
+#define CE_INTERRUPT_SUMMARY(ar) \
+	CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET( \
+		ath10k_pci_read32((ar), CE_WRAPPER_BASE_ADDRESS + \
+		CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS))
+
+#endif /* _CE_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
new file mode 100644
index 0000000..05de753
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -0,0 +1,2051 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <linux/of.h>
+
+#include "core.h"
+#include "mac.h"
+#include "htc.h"
+#include "hif.h"
+#include "wmi.h"
+#include "bmi.h"
+#include "debug.h"
+#include "htt.h"
+#include "testmode.h"
+#include "wmi-ops.h"
+
+unsigned int ath10k_debug_mask;
+static unsigned int ath10k_cryptmode_param;
+static bool uart_print;
+static bool skip_otp;
+static bool rawmode;
+
+module_param_named(debug_mask, ath10k_debug_mask, uint, 0644);
+module_param_named(cryptmode, ath10k_cryptmode_param, uint, 0644);
+module_param(uart_print, bool, 0644);
+module_param(skip_otp, bool, 0644);
+module_param(rawmode, bool, 0644);
+
+MODULE_PARM_DESC(debug_mask, "Debugging mask");
+MODULE_PARM_DESC(uart_print, "Uart target debugging");
+MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode");
+MODULE_PARM_DESC(cryptmode, "Crypto mode: 0-hardware, 1-software");
+MODULE_PARM_DESC(rawmode, "Use raw 802.11 frame datapath");
+
+static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+	{
+		.id = QCA988X_HW_2_0_VERSION,
+		.dev_id = QCA988X_2_0_DEVICE_ID,
+		.name = "qca988x hw2.0",
+		.patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
+		.uart_pin = 7,
+		.has_shifted_cc_wraparound = true,
+		.otp_exe_param = 0,
+		.channel_counters_freq_hz = 88000,
+		.max_probe_resp_desc_thres = 0,
+		.fw = {
+			.dir = QCA988X_HW_2_0_FW_DIR,
+			.fw = QCA988X_HW_2_0_FW_FILE,
+			.otp = QCA988X_HW_2_0_OTP_FILE,
+			.board = QCA988X_HW_2_0_BOARD_DATA_FILE,
+			.board_size = QCA988X_BOARD_DATA_SZ,
+			.board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
+		},
+	},
+	{
+		.id = QCA6174_HW_2_1_VERSION,
+		.dev_id = QCA6164_2_1_DEVICE_ID,
+		.name = "qca6164 hw2.1",
+		.patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
+		.uart_pin = 6,
+		.otp_exe_param = 0,
+		.channel_counters_freq_hz = 88000,
+		.max_probe_resp_desc_thres = 0,
+		.fw = {
+			.dir = QCA6174_HW_2_1_FW_DIR,
+			.fw = QCA6174_HW_2_1_FW_FILE,
+			.otp = QCA6174_HW_2_1_OTP_FILE,
+			.board = QCA6174_HW_2_1_BOARD_DATA_FILE,
+			.board_size = QCA6174_BOARD_DATA_SZ,
+			.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
+		},
+	},
+	{
+		.id = QCA6174_HW_2_1_VERSION,
+		.dev_id = QCA6174_2_1_DEVICE_ID,
+		.name = "qca6174 hw2.1",
+		.patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
+		.uart_pin = 6,
+		.otp_exe_param = 0,
+		.channel_counters_freq_hz = 88000,
+		.max_probe_resp_desc_thres = 0,
+		.fw = {
+			.dir = QCA6174_HW_2_1_FW_DIR,
+			.fw = QCA6174_HW_2_1_FW_FILE,
+			.otp = QCA6174_HW_2_1_OTP_FILE,
+			.board = QCA6174_HW_2_1_BOARD_DATA_FILE,
+			.board_size = QCA6174_BOARD_DATA_SZ,
+			.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
+		},
+	},
+	{
+		.id = QCA6174_HW_3_0_VERSION,
+		.dev_id = QCA6174_2_1_DEVICE_ID,
+		.name = "qca6174 hw3.0",
+		.patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
+		.uart_pin = 6,
+		.otp_exe_param = 0,
+		.channel_counters_freq_hz = 88000,
+		.max_probe_resp_desc_thres = 0,
+		.fw = {
+			.dir = QCA6174_HW_3_0_FW_DIR,
+			.fw = QCA6174_HW_3_0_FW_FILE,
+			.otp = QCA6174_HW_3_0_OTP_FILE,
+			.board = QCA6174_HW_3_0_BOARD_DATA_FILE,
+			.board_size = QCA6174_BOARD_DATA_SZ,
+			.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
+		},
+	},
+	{
+		.id = QCA6174_HW_3_2_VERSION,
+		.dev_id = QCA6174_2_1_DEVICE_ID,
+		.name = "qca6174 hw3.2",
+		.patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
+		.uart_pin = 6,
+		.otp_exe_param = 0,
+		.channel_counters_freq_hz = 88000,
+		.max_probe_resp_desc_thres = 0,
+		.fw = {
+			/* uses same binaries as hw3.0 */
+			.dir = QCA6174_HW_3_0_FW_DIR,
+			.fw = QCA6174_HW_3_0_FW_FILE,
+			.otp = QCA6174_HW_3_0_OTP_FILE,
+			.board = QCA6174_HW_3_0_BOARD_DATA_FILE,
+			.board_size = QCA6174_BOARD_DATA_SZ,
+			.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
+		},
+	},
+	{
+		.id = QCA99X0_HW_2_0_DEV_VERSION,
+		.dev_id = QCA99X0_2_0_DEVICE_ID,
+		.name = "qca99x0 hw2.0",
+		.patch_load_addr = QCA99X0_HW_2_0_PATCH_LOAD_ADDR,
+		.uart_pin = 7,
+		.otp_exe_param = 0x00000700,
+		.continuous_frag_desc = true,
+		.channel_counters_freq_hz = 150000,
+		.max_probe_resp_desc_thres = 24,
+		.fw = {
+			.dir = QCA99X0_HW_2_0_FW_DIR,
+			.fw = QCA99X0_HW_2_0_FW_FILE,
+			.otp = QCA99X0_HW_2_0_OTP_FILE,
+			.board = QCA99X0_HW_2_0_BOARD_DATA_FILE,
+			.board_size = QCA99X0_BOARD_DATA_SZ,
+			.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
+		},
+	},
+	{
+		.id = QCA9377_HW_1_0_DEV_VERSION,
+		.dev_id = QCA9377_1_0_DEVICE_ID,
+		.name = "qca9377 hw1.0",
+		.patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR,
+		.uart_pin = 6,
+		.otp_exe_param = 0,
+		.channel_counters_freq_hz = 88000,
+		.max_probe_resp_desc_thres = 0,
+		.fw = {
+			.dir = QCA9377_HW_1_0_FW_DIR,
+			.fw = QCA9377_HW_1_0_FW_FILE,
+			.otp = QCA9377_HW_1_0_OTP_FILE,
+			.board = QCA9377_HW_1_0_BOARD_DATA_FILE,
+			.board_size = QCA9377_BOARD_DATA_SZ,
+			.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
+		},
+	},
+	{
+		.id = QCA9377_HW_1_1_DEV_VERSION,
+		.dev_id = QCA9377_1_0_DEVICE_ID,
+		.name = "qca9377 hw1.1",
+		.patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR,
+		.uart_pin = 6,
+		.otp_exe_param = 0,
+		.channel_counters_freq_hz = 88000,
+		.max_probe_resp_desc_thres = 0,
+		.fw = {
+			.dir = QCA9377_HW_1_0_FW_DIR,
+			.fw = QCA9377_HW_1_0_FW_FILE,
+			.otp = QCA9377_HW_1_0_OTP_FILE,
+			.board = QCA9377_HW_1_0_BOARD_DATA_FILE,
+			.board_size = QCA9377_BOARD_DATA_SZ,
+			.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
+		},
+	},
+};
+
+static const char *const ath10k_core_fw_feature_str[] = {
+	[ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX] = "wmi-mgmt-rx",
+	[ATH10K_FW_FEATURE_WMI_10X] = "wmi-10.x",
+	[ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX] = "has-wmi-mgmt-tx",
+	[ATH10K_FW_FEATURE_NO_P2P] = "no-p2p",
+	[ATH10K_FW_FEATURE_WMI_10_2] = "wmi-10.2",
+	[ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT] = "multi-vif-ps",
+	[ATH10K_FW_FEATURE_WOWLAN_SUPPORT] = "wowlan",
+	[ATH10K_FW_FEATURE_IGNORE_OTP_RESULT] = "ignore-otp",
+	[ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING] = "no-4addr-pad",
+	[ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT] = "skip-clock-init",
+	[ATH10K_FW_FEATURE_RAW_MODE_SUPPORT] = "raw-mode",
+	[ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA] = "adaptive-cca",
+};
+
+static unsigned int ath10k_core_get_fw_feature_str(char *buf,
+						   size_t buf_len,
+						   enum ath10k_fw_features feat)
+{
+	/* make sure that ath10k_core_fw_feature_str[] gets updated */
+	BUILD_BUG_ON(ARRAY_SIZE(ath10k_core_fw_feature_str) !=
+		     ATH10K_FW_FEATURE_COUNT);
+
+	if (feat >= ARRAY_SIZE(ath10k_core_fw_feature_str) ||
+	    WARN_ON(!ath10k_core_fw_feature_str[feat])) {
+		return scnprintf(buf, buf_len, "bit%d", feat);
+	}
+
+	return scnprintf(buf, buf_len, "%s", ath10k_core_fw_feature_str[feat]);
+}
+
+void ath10k_core_get_fw_features_str(struct ath10k *ar,
+				     char *buf,
+				     size_t buf_len)
+{
+	unsigned int len = 0;
+	int i;
+
+	for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) {
+		if (test_bit(i, ar->fw_features)) {
+			if (len > 0)
+				len += scnprintf(buf + len, buf_len - len, ",");
+
+			len += ath10k_core_get_fw_feature_str(buf + len,
+							      buf_len - len,
+							      i);
+		}
+	}
+}
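+
+/*
+ * Example (illustrative): for a firmware image with only the "wmi-10.x"
+ * and "has-wmi-mgmt-tx" feature bits set, the helper above fills buf
+ * with the comma separated string "wmi-10.x,has-wmi-mgmt-tx".
+ */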
+
+static void ath10k_send_suspend_complete(struct ath10k *ar)
+{
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot suspend complete\n");
+
+	complete(&ar->target_suspend);
+}
+
+static int ath10k_init_configure_target(struct ath10k *ar)
+{
+	u32 param_host;
+	int ret;
+
+	/* tell the target which HTC version is used */
+	ret = ath10k_bmi_write32(ar, hi_app_host_interest,
+				 HTC_PROTOCOL_VERSION);
+	if (ret) {
+		ath10k_err(ar, "setting HTC version failed\n");
+		return ret;
+	}
+
+	/* set the firmware mode to STA/IBSS/AP */
+	ret = ath10k_bmi_read32(ar, hi_option_flag, &param_host);
+	if (ret) {
+		ath10k_err(ar, "setting firmware mode (1/2) failed\n");
+		return ret;
+	}
+
+	/* TODO following parameters need to be re-visited. */
+	/* num_device */
+	param_host |= (1 << HI_OPTION_NUM_DEV_SHIFT);
+	/* Firmware mode */
+	/* FIXME: Why FW_MODE_AP ??.*/
+	param_host |= (HI_OPTION_FW_MODE_AP << HI_OPTION_FW_MODE_SHIFT);
+	/* mac_addr_method */
+	param_host |= (1 << HI_OPTION_MAC_ADDR_METHOD_SHIFT);
+	/* firmware_bridge */
+	param_host |= (0 << HI_OPTION_FW_BRIDGE_SHIFT);
+	/* fwsubmode */
+	param_host |= (0 << HI_OPTION_FW_SUBMODE_SHIFT);
+
+	ret = ath10k_bmi_write32(ar, hi_option_flag, param_host);
+	if (ret) {
+		ath10k_err(ar, "setting firmware mode (2/2) failed\n");
+		return ret;
+	}
+
+	/* We do all byte-swapping on the host */
+	ret = ath10k_bmi_write32(ar, hi_be, 0);
+	if (ret) {
+		ath10k_err(ar, "setting host CPU BE mode failed\n");
+		return ret;
+	}
+
+	/* FW descriptor/Data swap flags */
+	ret = ath10k_bmi_write32(ar, hi_fw_swap, 0);
+	if (ret) {
+		ath10k_err(ar, "setting FW data/desc swap flags failed\n");
+		return ret;
+	}
+
+	/* Some devices have a special sanity check that verifies the PCI
+	 * Device ID is written to this host interest var. It is known to be
+	 * required to boot QCA6164.
+	 */
+	ret = ath10k_bmi_write32(ar, hi_hci_uart_pwr_mgmt_params_ext,
+				 ar->dev_id);
+	if (ret) {
+		ath10k_err(ar, "failed to set pwr_mgmt_params: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar,
+						   const char *dir,
+						   const char *file)
+{
+	char filename[100];
+	const struct firmware *fw;
+	int ret;
+
+	if (file == NULL)
+		return ERR_PTR(-ENOENT);
+
+	if (dir == NULL)
+		dir = ".";
+
+	snprintf(filename, sizeof(filename), "%s/%s", dir, file);
+	ret = request_firmware(&fw, filename, ar->dev);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return fw;
+}
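+
+/*
+ * Example (illustrative, assuming the QCA988X hw2.0 firmware directory
+ * resolves to "ath10k/QCA988X/hw2.0"):
+ *
+ *	fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, "firmware-5.bin");
+ *
+ * requests "ath10k/QCA988X/hw2.0/firmware-5.bin" via request_firmware()
+ * and returns an ERR_PTR() on failure.
+ */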
+
+static int ath10k_push_board_ext_data(struct ath10k *ar, const void *data,
+				      size_t data_len)
+{
+	u32 board_data_size = ar->hw_params.fw.board_size;
+	u32 board_ext_data_size = ar->hw_params.fw.board_ext_size;
+	u32 board_ext_data_addr;
+	int ret;
+
+	ret = ath10k_bmi_read32(ar, hi_board_ext_data, &board_ext_data_addr);
+	if (ret) {
+		ath10k_err(ar, "could not read board ext data addr (%d)\n",
+			   ret);
+		return ret;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT,
+		   "boot push board extended data addr 0x%x\n",
+		   board_ext_data_addr);
+
+	if (board_ext_data_addr == 0)
+		return 0;
+
+	if (data_len != (board_data_size + board_ext_data_size)) {
+		ath10k_err(ar, "invalid board (ext) data sizes %zu != %d+%d\n",
+			   data_len, board_data_size, board_ext_data_size);
+		return -EINVAL;
+	}
+
+	ret = ath10k_bmi_write_memory(ar, board_ext_data_addr,
+				      data + board_data_size,
+				      board_ext_data_size);
+	if (ret) {
+		ath10k_err(ar, "could not write board ext data (%d)\n", ret);
+		return ret;
+	}
+
+	ret = ath10k_bmi_write32(ar, hi_board_ext_data_config,
+				 (board_ext_data_size << 16) | 1);
+	if (ret) {
+		ath10k_err(ar, "could not write board ext data bit (%d)\n",
+			   ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int ath10k_download_board_data(struct ath10k *ar, const void *data,
+				      size_t data_len)
+{
+	u32 board_data_size = ar->hw_params.fw.board_size;
+	u32 address;
+	int ret;
+
+	ret = ath10k_push_board_ext_data(ar, data, data_len);
+	if (ret) {
+		ath10k_err(ar, "could not push board ext data (%d)\n", ret);
+		goto exit;
+	}
+
+	ret = ath10k_bmi_read32(ar, hi_board_data, &address);
+	if (ret) {
+		ath10k_err(ar, "could not read board data addr (%d)\n", ret);
+		goto exit;
+	}
+
+	ret = ath10k_bmi_write_memory(ar, address, data,
+				      min_t(u32, board_data_size,
+					    data_len));
+	if (ret) {
+		ath10k_err(ar, "could not write board data (%d)\n", ret);
+		goto exit;
+	}
+
+	ret = ath10k_bmi_write32(ar, hi_board_data_initialized, 1);
+	if (ret) {
+		ath10k_err(ar, "could not write board data bit (%d)\n", ret);
+		goto exit;
+	}
+
+exit:
+	return ret;
+}
+
+static int ath10k_download_cal_file(struct ath10k *ar)
+{
+	int ret;
+
+	if (!ar->cal_file)
+		return -ENOENT;
+
+	if (IS_ERR(ar->cal_file))
+		return PTR_ERR(ar->cal_file);
+
+	ret = ath10k_download_board_data(ar, ar->cal_file->data,
+					 ar->cal_file->size);
+	if (ret) {
+		ath10k_err(ar, "failed to download cal_file data: %d\n", ret);
+		return ret;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cal file downloaded\n");
+
+	return 0;
+}
+
+static int ath10k_download_cal_dt(struct ath10k *ar)
+{
+	struct device_node *node;
+	int data_len;
+	void *data;
+	int ret;
+
+	node = ar->dev->of_node;
+	if (!node)
+		/* Device Tree is optional, don't print any warnings if
+		 * there's no node for ath10k.
+		 */
+		return -ENOENT;
+
+	if (!of_get_property(node, "qcom,ath10k-calibration-data",
+			     &data_len)) {
+		/* The calibration data node is optional */
+		return -ENOENT;
+	}
+
+	if (data_len != QCA988X_CAL_DATA_LEN) {
+		ath10k_warn(ar, "invalid calibration data length in DT: %d\n",
+			    data_len);
+		ret = -EMSGSIZE;
+		goto out;
+	}
+
+	data = kmalloc(data_len, GFP_KERNEL);
+	if (!data) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = of_property_read_u8_array(node, "qcom,ath10k-calibration-data",
+					data, data_len);
+	if (ret) {
+		ath10k_warn(ar, "failed to read calibration data from DT: %d\n",
+			    ret);
+		goto out_free;
+	}
+
+	ret = ath10k_download_board_data(ar, data, data_len);
+	if (ret) {
+		ath10k_warn(ar, "failed to download calibration data from Device Tree: %d\n",
+			    ret);
+		goto out_free;
+	}
+
+	ret = 0;
+
+out_free:
+	kfree(data);
+
+out:
+	return ret;
+}
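+
+/*
+ * Illustrative device tree fragment consumed by the function above
+ * (property name taken from the code, node name and byte values are
+ * hypothetical; the array must be exactly QCA988X_CAL_DATA_LEN bytes):
+ *
+ *	wifi@0,0 {
+ *		qcom,ath10k-calibration-data = [ 01 02 03 ... ];
+ *	};
+ */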
+
+static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
+{
+	u32 result, address;
+	u8 board_id, chip_id;
+	int ret;
+
+	address = ar->hw_params.patch_load_addr;
+
+	if (!ar->otp_data || !ar->otp_len) {
+		ath10k_warn(ar,
+			    "failed to retrieve board id because of invalid otp\n");
+		return -ENODATA;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT,
+		   "boot upload otp to 0x%x len %zd for board id\n",
+		   address, ar->otp_len);
+
+	ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len);
+	if (ret) {
+		ath10k_err(ar, "could not write otp for board id check: %d\n",
+			   ret);
+		return ret;
+	}
+
+	ret = ath10k_bmi_execute(ar, address, BMI_PARAM_GET_EEPROM_BOARD_ID,
+				 &result);
+	if (ret) {
+		ath10k_err(ar, "could not execute otp for board id check: %d\n",
+			   ret);
+		return ret;
+	}
+
+	board_id = MS(result, ATH10K_BMI_BOARD_ID_FROM_OTP);
+	chip_id = MS(result, ATH10K_BMI_CHIP_ID_FROM_OTP);
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT,
+		   "boot get otp board id result 0x%08x board_id %d chip_id %d\n",
+		   result, board_id, chip_id);
+
+	if ((result & ATH10K_BMI_BOARD_ID_STATUS_MASK) != 0)
+		return -EOPNOTSUPP;
+
+	ar->id.bmi_ids_valid = true;
+	ar->id.bmi_board_id = board_id;
+	ar->id.bmi_chip_id = chip_id;
+
+	return 0;
+}
+
+static int ath10k_download_and_run_otp(struct ath10k *ar)
+{
+	u32 result, address = ar->hw_params.patch_load_addr;
+	u32 bmi_otp_exe_param = ar->hw_params.otp_exe_param;
+	int ret;
+
+	ret = ath10k_download_board_data(ar, ar->board_data, ar->board_len);
+	if (ret) {
+		ath10k_err(ar, "failed to download board data: %d\n", ret);
+		return ret;
+	}
+
+	/* OTP is optional */
+
+	if (!ar->otp_data || !ar->otp_len) {
+		ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n",
+			    ar->otp_data, ar->otp_len);
+		return 0;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n",
+		   address, ar->otp_len);
+
+	ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len);
+	if (ret) {
+		ath10k_err(ar, "could not write otp (%d)\n", ret);
+		return ret;
+	}
+
+	ret = ath10k_bmi_execute(ar, address, bmi_otp_exe_param, &result);
+	if (ret) {
+		ath10k_err(ar, "could not execute otp (%d)\n", ret);
+		return ret;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
+
+	if (!(skip_otp || test_bit(ATH10K_FW_FEATURE_IGNORE_OTP_RESULT,
+				   ar->fw_features)) &&
+	    result != 0) {
+		ath10k_err(ar, "otp calibration failed: %d\n", result);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ath10k_download_fw(struct ath10k *ar, enum ath10k_firmware_mode mode)
+{
+	u32 address, data_len;
+	const char *mode_name;
+	const void *data;
+	int ret;
+
+	address = ar->hw_params.patch_load_addr;
+
+	switch (mode) {
+	case ATH10K_FIRMWARE_MODE_NORMAL:
+		data = ar->firmware_data;
+		data_len = ar->firmware_len;
+		mode_name = "normal";
+		ret = ath10k_swap_code_seg_configure(ar,
+						     ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW);
+		if (ret) {
+			ath10k_err(ar, "failed to configure fw code swap: %d\n",
+				   ret);
+			return ret;
+		}
+		break;
+	case ATH10K_FIRMWARE_MODE_UTF:
+		data = ar->testmode.utf_firmware_data;
+		data_len = ar->testmode.utf_firmware_len;
+		mode_name = "utf";
+		break;
+	default:
+		ath10k_err(ar, "unknown firmware mode: %d\n", mode);
+		return -EINVAL;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT,
+		   "boot uploading firmware image %p len %d mode %s\n",
+		   data, data_len, mode_name);
+
+	ret = ath10k_bmi_fast_download(ar, address, data, data_len);
+	if (ret) {
+		ath10k_err(ar, "failed to download %s firmware: %d\n",
+			   mode_name, ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+static void ath10k_core_free_board_files(struct ath10k *ar)
+{
+	if (!IS_ERR(ar->board))
+		release_firmware(ar->board);
+
+	ar->board = NULL;
+	ar->board_data = NULL;
+	ar->board_len = 0;
+}
+
+static void ath10k_core_free_firmware_files(struct ath10k *ar)
+{
+	if (!IS_ERR(ar->otp))
+		release_firmware(ar->otp);
+
+	if (!IS_ERR(ar->firmware))
+		release_firmware(ar->firmware);
+
+	if (!IS_ERR(ar->cal_file))
+		release_firmware(ar->cal_file);
+
+	ath10k_swap_code_seg_release(ar);
+
+	ar->otp = NULL;
+	ar->otp_data = NULL;
+	ar->otp_len = 0;
+
+	ar->firmware = NULL;
+	ar->firmware_data = NULL;
+	ar->firmware_len = 0;
+
+	ar->cal_file = NULL;
+}
+
+static int ath10k_fetch_cal_file(struct ath10k *ar)
+{
+	char filename[100];
+
+	/* cal-<bus>-<id>.bin */
+	scnprintf(filename, sizeof(filename), "cal-%s-%s.bin",
+		  ath10k_bus_str(ar->hif.bus), dev_name(ar->dev));
+
+	ar->cal_file = ath10k_fetch_fw_file(ar, ATH10K_FW_DIR, filename);
+	if (IS_ERR(ar->cal_file))
+		/* calibration file is optional, don't print any warnings */
+		return PTR_ERR(ar->cal_file);
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "found calibration file %s/%s\n",
+		   ATH10K_FW_DIR, filename);
+
+	return 0;
+}
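+
+/*
+ * Example (illustrative): for a PCI device named "0000:02:00.0" the
+ * function above looks for "ath10k/cal-pci-0000:02:00.0.bin", assuming
+ * ATH10K_FW_DIR resolves to "ath10k".
+ */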
+
+static int ath10k_core_fetch_board_data_api_1(struct ath10k *ar)
+{
+	if (!ar->hw_params.fw.board) {
+		ath10k_err(ar, "failed to find board file fw entry\n");
+		return -EINVAL;
+	}
+
+	ar->board = ath10k_fetch_fw_file(ar,
+					 ar->hw_params.fw.dir,
+					 ar->hw_params.fw.board);
+	if (IS_ERR(ar->board))
+		return PTR_ERR(ar->board);
+
+	ar->board_data = ar->board->data;
+	ar->board_len = ar->board->size;
+
+	return 0;
+}
+
+static int ath10k_core_parse_bd_ie_board(struct ath10k *ar,
+					 const void *buf, size_t buf_len,
+					 const char *boardname)
+{
+	const struct ath10k_fw_ie *hdr;
+	bool name_match_found;
+	int ret, board_ie_id;
+	size_t board_ie_len;
+	const void *board_ie_data;
+
+	name_match_found = false;
+
+	/* go through ATH10K_BD_IE_BOARD_ elements */
+	while (buf_len > sizeof(struct ath10k_fw_ie)) {
+		hdr = buf;
+		board_ie_id = le32_to_cpu(hdr->id);
+		board_ie_len = le32_to_cpu(hdr->len);
+		board_ie_data = hdr->data;
+
+		buf_len -= sizeof(*hdr);
+		buf += sizeof(*hdr);
+
+		if (buf_len < ALIGN(board_ie_len, 4)) {
+			ath10k_err(ar, "invalid ATH10K_BD_IE_BOARD length: %zu < %zu\n",
+				   buf_len, ALIGN(board_ie_len, 4));
+			ret = -EINVAL;
+			goto out;
+		}
+
+		switch (board_ie_id) {
+		case ATH10K_BD_IE_BOARD_NAME:
+			ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "board name", "",
+					board_ie_data, board_ie_len);
+
+			if (board_ie_len != strlen(boardname))
+				break;
+
+			ret = memcmp(board_ie_data, boardname, strlen(boardname));
+			if (ret)
+				break;
+
+			name_match_found = true;
+			ath10k_dbg(ar, ATH10K_DBG_BOOT,
+				   "boot found match for name '%s'",
+				   boardname);
+			break;
+		case ATH10K_BD_IE_BOARD_DATA:
+			if (!name_match_found)
+				/* no match found */
+				break;
+
+			ath10k_dbg(ar, ATH10K_DBG_BOOT,
+				   "boot found board data for '%s'",
+				   boardname);
+
+			ar->board_data = board_ie_data;
+			ar->board_len = board_ie_len;
+
+			ret = 0;
+			goto out;
+		default:
+			ath10k_warn(ar, "unknown ATH10K_BD_IE_BOARD found: %d\n",
+				    board_ie_id);
+			break;
+		}
+
+		/* jump over the padding */
+		board_ie_len = ALIGN(board_ie_len, 4);
+
+		buf_len -= board_ie_len;
+		buf += board_ie_len;
+	}
+
+	/* no match found */
+	ret = -ENOENT;
+
+out:
+	return ret;
+}
+
+static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar,
+					      const char *boardname,
+					      const char *filename)
+{
+	size_t len, magic_len, ie_len;
+	struct ath10k_fw_ie *hdr;
+	const u8 *data;
+	int ret, ie_id;
+
+	ar->board = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, filename);
+	if (IS_ERR(ar->board))
+		return PTR_ERR(ar->board);
+
+	data = ar->board->data;
+	len = ar->board->size;
+
+	/* the magic string is padded with an extra NUL byte */
+	magic_len = strlen(ATH10K_BOARD_MAGIC) + 1;
+	if (len < magic_len) {
+		ath10k_err(ar, "failed to find magic value in %s/%s, file too short: %zu\n",
+			   ar->hw_params.fw.dir, filename, len);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	if (memcmp(data, ATH10K_BOARD_MAGIC, magic_len)) {
+		ath10k_err(ar, "found invalid board magic\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	/* magic is padded to 4 bytes */
+	magic_len = ALIGN(magic_len, 4);
+	if (len < magic_len) {
+		ath10k_err(ar, "failed: %s/%s too small to contain board data, len: %zu\n",
+			   ar->hw_params.fw.dir, filename, len);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	data += magic_len;
+	len -= magic_len;
+
+	while (len > sizeof(struct ath10k_fw_ie)) {
+		hdr = (struct ath10k_fw_ie *)data;
+		ie_id = le32_to_cpu(hdr->id);
+		ie_len = le32_to_cpu(hdr->len);
+
+		len -= sizeof(*hdr);
+		data = hdr->data;
+
+		if (len < ALIGN(ie_len, 4)) {
+			ath10k_err(ar, "invalid length for board ie_id %d ie_len %zu len %zu\n",
+				   ie_id, ie_len, len);
+			ret = -EINVAL;
+			goto err;
+		}
+
+		switch (ie_id) {
+		case ATH10K_BD_IE_BOARD:
+			ret = ath10k_core_parse_bd_ie_board(ar, data, ie_len,
+							    boardname);
+			if (ret == -ENOENT)
+				/* no match found, continue */
+				break;
+			else if (ret)
+				/* there was an error, bail out */
+				goto err;
+
+			/* board data found */
+			goto out;
+		}
+
+		/* jump over the padding */
+		ie_len = ALIGN(ie_len, 4);
+
+		len -= ie_len;
+		data += ie_len;
+	}
+
+out:
+	if (!ar->board_data || !ar->board_len) {
+		ath10k_err(ar,
+			   "failed to fetch board data for %s from %s/%s\n",
+			   boardname, ar->hw_params.fw.dir, filename);
+		ret = -ENODATA;
+		goto err;
+	}
+
+	return 0;
+
+err:
+	ath10k_core_free_board_files(ar);
+	return ret;
+}
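+
+/*
+ * Illustrative layout of the board-2.bin container parsed above (field
+ * sizes follow struct ath10k_fw_ie; the magic string value itself is
+ * not repeated here):
+ *
+ *	| board magic, NUL terminated, padded to 4 bytes      |
+ *	| ATH10K_BD_IE_BOARD | len | payload:                 |
+ *	|   ATH10K_BD_IE_BOARD_NAME | len | board name        |
+ *	|   ATH10K_BD_IE_BOARD_DATA | len | board data        |
+ *	| ... further ATH10K_BD_IE_BOARD entries ...          |
+ *
+ * A BOARD_DATA element is only used when the preceding BOARD_NAME
+ * matched the name built by ath10k_core_create_board_name().
+ */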
+
+static int ath10k_core_create_board_name(struct ath10k *ar, char *name,
+					 size_t name_len)
+{
+	if (ar->id.bmi_ids_valid) {
+		scnprintf(name, name_len,
+			  "bus=%s,bmi-chip-id=%d,bmi-board-id=%d",
+			  ath10k_bus_str(ar->hif.bus),
+			  ar->id.bmi_chip_id,
+			  ar->id.bmi_board_id);
+		goto out;
+	}
+
+	scnprintf(name, name_len,
+		  "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x",
+		  ath10k_bus_str(ar->hif.bus),
+		  ar->id.vendor, ar->id.device,
+		  ar->id.subsystem_vendor, ar->id.subsystem_device);
+
+out:
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using board name '%s'\n", name);
+
+	return 0;
+}
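+
+/*
+ * Example board names produced above (illustrative IDs):
+ *
+ *	"bus=pci,bmi-chip-id=0,bmi-board-id=1"
+ *	"bus=pci,vendor=168c,device=003c,subsystem-vendor=0000,subsystem-device=0000"
+ *
+ * The first form is used when a board/chip id pair was read from OTP,
+ * the second (PCI id based) form otherwise.
+ */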
+
+static int ath10k_core_fetch_board_file(struct ath10k *ar)
+{
+	char boardname[100];
+	int ret;
+
+	ret = ath10k_core_create_board_name(ar, boardname, sizeof(boardname));
+	if (ret) {
+		ath10k_err(ar, "failed to create board name: %d\n", ret);
+		return ret;
+	}
+
+	ar->bd_api = 2;
+	ret = ath10k_core_fetch_board_data_api_n(ar, boardname,
+						 ATH10K_BOARD_API2_FILE);
+	if (!ret)
+		goto success;
+
+	ar->bd_api = 1;
+	ret = ath10k_core_fetch_board_data_api_1(ar);
+	if (ret) {
+		ath10k_err(ar, "failed to fetch board data\n");
+		return ret;
+	}
+
+success:
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "using board api %d\n", ar->bd_api);
+	return 0;
+}
+
+static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar)
+{
+	int ret = 0;
+
+	if (ar->hw_params.fw.fw == NULL) {
+		ath10k_err(ar, "firmware file not defined\n");
+		return -EINVAL;
+	}
+
+	ar->firmware = ath10k_fetch_fw_file(ar,
+					    ar->hw_params.fw.dir,
+					    ar->hw_params.fw.fw);
+	if (IS_ERR(ar->firmware)) {
+		ret = PTR_ERR(ar->firmware);
+		ath10k_err(ar, "could not fetch firmware (%d)\n", ret);
+		goto err;
+	}
+
+	ar->firmware_data = ar->firmware->data;
+	ar->firmware_len = ar->firmware->size;
+
+	/* OTP may be undefined. If so, don't fetch it at all */
+	if (ar->hw_params.fw.otp == NULL)
+		return 0;
+
+	ar->otp = ath10k_fetch_fw_file(ar,
+				       ar->hw_params.fw.dir,
+				       ar->hw_params.fw.otp);
+	if (IS_ERR(ar->otp)) {
+		ret = PTR_ERR(ar->otp);
+		ath10k_err(ar, "could not fetch otp (%d)\n", ret);
+		goto err;
+	}
+
+	ar->otp_data = ar->otp->data;
+	ar->otp_len = ar->otp->size;
+
+	return 0;
+
+err:
+	ath10k_core_free_firmware_files(ar);
+	return ret;
+}
+
+static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
+{
+	size_t magic_len, len, ie_len;
+	int ie_id, i, index, bit, ret;
+	struct ath10k_fw_ie *hdr;
+	const u8 *data;
+	__le32 *timestamp, *version;
+
+	/* first fetch the firmware file (firmware-*.bin) */
+	ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name);
+	if (IS_ERR(ar->firmware)) {
+		ath10k_err(ar, "could not fetch firmware file '%s/%s': %ld\n",
+			   ar->hw_params.fw.dir, name, PTR_ERR(ar->firmware));
+		return PTR_ERR(ar->firmware);
+	}
+
+	data = ar->firmware->data;
+	len = ar->firmware->size;
+
+	/* magic also includes the null byte, check that as well */
+	magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;
+
+	if (len < magic_len) {
+		ath10k_err(ar, "firmware file '%s/%s' too small to contain magic: %zu\n",
+			   ar->hw_params.fw.dir, name, len);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) {
+		ath10k_err(ar, "invalid firmware magic\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	/* jump over the padding */
+	magic_len = ALIGN(magic_len, 4);
+
+	len -= magic_len;
+	data += magic_len;
+
+	/* loop elements */
+	while (len > sizeof(struct ath10k_fw_ie)) {
+		hdr = (struct ath10k_fw_ie *)data;
+
+		ie_id = le32_to_cpu(hdr->id);
+		ie_len = le32_to_cpu(hdr->len);
+
+		len -= sizeof(*hdr);
+		data += sizeof(*hdr);
+
+		if (len < ie_len) {
+			ath10k_err(ar, "invalid length for FW IE %d (%zu < %zu)\n",
+				   ie_id, len, ie_len);
+			ret = -EINVAL;
+			goto err;
+		}
+
+		switch (ie_id) {
+		case ATH10K_FW_IE_FW_VERSION:
+			if (ie_len > sizeof(ar->hw->wiphy->fw_version) - 1)
+				break;
+
+			memcpy(ar->hw->wiphy->fw_version, data, ie_len);
+			ar->hw->wiphy->fw_version[ie_len] = '\0';
+
+			ath10k_dbg(ar, ATH10K_DBG_BOOT,
+				   "found fw version %s\n",
+				    ar->hw->wiphy->fw_version);
+			break;
+		case ATH10K_FW_IE_TIMESTAMP:
+			if (ie_len != sizeof(u32))
+				break;
+
+			timestamp = (__le32 *)data;
+
+			ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw timestamp %d\n",
+				   le32_to_cpup(timestamp));
+			break;
+		case ATH10K_FW_IE_FEATURES:
+			ath10k_dbg(ar, ATH10K_DBG_BOOT,
+				   "found firmware features ie (%zd B)\n",
+				   ie_len);
+
+			for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) {
+				index = i / 8;
+				bit = i % 8;
+
+				if (index == ie_len)
+					break;
+
+				if (data[index] & (1 << bit)) {
+					ath10k_dbg(ar, ATH10K_DBG_BOOT,
+						   "Enabling feature bit: %i\n",
+						   i);
+					__set_bit(i, ar->fw_features);
+				}
+			}
+
+			ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "features", "",
+					ar->fw_features,
+					sizeof(ar->fw_features));
+			break;
+		case ATH10K_FW_IE_FW_IMAGE:
+			ath10k_dbg(ar, ATH10K_DBG_BOOT,
+				   "found fw image ie (%zd B)\n",
+				   ie_len);
+
+			ar->firmware_data = data;
+			ar->firmware_len = ie_len;
+
+			break;
+		case ATH10K_FW_IE_OTP_IMAGE:
+			ath10k_dbg(ar, ATH10K_DBG_BOOT,
+				   "found otp image ie (%zd B)\n",
+				   ie_len);
+
+			ar->otp_data = data;
+			ar->otp_len = ie_len;
+
+			break;
+		case ATH10K_FW_IE_WMI_OP_VERSION:
+			if (ie_len != sizeof(u32))
+				break;
+
+			version = (__le32 *)data;
+
+			ar->wmi.op_version = le32_to_cpup(version);
+
+			ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie wmi op version %d\n",
+				   ar->wmi.op_version);
+			break;
+		case ATH10K_FW_IE_HTT_OP_VERSION:
+			if (ie_len != sizeof(u32))
+				break;
+
+			version = (__le32 *)data;
+
+			ar->htt.op_version = le32_to_cpup(version);
+
+			ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie htt op version %d\n",
+				   ar->htt.op_version);
+			break;
+		case ATH10K_FW_IE_FW_CODE_SWAP_IMAGE:
+			ath10k_dbg(ar, ATH10K_DBG_BOOT,
+				   "found fw code swap image ie (%zd B)\n",
+				   ie_len);
+			ar->swap.firmware_codeswap_data = data;
+			ar->swap.firmware_codeswap_len = ie_len;
+			break;
+		default:
+			ath10k_warn(ar, "Unknown FW IE: %u\n",
+				    le32_to_cpu(hdr->id));
+			break;
+		}
+
+		/* jump over the padding */
+		ie_len = ALIGN(ie_len, 4);
+
+		len -= ie_len;
+		data += ie_len;
+	}
+
+	if (!ar->firmware_data || !ar->firmware_len) {
+		ath10k_warn(ar, "No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n",
+			    ar->hw_params.fw.dir, name);
+		ret = -ENOMEDIUM;
+		goto err;
+	}
+
+	return 0;
+
+err:
+	ath10k_core_free_firmware_files(ar);
+	return ret;
+}
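+
+/*
+ * Illustrative layout of the firmware-N.bin container parsed above:
+ *
+ *	| firmware magic, NUL terminated, padded to 4 bytes      |
+ *	| IE id (__le32) | IE len (__le32) | data, padded to 4 B |
+ *	| IE id (__le32) | IE len (__le32) | data, padded to 4 B |
+ *	| ...                                                    |
+ *
+ * Typical IEs include ATH10K_FW_IE_FW_VERSION, ATH10K_FW_IE_FEATURES,
+ * ATH10K_FW_IE_FW_IMAGE and ATH10K_FW_IE_OTP_IMAGE; at minimum an
+ * FW_IMAGE element must be present or -ENOMEDIUM is returned.
+ */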
+
+static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
+{
+	int ret;
+
+	/* calibration file is optional, don't check for any errors */
+	ath10k_fetch_cal_file(ar);
+
+	ar->fw_api = 5;
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
+
+	ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API5_FILE);
+	if (ret == 0)
+		goto success;
+
+	ar->fw_api = 4;
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
+
+	ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API4_FILE);
+	if (ret == 0)
+		goto success;
+
+	ar->fw_api = 3;
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
+
+	ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API3_FILE);
+	if (ret == 0)
+		goto success;
+
+	ar->fw_api = 2;
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
+
+	ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE);
+	if (ret == 0)
+		goto success;
+
+	ar->fw_api = 1;
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
+
+	ret = ath10k_core_fetch_firmware_api_1(ar);
+	if (ret)
+		return ret;
+
+success:
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);
+
+	return 0;
+}
+
+static int ath10k_download_cal_data(struct ath10k *ar)
+{
+	int ret;
+
+	ret = ath10k_download_cal_file(ar);
+	if (ret == 0) {
+		ar->cal_mode = ATH10K_CAL_MODE_FILE;
+		goto done;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT,
+		   "boot did not find a calibration file, try DT next: %d\n",
+		   ret);
+
+	ret = ath10k_download_cal_dt(ar);
+	if (ret == 0) {
+		ar->cal_mode = ATH10K_CAL_MODE_DT;
+		goto done;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT,
+		   "boot did not find DT entry, try OTP next: %d\n",
+		   ret);
+
+	ret = ath10k_download_and_run_otp(ar);
+	if (ret) {
+		ath10k_err(ar, "failed to run otp: %d\n", ret);
+		return ret;
+	}
+
+	ar->cal_mode = ATH10K_CAL_MODE_OTP;
+
+done:
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using calibration mode %s\n",
+		   ath10k_cal_mode_str(ar->cal_mode));
+	return 0;
+}
+
+static int ath10k_init_uart(struct ath10k *ar)
+{
+	int ret;
+
+	/*
+	 * Explicitly setting UART prints to zero as target turns it on
+	 * based on scratch registers.
+	 */
+	ret = ath10k_bmi_write32(ar, hi_serial_enable, 0);
+	if (ret) {
+		ath10k_warn(ar, "could not disable UART prints (%d)\n", ret);
+		return ret;
+	}
+
+	if (!uart_print)
+		return 0;
+
+	ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin, ar->hw_params.uart_pin);
+	if (ret) {
+		ath10k_warn(ar, "could not enable UART prints (%d)\n", ret);
+		return ret;
+	}
+
+	ret = ath10k_bmi_write32(ar, hi_serial_enable, 1);
+	if (ret) {
+		ath10k_warn(ar, "could not enable UART prints (%d)\n", ret);
+		return ret;
+	}
+
+	/* Set the UART baud rate to 19200. */
+	ret = ath10k_bmi_write32(ar, hi_desired_baud_rate, 19200);
+	if (ret) {
+		ath10k_warn(ar, "could not set the baud rate (%d)\n", ret);
+		return ret;
+	}
+
+	ath10k_info(ar, "UART prints enabled\n");
+	return 0;
+}
+
+static int ath10k_init_hw_params(struct ath10k *ar)
+{
+	const struct ath10k_hw_params *uninitialized_var(hw_params);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(ath10k_hw_params_list); i++) {
+		hw_params = &ath10k_hw_params_list[i];
+
+		if (hw_params->id == ar->target_version &&
+		    hw_params->dev_id == ar->dev_id)
+			break;
+	}
+
+	if (i == ARRAY_SIZE(ath10k_hw_params_list)) {
+		ath10k_err(ar, "Unsupported hardware version: 0x%x\n",
+			   ar->target_version);
+		return -EINVAL;
+	}
+
+	ar->hw_params = *hw_params;
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "Hardware name %s version 0x%x\n",
+		   ar->hw_params.name, ar->target_version);
+
+	return 0;
+}
+
+static void ath10k_core_restart(struct work_struct *work)
+{
+	struct ath10k *ar = container_of(work, struct ath10k, restart_work);
+
+	set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
+
+	/* Place a barrier to make sure the compiler doesn't reorder
+	 * CRASH_FLUSH and calling other functions.
+	 */
+	barrier();
+
+	ieee80211_stop_queues(ar->hw);
+	ath10k_drain_tx(ar);
+	complete_all(&ar->scan.started);
+	complete_all(&ar->scan.completed);
+	complete_all(&ar->scan.on_channel);
+	complete_all(&ar->offchan_tx_completed);
+	complete_all(&ar->install_key_done);
+	complete_all(&ar->vdev_setup_done);
+	complete_all(&ar->thermal.wmi_sync);
+	wake_up(&ar->htt.empty_tx_wq);
+	wake_up(&ar->wmi.tx_credits_wq);
+	wake_up(&ar->peer_mapping_wq);
+
+	mutex_lock(&ar->conf_mutex);
+
+	switch (ar->state) {
+	case ATH10K_STATE_ON:
+		ar->state = ATH10K_STATE_RESTARTING;
+		ath10k_hif_stop(ar);
+		ath10k_scan_finish(ar);
+		ieee80211_restart_hw(ar->hw);
+		break;
+	case ATH10K_STATE_OFF:
+		/* this can happen if driver is being unloaded
+		 * or if the crash happens during FW probing */
+		ath10k_warn(ar, "cannot restart a device that hasn't been started\n");
+		break;
+	case ATH10K_STATE_RESTARTING:
+		/* hw restart might be requested from multiple places */
+		break;
+	case ATH10K_STATE_RESTARTED:
+		ar->state = ATH10K_STATE_WEDGED;
+		/* fall through */
+	case ATH10K_STATE_WEDGED:
+		ath10k_warn(ar, "device is wedged, will not restart\n");
+		break;
+	case ATH10K_STATE_UTF:
+		ath10k_warn(ar, "firmware restart in UTF mode not supported\n");
+		break;
+	}
+
+	mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath10k_core_init_firmware_features(struct ath10k *ar)
+{
+	if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features) &&
+	    !test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
+		ath10k_err(ar, "feature bits corrupted: 10.2 feature requires 10.x feature to be set as well");
+		return -EINVAL;
+	}
+
+	if (ar->wmi.op_version >= ATH10K_FW_WMI_OP_VERSION_MAX) {
+		ath10k_err(ar, "unsupported WMI OP version (max %d): %d\n",
+			   ATH10K_FW_WMI_OP_VERSION_MAX, ar->wmi.op_version);
+		return -EINVAL;
+	}
+
+	ar->wmi.rx_decap_mode = ATH10K_HW_TXRX_NATIVE_WIFI;
+	switch (ath10k_cryptmode_param) {
+	case ATH10K_CRYPT_MODE_HW:
+		clear_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags);
+		clear_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags);
+		break;
+	case ATH10K_CRYPT_MODE_SW:
+		if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT,
+			      ar->fw_features)) {
+			ath10k_err(ar, "cryptmode > 0 requires raw mode support from firmware");
+			return -EINVAL;
+		}
+
+		set_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags);
+		set_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags);
+		break;
+	default:
+		ath10k_info(ar, "invalid cryptmode: %d\n",
+			    ath10k_cryptmode_param);
+		return -EINVAL;
+	}
+
+	ar->htt.max_num_amsdu = ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT;
+	ar->htt.max_num_ampdu = ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT;
+
+	if (rawmode) {
+		if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT,
+			      ar->fw_features)) {
+			ath10k_err(ar, "rawmode = 1 requires support from firmware");
+			return -EINVAL;
+		}
+		set_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags);
+	}
+
+	if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+		ar->wmi.rx_decap_mode = ATH10K_HW_TXRX_RAW;
+
+		/* Workaround:
+		 *
+		 * Firmware A-MSDU aggregation breaks with RAW Tx encap mode
+		 * and causes enormous performance issues (malformed frames,
+		 * etc).
+		 *
+		 * Disabling A-MSDU makes RAW mode stable with heavy traffic
+		 * albeit a bit slower compared to regular operation.
+		 */
+		ar->htt.max_num_amsdu = 1;
+	}
+
+	/* Backwards compatibility for firmwares without
+	 * ATH10K_FW_IE_WMI_OP_VERSION.
+	 */
+	if (ar->wmi.op_version == ATH10K_FW_WMI_OP_VERSION_UNSET) {
+		if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
+			if (test_bit(ATH10K_FW_FEATURE_WMI_10_2,
+				     ar->fw_features))
+				ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_10_2;
+			else
+				ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
+		} else {
+			ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_MAIN;
+		}
+	}
+
+	switch (ar->wmi.op_version) {
+	case ATH10K_FW_WMI_OP_VERSION_MAIN:
+		ar->max_num_peers = TARGET_NUM_PEERS;
+		ar->max_num_stations = TARGET_NUM_STATIONS;
+		ar->max_num_vdevs = TARGET_NUM_VDEVS;
+		ar->htt.max_num_pending_tx = TARGET_NUM_MSDU_DESC;
+		ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
+			WMI_STAT_PEER;
+		ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
+		break;
+	case ATH10K_FW_WMI_OP_VERSION_10_1:
+	case ATH10K_FW_WMI_OP_VERSION_10_2:
+	case ATH10K_FW_WMI_OP_VERSION_10_2_4:
+		ar->max_num_peers = TARGET_10X_NUM_PEERS;
+		ar->max_num_stations = TARGET_10X_NUM_STATIONS;
+		ar->max_num_vdevs = TARGET_10X_NUM_VDEVS;
+		ar->htt.max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
+		ar->fw_stats_req_mask = WMI_STAT_PEER;
+		ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
+		break;
+	case ATH10K_FW_WMI_OP_VERSION_TLV:
+		ar->max_num_peers = TARGET_TLV_NUM_PEERS;
+		ar->max_num_stations = TARGET_TLV_NUM_STATIONS;
+		ar->max_num_vdevs = TARGET_TLV_NUM_VDEVS;
+		ar->max_num_tdls_vdevs = TARGET_TLV_NUM_TDLS_VDEVS;
+		ar->htt.max_num_pending_tx = TARGET_TLV_NUM_MSDU_DESC;
+		ar->wow.max_num_patterns = TARGET_TLV_NUM_WOW_PATTERNS;
+		ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
+			WMI_STAT_PEER;
+		ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
+		break;
+	case ATH10K_FW_WMI_OP_VERSION_10_4:
+		ar->max_num_peers = TARGET_10_4_NUM_PEERS;
+		ar->max_num_stations = TARGET_10_4_NUM_STATIONS;
+		ar->num_active_peers = TARGET_10_4_ACTIVE_PEERS;
+		ar->max_num_vdevs = TARGET_10_4_NUM_VDEVS;
+		ar->num_tids = TARGET_10_4_TGT_NUM_TIDS;
+		ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC;
+		ar->fw_stats_req_mask = WMI_STAT_PEER;
+		ar->max_spatial_stream = WMI_10_4_MAX_SPATIAL_STREAM;
+		break;
+	case ATH10K_FW_WMI_OP_VERSION_UNSET:
+	case ATH10K_FW_WMI_OP_VERSION_MAX:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	/* Backwards compatibility for firmwares without
+	 * ATH10K_FW_IE_HTT_OP_VERSION.
+	 */
+	if (ar->htt.op_version == ATH10K_FW_HTT_OP_VERSION_UNSET) {
+		switch (ar->wmi.op_version) {
+		case ATH10K_FW_WMI_OP_VERSION_MAIN:
+			ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_MAIN;
+			break;
+		case ATH10K_FW_WMI_OP_VERSION_10_1:
+		case ATH10K_FW_WMI_OP_VERSION_10_2:
+		case ATH10K_FW_WMI_OP_VERSION_10_2_4:
+			ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_10_1;
+			break;
+		case ATH10K_FW_WMI_OP_VERSION_TLV:
+			ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_TLV;
+			break;
+		case ATH10K_FW_WMI_OP_VERSION_10_4:
+		case ATH10K_FW_WMI_OP_VERSION_UNSET:
+		case ATH10K_FW_WMI_OP_VERSION_MAX:
+			WARN_ON(1);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
+{
+	int status;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	clear_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
+
+	ath10k_bmi_start(ar);
+
+	if (ath10k_init_configure_target(ar)) {
+		status = -EINVAL;
+		goto err;
+	}
+
+	status = ath10k_download_cal_data(ar);
+	if (status)
+		goto err;
+
+	/* Some of the qca988x solutions have a global reset issue
+	 * during target initialization. Bypassing the PLL setting before
+	 * downloading firmware and letting the SoC run on REF_CLK fixes
+	 * the problem. A corresponding firmware change is also needed
+	 * to set the clock source once the target is initialized.
+	 */
+	if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT,
+		     ar->fw_features)) {
+		status = ath10k_bmi_write32(ar, hi_skip_clock_init, 1);
+		if (status) {
+			ath10k_err(ar, "could not write to skip_clock_init: %d\n",
+				   status);
+			goto err;
+		}
+	}
+
+	status = ath10k_download_fw(ar, mode);
+	if (status)
+		goto err;
+
+	status = ath10k_init_uart(ar);
+	if (status)
+		goto err;
+
+	ar->htc.htc_ops.target_send_suspend_complete =
+		ath10k_send_suspend_complete;
+
+	status = ath10k_htc_init(ar);
+	if (status) {
+		ath10k_err(ar, "could not init HTC (%d)\n", status);
+		goto err;
+	}
+
+	status = ath10k_bmi_done(ar);
+	if (status)
+		goto err;
+
+	status = ath10k_wmi_attach(ar);
+	if (status) {
+		ath10k_err(ar, "WMI attach failed: %d\n", status);
+		goto err;
+	}
+
+	status = ath10k_htt_init(ar);
+	if (status) {
+		ath10k_err(ar, "failed to init htt: %d\n", status);
+		goto err_wmi_detach;
+	}
+
+	status = ath10k_htt_tx_alloc(&ar->htt);
+	if (status) {
+		ath10k_err(ar, "failed to alloc htt tx: %d\n", status);
+		goto err_wmi_detach;
+	}
+
+	/* If firmware indicates Full Rx Reorder support it must be used in a
+	 * slightly different manner. Let HTT code know.
+	 */
+	ar->htt.rx_ring.in_ord_rx = !!(test_bit(WMI_SERVICE_RX_FULL_REORDER,
+						ar->wmi.svc_map));
+
+	status = ath10k_htt_rx_alloc(&ar->htt);
+	if (status) {
+		ath10k_err(ar, "failed to alloc htt rx: %d\n", status);
+		goto err_htt_tx_detach;
+	}
+
+	status = ath10k_hif_start(ar);
+	if (status) {
+		ath10k_err(ar, "could not start HIF: %d\n", status);
+		goto err_htt_rx_detach;
+	}
+
+	status = ath10k_htc_wait_target(&ar->htc);
+	if (status) {
+		ath10k_err(ar, "failed to connect to HTC: %d\n", status);
+		goto err_hif_stop;
+	}
+
+	if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
+		status = ath10k_htt_connect(&ar->htt);
+		if (status) {
+			ath10k_err(ar, "failed to connect htt (%d)\n", status);
+			goto err_hif_stop;
+		}
+	}
+
+	status = ath10k_wmi_connect(ar);
+	if (status) {
+		ath10k_err(ar, "could not connect wmi: %d\n", status);
+		goto err_hif_stop;
+	}
+
+	status = ath10k_htc_start(&ar->htc);
+	if (status) {
+		ath10k_err(ar, "failed to start htc: %d\n", status);
+		goto err_hif_stop;
+	}
+
+	if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
+		status = ath10k_wmi_wait_for_service_ready(ar);
+		if (status) {
+			ath10k_warn(ar, "wmi service ready event not received");
+			goto err_hif_stop;
+		}
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "firmware %s booted\n",
+		   ar->hw->wiphy->fw_version);
+
+	status = ath10k_wmi_cmd_init(ar);
+	if (status) {
+		ath10k_err(ar, "could not send WMI init command (%d)\n",
+			   status);
+		goto err_hif_stop;
+	}
+
+	status = ath10k_wmi_wait_for_unified_ready(ar);
+	if (status) {
+		ath10k_err(ar, "wmi unified ready event not received\n");
+		goto err_hif_stop;
+	}
+
+	status = ath10k_htt_rx_ring_refill(ar);
+	if (status) {
+		ath10k_err(ar, "failed to refill htt rx ring: %d\n", status);
+		goto err_hif_stop;
+	}
+
+	ar->free_vdev_map = (1LL << ar->max_num_vdevs) - 1;
+
+	INIT_LIST_HEAD(&ar->arvifs);
+
+	/* we don't care about HTT in UTF mode */
+	if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
+		status = ath10k_htt_setup(&ar->htt);
+		if (status) {
+			ath10k_err(ar, "failed to setup htt: %d\n", status);
+			goto err_hif_stop;
+		}
+	}
+
+	status = ath10k_debug_start(ar);
+	if (status)
+		goto err_hif_stop;
+
+	return 0;
+
+err_hif_stop:
+	ath10k_hif_stop(ar);
+err_htt_rx_detach:
+	ath10k_htt_rx_free(&ar->htt);
+err_htt_tx_detach:
+	ath10k_htt_tx_free(&ar->htt);
+err_wmi_detach:
+	ath10k_wmi_detach(ar);
+err:
+	return status;
+}
+EXPORT_SYMBOL(ath10k_core_start);
+
+int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt)
+{
+	int ret;
+	unsigned long time_left;
+
+	reinit_completion(&ar->target_suspend);
+
+	ret = ath10k_wmi_pdev_suspend_target(ar, suspend_opt);
+	if (ret) {
+		ath10k_warn(ar, "could not suspend target (%d)\n", ret);
+		return ret;
+	}
+
+	time_left = wait_for_completion_timeout(&ar->target_suspend, 1 * HZ);
+
+	if (!time_left) {
+		ath10k_warn(ar, "suspend timed out - target pause event never came\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+void ath10k_core_stop(struct ath10k *ar)
+{
+	lockdep_assert_held(&ar->conf_mutex);
+	ath10k_debug_stop(ar);
+
+	/* try to suspend target */
+	if (ar->state != ATH10K_STATE_RESTARTING &&
+	    ar->state != ATH10K_STATE_UTF)
+		ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR);
+
+	ath10k_hif_stop(ar);
+	ath10k_htt_tx_free(&ar->htt);
+	ath10k_htt_rx_free(&ar->htt);
+	ath10k_wmi_detach(ar);
+}
+EXPORT_SYMBOL(ath10k_core_stop);
+
+/* mac80211 manages fw/hw initialization through start/stop hooks. However, in
+ * order to know what hw capabilities should be advertised to mac80211 it is
+ * necessary to load the firmware (and tear it down immediately since the start
+ * hook will try to init it again) before registering */
+static int ath10k_core_probe_fw(struct ath10k *ar)
+{
+	struct bmi_target_info target_info;
+	int ret = 0;
+
+	ret = ath10k_hif_power_up(ar);
+	if (ret) {
+		ath10k_err(ar, "could not start pci hif (%d)\n", ret);
+		return ret;
+	}
+
+	memset(&target_info, 0, sizeof(target_info));
+	ret = ath10k_bmi_get_target_info(ar, &target_info);
+	if (ret) {
+		ath10k_err(ar, "could not get target info (%d)\n", ret);
+		goto err_power_down;
+	}
+
+	ar->target_version = target_info.version;
+	ar->hw->wiphy->hw_version = target_info.version;
+
+	ret = ath10k_init_hw_params(ar);
+	if (ret) {
+		ath10k_err(ar, "could not get hw params (%d)\n", ret);
+		goto err_power_down;
+	}
+
+	ret = ath10k_core_fetch_firmware_files(ar);
+	if (ret) {
+		ath10k_err(ar, "could not fetch firmware files (%d)\n", ret);
+		goto err_power_down;
+	}
+
+	ret = ath10k_core_get_board_id_from_otp(ar);
+	if (ret && ret != -EOPNOTSUPP) {
+		ath10k_err(ar, "failed to get board id from otp for qca99x0: %d\n",
+			   ret);
+		goto err_free_firmware_files;
+	}
+
+	ret = ath10k_core_fetch_board_file(ar);
+	if (ret) {
+		ath10k_err(ar, "failed to fetch board file: %d\n", ret);
+		goto err_free_firmware_files;
+	}
+
+	ret = ath10k_core_init_firmware_features(ar);
+	if (ret) {
+		ath10k_err(ar, "fatal problem with firmware features: %d\n",
+			   ret);
+		goto err_free_firmware_files;
+	}
+
+	ret = ath10k_swap_code_seg_init(ar);
+	if (ret) {
+		ath10k_err(ar, "failed to initialize code swap segment: %d\n",
+			   ret);
+		goto err_free_firmware_files;
+	}
+
+	mutex_lock(&ar->conf_mutex);
+
+	ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL);
+	if (ret) {
+		ath10k_err(ar, "could not init core (%d)\n", ret);
+		goto err_unlock;
+	}
+
+	ath10k_print_driver_info(ar);
+	ath10k_core_stop(ar);
+
+	mutex_unlock(&ar->conf_mutex);
+
+	ath10k_hif_power_down(ar);
+	return 0;
+
+err_unlock:
+	mutex_unlock(&ar->conf_mutex);
+
+err_free_firmware_files:
+	ath10k_core_free_firmware_files(ar);
+
+err_power_down:
+	ath10k_hif_power_down(ar);
+
+	return ret;
+}
+
+static void ath10k_core_register_work(struct work_struct *work)
+{
+	struct ath10k *ar = container_of(work, struct ath10k, register_work);
+	int status;
+
+	status = ath10k_core_probe_fw(ar);
+	if (status) {
+		ath10k_err(ar, "could not probe fw (%d)\n", status);
+		goto err;
+	}
+
+	status = ath10k_mac_register(ar);
+	if (status) {
+		ath10k_err(ar, "could not register to mac80211 (%d)\n", status);
+		goto err_release_fw;
+	}
+
+	status = ath10k_debug_register(ar);
+	if (status) {
+		ath10k_err(ar, "unable to initialize debugfs\n");
+		goto err_unregister_mac;
+	}
+
+	status = ath10k_spectral_create(ar);
+	if (status) {
+		ath10k_err(ar, "failed to initialize spectral\n");
+		goto err_debug_destroy;
+	}
+
+	status = ath10k_thermal_register(ar);
+	if (status) {
+		ath10k_err(ar, "could not register thermal device: %d\n",
+			   status);
+		goto err_spectral_destroy;
+	}
+
+	set_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags);
+	return;
+
+err_spectral_destroy:
+	ath10k_spectral_destroy(ar);
+err_debug_destroy:
+	ath10k_debug_destroy(ar);
+err_unregister_mac:
+	ath10k_mac_unregister(ar);
+err_release_fw:
+	ath10k_core_free_firmware_files(ar);
+err:
+	/* TODO: It's probably a good idea to release device from the driver
+	 * but calling device_release_driver() here will cause a deadlock.
+	 */
+	return;
+}
+
+int ath10k_core_register(struct ath10k *ar, u32 chip_id)
+{
+	ar->chip_id = chip_id;
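+	/* The actual firmware probe and mac80211 registration run
+	 * asynchronously from ath10k_core_register_work().
+	 */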
+	queue_work(ar->workqueue, &ar->register_work);
+
+	return 0;
+}
+EXPORT_SYMBOL(ath10k_core_register);
+
+void ath10k_core_unregister(struct ath10k *ar)
+{
+	cancel_work_sync(&ar->register_work);
+
+	if (!test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags))
+		return;
+
+	ath10k_thermal_unregister(ar);
+	/* Stop spectral before unregistering from mac80211 to remove the
+	 * relayfs debugfs file cleanly. Otherwise the parent debugfs tree
+	 * would already have been freed recursively, leading to a double free.
+	 */
+	ath10k_spectral_destroy(ar);
+
+	/* We must unregister from mac80211 before we stop HTC and HIF.
+	 * Otherwise we will fail to submit commands to FW and mac80211 will be
+	 * unhappy about callback failures. */
+	ath10k_mac_unregister(ar);
+
+	ath10k_testmode_destroy(ar);
+
+	ath10k_core_free_firmware_files(ar);
+	ath10k_core_free_board_files(ar);
+
+	ath10k_debug_unregister(ar);
+}
+EXPORT_SYMBOL(ath10k_core_unregister);
+
+struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
+				  enum ath10k_bus bus,
+				  enum ath10k_hw_rev hw_rev,
+				  const struct ath10k_hif_ops *hif_ops)
+{
+	struct ath10k *ar;
+	int ret;
+
+	ar = ath10k_mac_create(priv_size);
+	if (!ar)
+		return NULL;
+
+	ar->ath_common.priv = ar;
+	ar->ath_common.hw = ar->hw;
+	ar->dev = dev;
+	ar->hw_rev = hw_rev;
+	ar->hif.ops = hif_ops;
+	ar->hif.bus = bus;
+
+	switch (hw_rev) {
+	case ATH10K_HW_QCA988X:
+		ar->regs = &qca988x_regs;
+		ar->hw_values = &qca988x_values;
+		break;
+	case ATH10K_HW_QCA6174:
+	case ATH10K_HW_QCA9377:
+		ar->regs = &qca6174_regs;
+		ar->hw_values = &qca6174_values;
+		break;
+	case ATH10K_HW_QCA99X0:
+		ar->regs = &qca99x0_regs;
+		ar->hw_values = &qca99x0_values;
+		break;
+	default:
+		ath10k_err(ar, "unsupported core hardware revision %d\n",
+			   hw_rev);
+		ret = -ENOTSUPP;
+		goto err_free_mac;
+	}
+
+	init_completion(&ar->scan.started);
+	init_completion(&ar->scan.completed);
+	init_completion(&ar->scan.on_channel);
+	init_completion(&ar->target_suspend);
+	init_completion(&ar->wow.wakeup_completed);
+
+	init_completion(&ar->install_key_done);
+	init_completion(&ar->vdev_setup_done);
+	init_completion(&ar->thermal.wmi_sync);
+
+	INIT_DELAYED_WORK(&ar->scan.timeout, ath10k_scan_timeout_work);
+
+	ar->workqueue = create_singlethread_workqueue("ath10k_wq");
+	if (!ar->workqueue)
+		goto err_free_mac;
+
+	ar->workqueue_aux = create_singlethread_workqueue("ath10k_aux_wq");
+	if (!ar->workqueue_aux)
+		goto err_free_wq;
+
+	mutex_init(&ar->conf_mutex);
+	spin_lock_init(&ar->data_lock);
+
+	INIT_LIST_HEAD(&ar->peers);
+	init_waitqueue_head(&ar->peer_mapping_wq);
+	init_waitqueue_head(&ar->htt.empty_tx_wq);
+	init_waitqueue_head(&ar->wmi.tx_credits_wq);
+
+	init_completion(&ar->offchan_tx_completed);
+	INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work);
+	skb_queue_head_init(&ar->offchan_tx_queue);
+
+	INIT_WORK(&ar->wmi_mgmt_tx_work, ath10k_mgmt_over_wmi_tx_work);
+	skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
+
+	INIT_WORK(&ar->register_work, ath10k_core_register_work);
+	INIT_WORK(&ar->restart_work, ath10k_core_restart);
+
+	ret = ath10k_debug_create(ar);
+	if (ret)
+		goto err_free_aux_wq;
+
+	return ar;
+
+err_free_aux_wq:
+	destroy_workqueue(ar->workqueue_aux);
+err_free_wq:
+	destroy_workqueue(ar->workqueue);
+
+err_free_mac:
+	ath10k_mac_destroy(ar);
+
+	return NULL;
+}
+EXPORT_SYMBOL(ath10k_core_create);
+
+void ath10k_core_destroy(struct ath10k *ar)
+{
+	flush_workqueue(ar->workqueue);
+	destroy_workqueue(ar->workqueue);
+
+	flush_workqueue(ar->workqueue_aux);
+	destroy_workqueue(ar->workqueue_aux);
+
+	ath10k_debug_destroy(ar);
+	ath10k_wmi_free_host_mem(ar);
+	ath10k_mac_destroy(ar);
+}
+EXPORT_SYMBOL(ath10k_core_destroy);
+
+MODULE_AUTHOR("Qualcomm Atheros");
+MODULE_DESCRIPTION("Core module for QCA988X PCIe devices.");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
new file mode 100644
index 0000000..858d75f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -0,0 +1,870 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _CORE_H_
+#define _CORE_H_
+
+#include <linux/completion.h>
+#include <linux/if_ether.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/uuid.h>
+#include <linux/time.h>
+
+#include "htt.h"
+#include "htc.h"
+#include "hw.h"
+#include "targaddrs.h"
+#include "wmi.h"
+#include "../ath.h"
+#include "../regd.h"
+#include "../dfs_pattern_detector.h"
+#include "spectral.h"
+#include "thermal.h"
+#include "wow.h"
+#include "swap.h"
+
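+/* Register field helpers: MS() extracts and SM() inserts a bitfield using
+ * the per-field _MASK/_LSB definitions, e.g. MS(val, SOME_FIELD) expands to
+ * ((val & SOME_FIELD_MASK) >> SOME_FIELD_LSB); WO() converts a byte offset
+ * to a 32-bit word offset. SOME_FIELD is an illustrative name only.
+ */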
+#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
+#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
+#define WO(_f)      ((_f##_OFFSET) >> 2)
+
+#define ATH10K_SCAN_ID 0
+#define WMI_READY_TIMEOUT (5 * HZ)
+#define ATH10K_FLUSH_TIMEOUT_HZ (5*HZ)
+#define ATH10K_CONNECTION_LOSS_HZ (3*HZ)
+#define ATH10K_NUM_CHANS 39
+
+/* Antenna noise floor */
+#define ATH10K_DEFAULT_NOISE_FLOOR -95
+
+#define ATH10K_MAX_NUM_MGMT_PENDING 128
+
+/* number of failed packets (20 packets with 16 sw retries each) */
+#define ATH10K_KICKOUT_THRESHOLD (20 * 16)
+
+/*
+ * Use insanely high numbers to make sure that the firmware implementation
+ * won't start, since we already have the same functionality in hostapd.
+ * Unit is seconds.
+ */
+#define ATH10K_KEEPALIVE_MIN_IDLE 3747
+#define ATH10K_KEEPALIVE_MAX_IDLE 3895
+#define ATH10K_KEEPALIVE_MAX_UNRESPONSIVE 3900
+
+struct ath10k;
+
+enum ath10k_bus {
+	ATH10K_BUS_PCI,
+};
+
+static inline const char *ath10k_bus_str(enum ath10k_bus bus)
+{
+	switch (bus) {
+	case ATH10K_BUS_PCI:
+		return "pci";
+	}
+
+	return "unknown";
+}
+
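+/* Per-skb tx state kept in ieee80211_tx_info.driver_data; ATH10K_SKB_CB()
+ * below size-checks it against IEEE80211_TX_INFO_DRIVER_DATA_SIZE.
+ */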
+struct ath10k_skb_cb {
+	dma_addr_t paddr;
+	u8 eid;
+	u8 vdev_id;
+	enum ath10k_hw_txrx_mode txmode;
+	bool is_protected;
+
+	struct {
+		u8 tid;
+		u16 freq;
+		bool is_offchan;
+		bool nohwcrypt;
+		struct ath10k_htt_txbuf *txbuf;
+		u32 txbuf_paddr;
+	} __packed htt;
+
+	struct {
+		bool dtim_zero;
+		bool deliver_cab;
+	} bcn;
+} __packed;
+
+struct ath10k_skb_rxcb {
+	dma_addr_t paddr;
+	struct hlist_node hlist;
+};
+
+static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
+{
+	BUILD_BUG_ON(sizeof(struct ath10k_skb_cb) >
+		     IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
+	return (struct ath10k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data;
+}
+
+static inline struct ath10k_skb_rxcb *ATH10K_SKB_RXCB(struct sk_buff *skb)
+{
+	BUILD_BUG_ON(sizeof(struct ath10k_skb_rxcb) > sizeof(skb->cb));
+	return (struct ath10k_skb_rxcb *)skb->cb;
+}
+
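+/* Recover the owning sk_buff from an rx control block; the rxcb lives in
+ * skb->cb (see ATH10K_SKB_RXCB() above), so container_of() applies.
+ */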
+#define ATH10K_RXCB_SKB(rxcb) \
+		container_of((void *)rxcb, struct sk_buff, cb)
+
+static inline u32 host_interest_item_address(u32 item_offset)
+{
+	return QCA988X_HOST_INTEREST_ADDRESS + item_offset;
+}
+
+struct ath10k_bmi {
+	bool done_sent;
+};
+
+struct ath10k_mem_chunk {
+	void *vaddr;
+	dma_addr_t paddr;
+	u32 len;
+	u32 req_id;
+};
+
+struct ath10k_wmi {
+	enum ath10k_fw_wmi_op_version op_version;
+	enum ath10k_htc_ep_id eid;
+	struct completion service_ready;
+	struct completion unified_ready;
+	wait_queue_head_t tx_credits_wq;
+	DECLARE_BITMAP(svc_map, WMI_SERVICE_MAX);
+	struct wmi_cmd_map *cmd;
+	struct wmi_vdev_param_map *vdev_param;
+	struct wmi_pdev_param_map *pdev_param;
+	const struct wmi_ops *ops;
+
+	u32 num_mem_chunks;
+	u32 rx_decap_mode;
+	struct ath10k_mem_chunk mem_chunks[WMI_MAX_MEM_REQS];
+};
+
+struct ath10k_fw_stats_peer {
+	struct list_head list;
+
+	u8 peer_macaddr[ETH_ALEN];
+	u32 peer_rssi;
+	u32 peer_tx_rate;
+	u32 peer_rx_rate; /* 10x only */
+};
+
+struct ath10k_fw_stats_vdev {
+	struct list_head list;
+
+	u32 vdev_id;
+	u32 beacon_snr;
+	u32 data_snr;
+	u32 num_tx_frames[4];
+	u32 num_rx_frames;
+	u32 num_tx_frames_retries[4];
+	u32 num_tx_frames_failures[4];
+	u32 num_rts_fail;
+	u32 num_rts_success;
+	u32 num_rx_err;
+	u32 num_rx_discard;
+	u32 num_tx_not_acked;
+	u32 tx_rate_history[10];
+	u32 beacon_rssi_history[10];
+};
+
+struct ath10k_fw_stats_pdev {
+	struct list_head list;
+
+	/* PDEV stats */
+	s32 ch_noise_floor;
+	u32 tx_frame_count;
+	u32 rx_frame_count;
+	u32 rx_clear_count;
+	u32 cycle_count;
+	u32 phy_err_count;
+	u32 chan_tx_power;
+	u32 ack_rx_bad;
+	u32 rts_bad;
+	u32 rts_good;
+	u32 fcs_bad;
+	u32 no_beacons;
+	u32 mib_int_count;
+
+	/* PDEV TX stats */
+	s32 comp_queued;
+	s32 comp_delivered;
+	s32 msdu_enqued;
+	s32 mpdu_enqued;
+	s32 wmm_drop;
+	s32 local_enqued;
+	s32 local_freed;
+	s32 hw_queued;
+	s32 hw_reaped;
+	s32 underrun;
+	u32 hw_paused;
+	s32 tx_abort;
+	s32 mpdus_requed;
+	u32 tx_ko;
+	u32 data_rc;
+	u32 self_triggers;
+	u32 sw_retry_failure;
+	u32 illgl_rate_phy_err;
+	u32 pdev_cont_xretry;
+	u32 pdev_tx_timeout;
+	u32 pdev_resets;
+	u32 phy_underrun;
+	u32 txop_ovf;
+	u32 seq_posted;
+	u32 seq_failed_queueing;
+	u32 seq_completed;
+	u32 seq_restarted;
+	u32 mu_seq_posted;
+	u32 mpdus_sw_flush;
+	u32 mpdus_hw_filter;
+	u32 mpdus_truncated;
+	u32 mpdus_ack_failed;
+	u32 mpdus_expired;
+
+	/* PDEV RX stats */
+	s32 mid_ppdu_route_change;
+	s32 status_rcvd;
+	s32 r0_frags;
+	s32 r1_frags;
+	s32 r2_frags;
+	s32 r3_frags;
+	s32 htt_msdus;
+	s32 htt_mpdus;
+	s32 loc_msdus;
+	s32 loc_mpdus;
+	s32 oversize_amsdu;
+	s32 phy_errs;
+	s32 phy_err_drop;
+	s32 mpdu_errs;
+	s32 rx_ovfl_errs;
+};
+
+struct ath10k_fw_stats {
+	struct list_head pdevs;
+	struct list_head vdevs;
+	struct list_head peers;
+};
+
+#define ATH10K_TPC_TABLE_TYPE_FLAG	1
+#define ATH10K_TPC_PREAM_TABLE_END	0xFFFF
+
+struct ath10k_tpc_table {
+	u32 pream_idx[WMI_TPC_RATE_MAX];
+	u8 rate_code[WMI_TPC_RATE_MAX];
+	char tpc_value[WMI_TPC_RATE_MAX][WMI_TPC_TX_N_CHAIN * WMI_TPC_BUF_SIZE];
+};
+
+struct ath10k_tpc_stats {
+	u32 reg_domain;
+	u32 chan_freq;
+	u32 phy_mode;
+	u32 twice_antenna_reduction;
+	u32 twice_max_rd_power;
+	s32 twice_antenna_gain;
+	u32 power_limit;
+	u32 num_tx_chain;
+	u32 ctl;
+	u32 rate_max;
+	u8 flag[WMI_TPC_FLAG];
+	struct ath10k_tpc_table tpc_table[WMI_TPC_FLAG];
+};
+
+struct ath10k_dfs_stats {
+	u32 phy_errors;
+	u32 pulses_total;
+	u32 pulses_detected;
+	u32 pulses_discarded;
+	u32 radar_detected;
+};
+
+#define ATH10K_MAX_NUM_PEER_IDS (1 << 11) /* htt rx_desc limit */
+
+struct ath10k_peer {
+	struct list_head list;
+	int vdev_id;
+	u8 addr[ETH_ALEN];
+	DECLARE_BITMAP(peer_ids, ATH10K_MAX_NUM_PEER_IDS);
+
+	/* protected by ar->data_lock */
+	struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
+};
+
+struct ath10k_sta {
+	struct ath10k_vif *arvif;
+
+	/* the following are protected by ar->data_lock */
+	u32 changed; /* IEEE80211_RC_* */
+	u32 bw;
+	u32 nss;
+	u32 smps;
+
+	struct work_struct update_wk;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+	/* protected by conf_mutex */
+	bool aggr_mode;
+#endif
+};
+
+#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)
+
+enum ath10k_beacon_state {
+	ATH10K_BEACON_SCHEDULED = 0,
+	ATH10K_BEACON_SENDING,
+	ATH10K_BEACON_SENT,
+};
+
+struct ath10k_vif {
+	struct list_head list;
+
+	u32 vdev_id;
+	enum wmi_vdev_type vdev_type;
+	enum wmi_vdev_subtype vdev_subtype;
+	u32 beacon_interval;
+	u32 dtim_period;
+	struct sk_buff *beacon;
+	/* protected by data_lock */
+	enum ath10k_beacon_state beacon_state;
+	void *beacon_buf;
+	dma_addr_t beacon_paddr;
+	unsigned long tx_paused; /* arbitrary values defined by target */
+
+	struct ath10k *ar;
+	struct ieee80211_vif *vif;
+
+	bool is_started;
+	bool is_up;
+	bool spectral_enabled;
+	bool ps;
+	u32 aid;
+	u8 bssid[ETH_ALEN];
+
+	struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1];
+	s8 def_wep_key_idx;
+
+	u16 tx_seq_no;
+
+	union {
+		struct {
+			u32 uapsd;
+		} sta;
+		struct {
+			/* 512 stations */
+			u8 tim_bitmap[64];
+			u8 tim_len;
+			u32 ssid_len;
+			u8 ssid[IEEE80211_MAX_SSID_LEN];
+			bool hidden_ssid;
+			/* P2P_IE with NoA attribute for P2P_GO case */
+			u32 noa_len;
+			u8 *noa_data;
+		} ap;
+	} u;
+
+	bool use_cts_prot;
+	bool nohwcrypt;
+	int num_legacy_stations;
+	int txpower;
+	struct wmi_wmm_params_all_arg wmm_params;
+	struct work_struct ap_csa_work;
+	struct delayed_work connection_loss_work;
+	struct cfg80211_bitrate_mask bitrate_mask;
+};
+
+struct ath10k_vif_iter {
+	u32 vdev_id;
+	struct ath10k_vif *arvif;
+};
+
+/* used for crash-dump storage, protected by data-lock */
+struct ath10k_fw_crash_data {
+	bool crashed_since_read;
+
+	uuid_le uuid;
+	struct timespec timestamp;
+	__le32 registers[REG_DUMP_COUNT_QCA988X];
+};
+
+struct ath10k_debug {
+	struct dentry *debugfs_phy;
+
+	struct ath10k_fw_stats fw_stats;
+	struct completion fw_stats_complete;
+	bool fw_stats_done;
+
+	unsigned long htt_stats_mask;
+	struct delayed_work htt_stats_dwork;
+	struct ath10k_dfs_stats dfs_stats;
+	struct ath_dfs_pool_stats dfs_pool_stats;
+
+	/* used for tpc-dump storage, protected by data-lock */
+	struct ath10k_tpc_stats *tpc_stats;
+
+	struct completion tpc_complete;
+
+	/* protected by conf_mutex */
+	u32 fw_dbglog_mask;
+	u32 fw_dbglog_level;
+	u32 pktlog_filter;
+	u32 reg_addr;
+	u32 nf_cal_period;
+
+	struct ath10k_fw_crash_data *fw_crash_data;
+};
+
+enum ath10k_state {
+	ATH10K_STATE_OFF = 0,
+	ATH10K_STATE_ON,
+
+	/* When doing firmware recovery the device is first powered down.
+	 * mac80211 is supposed to call into the start() hook later on. It is
+	 * however possible that driver unloading and firmware crash overlap.
+	 * mac80211 can wait on conf_mutex in stop() while the device is
+	 * stopped in ath10k_core_restart() work holding conf_mutex. The state
+	 * RESTARTED means that the device is up and mac80211 has started hw
+	 * reconfiguration. Once mac80211 is done with the reconfiguration we
+	 * set the state to STATE_ON in reconfig_complete(). */
+	ATH10K_STATE_RESTARTING,
+	ATH10K_STATE_RESTARTED,
+
+	/* The device has crashed while restarting hw. This state is like ON
+	 * but commands are blocked in HTC and -ECOMM response is given. This
+	 * prevents completion timeouts and makes the driver more responsive to
+	 * userspace commands. This also prevents recursive recovery. */
+	ATH10K_STATE_WEDGED,
+
+	/* factory tests */
+	ATH10K_STATE_UTF,
+};
+
+enum ath10k_firmware_mode {
+	/* the default mode, standard 802.11 functionality */
+	ATH10K_FIRMWARE_MODE_NORMAL,
+
+	/* factory tests etc */
+	ATH10K_FIRMWARE_MODE_UTF,
+};
+
+enum ath10k_fw_features {
+	/* wmi_mgmt_rx_hdr contains extra RSSI information */
+	ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX = 0,
+
+	/* Firmware from 10X branch. Deprecated, don't use in new code. */
+	ATH10K_FW_FEATURE_WMI_10X = 1,
+
+	/* firmware supports mgmt frame tx over WMI, otherwise it's HTT */
+	ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX = 2,
+
+	/* Firmware does not support P2P */
+	ATH10K_FW_FEATURE_NO_P2P = 3,
+
+	/* Firmware 10.2 feature bit. The ATH10K_FW_FEATURE_WMI_10X feature
+	 * bit is required to be set as well. Deprecated, don't use in new
+	 * code.
+	 */
+	ATH10K_FW_FEATURE_WMI_10_2 = 4,
+
+	/* Some firmware revisions lack proper multi-interface client powersave
+	 * implementation. Enabling PS could result in connection drops,
+	 * traffic stalls, etc.
+	 */
+	ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT = 5,
+
+	/* Some firmware revisions have an incomplete WoWLAN implementation
+	 * despite WMI service bit being advertised. This feature flag is used
+	 * to distinguish whether WoWLAN is really supported or not.
+	 */
+	ATH10K_FW_FEATURE_WOWLAN_SUPPORT = 6,
+
+	/* Don't trust error code from otp.bin */
+	ATH10K_FW_FEATURE_IGNORE_OTP_RESULT = 7,
+
+	/* Some firmware revisions pad the 4th hw address to a 4 byte boundary,
+	 * making it 8 bytes long in Native Wifi Rx decap.
+	 */
+	ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING = 8,
+
+	/* Firmware supports bypassing PLL setting on init. */
+	ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT = 9,
+
+	/* Raw mode support. If supported, FW supports receiving and transmitting
+	 * frames in raw mode.
+	 */
+	ATH10K_FW_FEATURE_RAW_MODE_SUPPORT = 10,
+
+	/* Firmware supports Adaptive CCA */
+	ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA = 11,
+
+	/* keep last */
+	ATH10K_FW_FEATURE_COUNT,
+};
+
+enum ath10k_dev_flags {
+	/* Indicates that the ath10k device is in the CAC phase of DFS */
+	ATH10K_CAC_RUNNING,
+	ATH10K_FLAG_CORE_REGISTERED,
+
+	/* Device has crashed and needs to restart. This indicates any pending
+	 * waiters should immediately cancel instead of waiting for a timeout.
+	 */
+	ATH10K_FLAG_CRASH_FLUSH,
+
+	/* Use Raw mode instead of native WiFi Tx/Rx encap mode.
+	 * Raw mode supports both hardware and software crypto. Native WiFi only
+	 * supports hardware crypto.
+	 */
+	ATH10K_FLAG_RAW_MODE,
+
+	/* Disable HW crypto engine */
+	ATH10K_FLAG_HW_CRYPTO_DISABLED,
+};
+
+enum ath10k_cal_mode {
+	ATH10K_CAL_MODE_FILE,
+	ATH10K_CAL_MODE_OTP,
+	ATH10K_CAL_MODE_DT,
+};
+
+enum ath10k_crypt_mode {
+	/* Only use hardware crypto engine */
+	ATH10K_CRYPT_MODE_HW,
+	/* Only use software crypto engine */
+	ATH10K_CRYPT_MODE_SW,
+};
+
+static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode)
+{
+	switch (mode) {
+	case ATH10K_CAL_MODE_FILE:
+		return "file";
+	case ATH10K_CAL_MODE_OTP:
+		return "otp";
+	case ATH10K_CAL_MODE_DT:
+		return "dt";
+	}
+
+	return "unknown";
+}
+
+enum ath10k_scan_state {
+	ATH10K_SCAN_IDLE,
+	ATH10K_SCAN_STARTING,
+	ATH10K_SCAN_RUNNING,
+	ATH10K_SCAN_ABORTING,
+};
+
+static inline const char *ath10k_scan_state_str(enum ath10k_scan_state state)
+{
+	switch (state) {
+	case ATH10K_SCAN_IDLE:
+		return "idle";
+	case ATH10K_SCAN_STARTING:
+		return "starting";
+	case ATH10K_SCAN_RUNNING:
+		return "running";
+	case ATH10K_SCAN_ABORTING:
+		return "aborting";
+	}
+
+	return "unknown";
+}
+
+enum ath10k_tx_pause_reason {
+	ATH10K_TX_PAUSE_Q_FULL,
+	ATH10K_TX_PAUSE_MAX,
+};
+
+struct ath10k {
+	struct ath_common ath_common;
+	struct ieee80211_hw *hw;
+	struct device *dev;
+	u8 mac_addr[ETH_ALEN];
+
+	enum ath10k_hw_rev hw_rev;
+	u16 dev_id;
+	u32 chip_id;
+	u32 target_version;
+	u8 fw_version_major;
+	u32 fw_version_minor;
+	u16 fw_version_release;
+	u16 fw_version_build;
+	u32 fw_stats_req_mask;
+	u32 phy_capability;
+	u32 hw_min_tx_power;
+	u32 hw_max_tx_power;
+	u32 ht_cap_info;
+	u32 vht_cap_info;
+	u32 num_rf_chains;
+	u32 max_spatial_stream;
+	/* protected by conf_mutex */
+	bool ani_enabled;
+
+	DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
+
+	bool p2p;
+
+	struct {
+		enum ath10k_bus bus;
+		const struct ath10k_hif_ops *ops;
+	} hif;
+
+	struct completion target_suspend;
+
+	const struct ath10k_hw_regs *regs;
+	const struct ath10k_hw_values *hw_values;
+	struct ath10k_bmi bmi;
+	struct ath10k_wmi wmi;
+	struct ath10k_htc htc;
+	struct ath10k_htt htt;
+
+	struct ath10k_hw_params {
+		u32 id;
+		u16 dev_id;
+		const char *name;
+		u32 patch_load_addr;
+		int uart_pin;
+		u32 otp_exe_param;
+
+		/* This is true if the given HW chip has a quirky Cycle Counter
+		 * wraparound which resets to 0x7fffffff instead of 0. All
+		 * other CC related counters (e.g. Rx Clear Count) are divided
+		 * by 2 so they never wrap around themselves.
+		 */
+		bool has_shifted_cc_wraparound;
+
+		/* Some chips expect the fragment descriptor to be in contiguous
+		 * memory for any TX operation. Set the continuous_frag_desc
+		 * flag for hardware that has this requirement.
+		 */
+		bool continuous_frag_desc;
+
+		u32 channel_counters_freq_hz;
+
+		/* Mgmt tx descriptors threshold for limiting probe response
+		 * frames.
+		 */
+		u32 max_probe_resp_desc_thres;
+
+		struct ath10k_hw_params_fw {
+			const char *dir;
+			const char *fw;
+			const char *otp;
+			const char *board;
+			size_t board_size;
+			size_t board_ext_size;
+		} fw;
+	} hw_params;
+
+	const struct firmware *board;
+	const void *board_data;
+	size_t board_len;
+
+	const struct firmware *otp;
+	const void *otp_data;
+	size_t otp_len;
+
+	const struct firmware *firmware;
+	const void *firmware_data;
+	size_t firmware_len;
+
+	const struct firmware *cal_file;
+
+	struct {
+		const void *firmware_codeswap_data;
+		size_t firmware_codeswap_len;
+		struct ath10k_swap_code_seg_info *firmware_swap_code_seg_info;
+	} swap;
+
+	struct {
+		u32 vendor;
+		u32 device;
+		u32 subsystem_vendor;
+		u32 subsystem_device;
+
+		bool bmi_ids_valid;
+		u8 bmi_board_id;
+		u8 bmi_chip_id;
+	} id;
+
+	int fw_api;
+	int bd_api;
+	enum ath10k_cal_mode cal_mode;
+
+	struct {
+		struct completion started;
+		struct completion completed;
+		struct completion on_channel;
+		struct delayed_work timeout;
+		enum ath10k_scan_state state;
+		bool is_roc;
+		int vdev_id;
+		int roc_freq;
+		bool roc_notify;
+	} scan;
+
+	struct {
+		struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
+	} mac;
+
+	/* should never be NULL; needed for regular htt rx */
+	struct ieee80211_channel *rx_channel;
+
+	/* valid during scan; needed for mgmt rx during scan */
+	struct ieee80211_channel *scan_channel;
+
+	/* current operating channel definition */
+	struct cfg80211_chan_def chandef;
+
+	unsigned long long free_vdev_map;
+	struct ath10k_vif *monitor_arvif;
+	bool monitor;
+	int monitor_vdev_id;
+	bool monitor_started;
+	unsigned int filter_flags;
+	unsigned long dev_flags;
+	bool dfs_block_radar_events;
+
+	/* protected by conf_mutex */
+	bool radar_enabled;
+	int num_started_vdevs;
+
+	/* Protected by conf-mutex */
+	u8 cfg_tx_chainmask;
+	u8 cfg_rx_chainmask;
+
+	struct completion install_key_done;
+
+	struct completion vdev_setup_done;
+
+	struct workqueue_struct *workqueue;
+	/* Auxiliary workqueue */
+	struct workqueue_struct *workqueue_aux;
+
+	/* prevents concurrent FW reconfiguration */
+	struct mutex conf_mutex;
+
+	/* protects shared structure data */
+	spinlock_t data_lock;
+
+	struct list_head arvifs;
+	struct list_head peers;
+	wait_queue_head_t peer_mapping_wq;
+
+	/* protected by conf_mutex */
+	int num_peers;
+	int num_stations;
+
+	int max_num_peers;
+	int max_num_stations;
+	int max_num_vdevs;
+	int max_num_tdls_vdevs;
+	int num_active_peers;
+	int num_tids;
+
+	struct work_struct svc_rdy_work;
+	struct sk_buff *svc_rdy_skb;
+
+	struct work_struct offchan_tx_work;
+	struct sk_buff_head offchan_tx_queue;
+	struct completion offchan_tx_completed;
+	struct sk_buff *offchan_tx_skb;
+
+	struct work_struct wmi_mgmt_tx_work;
+	struct sk_buff_head wmi_mgmt_tx_queue;
+
+	enum ath10k_state state;
+
+	struct work_struct register_work;
+	struct work_struct restart_work;
+
+	/* cycle count is reported twice for each visited channel during scan.
+	 * access protected by data_lock */
+	u32 survey_last_rx_clear_count;
+	u32 survey_last_cycle_count;
+	struct survey_info survey[ATH10K_NUM_CHANS];
+
+	/* Channel info events are expected to come in pairs without and with
+	 * COMPLETE flag set respectively for each channel visit during scan.
+	 *
+	 * However there are deviations from this rule. This flag is used to
+	 * avoid reporting garbage data.
+	 */
+	bool ch_info_can_report_survey;
+
+	struct dfs_pattern_detector *dfs_detector;
+
+	unsigned long tx_paused; /* see ATH10K_TX_PAUSE_ */
+
+#ifdef CONFIG_ATH10K_DEBUGFS
+	struct ath10k_debug debug;
+#endif
+
+	struct {
+		/* relay(fs) channel for spectral scan */
+		struct rchan *rfs_chan_spec_scan;
+
+		/* spectral_mode and spec_config are protected by conf_mutex */
+		enum ath10k_spectral_mode mode;
+		struct ath10k_spec_scan config;
+	} spectral;
+
+	struct {
+		/* protected by conf_mutex */
+		const struct firmware *utf;
+		char utf_version[32];
+		const void *utf_firmware_data;
+		size_t utf_firmware_len;
+		DECLARE_BITMAP(orig_fw_features, ATH10K_FW_FEATURE_COUNT);
+		enum ath10k_fw_wmi_op_version orig_wmi_op_version;
+		enum ath10k_fw_wmi_op_version op_version;
+		/* protected by data_lock */
+		bool utf_monitor;
+	} testmode;
+
+	struct {
+		/* protected by data_lock */
+		u32 fw_crash_counter;
+		u32 fw_warm_reset_counter;
+		u32 fw_cold_reset_counter;
+	} stats;
+
+	struct ath10k_thermal thermal;
+	struct ath10k_wow wow;
+
+	/* must be last */
+	u8 drv_priv[0] __aligned(sizeof(void *));
+};
+
+struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
+				  enum ath10k_bus bus,
+				  enum ath10k_hw_rev hw_rev,
+				  const struct ath10k_hif_ops *hif_ops);
+void ath10k_core_destroy(struct ath10k *ar);
+void ath10k_core_get_fw_features_str(struct ath10k *ar,
+				     char *buf,
+				     size_t max_len);
+
+int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode);
+int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt);
+void ath10k_core_stop(struct ath10k *ar);
+int ath10k_core_register(struct ath10k *ar, u32 chip_id);
+void ath10k_core_unregister(struct ath10k *ar);
+
+#endif /* _CORE_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
new file mode 100644
index 0000000..1a88a24
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -0,0 +1,2255 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/vmalloc.h>
+#include <linux/utsname.h>
+
+#include "core.h"
+#include "debug.h"
+#include "hif.h"
+#include "wmi-ops.h"
+
+/* ms */
+#define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000
+
+#define ATH10K_FW_CRASH_DUMP_VERSION 1
+
+/**
+ * enum ath10k_fw_crash_dump_type - types of data in the dump file
+ * @ATH10K_FW_CRASH_DUMP_REGISTERS: Register crash dump in binary format
+ */
+enum ath10k_fw_crash_dump_type {
+	ATH10K_FW_CRASH_DUMP_REGISTERS = 0,
+
+	ATH10K_FW_CRASH_DUMP_MAX,
+};
+
+struct ath10k_tlv_dump_data {
+	/* see ath10k_fw_crash_dump_type above */
+	__le32 type;
+
+	/* in bytes */
+	__le32 tlv_len;
+
+	/* pad to 32-bit boundaries as needed */
+	u8 tlv_data[];
+} __packed;
+
+struct ath10k_dump_file_data {
+	/* dump file information */
+
+	/* "ATH10K-FW-DUMP" */
+	char df_magic[16];
+
+	__le32 len;
+
+	/* file dump version */
+	__le32 version;
+
+	/* some info we can get from ath10k struct that might help */
+
+	u8 uuid[16];
+
+	__le32 chip_id;
+
+	/* 0 for now, in place for later hardware */
+	__le32 bus_type;
+
+	__le32 target_version;
+	__le32 fw_version_major;
+	__le32 fw_version_minor;
+	__le32 fw_version_release;
+	__le32 fw_version_build;
+	__le32 phy_capability;
+	__le32 hw_min_tx_power;
+	__le32 hw_max_tx_power;
+	__le32 ht_cap_info;
+	__le32 vht_cap_info;
+	__le32 num_rf_chains;
+
+	/* firmware version string */
+	char fw_ver[ETHTOOL_FWVERS_LEN];
+
+	/* Kernel related information */
+
+	/* time-of-day stamp */
+	__le64 tv_sec;
+
+	/* time-of-day stamp, nano-seconds */
+	__le64 tv_nsec;
+
+	/* LINUX_VERSION_CODE */
+	__le32 kernel_ver_code;
+
+	/* VERMAGIC_STRING */
+	char kernel_ver[64];
+
+	/* room for growth w/out changing binary format */
+	u8 unused[128];
+
+	/* struct ath10k_tlv_dump_data + more */
+	u8 data[0];
+} __packed;
+
+void ath10k_info(struct ath10k *ar, const char *fmt, ...)
+{
+	struct va_format vaf = {
+		.fmt = fmt,
+	};
+	va_list args;
+
+	va_start(args, fmt);
+	vaf.va = &args;
+	dev_info(ar->dev, "%pV", &vaf);
+	trace_ath10k_log_info(ar, &vaf);
+	va_end(args);
+}
+EXPORT_SYMBOL(ath10k_info);
+
+void ath10k_print_driver_info(struct ath10k *ar)
+{
+	char fw_features[128] = {};
+	char boardinfo[100];
+
+	ath10k_core_get_fw_features_str(ar, fw_features, sizeof(fw_features));
+
+	if (ar->id.bmi_ids_valid)
+		scnprintf(boardinfo, sizeof(boardinfo), "bmi %d:%d",
+			  ar->id.bmi_chip_id, ar->id.bmi_board_id);
+	else
+		scnprintf(boardinfo, sizeof(boardinfo), "sub %04x:%04x",
+			  ar->id.subsystem_vendor, ar->id.subsystem_device);
+
+	ath10k_info(ar, "%s (0x%08x, 0x%08x %s) fw %s fwapi %d bdapi %d htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d raw %d hwcrypto %d features %s\n",
+		    ar->hw_params.name,
+		    ar->target_version,
+		    ar->chip_id,
+		    boardinfo,
+		    ar->hw->wiphy->fw_version,
+		    ar->fw_api,
+		    ar->bd_api,
+		    ar->htt.target_version_major,
+		    ar->htt.target_version_minor,
+		    ar->wmi.op_version,
+		    ar->htt.op_version,
+		    ath10k_cal_mode_str(ar->cal_mode),
+		    ar->max_num_stations,
+		    test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags),
+		    !test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags),
+		    fw_features);
+	ath10k_info(ar, "debug %d debugfs %d tracing %d dfs %d testmode %d\n",
+		    config_enabled(CONFIG_ATH10K_DEBUG),
+		    config_enabled(CONFIG_ATH10K_DEBUGFS),
+		    config_enabled(CONFIG_ATH10K_TRACING),
+		    config_enabled(CONFIG_ATH10K_DFS_CERTIFIED),
+		    config_enabled(CONFIG_NL80211_TESTMODE));
+}
+EXPORT_SYMBOL(ath10k_print_driver_info);
+
+void ath10k_err(struct ath10k *ar, const char *fmt, ...)
+{
+	struct va_format vaf = {
+		.fmt = fmt,
+	};
+	va_list args;
+
+	va_start(args, fmt);
+	vaf.va = &args;
+	dev_err(ar->dev, "%pV", &vaf);
+	trace_ath10k_log_err(ar, &vaf);
+	va_end(args);
+}
+EXPORT_SYMBOL(ath10k_err);
+
+void ath10k_warn(struct ath10k *ar, const char *fmt, ...)
+{
+	struct va_format vaf = {
+		.fmt = fmt,
+	};
+	va_list args;
+
+	va_start(args, fmt);
+	vaf.va = &args;
+	dev_warn_ratelimited(ar->dev, "%pV", &vaf);
+	trace_ath10k_log_warn(ar, &vaf);
+
+	va_end(args);
+}
+EXPORT_SYMBOL(ath10k_warn);
+
+#ifdef CONFIG_ATH10K_DEBUGFS
+
+static ssize_t ath10k_read_wmi_services(struct file *file,
+					char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	char *buf;
+	unsigned int len = 0, buf_len = 4096;
+	const char *name;
+	ssize_t ret_cnt;
+	bool enabled;
+	int i;
+
+	buf = kzalloc(buf_len, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (len > buf_len)
+		len = buf_len;
+
+	spin_lock_bh(&ar->data_lock);
+	for (i = 0; i < WMI_SERVICE_MAX; i++) {
+		enabled = test_bit(i, ar->wmi.svc_map);
+		name = wmi_service_name(i);
+
+		if (!name) {
+			if (enabled)
+				len += scnprintf(buf + len, buf_len - len,
+						 "%-40s %s (bit %d)\n",
+						 "unknown", "enabled", i);
+
+			continue;
+		}
+
+		len += scnprintf(buf + len, buf_len - len,
+				 "%-40s %s\n",
+				 name, enabled ? "enabled" : "-");
+	}
+	spin_unlock_bh(&ar->data_lock);
+
+	ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+	mutex_unlock(&ar->conf_mutex);
+
+	kfree(buf);
+	return ret_cnt;
+}
+
+static const struct file_operations fops_wmi_services = {
+	.read = ath10k_read_wmi_services,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+static void ath10k_debug_fw_stats_pdevs_free(struct list_head *head)
+{
+	struct ath10k_fw_stats_pdev *i, *tmp;
+
+	list_for_each_entry_safe(i, tmp, head, list) {
+		list_del(&i->list);
+		kfree(i);
+	}
+}
+
+static void ath10k_debug_fw_stats_vdevs_free(struct list_head *head)
+{
+	struct ath10k_fw_stats_vdev *i, *tmp;
+
+	list_for_each_entry_safe(i, tmp, head, list) {
+		list_del(&i->list);
+		kfree(i);
+	}
+}
+
+static void ath10k_debug_fw_stats_peers_free(struct list_head *head)
+{
+	struct ath10k_fw_stats_peer *i, *tmp;
+
+	list_for_each_entry_safe(i, tmp, head, list) {
+		list_del(&i->list);
+		kfree(i);
+	}
+}
+
+static void ath10k_debug_fw_stats_reset(struct ath10k *ar)
+{
+	spin_lock_bh(&ar->data_lock);
+	ar->debug.fw_stats_done = false;
+	ath10k_debug_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs);
+	ath10k_debug_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs);
+	ath10k_debug_fw_stats_peers_free(&ar->debug.fw_stats.peers);
+	spin_unlock_bh(&ar->data_lock);
+}
+
+void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
+{
+	struct ath10k_fw_stats stats = {};
+	bool is_start, is_started, is_end;
+	size_t num_peers;
+	size_t num_vdevs;
+	int ret;
+
+	INIT_LIST_HEAD(&stats.pdevs);
+	INIT_LIST_HEAD(&stats.vdevs);
+	INIT_LIST_HEAD(&stats.peers);
+
+	spin_lock_bh(&ar->data_lock);
+	ret = ath10k_wmi_pull_fw_stats(ar, skb, &stats);
+	if (ret) {
+		ath10k_warn(ar, "failed to pull fw stats: %d\n", ret);
+		goto free;
+	}
+
+	/* Stat data may exceed htc-wmi buffer limit. In such case firmware
+	 * splits the stats data and delivers it in a ping-pong fashion of
+	 * request cmd-update event.
+	 *
+	 * However there is no explicit end-of-data. Instead start-of-data is
+	 * used as an implicit one. This works as follows:
+	 *  a) discard stat update events until one with pdev stats is
+	 *     delivered - this skips session started at end of (b)
+	 *  b) consume stat update events until another one with pdev stats is
+	 *     delivered which is treated as end-of-data and is itself discarded
+	 */
+
+	if (ar->debug.fw_stats_done) {
+		ath10k_warn(ar, "received unsolicited stats update event\n");
+		goto free;
+	}
+
+	num_peers = ath10k_wmi_fw_stats_num_peers(&ar->debug.fw_stats.peers);
+	num_vdevs = ath10k_wmi_fw_stats_num_vdevs(&ar->debug.fw_stats.vdevs);
+	is_start = (list_empty(&ar->debug.fw_stats.pdevs) &&
+		    !list_empty(&stats.pdevs));
+	is_end = (!list_empty(&ar->debug.fw_stats.pdevs) &&
+		  !list_empty(&stats.pdevs));
+
+	if (is_start)
+		list_splice_tail_init(&stats.pdevs, &ar->debug.fw_stats.pdevs);
+
+	if (is_end)
+		ar->debug.fw_stats_done = true;
+
+	is_started = !list_empty(&ar->debug.fw_stats.pdevs);
+
+	if (is_started && !is_end) {
+		if (num_peers >= ATH10K_MAX_NUM_PEER_IDS) {
+			/* Although this is unlikely, impose a sane limit to
+			 * prevent firmware from DoS-ing the host.
+			 */
+			ath10k_warn(ar, "dropping fw peer stats\n");
+			goto free;
+		}
+
+		if (num_vdevs >= BITS_PER_LONG) {
+			ath10k_warn(ar, "dropping fw vdev stats\n");
+			goto free;
+		}
+
+		list_splice_tail_init(&stats.peers, &ar->debug.fw_stats.peers);
+		list_splice_tail_init(&stats.vdevs, &ar->debug.fw_stats.vdevs);
+	}
+
+	complete(&ar->debug.fw_stats_complete);
+
+free:
+	/* In some cases lists have been spliced and cleared. Free up
+	 * resources if that is not the case.
+	 */
+	ath10k_debug_fw_stats_pdevs_free(&stats.pdevs);
+	ath10k_debug_fw_stats_vdevs_free(&stats.vdevs);
+	ath10k_debug_fw_stats_peers_free(&stats.peers);
+
+	spin_unlock_bh(&ar->data_lock);
+}
+
+static int ath10k_debug_fw_stats_request(struct ath10k *ar)
+{
+	unsigned long timeout, time_left;
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	timeout = jiffies + msecs_to_jiffies(1 * HZ);
+
+	ath10k_debug_fw_stats_reset(ar);
+
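+	/* Firmware may deliver stats in several chunks (see the ping-pong
+	 * description in ath10k_debug_fw_stats_process()), so keep requesting
+	 * until fw_stats_done is set or the overall timeout expires.
+	 */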
+	for (;;) {
+		if (time_after(jiffies, timeout))
+			return -ETIMEDOUT;
+
+		reinit_completion(&ar->debug.fw_stats_complete);
+
+		ret = ath10k_wmi_request_stats(ar, ar->fw_stats_req_mask);
+		if (ret) {
+			ath10k_warn(ar, "could not request stats (%d)\n", ret);
+			return ret;
+		}
+
+		time_left =
+		wait_for_completion_timeout(&ar->debug.fw_stats_complete,
+					    1 * HZ);
+		if (!time_left)
+			return -ETIMEDOUT;
+
+		spin_lock_bh(&ar->data_lock);
+		if (ar->debug.fw_stats_done) {
+			spin_unlock_bh(&ar->data_lock);
+			break;
+		}
+		spin_unlock_bh(&ar->data_lock);
+	}
+
+	return 0;
+}
+
+static int ath10k_fw_stats_open(struct inode *inode, struct file *file)
+{
+	struct ath10k *ar = inode->i_private;
+	void *buf = NULL;
+	int ret;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state != ATH10K_STATE_ON) {
+		ret = -ENETDOWN;
+		goto err_unlock;
+	}
+
+	buf = vmalloc(ATH10K_FW_STATS_BUF_SIZE);
+	if (!buf) {
+		ret = -ENOMEM;
+		goto err_unlock;
+	}
+
+	ret = ath10k_debug_fw_stats_request(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to request fw stats: %d\n", ret);
+		goto err_free;
+	}
+
+	ret = ath10k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, buf);
+	if (ret) {
+		ath10k_warn(ar, "failed to fill fw stats: %d\n", ret);
+		goto err_free;
+	}
+
+	file->private_data = buf;
+
+	mutex_unlock(&ar->conf_mutex);
+	return 0;
+
+err_free:
+	vfree(buf);
+
+err_unlock:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+static int ath10k_fw_stats_release(struct inode *inode, struct file *file)
+{
+	vfree(file->private_data);
+
+	return 0;
+}
+
+static ssize_t ath10k_fw_stats_read(struct file *file, char __user *user_buf,
+				    size_t count, loff_t *ppos)
+{
+	const char *buf = file->private_data;
+	unsigned int len = strlen(buf);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_fw_stats = {
+	.open = ath10k_fw_stats_open,
+	.release = ath10k_fw_stats_release,
+	.read = ath10k_fw_stats_read,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+static ssize_t ath10k_debug_fw_reset_stats_read(struct file *file,
+						char __user *user_buf,
+						size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	int ret, len, buf_len;
+	char *buf;
+
+	buf_len = 500;
+	buf = kmalloc(buf_len, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	spin_lock_bh(&ar->data_lock);
+
+	len = 0;
+	len += scnprintf(buf + len, buf_len - len,
+			 "fw_crash_counter\t\t%d\n", ar->stats.fw_crash_counter);
+	len += scnprintf(buf + len, buf_len - len,
+			 "fw_warm_reset_counter\t\t%d\n",
+			 ar->stats.fw_warm_reset_counter);
+	len += scnprintf(buf + len, buf_len - len,
+			 "fw_cold_reset_counter\t\t%d\n",
+			 ar->stats.fw_cold_reset_counter);
+
+	spin_unlock_bh(&ar->data_lock);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+	kfree(buf);
+
+	return ret;
+}
+
+static const struct file_operations fops_fw_reset_stats = {
+	.open = simple_open,
+	.read = ath10k_debug_fw_reset_stats_read,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/* This is a clean assert crash in firmware. */
+static int ath10k_debug_fw_assert(struct ath10k *ar)
+{
+	struct wmi_vdev_install_key_cmd *cmd;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + 16);
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
+	memset(cmd, 0, sizeof(*cmd));
+
+	/* big enough number so that firmware asserts */
+	cmd->vdev_id = __cpu_to_le32(0x7ffe);
+
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->vdev_install_key_cmdid);
+}
+
+static ssize_t ath10k_read_simulate_fw_crash(struct file *file,
+					     char __user *user_buf,
+					     size_t count, loff_t *ppos)
+{
+	const char buf[] =
+		"To simulate firmware crash write one of the keywords to this file:\n"
+		"`soft` - this will send WMI_FORCE_FW_HANG_ASSERT to firmware if FW supports that command.\n"
+		"`hard` - this will send to firmware command with illegal parameters causing firmware crash.\n"
+		"`assert` - this will send special illegal parameter to firmware to cause assert failure and crash.\n"
+		"`hw-restart` - this will simply queue hw restart without fw/hw actually crashing.\n";
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
+}
+
+/* Simulate firmware crash:
+ * 'soft': Call wmi command causing firmware hang. This firmware hang is
+ * recoverable by warm firmware reset.
+ * 'hard': Force firmware crash by setting any vdev parameter for a disallowed
+ * vdev id. This is a hard firmware crash because it is recoverable only by a
+ * cold firmware reset.
+ */
+static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
+					      const char __user *user_buf,
+					      size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	char buf[32];
+	int ret;
+
+	mutex_lock(&ar->conf_mutex);
+
+	simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+
+	/* make sure that buf is null terminated */
+	buf[sizeof(buf) - 1] = 0;
+
+	if (ar->state != ATH10K_STATE_ON &&
+	    ar->state != ATH10K_STATE_RESTARTED) {
+		ret = -ENETDOWN;
+		goto exit;
+	}
+
+	/* drop the possible '\n' from the end */
+	if (buf[count - 1] == '\n') {
+		buf[count - 1] = 0;
+		count--;
+	}
+
+	if (!strcmp(buf, "soft")) {
+		ath10k_info(ar, "simulating soft firmware crash\n");
+		ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
+	} else if (!strcmp(buf, "hard")) {
+		ath10k_info(ar, "simulating hard firmware crash\n");
+		/* 0x7fff is a vdev id that is out of range for all firmware
+		 * variants, which forces a firmware crash.
+		 */
+		ret = ath10k_wmi_vdev_set_param(ar, 0x7fff,
+						ar->wmi.vdev_param->rts_threshold,
+						0);
+	} else if (!strcmp(buf, "assert")) {
+		ath10k_info(ar, "simulating firmware assert crash\n");
+		ret = ath10k_debug_fw_assert(ar);
+	} else if (!strcmp(buf, "hw-restart")) {
+		ath10k_info(ar, "user requested hw restart\n");
+		queue_work(ar->workqueue, &ar->restart_work);
+		ret = 0;
+	} else {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	if (ret) {
+		ath10k_warn(ar, "failed to simulate firmware crash: %d\n", ret);
+		goto exit;
+	}
+
+	ret = count;
+
+exit:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+static const struct file_operations fops_simulate_fw_crash = {
+	.read = ath10k_read_simulate_fw_crash,
+	.write = ath10k_write_simulate_fw_crash,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+static ssize_t ath10k_read_chip_id(struct file *file, char __user *user_buf,
+				   size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	unsigned int len;
+	char buf[50];
+
+	len = scnprintf(buf, sizeof(buf), "0x%08x\n", ar->chip_id);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_chip_id = {
+	.read = ath10k_read_chip_id,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+struct ath10k_fw_crash_data *
+ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
+{
+	struct ath10k_fw_crash_data *crash_data = ar->debug.fw_crash_data;
+
+	lockdep_assert_held(&ar->data_lock);
+
+	crash_data->crashed_since_read = true;
+	uuid_le_gen(&crash_data->uuid);
+	getnstimeofday(&crash_data->timestamp);
+
+	return crash_data;
+}
+EXPORT_SYMBOL(ath10k_debug_get_new_fw_crash_data);
+
+static struct ath10k_dump_file_data *ath10k_build_dump_file(struct ath10k *ar)
+{
+	struct ath10k_fw_crash_data *crash_data = ar->debug.fw_crash_data;
+	struct ath10k_dump_file_data *dump_data;
+	struct ath10k_tlv_dump_data *dump_tlv;
+	int hdr_len = sizeof(*dump_data);
+	unsigned int len, sofar = 0;
+	unsigned char *buf;
+
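+	/* Dump layout: a fixed ath10k_dump_file_data header followed by TLVs;
+	 * only a register-dump TLV is emitted for now.
+	 */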
+	len = hdr_len;
+	len += sizeof(*dump_tlv) + sizeof(crash_data->registers);
+
+	sofar += hdr_len;
+
+	/* This is going to get big when we start dumping FW RAM and such,
+	 * so go ahead and use vmalloc.
+	 */
+	buf = vzalloc(len);
+	if (!buf)
+		return NULL;
+
+	spin_lock_bh(&ar->data_lock);
+
+	if (!crash_data->crashed_since_read) {
+		spin_unlock_bh(&ar->data_lock);
+		vfree(buf);
+		return NULL;
+	}
+
+	dump_data = (struct ath10k_dump_file_data *)(buf);
+	strlcpy(dump_data->df_magic, "ATH10K-FW-DUMP",
+		sizeof(dump_data->df_magic));
+	dump_data->len = cpu_to_le32(len);
+
+	dump_data->version = cpu_to_le32(ATH10K_FW_CRASH_DUMP_VERSION);
+
+	memcpy(dump_data->uuid, &crash_data->uuid, sizeof(dump_data->uuid));
+	dump_data->chip_id = cpu_to_le32(ar->chip_id);
+	dump_data->bus_type = cpu_to_le32(0);
+	dump_data->target_version = cpu_to_le32(ar->target_version);
+	dump_data->fw_version_major = cpu_to_le32(ar->fw_version_major);
+	dump_data->fw_version_minor = cpu_to_le32(ar->fw_version_minor);
+	dump_data->fw_version_release = cpu_to_le32(ar->fw_version_release);
+	dump_data->fw_version_build = cpu_to_le32(ar->fw_version_build);
+	dump_data->phy_capability = cpu_to_le32(ar->phy_capability);
+	dump_data->hw_min_tx_power = cpu_to_le32(ar->hw_min_tx_power);
+	dump_data->hw_max_tx_power = cpu_to_le32(ar->hw_max_tx_power);
+	dump_data->ht_cap_info = cpu_to_le32(ar->ht_cap_info);
+	dump_data->vht_cap_info = cpu_to_le32(ar->vht_cap_info);
+	dump_data->num_rf_chains = cpu_to_le32(ar->num_rf_chains);
+
+	strlcpy(dump_data->fw_ver, ar->hw->wiphy->fw_version,
+		sizeof(dump_data->fw_ver));
+
+	dump_data->kernel_ver_code = 0;
+	strlcpy(dump_data->kernel_ver, init_utsname()->release,
+		sizeof(dump_data->kernel_ver));
+
+	dump_data->tv_sec = cpu_to_le64(crash_data->timestamp.tv_sec);
+	dump_data->tv_nsec = cpu_to_le64(crash_data->timestamp.tv_nsec);
+
+	/* Gather crash-dump */
+	dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
+	dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_REGISTERS);
+	dump_tlv->tlv_len = cpu_to_le32(sizeof(crash_data->registers));
+	memcpy(dump_tlv->tlv_data, &crash_data->registers,
+	       sizeof(crash_data->registers));
+	sofar += sizeof(*dump_tlv) + sizeof(crash_data->registers);
+
+	ar->debug.fw_crash_data->crashed_since_read = false;
+
+	spin_unlock_bh(&ar->data_lock);
+
+	return dump_data;
+}
+
+static int ath10k_fw_crash_dump_open(struct inode *inode, struct file *file)
+{
+	struct ath10k *ar = inode->i_private;
+	struct ath10k_dump_file_data *dump;
+
+	dump = ath10k_build_dump_file(ar);
+	if (!dump)
+		return -ENODATA;
+
+	file->private_data = dump;
+
+	return 0;
+}
+
+static ssize_t ath10k_fw_crash_dump_read(struct file *file,
+					 char __user *user_buf,
+					 size_t count, loff_t *ppos)
+{
+	struct ath10k_dump_file_data *dump_file = file->private_data;
+
+	return simple_read_from_buffer(user_buf, count, ppos,
+				       dump_file,
+				       le32_to_cpu(dump_file->len));
+}
+
+static int ath10k_fw_crash_dump_release(struct inode *inode,
+					struct file *file)
+{
+	vfree(file->private_data);
+
+	return 0;
+}
+
+static const struct file_operations fops_fw_crash_dump = {
+	.open = ath10k_fw_crash_dump_open,
+	.read = ath10k_fw_crash_dump_read,
+	.release = ath10k_fw_crash_dump_release,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
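+/* The reg_addr debugfs file selects a target register address; the reg_value
+ * file below reads or writes that address via ath10k_hif_read32()/write32().
+ */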
+static ssize_t ath10k_reg_addr_read(struct file *file,
+				    char __user *user_buf,
+				    size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	u8 buf[32];
+	unsigned int len = 0;
+	u32 reg_addr;
+
+	mutex_lock(&ar->conf_mutex);
+	reg_addr = ar->debug.reg_addr;
+	mutex_unlock(&ar->conf_mutex);
+
+	len += scnprintf(buf + len, sizeof(buf) - len, "0x%x\n", reg_addr);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_reg_addr_write(struct file *file,
+				     const char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	u32 reg_addr;
+	int ret;
+
+	ret = kstrtou32_from_user(user_buf, count, 0, &reg_addr);
+	if (ret)
+		return ret;
+
+	if (!IS_ALIGNED(reg_addr, 4))
+		return -EFAULT;
+
+	mutex_lock(&ar->conf_mutex);
+	ar->debug.reg_addr = reg_addr;
+	mutex_unlock(&ar->conf_mutex);
+
+	return count;
+}
+
+static const struct file_operations fops_reg_addr = {
+	.read = ath10k_reg_addr_read,
+	.write = ath10k_reg_addr_write,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+static ssize_t ath10k_reg_value_read(struct file *file,
+				     char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	u8 buf[48];
+	unsigned int len;
+	u32 reg_addr, reg_val;
+	int ret;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state != ATH10K_STATE_ON &&
+	    ar->state != ATH10K_STATE_UTF) {
+		ret = -ENETDOWN;
+		goto exit;
+	}
+
+	reg_addr = ar->debug.reg_addr;
+
+	reg_val = ath10k_hif_read32(ar, reg_addr);
+	len = scnprintf(buf, sizeof(buf), "0x%08x:0x%08x\n", reg_addr, reg_val);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+exit:
+	mutex_unlock(&ar->conf_mutex);
+
+	return ret;
+}
+
+static ssize_t ath10k_reg_value_write(struct file *file,
+				      const char __user *user_buf,
+				      size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	u32 reg_addr, reg_val;
+	int ret;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state != ATH10K_STATE_ON &&
+	    ar->state != ATH10K_STATE_UTF) {
+		ret = -ENETDOWN;
+		goto exit;
+	}
+
+	reg_addr = ar->debug.reg_addr;
+
+	ret = kstrtou32_from_user(user_buf, count, 0, &reg_val);
+	if (ret)
+		goto exit;
+
+	ath10k_hif_write32(ar, reg_addr, reg_val);
+
+	ret = count;
+
+exit:
+	mutex_unlock(&ar->conf_mutex);
+
+	return ret;
+}
+
+static const struct file_operations fops_reg_value = {
+	.read = ath10k_reg_value_read,
+	.write = ath10k_reg_value_write,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
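+/* The mem_value debugfs file exposes target memory through the diagnose
+ * window; the file offset (*ppos) is interpreted as the target address.
+ */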
+static ssize_t ath10k_mem_value_read(struct file *file,
+				     char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	u8 *buf;
+	int ret;
+
+	if (*ppos < 0)
+		return -EINVAL;
+
+	if (!count)
+		return 0;
+
+	mutex_lock(&ar->conf_mutex);
+
+	buf = vmalloc(count);
+	if (!buf) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	if (ar->state != ATH10K_STATE_ON &&
+	    ar->state != ATH10K_STATE_UTF) {
+		ret = -ENETDOWN;
+		goto exit;
+	}
+
+	ret = ath10k_hif_diag_read(ar, *ppos, buf, count);
+	if (ret) {
+		ath10k_warn(ar, "failed to read address 0x%08x via diagnose window from debugfs: %d\n",
+			    (u32)(*ppos), ret);
+		goto exit;
+	}
+
+	ret = copy_to_user(user_buf, buf, count);
+	if (ret) {
+		ret = -EFAULT;
+		goto exit;
+	}
+
+	count -= ret;
+	*ppos += count;
+	ret = count;
+
+exit:
+	vfree(buf);
+	mutex_unlock(&ar->conf_mutex);
+
+	return ret;
+}
+
+static ssize_t ath10k_mem_value_write(struct file *file,
+				      const char __user *user_buf,
+				      size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	u8 *buf;
+	int ret;
+
+	if (*ppos < 0)
+		return -EINVAL;
+
+	if (!count)
+		return 0;
+
+	mutex_lock(&ar->conf_mutex);
+
+	buf = vmalloc(count);
+	if (!buf) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	if (ar->state != ATH10K_STATE_ON &&
+	    ar->state != ATH10K_STATE_UTF) {
+		ret = -ENETDOWN;
+		goto exit;
+	}
+
+	ret = copy_from_user(buf, user_buf, count);
+	if (ret) {
+		ret = -EFAULT;
+		goto exit;
+	}
+
+	ret = ath10k_hif_diag_write(ar, *ppos, buf, count);
+	if (ret) {
+		ath10k_warn(ar, "failed to write address 0x%08x via diagnose window from debugfs: %d\n",
+			    (u32)(*ppos), ret);
+		goto exit;
+	}
+
+	*ppos += count;
+	ret = count;
+
+exit:
+	vfree(buf);
+	mutex_unlock(&ar->conf_mutex);
+
+	return ret;
+}
+
+static const struct file_operations fops_mem_value = {
+	.read = ath10k_mem_value_read,
+	.write = ath10k_mem_value_write,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+static int ath10k_debug_htt_stats_req(struct ath10k *ar)
+{
+	u64 cookie;
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	if (ar->debug.htt_stats_mask == 0)
+		/* htt stats are disabled */
+		return 0;
+
+	if (ar->state != ATH10K_STATE_ON)
+		return 0;
+
+	cookie = get_jiffies_64();
+
+	ret = ath10k_htt_h2t_stats_req(&ar->htt, ar->debug.htt_stats_mask,
+				       cookie);
+	if (ret) {
+		ath10k_warn(ar, "failed to send htt stats request: %d\n", ret);
+		return ret;
+	}
+
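+	/* Re-arm the delayed work so requests repeat periodically while the
+	 * stats mask stays non-zero (see ath10k_debug_htt_stats_dwork()).
+	 */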
+	queue_delayed_work(ar->workqueue, &ar->debug.htt_stats_dwork,
+			   msecs_to_jiffies(ATH10K_DEBUG_HTT_STATS_INTERVAL));
+
+	return 0;
+}
+
+static void ath10k_debug_htt_stats_dwork(struct work_struct *work)
+{
+	struct ath10k *ar = container_of(work, struct ath10k,
+					 debug.htt_stats_dwork.work);
+
+	mutex_lock(&ar->conf_mutex);
+
+	ath10k_debug_htt_stats_req(ar);
+
+	mutex_unlock(&ar->conf_mutex);
+}
+
+static ssize_t ath10k_read_htt_stats_mask(struct file *file,
+					  char __user *user_buf,
+					  size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	char buf[32];
+	unsigned int len;
+
+	len = scnprintf(buf, sizeof(buf), "%lu\n", ar->debug.htt_stats_mask);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_write_htt_stats_mask(struct file *file,
+					   const char __user *user_buf,
+					   size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	unsigned long mask;
+	int ret;
+
+	ret = kstrtoul_from_user(user_buf, count, 0, &mask);
+	if (ret)
+		return ret;
+
+	/* max 8 bit masks (for now) */
+	if (mask > 0xff)
+		return -E2BIG;
+
+	mutex_lock(&ar->conf_mutex);
+
+	ar->debug.htt_stats_mask = mask;
+
+	ret = ath10k_debug_htt_stats_req(ar);
+	if (ret)
+		goto out;
+
+	ret = count;
+
+out:
+	mutex_unlock(&ar->conf_mutex);
+
+	return ret;
+}
+
+static const struct file_operations fops_htt_stats_mask = {
+	.read = ath10k_read_htt_stats_mask,
+	.write = ath10k_write_htt_stats_mask,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+static ssize_t ath10k_read_htt_max_amsdu_ampdu(struct file *file,
+					       char __user *user_buf,
+					       size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	char buf[64];
+	u8 amsdu = 3, ampdu = 64;
+	unsigned int len;
+
+	mutex_lock(&ar->conf_mutex);
+
+	amsdu = ar->htt.max_num_amsdu;
+	ampdu = ar->htt.max_num_ampdu;
+	mutex_unlock(&ar->conf_mutex);
+
+	len = scnprintf(buf, sizeof(buf), "%u %u\n", amsdu, ampdu);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_write_htt_max_amsdu_ampdu(struct file *file,
+						const char __user *user_buf,
+						size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	int res;
+	char buf[64];
+	unsigned int amsdu, ampdu;
+
+	simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+
+	/* make sure that buf is null terminated */
+	buf[sizeof(buf) - 1] = 0;
+
+	res = sscanf(buf, "%u %u", &amsdu, &ampdu);
+
+	if (res != 2)
+		return -EINVAL;
+
+	mutex_lock(&ar->conf_mutex);
+
+	res = ath10k_htt_h2t_aggr_cfg_msg(&ar->htt, ampdu, amsdu);
+	if (res)
+		goto out;
+
+	res = count;
+	ar->htt.max_num_amsdu = amsdu;
+	ar->htt.max_num_ampdu = ampdu;
+
+out:
+	mutex_unlock(&ar->conf_mutex);
+	return res;
+}
+
+static const struct file_operations fops_htt_max_amsdu_ampdu = {
+	.read = ath10k_read_htt_max_amsdu_ampdu,
+	.write = ath10k_write_htt_max_amsdu_ampdu,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
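+/* fw_dbglog reads back as "0x<mask> <level>". Writes take a hex module mask
+ * optionally followed by a decimal log level; if the level is omitted it
+ * defaults to ATH10K_DBGLOG_LEVEL_WARN. */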
+static ssize_t ath10k_read_fw_dbglog(struct file *file,
+				     char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	unsigned int len;
+	char buf[64];
+
+	len = scnprintf(buf, sizeof(buf), "0x%08x %u\n",
+			ar->debug.fw_dbglog_mask, ar->debug.fw_dbglog_level);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_write_fw_dbglog(struct file *file,
+				      const char __user *user_buf,
+				      size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	int ret;
+	char buf[64];
+	unsigned int log_level, mask;
+
+	simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+
+	/* make sure that buf is null terminated */
+	buf[sizeof(buf) - 1] = 0;
+
+	ret = sscanf(buf, "%x %u", &mask, &log_level);
+
+	if (!ret)
+		return -EINVAL;
+
+	if (ret == 1)
+		/* default if user did not specify */
+		log_level = ATH10K_DBGLOG_LEVEL_WARN;
+
+	mutex_lock(&ar->conf_mutex);
+
+	ar->debug.fw_dbglog_mask = mask;
+	ar->debug.fw_dbglog_level = log_level;
+
+	if (ar->state == ATH10K_STATE_ON) {
+		ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask,
+					    ar->debug.fw_dbglog_level);
+		if (ret) {
+			ath10k_warn(ar, "dbglog cfg failed from debugfs: %d\n",
+				    ret);
+			goto exit;
+		}
+	}
+
+	ret = count;
+
+exit:
+	mutex_unlock(&ar->conf_mutex);
+
+	return ret;
+}
+
+/* TODO: It would be nice to always support ethtool stats; that would require
+ * moving the stats storage out of ath10k_debug, or always having the
+ * ath10k_debug struct available.
+ */
+
+/* This generally corresponds to the debugfs fw_stats file */
+static const char ath10k_gstrings_stats[][ETH_GSTRING_LEN] = {
+	"tx_pkts_nic",
+	"tx_bytes_nic",
+	"rx_pkts_nic",
+	"rx_bytes_nic",
+	"d_noise_floor",
+	"d_cycle_count",
+	"d_phy_error",
+	"d_rts_bad",
+	"d_rts_good",
+	"d_tx_power", /* in 0.5 dBm units, apparently */
+	"d_rx_crc_err", /* fcs_bad */
+	"d_no_beacon",
+	"d_tx_mpdus_queued",
+	"d_tx_msdu_queued",
+	"d_tx_msdu_dropped",
+	"d_local_enqued",
+	"d_local_freed",
+	"d_tx_ppdu_hw_queued",
+	"d_tx_ppdu_reaped",
+	"d_tx_fifo_underrun",
+	"d_tx_ppdu_abort",
+	"d_tx_mpdu_requed",
+	"d_tx_excessive_retries",
+	"d_tx_hw_rate",
+	"d_tx_dropped_sw_retries",
+	"d_tx_illegal_rate",
+	"d_tx_continuous_xretries",
+	"d_tx_timeout",
+	"d_tx_mpdu_txop_limit",
+	"d_pdev_resets",
+	"d_rx_mid_ppdu_route_change",
+	"d_rx_status",
+	"d_rx_extra_frags_ring0",
+	"d_rx_extra_frags_ring1",
+	"d_rx_extra_frags_ring2",
+	"d_rx_extra_frags_ring3",
+	"d_rx_msdu_htt",
+	"d_rx_mpdu_htt",
+	"d_rx_msdu_stack",
+	"d_rx_mpdu_stack",
+	"d_rx_phy_err",
+	"d_rx_phy_err_drops",
+	"d_rx_mpdu_errors", /* FCS, MIC, ENC */
+	"d_fw_crash_count",
+	"d_fw_warm_reset_count",
+	"d_fw_cold_reset_count",
+};
+
+#define ATH10K_SSTATS_LEN ARRAY_SIZE(ath10k_gstrings_stats)
+
+void ath10k_debug_get_et_strings(struct ieee80211_hw *hw,
+				 struct ieee80211_vif *vif,
+				 u32 sset, u8 *data)
+{
+	if (sset == ETH_SS_STATS)
+		memcpy(data, *ath10k_gstrings_stats,
+		       sizeof(ath10k_gstrings_stats));
+}
+
+int ath10k_debug_get_et_sset_count(struct ieee80211_hw *hw,
+				   struct ieee80211_vif *vif, int sset)
+{
+	if (sset == ETH_SS_STATS)
+		return ATH10K_SSTATS_LEN;
+
+	return 0;
+}
+
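+/* Fill the ethtool stats array. The order of entries must match
+ * ath10k_gstrings_stats above; the WARN_ON at the end catches a mismatch. */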
+void ath10k_debug_get_et_stats(struct ieee80211_hw *hw,
+			       struct ieee80211_vif *vif,
+			       struct ethtool_stats *stats, u64 *data)
+{
+	struct ath10k *ar = hw->priv;
+	static const struct ath10k_fw_stats_pdev zero_stats = {};
+	const struct ath10k_fw_stats_pdev *pdev_stats;
+	int i = 0, ret;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state == ATH10K_STATE_ON) {
+		ret = ath10k_debug_fw_stats_request(ar);
+		if (ret) {
+			/* just print a warning and try to use older results */
+			ath10k_warn(ar,
+				    "failed to get fw stats for ethtool: %d\n",
+				    ret);
+		}
+	}
+
+	pdev_stats = list_first_entry_or_null(&ar->debug.fw_stats.pdevs,
+					      struct ath10k_fw_stats_pdev,
+					      list);
+	if (!pdev_stats) {
+		/* no results available so just return zeroes */
+		pdev_stats = &zero_stats;
+	}
+
+	spin_lock_bh(&ar->data_lock);
+
+	data[i++] = pdev_stats->hw_reaped; /* ppdu reaped */
+	data[i++] = 0; /* tx bytes */
+	data[i++] = pdev_stats->htt_mpdus;
+	data[i++] = 0; /* rx bytes */
+	data[i++] = pdev_stats->ch_noise_floor;
+	data[i++] = pdev_stats->cycle_count;
+	data[i++] = pdev_stats->phy_err_count;
+	data[i++] = pdev_stats->rts_bad;
+	data[i++] = pdev_stats->rts_good;
+	data[i++] = pdev_stats->chan_tx_power;
+	data[i++] = pdev_stats->fcs_bad;
+	data[i++] = pdev_stats->no_beacons;
+	data[i++] = pdev_stats->mpdu_enqued;
+	data[i++] = pdev_stats->msdu_enqued;
+	data[i++] = pdev_stats->wmm_drop;
+	data[i++] = pdev_stats->local_enqued;
+	data[i++] = pdev_stats->local_freed;
+	data[i++] = pdev_stats->hw_queued;
+	data[i++] = pdev_stats->hw_reaped;
+	data[i++] = pdev_stats->underrun;
+	data[i++] = pdev_stats->tx_abort;
+	data[i++] = pdev_stats->mpdus_requed;
+	data[i++] = pdev_stats->tx_ko;
+	data[i++] = pdev_stats->data_rc;
+	data[i++] = pdev_stats->sw_retry_failure;
+	data[i++] = pdev_stats->illgl_rate_phy_err;
+	data[i++] = pdev_stats->pdev_cont_xretry;
+	data[i++] = pdev_stats->pdev_tx_timeout;
+	data[i++] = pdev_stats->txop_ovf;
+	data[i++] = pdev_stats->pdev_resets;
+	data[i++] = pdev_stats->mid_ppdu_route_change;
+	data[i++] = pdev_stats->status_rcvd;
+	data[i++] = pdev_stats->r0_frags;
+	data[i++] = pdev_stats->r1_frags;
+	data[i++] = pdev_stats->r2_frags;
+	data[i++] = pdev_stats->r3_frags;
+	data[i++] = pdev_stats->htt_msdus;
+	data[i++] = pdev_stats->htt_mpdus;
+	data[i++] = pdev_stats->loc_msdus;
+	data[i++] = pdev_stats->loc_mpdus;
+	data[i++] = pdev_stats->phy_errs;
+	data[i++] = pdev_stats->phy_err_drop;
+	data[i++] = pdev_stats->mpdu_errs;
+	data[i++] = ar->stats.fw_crash_counter;
+	data[i++] = ar->stats.fw_warm_reset_counter;
+	data[i++] = ar->stats.fw_cold_reset_counter;
+
+	spin_unlock_bh(&ar->data_lock);
+
+	mutex_unlock(&ar->conf_mutex);
+
+	WARN_ON(i != ATH10K_SSTATS_LEN);
+}
+
+static const struct file_operations fops_fw_dbglog = {
+	.read = ath10k_read_fw_dbglog,
+	.write = ath10k_write_fw_dbglog,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
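+/* On open, read the board calibration data out of firmware memory via the
+ * HIF diag interface into a vmalloc'd buffer. The buffer is freed again in
+ * the release handler. */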
+static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file)
+{
+	struct ath10k *ar = inode->i_private;
+	void *buf;
+	u32 hi_addr;
+	__le32 addr;
+	int ret;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state != ATH10K_STATE_ON &&
+	    ar->state != ATH10K_STATE_UTF) {
+		ret = -ENETDOWN;
+		goto err;
+	}
+
+	buf = vmalloc(QCA988X_CAL_DATA_LEN);
+	if (!buf) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	hi_addr = host_interest_item_address(HI_ITEM(hi_board_data));
+
+	ret = ath10k_hif_diag_read(ar, hi_addr, &addr, sizeof(addr));
+	if (ret) {
+		ath10k_warn(ar, "failed to read hi_board_data address: %d\n", ret);
+		goto err_vfree;
+	}
+
+	ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), buf,
+				   QCA988X_CAL_DATA_LEN);
+	if (ret) {
+		ath10k_warn(ar, "failed to read calibration data: %d\n", ret);
+		goto err_vfree;
+	}
+
+	file->private_data = buf;
+
+	mutex_unlock(&ar->conf_mutex);
+
+	return 0;
+
+err_vfree:
+	vfree(buf);
+
+err:
+	mutex_unlock(&ar->conf_mutex);
+
+	return ret;
+}
+
+static ssize_t ath10k_debug_cal_data_read(struct file *file,
+					  char __user *user_buf,
+					  size_t count, loff_t *ppos)
+{
+	void *buf = file->private_data;
+
+	return simple_read_from_buffer(user_buf, count, ppos,
+				       buf, QCA988X_CAL_DATA_LEN);
+}
+
+static int ath10k_debug_cal_data_release(struct inode *inode,
+					 struct file *file)
+{
+	vfree(file->private_data);
+
+	return 0;
+}
+
+static ssize_t ath10k_write_ani_enable(struct file *file,
+				       const char __user *user_buf,
+				       size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	int ret;
+	u8 enable;
+
+	if (kstrtou8_from_user(user_buf, count, 0, &enable))
+		return -EINVAL;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->ani_enabled == enable) {
+		ret = count;
+		goto exit;
+	}
+
+	ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->ani_enable,
+					enable);
+	if (ret) {
+		ath10k_warn(ar, "ani_enable failed from debugfs: %d\n", ret);
+		goto exit;
+	}
+	ar->ani_enabled = enable;
+
+	ret = count;
+
+exit:
+	mutex_unlock(&ar->conf_mutex);
+
+	return ret;
+}
+
+static ssize_t ath10k_read_ani_enable(struct file *file, char __user *user_buf,
+				      size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	int len = 0;
+	char buf[32];
+
+	len = scnprintf(buf, sizeof(buf) - len, "%d\n",
+			ar->ani_enabled);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_ani_enable = {
+	.read = ath10k_read_ani_enable,
+	.write = ath10k_write_ani_enable,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+static const struct file_operations fops_cal_data = {
+	.open = ath10k_debug_cal_data_open,
+	.read = ath10k_debug_cal_data_read,
+	.release = ath10k_debug_cal_data_release,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+static ssize_t ath10k_read_nf_cal_period(struct file *file,
+					 char __user *user_buf,
+					 size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	unsigned int len;
+	char buf[32];
+
+	len = scnprintf(buf, sizeof(buf), "%d\n",
+			ar->debug.nf_cal_period);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_write_nf_cal_period(struct file *file,
+					  const char __user *user_buf,
+					  size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	unsigned long period;
+	int ret;
+
+	ret = kstrtoul_from_user(user_buf, count, 0, &period);
+	if (ret)
+		return ret;
+
+	if (period > WMI_PDEV_PARAM_CAL_PERIOD_MAX)
+		return -EINVAL;
+
+	/* there's no way to switch back to the firmware default */
+	if (period == 0)
+		return -EINVAL;
+
+	mutex_lock(&ar->conf_mutex);
+
+	ar->debug.nf_cal_period = period;
+
+	if (ar->state != ATH10K_STATE_ON) {
+		/* firmware is not running, nothing else to do */
+		ret = count;
+		goto exit;
+	}
+
+	ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->cal_period,
+					ar->debug.nf_cal_period);
+	if (ret) {
+		ath10k_warn(ar, "cal period cfg failed from debugfs: %d\n",
+			    ret);
+		goto exit;
+	}
+
+	ret = count;
+
+exit:
+	mutex_unlock(&ar->conf_mutex);
+
+	return ret;
+}
+
+static const struct file_operations fops_nf_cal_period = {
+	.read = ath10k_read_nf_cal_period,
+	.write = ath10k_write_nf_cal_period,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+#define ATH10K_TPC_CONFIG_BUF_SIZE	(1024 * 1024)
+
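+/* Request the TPC (transmit power control) configuration via WMI and wait up
+ * to one second for the completion, which is signalled from
+ * ath10k_debug_tpc_stats_process() when the firmware replies. Caller must
+ * hold conf_mutex. */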
+static int ath10k_debug_tpc_stats_request(struct ath10k *ar)
+{
+	int ret;
+	unsigned long time_left;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	reinit_completion(&ar->debug.tpc_complete);
+
+	ret = ath10k_wmi_pdev_get_tpc_config(ar, WMI_TPC_CONFIG_PARAM);
+	if (ret) {
+		ath10k_warn(ar, "failed to request tpc config: %d\n", ret);
+		return ret;
+	}
+
+	time_left = wait_for_completion_timeout(&ar->debug.tpc_complete,
+						1 * HZ);
+	if (time_left == 0)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+void ath10k_debug_tpc_stats_process(struct ath10k *ar,
+				    struct ath10k_tpc_stats *tpc_stats)
+{
+	spin_lock_bh(&ar->data_lock);
+
+	kfree(ar->debug.tpc_stats);
+	ar->debug.tpc_stats = tpc_stats;
+	complete(&ar->debug.tpc_complete);
+
+	spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath10k_tpc_stats_print(struct ath10k_tpc_stats *tpc_stats,
+				   unsigned int j, char *buf, unsigned int *len)
+{
+	unsigned int i, buf_len;
+	static const char table_str[][5] = { "CDD",
+					     "STBC",
+					     "TXBF" };
+	static const char pream_str[][6] = { "CCK",
+					     "OFDM",
+					     "HT20",
+					     "HT40",
+					     "VHT20",
+					     "VHT40",
+					     "VHT80",
+					     "HTCUP" };
+
+	buf_len = ATH10K_TPC_CONFIG_BUF_SIZE;
+	*len += scnprintf(buf + *len, buf_len - *len,
+			  "********************************\n");
+	*len += scnprintf(buf + *len, buf_len - *len,
+			  "******************* %s POWER TABLE ****************\n",
+			  table_str[j]);
+	*len += scnprintf(buf + *len, buf_len - *len,
+			  "********************************\n");
+	*len += scnprintf(buf + *len, buf_len - *len,
+			  "No.  Preamble Rate_code tpc_value1 tpc_value2 tpc_value3\n");
+
+	for (i = 0; i < tpc_stats->rate_max; i++) {
+		*len += scnprintf(buf + *len, buf_len - *len,
+				  "%8d %s 0x%2x %s\n", i,
+				  pream_str[tpc_stats->tpc_table[j].pream_idx[i]],
+				  tpc_stats->tpc_table[j].rate_code[i],
+				  tpc_stats->tpc_table[j].tpc_value[i]);
+	}
+
+	*len += scnprintf(buf + *len, buf_len - *len,
+			  "***********************************\n");
+}
+
+static void ath10k_tpc_stats_fill(struct ath10k *ar,
+				  struct ath10k_tpc_stats *tpc_stats,
+				  char *buf)
+{
+	unsigned int len, j, buf_len;
+
+	len = 0;
+	buf_len = ATH10K_TPC_CONFIG_BUF_SIZE;
+
+	spin_lock_bh(&ar->data_lock);
+
+	if (!tpc_stats) {
+		ath10k_warn(ar, "failed to get tpc stats\n");
+		goto unlock;
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+	len += scnprintf(buf + len, buf_len - len,
+			 "*************************************\n");
+	len += scnprintf(buf + len, buf_len - len,
+			 "TPC config for channel %4d mode %d\n",
+			 tpc_stats->chan_freq,
+			 tpc_stats->phy_mode);
+	len += scnprintf(buf + len, buf_len - len,
+			 "*************************************\n");
+	len += scnprintf(buf + len, buf_len - len,
+			 "CTL		=  0x%2x Reg. Domain		= %2d\n",
+			 tpc_stats->ctl,
+			 tpc_stats->reg_domain);
+	len += scnprintf(buf + len, buf_len - len,
+			 "Antenna Gain	= %2d Reg. Max Antenna Gain	=  %2d\n",
+			 tpc_stats->twice_antenna_gain,
+			 tpc_stats->twice_antenna_reduction);
+	len += scnprintf(buf + len, buf_len - len,
+			 "Power Limit	= %2d Reg. Max Power		= %2d\n",
+			 tpc_stats->power_limit,
+			 tpc_stats->twice_max_rd_power / 2);
+	len += scnprintf(buf + len, buf_len - len,
+			 "Num tx chains	= %2d Num supported rates	= %2d\n",
+			 tpc_stats->num_tx_chain,
+			 tpc_stats->rate_max);
+
+	for (j = 0; j < tpc_stats->num_tx_chain ; j++) {
+		switch (j) {
+		case WMI_TPC_TABLE_TYPE_CDD:
+			if (tpc_stats->flag[j] == ATH10K_TPC_TABLE_TYPE_FLAG) {
+				len += scnprintf(buf + len, buf_len - len,
+						 "CDD not supported\n");
+				break;
+			}
+
+			ath10k_tpc_stats_print(tpc_stats, j, buf, &len);
+			break;
+		case WMI_TPC_TABLE_TYPE_STBC:
+			if (tpc_stats->flag[j] == ATH10K_TPC_TABLE_TYPE_FLAG) {
+				len += scnprintf(buf + len, buf_len - len,
+						 "STBC not supported\n");
+				break;
+			}
+
+			ath10k_tpc_stats_print(tpc_stats, j, buf, &len);
+			break;
+		case WMI_TPC_TABLE_TYPE_TXBF:
+			if (tpc_stats->flag[j] == ATH10K_TPC_TABLE_TYPE_FLAG) {
+				len += scnprintf(buf + len, buf_len - len,
+						 "TXBF not supported\n***************************\n");
+				break;
+			}
+
+			ath10k_tpc_stats_print(tpc_stats, j, buf, &len);
+			break;
+		default:
+			len += scnprintf(buf + len, buf_len - len,
+					 "Invalid Type\n");
+			break;
+		}
+	}
+
+unlock:
+	spin_unlock_bh(&ar->data_lock);
+
+	if (len >= buf_len)
+		buf[len - 1] = 0;
+	else
+		buf[len] = 0;
+}
+
+static int ath10k_tpc_stats_open(struct inode *inode, struct file *file)
+{
+	struct ath10k *ar = inode->i_private;
+	void *buf = NULL;
+	int ret;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state != ATH10K_STATE_ON) {
+		ret = -ENETDOWN;
+		goto err_unlock;
+	}
+
+	buf = vmalloc(ATH10K_TPC_CONFIG_BUF_SIZE);
+	if (!buf) {
+		ret = -ENOMEM;
+		goto err_unlock;
+	}
+
+	ret = ath10k_debug_tpc_stats_request(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to request tpc config stats: %d\n",
+			    ret);
+		goto err_free;
+	}
+
+	ath10k_tpc_stats_fill(ar, ar->debug.tpc_stats, buf);
+	file->private_data = buf;
+
+	mutex_unlock(&ar->conf_mutex);
+	return 0;
+
+err_free:
+	vfree(buf);
+
+err_unlock:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+static int ath10k_tpc_stats_release(struct inode *inode, struct file *file)
+{
+	vfree(file->private_data);
+
+	return 0;
+}
+
+static ssize_t ath10k_tpc_stats_read(struct file *file, char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	const char *buf = file->private_data;
+	unsigned int len = strlen(buf);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_tpc_stats = {
+	.open = ath10k_tpc_stats_open,
+	.release = ath10k_tpc_stats_release,
+	.read = ath10k_tpc_stats_read,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
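+/* Re-apply the debug related firmware configuration (HTT stats polling,
+ * dbglog mask, pktlog filter, noise floor cal period) once the firmware is
+ * running. Caller must hold conf_mutex. */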
+int ath10k_debug_start(struct ath10k *ar)
+{
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	ret = ath10k_debug_htt_stats_req(ar);
+	if (ret)
+		/* continue normally anyway, this isn't serious */
+		ath10k_warn(ar, "failed to start htt stats workqueue: %d\n",
+			    ret);
+
+	if (ar->debug.fw_dbglog_mask) {
+		ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask,
+					    ATH10K_DBGLOG_LEVEL_WARN);
+		if (ret)
+			/* not serious */
+			ath10k_warn(ar, "failed to enable dbglog during start: %d\n",
+				    ret);
+	}
+
+	if (ar->debug.pktlog_filter) {
+		ret = ath10k_wmi_pdev_pktlog_enable(ar,
+						    ar->debug.pktlog_filter);
+		if (ret)
+			/* not serious */
+			ath10k_warn(ar,
+				    "failed to enable pktlog filter %x: %d\n",
+				    ar->debug.pktlog_filter, ret);
+	} else {
+		ret = ath10k_wmi_pdev_pktlog_disable(ar);
+		if (ret)
+			/* not serious */
+			ath10k_warn(ar, "failed to disable pktlog: %d\n", ret);
+	}
+
+	if (ar->debug.nf_cal_period) {
+		ret = ath10k_wmi_pdev_set_param(ar,
+						ar->wmi.pdev_param->cal_period,
+						ar->debug.nf_cal_period);
+		if (ret)
+			/* not serious */
+			ath10k_warn(ar, "cal period cfg failed from debug start: %d\n",
+				    ret);
+	}
+
+	return ret;
+}
+
+void ath10k_debug_stop(struct ath10k *ar)
+{
+	lockdep_assert_held(&ar->conf_mutex);
+
+	/* Must not use _sync to avoid deadlock, we do that in
+	 * ath10k_debug_destroy(). The check for htt_stats_mask is to avoid
+	 * warning from del_timer(). */
+	if (ar->debug.htt_stats_mask != 0)
+		cancel_delayed_work(&ar->debug.htt_stats_dwork);
+
+	ath10k_wmi_pdev_pktlog_disable(ar);
+}
+
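+/* Writing anything to dfs_simulate_radar injects a radar-detected event into
+ * mac80211, as if the firmware had reported one. */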
+static ssize_t ath10k_write_simulate_radar(struct file *file,
+					   const char __user *user_buf,
+					   size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+
+	ieee80211_radar_detected(ar->hw);
+
+	return count;
+}
+
+static const struct file_operations fops_simulate_radar = {
+	.write = ath10k_write_simulate_radar,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+#define ATH10K_DFS_STAT(s, p) (\
+	len += scnprintf(buf + len, size - len, "%-28s : %10u\n", s, \
+			 ar->debug.dfs_stats.p))
+
+#define ATH10K_DFS_POOL_STAT(s, p) (\
+	len += scnprintf(buf + len, size - len, "%-28s : %10u\n", s, \
+			 ar->debug.dfs_pool_stats.p))
+
+static ssize_t ath10k_read_dfs_stats(struct file *file, char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	int retval = 0, len = 0;
+	const int size = 8000;
+	struct ath10k *ar = file->private_data;
+	char *buf;
+
+	buf = kzalloc(size, GFP_KERNEL);
+	if (buf == NULL)
+		return -ENOMEM;
+
+	if (!ar->dfs_detector) {
+		len += scnprintf(buf + len, size - len, "DFS not enabled\n");
+		goto exit;
+	}
+
+	ar->debug.dfs_pool_stats =
+			ar->dfs_detector->get_stats(ar->dfs_detector);
+
+	len += scnprintf(buf + len, size - len, "Pulse detector statistics:\n");
+
+	ATH10K_DFS_STAT("reported phy errors", phy_errors);
+	ATH10K_DFS_STAT("pulse events reported", pulses_total);
+	ATH10K_DFS_STAT("DFS pulses detected", pulses_detected);
+	ATH10K_DFS_STAT("DFS pulses discarded", pulses_discarded);
+	ATH10K_DFS_STAT("Radars detected", radar_detected);
+
+	len += scnprintf(buf + len, size - len, "Global Pool statistics:\n");
+	ATH10K_DFS_POOL_STAT("Pool references", pool_reference);
+	ATH10K_DFS_POOL_STAT("Pulses allocated", pulse_allocated);
+	ATH10K_DFS_POOL_STAT("Pulses alloc error", pulse_alloc_error);
+	ATH10K_DFS_POOL_STAT("Pulses in use", pulse_used);
+	ATH10K_DFS_POOL_STAT("Seqs. allocated", pseq_allocated);
+	ATH10K_DFS_POOL_STAT("Seqs. alloc error", pseq_alloc_error);
+	ATH10K_DFS_POOL_STAT("Seqs. in use", pseq_used);
+
+exit:
+	if (len > size)
+		len = size;
+
+	retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+	kfree(buf);
+
+	return retval;
+}
+
+static const struct file_operations fops_dfs_stats = {
+	.read = ath10k_read_dfs_stats,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
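+/* pktlog_filter takes a bitmask of ath10k_pktlog_filter values. The new
+ * filter is always stored; it is pushed to the firmware only when the device
+ * is running, and a zero filter disables packet logging. */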
+static ssize_t ath10k_write_pktlog_filter(struct file *file,
+					  const char __user *ubuf,
+					  size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	u32 filter;
+	int ret;
+
+	if (kstrtouint_from_user(ubuf, count, 0, &filter))
+		return -EINVAL;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state != ATH10K_STATE_ON) {
+		ar->debug.pktlog_filter = filter;
+		ret = count;
+		goto out;
+	}
+
+	if (filter == ar->debug.pktlog_filter) {
+		ret = count;
+		goto out;
+	}
+
+	if (filter) {
+		ret = ath10k_wmi_pdev_pktlog_enable(ar, filter);
+		if (ret) {
+			ath10k_warn(ar, "failed to enable pktlog filter %x: %d\n",
+				    ar->debug.pktlog_filter, ret);
+			goto out;
+		}
+	} else {
+		ret = ath10k_wmi_pdev_pktlog_disable(ar);
+		if (ret) {
+			ath10k_warn(ar, "failed to disable pktlog: %d\n", ret);
+			goto out;
+		}
+	}
+
+	ar->debug.pktlog_filter = filter;
+	ret = count;
+
+out:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+static ssize_t ath10k_read_pktlog_filter(struct file *file, char __user *ubuf,
+					 size_t count, loff_t *ppos)
+{
+	char buf[32];
+	struct ath10k *ar = file->private_data;
+	int len = 0;
+
+	mutex_lock(&ar->conf_mutex);
+	len = scnprintf(buf, sizeof(buf) - len, "%08x\n",
+			ar->debug.pktlog_filter);
+	mutex_unlock(&ar->conf_mutex);
+
+	return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_pktlog_filter = {
+	.read = ath10k_read_pktlog_filter,
+	.write = ath10k_write_pktlog_filter,
+	.open = simple_open
+};
+
+static ssize_t ath10k_write_quiet_period(struct file *file,
+					 const char __user *ubuf,
+					 size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	u32 period;
+
+	if (kstrtouint_from_user(ubuf, count, 0, &period))
+		return -EINVAL;
+
+	if (period < ATH10K_QUIET_PERIOD_MIN) {
+		ath10k_warn(ar, "Quiet period %u cannot be less than 25ms\n",
+			    period);
+		return -EINVAL;
+	}
+	mutex_lock(&ar->conf_mutex);
+	ar->thermal.quiet_period = period;
+	ath10k_thermal_set_throttling(ar);
+	mutex_unlock(&ar->conf_mutex);
+
+	return count;
+}
+
+static ssize_t ath10k_read_quiet_period(struct file *file, char __user *ubuf,
+					size_t count, loff_t *ppos)
+{
+	char buf[32];
+	struct ath10k *ar = file->private_data;
+	int len = 0;
+
+	mutex_lock(&ar->conf_mutex);
+	len = scnprintf(buf, sizeof(buf) - len, "%d\n",
+			ar->thermal.quiet_period);
+	mutex_unlock(&ar->conf_mutex);
+
+	return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_quiet_period = {
+	.read = ath10k_read_quiet_period,
+	.write = ath10k_write_quiet_period,
+	.open = simple_open
+};
+
+int ath10k_debug_create(struct ath10k *ar)
+{
+	ar->debug.fw_crash_data = vzalloc(sizeof(*ar->debug.fw_crash_data));
+	if (!ar->debug.fw_crash_data)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs);
+	INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs);
+	INIT_LIST_HEAD(&ar->debug.fw_stats.peers);
+
+	return 0;
+}
+
+void ath10k_debug_destroy(struct ath10k *ar)
+{
+	vfree(ar->debug.fw_crash_data);
+	ar->debug.fw_crash_data = NULL;
+
+	ath10k_debug_fw_stats_reset(ar);
+
+	kfree(ar->debug.tpc_stats);
+}
+
+int ath10k_debug_register(struct ath10k *ar)
+{
+	ar->debug.debugfs_phy = debugfs_create_dir("ath10k",
+						   ar->hw->wiphy->debugfsdir);
+	if (IS_ERR_OR_NULL(ar->debug.debugfs_phy)) {
+		if (IS_ERR(ar->debug.debugfs_phy))
+			return PTR_ERR(ar->debug.debugfs_phy);
+
+		return -ENOMEM;
+	}
+
+	INIT_DELAYED_WORK(&ar->debug.htt_stats_dwork,
+			  ath10k_debug_htt_stats_dwork);
+
+	init_completion(&ar->debug.tpc_complete);
+	init_completion(&ar->debug.fw_stats_complete);
+
+	debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar,
+			    &fops_fw_stats);
+
+	debugfs_create_file("fw_reset_stats", S_IRUSR, ar->debug.debugfs_phy,
+			    ar, &fops_fw_reset_stats);
+
+	debugfs_create_file("wmi_services", S_IRUSR, ar->debug.debugfs_phy, ar,
+			    &fops_wmi_services);
+
+	debugfs_create_file("simulate_fw_crash", S_IRUSR, ar->debug.debugfs_phy,
+			    ar, &fops_simulate_fw_crash);
+
+	debugfs_create_file("fw_crash_dump", S_IRUSR, ar->debug.debugfs_phy,
+			    ar, &fops_fw_crash_dump);
+
+	debugfs_create_file("reg_addr", S_IRUSR | S_IWUSR,
+			    ar->debug.debugfs_phy, ar, &fops_reg_addr);
+
+	debugfs_create_file("reg_value", S_IRUSR | S_IWUSR,
+			    ar->debug.debugfs_phy, ar, &fops_reg_value);
+
+	debugfs_create_file("mem_value", S_IRUSR | S_IWUSR,
+			    ar->debug.debugfs_phy, ar, &fops_mem_value);
+
+	debugfs_create_file("chip_id", S_IRUSR, ar->debug.debugfs_phy,
+			    ar, &fops_chip_id);
+
+	debugfs_create_file("htt_stats_mask", S_IRUSR, ar->debug.debugfs_phy,
+			    ar, &fops_htt_stats_mask);
+
+	debugfs_create_file("htt_max_amsdu_ampdu", S_IRUSR | S_IWUSR,
+			    ar->debug.debugfs_phy, ar,
+			    &fops_htt_max_amsdu_ampdu);
+
+	debugfs_create_file("fw_dbglog", S_IRUSR, ar->debug.debugfs_phy,
+			    ar, &fops_fw_dbglog);
+
+	debugfs_create_file("cal_data", S_IRUSR, ar->debug.debugfs_phy,
+			    ar, &fops_cal_data);
+
+	debugfs_create_file("ani_enable", S_IRUSR | S_IWUSR,
+			    ar->debug.debugfs_phy, ar, &fops_ani_enable);
+
+	debugfs_create_file("nf_cal_period", S_IRUSR | S_IWUSR,
+			    ar->debug.debugfs_phy, ar, &fops_nf_cal_period);
+
+	if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) {
+		debugfs_create_file("dfs_simulate_radar", S_IWUSR,
+				    ar->debug.debugfs_phy, ar,
+				    &fops_simulate_radar);
+
+		debugfs_create_bool("dfs_block_radar_events", S_IWUSR,
+				    ar->debug.debugfs_phy,
+				    &ar->dfs_block_radar_events);
+
+		debugfs_create_file("dfs_stats", S_IRUSR,
+				    ar->debug.debugfs_phy, ar,
+				    &fops_dfs_stats);
+	}
+
+	debugfs_create_file("pktlog_filter", S_IRUGO | S_IWUSR,
+			    ar->debug.debugfs_phy, ar, &fops_pktlog_filter);
+
+	debugfs_create_file("quiet_period", S_IRUGO | S_IWUSR,
+			    ar->debug.debugfs_phy, ar, &fops_quiet_period);
+
+	debugfs_create_file("tpc_stats", S_IRUSR,
+			    ar->debug.debugfs_phy, ar, &fops_tpc_stats);
+
+	return 0;
+}
+
+void ath10k_debug_unregister(struct ath10k *ar)
+{
+	cancel_delayed_work_sync(&ar->debug.htt_stats_dwork);
+}
+
+#endif /* CONFIG_ATH10K_DEBUGFS */
+
+#ifdef CONFIG_ATH10K_DEBUG
+void ath10k_dbg(struct ath10k *ar, enum ath10k_debug_mask mask,
+		const char *fmt, ...)
+{
+	struct va_format vaf;
+	va_list args;
+
+	va_start(args, fmt);
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	if (ath10k_debug_mask & mask)
+		dev_printk(KERN_DEBUG, ar->dev, "%pV", &vaf);
+
+	trace_ath10k_log_dbg(ar, mask, &vaf);
+
+	va_end(args);
+}
+EXPORT_SYMBOL(ath10k_dbg);
+
+void ath10k_dbg_dump(struct ath10k *ar,
+		     enum ath10k_debug_mask mask,
+		     const char *msg, const char *prefix,
+		     const void *buf, size_t len)
+{
+	char linebuf[256];
+	unsigned int linebuflen;
+	const void *ptr;
+
+	if (ath10k_debug_mask & mask) {
+		if (msg)
+			ath10k_dbg(ar, mask, "%s\n", msg);
+
+		for (ptr = buf; (ptr - buf) < len; ptr += 16) {
+			linebuflen = 0;
+			linebuflen += scnprintf(linebuf + linebuflen,
+						sizeof(linebuf) - linebuflen,
+						"%s%08x: ",
+						(prefix ? prefix : ""),
+						(unsigned int)(ptr - buf));
+			hex_dump_to_buffer(ptr, len - (ptr - buf), 16, 1,
+					   linebuf + linebuflen,
+					   sizeof(linebuf) - linebuflen, true);
+			dev_printk(KERN_DEBUG, ar->dev, "%s\n", linebuf);
+		}
+	}
+
+	/* tracing code doesn't like null strings :/ */
+	trace_ath10k_log_dbg_dump(ar, msg ? msg : "", prefix ? prefix : "",
+				  buf, len);
+}
+EXPORT_SYMBOL(ath10k_dbg_dump);
+
+#endif /* CONFIG_ATH10K_DEBUG */
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
new file mode 100644
index 0000000..7de780c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _DEBUG_H_
+#define _DEBUG_H_
+
+#include <linux/types.h>
+#include "trace.h"
+
+enum ath10k_debug_mask {
+	ATH10K_DBG_PCI		= 0x00000001,
+	ATH10K_DBG_WMI		= 0x00000002,
+	ATH10K_DBG_HTC		= 0x00000004,
+	ATH10K_DBG_HTT		= 0x00000008,
+	ATH10K_DBG_MAC		= 0x00000010,
+	ATH10K_DBG_BOOT		= 0x00000020,
+	ATH10K_DBG_PCI_DUMP	= 0x00000040,
+	ATH10K_DBG_HTT_DUMP	= 0x00000080,
+	ATH10K_DBG_MGMT		= 0x00000100,
+	ATH10K_DBG_DATA		= 0x00000200,
+	ATH10K_DBG_BMI		= 0x00000400,
+	ATH10K_DBG_REGULATORY	= 0x00000800,
+	ATH10K_DBG_TESTMODE	= 0x00001000,
+	ATH10K_DBG_WMI_PRINT	= 0x00002000,
+	ATH10K_DBG_PCI_PS	= 0x00004000,
+	ATH10K_DBG_ANY		= 0xffffffff,
+};
+
+enum ath10k_pktlog_filter {
+	ATH10K_PKTLOG_RX         = 0x000000001,
+	ATH10K_PKTLOG_TX         = 0x000000002,
+	ATH10K_PKTLOG_RCFIND     = 0x000000004,
+	ATH10K_PKTLOG_RCUPDATE   = 0x000000008,
+	ATH10K_PKTLOG_DBG_PRINT  = 0x000000010,
+	ATH10K_PKTLOG_ANY        = 0x00000001f,
+};
+
+enum ath10k_dbg_aggr_mode {
+	ATH10K_DBG_AGGR_MODE_AUTO,
+	ATH10K_DBG_AGGR_MODE_MANUAL,
+	ATH10K_DBG_AGGR_MODE_MAX,
+};
+
+/* FIXME: How to calculate the buffer size sanely? */
+#define ATH10K_FW_STATS_BUF_SIZE (1024*1024)
+
+extern unsigned int ath10k_debug_mask;
+
+__printf(2, 3) void ath10k_info(struct ath10k *ar, const char *fmt, ...);
+__printf(2, 3) void ath10k_err(struct ath10k *ar, const char *fmt, ...);
+__printf(2, 3) void ath10k_warn(struct ath10k *ar, const char *fmt, ...);
+void ath10k_print_driver_info(struct ath10k *ar);
+
+#ifdef CONFIG_ATH10K_DEBUGFS
+int ath10k_debug_start(struct ath10k *ar);
+void ath10k_debug_stop(struct ath10k *ar);
+int ath10k_debug_create(struct ath10k *ar);
+void ath10k_debug_destroy(struct ath10k *ar);
+int ath10k_debug_register(struct ath10k *ar);
+void ath10k_debug_unregister(struct ath10k *ar);
+void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_debug_tpc_stats_process(struct ath10k *ar,
+				    struct ath10k_tpc_stats *tpc_stats);
+struct ath10k_fw_crash_data *
+ath10k_debug_get_new_fw_crash_data(struct ath10k *ar);
+
+void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, int len);
+#define ATH10K_DFS_STAT_INC(ar, c) (ar->debug.dfs_stats.c++)
+
+void ath10k_debug_get_et_strings(struct ieee80211_hw *hw,
+				 struct ieee80211_vif *vif,
+				 u32 sset, u8 *data);
+int ath10k_debug_get_et_sset_count(struct ieee80211_hw *hw,
+				   struct ieee80211_vif *vif, int sset);
+void ath10k_debug_get_et_stats(struct ieee80211_hw *hw,
+			       struct ieee80211_vif *vif,
+			       struct ethtool_stats *stats, u64 *data);
+#else
+static inline int ath10k_debug_start(struct ath10k *ar)
+{
+	return 0;
+}
+
+static inline void ath10k_debug_stop(struct ath10k *ar)
+{
+}
+
+static inline int ath10k_debug_create(struct ath10k *ar)
+{
+	return 0;
+}
+
+static inline void ath10k_debug_destroy(struct ath10k *ar)
+{
+}
+
+static inline int ath10k_debug_register(struct ath10k *ar)
+{
+	return 0;
+}
+
+static inline void ath10k_debug_unregister(struct ath10k *ar)
+{
+}
+
+static inline void ath10k_debug_fw_stats_process(struct ath10k *ar,
+						 struct sk_buff *skb)
+{
+}
+
+static inline void ath10k_debug_tpc_stats_process(struct ath10k *ar,
+						  struct ath10k_tpc_stats *tpc_stats)
+{
+	kfree(tpc_stats);
+}
+
+static inline void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer,
+					   int len)
+{
+}
+
+static inline struct ath10k_fw_crash_data *
+ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
+{
+	return NULL;
+}
+
+#define ATH10K_DFS_STAT_INC(ar, c) do { } while (0)
+
+#define ath10k_debug_get_et_strings NULL
+#define ath10k_debug_get_et_sset_count NULL
+#define ath10k_debug_get_et_stats NULL
+
+#endif /* CONFIG_ATH10K_DEBUGFS */
+#ifdef CONFIG_MAC80211_DEBUGFS
+void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			    struct ieee80211_sta *sta, struct dentry *dir);
+#endif /* CONFIG_MAC80211_DEBUGFS */
+
+#ifdef CONFIG_ATH10K_DEBUG
+__printf(3, 4) void ath10k_dbg(struct ath10k *ar,
+			       enum ath10k_debug_mask mask,
+			       const char *fmt, ...);
+void ath10k_dbg_dump(struct ath10k *ar,
+		     enum ath10k_debug_mask mask,
+		     const char *msg, const char *prefix,
+		     const void *buf, size_t len);
+#else /* CONFIG_ATH10K_DEBUG */
+
+static inline int ath10k_dbg(struct ath10k *ar,
+			     enum ath10k_debug_mask dbg_mask,
+			     const char *fmt, ...)
+{
+	return 0;
+}
+
+static inline void ath10k_dbg_dump(struct ath10k *ar,
+				   enum ath10k_debug_mask mask,
+				   const char *msg, const char *prefix,
+				   const void *buf, size_t len)
+{
+}
+#endif /* CONFIG_ATH10K_DEBUG */
+#endif /* _DEBUG_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
new file mode 100644
index 0000000..95b5c49
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "wmi-ops.h"
+#include "debug.h"
+
+static ssize_t ath10k_dbg_sta_read_aggr_mode(struct file *file,
+					     char __user *user_buf,
+					     size_t count, loff_t *ppos)
+{
+	struct ieee80211_sta *sta = file->private_data;
+	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+	struct ath10k *ar = arsta->arvif->ar;
+	char buf[32];
+	int len = 0;
+
+	mutex_lock(&ar->conf_mutex);
+	len = scnprintf(buf, sizeof(buf) - len, "aggregation mode: %s\n",
+			(arsta->aggr_mode == ATH10K_DBG_AGGR_MODE_AUTO) ?
+			"auto" : "manual");
+	mutex_unlock(&ar->conf_mutex);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_dbg_sta_write_aggr_mode(struct file *file,
+					      const char __user *user_buf,
+					      size_t count, loff_t *ppos)
+{
+	struct ieee80211_sta *sta = file->private_data;
+	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+	struct ath10k *ar = arsta->arvif->ar;
+	u32 aggr_mode;
+	int ret;
+
+	if (kstrtouint_from_user(user_buf, count, 0, &aggr_mode))
+		return -EINVAL;
+
+	if (aggr_mode >= ATH10K_DBG_AGGR_MODE_MAX)
+		return -EINVAL;
+
+	mutex_lock(&ar->conf_mutex);
+	if ((ar->state != ATH10K_STATE_ON) ||
+	    (aggr_mode == arsta->aggr_mode)) {
+		ret = count;
+		goto out;
+	}
+
+	ret = ath10k_wmi_addba_clear_resp(ar, arsta->arvif->vdev_id, sta->addr);
+	if (ret) {
+		ath10k_warn(ar, "failed to clear addba session ret: %d\n", ret);
+		goto out;
+	}
+
+	arsta->aggr_mode = aggr_mode;
+out:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+static const struct file_operations fops_aggr_mode = {
+	.read = ath10k_dbg_sta_read_aggr_mode,
+	.write = ath10k_dbg_sta_write_aggr_mode,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
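+/* The per-station addba file takes "<tid> <buf_size>" and sends an ADDBA
+ * request for that TID. It only has an effect when the firmware is running
+ * and the station uses manual aggregation mode. */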
+static ssize_t ath10k_dbg_sta_write_addba(struct file *file,
+					  const char __user *user_buf,
+					  size_t count, loff_t *ppos)
+{
+	struct ieee80211_sta *sta = file->private_data;
+	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+	struct ath10k *ar = arsta->arvif->ar;
+	u32 tid, buf_size;
+	int ret;
+	char buf[64];
+
+	simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+
+	/* make sure that buf is null terminated */
+	buf[sizeof(buf) - 1] = '\0';
+
+	ret = sscanf(buf, "%u %u", &tid, &buf_size);
+	if (ret != 2)
+		return -EINVAL;
+
+	/* Valid TID values are 0 through 15 */
+	if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2)
+		return -EINVAL;
+
+	mutex_lock(&ar->conf_mutex);
+	if ((ar->state != ATH10K_STATE_ON) ||
+	    (arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) {
+		ret = count;
+		goto out;
+	}
+
+	ret = ath10k_wmi_addba_send(ar, arsta->arvif->vdev_id, sta->addr,
+				    tid, buf_size);
+	if (ret) {
+		ath10k_warn(ar, "failed to send addba request: vdev_id %u peer %pM tid %u buf_size %u\n",
+			    arsta->arvif->vdev_id, sta->addr, tid, buf_size);
+	}
+
+	ret = count;
+out:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+static const struct file_operations fops_addba = {
+	.write = ath10k_dbg_sta_write_addba,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+static ssize_t ath10k_dbg_sta_write_addba_resp(struct file *file,
+					       const char __user *user_buf,
+					       size_t count, loff_t *ppos)
+{
+	struct ieee80211_sta *sta = file->private_data;
+	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+	struct ath10k *ar = arsta->arvif->ar;
+	u32 tid, status;
+	int ret;
+	char buf[64];
+
+	simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+
+	/* make sure that buf is null terminated */
+	buf[sizeof(buf) - 1] = '\0';
+
+	ret = sscanf(buf, "%u %u", &tid, &status);
+	if (ret != 2)
+		return -EINVAL;
+
+	/* Valid TID values are 0 through 15 */
+	if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2)
+		return -EINVAL;
+
+	mutex_lock(&ar->conf_mutex);
+	if ((ar->state != ATH10K_STATE_ON) ||
+	    (arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) {
+		ret = count;
+		goto out;
+	}
+
+	ret = ath10k_wmi_addba_set_resp(ar, arsta->arvif->vdev_id, sta->addr,
+					tid, status);
+	if (ret) {
+		ath10k_warn(ar, "failed to send addba response: vdev_id %u peer %pM tid %u status %u\n",
+			    arsta->arvif->vdev_id, sta->addr, tid, status);
+	}
+	ret = count;
+out:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+static const struct file_operations fops_addba_resp = {
+	.write = ath10k_dbg_sta_write_addba_resp,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+static ssize_t ath10k_dbg_sta_write_delba(struct file *file,
+					  const char __user *user_buf,
+					  size_t count, loff_t *ppos)
+{
+	struct ieee80211_sta *sta = file->private_data;
+	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+	struct ath10k *ar = arsta->arvif->ar;
+	u32 tid, initiator, reason;
+	int ret;
+	char buf[64];
+
+	simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+
+	/* make sure that buf is null terminated */
+	buf[sizeof(buf) - 1] = '\0';
+
+	ret = sscanf(buf, "%u %u %u", &tid, &initiator, &reason);
+	if (ret != 3)
+		return -EINVAL;
+
+	/* Valid TID values are 0 through 15 */
+	if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2)
+		return -EINVAL;
+
+	mutex_lock(&ar->conf_mutex);
+	if ((ar->state != ATH10K_STATE_ON) ||
+	    (arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) {
+		ret = count;
+		goto out;
+	}
+
+	ret = ath10k_wmi_delba_send(ar, arsta->arvif->vdev_id, sta->addr,
+				    tid, initiator, reason);
+	if (ret) {
+		ath10k_warn(ar, "failed to send delba: vdev_id %u peer %pM tid %u initiator %u reason %u\n",
+			    arsta->arvif->vdev_id, sta->addr, tid, initiator,
+			    reason);
+	}
+	ret = count;
+out:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+static const struct file_operations fops_delba = {
+	.write = ath10k_dbg_sta_write_delba,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			    struct ieee80211_sta *sta, struct dentry *dir)
+{
+	debugfs_create_file("aggr_mode", S_IRUGO | S_IWUSR, dir, sta,
+			    &fops_aggr_mode);
+	debugfs_create_file("addba", S_IWUSR, dir, sta, &fops_addba);
+	debugfs_create_file("addba_resp", S_IWUSR, dir, sta, &fops_addba_resp);
+	debugfs_create_file("delba", S_IWUSR, dir, sta, &fops_delba);
+}
diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h
new file mode 100644
index 0000000..89e7076
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/hif.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _HIF_H_
+#define _HIF_H_
+
+#include <linux/kernel.h>
+#include "core.h"
+#include "debug.h"
+
+struct ath10k_hif_sg_item {
+	u16 transfer_id;
+	void *transfer_context; /* NULL = tx completion callback not called */
+	void *vaddr; /* for debugging mostly */
+	u32 paddr;
+	u16 len;
+};
+
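+/* Bus abstraction used by the core. Ops without a NULL check in the inline
+ * wrappers below are mandatory; diag_write, suspend, resume, read32 and
+ * write32 are optional. */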
+struct ath10k_hif_ops {
+	/* send a scatter-gather list to the target */
+	int (*tx_sg)(struct ath10k *ar, u8 pipe_id,
+		     struct ath10k_hif_sg_item *items, int n_items);
+
+	/* read firmware memory through the diagnose interface */
+	int (*diag_read)(struct ath10k *ar, u32 address, void *buf,
+			 size_t buf_len);
+
+	int (*diag_write)(struct ath10k *ar, u32 address, const void *data,
+			  int nbytes);
+	/*
+	 * API to handle HIF-specific BMI message exchanges, this API is
+	 * synchronous and only allowed to be called from a context that
+	 * can block (sleep)
+	 */
+	int (*exchange_bmi_msg)(struct ath10k *ar,
+				void *request, u32 request_len,
+				void *response, u32 *response_len);
+
+	/* Post BMI phase, after FW is loaded. Starts regular operation */
+	int (*start)(struct ath10k *ar);
+
+	/* Clean up what start() did. This does not revert to BMI phase. If
+	 * desired, call power_down() and power_up() */
+	void (*stop)(struct ath10k *ar);
+
+	int (*map_service_to_pipe)(struct ath10k *ar, u16 service_id,
+				   u8 *ul_pipe, u8 *dl_pipe);
+
+	void (*get_default_pipe)(struct ath10k *ar, u8 *ul_pipe, u8 *dl_pipe);
+
+	/*
+	 * Check if prior sends have completed.
+	 *
+	 * Check whether the pipe in question has any completed
+	 * sends that have not yet been processed.
+	 * This function is only relevant for HIF pipes that are configured
+	 * to be polled rather than interrupt-driven.
+	 */
+	void (*send_complete_check)(struct ath10k *ar, u8 pipe_id, int force);
+
+	u16 (*get_free_queue_number)(struct ath10k *ar, u8 pipe_id);
+
+	u32 (*read32)(struct ath10k *ar, u32 address);
+
+	void (*write32)(struct ath10k *ar, u32 address, u32 value);
+
+	/* Power up the device and enter BMI transfer mode for FW download */
+	int (*power_up)(struct ath10k *ar);
+
+	/* Power down the device and free up resources. stop() must be called
+	 * before this if start() was called earlier */
+	void (*power_down)(struct ath10k *ar);
+
+	int (*suspend)(struct ath10k *ar);
+	int (*resume)(struct ath10k *ar);
+};
+
+static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
+				   struct ath10k_hif_sg_item *items,
+				   int n_items)
+{
+	return ar->hif.ops->tx_sg(ar, pipe_id, items, n_items);
+}
+
+static inline int ath10k_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
+				       size_t buf_len)
+{
+	return ar->hif.ops->diag_read(ar, address, buf, buf_len);
+}
+
+static inline int ath10k_hif_diag_write(struct ath10k *ar, u32 address,
+					const void *data, int nbytes)
+{
+	if (!ar->hif.ops->diag_write)
+		return -EOPNOTSUPP;
+
+	return ar->hif.ops->diag_write(ar, address, data, nbytes);
+}
+
+static inline int ath10k_hif_exchange_bmi_msg(struct ath10k *ar,
+					      void *request, u32 request_len,
+					      void *response, u32 *response_len)
+{
+	return ar->hif.ops->exchange_bmi_msg(ar, request, request_len,
+					     response, response_len);
+}
+
+static inline int ath10k_hif_start(struct ath10k *ar)
+{
+	return ar->hif.ops->start(ar);
+}
+
+static inline void ath10k_hif_stop(struct ath10k *ar)
+{
+	return ar->hif.ops->stop(ar);
+}
+
+static inline int ath10k_hif_map_service_to_pipe(struct ath10k *ar,
+						 u16 service_id,
+						 u8 *ul_pipe, u8 *dl_pipe)
+{
+	return ar->hif.ops->map_service_to_pipe(ar, service_id,
+						ul_pipe, dl_pipe);
+}
+
+static inline void ath10k_hif_get_default_pipe(struct ath10k *ar,
+					       u8 *ul_pipe, u8 *dl_pipe)
+{
+	ar->hif.ops->get_default_pipe(ar, ul_pipe, dl_pipe);
+}
+
+static inline void ath10k_hif_send_complete_check(struct ath10k *ar,
+						  u8 pipe_id, int force)
+{
+	ar->hif.ops->send_complete_check(ar, pipe_id, force);
+}
+
+static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar,
+						   u8 pipe_id)
+{
+	return ar->hif.ops->get_free_queue_number(ar, pipe_id);
+}
+
+static inline int ath10k_hif_power_up(struct ath10k *ar)
+{
+	return ar->hif.ops->power_up(ar);
+}
+
+static inline void ath10k_hif_power_down(struct ath10k *ar)
+{
+	ar->hif.ops->power_down(ar);
+}
+
+static inline int ath10k_hif_suspend(struct ath10k *ar)
+{
+	if (!ar->hif.ops->suspend)
+		return -EOPNOTSUPP;
+
+	return ar->hif.ops->suspend(ar);
+}
+
+static inline int ath10k_hif_resume(struct ath10k *ar)
+{
+	if (!ar->hif.ops->resume)
+		return -EOPNOTSUPP;
+
+	return ar->hif.ops->resume(ar);
+}
+
+static inline u32 ath10k_hif_read32(struct ath10k *ar, u32 address)
+{
+	if (!ar->hif.ops->read32) {
+		ath10k_warn(ar, "hif read32 not supported\n");
+		return 0xdeaddead;
+	}
+
+	return ar->hif.ops->read32(ar, address);
+}
+
+static inline void ath10k_hif_write32(struct ath10k *ar,
+				      u32 address, u32 data)
+{
+	if (!ar->hif.ops->write32) {
+		ath10k_warn(ar, "hif write32 not supported\n");
+		return;
+	}
+
+	ar->hif.ops->write32(ar, address, data);
+}
+
+#endif /* _HIF_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
new file mode 100644
index 0000000..5b3c6bc
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -0,0 +1,827 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "hif.h"
+#include "debug.h"
+
+/********/
+/* Send */
+/********/
+
+static void ath10k_htc_control_tx_complete(struct ath10k *ar,
+					   struct sk_buff *skb)
+{
+	kfree_skb(skb);
+}
+
+static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
+{
+	struct sk_buff *skb;
+	struct ath10k_skb_cb *skb_cb;
+
+	skb = dev_alloc_skb(ATH10K_HTC_CONTROL_BUFFER_SIZE);
+	if (!skb)
+		return NULL;
+
+	skb_reserve(skb, 20); /* FIXME: why 20 bytes? */
+	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
+
+	skb_cb = ATH10K_SKB_CB(skb);
+	memset(skb_cb, 0, sizeof(*skb_cb));
+
+	ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb);
+	return skb;
+}
+
+static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc,
+					     struct sk_buff *skb)
+{
+	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
+
+	dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
+	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
+}
+
+static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
+					    struct sk_buff *skb)
+{
+	struct ath10k *ar = ep->htc->ar;
+
+	ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
+		   ep->eid, skb);
+
+	ath10k_htc_restore_tx_skb(ep->htc, skb);
+
+	if (!ep->ep_ops.ep_tx_complete) {
+		ath10k_warn(ar, "no tx handler for eid %d\n", ep->eid);
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
+}
+
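+/* Fill in the HTC header pushed in front of the payload: endpoint id, payload
+ * length, the credit-update flag and a per-endpoint sequence number. */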
+static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
+				      struct sk_buff *skb)
+{
+	struct ath10k_htc_hdr *hdr;
+
+	hdr = (struct ath10k_htc_hdr *)skb->data;
+
+	hdr->eid = ep->eid;
+	hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
+	hdr->flags = 0;
+	hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;
+
+	spin_lock_bh(&ep->htc->tx_lock);
+	hdr->seq_no = ep->seq_no++;
+	spin_unlock_bh(&ep->htc->tx_lock);
+}
+
+int ath10k_htc_send(struct ath10k_htc *htc,
+		    enum ath10k_htc_ep_id eid,
+		    struct sk_buff *skb)
+{
+	struct ath10k *ar = htc->ar;
+	struct ath10k_htc_ep *ep = &htc->endpoint[eid];
+	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
+	struct ath10k_hif_sg_item sg_item;
+	struct device *dev = htc->ar->dev;
+	int credits = 0;
+	int ret;
+
+	if (htc->ar->state == ATH10K_STATE_WEDGED)
+		return -ECOMM;
+
+	if (eid >= ATH10K_HTC_EP_COUNT) {
+		ath10k_warn(ar, "Invalid endpoint id: %d\n", eid);
+		return -ENOENT;
+	}
+
+	skb_push(skb, sizeof(struct ath10k_htc_hdr));
+
+	if (ep->tx_credit_flow_enabled) {
+		credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
+		spin_lock_bh(&htc->tx_lock);
+		if (ep->tx_credits < credits) {
+			spin_unlock_bh(&htc->tx_lock);
+			ret = -EAGAIN;
+			goto err_pull;
+		}
+		ep->tx_credits -= credits;
+		ath10k_dbg(ar, ATH10K_DBG_HTC,
+			   "htc ep %d consumed %d credits (total %d)\n",
+			   eid, credits, ep->tx_credits);
+		spin_unlock_bh(&htc->tx_lock);
+	}
+
+	ath10k_htc_prepare_tx_skb(ep, skb);
+
+	skb_cb->eid = eid;
+	skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
+	ret = dma_mapping_error(dev, skb_cb->paddr);
+	if (ret) {
+		ret = -EIO;
+		goto err_credits;
+	}
+
+	sg_item.transfer_id = ep->eid;
+	sg_item.transfer_context = skb;
+	sg_item.vaddr = skb->data;
+	sg_item.paddr = skb_cb->paddr;
+	sg_item.len = skb->len;
+
+	ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
+	if (ret)
+		goto err_unmap;
+
+	return 0;
+
+err_unmap:
+	dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
+err_credits:
+	if (ep->tx_credit_flow_enabled) {
+		spin_lock_bh(&htc->tx_lock);
+		ep->tx_credits += credits;
+		ath10k_dbg(ar, ATH10K_DBG_HTC,
+			   "htc ep %d reverted %d credits back (total %d)\n",
+			   eid, credits, ep->tx_credits);
+		spin_unlock_bh(&htc->tx_lock);
+
+		if (ep->ep_ops.ep_tx_credits)
+			ep->ep_ops.ep_tx_credits(htc->ar);
+	}
+err_pull:
+	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
+	return ret;
+}
+
+void ath10k_htc_tx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
+{
+	struct ath10k_htc *htc = &ar->htc;
+	struct ath10k_skb_cb *skb_cb;
+	struct ath10k_htc_ep *ep;
+
+	if (WARN_ON_ONCE(!skb))
+		return;
+
+	skb_cb = ATH10K_SKB_CB(skb);
+	ep = &htc->endpoint[skb_cb->eid];
+
+	ath10k_htc_notify_tx_completion(ep, skb);
+	/* the skb now belongs to the completion handler */
+}
+EXPORT_SYMBOL(ath10k_htc_tx_completion_handler);
+
+/***********/
+/* Receive */
+/***********/
+
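+/* Apply a credit report found in an HTC trailer: replenish the tx credits of
+ * each reported endpoint and invoke its ep_tx_credits callback, if any. */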
+static void
+ath10k_htc_process_credit_report(struct ath10k_htc *htc,
+				 const struct ath10k_htc_credit_report *report,
+				 int len,
+				 enum ath10k_htc_ep_id eid)
+{
+	struct ath10k *ar = htc->ar;
+	struct ath10k_htc_ep *ep;
+	int i, n_reports;
+
+	if (len % sizeof(*report))
+		ath10k_warn(ar, "Uneven credit report len %d\n", len);
+
+	n_reports = len / sizeof(*report);
+
+	spin_lock_bh(&htc->tx_lock);
+	for (i = 0; i < n_reports; i++, report++) {
+		if (report->eid >= ATH10K_HTC_EP_COUNT)
+			break;
+
+		ep = &htc->endpoint[report->eid];
+		ep->tx_credits += report->credits;
+
+		ath10k_dbg(ar, ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
+			   report->eid, report->credits, ep->tx_credits);
+
+		if (ep->ep_ops.ep_tx_credits) {
+			spin_unlock_bh(&htc->tx_lock);
+			ep->ep_ops.ep_tx_credits(htc->ar);
+			spin_lock_bh(&htc->tx_lock);
+		}
+	}
+	spin_unlock_bh(&htc->tx_lock);
+}
+
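+/* Walk the records appended as a trailer to an HTC rx frame and dispatch the
+ * ones we understand (currently only credit reports). Returns -EINVAL for a
+ * malformed trailer. */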
+static int ath10k_htc_process_trailer(struct ath10k_htc *htc,
+				      u8 *buffer,
+				      int length,
+				      enum ath10k_htc_ep_id src_eid)
+{
+	struct ath10k *ar = htc->ar;
+	int status = 0;
+	struct ath10k_htc_record *record;
+	u8 *orig_buffer;
+	int orig_length;
+	size_t len;
+
+	orig_buffer = buffer;
+	orig_length = length;
+
+	while (length > 0) {
+		record = (struct ath10k_htc_record *)buffer;
+
+		if (length < sizeof(record->hdr)) {
+			status = -EINVAL;
+			break;
+		}
+
+		if (record->hdr.len > length) {
+			/* no room left in buffer for record */
+			ath10k_warn(ar, "Invalid record length: %d\n",
+				    record->hdr.len);
+			status = -EINVAL;
+			break;
+		}
+
+		switch (record->hdr.id) {
+		case ATH10K_HTC_RECORD_CREDITS:
+			len = sizeof(struct ath10k_htc_credit_report);
+			if (record->hdr.len < len) {
+				ath10k_warn(ar, "Credit report too long\n");
+				status = -EINVAL;
+				break;
+			}
+			ath10k_htc_process_credit_report(htc,
+							 record->credit_report,
+							 record->hdr.len,
+							 src_eid);
+			break;
+		default:
+			ath10k_warn(ar, "Unhandled record: id:%d length:%d\n",
+				    record->hdr.id, record->hdr.len);
+			break;
+		}
+
+		if (status)
+			break;
+
+		/* multiple records may be present in a trailer */
+		buffer += sizeof(record->hdr) + record->hdr.len;
+		length -= sizeof(record->hdr) + record->hdr.len;
+	}
+
+	if (status)
+		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc rx bad trailer", "",
+				orig_buffer, orig_length);
+
+	return status;
+}
+
+void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
+{
+	int status = 0;
+	struct ath10k_htc *htc = &ar->htc;
+	struct ath10k_htc_hdr *hdr;
+	struct ath10k_htc_ep *ep;
+	u16 payload_len;
+	u32 trailer_len = 0;
+	size_t min_len;
+	u8 eid;
+	bool trailer_present;
+
+	hdr = (struct ath10k_htc_hdr *)skb->data;
+	skb_pull(skb, sizeof(*hdr));
+
+	eid = hdr->eid;
+
+	if (eid >= ATH10K_HTC_EP_COUNT) {
+		ath10k_warn(ar, "HTC Rx: invalid eid %d\n", eid);
+		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad header", "",
+				hdr, sizeof(*hdr));
+		goto out;
+	}
+
+	ep = &htc->endpoint[eid];
+
+	payload_len = __le16_to_cpu(hdr->len);
+
+	if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) {
+		ath10k_warn(ar, "HTC rx frame too long, len: %zu\n",
+			    payload_len + sizeof(*hdr));
+		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len", "",
+				hdr, sizeof(*hdr));
+		goto out;
+	}
+
+	if (skb->len < payload_len) {
+		ath10k_dbg(ar, ATH10K_DBG_HTC,
+			   "HTC Rx: insufficient length, got %d, expected %d\n",
+			   skb->len, payload_len);
+		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len",
+				"", hdr, sizeof(*hdr));
+		goto out;
+	}
+
+	/* get flags to check for trailer */
+	trailer_present = hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
+	if (trailer_present) {
+		u8 *trailer;
+
+		trailer_len = hdr->trailer_len;
+		min_len = sizeof(struct ath10k_ath10k_htc_record_hdr);
+
+		if ((trailer_len < min_len) ||
+		    (trailer_len > payload_len)) {
+			ath10k_warn(ar, "Invalid trailer length: %d\n",
+				    trailer_len);
+			goto out;
+		}
+
+		trailer = (u8 *)hdr;
+		trailer += sizeof(*hdr);
+		trailer += payload_len;
+		trailer -= trailer_len;
+		status = ath10k_htc_process_trailer(htc, trailer,
+						    trailer_len, hdr->eid);
+		if (status)
+			goto out;
+
+		skb_trim(skb, skb->len - trailer_len);
+	}
+
+	if (((int)payload_len - (int)trailer_len) <= 0)
+		/* zero length packet with trailer data, just drop these */
+		goto out;
+
+	if (eid == ATH10K_HTC_EP_0) {
+		struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data;
+
+		switch (__le16_to_cpu(msg->hdr.message_id)) {
+		case ATH10K_HTC_MSG_READY_ID:
+		case ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
+			/* handle HTC control message */
+			if (completion_done(&htc->ctl_resp)) {
+				/*
+				 * this is a fatal error, target should not be
+				 * sending unsolicited messages on the ep 0
+				 */
+				ath10k_warn(ar, "HTC rx ctrl still processing\n");
+				complete(&htc->ctl_resp);
+				goto out;
+			}
+
+			htc->control_resp_len =
+				min_t(int, skb->len,
+				      ATH10K_HTC_MAX_CTRL_MSG_LEN);
+
+			memcpy(htc->control_resp_buffer, skb->data,
+			       htc->control_resp_len);
+
+			complete(&htc->ctl_resp);
+			break;
+		case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE:
+			htc->htc_ops.target_send_suspend_complete(ar);
+			break;
+		default:
+			ath10k_warn(ar, "ignoring unsolicited htc ep0 event\n");
+			break;
+		}
+		goto out;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n",
+		   eid, skb);
+	ep->ep_ops.ep_rx_complete(ar, skb);
+
+	/* skb is now owned by the rx completion handler */
+	skb = NULL;
+out:
+	kfree_skb(skb);
+}
+EXPORT_SYMBOL(ath10k_htc_rx_completion_handler);
+
+static void ath10k_htc_control_rx_complete(struct ath10k *ar,
+					   struct sk_buff *skb)
+{
+	/* This is unexpected. FW is not supposed to send regular rx on this
+	 * endpoint. */
+	ath10k_warn(ar, "unexpected htc rx\n");
+	kfree_skb(skb);
+}
+
+/***************/
+/* Init/Deinit */
+/***************/
+
+static const char *htc_service_name(enum ath10k_htc_svc_id id)
+{
+	switch (id) {
+	case ATH10K_HTC_SVC_ID_RESERVED:
+		return "Reserved";
+	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
+		return "Control";
+	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
+		return "WMI";
+	case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
+		return "DATA BE";
+	case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
+		return "DATA BK";
+	case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
+		return "DATA VI";
+	case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
+		return "DATA VO";
+	case ATH10K_HTC_SVC_ID_NMI_CONTROL:
+		return "NMI Control";
+	case ATH10K_HTC_SVC_ID_NMI_DATA:
+		return "NMI Data";
+	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
+		return "HTT Data";
+	case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
+		return "RAW";
+	}
+
+	return "Unknown";
+}
+
+static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc)
+{
+	struct ath10k_htc_ep *ep;
+	int i;
+
+	for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
+		ep = &htc->endpoint[i];
+		ep->service_id = ATH10K_HTC_SVC_ID_UNUSED;
+		ep->max_ep_message_len = 0;
+		ep->max_tx_queue_depth = 0;
+		ep->eid = i;
+		ep->htc = htc;
+		ep->tx_credit_flow_enabled = true;
+	}
+}
+
+static void ath10k_htc_setup_target_buffer_assignments(struct ath10k_htc *htc)
+{
+	struct ath10k_htc_svc_tx_credits *entry;
+
+	entry = &htc->service_tx_alloc[0];
+
+	/*
+	 * For PCIe, allocate all credits/HTC buffers to WMI.
+	 * No buffers are used/required for data; data always
+	 * remains on the host.
+	 */
+	entry++;
+	entry->service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
+	entry->credit_allocation = htc->total_transmit_credits;
+}
+
+static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc,
+					   u16 service_id)
+{
+	u8 allocation = 0;
+	int i;
+
+	for (i = 0; i < ATH10K_HTC_EP_COUNT; i++) {
+		if (htc->service_tx_alloc[i].service_id == service_id)
+			allocation =
+			    htc->service_tx_alloc[i].credit_allocation;
+	}
+
+	return allocation;
+}
+
+int ath10k_htc_wait_target(struct ath10k_htc *htc)
+{
+	struct ath10k *ar = htc->ar;
+	int i, status = 0;
+	unsigned long time_left;
+	struct ath10k_htc_svc_conn_req conn_req;
+	struct ath10k_htc_svc_conn_resp conn_resp;
+	struct ath10k_htc_msg *msg;
+	u16 message_id;
+	u16 credit_count;
+	u16 credit_size;
+
+	time_left = wait_for_completion_timeout(&htc->ctl_resp,
+						ATH10K_HTC_WAIT_TIMEOUT_HZ);
+	if (!time_left) {
+		/* Workaround: In some cases the PCI HIF doesn't
+		 * receive interrupt for the control response message
+		 * even if the buffer was completed. It is suspected
+		 * iomap writes unmasking PCI CE irqs aren't propagated
+		 * properly in KVM PCI-passthrough sometimes.
+		 */
+		ath10k_warn(ar, "failed to receive control response completion, polling..\n");
+
+		for (i = 0; i < CE_COUNT; i++)
+			ath10k_hif_send_complete_check(htc->ar, i, 1);
+
+		time_left =
+		wait_for_completion_timeout(&htc->ctl_resp,
+					    ATH10K_HTC_WAIT_TIMEOUT_HZ);
+
+		if (!time_left)
+			status = -ETIMEDOUT;
+	}
+
+	if (status < 0) {
+		ath10k_err(ar, "ctl_resp never came in (%d)\n", status);
+		return status;
+	}
+
+	if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) {
+		ath10k_err(ar, "Invalid HTC ready msg len:%d\n",
+			   htc->control_resp_len);
+		return -ECOMM;
+	}
+
+	msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
+	message_id   = __le16_to_cpu(msg->hdr.message_id);
+	credit_count = __le16_to_cpu(msg->ready.credit_count);
+	credit_size  = __le16_to_cpu(msg->ready.credit_size);
+
+	if (message_id != ATH10K_HTC_MSG_READY_ID) {
+		ath10k_err(ar, "Invalid HTC ready msg: 0x%x\n", message_id);
+		return -ECOMM;
+	}
+
+	htc->total_transmit_credits = credit_count;
+	htc->target_credit_size = credit_size;
+
+	ath10k_dbg(ar, ATH10K_DBG_HTC,
+		   "Target ready! transmit resources: %d size:%d\n",
+		   htc->total_transmit_credits,
+		   htc->target_credit_size);
+
+	if ((htc->total_transmit_credits == 0) ||
+	    (htc->target_credit_size == 0)) {
+		ath10k_err(ar, "Invalid credit count or size received\n");
+		return -ECOMM;
+	}
+
+	ath10k_htc_setup_target_buffer_assignments(htc);
+
+	/* setup our pseudo HTC control endpoint connection */
+	memset(&conn_req, 0, sizeof(conn_req));
+	memset(&conn_resp, 0, sizeof(conn_resp));
+	conn_req.ep_ops.ep_tx_complete = ath10k_htc_control_tx_complete;
+	conn_req.ep_ops.ep_rx_complete = ath10k_htc_control_rx_complete;
+	conn_req.max_send_queue_depth = ATH10K_NUM_CONTROL_TX_BUFFERS;
+	conn_req.service_id = ATH10K_HTC_SVC_ID_RSVD_CTRL;
+
+	/* connect fake service */
+	status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp);
+	if (status) {
+		ath10k_err(ar, "could not connect to htc service (%d)\n",
+			   status);
+		return status;
+	}
+
+	return 0;
+}
+
+int ath10k_htc_connect_service(struct ath10k_htc *htc,
+			       struct ath10k_htc_svc_conn_req *conn_req,
+			       struct ath10k_htc_svc_conn_resp *conn_resp)
+{
+	struct ath10k *ar = htc->ar;
+	struct ath10k_htc_msg *msg;
+	struct ath10k_htc_conn_svc *req_msg;
+	struct ath10k_htc_conn_svc_response resp_msg_dummy;
+	struct ath10k_htc_conn_svc_response *resp_msg = &resp_msg_dummy;
+	enum ath10k_htc_ep_id assigned_eid = ATH10K_HTC_EP_COUNT;
+	struct ath10k_htc_ep *ep;
+	struct sk_buff *skb;
+	unsigned int max_msg_size = 0;
+	int length, status;
+	unsigned long time_left;
+	bool disable_credit_flow_ctrl = false;
+	u16 message_id, service_id, flags = 0;
+	u8 tx_alloc = 0;
+
+	/* special case for HTC pseudo control service */
+	if (conn_req->service_id == ATH10K_HTC_SVC_ID_RSVD_CTRL) {
+		disable_credit_flow_ctrl = true;
+		assigned_eid = ATH10K_HTC_EP_0;
+		max_msg_size = ATH10K_HTC_MAX_CTRL_MSG_LEN;
+		memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
+		goto setup;
+	}
+
+	tx_alloc = ath10k_htc_get_credit_allocation(htc,
+						    conn_req->service_id);
+	if (!tx_alloc)
+		ath10k_dbg(ar, ATH10K_DBG_BOOT,
+			   "boot htc service %s does not allocate target credits\n",
+			   htc_service_name(conn_req->service_id));
+
+	skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
+	if (!skb) {
+		ath10k_err(ar, "Failed to allocate HTC packet\n");
+		return -ENOMEM;
+	}
+
+	length = sizeof(msg->hdr) + sizeof(msg->connect_service);
+	skb_put(skb, length);
+	memset(skb->data, 0, length);
+
+	msg = (struct ath10k_htc_msg *)skb->data;
+	msg->hdr.message_id =
+		__cpu_to_le16(ATH10K_HTC_MSG_CONNECT_SERVICE_ID);
+
+	flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC);
+
+	/* Only enable credit flow control for WMI ctrl service */
+	if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) {
+		flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
+		disable_credit_flow_ctrl = true;
+	}
+
+	req_msg = &msg->connect_service;
+	req_msg->flags = __cpu_to_le16(flags);
+	req_msg->service_id = __cpu_to_le16(conn_req->service_id);
+
+	reinit_completion(&htc->ctl_resp);
+
+	status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
+	if (status) {
+		kfree_skb(skb);
+		return status;
+	}
+
+	/* wait for response */
+	time_left = wait_for_completion_timeout(&htc->ctl_resp,
+						ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
+	if (!time_left) {
+		ath10k_err(ar, "Service connect timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	/* we controlled the buffer creation, it's aligned */
+	msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
+	resp_msg = &msg->connect_service_response;
+	message_id = __le16_to_cpu(msg->hdr.message_id);
+	service_id = __le16_to_cpu(resp_msg->service_id);
+
+	if ((message_id != ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
+	    (htc->control_resp_len < sizeof(msg->hdr) +
+	     sizeof(msg->connect_service_response))) {
+		ath10k_err(ar, "Invalid resp message ID 0x%x", message_id);
+		return -EPROTO;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_HTC,
+		   "HTC Service %s connect response: status: 0x%x, assigned ep: 0x%x\n",
+		   htc_service_name(service_id),
+		   resp_msg->status, resp_msg->eid);
+
+	conn_resp->connect_resp_code = resp_msg->status;
+
+	/* check response status */
+	if (resp_msg->status != ATH10K_HTC_CONN_SVC_STATUS_SUCCESS) {
+		ath10k_err(ar, "HTC Service %s connect request failed: 0x%x\n",
+			   htc_service_name(service_id),
+			   resp_msg->status);
+		return -EPROTO;
+	}
+
+	assigned_eid = (enum ath10k_htc_ep_id)resp_msg->eid;
+	max_msg_size = __le16_to_cpu(resp_msg->max_msg_size);
+
+setup:
+
+	if (assigned_eid >= ATH10K_HTC_EP_COUNT)
+		return -EPROTO;
+
+	if (max_msg_size == 0)
+		return -EPROTO;
+
+	ep = &htc->endpoint[assigned_eid];
+	ep->eid = assigned_eid;
+
+	if (ep->service_id != ATH10K_HTC_SVC_ID_UNUSED)
+		return -EPROTO;
+
+	/* return assigned endpoint to caller */
+	conn_resp->eid = assigned_eid;
+	conn_resp->max_msg_len = __le16_to_cpu(resp_msg->max_msg_size);
+
+	/* setup the endpoint */
+	ep->service_id = conn_req->service_id;
+	ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
+	ep->max_ep_message_len = __le16_to_cpu(resp_msg->max_msg_size);
+	ep->tx_credits = tx_alloc;
+	ep->tx_credit_size = htc->target_credit_size;
+	ep->tx_credits_per_max_message = ep->max_ep_message_len /
+					 htc->target_credit_size;
+
+	if (ep->max_ep_message_len % htc->target_credit_size)
+		ep->tx_credits_per_max_message++;
+
+	/* copy all the callbacks */
+	ep->ep_ops = conn_req->ep_ops;
+
+	status = ath10k_hif_map_service_to_pipe(htc->ar,
+						ep->service_id,
+						&ep->ul_pipe_id,
+						&ep->dl_pipe_id);
+	if (status)
+		return status;
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT,
+		   "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
+		   htc_service_name(ep->service_id), ep->ul_pipe_id,
+		   ep->dl_pipe_id, ep->eid);
+
+	if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
+		ep->tx_credit_flow_enabled = false;
+		ath10k_dbg(ar, ATH10K_DBG_BOOT,
+			   "boot htc service '%s' eid %d TX flow control disabled\n",
+			   htc_service_name(ep->service_id), assigned_eid);
+	}
+
+	return status;
+}
+
+struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size)
+{
+	struct sk_buff *skb;
+
+	skb = dev_alloc_skb(size + sizeof(struct ath10k_htc_hdr));
+	if (!skb)
+		return NULL;
+
+	skb_reserve(skb, sizeof(struct ath10k_htc_hdr));
+
+	/* FW/HTC requires 4-byte aligned streams */
+	if (!IS_ALIGNED((unsigned long)skb->data, 4))
+		ath10k_warn(ar, "Unaligned HTC tx skb\n");
+
+	return skb;
+}
+
+int ath10k_htc_start(struct ath10k_htc *htc)
+{
+	struct ath10k *ar = htc->ar;
+	struct sk_buff *skb;
+	int status = 0;
+	struct ath10k_htc_msg *msg;
+
+	skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
+	if (!skb)
+		return -ENOMEM;
+
+	skb_put(skb, sizeof(msg->hdr) + sizeof(msg->setup_complete_ext));
+	memset(skb->data, 0, skb->len);
+
+	msg = (struct ath10k_htc_msg *)skb->data;
+	msg->hdr.message_id =
+		__cpu_to_le16(ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID);
+
+	ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC is using TX credit flow control\n");
+
+	status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
+	if (status) {
+		kfree_skb(skb);
+		return status;
+	}
+
+	return 0;
+}
+
+/* registered target arrival callback from the HIF layer */
+int ath10k_htc_init(struct ath10k *ar)
+{
+	struct ath10k_htc_ep *ep = NULL;
+	struct ath10k_htc *htc = &ar->htc;
+
+	spin_lock_init(&htc->tx_lock);
+
+	ath10k_htc_reset_endpoint_states(htc);
+
+	htc->ar = ar;
+
+	/* Get HIF default pipe for HTC message exchange */
+	ep = &htc->endpoint[ATH10K_HTC_EP_0];
+
+	ath10k_hif_get_default_pipe(ar, &ep->ul_pipe_id, &ep->dl_pipe_id);
+
+	init_completion(&htc->ctl_resp);
+
+	return 0;
+}
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
new file mode 100644
index 0000000..e70aa38
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -0,0 +1,359 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _HTC_H_
+#define _HTC_H_
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/bug.h>
+#include <linux/skbuff.h>
+#include <linux/semaphore.h>
+#include <linux/timer.h>
+
+struct ath10k;
+
+/****************/
+/* HTC protocol */
+/****************/
+
+/*
+ * HTC - host-target control protocol
+ *
+ * tx packets are generally <htc_hdr><payload>
+ * rx packets are more complex: <htc_hdr><payload><trailer>
+ *
+ * The payload + trailer length is stored in len.
+ * To get the payload-only length, subtract trailer_len from len
+ * (see the illustrative helper below struct ath10k_htc_hdr).
+ *
+ * The trailer contains (possibly) multiple <htc_record> entries.
+ * Each record is an id-len-value.
+ *
+ * The HTC header flags, control_byte0 and control_byte1
+ * have different meanings depending on whether the frame
+ * is tx or rx.
+ *
+ * Alignment: htc_hdr, payload and trailer are
+ * 4-byte aligned.
+ */
+
+enum ath10k_htc_tx_flags {
+	ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01,
+	ATH10K_HTC_FLAG_SEND_BUNDLE        = 0x02
+};
+
+enum ath10k_htc_rx_flags {
+	ATH10K_HTC_FLAG_TRAILER_PRESENT = 0x02,
+	ATH10K_HTC_FLAG_BUNDLE_MASK     = 0xF0
+};
+
+struct ath10k_htc_hdr {
+	u8 eid; /* @enum ath10k_htc_ep_id */
+	u8 flags; /* @enum ath10k_htc_tx_flags, ath10k_htc_rx_flags */
+	__le16 len;
+	union {
+		u8 trailer_len; /* for rx */
+		u8 control_byte0;
+	} __packed;
+	union {
+		u8 seq_no; /* for tx */
+		u8 control_byte1;
+	} __packed;
+	u8 pad0;
+	u8 pad1;
+} __packed __aligned(4);
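+
+/*
+ * Illustrative sketch only, not part of the original driver: shows how the
+ * rx length fields described above relate. The helper name is hypothetical
+ * and it assumes the header has already been validated the way
+ * ath10k_htc_rx_completion_handler() validates it.
+ */
+static inline u16
+ath10k_htc_hdr_payload_len(const struct ath10k_htc_hdr *hdr)
+{
+	u16 len = __le16_to_cpu(hdr->len);
+
+	/* len covers payload + trailer; strip the trailer if present */
+	if (hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT)
+		len -= hdr->trailer_len;
+
+	return len;
+}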
+
+enum ath10k_ath10k_htc_msg_id {
+	ATH10K_HTC_MSG_READY_ID                = 1,
+	ATH10K_HTC_MSG_CONNECT_SERVICE_ID      = 2,
+	ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID = 3,
+	ATH10K_HTC_MSG_SETUP_COMPLETE_ID       = 4,
+	ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID    = 5,
+	ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE   = 6
+};
+
+enum ath10k_htc_version {
+	ATH10K_HTC_VERSION_2P0 = 0x00, /* 2.0 */
+	ATH10K_HTC_VERSION_2P1 = 0x01, /* 2.1 */
+};
+
+enum ath10k_htc_conn_flags {
+	ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_FOURTH    = 0x0,
+	ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_HALF      = 0x1,
+	ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_THREE_FOURTHS = 0x2,
+	ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_UNITY         = 0x3,
+#define ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_MASK 0x3
+	ATH10K_HTC_CONN_FLAGS_REDUCE_CREDIT_DRIBBLE    = 1 << 2,
+	ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL = 1 << 3
+#define ATH10K_HTC_CONN_FLAGS_RECV_ALLOC_MASK 0xFF00
+#define ATH10K_HTC_CONN_FLAGS_RECV_ALLOC_LSB  8
+};
+
+enum ath10k_htc_conn_svc_status {
+	ATH10K_HTC_CONN_SVC_STATUS_SUCCESS      = 0,
+	ATH10K_HTC_CONN_SVC_STATUS_NOT_FOUND    = 1,
+	ATH10K_HTC_CONN_SVC_STATUS_FAILED       = 2,
+	ATH10K_HTC_CONN_SVC_STATUS_NO_RESOURCES = 3,
+	ATH10K_HTC_CONN_SVC_STATUS_NO_MORE_EP   = 4
+};
+
+struct ath10k_ath10k_htc_msg_hdr {
+	__le16 message_id; /* @enum htc_message_id */
+} __packed;
+
+struct ath10k_htc_unknown {
+	u8 pad0;
+	u8 pad1;
+} __packed;
+
+struct ath10k_htc_ready {
+	__le16 credit_count;
+	__le16 credit_size;
+	u8 max_endpoints;
+	u8 pad0;
+} __packed;
+
+struct ath10k_htc_ready_extended {
+	struct ath10k_htc_ready base;
+	u8 htc_version; /* @enum ath10k_htc_version */
+	u8 max_msgs_per_htc_bundle;
+	u8 pad0;
+	u8 pad1;
+} __packed;
+
+struct ath10k_htc_conn_svc {
+	__le16 service_id;
+	__le16 flags; /* @enum ath10k_htc_conn_flags */
+	u8 pad0;
+	u8 pad1;
+} __packed;
+
+struct ath10k_htc_conn_svc_response {
+	__le16 service_id;
+	u8 status; /* @enum ath10k_htc_conn_svc_status */
+	u8 eid;
+	__le16 max_msg_size;
+} __packed;
+
+struct ath10k_htc_setup_complete_extended {
+	u8 pad0;
+	u8 pad1;
+	__le32 flags; /* @enum htc_setup_complete_flags */
+	u8 max_msgs_per_bundled_recv;
+	u8 pad2;
+	u8 pad3;
+	u8 pad4;
+} __packed;
+
+struct ath10k_htc_msg {
+	struct ath10k_ath10k_htc_msg_hdr hdr;
+	union {
+		/* host-to-target */
+		struct ath10k_htc_conn_svc connect_service;
+		struct ath10k_htc_ready ready;
+		struct ath10k_htc_ready_extended ready_ext;
+		struct ath10k_htc_unknown unknown;
+		struct ath10k_htc_setup_complete_extended setup_complete_ext;
+
+		/* target-to-host */
+		struct ath10k_htc_conn_svc_response connect_service_response;
+	};
+} __packed __aligned(4);
+
+enum ath10k_ath10k_htc_record_id {
+	ATH10K_HTC_RECORD_NULL    = 0,
+	ATH10K_HTC_RECORD_CREDITS = 1
+};
+
+struct ath10k_ath10k_htc_record_hdr {
+	u8 id; /* @enum ath10k_ath10k_htc_record_id */
+	u8 len;
+	u8 pad0;
+	u8 pad1;
+} __packed;
+
+struct ath10k_htc_credit_report {
+	u8 eid; /* @enum ath10k_htc_ep_id */
+	u8 credits;
+	u8 pad0;
+	u8 pad1;
+} __packed;
+
+struct ath10k_htc_record {
+	struct ath10k_ath10k_htc_record_hdr hdr;
+	union {
+		struct ath10k_htc_credit_report credit_report[0];
+		u8 payload[0];
+	};
+} __packed __aligned(4);
+
+/*
+ * note: the trailer offset is dynamic depending
+ * on payload length. this is only a struct layout draft
+ */
+struct ath10k_htc_frame {
+	struct ath10k_htc_hdr hdr;
+	union {
+		struct ath10k_htc_msg msg;
+		u8 payload[0];
+	};
+	struct ath10k_htc_record trailer[0];
+} __packed __aligned(4);
+
+/*******************/
+/* Host-side stuff */
+/*******************/
+
+enum ath10k_htc_svc_gid {
+	ATH10K_HTC_SVC_GRP_RSVD = 0,
+	ATH10K_HTC_SVC_GRP_WMI = 1,
+	ATH10K_HTC_SVC_GRP_NMI = 2,
+	ATH10K_HTC_SVC_GRP_HTT = 3,
+
+	ATH10K_HTC_SVC_GRP_TEST = 254,
+	ATH10K_HTC_SVC_GRP_LAST = 255,
+};
+
+#define SVC(group, idx) \
+	(int)(((int)(group) << 8) | (int)(idx))
+
+enum ath10k_htc_svc_id {
+	/* NOTE: service ID of 0x0000 is reserved and should never be used */
+	ATH10K_HTC_SVC_ID_RESERVED	= 0x0000,
+	ATH10K_HTC_SVC_ID_UNUSED	= ATH10K_HTC_SVC_ID_RESERVED,
+
+	ATH10K_HTC_SVC_ID_RSVD_CTRL	= SVC(ATH10K_HTC_SVC_GRP_RSVD, 1),
+	ATH10K_HTC_SVC_ID_WMI_CONTROL	= SVC(ATH10K_HTC_SVC_GRP_WMI, 0),
+	ATH10K_HTC_SVC_ID_WMI_DATA_BE	= SVC(ATH10K_HTC_SVC_GRP_WMI, 1),
+	ATH10K_HTC_SVC_ID_WMI_DATA_BK	= SVC(ATH10K_HTC_SVC_GRP_WMI, 2),
+	ATH10K_HTC_SVC_ID_WMI_DATA_VI	= SVC(ATH10K_HTC_SVC_GRP_WMI, 3),
+	ATH10K_HTC_SVC_ID_WMI_DATA_VO	= SVC(ATH10K_HTC_SVC_GRP_WMI, 4),
+
+	ATH10K_HTC_SVC_ID_NMI_CONTROL	= SVC(ATH10K_HTC_SVC_GRP_NMI, 0),
+	ATH10K_HTC_SVC_ID_NMI_DATA	= SVC(ATH10K_HTC_SVC_GRP_NMI, 1),
+
+	ATH10K_HTC_SVC_ID_HTT_DATA_MSG	= SVC(ATH10K_HTC_SVC_GRP_HTT, 0),
+
+	/* raw stream service (i.e. flash, tcmd, calibration apps) */
+	ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS = SVC(ATH10K_HTC_SVC_GRP_TEST, 0),
+};
+
+#undef SVC
+
+enum ath10k_htc_ep_id {
+	ATH10K_HTC_EP_UNUSED = -1,
+	ATH10K_HTC_EP_0 = 0,
+	ATH10K_HTC_EP_1 = 1,
+	ATH10K_HTC_EP_2,
+	ATH10K_HTC_EP_3,
+	ATH10K_HTC_EP_4,
+	ATH10K_HTC_EP_5,
+	ATH10K_HTC_EP_6,
+	ATH10K_HTC_EP_7,
+	ATH10K_HTC_EP_8,
+	ATH10K_HTC_EP_COUNT,
+};
+
+struct ath10k_htc_ops {
+	void (*target_send_suspend_complete)(struct ath10k *ar);
+};
+
+struct ath10k_htc_ep_ops {
+	void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
+	void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
+	void (*ep_tx_credits)(struct ath10k *);
+};
+
+/* service connection information */
+struct ath10k_htc_svc_conn_req {
+	u16 service_id;
+	struct ath10k_htc_ep_ops ep_ops;
+	int max_send_queue_depth;
+};
+
+/* service connection response information */
+struct ath10k_htc_svc_conn_resp {
+	u8 buffer_len;
+	u8 actual_len;
+	enum ath10k_htc_ep_id eid;
+	unsigned int max_msg_len;
+	u8 connect_resp_code;
+};
+
+#define ATH10K_NUM_CONTROL_TX_BUFFERS 2
+#define ATH10K_HTC_MAX_LEN 4096
+#define ATH10K_HTC_MAX_CTRL_MSG_LEN 256
+#define ATH10K_HTC_WAIT_TIMEOUT_HZ (1*HZ)
+#define ATH10K_HTC_CONTROL_BUFFER_SIZE (ATH10K_HTC_MAX_CTRL_MSG_LEN + \
+					sizeof(struct ath10k_htc_hdr))
+#define ATH10K_HTC_CONN_SVC_TIMEOUT_HZ (1*HZ)
+
+struct ath10k_htc_ep {
+	struct ath10k_htc *htc;
+	enum ath10k_htc_ep_id eid;
+	enum ath10k_htc_svc_id service_id;
+	struct ath10k_htc_ep_ops ep_ops;
+
+	int max_tx_queue_depth;
+	int max_ep_message_len;
+	u8 ul_pipe_id;
+	u8 dl_pipe_id;
+
+	u8 seq_no; /* for debugging */
+	int tx_credits;
+	int tx_credit_size;
+	int tx_credits_per_max_message;
+	bool tx_credit_flow_enabled;
+};
+
+struct ath10k_htc_svc_tx_credits {
+	u16 service_id;
+	u8  credit_allocation;
+};
+
+struct ath10k_htc {
+	struct ath10k *ar;
+	struct ath10k_htc_ep endpoint[ATH10K_HTC_EP_COUNT];
+
+	/* protects endpoints */
+	spinlock_t tx_lock;
+
+	struct ath10k_htc_ops htc_ops;
+
+	u8 control_resp_buffer[ATH10K_HTC_MAX_CTRL_MSG_LEN];
+	int control_resp_len;
+
+	struct completion ctl_resp;
+
+	int total_transmit_credits;
+	struct ath10k_htc_svc_tx_credits service_tx_alloc[ATH10K_HTC_EP_COUNT];
+	int target_credit_size;
+};
+
+int ath10k_htc_init(struct ath10k *ar);
+int ath10k_htc_wait_target(struct ath10k_htc *htc);
+int ath10k_htc_start(struct ath10k_htc *htc);
+int ath10k_htc_connect_service(struct ath10k_htc *htc,
+			       struct ath10k_htc_svc_conn_req  *conn_req,
+			       struct ath10k_htc_svc_conn_resp *conn_resp);
+int ath10k_htc_send(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid,
+		    struct sk_buff *packet);
+struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size);
+void ath10k_htc_tx_completion_handler(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
new file mode 100644
index 0000000..3e6ba63
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -0,0 +1,276 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/slab.h>
+#include <linux/if_ether.h>
+
+#include "htt.h"
+#include "core.h"
+#include "debug.h"
+
+static const enum htt_t2h_msg_type htt_main_t2h_msg_types[] = {
+	[HTT_MAIN_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
+	[HTT_MAIN_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
+	[HTT_MAIN_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
+	[HTT_MAIN_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
+	[HTT_MAIN_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
+	[HTT_MAIN_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
+	[HTT_MAIN_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
+	[HTT_MAIN_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+	[HTT_MAIN_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
+	[HTT_MAIN_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
+	[HTT_MAIN_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+	[HTT_MAIN_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
+	[HTT_MAIN_T2H_MSG_TYPE_TX_INSPECT_IND] =
+		HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+	[HTT_MAIN_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
+		HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+	[HTT_MAIN_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND] =
+		HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
+	[HTT_MAIN_T2H_MSG_TYPE_RX_PN_IND] = HTT_T2H_MSG_TYPE_RX_PN_IND,
+	[HTT_MAIN_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND] =
+		HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
+	[HTT_MAIN_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
+};
+
+static const enum htt_t2h_msg_type htt_10x_t2h_msg_types[] = {
+	[HTT_10X_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
+	[HTT_10X_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
+	[HTT_10X_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
+	[HTT_10X_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
+	[HTT_10X_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
+	[HTT_10X_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
+	[HTT_10X_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
+	[HTT_10X_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+	[HTT_10X_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
+	[HTT_10X_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
+	[HTT_10X_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+	[HTT_10X_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
+	[HTT_10X_T2H_MSG_TYPE_RC_UPDATE_IND] = HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
+	[HTT_10X_T2H_MSG_TYPE_TX_INSPECT_IND] = HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+	[HTT_10X_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
+	[HTT_10X_T2H_MSG_TYPE_CHAN_CHANGE] = HTT_T2H_MSG_TYPE_CHAN_CHANGE,
+	[HTT_10X_T2H_MSG_TYPE_AGGR_CONF] = HTT_T2H_MSG_TYPE_AGGR_CONF,
+	[HTT_10X_T2H_MSG_TYPE_STATS_NOUPLOAD] = HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
+	[HTT_10X_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
+		HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+};
+
+static const enum htt_t2h_msg_type htt_tlv_t2h_msg_types[] = {
+	[HTT_TLV_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
+	[HTT_TLV_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
+	[HTT_TLV_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
+	[HTT_TLV_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
+	[HTT_TLV_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
+	[HTT_TLV_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
+	[HTT_TLV_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
+	[HTT_TLV_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+	[HTT_TLV_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
+	[HTT_TLV_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
+	[HTT_TLV_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+	[HTT_TLV_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
+	[HTT_TLV_T2H_MSG_TYPE_RC_UPDATE_IND] = HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
+	[HTT_TLV_T2H_MSG_TYPE_TX_INSPECT_IND] = HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+	[HTT_TLV_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
+		HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+	[HTT_TLV_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND] =
+		HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
+	[HTT_TLV_T2H_MSG_TYPE_RX_PN_IND] = HTT_T2H_MSG_TYPE_RX_PN_IND,
+	[HTT_TLV_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND] =
+		HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
+	[HTT_TLV_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND] =
+		HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND,
+	[HTT_TLV_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE] =
+		HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE,
+	[HTT_TLV_T2H_MSG_TYPE_CHAN_CHANGE] = HTT_T2H_MSG_TYPE_CHAN_CHANGE,
+	[HTT_TLV_T2H_MSG_TYPE_RX_OFLD_PKT_ERR] =
+		HTT_T2H_MSG_TYPE_RX_OFLD_PKT_ERR,
+	[HTT_TLV_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
+};
+
+static const enum htt_t2h_msg_type htt_10_4_t2h_msg_types[] = {
+	[HTT_10_4_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
+	[HTT_10_4_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
+	[HTT_10_4_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
+	[HTT_10_4_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
+	[HTT_10_4_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
+	[HTT_10_4_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
+	[HTT_10_4_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
+	[HTT_10_4_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+	[HTT_10_4_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
+	[HTT_10_4_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
+	[HTT_10_4_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+	[HTT_10_4_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
+	[HTT_10_4_T2H_MSG_TYPE_RC_UPDATE_IND] = HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
+	[HTT_10_4_T2H_MSG_TYPE_TX_INSPECT_IND] =
+				HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+	[HTT_10_4_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
+				HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+	[HTT_10_4_T2H_MSG_TYPE_CHAN_CHANGE] = HTT_T2H_MSG_TYPE_CHAN_CHANGE,
+	[HTT_10_4_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND] =
+				HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
+	[HTT_10_4_T2H_MSG_TYPE_RX_PN_IND] = HTT_T2H_MSG_TYPE_RX_PN_IND,
+	[HTT_10_4_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND] =
+				HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
+	[HTT_10_4_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
+	[HTT_10_4_T2H_MSG_TYPE_EN_STATS] = HTT_T2H_MSG_TYPE_EN_STATS,
+	[HTT_10_4_T2H_MSG_TYPE_AGGR_CONF] = HTT_T2H_MSG_TYPE_AGGR_CONF,
+	[HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND] =
+				HTT_T2H_MSG_TYPE_TX_FETCH_IND,
+	[HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONF] =
+				HTT_T2H_MSG_TYPE_TX_FETCH_CONF,
+	[HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD] =
+				HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
+	[HTT_10_4_T2H_MSG_TYPE_TX_LOW_LATENCY_IND] =
+				HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND,
+};
+
+int ath10k_htt_connect(struct ath10k_htt *htt)
+{
+	struct ath10k_htc_svc_conn_req conn_req;
+	struct ath10k_htc_svc_conn_resp conn_resp;
+	int status;
+
+	memset(&conn_req, 0, sizeof(conn_req));
+	memset(&conn_resp, 0, sizeof(conn_resp));
+
+	conn_req.ep_ops.ep_tx_complete = ath10k_htt_htc_tx_complete;
+	conn_req.ep_ops.ep_rx_complete = ath10k_htt_t2h_msg_handler;
+
+	/* connect to control service */
+	conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_DATA_MSG;
+
+	status = ath10k_htc_connect_service(&htt->ar->htc, &conn_req,
+					    &conn_resp);
+
+	if (status)
+		return status;
+
+	htt->eid = conn_resp.eid;
+
+	return 0;
+}
+
+int ath10k_htt_init(struct ath10k *ar)
+{
+	struct ath10k_htt *htt = &ar->htt;
+
+	htt->ar = ar;
+
+	/*
+	 * Prefetch enough data to satisfy target
+	 * classification engine.
+	 * This is for LL chips. HL chips will probably
+	 * transfer the whole frame in the tx fragment.
+	 */
+	htt->prefetch_len =
+		36 + /* 802.11 + qos + ht */
+		4 + /* 802.1q */
+		8 + /* llc snap */
+		2; /* ip4 dscp or ip6 priority */
+
+	switch (ar->htt.op_version) {
+	case ATH10K_FW_HTT_OP_VERSION_10_4:
+		ar->htt.t2h_msg_types = htt_10_4_t2h_msg_types;
+		ar->htt.t2h_msg_types_max = HTT_10_4_T2H_NUM_MSGS;
+		break;
+	case ATH10K_FW_HTT_OP_VERSION_10_1:
+		ar->htt.t2h_msg_types = htt_10x_t2h_msg_types;
+		ar->htt.t2h_msg_types_max = HTT_10X_T2H_NUM_MSGS;
+		break;
+	case ATH10K_FW_HTT_OP_VERSION_TLV:
+		ar->htt.t2h_msg_types = htt_tlv_t2h_msg_types;
+		ar->htt.t2h_msg_types_max = HTT_TLV_T2H_NUM_MSGS;
+		break;
+	case ATH10K_FW_HTT_OP_VERSION_MAIN:
+		ar->htt.t2h_msg_types = htt_main_t2h_msg_types;
+		ar->htt.t2h_msg_types_max = HTT_MAIN_T2H_NUM_MSGS;
+		break;
+	case ATH10K_FW_HTT_OP_VERSION_MAX:
+	case ATH10K_FW_HTT_OP_VERSION_UNSET:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+#define HTT_TARGET_VERSION_TIMEOUT_HZ (3*HZ)
+
+static int ath10k_htt_verify_version(struct ath10k_htt *htt)
+{
+	struct ath10k *ar = htt->ar;
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt target version %d.%d\n",
+		   htt->target_version_major, htt->target_version_minor);
+
+	if (htt->target_version_major != 2 &&
+	    htt->target_version_major != 3) {
+		ath10k_err(ar, "unsupported htt major version %d. supported versions are 2 and 3\n",
+			   htt->target_version_major);
+		return -ENOTSUPP;
+	}
+
+	return 0;
+}
+
+int ath10k_htt_setup(struct ath10k_htt *htt)
+{
+	struct ath10k *ar = htt->ar;
+	int status;
+
+	init_completion(&htt->target_version_received);
+
+	status = ath10k_htt_h2t_ver_req_msg(htt);
+	if (status)
+		return status;
+
+	status = wait_for_completion_timeout(&htt->target_version_received,
+					     HTT_TARGET_VERSION_TIMEOUT_HZ);
+	if (status == 0) {
+		ath10k_warn(ar, "htt version request timed out\n");
+		return -ETIMEDOUT;
+	}
+
+	status = ath10k_htt_verify_version(htt);
+	if (status) {
+		ath10k_warn(ar, "failed to verify htt version: %d\n",
+			    status);
+		return status;
+	}
+
+	status = ath10k_htt_send_frag_desc_bank_cfg(htt);
+	if (status)
+		return status;
+
+	status = ath10k_htt_send_rx_ring_cfg_ll(htt);
+	if (status) {
+		ath10k_warn(ar, "failed to setup rx ring: %d\n",
+			    status);
+		return status;
+	}
+
+	status = ath10k_htt_h2t_aggr_cfg_msg(htt,
+					     htt->max_num_ampdu,
+					     htt->max_num_amsdu);
+	if (status) {
+		ath10k_warn(ar, "failed to setup amsdu/ampdu limit: %d\n",
+			    status);
+		return status;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
new file mode 100644
index 0000000..2bad50e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -0,0 +1,1602 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _HTT_H_
+#define _HTT_H_
+
+#include <linux/bug.h>
+#include <linux/interrupt.h>
+#include <linux/dmapool.h>
+#include <linux/hashtable.h>
+#include <net/mac80211.h>
+
+#include "htc.h"
+#include "hw.h"
+#include "rx_desc.h"
+
+enum htt_dbg_stats_type {
+	HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0,
+	HTT_DBG_STATS_RX_REORDER    = 1 << 1,
+	HTT_DBG_STATS_RX_RATE_INFO  = 1 << 2,
+	HTT_DBG_STATS_TX_PPDU_LOG   = 1 << 3,
+	HTT_DBG_STATS_TX_RATE_INFO  = 1 << 4,
+	/* bits 5-23 currently reserved */
+
+	HTT_DBG_NUM_STATS /* keep this last */
+};
+
+enum htt_h2t_msg_type { /* host-to-target */
+	HTT_H2T_MSG_TYPE_VERSION_REQ        = 0,
+	HTT_H2T_MSG_TYPE_TX_FRM             = 1,
+	HTT_H2T_MSG_TYPE_RX_RING_CFG        = 2,
+	HTT_H2T_MSG_TYPE_STATS_REQ          = 3,
+	HTT_H2T_MSG_TYPE_SYNC               = 4,
+	HTT_H2T_MSG_TYPE_AGGR_CFG           = 5,
+	HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG = 6,
+
+	/* This command is used for sending management frames in HTT < 3.0.
+	 * HTT >= 3.0 uses TX_FRM for everything. */
+	HTT_H2T_MSG_TYPE_MGMT_TX            = 7,
+
+	HTT_H2T_NUM_MSGS /* keep this last */
+};
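+
+/*
+ * Illustrative sketch only, not part of the original header: picks the
+ * host-to-target tx command according to the HTT version note above
+ * (HTT < 3.0 uses MGMT_TX for management frames, HTT >= 3.0 uses TX_FRM
+ * for everything). The helper name is hypothetical.
+ */
+static inline enum htt_h2t_msg_type
+ath10k_htt_mgmt_tx_msg_type(u8 target_version_major)
+{
+	if (target_version_major >= 3)
+		return HTT_H2T_MSG_TYPE_TX_FRM;
+
+	return HTT_H2T_MSG_TYPE_MGMT_TX;
+}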
+
+struct htt_cmd_hdr {
+	u8 msg_type;
+} __packed;
+
+struct htt_ver_req {
+	u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
+} __packed;
+
+/*
+ * HTT tx MSDU descriptor
+ *
+ * The HTT tx MSDU descriptor is created by the host HTT SW for each
+ * tx MSDU.  The HTT tx MSDU descriptor contains the information that
+ * the target firmware needs for the FW's tx processing, particularly
+ * for creating the HW msdu descriptor.
+ * The same HTT tx descriptor is used for HL and LL systems, though
+ * a few fields within the tx descriptor are used only by LL or
+ * only by HL.
+ * The HTT tx descriptor is defined in two manners: by a struct with
+ * bitfields, and by a series of [dword offset, bit mask, bit shift]
+ * definitions.
+ * The target should use the struct def, for simplicity and clarity,
+ * but the host shall use the bit-mask + bit-shift defs, to be endian-
+ * neutral.  Specifically, the host shall use the get/set macros built
+ * around the mask + shift defs (see the illustrative sketch below
+ * struct htt_data_tx_desc).
+ */
+struct htt_data_tx_desc_frag {
+	union {
+		struct double_word_addr {
+			__le32 paddr;
+			__le32 len;
+		} __packed dword_addr;
+		struct triple_word_addr {
+			__le32 paddr_lo;
+			__le16 paddr_hi;
+			__le16 len_16;
+		} __packed tword_addr;
+	} __packed;
+} __packed;
+
+struct htt_msdu_ext_desc {
+	__le32 tso_flag[3];
+	__le16 ip_identification;
+	u8 flags;
+	u8 reserved;
+	struct htt_data_tx_desc_frag frags[6];
+};
+
+#define	HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE		BIT(0)
+#define	HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE	BIT(1)
+#define	HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE	BIT(2)
+#define	HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE	BIT(3)
+#define	HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE	BIT(4)
+
+#define HTT_MSDU_CHECKSUM_ENABLE (HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE \
+				 | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE \
+				 | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE \
+				 | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE \
+				 | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE)
+
+enum htt_data_tx_desc_flags0 {
+	HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT = 1 << 0,
+	HTT_DATA_TX_DESC_FLAGS0_NO_AGGR         = 1 << 1,
+	HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT      = 1 << 2,
+	HTT_DATA_TX_DESC_FLAGS0_NO_CLASSIFY     = 1 << 3,
+	HTT_DATA_TX_DESC_FLAGS0_RSVD0           = 1 << 4
+#define HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_MASK 0xE0
+#define HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_LSB 5
+};
+
+enum htt_data_tx_desc_flags1 {
+#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_BITS 6
+#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_MASK 0x003F
+#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_LSB  0
+#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_BITS 5
+#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_MASK 0x07C0
+#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_LSB  6
+	HTT_DATA_TX_DESC_FLAGS1_POSTPONED        = 1 << 11,
+	HTT_DATA_TX_DESC_FLAGS1_MORE_IN_BATCH    = 1 << 12,
+	HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD = 1 << 13,
+	HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD = 1 << 14,
+	HTT_DATA_TX_DESC_FLAGS1_RSVD1            = 1 << 15
+};
+
+enum htt_data_tx_ext_tid {
+	HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST = 16,
+	HTT_DATA_TX_EXT_TID_MGMT                = 17,
+	HTT_DATA_TX_EXT_TID_INVALID             = 31
+};
+
+#define HTT_INVALID_PEERID 0xFFFF
+
+/*
+ * htt_data_tx_desc - used for data tx path
+ *
+ * Note: vdev_id irrelevant for pkt_type == raw and no_classify == 1.
+ *       ext_tid: for qos-data frames (0-15), see %HTT_DATA_TX_EXT_TID_
+ *                for special kinds of tids
+ *       postponed: only for HL hosts. indicates if this is a resend
+ *                  (HL hosts manage queues on the host )
+ *       more_in_batch: only for HL hosts. indicates if more packets are
+ *                      pending. this allows target to wait and aggregate
+ *       freq: 0 means home channel of given vdev. intended for offchannel
+ */
+struct htt_data_tx_desc {
+	u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */
+	__le16 flags1; /* %HTT_DATA_TX_DESC_FLAGS1_ */
+	__le16 len;
+	__le16 id;
+	__le32 frags_paddr;
+	__le16 peerid;
+	__le16 freq;
+	u8 prefetch[0]; /* start of frame, for FW classification engine */
+} __packed;
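+
+/*
+ * Illustrative sketch only, not part of the original header: fills the
+ * vdev_id and ext_tid fields of flags1 using the endian-neutral mask +
+ * shift style described above, assuming the SM() helper from hw.h (the
+ * same helper the driver uses elsewhere, e.g. in htc.c). The function
+ * name is hypothetical.
+ */
+static inline void
+ath10k_htt_tx_desc_set_vdev_tid(struct htt_data_tx_desc *desc, u8 vdev_id,
+				u8 ext_tid)
+{
+	u16 flags1 = 0;
+
+	flags1 |= SM(vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
+	flags1 |= SM(ext_tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
+
+	desc->flags1 = __cpu_to_le16(flags1);
+}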
+
+enum htt_rx_ring_flags {
+	HTT_RX_RING_FLAGS_MAC80211_HDR = 1 << 0,
+	HTT_RX_RING_FLAGS_MSDU_PAYLOAD = 1 << 1,
+	HTT_RX_RING_FLAGS_PPDU_START   = 1 << 2,
+	HTT_RX_RING_FLAGS_PPDU_END     = 1 << 3,
+	HTT_RX_RING_FLAGS_MPDU_START   = 1 << 4,
+	HTT_RX_RING_FLAGS_MPDU_END     = 1 << 5,
+	HTT_RX_RING_FLAGS_MSDU_START   = 1 << 6,
+	HTT_RX_RING_FLAGS_MSDU_END     = 1 << 7,
+	HTT_RX_RING_FLAGS_RX_ATTENTION = 1 << 8,
+	HTT_RX_RING_FLAGS_FRAG_INFO    = 1 << 9,
+	HTT_RX_RING_FLAGS_UNICAST_RX   = 1 << 10,
+	HTT_RX_RING_FLAGS_MULTICAST_RX = 1 << 11,
+	HTT_RX_RING_FLAGS_CTRL_RX      = 1 << 12,
+	HTT_RX_RING_FLAGS_MGMT_RX      = 1 << 13,
+	HTT_RX_RING_FLAGS_NULL_RX      = 1 << 14,
+	HTT_RX_RING_FLAGS_PHY_DATA_RX  = 1 << 15
+};
+
+#define HTT_RX_RING_SIZE_MIN 128
+#define HTT_RX_RING_SIZE_MAX 2048
+
+struct htt_rx_ring_setup_ring {
+	__le32 fw_idx_shadow_reg_paddr;
+	__le32 rx_ring_base_paddr;
+	__le16 rx_ring_len; /* in 4-byte words */
+	__le16 rx_ring_bufsize; /* rx skb size - in bytes */
+	__le16 flags; /* %HTT_RX_RING_FLAGS_ */
+	__le16 fw_idx_init_val;
+
+	/* the following offsets are in 4-byte units */
+	__le16 mac80211_hdr_offset;
+	__le16 msdu_payload_offset;
+	__le16 ppdu_start_offset;
+	__le16 ppdu_end_offset;
+	__le16 mpdu_start_offset;
+	__le16 mpdu_end_offset;
+	__le16 msdu_start_offset;
+	__le16 msdu_end_offset;
+	__le16 rx_attention_offset;
+	__le16 frag_info_offset;
+} __packed;
+
+struct htt_rx_ring_setup_hdr {
+	u8 num_rings; /* supported values: 1, 2 */
+	__le16 rsvd0;
+} __packed;
+
+struct htt_rx_ring_setup {
+	struct htt_rx_ring_setup_hdr hdr;
+	struct htt_rx_ring_setup_ring rings[0];
+} __packed;
+
+/*
+ * htt_stats_req - request target to send specified statistics
+ *
+ * @msg_type: hardcoded %HTT_H2T_MSG_TYPE_STATS_REQ
+ * @upload_types: see %htt_dbg_stats_type. This is actually a 24-bit field,
+ *	so make sure it is stored little-endian.
+ * @reset_types: see %htt_dbg_stats_type. This is actually a 24-bit field,
+ *	so make sure it is stored little-endian.
+ * @cfg_val: stat_type specific configuration
+ * @stat_type: see %htt_dbg_stats_type
+ * @cookie_lsb: used for confirmation message from target->host
+ * @cookie_msb: ditto as %cookie_lsb
+ */
+struct htt_stats_req {
+	u8 upload_types[3];
+	u8 rsvd0;
+	u8 reset_types[3];
+	struct {
+		u8 mpdu_bytes;
+		u8 mpdu_num_msdus;
+		u8 msdu_bytes;
+	} __packed;
+	u8 stat_type;
+	__le32 cookie_lsb;
+	__le32 cookie_msb;
+} __packed;
+
+#define HTT_STATS_REQ_CFG_STAT_TYPE_INVALID 0xff
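+
+/*
+ * Illustrative sketch only, not part of the original header: packs 24-bit
+ * %htt_dbg_stats_type masks into the little-endian 3-byte arrays of
+ * struct htt_stats_req, as the field comments above require. The helper
+ * name is hypothetical.
+ */
+static inline void ath10k_htt_stats_req_set_masks(struct htt_stats_req *req,
+						   u32 upload_mask,
+						   u32 reset_mask)
+{
+	int i;
+
+	/* store the low 24 bits of each mask, least significant byte first */
+	for (i = 0; i < 3; i++) {
+		req->upload_types[i] = (upload_mask >> (8 * i)) & 0xff;
+		req->reset_types[i] = (reset_mask >> (8 * i)) & 0xff;
+	}
+}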
+
+/*
+ * htt_oob_sync_req - request out-of-band sync
+ *
+ * The HTT SYNC tells the target to suspend processing of subsequent
+ * HTT host-to-target messages until some other target agent locally
+ * informs the target HTT FW that the current sync counter is equal to
+ * or greater than (in a modulo sense) the sync counter specified in
+ * the SYNC message.
+ *
+ * This allows other host-target components to synchronize their operation
+ * with HTT, e.g. to ensure that tx frames don't get transmitted until a
+ * security key has been downloaded to and activated by the target.
+ * In the absence of any explicit synchronization counter value
+ * specification, the target HTT FW will use zero as the default current
+ * sync value.
+ *
+ * The HTT target FW will suspend its host->target message processing as long
+ * as 0 < (in-band sync counter - out-of-band sync counter) & 0xff < 128.
+ */
+struct htt_oob_sync_req {
+	u8 sync_count;
+	__le16 rsvd0;
+} __packed;
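+
+/*
+ * Illustrative sketch only, not part of the original header: models the
+ * modulo-256 stall condition described above, i.e. the target suspends
+ * host->target processing while
+ * 0 < ((in-band count - out-of-band count) & 0xff) < 128.
+ * The helper name is hypothetical.
+ */
+static inline bool ath10k_htt_sync_would_stall(u8 in_band_count, u8 oob_count)
+{
+	u8 delta = in_band_count - oob_count; /* wraps modulo 256 */
+
+	return delta > 0 && delta < 128;
+}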
+
+struct htt_aggr_conf {
+	u8 max_num_ampdu_subframes;
+	/* amsdu_subframes is limited by 0x1F mask */
+	u8 max_num_amsdu_subframes;
+} __packed;
+
+#define HTT_MGMT_FRM_HDR_DOWNLOAD_LEN 32
+struct htt_mgmt_tx_desc_qca99x0 {
+	__le32 rate;
+} __packed;
+
+struct htt_mgmt_tx_desc {
+	u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
+	__le32 msdu_paddr;
+	__le32 desc_id;
+	__le32 len;
+	__le32 vdev_id;
+	u8 hdr[HTT_MGMT_FRM_HDR_DOWNLOAD_LEN];
+	union {
+		struct htt_mgmt_tx_desc_qca99x0 qca99x0;
+	} __packed;
+} __packed;
+
+enum htt_mgmt_tx_status {
+	HTT_MGMT_TX_STATUS_OK    = 0,
+	HTT_MGMT_TX_STATUS_RETRY = 1,
+	HTT_MGMT_TX_STATUS_DROP  = 2
+};
+
+/*=== target -> host messages ===============================================*/
+
+enum htt_main_t2h_msg_type {
+	HTT_MAIN_T2H_MSG_TYPE_VERSION_CONF             = 0x0,
+	HTT_MAIN_T2H_MSG_TYPE_RX_IND                   = 0x1,
+	HTT_MAIN_T2H_MSG_TYPE_RX_FLUSH                 = 0x2,
+	HTT_MAIN_T2H_MSG_TYPE_PEER_MAP                 = 0x3,
+	HTT_MAIN_T2H_MSG_TYPE_PEER_UNMAP               = 0x4,
+	HTT_MAIN_T2H_MSG_TYPE_RX_ADDBA                 = 0x5,
+	HTT_MAIN_T2H_MSG_TYPE_RX_DELBA                 = 0x6,
+	HTT_MAIN_T2H_MSG_TYPE_TX_COMPL_IND             = 0x7,
+	HTT_MAIN_T2H_MSG_TYPE_PKTLOG                   = 0x8,
+	HTT_MAIN_T2H_MSG_TYPE_STATS_CONF               = 0x9,
+	HTT_MAIN_T2H_MSG_TYPE_RX_FRAG_IND              = 0xa,
+	HTT_MAIN_T2H_MSG_TYPE_SEC_IND                  = 0xb,
+	HTT_MAIN_T2H_MSG_TYPE_TX_INSPECT_IND           = 0xd,
+	HTT_MAIN_T2H_MSG_TYPE_MGMT_TX_COMPL_IND        = 0xe,
+	HTT_MAIN_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND     = 0xf,
+	HTT_MAIN_T2H_MSG_TYPE_RX_PN_IND                = 0x10,
+	HTT_MAIN_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND   = 0x11,
+	HTT_MAIN_T2H_MSG_TYPE_TEST,
+	/* keep this last */
+	HTT_MAIN_T2H_NUM_MSGS
+};
+
+enum htt_10x_t2h_msg_type {
+	HTT_10X_T2H_MSG_TYPE_VERSION_CONF              = 0x0,
+	HTT_10X_T2H_MSG_TYPE_RX_IND                    = 0x1,
+	HTT_10X_T2H_MSG_TYPE_RX_FLUSH                  = 0x2,
+	HTT_10X_T2H_MSG_TYPE_PEER_MAP                  = 0x3,
+	HTT_10X_T2H_MSG_TYPE_PEER_UNMAP                = 0x4,
+	HTT_10X_T2H_MSG_TYPE_RX_ADDBA                  = 0x5,
+	HTT_10X_T2H_MSG_TYPE_RX_DELBA                  = 0x6,
+	HTT_10X_T2H_MSG_TYPE_TX_COMPL_IND              = 0x7,
+	HTT_10X_T2H_MSG_TYPE_PKTLOG                    = 0x8,
+	HTT_10X_T2H_MSG_TYPE_STATS_CONF                = 0x9,
+	HTT_10X_T2H_MSG_TYPE_RX_FRAG_IND               = 0xa,
+	HTT_10X_T2H_MSG_TYPE_SEC_IND                   = 0xb,
+	HTT_10X_T2H_MSG_TYPE_RC_UPDATE_IND             = 0xc,
+	HTT_10X_T2H_MSG_TYPE_TX_INSPECT_IND            = 0xd,
+	HTT_10X_T2H_MSG_TYPE_TEST                      = 0xe,
+	HTT_10X_T2H_MSG_TYPE_CHAN_CHANGE               = 0xf,
+	HTT_10X_T2H_MSG_TYPE_AGGR_CONF                 = 0x11,
+	HTT_10X_T2H_MSG_TYPE_STATS_NOUPLOAD            = 0x12,
+	HTT_10X_T2H_MSG_TYPE_MGMT_TX_COMPL_IND         = 0x13,
+	/* keep this last */
+	HTT_10X_T2H_NUM_MSGS
+};
+
+enum htt_tlv_t2h_msg_type {
+	HTT_TLV_T2H_MSG_TYPE_VERSION_CONF              = 0x0,
+	HTT_TLV_T2H_MSG_TYPE_RX_IND                    = 0x1,
+	HTT_TLV_T2H_MSG_TYPE_RX_FLUSH                  = 0x2,
+	HTT_TLV_T2H_MSG_TYPE_PEER_MAP                  = 0x3,
+	HTT_TLV_T2H_MSG_TYPE_PEER_UNMAP                = 0x4,
+	HTT_TLV_T2H_MSG_TYPE_RX_ADDBA                  = 0x5,
+	HTT_TLV_T2H_MSG_TYPE_RX_DELBA                  = 0x6,
+	HTT_TLV_T2H_MSG_TYPE_TX_COMPL_IND              = 0x7,
+	HTT_TLV_T2H_MSG_TYPE_PKTLOG                    = 0x8,
+	HTT_TLV_T2H_MSG_TYPE_STATS_CONF                = 0x9,
+	HTT_TLV_T2H_MSG_TYPE_RX_FRAG_IND               = 0xa,
+	HTT_TLV_T2H_MSG_TYPE_SEC_IND                   = 0xb,
+	HTT_TLV_T2H_MSG_TYPE_RC_UPDATE_IND             = 0xc, /* deprecated */
+	HTT_TLV_T2H_MSG_TYPE_TX_INSPECT_IND            = 0xd,
+	HTT_TLV_T2H_MSG_TYPE_MGMT_TX_COMPL_IND         = 0xe,
+	HTT_TLV_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND      = 0xf,
+	HTT_TLV_T2H_MSG_TYPE_RX_PN_IND                 = 0x10,
+	HTT_TLV_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND    = 0x11,
+	HTT_TLV_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND       = 0x12,
+	/* 0x13 reserved */
+	HTT_TLV_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE       = 0x14,
+	HTT_TLV_T2H_MSG_TYPE_CHAN_CHANGE               = 0x15,
+	HTT_TLV_T2H_MSG_TYPE_RX_OFLD_PKT_ERR           = 0x16,
+	HTT_TLV_T2H_MSG_TYPE_TEST,
+	/* keep this last */
+	HTT_TLV_T2H_NUM_MSGS
+};
+
+enum htt_10_4_t2h_msg_type {
+	HTT_10_4_T2H_MSG_TYPE_VERSION_CONF           = 0x0,
+	HTT_10_4_T2H_MSG_TYPE_RX_IND                 = 0x1,
+	HTT_10_4_T2H_MSG_TYPE_RX_FLUSH               = 0x2,
+	HTT_10_4_T2H_MSG_TYPE_PEER_MAP               = 0x3,
+	HTT_10_4_T2H_MSG_TYPE_PEER_UNMAP             = 0x4,
+	HTT_10_4_T2H_MSG_TYPE_RX_ADDBA               = 0x5,
+	HTT_10_4_T2H_MSG_TYPE_RX_DELBA               = 0x6,
+	HTT_10_4_T2H_MSG_TYPE_TX_COMPL_IND           = 0x7,
+	HTT_10_4_T2H_MSG_TYPE_PKTLOG                 = 0x8,
+	HTT_10_4_T2H_MSG_TYPE_STATS_CONF             = 0x9,
+	HTT_10_4_T2H_MSG_TYPE_RX_FRAG_IND            = 0xa,
+	HTT_10_4_T2H_MSG_TYPE_SEC_IND                = 0xb,
+	HTT_10_4_T2H_MSG_TYPE_RC_UPDATE_IND          = 0xc,
+	HTT_10_4_T2H_MSG_TYPE_TX_INSPECT_IND         = 0xd,
+	HTT_10_4_T2H_MSG_TYPE_MGMT_TX_COMPL_IND      = 0xe,
+	HTT_10_4_T2H_MSG_TYPE_CHAN_CHANGE            = 0xf,
+	HTT_10_4_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND   = 0x10,
+	HTT_10_4_T2H_MSG_TYPE_RX_PN_IND              = 0x11,
+	HTT_10_4_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x12,
+	HTT_10_4_T2H_MSG_TYPE_TEST                   = 0x13,
+	HTT_10_4_T2H_MSG_TYPE_EN_STATS               = 0x14,
+	HTT_10_4_T2H_MSG_TYPE_AGGR_CONF              = 0x15,
+	HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND           = 0x16,
+	HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONF          = 0x17,
+	HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD         = 0x18,
+	/* 0x19 to 0x2f are reserved */
+	HTT_10_4_T2H_MSG_TYPE_TX_LOW_LATENCY_IND     = 0x30,
+	/* keep this last */
+	HTT_10_4_T2H_NUM_MSGS
+};
+
+enum htt_t2h_msg_type {
+	HTT_T2H_MSG_TYPE_VERSION_CONF,
+	HTT_T2H_MSG_TYPE_RX_IND,
+	HTT_T2H_MSG_TYPE_RX_FLUSH,
+	HTT_T2H_MSG_TYPE_PEER_MAP,
+	HTT_T2H_MSG_TYPE_PEER_UNMAP,
+	HTT_T2H_MSG_TYPE_RX_ADDBA,
+	HTT_T2H_MSG_TYPE_RX_DELBA,
+	HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+	HTT_T2H_MSG_TYPE_PKTLOG,
+	HTT_T2H_MSG_TYPE_STATS_CONF,
+	HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+	HTT_T2H_MSG_TYPE_SEC_IND,
+	HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
+	HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+	HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+	HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
+	HTT_T2H_MSG_TYPE_RX_PN_IND,
+	HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
+	HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND,
+	HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE,
+	HTT_T2H_MSG_TYPE_CHAN_CHANGE,
+	HTT_T2H_MSG_TYPE_RX_OFLD_PKT_ERR,
+	HTT_T2H_MSG_TYPE_AGGR_CONF,
+	HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
+	HTT_T2H_MSG_TYPE_TEST,
+	HTT_T2H_MSG_TYPE_EN_STATS,
+	HTT_T2H_MSG_TYPE_TX_FETCH_IND,
+	HTT_T2H_MSG_TYPE_TX_FETCH_CONF,
+	HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND,
+	/* keep this last */
+	HTT_T2H_NUM_MSGS
+};
+
+/*
+ * htt_resp_hdr - header for target-to-host messages
+ *
+ * msg_type: see htt_t2h_msg_type
+ */
+struct htt_resp_hdr {
+	u8 msg_type;
+} __packed;
+
+#define HTT_RESP_HDR_MSG_TYPE_OFFSET 0
+#define HTT_RESP_HDR_MSG_TYPE_MASK   0xff
+#define HTT_RESP_HDR_MSG_TYPE_LSB    0
+
+/* htt_ver_resp - response sent for htt_ver_req */
+struct htt_ver_resp {
+	u8 minor;
+	u8 major;
+	u8 rsvd0;
+} __packed;
+
+struct htt_mgmt_tx_completion {
+	u8 rsvd0;
+	u8 rsvd1;
+	u8 rsvd2;
+	__le32 desc_id;
+	__le32 status;
+} __packed;
+
+#define HTT_RX_INDICATION_INFO0_EXT_TID_MASK  (0x3F)
+#define HTT_RX_INDICATION_INFO0_EXT_TID_LSB   (0)
+#define HTT_RX_INDICATION_INFO0_FLUSH_VALID   (1 << 6)
+#define HTT_RX_INDICATION_INFO0_RELEASE_VALID (1 << 7)
+
+#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_MASK   0x0000003F
+#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_LSB    0
+#define HTT_RX_INDICATION_INFO1_FLUSH_END_SEQNO_MASK     0x00000FC0
+#define HTT_RX_INDICATION_INFO1_FLUSH_END_SEQNO_LSB      6
+#define HTT_RX_INDICATION_INFO1_RELEASE_START_SEQNO_MASK 0x0003F000
+#define HTT_RX_INDICATION_INFO1_RELEASE_START_SEQNO_LSB  12
+#define HTT_RX_INDICATION_INFO1_RELEASE_END_SEQNO_MASK   0x00FC0000
+#define HTT_RX_INDICATION_INFO1_RELEASE_END_SEQNO_LSB    18
+#define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_MASK     0xFF000000
+#define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_LSB      24
+
+struct htt_rx_indication_hdr {
+	u8 info0; /* %HTT_RX_INDICATION_INFO0_ */
+	__le16 peer_id;
+	__le32 info1; /* %HTT_RX_INDICATION_INFO1_ */
+} __packed;
+
+#define HTT_RX_INDICATION_INFO0_PHY_ERR_VALID    (1 << 0)
+#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_MASK (0x1E)
+#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_LSB  (1)
+#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK  (1 << 5)
+#define HTT_RX_INDICATION_INFO0_END_VALID        (1 << 6)
+#define HTT_RX_INDICATION_INFO0_START_VALID      (1 << 7)
+
+#define HTT_RX_INDICATION_INFO1_VHT_SIG_A1_MASK    0x00FFFFFF
+#define HTT_RX_INDICATION_INFO1_VHT_SIG_A1_LSB     0
+#define HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE_MASK 0xFF000000
+#define HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE_LSB  24
+
+#define HTT_RX_INDICATION_INFO2_VHT_SIG_A1_MASK 0x00FFFFFF
+#define HTT_RX_INDICATION_INFO2_VHT_SIG_A1_LSB  0
+#define HTT_RX_INDICATION_INFO2_SERVICE_MASK    0xFF000000
+#define HTT_RX_INDICATION_INFO2_SERVICE_LSB     24
+
+enum htt_rx_legacy_rate {
+	HTT_RX_OFDM_48 = 0,
+	HTT_RX_OFDM_24 = 1,
+	HTT_RX_OFDM_12,
+	HTT_RX_OFDM_6,
+	HTT_RX_OFDM_54,
+	HTT_RX_OFDM_36,
+	HTT_RX_OFDM_18,
+	HTT_RX_OFDM_9,
+
+	/* long preamble */
+	HTT_RX_CCK_11_LP = 0,
+	HTT_RX_CCK_5_5_LP = 1,
+	HTT_RX_CCK_2_LP,
+	HTT_RX_CCK_1_LP,
+	/* short preamble */
+	HTT_RX_CCK_11_SP,
+	HTT_RX_CCK_5_5_SP,
+	HTT_RX_CCK_2_SP
+};
+
+enum htt_rx_legacy_rate_type {
+	HTT_RX_LEGACY_RATE_OFDM = 0,
+	HTT_RX_LEGACY_RATE_CCK
+};
+
+enum htt_rx_preamble_type {
+	HTT_RX_LEGACY        = 0x4,
+	HTT_RX_HT            = 0x8,
+	HTT_RX_HT_WITH_TXBF  = 0x9,
+	HTT_RX_VHT           = 0xC,
+	HTT_RX_VHT_WITH_TXBF = 0xD,
+};
+
+/*
+ * Fields: phy_err_valid, phy_err_code, tsf,
+ * usec_timestamp, sub_usec_timestamp
+ * ..are valid only if end_valid == 1.
+ *
+ * Fields: rssi_chains, legacy_rate_type,
+ * legacy_rate_cck, preamble_type, service,
+ * vht_sig_*
+ * ..are valid only if start_valid == 1;
+ */
+struct htt_rx_indication_ppdu {
+	u8 combined_rssi;
+	u8 sub_usec_timestamp;
+	u8 phy_err_code;
+	u8 info0; /* HTT_RX_INDICATION_INFO0_ */
+	struct {
+		u8 pri20_db;
+		u8 ext20_db;
+		u8 ext40_db;
+		u8 ext80_db;
+	} __packed rssi_chains[4];
+	__le32 tsf;
+	__le32 usec_timestamp;
+	__le32 info1; /* HTT_RX_INDICATION_INFO1_ */
+	__le32 info2; /* HTT_RX_INDICATION_INFO2_ */
+} __packed;
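+
+/*
+ * Illustrative sketch only, not part of the original header: the
+ * start_valid/end_valid gating described above, to be checked before
+ * trusting the corresponding ppdu fields. Helper names are hypothetical.
+ */
+static inline bool
+ath10k_htt_rx_ppdu_start_valid(const struct htt_rx_indication_ppdu *ppdu)
+{
+	return !!(ppdu->info0 & HTT_RX_INDICATION_INFO0_START_VALID);
+}
+
+static inline bool
+ath10k_htt_rx_ppdu_end_valid(const struct htt_rx_indication_ppdu *ppdu)
+{
+	return !!(ppdu->info0 & HTT_RX_INDICATION_INFO0_END_VALID);
+}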
+
+enum htt_rx_mpdu_status {
+	HTT_RX_IND_MPDU_STATUS_UNKNOWN = 0x0,
+	HTT_RX_IND_MPDU_STATUS_OK,
+	HTT_RX_IND_MPDU_STATUS_ERR_FCS,
+	HTT_RX_IND_MPDU_STATUS_ERR_DUP,
+	HTT_RX_IND_MPDU_STATUS_ERR_REPLAY,
+	HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER,
+	/* only accept EAPOL frames */
+	HTT_RX_IND_MPDU_STATUS_UNAUTH_PEER,
+	HTT_RX_IND_MPDU_STATUS_OUT_OF_SYNC,
+	/* Non-data in promiscuous mode */
+	HTT_RX_IND_MPDU_STATUS_MGMT_CTRL,
+	HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR,
+	HTT_RX_IND_MPDU_STATUS_DECRYPT_ERR,
+	HTT_RX_IND_MPDU_STATUS_MPDU_LENGTH_ERR,
+	HTT_RX_IND_MPDU_STATUS_ENCRYPT_REQUIRED_ERR,
+	HTT_RX_IND_MPDU_STATUS_PRIVACY_ERR,
+
+	/*
+	 * MISC: discard for unspecified reasons.
+	 * Leave this enum value last.
+	 */
+	HTT_RX_IND_MPDU_STATUS_ERR_MISC = 0xFF
+};
+
+struct htt_rx_indication_mpdu_range {
+	u8 mpdu_count;
+	u8 mpdu_range_status; /* %htt_rx_mpdu_status */
+	u8 pad0;
+	u8 pad1;
+} __packed;
+
+struct htt_rx_indication_prefix {
+	__le16 fw_rx_desc_bytes;
+	u8 pad0;
+	u8 pad1;
+};
+
+struct htt_rx_indication {
+	struct htt_rx_indication_hdr hdr;
+	struct htt_rx_indication_ppdu ppdu;
+	struct htt_rx_indication_prefix prefix;
+
+	/*
+	 * the following fields are both dynamically sized, so
+	 * take care addressing them
+	 */
+
+	/* the size of this is %fw_rx_desc_bytes */
+	struct fw_rx_desc_base fw_desc;
+
+	/*
+	 * %mpdu_ranges starts after &%prefix + roundup(%fw_rx_desc_bytes, 4)
+	 * and has %num_mpdu_ranges elements.
+	 */
+	struct htt_rx_indication_mpdu_range mpdu_ranges[0];
+} __packed;
+
+static inline struct htt_rx_indication_mpdu_range *
+		htt_rx_ind_get_mpdu_ranges(struct htt_rx_indication *rx_ind)
+{
+	void *ptr = rx_ind;
+
+	ptr += sizeof(rx_ind->hdr)
+	     + sizeof(rx_ind->ppdu)
+	     + sizeof(rx_ind->prefix)
+	     + roundup(__le16_to_cpu(rx_ind->prefix.fw_rx_desc_bytes), 4);
+	return ptr;
+}
+
+enum htt_rx_flush_mpdu_status {
+	HTT_RX_FLUSH_MPDU_DISCARD = 0,
+	HTT_RX_FLUSH_MPDU_REORDER = 1,
+};
+
+/*
+ * htt_rx_flush - discard or reorder given range of mpdus
+ *
+ * Note: host must check if all sequence numbers between
+ *	[seq_num_start, seq_num_end-1] are valid.
+ */
+struct htt_rx_flush {
+	__le16 peer_id;
+	u8 tid;
+	u8 rsvd0;
+	u8 mpdu_status; /* %htt_rx_flush_mpdu_status */
+	u8 seq_num_start; /* it is 6 LSBs of 802.11 seq no */
+	u8 seq_num_end; /* it is 6 LSBs of 802.11 seq no */
+};
+
+struct htt_rx_peer_map {
+	u8 vdev_id;
+	__le16 peer_id;
+	u8 addr[6];
+	u8 rsvd0;
+	u8 rsvd1;
+} __packed;
+
+struct htt_rx_peer_unmap {
+	u8 rsvd0;
+	__le16 peer_id;
+} __packed;
+
+enum htt_security_types {
+	HTT_SECURITY_NONE,
+	HTT_SECURITY_WEP128,
+	HTT_SECURITY_WEP104,
+	HTT_SECURITY_WEP40,
+	HTT_SECURITY_TKIP,
+	HTT_SECURITY_TKIP_NOMIC,
+	HTT_SECURITY_AES_CCMP,
+	HTT_SECURITY_WAPI,
+
+	HTT_NUM_SECURITY_TYPES /* keep this last! */
+};
+
+enum htt_security_flags {
+#define HTT_SECURITY_TYPE_MASK 0x7F
+#define HTT_SECURITY_TYPE_LSB  0
+	HTT_SECURITY_IS_UNICAST = 1 << 7
+};
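+
+/*
+ * Illustrative sketch (not part of the original ath10k sources): the MASK/LSB
+ * pairs above follow the convention used throughout this header - a field is
+ * extracted as (value & MASK) >> LSB. The driver normally uses its MS()
+ * helper macro for this; it is open-coded here for clarity.
+ */
+static inline u8 example_htt_security_type(u8 flags)
+{
+	return (flags & HTT_SECURITY_TYPE_MASK) >> HTT_SECURITY_TYPE_LSB;
+}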
+
+struct htt_security_indication {
+	union {
+		/* don't use bitfields; undefined behaviour */
+		u8 flags; /* %htt_security_flags */
+		struct {
+			u8 security_type:7, /* %htt_security_types */
+			   is_unicast:1;
+		} __packed;
+	} __packed;
+	__le16 peer_id;
+	u8 michael_key[8];
+	u8 wapi_rsc[16];
+} __packed;
+
+#define HTT_RX_BA_INFO0_TID_MASK     0x000F
+#define HTT_RX_BA_INFO0_TID_LSB      0
+#define HTT_RX_BA_INFO0_PEER_ID_MASK 0xFFF0
+#define HTT_RX_BA_INFO0_PEER_ID_LSB  4
+
+struct htt_rx_addba {
+	u8 window_size;
+	__le16 info0; /* %HTT_RX_BA_INFO0_ */
+} __packed;
+
+struct htt_rx_delba {
+	u8 rsvd0;
+	__le16 info0; /* %HTT_RX_BA_INFO0_ */
+} __packed;
+
+enum htt_data_tx_status {
+	HTT_DATA_TX_STATUS_OK            = 0,
+	HTT_DATA_TX_STATUS_DISCARD       = 1,
+	HTT_DATA_TX_STATUS_NO_ACK        = 2,
+	HTT_DATA_TX_STATUS_POSTPONE      = 3, /* HL only */
+	HTT_DATA_TX_STATUS_DOWNLOAD_FAIL = 128
+};
+
+enum htt_data_tx_flags {
+#define HTT_DATA_TX_STATUS_MASK 0x07
+#define HTT_DATA_TX_STATUS_LSB  0
+#define HTT_DATA_TX_TID_MASK    0x78
+#define HTT_DATA_TX_TID_LSB     3
+	HTT_DATA_TX_TID_INVALID = 1 << 7
+};
+
+#define HTT_TX_COMPL_INV_MSDU_ID 0xFFFF
+
+struct htt_data_tx_completion {
+	union {
+		u8 flags;
+		struct {
+			u8 status:3,
+			   tid:4,
+			   tid_invalid:1;
+		} __packed;
+	} __packed;
+	u8 num_msdus;
+	u8 rsvd0;
+	__le16 msdus[0]; /* variable length based on %num_msdus */
+} __packed;
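+
+/*
+ * Illustrative sketch (not part of the original ath10k sources): msdus[]
+ * above holds num_msdus little-endian MSDU ids, with HTT_TX_COMPL_INV_MSDU_ID
+ * marking an invalid entry. The function name and parameters are made up for
+ * this example.
+ */
+static inline bool
+example_htt_tx_compl_msdu_id(const struct htt_data_tx_completion *ev,
+			     int idx, u16 *msdu_id)
+{
+	if (idx >= ev->num_msdus)
+		return false;
+
+	*msdu_id = __le16_to_cpu(ev->msdus[idx]);
+	return *msdu_id != HTT_TX_COMPL_INV_MSDU_ID;
+}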
+
+struct htt_tx_compl_ind_base {
+	u32 hdr;
+	u16 payload[1/*or more*/];
+} __packed;
+
+struct htt_rc_tx_done_params {
+	u32 rate_code;
+	u32 rate_code_flags;
+	u32 flags;
+	u32 num_enqued; /* 1 for non-AMPDU */
+	u32 num_retries;
+	u32 num_failed; /* for AMPDU */
+	u32 ack_rssi;
+	u32 time_stamp;
+	u32 is_probe;
+};
+
+struct htt_rc_update {
+	u8 vdev_id;
+	__le16 peer_id;
+	u8 addr[6];
+	u8 num_elems;
+	u8 rsvd0;
+	struct htt_rc_tx_done_params params[0]; /* variable length %num_elems */
+} __packed;
+
+/* see htt_rx_indication for similar fields and descriptions */
+struct htt_rx_fragment_indication {
+	union {
+		u8 info0; /* %HTT_RX_FRAG_IND_INFO0_ */
+		struct {
+			u8 ext_tid:5,
+			   flush_valid:1;
+		} __packed;
+	} __packed;
+	__le16 peer_id;
+	__le32 info1; /* %HTT_RX_FRAG_IND_INFO1_ */
+	__le16 fw_rx_desc_bytes;
+	__le16 rsvd0;
+
+	u8 fw_msdu_rx_desc[0];
+} __packed;
+
+#define HTT_RX_FRAG_IND_INFO0_EXT_TID_MASK     0x1F
+#define HTT_RX_FRAG_IND_INFO0_EXT_TID_LSB      0
+#define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_MASK 0x20
+#define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_LSB  5
+
+#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_START_MASK 0x0000003F
+#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_START_LSB  0
+#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_MASK   0x00000FC0
+#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_LSB    6
+
+struct htt_rx_pn_ind {
+	__le16 peer_id;
+	u8 tid;
+	u8 seqno_start;
+	u8 seqno_end;
+	u8 pn_ie_count;
+	u8 reserved;
+	u8 pn_ies[0];
+} __packed;
+
+struct htt_rx_offload_msdu {
+	__le16 msdu_len;
+	__le16 peer_id;
+	u8 vdev_id;
+	u8 tid;
+	u8 fw_desc;
+	u8 payload[0];
+} __packed;
+
+struct htt_rx_offload_ind {
+	u8 reserved;
+	__le16 msdu_count;
+} __packed;
+
+struct htt_rx_in_ord_msdu_desc {
+	__le32 msdu_paddr;
+	__le16 msdu_len;
+	u8 fw_desc;
+	u8 reserved;
+} __packed;
+
+struct htt_rx_in_ord_ind {
+	u8 info;
+	__le16 peer_id;
+	u8 vdev_id;
+	u8 reserved;
+	__le16 msdu_count;
+	struct htt_rx_in_ord_msdu_desc msdu_descs[0];
+} __packed;
+
+#define HTT_RX_IN_ORD_IND_INFO_TID_MASK		0x0000001f
+#define HTT_RX_IN_ORD_IND_INFO_TID_LSB		0
+#define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK	0x00000020
+#define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_LSB	5
+#define HTT_RX_IN_ORD_IND_INFO_FRAG_MASK	0x00000040
+#define HTT_RX_IN_ORD_IND_INFO_FRAG_LSB		6
+
+/*
+ * target -> host test message definition
+ *
+ * The following field definitions describe the format of the test
+ * message sent from the target to the host.
+ * The message consists of a 4-octet header, followed by a variable
+ * number of 32-bit integer values, followed by a variable number
+ * of 8-bit character values.
+ *
+ * |31                         16|15           8|7            0|
+ * |-----------------------------------------------------------|
+ * |          num chars          |   num ints   |   msg type   |
+ * |-----------------------------------------------------------|
+ * |                           int 0                           |
+ * |-----------------------------------------------------------|
+ * |                           int 1                           |
+ * |-----------------------------------------------------------|
+ * |                            ...                            |
+ * |-----------------------------------------------------------|
+ * |    char 3    |    char 2    |    char 1    |    char 0    |
+ * |-----------------------------------------------------------|
+ * |              |              |      ...     |    char 4    |
+ * |-----------------------------------------------------------|
+ *   - MSG_TYPE
+ *     Bits 7:0
+ *     Purpose: identifies this as a test message
+ *     Value: HTT_MSG_TYPE_TEST
+ *   - NUM_INTS
+ *     Bits 15:8
+ *     Purpose: indicate how many 32-bit integers follow the message header
+ *   - NUM_CHARS
+ *     Bits 31:16
+ *     Purpose: indicate how many 8-bit characters follow the series of integers
+ */
+struct htt_rx_test {
+	u8 num_ints;
+	__le16 num_chars;
+
+	/* payload consists of 2 lists:
+	 *  a) num_ints * sizeof(__le32)
+	 *  b) num_chars * sizeof(u8) aligned to 4 bytes */
+	u8 payload[0];
+} __packed;
+
+static inline __le32 *htt_rx_test_get_ints(struct htt_rx_test *rx_test)
+{
+	return (__le32 *)rx_test->payload;
+}
+
+static inline u8 *htt_rx_test_get_chars(struct htt_rx_test *rx_test)
+{
+	return rx_test->payload + (rx_test->num_ints * sizeof(__le32));
+}
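+
+/*
+ * Illustrative sketch (not part of the original ath10k sources): consuming a
+ * test message with the two accessors above, following the layout described
+ * in the comment preceding struct htt_rx_test.
+ */
+static inline void example_htt_rx_test_walk(struct htt_rx_test *rx_test)
+{
+	__le32 *ints = htt_rx_test_get_ints(rx_test);
+	u8 *chars = htt_rx_test_get_chars(rx_test);
+	int i;
+
+	for (i = 0; i < rx_test->num_ints; i++)
+		(void)__le32_to_cpu(ints[i]);	/* the 32-bit integer values */
+
+	for (i = 0; i < __le16_to_cpu(rx_test->num_chars); i++)
+		(void)chars[i];			/* the 8-bit character values */
+}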
+
+/*
+ * target -> host packet log message
+ *
+ * The following field definitions describe the format of the packet log
+ * message sent from the target to the host.
+ * The message consists of a 4-octet header, followed by a variable number
+ * of 32-bit character values.
+ *
+ * |31          24|23          16|15           8|7            0|
+ * |-----------------------------------------------------------|
+ * |              |              |              |   msg type   |
+ * |-----------------------------------------------------------|
+ * |                        payload                            |
+ * |-----------------------------------------------------------|
+ *   - MSG_TYPE
+ *     Bits 7:0
+ *     Purpose: identifies this as a packet log message
+ *     Value: HTT_MSG_TYPE_PACKETLOG
+ */
+struct htt_pktlog_msg {
+	u8 pad[3];
+	u8 payload[0];
+} __packed;
+
+struct htt_dbg_stats_rx_reorder_stats {
+	/* Non QoS MPDUs received */
+	__le32 deliver_non_qos;
+
+	/* MPDUs received in-order */
+	__le32 deliver_in_order;
+
+	/* Flush due to reorder timer expired */
+	__le32 deliver_flush_timeout;
+
+	/* Flush due to move out of window */
+	__le32 deliver_flush_oow;
+
+	/* Flush due to DELBA */
+	__le32 deliver_flush_delba;
+
+	/* MPDUs dropped due to FCS error */
+	__le32 fcs_error;
+
+	/* MPDUs dropped due to monitor mode non-data packet */
+	__le32 mgmt_ctrl;
+
+	/* MPDUs dropped due to invalid peer */
+	__le32 invalid_peer;
+
+	/* MPDUs dropped due to duplication (non aggregation) */
+	__le32 dup_non_aggr;
+
+	/* MPDUs dropped due to processed before */
+	__le32 dup_past;
+
+	/* MPDUs dropped due to duplicate in reorder queue */
+	__le32 dup_in_reorder;
+
+	/* Reorder timeout happened */
+	__le32 reorder_timeout;
+
+	/* invalid bar ssn */
+	__le32 invalid_bar_ssn;
+
+	/* reorder reset due to bar ssn */
+	__le32 ssn_reset;
+};
+
+struct htt_dbg_stats_wal_tx_stats {
+	/* Num HTT cookies queued to dispatch list */
+	__le32 comp_queued;
+
+	/* Num HTT cookies dispatched */
+	__le32 comp_delivered;
+
+	/* Num MSDU queued to WAL */
+	__le32 msdu_enqued;
+
+	/* Num MPDUs queued to WAL */
+	__le32 mpdu_enqued;
+
+	/* Num MSDUs dropped by WMM limit */
+	__le32 wmm_drop;
+
+	/* Num Local frames queued */
+	__le32 local_enqued;
+
+	/* Num Local frames done */
+	__le32 local_freed;
+
+	/* Num queued to HW */
+	__le32 hw_queued;
+
+	/* Num PPDU reaped from HW */
+	__le32 hw_reaped;
+
+	/* Num underruns */
+	__le32 underrun;
+
+	/* Num PPDUs cleaned up in TX abort */
+	__le32 tx_abort;
+
+	/* Num MPDUs requeued by SW */
+	__le32 mpdus_requed;
+
+	/* excessive retries */
+	__le32 tx_ko;
+
+	/* data hw rate code */
+	__le32 data_rc;
+
+	/* Scheduler self triggers */
+	__le32 self_triggers;
+
+	/* frames dropped due to excessive sw retries */
+	__le32 sw_retry_failure;
+
+	/* illegal rate phy errors  */
+	__le32 illgl_rate_phy_err;
+
+	/* wal pdev continuous xretry */
+	__le32 pdev_cont_xretry;
+
+	/* wal pdev tx timeout */
+	__le32 pdev_tx_timeout;
+
+	/* wal pdev resets  */
+	__le32 pdev_resets;
+
+	__le32 phy_underrun;
+
+	/* MPDU is more than txop limit */
+	__le32 txop_ovf;
+} __packed;
+
+struct htt_dbg_stats_wal_rx_stats {
+	/* Counts any change in ring routing mid-PPDU */
+	__le32 mid_ppdu_route_change;
+
+	/* Total number of statuses processed */
+	__le32 status_rcvd;
+
+	/* Extra frags on rings 0-3 */
+	__le32 r0_frags;
+	__le32 r1_frags;
+	__le32 r2_frags;
+	__le32 r3_frags;
+
+	/* MSDUs / MPDUs delivered to HTT */
+	__le32 htt_msdus;
+	__le32 htt_mpdus;
+
+	/* MSDUs / MPDUs delivered to local stack */
+	__le32 loc_msdus;
+	__le32 loc_mpdus;
+
+	/* AMSDUs that have more MSDUs than the status ring size */
+	__le32 oversize_amsdu;
+
+	/* Number of PHY errors */
+	__le32 phy_errs;
+
+	/* Number of PHY errors drops */
+	__le32 phy_err_drop;
+
+	/* Number of mpdu errors - FCS, MIC, ENC etc. */
+	__le32 mpdu_errs;
+} __packed;
+
+struct htt_dbg_stats_wal_peer_stats {
+	__le32 dummy; /* REMOVE THIS ONCE REAL PEER STAT COUNTERS ARE ADDED */
+} __packed;
+
+struct htt_dbg_stats_wal_pdev_txrx {
+	struct htt_dbg_stats_wal_tx_stats tx_stats;
+	struct htt_dbg_stats_wal_rx_stats rx_stats;
+	struct htt_dbg_stats_wal_peer_stats peer_stats;
+} __packed;
+
+struct htt_dbg_stats_rx_rate_info {
+	__le32 mcs[10];
+	__le32 sgi[10];
+	__le32 nss[4];
+	__le32 stbc[10];
+	__le32 bw[3];
+	__le32 pream[6];
+	__le32 ldpc;
+	__le32 txbf;
+};
+
+/*
+ * htt_dbg_stats_status -
+ * present -     The requested stats have been delivered in full.
+ *               This indicates that either the stats information was contained
+ *               in its entirety within this message, or else this message
+ *               completes the delivery of the requested stats info that was
+ *               partially delivered through earlier STATS_CONF messages.
+ * partial -     The requested stats have been delivered in part.
+ *               One or more subsequent STATS_CONF messages with the same
+ *               cookie value will be sent to deliver the remainder of the
+ *               information.
+ * error -       The requested stats could not be delivered, for example due
+ *               to a shortage of memory to construct a message holding the
+ *               requested stats.
+ * invalid -     The requested stat type is either not recognized, or the
+ *               target is configured to not gather the stats type in question.
+ * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+ * series_done - This special value indicates that no further stats info
+ *               elements are present within a series of stats info elems
+ *               (within a stats upload confirmation message).
+ */
+enum htt_dbg_stats_status {
+	HTT_DBG_STATS_STATUS_PRESENT     = 0,
+	HTT_DBG_STATS_STATUS_PARTIAL     = 1,
+	HTT_DBG_STATS_STATUS_ERROR       = 2,
+	HTT_DBG_STATS_STATUS_INVALID     = 3,
+	HTT_DBG_STATS_STATUS_SERIES_DONE = 7
+};
+
+/*
+ * target -> host statistics upload
+ *
+ * The following field definitions describe the format of the HTT target
+ * to host stats upload confirmation message.
+ * The message contains a cookie echoed from the HTT host->target stats
+ * upload request, which identifies which request the confirmation is
+ * for, and a series of tag-length-value stats information elements.
+ * The tag-length header for each stats info element also includes a
+ * status field, to indicate whether the request for the stat type in
+ * question was fully met, partially met, unable to be met, or invalid
+ * (if the stat type in question is disabled in the target).
+ * A special value of all 1's in this status field is used to indicate
+ * the end of the series of stats info elements.
+ *
+ *
+ * |31                         16|15           8|7   5|4       0|
+ * |------------------------------------------------------------|
+ * |                  reserved                  |    msg type   |
+ * |------------------------------------------------------------|
+ * |                        cookie LSBs                         |
+ * |------------------------------------------------------------|
+ * |                        cookie MSBs                         |
+ * |------------------------------------------------------------|
+ * |      stats entry length     |   reserved   |  S  |stat type|
+ * |------------------------------------------------------------|
+ * |                                                            |
+ * |                  type-specific stats info                  |
+ * |                                                            |
+ * |------------------------------------------------------------|
+ * |      stats entry length     |   reserved   |  S  |stat type|
+ * |------------------------------------------------------------|
+ * |                                                            |
+ * |                  type-specific stats info                  |
+ * |                                                            |
+ * |------------------------------------------------------------|
+ * |              n/a            |   reserved   | 111 |   n/a   |
+ * |------------------------------------------------------------|
+ * Header fields:
+ *  - MSG_TYPE
+ *    Bits 7:0
+ *    Purpose: identifies this is a statistics upload confirmation message
+ *    Value: 0x9
+ *  - COOKIE_LSBS
+ *    Bits 31:0
+ *    Purpose: Provide a mechanism to match a target->host stats confirmation
+ *        message with its preceding host->target stats request message.
+ *    Value: LSBs of the opaque cookie specified by the host-side requestor
+ *  - COOKIE_MSBS
+ *    Bits 31:0
+ *    Purpose: Provide a mechanism to match a target->host stats confirmation
+ *        message with its preceding host->target stats request message.
+ *    Value: MSBs of the opaque cookie specified by the host-side requestor
+ *
+ * Stats Information Element tag-length header fields:
+ *  - STAT_TYPE
+ *    Bits 4:0
+ *    Purpose: identifies the type of statistics info held in the
+ *        following information element
+ *    Value: htt_dbg_stats_type
+ *  - STATUS
+ *    Bits 7:5
+ *    Purpose: indicate whether the requested stats are present
+ *    Value: htt_dbg_stats_status, including a special value (0x7) to mark
+ *        the completion of the stats entry series
+ *  - LENGTH
+ *    Bits 31:16
+ *    Purpose: indicate the stats information size
+ *    Value: This field specifies the number of bytes of stats information
+ *       that follows the element tag-length header.
+ *       It is expected but not required that this length is a multiple of
+ *       4 bytes.  Even if the length is not an integer multiple of 4, the
+ *       subsequent stats entry header will begin on a 4-byte aligned
+ *       boundary.
+ */
+
+#define HTT_STATS_CONF_ITEM_INFO_STAT_TYPE_MASK 0x1F
+#define HTT_STATS_CONF_ITEM_INFO_STAT_TYPE_LSB  0
+#define HTT_STATS_CONF_ITEM_INFO_STATUS_MASK    0xE0
+#define HTT_STATS_CONF_ITEM_INFO_STATUS_LSB     5
+
+struct htt_stats_conf_item {
+	union {
+		u8 info;
+		struct {
+			u8 stat_type:5; /* %HTT_DBG_STATS_ */
+			u8 status:3; /* %HTT_DBG_STATS_STATUS_ */
+		} __packed;
+	} __packed;
+	u8 pad;
+	__le16 length;
+	u8 payload[0]; /* roundup(length, 4) long */
+} __packed;
+
+struct htt_stats_conf {
+	u8 pad[3];
+	__le32 cookie_lsb;
+	__le32 cookie_msb;
+
+	/* each item has variable length! */
+	struct htt_stats_conf_item items[0];
+} __packed;
+
+static inline struct htt_stats_conf_item *htt_stats_conf_next_item(
+					const struct htt_stats_conf_item *item)
+{
+	return (void *)item + sizeof(*item) + roundup(item->length, 4);
+}
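+
+/*
+ * Illustrative sketch (not part of the original ath10k sources): walking the
+ * tag-length-value stats series with the helper above, stopping at the
+ * special SERIES_DONE status described in the format comment.
+ */
+static inline void example_htt_stats_conf_walk(struct htt_stats_conf *conf)
+{
+	const struct htt_stats_conf_item *item = conf->items;
+
+	while (item->status != HTT_DBG_STATS_STATUS_SERIES_DONE) {
+		/* item->stat_type selects the type-specific payload;
+		 * item->payload is roundup(length, 4) bytes long */
+		item = htt_stats_conf_next_item(item);
+	}
+}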
+
+/*
+ * host -> target FRAG DESCRIPTOR/MSDU_EXT DESC bank
+ *
+ * The following field definitions describe the format of the HTT host
+ * to target frag_desc/msdu_ext bank configuration message.
+ * The message contains the base address and the min and max id of the
+ * MSDU_EXT/FRAG_DESC that will be used by the HTT to map MSDU DESC and
+ * MSDU_EXT/FRAG_DESC.
+ * HTT will use the id in the HTT descriptor instead of sending the
+ * frag_desc_ptr. For QCA988X HW the firmware will use fragment_desc_ptr,
+ * but in WIFI2.0 the hardware does the mapping/translation.
+ *
+ * A total of 16 banks can be configured.
+ *
+ * This should be called before any TX has been initiated by the HTT.
+ *
+ * |31                         16|15           8|7   5|4       0|
+ * |------------------------------------------------------------|
+ * | DESC_SIZE    |  NUM_BANKS   | RES |SWP|pdev|    msg type   |
+ * |------------------------------------------------------------|
+ * |                     BANK0_BASE_ADDRESS                     |
+ * |------------------------------------------------------------|
+ * |                            ...                             |
+ * |------------------------------------------------------------|
+ * |                    BANK15_BASE_ADDRESS                     |
+ * |------------------------------------------------------------|
+ * |       BANK0_MAX_ID          |       BANK0_MIN_ID           |
+ * |------------------------------------------------------------|
+ * |                            ...                             |
+ * |------------------------------------------------------------|
+ * |       BANK15_MAX_ID         |       BANK15_MIN_ID          |
+ * |------------------------------------------------------------|
+ * Header fields:
+ *  - MSG_TYPE
+ *    Bits 7:0
+ *    Value: 0x6
+ *  - BANKx_BASE_ADDRESS
+ *    Bits 31:0
+ *    Purpose: Provide a mechanism to specify the base address of the MSDU_EXT
+ *         bank physical/bus address.
+ *  - BANKx_MIN_ID
+ *    Bits 15:0
+ *    Purpose: Provide a mechanism to specify the min index that needs to
+ *          be mapped.
+ *  - BANKx_MAX_ID
+ *    Bits 31:16
+ *    Purpose: Provide a mechanism to specify the max index that needs to
+ *          be mapped.
+ *
+ */
+struct htt_frag_desc_bank_id {
+	__le16 bank_min_id;
+	__le16 bank_max_id;
+} __packed;
+
+/* real is 16 but it wouldn't fit in the max htt message size
+ * so we use a conservatively safe value for now */
+#define HTT_FRAG_DESC_BANK_MAX 4
+
+#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_MASK 0x03
+#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_LSB  0
+#define HTT_FRAG_DESC_BANK_CFG_INFO_SWAP         (1 << 2)
+
+struct htt_frag_desc_bank_cfg {
+	u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */
+	u8 num_banks;
+	u8 desc_size;
+	__le32 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX];
+	struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX];
+} __packed;
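+
+/*
+ * Illustrative sketch (not part of the original ath10k sources): filling a
+ * single-bank configuration as described in the format comment above. The
+ * paddr, desc_sz and num_descs parameters are hypothetical inputs.
+ */
+static inline void
+example_htt_frag_desc_bank_cfg_fill(struct htt_frag_desc_bank_cfg *cfg,
+				    u32 paddr, u8 desc_sz, u16 num_descs)
+{
+	cfg->info = 0;			/* pdev id 0, no descriptor swapping */
+	cfg->num_banks = 1;
+	cfg->desc_size = desc_sz;
+	cfg->bank_base_addrs[0] = __cpu_to_le32(paddr);
+	cfg->bank_id[0].bank_min_id = __cpu_to_le16(0);
+	cfg->bank_id[0].bank_max_id = __cpu_to_le16(num_descs - 1);
+}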
+
+union htt_rx_pn_t {
+	/* WEP: 24-bit PN */
+	u32 pn24;
+
+	/* TKIP or CCMP: 48-bit PN */
+	u_int64_t pn48;
+
+	/* WAPI: 128-bit PN */
+	u_int64_t pn128[2];
+};
+
+struct htt_cmd {
+	struct htt_cmd_hdr hdr;
+	union {
+		struct htt_ver_req ver_req;
+		struct htt_mgmt_tx_desc mgmt_tx;
+		struct htt_data_tx_desc data_tx;
+		struct htt_rx_ring_setup rx_setup;
+		struct htt_stats_req stats_req;
+		struct htt_oob_sync_req oob_sync_req;
+		struct htt_aggr_conf aggr_conf;
+		struct htt_frag_desc_bank_cfg frag_desc_bank_cfg;
+	};
+} __packed;
+
+struct htt_resp {
+	struct htt_resp_hdr hdr;
+	union {
+		struct htt_ver_resp ver_resp;
+		struct htt_mgmt_tx_completion mgmt_tx_completion;
+		struct htt_data_tx_completion data_tx_completion;
+		struct htt_rx_indication rx_ind;
+		struct htt_rx_fragment_indication rx_frag_ind;
+		struct htt_rx_peer_map peer_map;
+		struct htt_rx_peer_unmap peer_unmap;
+		struct htt_rx_flush rx_flush;
+		struct htt_rx_addba rx_addba;
+		struct htt_rx_delba rx_delba;
+		struct htt_security_indication security_indication;
+		struct htt_rc_update rc_update;
+		struct htt_rx_test rx_test;
+		struct htt_pktlog_msg pktlog_msg;
+		struct htt_stats_conf stats_conf;
+		struct htt_rx_pn_ind rx_pn_ind;
+		struct htt_rx_offload_ind rx_offload_ind;
+		struct htt_rx_in_ord_ind rx_in_ord_ind;
+	};
+} __packed;
+
+/*** host side structures follow ***/
+
+struct htt_tx_done {
+	u32 msdu_id;
+	bool discard;
+	bool no_ack;
+	bool success;
+};
+
+struct htt_peer_map_event {
+	u8 vdev_id;
+	u16 peer_id;
+	u8 addr[ETH_ALEN];
+};
+
+struct htt_peer_unmap_event {
+	u16 peer_id;
+};
+
+struct ath10k_htt_txbuf {
+	struct htt_data_tx_desc_frag frags[2];
+	struct ath10k_htc_hdr htc_hdr;
+	struct htt_cmd_hdr cmd_hdr;
+	struct htt_data_tx_desc cmd_tx;
+} __packed;
+
+struct ath10k_htt {
+	struct ath10k *ar;
+	enum ath10k_htc_ep_id eid;
+
+	u8 target_version_major;
+	u8 target_version_minor;
+	struct completion target_version_received;
+	enum ath10k_fw_htt_op_version op_version;
+	u8 max_num_amsdu;
+	u8 max_num_ampdu;
+
+	const enum htt_t2h_msg_type *t2h_msg_types;
+	u32 t2h_msg_types_max;
+
+	struct {
+		/*
+		 * Ring of network buffer objects - This ring is
+		 * used exclusively by the host SW. This ring
+		 * mirrors the dev_addrs_ring that is shared
+		 * between the host SW and the MAC HW. The host SW
+		 * uses this netbufs ring to locate the network
+		 * buffer objects whose data buffers the HW has
+		 * filled.
+		 */
+		struct sk_buff **netbufs_ring;
+
+		/* This is used only with firmware supporting IN_ORD_IND.
+		 *
+		 * With Full Rx Reorder the HTT Rx Ring is more of a temporary
+		 * buffer ring from which buffer addresses are copied by the
+		 * firmware to MAC Rx ring. Firmware then delivers IN_ORD_IND
+		 * pointing to specific (re-ordered) buffers.
+		 *
+		 * FIXME: With kernel generic hashing functions there's a lot
+		 * of hash collisions for sk_buffs.
+		 */
+		bool in_ord_rx;
+		DECLARE_HASHTABLE(skb_table, 4);
+
+		/*
+		 * Ring of buffer addresses -
+		 * This ring holds the "physical" device address of the
+		 * rx buffers the host SW provides for the MAC HW to
+		 * fill.
+		 */
+		__le32 *paddrs_ring;
+
+		/*
+		 * Base address of ring, as a "physical" device address
+		 * rather than a CPU address.
+		 */
+		dma_addr_t base_paddr;
+
+		/* how many elems in the ring (power of 2) */
+		int size;
+
+		/* size - 1 */
+		unsigned size_mask;
+
+		/* how many rx buffers to keep in the ring */
+		int fill_level;
+
+		/* how many rx buffers (full+empty) are in the ring */
+		int fill_cnt;
+
+		/*
+		 * alloc_idx - where HTT SW has deposited empty buffers
+		 * This is allocated in consistent mem, so that the FW can
+		 * read this variable, and program the HW's FW_IDX reg with
+		 * the value of this shadow register.
+		 */
+		struct {
+			__le32 *vaddr;
+			dma_addr_t paddr;
+		} alloc_idx;
+
+		/* where HTT SW has processed bufs filled by rx MAC DMA */
+		struct {
+			unsigned msdu_payld;
+		} sw_rd_idx;
+
+		/*
+		 * refill_retry_timer - timer triggered when the ring is
+		 * not refilled to the level expected
+		 */
+		struct timer_list refill_retry_timer;
+
+		/* Protects access to all rx ring buffer state variables */
+		spinlock_t lock;
+	} rx_ring;
+
+	unsigned int prefetch_len;
+
+	/* Protects access to pending_tx, num_pending_tx */
+	spinlock_t tx_lock;
+	int max_num_pending_tx;
+	int num_pending_tx;
+	int num_pending_mgmt_tx;
+	struct idr pending_tx;
+	wait_queue_head_t empty_tx_wq;
+
+	/* set if host-fw communication goes haywire
+	 * used to avoid further failures */
+	bool rx_confused;
+	struct tasklet_struct rx_replenish_task;
+
+	/* This is used to group tx/rx completions separately and process them
+	 * in batches to reduce cache stalls */
+	struct tasklet_struct txrx_compl_task;
+	struct sk_buff_head tx_compl_q;
+	struct sk_buff_head rx_compl_q;
+	struct sk_buff_head rx_in_ord_compl_q;
+
+	/* rx_status template */
+	struct ieee80211_rx_status rx_status;
+
+	struct {
+		dma_addr_t paddr;
+		struct htt_msdu_ext_desc *vaddr;
+	} frag_desc;
+
+	struct {
+		dma_addr_t paddr;
+		struct ath10k_htt_txbuf *vaddr;
+	} txbuf;
+};
+
+#define RX_HTT_HDR_STATUS_LEN 64
+
+/* This structure layout is programmed via rx ring setup
+ * so that FW knows how to transfer the rx descriptor to the host.
+ * Buffers like this are placed on the rx ring. */
+struct htt_rx_desc {
+	union {
+		/* This field is filled on the host using the msdu buffer
+		 * from htt_rx_indication */
+		struct fw_rx_desc_base fw_desc;
+		u32 pad;
+	} __packed;
+	struct {
+		struct rx_attention attention;
+		struct rx_frag_info frag_info;
+		struct rx_mpdu_start mpdu_start;
+		struct rx_msdu_start msdu_start;
+		struct rx_msdu_end msdu_end;
+		struct rx_mpdu_end mpdu_end;
+		struct rx_ppdu_start ppdu_start;
+		struct rx_ppdu_end ppdu_end;
+	} __packed;
+	u8 rx_hdr_status[RX_HTT_HDR_STATUS_LEN];
+	u8 msdu_payload[0];
+};
+
+#define HTT_RX_DESC_ALIGN 8
+
+#define HTT_MAC_ADDR_LEN 6
+
+/*
+ * FIX THIS
+ * Should be: sizeof(struct htt_host_rx_desc) + max rx MSDU size,
+ * rounded up to a cache line size.
+ */
+#define HTT_RX_BUF_SIZE 1920
+#define HTT_RX_MSDU_SIZE (HTT_RX_BUF_SIZE - (int)sizeof(struct htt_rx_desc))
+
+/* Refill a bunch of RX buffers for each refill round so that FW/HW can handle
+ * aggregated traffic more nicely. */
+#define ATH10K_HTT_MAX_NUM_REFILL 16
+
+/*
+ * DMA_MAP expects the buffer to be an integral number of cache lines.
+ * Rather than checking the actual cache line size, this code makes a
+ * conservative estimate of what the cache line size could be.
+ */
+#define HTT_LOG2_MAX_CACHE_LINE_SIZE 7	/* 2^7 = 128 */
+#define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)
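+
+/*
+ * Illustrative sketch (not part of the original ath10k sources): rounding a
+ * buffer length up to the conservative cache line estimate using the mask
+ * defined above.
+ */
+static inline int example_htt_cache_line_align(int len)
+{
+	return (len + HTT_MAX_CACHE_LINE_SIZE_MASK) &
+	       ~HTT_MAX_CACHE_LINE_SIZE_MASK;
+}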
+
+/* These values are the default in most firmware revisions and apparently are
+ * a sweet spot performance-wise.
+ */
+#define ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT 3
+#define ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT 64
+
+int ath10k_htt_connect(struct ath10k_htt *htt);
+int ath10k_htt_init(struct ath10k *ar);
+int ath10k_htt_setup(struct ath10k_htt *htt);
+
+int ath10k_htt_tx_alloc(struct ath10k_htt *htt);
+void ath10k_htt_tx_free(struct ath10k_htt *htt);
+
+int ath10k_htt_rx_alloc(struct ath10k_htt *htt);
+int ath10k_htt_rx_ring_refill(struct ath10k *ar);
+void ath10k_htt_rx_free(struct ath10k_htt *htt);
+
+void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
+int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie);
+int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt);
+int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt);
+int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
+				u8 max_subfrms_ampdu,
+				u8 max_subfrms_amsdu);
+void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb);
+
+void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool limit_mgmt_desc);
+int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
+void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
+int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *);
+int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
new file mode 100644
index 0000000..6060dda
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -0,0 +1,2154 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "htc.h"
+#include "htt.h"
+#include "txrx.h"
+#include "debug.h"
+#include "trace.h"
+#include "mac.h"
+
+#include <linux/log2.h>
+
+#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
+#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
+
+/* when under memory pressure rx ring refill may fail and needs a retry */
+#define HTT_RX_RING_REFILL_RETRY_MS 50
+
+static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
+static void ath10k_htt_txrx_compl_task(unsigned long ptr);
+
+static struct sk_buff *
+ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
+{
+	struct ath10k_skb_rxcb *rxcb;
+
+	hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
+		if (rxcb->paddr == paddr)
+			return ATH10K_RXCB_SKB(rxcb);
+
+	WARN_ON_ONCE(1);
+	return NULL;
+}
+
+static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
+{
+	struct sk_buff *skb;
+	struct ath10k_skb_rxcb *rxcb;
+	struct hlist_node *n;
+	int i;
+
+	if (htt->rx_ring.in_ord_rx) {
+		hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
+			skb = ATH10K_RXCB_SKB(rxcb);
+			dma_unmap_single(htt->ar->dev, rxcb->paddr,
+					 skb->len + skb_tailroom(skb),
+					 DMA_FROM_DEVICE);
+			hash_del(&rxcb->hlist);
+			dev_kfree_skb_any(skb);
+		}
+	} else {
+		for (i = 0; i < htt->rx_ring.size; i++) {
+			skb = htt->rx_ring.netbufs_ring[i];
+			if (!skb)
+				continue;
+
+			rxcb = ATH10K_SKB_RXCB(skb);
+			dma_unmap_single(htt->ar->dev, rxcb->paddr,
+					 skb->len + skb_tailroom(skb),
+					 DMA_FROM_DEVICE);
+			dev_kfree_skb_any(skb);
+		}
+	}
+
+	htt->rx_ring.fill_cnt = 0;
+	hash_init(htt->rx_ring.skb_table);
+	memset(htt->rx_ring.netbufs_ring, 0,
+	       htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
+}
+
+static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
+{
+	struct htt_rx_desc *rx_desc;
+	struct ath10k_skb_rxcb *rxcb;
+	struct sk_buff *skb;
+	dma_addr_t paddr;
+	int ret = 0, idx;
+
+	/* The Full Rx Reorder firmware has no way of telling the host
+	 * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
+	 * To keep things simple make sure ring is always half empty. This
+	 * guarantees there'll be no replenishment overruns possible.
+	 */
+	BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);
+
+	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
+	while (num > 0) {
+		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
+		if (!skb) {
+			ret = -ENOMEM;
+			goto fail;
+		}
+
+		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
+			skb_pull(skb,
+				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
+				 skb->data);
+
+		/* Clear rx_desc attention word before posting to Rx ring */
+		rx_desc = (struct htt_rx_desc *)skb->data;
+		rx_desc->attention.flags = __cpu_to_le32(0);
+
+		paddr = dma_map_single(htt->ar->dev, skb->data,
+				       skb->len + skb_tailroom(skb),
+				       DMA_FROM_DEVICE);
+
+		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
+			dev_kfree_skb_any(skb);
+			ret = -ENOMEM;
+			goto fail;
+		}
+
+		rxcb = ATH10K_SKB_RXCB(skb);
+		rxcb->paddr = paddr;
+		htt->rx_ring.netbufs_ring[idx] = skb;
+		htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
+		htt->rx_ring.fill_cnt++;
+
+		if (htt->rx_ring.in_ord_rx) {
+			hash_add(htt->rx_ring.skb_table,
+				 &ATH10K_SKB_RXCB(skb)->hlist,
+				 (u32)paddr);
+		}
+
+		num--;
+		idx++;
+		idx &= htt->rx_ring.size_mask;
+	}
+
+fail:
+	/*
+	 * Make sure the rx buffer is updated before available buffer
+	 * index to avoid any potential rx ring corruption.
+	 */
+	mb();
+	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
+	return ret;
+}
+
+static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
+{
+	lockdep_assert_held(&htt->rx_ring.lock);
+	return __ath10k_htt_rx_ring_fill_n(htt, num);
+}
+
+static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
+{
+	int ret, num_deficit, num_to_fill;
+
+	/* Refilling the whole RX ring buffer proves to be a bad idea. The
+	 * reason is that RX may take up a significant amount of CPU cycles
+	 * and starve other tasks, e.g. TX on an ethernet device while acting
+	 * as a bridge with an ath10k wlan interface. This ended up with very
+	 * poor performance once the host system's CPU was overwhelmed with RX
+	 * on ath10k.
+	 *
+	 * By limiting the number of refills the replenishing occurs
+	 * progressively. This in turn makes use of the fact that tasklets are
+	 * processed in FIFO order. This means actual RX processing can starve
+	 * out refilling. If there aren't enough buffers on the RX ring the FW
+	 * will not report RX until it is refilled with enough buffers. This
+	 * automatically balances load wrt CPU power.
+	 *
+	 * This probably comes at a cost of lower maximum throughput but
+	 * improves the average and stability. */
+	spin_lock_bh(&htt->rx_ring.lock);
+	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
+	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
+	num_deficit -= num_to_fill;
+	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
+	if (ret == -ENOMEM) {
+		/*
+		 * Failed to fill it to the desired level -
+		 * we'll start a timer and try again next time.
+		 * As long as enough buffers are left in the ring for
+		 * another A-MPDU rx, no special recovery is needed.
+		 */
+		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
+			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
+	} else if (num_deficit > 0) {
+		tasklet_schedule(&htt->rx_replenish_task);
+	}
+	spin_unlock_bh(&htt->rx_ring.lock);
+}
+
+static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
+{
+	struct ath10k_htt *htt = (struct ath10k_htt *)arg;
+
+	ath10k_htt_rx_msdu_buff_replenish(htt);
+}
+
+int ath10k_htt_rx_ring_refill(struct ath10k *ar)
+{
+	struct ath10k_htt *htt = &ar->htt;
+	int ret;
+
+	spin_lock_bh(&htt->rx_ring.lock);
+	ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
+					      htt->rx_ring.fill_cnt));
+	spin_unlock_bh(&htt->rx_ring.lock);
+
+	if (ret)
+		ath10k_htt_rx_ring_free(htt);
+
+	return ret;
+}
+
+void ath10k_htt_rx_free(struct ath10k_htt *htt)
+{
+	del_timer_sync(&htt->rx_ring.refill_retry_timer);
+	tasklet_kill(&htt->rx_replenish_task);
+	tasklet_kill(&htt->txrx_compl_task);
+
+	skb_queue_purge(&htt->tx_compl_q);
+	skb_queue_purge(&htt->rx_compl_q);
+	skb_queue_purge(&htt->rx_in_ord_compl_q);
+
+	ath10k_htt_rx_ring_free(htt);
+
+	dma_free_coherent(htt->ar->dev,
+			  (htt->rx_ring.size *
+			   sizeof(htt->rx_ring.paddrs_ring)),
+			  htt->rx_ring.paddrs_ring,
+			  htt->rx_ring.base_paddr);
+
+	dma_free_coherent(htt->ar->dev,
+			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
+			  htt->rx_ring.alloc_idx.vaddr,
+			  htt->rx_ring.alloc_idx.paddr);
+
+	kfree(htt->rx_ring.netbufs_ring);
+}
+
+static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
+{
+	struct ath10k *ar = htt->ar;
+	int idx;
+	struct sk_buff *msdu;
+
+	lockdep_assert_held(&htt->rx_ring.lock);
+
+	if (htt->rx_ring.fill_cnt == 0) {
+		ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
+		return NULL;
+	}
+
+	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
+	msdu = htt->rx_ring.netbufs_ring[idx];
+	htt->rx_ring.netbufs_ring[idx] = NULL;
+	htt->rx_ring.paddrs_ring[idx] = 0;
+
+	idx++;
+	idx &= htt->rx_ring.size_mask;
+	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
+	htt->rx_ring.fill_cnt--;
+
+	dma_unmap_single(htt->ar->dev,
+			 ATH10K_SKB_RXCB(msdu)->paddr,
+			 msdu->len + skb_tailroom(msdu),
+			 DMA_FROM_DEVICE);
+	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
+			msdu->data, msdu->len + skb_tailroom(msdu));
+
+	return msdu;
+}
+
+/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
+static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
+				   u8 **fw_desc, int *fw_desc_len,
+				   struct sk_buff_head *amsdu)
+{
+	struct ath10k *ar = htt->ar;
+	int msdu_len, msdu_chaining = 0;
+	struct sk_buff *msdu;
+	struct htt_rx_desc *rx_desc;
+
+	lockdep_assert_held(&htt->rx_ring.lock);
+
+	for (;;) {
+		int last_msdu, msdu_len_invalid, msdu_chained;
+
+		msdu = ath10k_htt_rx_netbuf_pop(htt);
+		if (!msdu) {
+			__skb_queue_purge(amsdu);
+			return -ENOENT;
+		}
+
+		__skb_queue_tail(amsdu, msdu);
+
+		rx_desc = (struct htt_rx_desc *)msdu->data;
+
+		/* FIXME: we must report msdu payload since this is what caller
+		 *        expects now */
+		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
+		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));
+
+		/*
+		 * Sanity check - confirm the HW is finished filling in the
+		 * rx data.
+		 * If the HW and SW are working correctly, then it's guaranteed
+		 * that the HW's MAC DMA is done before this point in the SW.
+		 * To prevent the case that we handle a stale Rx descriptor,
+		 * just assert for now until we have a way to recover.
+		 */
+		if (!(__le32_to_cpu(rx_desc->attention.flags)
+				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
+			__skb_queue_purge(amsdu);
+			return -EIO;
+		}
+
+		/*
+		 * Copy the FW rx descriptor for this MSDU from the rx
+		 * indication message into the MSDU's netbuf. HL uses the
+		 * same rx indication message definition as LL, and simply
+		 * appends new info (fields from the HW rx desc, and the
+		 * MSDU payload itself). So, the offset into the rx
+		 * indication message only has to account for the standard
+		 * offset of the per-MSDU FW rx desc info within the
+		 * message, and how many bytes of the per-MSDU FW rx desc
+		 * info have already been consumed. (And the endianness of
+		 * the host, since for a big-endian host, the rx ind
+		 * message contents, including the per-MSDU rx desc bytes,
+		 * were byteswapped during upload.)
+		 */
+		if (*fw_desc_len > 0) {
+			rx_desc->fw_desc.info0 = **fw_desc;
+			/*
+			 * The target is expected to only provide the basic
+			 * per-MSDU rx descriptors. Just to be sure, verify
+			 * that the target has not attached extension data
+			 * (e.g. LRO flow ID).
+			 */
+
+			/* or more, if there's extension data */
+			(*fw_desc)++;
+			(*fw_desc_len)--;
+		} else {
+			/*
+			 * When an oversized A-MSDU happens, the FW will lose
+			 * some of the MSDU status - in this case, fewer FW
+			 * descriptors are provided than there are actual
+			 * MSDUs inside this MPDU. Mark the FW descriptors so
+			 * that the MSDUs are still delivered to the upper
+			 * stack if there is no CRC error for this MPDU.
+			 *
+			 * FIX THIS - the FW descriptors are actually for
+			 * MSDUs at the end of this A-MSDU instead of the
+			 * beginning.
+			 */
+			rx_desc->fw_desc.info0 = 0;
+		}
+
+		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
+					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
+					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
+		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
+			      RX_MSDU_START_INFO0_MSDU_LENGTH);
+		msdu_chained = rx_desc->frag_info.ring2_more_count;
+
+		if (msdu_len_invalid)
+			msdu_len = 0;
+
+		skb_trim(msdu, 0);
+		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
+		msdu_len -= msdu->len;
+
+		/* Note: Chained buffers do not contain rx descriptor */
+		while (msdu_chained--) {
+			msdu = ath10k_htt_rx_netbuf_pop(htt);
+			if (!msdu) {
+				__skb_queue_purge(amsdu);
+				return -ENOENT;
+			}
+
+			__skb_queue_tail(amsdu, msdu);
+			skb_trim(msdu, 0);
+			skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
+			msdu_len -= msdu->len;
+			msdu_chaining = 1;
+		}
+
+		last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
+				RX_MSDU_END_INFO0_LAST_MSDU;
+
+		trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
+					 sizeof(*rx_desc) - sizeof(u32));
+
+		if (last_msdu)
+			break;
+	}
+
+	if (skb_queue_empty(amsdu))
+		msdu_chaining = -1;
+
+	/*
+	 * Don't refill the ring yet.
+	 *
+	 * First, the elements popped here are still in use - it is not
+	 * safe to overwrite them until the matching call to
+	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
+	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
+	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
+	 * (something like 3 buffers). Consequently, we'll rely on the txrx
+	 * SW to tell us when it is done pulling all the PPDU's rx buffers
+	 * out of the rx ring, and then refill it just once.
+	 */
+
+	return msdu_chaining;
+}
+
+static void ath10k_htt_rx_replenish_task(unsigned long ptr)
+{
+	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
+
+	ath10k_htt_rx_msdu_buff_replenish(htt);
+}
+
+static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
+					       u32 paddr)
+{
+	struct ath10k *ar = htt->ar;
+	struct ath10k_skb_rxcb *rxcb;
+	struct sk_buff *msdu;
+
+	lockdep_assert_held(&htt->rx_ring.lock);
+
+	msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
+	if (!msdu)
+		return NULL;
+
+	rxcb = ATH10K_SKB_RXCB(msdu);
+	hash_del(&rxcb->hlist);
+	htt->rx_ring.fill_cnt--;
+
+	dma_unmap_single(htt->ar->dev, rxcb->paddr,
+			 msdu->len + skb_tailroom(msdu),
+			 DMA_FROM_DEVICE);
+	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
+			msdu->data, msdu->len + skb_tailroom(msdu));
+
+	return msdu;
+}
+
+static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
+					struct htt_rx_in_ord_ind *ev,
+					struct sk_buff_head *list)
+{
+	struct ath10k *ar = htt->ar;
+	struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs;
+	struct htt_rx_desc *rxd;
+	struct sk_buff *msdu;
+	int msdu_count;
+	bool is_offload;
+	u32 paddr;
+
+	lockdep_assert_held(&htt->rx_ring.lock);
+
+	msdu_count = __le16_to_cpu(ev->msdu_count);
+	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
+
+	while (msdu_count--) {
+		paddr = __le32_to_cpu(msdu_desc->msdu_paddr);
+
+		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
+		if (!msdu) {
+			__skb_queue_purge(list);
+			return -ENOENT;
+		}
+
+		__skb_queue_tail(list, msdu);
+
+		if (!is_offload) {
+			rxd = (void *)msdu->data;
+
+			trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
+
+			skb_put(msdu, sizeof(*rxd));
+			skb_pull(msdu, sizeof(*rxd));
+			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
+
+			if (!(__le32_to_cpu(rxd->attention.flags) &
+			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
+				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
+				return -EIO;
+			}
+		}
+
+		msdu_desc++;
+	}
+
+	return 0;
+}
+
+int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
+{
+	struct ath10k *ar = htt->ar;
+	dma_addr_t paddr;
+	void *vaddr;
+	size_t size;
+	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
+
+	htt->rx_confused = false;
+
+	/* XXX: The fill level could be changed during runtime in response to
+	 * the host processing latency. Is this really worth it?
+	 */
+	htt->rx_ring.size = HTT_RX_RING_SIZE;
+	htt->rx_ring.size_mask = htt->rx_ring.size - 1;
+	htt->rx_ring.fill_level = HTT_RX_RING_FILL_LEVEL;
+
+	if (!is_power_of_2(htt->rx_ring.size)) {
+		ath10k_warn(ar, "htt rx ring size is not power of 2\n");
+		return -EINVAL;
+	}
+
+	htt->rx_ring.netbufs_ring =
+		kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
+			GFP_KERNEL);
+	if (!htt->rx_ring.netbufs_ring)
+		goto err_netbuf;
+
+	size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);
+
+	vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_DMA);
+	if (!vaddr)
+		goto err_dma_ring;
+
+	htt->rx_ring.paddrs_ring = vaddr;
+	htt->rx_ring.base_paddr = paddr;
+
+	vaddr = dma_alloc_coherent(htt->ar->dev,
+				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
+				   &paddr, GFP_DMA);
+	if (!vaddr)
+		goto err_dma_idx;
+
+	htt->rx_ring.alloc_idx.vaddr = vaddr;
+	htt->rx_ring.alloc_idx.paddr = paddr;
+	htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
+	*htt->rx_ring.alloc_idx.vaddr = 0;
+
+	/* Initialize the Rx refill retry timer */
+	setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);
+
+	spin_lock_init(&htt->rx_ring.lock);
+
+	htt->rx_ring.fill_cnt = 0;
+	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
+	hash_init(htt->rx_ring.skb_table);
+
+	tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
+		     (unsigned long)htt);
+
+	skb_queue_head_init(&htt->tx_compl_q);
+	skb_queue_head_init(&htt->rx_compl_q);
+	skb_queue_head_init(&htt->rx_in_ord_compl_q);
+
+	tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
+		     (unsigned long)htt);
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
+		   htt->rx_ring.size, htt->rx_ring.fill_level);
+	return 0;
+
+err_dma_idx:
+	dma_free_coherent(htt->ar->dev,
+			  (htt->rx_ring.size *
+			   sizeof(htt->rx_ring.paddrs_ring)),
+			  htt->rx_ring.paddrs_ring,
+			  htt->rx_ring.base_paddr);
+err_dma_ring:
+	kfree(htt->rx_ring.netbufs_ring);
+err_netbuf:
+	return -ENOMEM;
+}
+
+static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
+					  enum htt_rx_mpdu_encrypt_type type)
+{
+	switch (type) {
+	case HTT_RX_MPDU_ENCRYPT_NONE:
+		return 0;
+	case HTT_RX_MPDU_ENCRYPT_WEP40:
+	case HTT_RX_MPDU_ENCRYPT_WEP104:
+		return IEEE80211_WEP_IV_LEN;
+	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
+	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
+		return IEEE80211_TKIP_IV_LEN;
+	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
+		return IEEE80211_CCMP_HDR_LEN;
+	case HTT_RX_MPDU_ENCRYPT_WEP128:
+	case HTT_RX_MPDU_ENCRYPT_WAPI:
+		break;
+	}
+
+	ath10k_warn(ar, "unsupported encryption type %d\n", type);
+	return 0;
+}
+
+#define MICHAEL_MIC_LEN 8
+
+static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
+					 enum htt_rx_mpdu_encrypt_type type)
+{
+	switch (type) {
+	case HTT_RX_MPDU_ENCRYPT_NONE:
+		return 0;
+	case HTT_RX_MPDU_ENCRYPT_WEP40:
+	case HTT_RX_MPDU_ENCRYPT_WEP104:
+		return IEEE80211_WEP_ICV_LEN;
+	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
+	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
+		return IEEE80211_TKIP_ICV_LEN;
+	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
+		return IEEE80211_CCMP_MIC_LEN;
+	case HTT_RX_MPDU_ENCRYPT_WEP128:
+	case HTT_RX_MPDU_ENCRYPT_WAPI:
+		break;
+	}
+
+	ath10k_warn(ar, "unsupported encryption type %d\n", type);
+	return 0;
+}
+
+struct amsdu_subframe_hdr {
+	u8 dst[ETH_ALEN];
+	u8 src[ETH_ALEN];
+	__be16 len;
+} __packed;
+
+#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)
+
+static void ath10k_htt_rx_h_rates(struct ath10k *ar,
+				  struct ieee80211_rx_status *status,
+				  struct htt_rx_desc *rxd)
+{
+	struct ieee80211_supported_band *sband;
+	u8 cck, rate, bw, sgi, mcs, nss;
+	u8 preamble = 0;
+	u8 group_id;
+	u32 info1, info2, info3;
+
+	info1 = __le32_to_cpu(rxd->ppdu_start.info1);
+	info2 = __le32_to_cpu(rxd->ppdu_start.info2);
+	info3 = __le32_to_cpu(rxd->ppdu_start.info3);
+
+	preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);
+
+	switch (preamble) {
+	case HTT_RX_LEGACY:
+		/* The band is required to get the legacy rate index. Since the
+		 * band can't be undefined, check that freq is non-zero.
+		 */
+		if (!status->freq)
+			return;
+
+		cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
+		rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
+		rate &= ~RX_PPDU_START_RATE_FLAG;
+
+		sband = &ar->mac.sbands[status->band];
+		status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate);
+		break;
+	case HTT_RX_HT:
+	case HTT_RX_HT_WITH_TXBF:
+		/* HT-SIG - Table 20-11 in info2 and info3 */
+		mcs = info2 & 0x1F;
+		nss = mcs >> 3;
+		bw = (info2 >> 7) & 1;
+		sgi = (info3 >> 7) & 1;
+
+		status->rate_idx = mcs;
+		status->flag |= RX_FLAG_HT;
+		if (sgi)
+			status->flag |= RX_FLAG_SHORT_GI;
+		if (bw)
+			status->flag |= RX_FLAG_40MHZ;
+		break;
+	case HTT_RX_VHT:
+	case HTT_RX_VHT_WITH_TXBF:
+		/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
+		   TODO check this */
+		bw = info2 & 3;
+		sgi = info3 & 1;
+		group_id = (info2 >> 4) & 0x3F;
+
+		if (GROUP_ID_IS_SU_MIMO(group_id)) {
+			mcs = (info3 >> 4) & 0x0F;
+			nss = ((info2 >> 10) & 0x07) + 1;
+		} else {
+			/* Hardware doesn't decode VHT-SIG-B into Rx descriptor
+			 * so it's impossible to decode MCS. Also since
+			 * firmware consumes Group Id Management frames host
+			 * has no knowledge regarding group/user position
+			 * mapping so it's impossible to pick the correct Nsts
+			 * from VHT-SIG-A1.
+			 *
+			 * Bandwidth and SGI are valid so report the rateinfo
+			 * on best-effort basis.
+			 */
+			mcs = 0;
+			nss = 1;
+		}
+
+		if (mcs > 0x09) {
+			ath10k_warn(ar, "invalid MCS received %u\n", mcs);
+			ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
+				    __le32_to_cpu(rxd->attention.flags),
+				    __le32_to_cpu(rxd->mpdu_start.info0),
+				    __le32_to_cpu(rxd->mpdu_start.info1),
+				    __le32_to_cpu(rxd->msdu_start.common.info0),
+				    __le32_to_cpu(rxd->msdu_start.common.info1),
+				    rxd->ppdu_start.info0,
+				    __le32_to_cpu(rxd->ppdu_start.info1),
+				    __le32_to_cpu(rxd->ppdu_start.info2),
+				    __le32_to_cpu(rxd->ppdu_start.info3),
+				    __le32_to_cpu(rxd->ppdu_start.info4));
+
+			ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
+				    __le32_to_cpu(rxd->msdu_end.common.info0),
+				    __le32_to_cpu(rxd->mpdu_end.info0));
+
+			ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
+					"rx desc msdu payload: ",
+					rxd->msdu_payload, 50);
+		}
+
+		status->rate_idx = mcs;
+		status->vht_nss = nss;
+
+		if (sgi)
+			status->flag |= RX_FLAG_SHORT_GI;
+
+		switch (bw) {
+		/* 20MHZ */
+		case 0:
+			break;
+		/* 40MHZ */
+		case 1:
+			status->flag |= RX_FLAG_40MHZ;
+			break;
+		/* 80MHZ */
+		case 2:
+			status->vht_flag |= RX_VHT_FLAG_80MHZ;
+		}
+
+		status->flag |= RX_FLAG_VHT;
+		break;
+	default:
+		break;
+	}
+}
+
+static struct ieee80211_channel *
+ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
+{
+	struct ath10k_peer *peer;
+	struct ath10k_vif *arvif;
+	struct cfg80211_chan_def def;
+	u16 peer_id;
+
+	lockdep_assert_held(&ar->data_lock);
+
+	if (!rxd)
+		return NULL;
+
+	if (rxd->attention.flags &
+	    __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
+		return NULL;
+
+	if (!(rxd->msdu_end.common.info0 &
+	      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
+		return NULL;
+
+	peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+		     RX_MPDU_START_INFO0_PEER_IDX);
+
+	peer = ath10k_peer_find_by_id(ar, peer_id);
+	if (!peer)
+		return NULL;
+
+	arvif = ath10k_get_arvif(ar, peer->vdev_id);
+	if (WARN_ON_ONCE(!arvif))
+		return NULL;
+
+	if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
+		return NULL;
+
+	return def.chan;
+}
+
+static struct ieee80211_channel *
+ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
+{
+	struct ath10k_vif *arvif;
+	struct cfg80211_chan_def def;
+
+	lockdep_assert_held(&ar->data_lock);
+
+	list_for_each_entry(arvif, &ar->arvifs, list) {
+		if (arvif->vdev_id == vdev_id &&
+		    ath10k_mac_vif_chan(arvif->vif, &def) == 0)
+			return def.chan;
+	}
+
+	return NULL;
+}
+
+static void
+ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
+			      struct ieee80211_chanctx_conf *conf,
+			      void *data)
+{
+	struct cfg80211_chan_def *def = data;
+
+	*def = conf->def;
+}
+
+static struct ieee80211_channel *
+ath10k_htt_rx_h_any_channel(struct ath10k *ar)
+{
+	struct cfg80211_chan_def def = {};
+
+	ieee80211_iter_chan_contexts_atomic(ar->hw,
+					    ath10k_htt_rx_h_any_chan_iter,
+					    &def);
+
+	return def.chan;
+}
+
+static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
+				    struct ieee80211_rx_status *status,
+				    struct htt_rx_desc *rxd,
+				    u32 vdev_id)
+{
+	struct ieee80211_channel *ch;
+
+	spin_lock_bh(&ar->data_lock);
+	ch = ar->scan_channel;
+	if (!ch)
+		ch = ar->rx_channel;
+	if (!ch)
+		ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
+	if (!ch)
+		ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
+	if (!ch)
+		ch = ath10k_htt_rx_h_any_channel(ar);
+	spin_unlock_bh(&ar->data_lock);
+
+	if (!ch)
+		return false;
+
+	status->band = ch->band;
+	status->freq = ch->center_freq;
+
+	return true;
+}
+
+static void ath10k_htt_rx_h_signal(struct ath10k *ar,
+				   struct ieee80211_rx_status *status,
+				   struct htt_rx_desc *rxd)
+{
+	/* FIXME: Get real NF */
+	status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
+			 rxd->ppdu_start.rssi_comb;
+	status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
+}
+
+static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
+				    struct ieee80211_rx_status *status,
+				    struct htt_rx_desc *rxd)
+{
+	/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
+	 * means all prior MSDUs in a PPDU are reported to mac80211 without the
+	 * TSF. Is it worth holding frames until end of PPDU is known?
+	 *
+	 * FIXME: Can we get/compute 64bit TSF?
+	 */
+	status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
+	status->flag |= RX_FLAG_MACTIME_END;
+}
+
+static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
+				 struct sk_buff_head *amsdu,
+				 struct ieee80211_rx_status *status,
+				 u32 vdev_id)
+{
+	struct sk_buff *first;
+	struct htt_rx_desc *rxd;
+	bool is_first_ppdu;
+	bool is_last_ppdu;
+
+	if (skb_queue_empty(amsdu))
+		return;
+
+	first = skb_peek(amsdu);
+	rxd = (void *)first->data - sizeof(*rxd);
+
+	is_first_ppdu = !!(rxd->attention.flags &
+			   __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
+	is_last_ppdu = !!(rxd->attention.flags &
+			  __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));
+
+	if (is_first_ppdu) {
+		/* New PPDU starts so clear out the old per-PPDU status. */
+		status->freq = 0;
+		status->rate_idx = 0;
+		status->vht_nss = 0;
+		status->vht_flag &= ~RX_VHT_FLAG_80MHZ;
+		status->flag &= ~(RX_FLAG_HT |
+				  RX_FLAG_VHT |
+				  RX_FLAG_SHORT_GI |
+				  RX_FLAG_40MHZ |
+				  RX_FLAG_MACTIME_END);
+		status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+
+		ath10k_htt_rx_h_signal(ar, status, rxd);
+		ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
+		ath10k_htt_rx_h_rates(ar, status, rxd);
+	}
+
+	if (is_last_ppdu)
+		ath10k_htt_rx_h_mactime(ar, status, rxd);
+}
+
+static const char * const tid_to_ac[] = {
+	"BE",
+	"BK",
+	"BK",
+	"BE",
+	"VI",
+	"VI",
+	"VO",
+	"VO",
+};
+
+static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
+{
+	u8 *qc;
+	int tid;
+
+	if (!ieee80211_is_data_qos(hdr->frame_control))
+		return "";
+
+	qc = ieee80211_get_qos_ctl(hdr);
+	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
+	if (tid < 8)
+		snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
+	else
+		snprintf(out, size, "tid %d", tid);
+
+	return out;
+}
+
+static void ath10k_process_rx(struct ath10k *ar,
+			      struct ieee80211_rx_status *rx_status,
+			      struct sk_buff *skb)
+{
+	struct ieee80211_rx_status *status;
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	char tid[32];
+
+	status = IEEE80211_SKB_RXCB(skb);
+	*status = *rx_status;
+
+	ath10k_dbg(ar, ATH10K_DBG_DATA,
+		   "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+		   skb,
+		   skb->len,
+		   ieee80211_get_SA(hdr),
+		   ath10k_get_tid(hdr, tid, sizeof(tid)),
+		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
+							"mcast" : "ucast",
+		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
+		   status->flag == 0 ? "legacy" : "",
+		   status->flag & RX_FLAG_HT ? "ht" : "",
+		   status->flag & RX_FLAG_VHT ? "vht" : "",
+		   status->flag & RX_FLAG_40MHZ ? "40" : "",
+		   status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
+		   status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
+		   status->rate_idx,
+		   status->vht_nss,
+		   status->freq,
+		   status->band, status->flag,
+		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
+		   !!(status->flag & RX_FLAG_MMIC_ERROR),
+		   !!(status->flag & RX_FLAG_AMSDU_MORE));
+	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
+			skb->data, skb->len);
+	trace_ath10k_rx_hdr(ar, skb->data, skb->len);
+	trace_ath10k_rx_payload(ar, skb->data, skb->len);
+
+	ieee80211_rx(ar->hw, skb);
+}
+
+static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
+				      struct ieee80211_hdr *hdr)
+{
+	int len = ieee80211_hdrlen(hdr->frame_control);
+
+	if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
+		      ar->fw_features))
+		len = round_up(len, 4);
+
+	return len;
+}
+
+static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
+					struct sk_buff *msdu,
+					struct ieee80211_rx_status *status,
+					enum htt_rx_mpdu_encrypt_type enctype,
+					bool is_decrypted)
+{
+	struct ieee80211_hdr *hdr;
+	struct htt_rx_desc *rxd;
+	size_t hdr_len;
+	size_t crypto_len;
+	bool is_first;
+	bool is_last;
+
+	rxd = (void *)msdu->data - sizeof(*rxd);
+	is_first = !!(rxd->msdu_end.common.info0 &
+		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
+	is_last = !!(rxd->msdu_end.common.info0 &
+		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
+
+	/* Delivered decapped frame:
+	 * [802.11 header]
+	 * [crypto param] <-- can be trimmed if !fcs_err &&
+	 *                    !decrypt_err && !peer_idx_invalid
+	 * [amsdu header] <-- only if A-MSDU
+	 * [rfc1042/llc]
+	 * [payload]
+	 * [FCS] <-- at end, needs to be trimmed
+	 */
+
+	/* This probably shouldn't happen but warn just in case */
+	if (unlikely(WARN_ON_ONCE(!is_first)))
+		return;
+
+	/* This probably shouldn't happen but warn just in case */
+	if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
+		return;
+
+	skb_trim(msdu, msdu->len - FCS_LEN);
+
+	/* In most cases this will be true for sniffed frames. It makes sense
+	 * to deliver them as-is without stripping the crypto param. This is
+	 * necessary for software based decryption.
+	 *
+	 * If there's no error then the frame is decrypted. At least that is
+	 * the case for frames that come in via fragmented rx indication.
+	 */
+	if (!is_decrypted)
+		return;
+
+	/* The payload is decrypted so strip crypto params. Start from tail
+	 * since hdr is used to compute some stuff.
+	 */
+
+	hdr = (void *)msdu->data;
+
+	/* Tail */
+	skb_trim(msdu, msdu->len - ath10k_htt_rx_crypto_tail_len(ar, enctype));
+
+	/* MMIC - strip the 8 byte Michael MIC used by TKIP */
+	if (!ieee80211_has_morefrags(hdr->frame_control) &&
+	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
+		skb_trim(msdu, msdu->len - 8);
+
+	/* Head */
+	hdr_len = ieee80211_hdrlen(hdr->frame_control);
+	crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
+
+	memmove((void *)msdu->data + crypto_len,
+		(void *)msdu->data, hdr_len);
+	skb_pull(msdu, crypto_len);
+}
+
+static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
+					  struct sk_buff *msdu,
+					  struct ieee80211_rx_status *status,
+					  const u8 first_hdr[64])
+{
+	struct ieee80211_hdr *hdr;
+	size_t hdr_len;
+	u8 da[ETH_ALEN];
+	u8 sa[ETH_ALEN];
+
+	/* Delivered decapped frame:
+	 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
+	 * [rfc1042/llc]
+	 *
+	 * Note: The nwifi header doesn't have QoS Control and is
+	 * (always?) a 3addr frame.
+	 *
+	 * Note2: There's no A-MSDU subframe header. Even if it's part
+	 * of an A-MSDU.
+	 */
+
+	/* pull decapped header and copy SA & DA */
+	hdr = (struct ieee80211_hdr *)msdu->data;
+	hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
+	ether_addr_copy(da, ieee80211_get_DA(hdr));
+	ether_addr_copy(sa, ieee80211_get_SA(hdr));
+	skb_pull(msdu, hdr_len);
+
+	/* push original 802.11 header */
+	hdr = (struct ieee80211_hdr *)first_hdr;
+	hdr_len = ieee80211_hdrlen(hdr->frame_control);
+	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
+
+	/* original 802.11 header has a different DA and in
+	 * case of 4addr it may also have different SA
+	 */
+	hdr = (struct ieee80211_hdr *)msdu->data;
+	ether_addr_copy(ieee80211_get_DA(hdr), da);
+	ether_addr_copy(ieee80211_get_SA(hdr), sa);
+}
+
+static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
+					  struct sk_buff *msdu,
+					  enum htt_rx_mpdu_encrypt_type enctype)
+{
+	struct ieee80211_hdr *hdr;
+	struct htt_rx_desc *rxd;
+	size_t hdr_len, crypto_len;
+	void *rfc1042;
+	bool is_first, is_last, is_amsdu;
+
+	rxd = (void *)msdu->data - sizeof(*rxd);
+	hdr = (void *)rxd->rx_hdr_status;
+
+	is_first = !!(rxd->msdu_end.common.info0 &
+		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
+	is_last = !!(rxd->msdu_end.common.info0 &
+		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
+	is_amsdu = !(is_first && is_last);
+
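+	/* rx_hdr_status holds the start of the undecapped frame: the 802.11
+	 * header and crypto param (each padded to 4 bytes), an A-MSDU
+	 * subframe header if present, and then the rfc1042/llc header that
+	 * this function returns a pointer to.
+	 */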
+	rfc1042 = hdr;
+
+	if (is_first) {
+		hdr_len = ieee80211_hdrlen(hdr->frame_control);
+		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
+
+		rfc1042 += round_up(hdr_len, 4) +
+			   round_up(crypto_len, 4);
+	}
+
+	if (is_amsdu)
+		rfc1042 += sizeof(struct amsdu_subframe_hdr);
+
+	return rfc1042;
+}
+
+static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
+					struct sk_buff *msdu,
+					struct ieee80211_rx_status *status,
+					const u8 first_hdr[64],
+					enum htt_rx_mpdu_encrypt_type enctype)
+{
+	struct ieee80211_hdr *hdr;
+	struct ethhdr *eth;
+	size_t hdr_len;
+	void *rfc1042;
+	u8 da[ETH_ALEN];
+	u8 sa[ETH_ALEN];
+
+	/* Delivered decapped frame:
+	 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
+	 * [payload]
+	 */
+
+	rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
+	if (WARN_ON_ONCE(!rfc1042))
+		return;
+
+	/* pull decapped header and copy SA & DA */
+	eth = (struct ethhdr *)msdu->data;
+	ether_addr_copy(da, eth->h_dest);
+	ether_addr_copy(sa, eth->h_source);
+	skb_pull(msdu, sizeof(struct ethhdr));
+
+	/* push rfc1042/llc/snap */
+	memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
+	       sizeof(struct rfc1042_hdr));
+
+	/* push original 802.11 header */
+	hdr = (struct ieee80211_hdr *)first_hdr;
+	hdr_len = ieee80211_hdrlen(hdr->frame_control);
+	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
+
+	/* original 802.11 header has a different DA and in
+	 * case of 4addr it may also have different SA
+	 */
+	hdr = (struct ieee80211_hdr *)msdu->data;
+	ether_addr_copy(ieee80211_get_DA(hdr), da);
+	ether_addr_copy(ieee80211_get_SA(hdr), sa);
+}
+
+static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
+					 struct sk_buff *msdu,
+					 struct ieee80211_rx_status *status,
+					 const u8 first_hdr[64])
+{
+	struct ieee80211_hdr *hdr;
+	size_t hdr_len;
+
+	/* Delivered decapped frame:
+	 * [amsdu header] <-- replaced with 802.11 hdr
+	 * [rfc1042/llc]
+	 * [payload]
+	 */
+
+	skb_pull(msdu, sizeof(struct amsdu_subframe_hdr));
+
+	hdr = (struct ieee80211_hdr *)first_hdr;
+	hdr_len = ieee80211_hdrlen(hdr->frame_control);
+	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
+}
+
+static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
+				    struct sk_buff *msdu,
+				    struct ieee80211_rx_status *status,
+				    u8 first_hdr[64],
+				    enum htt_rx_mpdu_encrypt_type enctype,
+				    bool is_decrypted)
+{
+	struct htt_rx_desc *rxd;
+	enum rx_msdu_decap_format decap;
+
+	/* First msdu's decapped header:
+	 * [802.11 header] <-- padded to 4 bytes long
+	 * [crypto param] <-- padded to 4 bytes long
+	 * [amsdu header] <-- only if A-MSDU
+	 * [rfc1042/llc]
+	 *
+	 * Other (2nd, 3rd, ..) msdu's decapped header:
+	 * [amsdu header] <-- only if A-MSDU
+	 * [rfc1042/llc]
+	 */
+
+	rxd = (void *)msdu->data - sizeof(*rxd);
+	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
+		   RX_MSDU_START_INFO1_DECAP_FORMAT);
+
+	switch (decap) {
+	case RX_MSDU_DECAP_RAW:
+		ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
+					    is_decrypted);
+		break;
+	case RX_MSDU_DECAP_NATIVE_WIFI:
+		ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr);
+		break;
+	case RX_MSDU_DECAP_ETHERNET2_DIX:
+		ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
+		break;
+	case RX_MSDU_DECAP_8023_SNAP_LLC:
+		ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr);
+		break;
+	}
+}
+
+static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
+{
+	struct htt_rx_desc *rxd;
+	u32 flags, info;
+	bool is_ip4, is_ip6;
+	bool is_tcp, is_udp;
+	bool ip_csum_ok, tcpudp_csum_ok;
+
+	rxd = (void *)skb->data - sizeof(*rxd);
+	flags = __le32_to_cpu(rxd->attention.flags);
+	info = __le32_to_cpu(rxd->msdu_start.common.info1);
+
+	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
+	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
+	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
+	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
+	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
+	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);
+
+	if (!is_ip4 && !is_ip6)
+		return CHECKSUM_NONE;
+	if (!is_tcp && !is_udp)
+		return CHECKSUM_NONE;
+	if (!ip_csum_ok)
+		return CHECKSUM_NONE;
+	if (!tcpudp_csum_ok)
+		return CHECKSUM_NONE;
+
+	return CHECKSUM_UNNECESSARY;
+}
+
+static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
+{
+	msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
+}
+
+static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
+				 struct sk_buff_head *amsdu,
+				 struct ieee80211_rx_status *status)
+{
+	struct sk_buff *first;
+	struct sk_buff *last;
+	struct sk_buff *msdu;
+	struct htt_rx_desc *rxd;
+	struct ieee80211_hdr *hdr;
+	enum htt_rx_mpdu_encrypt_type enctype;
+	u8 first_hdr[64];
+	u8 *qos;
+	size_t hdr_len;
+	bool has_fcs_err;
+	bool has_crypto_err;
+	bool has_tkip_err;
+	bool has_peer_idx_invalid;
+	bool is_decrypted;
+	u32 attention;
+
+	if (skb_queue_empty(amsdu))
+		return;
+
+	first = skb_peek(amsdu);
+	rxd = (void *)first->data - sizeof(*rxd);
+
+	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+
+	/* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
+	 * decapped header. It'll be used for undecapping of each MSDU.
+	 */
+	hdr = (void *)rxd->rx_hdr_status;
+	hdr_len = ieee80211_hdrlen(hdr->frame_control);
+	memcpy(first_hdr, hdr, hdr_len);
+
+	/* Each A-MSDU subframe will use the original header as the base and be
+	 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
+	 */
+	hdr = (void *)first_hdr;
+	qos = ieee80211_get_qos_ctl(hdr);
+	qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+
+	/* Some attention flags are valid only in the last MSDU. */
+	last = skb_peek_tail(amsdu);
+	rxd = (void *)last->data - sizeof(*rxd);
+	attention = __le32_to_cpu(rxd->attention.flags);
+
+	has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
+	has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
+	has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
+	has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);
+
+	/* Note: If hardware captures an encrypted frame that it can't decrypt,
+	 * e.g. due to fcs error, missing peer or invalid key data it will
+	 * report the frame as raw.
+	 */
+	is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
+			!has_fcs_err &&
+			!has_crypto_err &&
+			!has_peer_idx_invalid);
+
+	/* Clear per-MPDU flags while leaving per-PPDU flags intact. */
+	status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
+			  RX_FLAG_MMIC_ERROR |
+			  RX_FLAG_DECRYPTED |
+			  RX_FLAG_IV_STRIPPED |
+			  RX_FLAG_MMIC_STRIPPED);
+
+	if (has_fcs_err)
+		status->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+	if (has_tkip_err)
+		status->flag |= RX_FLAG_MMIC_ERROR;
+
+	if (is_decrypted)
+		status->flag |= RX_FLAG_DECRYPTED |
+				RX_FLAG_IV_STRIPPED |
+				RX_FLAG_MMIC_STRIPPED;
+
+	skb_queue_walk(amsdu, msdu) {
+		ath10k_htt_rx_h_csum_offload(msdu);
+		ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
+					is_decrypted);
+
+		/* Undecapping involves copying the original 802.11 header back
+		 * to sk_buff. If frame is protected and hardware has decrypted
+		 * it then remove the protected bit.
+		 */
+		if (!is_decrypted)
+			continue;
+
+		hdr = (void *)msdu->data;
+		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+	}
+}
+
+static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
+				    struct sk_buff_head *amsdu,
+				    struct ieee80211_rx_status *status)
+{
+	struct sk_buff *msdu;
+
+	while ((msdu = __skb_dequeue(amsdu))) {
+		/* Setup per-MSDU flags */
+		if (skb_queue_empty(amsdu))
+			status->flag &= ~RX_FLAG_AMSDU_MORE;
+		else
+			status->flag |= RX_FLAG_AMSDU_MORE;
+
+		ath10k_process_rx(ar, status, msdu);
+	}
+}
+
+static int ath10k_unchain_msdu(struct sk_buff_head *amsdu)
+{
+	struct sk_buff *skb, *first;
+	int space;
+	int total_len = 0;
+
+	/* TODO: This could perhaps be optimized by using skb_try_coalesce or
+	 * a similar method to decrease copying, or maybe by getting mac80211
+	 * to provide a way to just receive a list of skbs?
+	 */
+
+	first = __skb_dequeue(amsdu);
+
+	/* Allocate total length all at once. */
+	skb_queue_walk(amsdu, skb)
+		total_len += skb->len;
+
+	space = total_len - skb_tailroom(first);
+	if ((space > 0) &&
+	    (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
+		/* TODO:  bump some rx-oom error stat */
+		/* put it back together so we can free the
+		 * whole list at once.
+		 */
+		__skb_queue_head(amsdu, first);
+		return -1;
+	}
+
+	/* Walk list again, copying contents into
+	 * msdu_head
+	 */
+	while ((skb = __skb_dequeue(amsdu))) {
+		skb_copy_from_linear_data(skb, skb_put(first, skb->len),
+					  skb->len);
+		dev_kfree_skb_any(skb);
+	}
+
+	__skb_queue_head(amsdu, first);
+	return 0;
+}
+
+static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
+				    struct sk_buff_head *amsdu,
+				    bool chained)
+{
+	struct sk_buff *first;
+	struct htt_rx_desc *rxd;
+	enum rx_msdu_decap_format decap;
+
+	first = skb_peek(amsdu);
+	rxd = (void *)first->data - sizeof(*rxd);
+	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
+		   RX_MSDU_START_INFO1_DECAP_FORMAT);
+
+	if (!chained)
+		return;
+
+	/* FIXME: Current unchaining logic can only handle simple case of raw
+	 * msdu chaining. If decapping is other than raw the chaining may be
+	 * more complex and this isn't handled by the current code. Don't even
+	 * try re-constructing such frames - it'll be pretty much garbage.
+	 */
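+	/* A simple raw chain is expected to span exactly 1 +
+	 * frag_info.ring2_more_count rx buffers; anything else is dropped.
+	 */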
+	if (decap != RX_MSDU_DECAP_RAW ||
+	    skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
+		__skb_queue_purge(amsdu);
+		return;
+	}
+
+	ath10k_unchain_msdu(amsdu);
+}
+
+static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
+					struct sk_buff_head *amsdu,
+					struct ieee80211_rx_status *rx_status)
+{
+	struct sk_buff *msdu;
+	struct htt_rx_desc *rxd;
+	bool is_mgmt;
+	bool has_fcs_err;
+
+	msdu = skb_peek(amsdu);
+	rxd = (void *)msdu->data - sizeof(*rxd);
+
+	/* FIXME: It might be a good idea to do some fuzzy-testing to drop
+	 * invalid/dangerous frames.
+	 */
+
+	if (!rx_status->freq) {
+		ath10k_warn(ar, "no channel configured; ignoring frame(s)!\n");
+		return false;
+	}
+
+	is_mgmt = !!(rxd->attention.flags &
+		     __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
+	has_fcs_err = !!(rxd->attention.flags &
+			 __cpu_to_le32(RX_ATTENTION_FLAGS_FCS_ERR));
+
+	/* Management frames are handled via WMI events. The advantage of such
+	 * an approach is that the channel is explicitly provided in WMI
+	 * events whereas HTT doesn't provide channel information for received
+	 * frames.
+	 *
+	 * However some firmware revisions don't report corrupted frames via
+	 * WMI so don't drop them.
+	 */
+	if (is_mgmt && !has_fcs_err) {
+		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
+		return false;
+	}
+
+	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
+		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
+		return false;
+	}
+
+	return true;
+}
+
+static void ath10k_htt_rx_h_filter(struct ath10k *ar,
+				   struct sk_buff_head *amsdu,
+				   struct ieee80211_rx_status *rx_status)
+{
+	if (skb_queue_empty(amsdu))
+		return;
+
+	if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
+		return;
+
+	__skb_queue_purge(amsdu);
+}
+
+static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
+				  struct htt_rx_indication *rx)
+{
+	struct ath10k *ar = htt->ar;
+	struct ieee80211_rx_status *rx_status = &htt->rx_status;
+	struct htt_rx_indication_mpdu_range *mpdu_ranges;
+	struct sk_buff_head amsdu;
+	int num_mpdu_ranges;
+	int fw_desc_len;
+	u8 *fw_desc;
+	int i, ret, mpdu_count = 0;
+
+	lockdep_assert_held(&htt->rx_ring.lock);
+
+	if (htt->rx_confused)
+		return;
+
+	fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
+	fw_desc = (u8 *)&rx->fw_desc;
+
+	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
+			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
+	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
+
+	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
+			rx, sizeof(*rx) +
+			(sizeof(struct htt_rx_indication_mpdu_range) *
+				num_mpdu_ranges));
+
+	for (i = 0; i < num_mpdu_ranges; i++)
+		mpdu_count += mpdu_ranges[i].mpdu_count;
+
+	while (mpdu_count--) {
+		__skb_queue_head_init(&amsdu);
+		ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc,
+					      &fw_desc_len, &amsdu);
+		if (ret < 0) {
+			ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
+			__skb_queue_purge(&amsdu);
+			/* FIXME: It's probably a good idea to reboot the
+			 * device instead of leaving it inoperable.
+			 */
+			htt->rx_confused = true;
+			break;
+		}
+
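+		/* 0xffff is not a valid vdev id, so the channel is resolved
+		 * from the other sources ath10k_htt_rx_h_channel() knows
+		 * about.
+		 */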
+		ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
+		ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
+		ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
+		ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
+		ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
+	}
+
+	tasklet_schedule(&htt->rx_replenish_task);
+}
+
+static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
+				       struct htt_rx_fragment_indication *frag)
+{
+	struct ath10k *ar = htt->ar;
+	struct ieee80211_rx_status *rx_status = &htt->rx_status;
+	struct sk_buff_head amsdu;
+	int ret;
+	u8 *fw_desc;
+	int fw_desc_len;
+
+	fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
+	fw_desc = (u8 *)frag->fw_msdu_rx_desc;
+
+	__skb_queue_head_init(&amsdu);
+
+	spin_lock_bh(&htt->rx_ring.lock);
+	ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
+				      &amsdu);
+	spin_unlock_bh(&htt->rx_ring.lock);
+
+	tasklet_schedule(&htt->rx_replenish_task);
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
+
+	if (ret) {
+		ath10k_warn(ar, "failed to pop amsdu from htt rx ring for fragmented rx %d\n",
+			    ret);
+		__skb_queue_purge(&amsdu);
+		return;
+	}
+
+	if (skb_queue_len(&amsdu) != 1) {
+		ath10k_warn(ar, "failed to pop frag amsdu: too many msdus\n");
+		__skb_queue_purge(&amsdu);
+		return;
+	}
+
+	ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
+	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
+	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
+	ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
+
+	if (fw_desc_len > 0) {
+		ath10k_dbg(ar, ATH10K_DBG_HTT,
+			   "expecting more fragmented rx in one indication %d\n",
+			   fw_desc_len);
+	}
+}
+
+static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
+				       struct sk_buff *skb)
+{
+	struct ath10k_htt *htt = &ar->htt;
+	struct htt_resp *resp = (struct htt_resp *)skb->data;
+	struct htt_tx_done tx_done = {};
+	int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
+	__le16 msdu_id;
+	int i;
+
+	switch (status) {
+	case HTT_DATA_TX_STATUS_NO_ACK:
+		tx_done.no_ack = true;
+		break;
+	case HTT_DATA_TX_STATUS_OK:
+		tx_done.success = true;
+		break;
+	case HTT_DATA_TX_STATUS_DISCARD:
+	case HTT_DATA_TX_STATUS_POSTPONE:
+	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
+		tx_done.discard = true;
+		break;
+	default:
+		ath10k_warn(ar, "unhandled tx completion status %d\n", status);
+		tx_done.discard = true;
+		break;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
+		   resp->data_tx_completion.num_msdus);
+
+	for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
+		msdu_id = resp->data_tx_completion.msdus[i];
+		tx_done.msdu_id = __le16_to_cpu(msdu_id);
+		ath10k_txrx_tx_unref(htt, &tx_done);
+	}
+}
+
+static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
+{
+	struct htt_rx_addba *ev = &resp->rx_addba;
+	struct ath10k_peer *peer;
+	struct ath10k_vif *arvif;
+	u16 info0, tid, peer_id;
+
+	info0 = __le16_to_cpu(ev->info0);
+	tid = MS(info0, HTT_RX_BA_INFO0_TID);
+	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT,
+		   "htt rx addba tid %hu peer_id %hu size %hhu\n",
+		   tid, peer_id, ev->window_size);
+
+	spin_lock_bh(&ar->data_lock);
+	peer = ath10k_peer_find_by_id(ar, peer_id);
+	if (!peer) {
+		ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
+			    peer_id);
+		spin_unlock_bh(&ar->data_lock);
+		return;
+	}
+
+	arvif = ath10k_get_arvif(ar, peer->vdev_id);
+	if (!arvif) {
+		ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
+			    peer->vdev_id);
+		spin_unlock_bh(&ar->data_lock);
+		return;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT,
+		   "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
+		   peer->addr, tid, ev->window_size);
+
+	ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
+	spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
+{
+	struct htt_rx_delba *ev = &resp->rx_delba;
+	struct ath10k_peer *peer;
+	struct ath10k_vif *arvif;
+	u16 info0, tid, peer_id;
+
+	info0 = __le16_to_cpu(ev->info0);
+	tid = MS(info0, HTT_RX_BA_INFO0_TID);
+	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT,
+		   "htt rx delba tid %hu peer_id %hu\n",
+		   tid, peer_id);
+
+	spin_lock_bh(&ar->data_lock);
+	peer = ath10k_peer_find_by_id(ar, peer_id);
+	if (!peer) {
+		ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
+			    peer_id);
+		spin_unlock_bh(&ar->data_lock);
+		return;
+	}
+
+	arvif = ath10k_get_arvif(ar, peer->vdev_id);
+	if (!arvif) {
+		ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
+			    peer->vdev_id);
+		spin_unlock_bh(&ar->data_lock);
+		return;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT,
+		   "htt rx stop rx ba session sta %pM tid %hu\n",
+		   peer->addr, tid);
+
+	ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
+	spin_unlock_bh(&ar->data_lock);
+}
+
+static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
+				       struct sk_buff_head *amsdu)
+{
+	struct sk_buff *msdu;
+	struct htt_rx_desc *rxd;
+
+	if (skb_queue_empty(list))
+		return -ENOBUFS;
+
+	if (WARN_ON(!skb_queue_empty(amsdu)))
+		return -EINVAL;
+
+	while ((msdu = __skb_dequeue(list))) {
+		__skb_queue_tail(amsdu, msdu);
+
+		rxd = (void *)msdu->data - sizeof(*rxd);
+		if (rxd->msdu_end.common.info0 &
+		    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
+			break;
+	}
+
+	msdu = skb_peek_tail(amsdu);
+	rxd = (void *)msdu->data - sizeof(*rxd);
+	if (!(rxd->msdu_end.common.info0 &
+	      __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
+		skb_queue_splice_init(amsdu, list);
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
+					    struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+	if (!ieee80211_has_protected(hdr->frame_control))
+		return;
+
+	/* Offloaded frames are already decrypted but firmware insists they are
+	 * protected in the 802.11 header. Strip the flag.  Otherwise mac80211
+	 * will drop the frame.
+	 */
+
+	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+	status->flag |= RX_FLAG_DECRYPTED |
+			RX_FLAG_IV_STRIPPED |
+			RX_FLAG_MMIC_STRIPPED;
+}
+
+static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
+				       struct sk_buff_head *list)
+{
+	struct ath10k_htt *htt = &ar->htt;
+	struct ieee80211_rx_status *status = &htt->rx_status;
+	struct htt_rx_offload_msdu *rx;
+	struct sk_buff *msdu;
+	size_t offset;
+
+	while ((msdu = __skb_dequeue(list))) {
+		/* Offloaded frames don't have an Rx descriptor. Instead they
+		 * have a short meta information header.
+		 */
+
+		rx = (void *)msdu->data;
+
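+		/* Account for the meta header in the skb, then strip it so
+		 * msdu->data points at the start of the frame itself.
+		 */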
+		skb_put(msdu, sizeof(*rx));
+		skb_pull(msdu, sizeof(*rx));
+
+		if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
+			ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
+			dev_kfree_skb_any(msdu);
+			continue;
+		}
+
+		skb_put(msdu, __le16_to_cpu(rx->msdu_len));
+
+		/* The offloaded rx header length isn't a multiple of 2 or 4 so
+		 * the actual payload is unaligned. Align the frame; otherwise
+		 * mac80211 complains. This shouldn't reduce performance much
+		 * because these offloaded frames are rare.
+		 */
+		offset = 4 - ((unsigned long)msdu->data & 3);
+		skb_put(msdu, offset);
+		memmove(msdu->data + offset, msdu->data, msdu->len);
+		skb_pull(msdu, offset);
+
+		/* FIXME: The frame is NWifi. Re-construct QoS Control
+		 * if possible later.
+		 */
+
+		memset(status, 0, sizeof(*status));
+		status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+
+		ath10k_htt_rx_h_rx_offload_prot(status, msdu);
+		ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
+		ath10k_process_rx(ar, status, msdu);
+	}
+}
+
+static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
+{
+	struct ath10k_htt *htt = &ar->htt;
+	struct htt_resp *resp = (void *)skb->data;
+	struct ieee80211_rx_status *status = &htt->rx_status;
+	struct sk_buff_head list;
+	struct sk_buff_head amsdu;
+	u16 peer_id;
+	u16 msdu_count;
+	u8 vdev_id;
+	u8 tid;
+	bool offload;
+	bool frag;
+	int ret;
+
+	lockdep_assert_held(&htt->rx_ring.lock);
+
+	if (htt->rx_confused)
+		return;
+
+	skb_pull(skb, sizeof(resp->hdr));
+	skb_pull(skb, sizeof(resp->rx_in_ord_ind));
+
+	peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
+	msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
+	vdev_id = resp->rx_in_ord_ind.vdev_id;
+	tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
+	offload = !!(resp->rx_in_ord_ind.info &
+			HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
+	frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT,
+		   "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
+		   vdev_id, peer_id, tid, offload, frag, msdu_count);
+
+	if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
+		ath10k_warn(ar, "dropping invalid in order rx indication\n");
+		return;
+	}
+
+	/* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
+	 * extracted and processed.
+	 */
+	__skb_queue_head_init(&list);
+	ret = ath10k_htt_rx_pop_paddr_list(htt, &resp->rx_in_ord_ind, &list);
+	if (ret < 0) {
+		ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
+		htt->rx_confused = true;
+		return;
+	}
+
+	/* Offloaded frames are very different and need to be handled
+	 * separately.
+	 */
+	if (offload)
+		ath10k_htt_rx_h_rx_offload(ar, &list);
+
+	while (!skb_queue_empty(&list)) {
+		__skb_queue_head_init(&amsdu);
+		ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
+		switch (ret) {
+		case 0:
+			/* Note: The in-order indication may report interleaved
+			 * frames from different PPDUs meaning reported rx rate
+			 * to mac80211 isn't accurate/reliable. It's still
+			 * better to report something than nothing though. This
+			 * should still give an idea about rx rate to the user.
+			 */
+			ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
+			ath10k_htt_rx_h_filter(ar, &amsdu, status);
+			ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
+			ath10k_htt_rx_h_deliver(ar, &amsdu, status);
+			break;
+		case -EAGAIN:
+			/* fall through */
+		default:
+			/* Should not happen. */
+			ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
+			htt->rx_confused = true;
+			__skb_queue_purge(&list);
+			return;
+		}
+	}
+
+	tasklet_schedule(&htt->rx_replenish_task);
+}
+
+void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
+{
+	struct ath10k_htt *htt = &ar->htt;
+	struct htt_resp *resp = (struct htt_resp *)skb->data;
+	enum htt_t2h_msg_type type;
+
+	/* confirm alignment */
+	if (!IS_ALIGNED((unsigned long)skb->data, 4))
+		ath10k_warn(ar, "unaligned htt message, expect trouble\n");
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
+		   resp->hdr.msg_type);
+
+	if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
+		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X max: 0x%0X\n",
+			   resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
+		dev_kfree_skb_any(skb);
+		return;
+	}
+	type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
+
+	switch (type) {
+	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
+		htt->target_version_major = resp->ver_resp.major;
+		htt->target_version_minor = resp->ver_resp.minor;
+		complete(&htt->target_version_received);
+		break;
+	}
+	case HTT_T2H_MSG_TYPE_RX_IND:
+		spin_lock_bh(&htt->rx_ring.lock);
+		__skb_queue_tail(&htt->rx_compl_q, skb);
+		spin_unlock_bh(&htt->rx_ring.lock);
+		tasklet_schedule(&htt->txrx_compl_task);
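+		/* The skb is now owned by the rx completion tasklet so don't
+		 * fall through to the common kfree below.
+		 */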
+		return;
+	case HTT_T2H_MSG_TYPE_PEER_MAP: {
+		struct htt_peer_map_event ev = {
+			.vdev_id = resp->peer_map.vdev_id,
+			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
+		};
+		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
+		ath10k_peer_map_event(htt, &ev);
+		break;
+	}
+	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
+		struct htt_peer_unmap_event ev = {
+			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
+		};
+		ath10k_peer_unmap_event(htt, &ev);
+		break;
+	}
+	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
+		struct htt_tx_done tx_done = {};
+		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
+
+		tx_done.msdu_id =
+			__le32_to_cpu(resp->mgmt_tx_completion.desc_id);
+
+		switch (status) {
+		case HTT_MGMT_TX_STATUS_OK:
+			tx_done.success = true;
+			break;
+		case HTT_MGMT_TX_STATUS_RETRY:
+			tx_done.no_ack = true;
+			break;
+		case HTT_MGMT_TX_STATUS_DROP:
+			tx_done.discard = true;
+			break;
+		}
+
+		ath10k_txrx_tx_unref(htt, &tx_done);
+		break;
+	}
+	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
+		skb_queue_tail(&htt->tx_compl_q, skb);
+		tasklet_schedule(&htt->txrx_compl_task);
+		return;
+	case HTT_T2H_MSG_TYPE_SEC_IND: {
+		struct ath10k *ar = htt->ar;
+		struct htt_security_indication *ev = &resp->security_indication;
+
+		ath10k_dbg(ar, ATH10K_DBG_HTT,
+			   "sec ind peer_id %d unicast %d type %d\n",
+			  __le16_to_cpu(ev->peer_id),
+			  !!(ev->flags & HTT_SECURITY_IS_UNICAST),
+			  MS(ev->flags, HTT_SECURITY_TYPE));
+		complete(&ar->install_key_done);
+		break;
+	}
+	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
+		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
+				skb->data, skb->len);
+		ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
+		break;
+	}
+	case HTT_T2H_MSG_TYPE_TEST:
+		break;
+	case HTT_T2H_MSG_TYPE_STATS_CONF:
+		trace_ath10k_htt_stats(ar, skb->data, skb->len);
+		break;
+	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
+		/* Firmware can return tx frames if it's unable to fully
+		 * process them and suspects host may be able to fix it. ath10k
+		 * sends all tx frames as already inspected so this shouldn't
+		 * happen unless fw has a bug.
+		 */
+		ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
+		break;
+	case HTT_T2H_MSG_TYPE_RX_ADDBA:
+		ath10k_htt_rx_addba(ar, resp);
+		break;
+	case HTT_T2H_MSG_TYPE_RX_DELBA:
+		ath10k_htt_rx_delba(ar, resp);
+		break;
+	case HTT_T2H_MSG_TYPE_PKTLOG: {
+		struct ath10k_pktlog_hdr *hdr =
+			(struct ath10k_pktlog_hdr *)resp->pktlog_msg.payload;
+
+		trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
+					sizeof(*hdr) +
+					__le16_to_cpu(hdr->size));
+		break;
+	}
+	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
+		/* Ignore this event because mac80211 takes care of Rx
+		 * aggregation reordering.
+		 */
+		break;
+	}
+	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
+		spin_lock_bh(&htt->rx_ring.lock);
+		__skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
+		spin_unlock_bh(&htt->rx_ring.lock);
+		tasklet_schedule(&htt->txrx_compl_task);
+		return;
+	}
+	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
+		break;
+	case HTT_T2H_MSG_TYPE_CHAN_CHANGE:
+		break;
+	case HTT_T2H_MSG_TYPE_AGGR_CONF:
+		break;
+	case HTT_T2H_MSG_TYPE_EN_STATS:
+	case HTT_T2H_MSG_TYPE_TX_FETCH_IND:
+	case HTT_T2H_MSG_TYPE_TX_FETCH_CONF:
+	case HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND:
+	default:
+		ath10k_warn(ar, "htt event (%d) not handled\n",
+			    resp->hdr.msg_type);
+		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
+				skb->data, skb->len);
+		break;
+	}
+
+	/* Free the indication buffer */
+	dev_kfree_skb_any(skb);
+}
+EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
+
+static void ath10k_htt_txrx_compl_task(unsigned long ptr)
+{
+	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
+	struct ath10k *ar = htt->ar;
+	struct htt_resp *resp;
+	struct sk_buff *skb;
+
+	while ((skb = skb_dequeue(&htt->tx_compl_q))) {
+		ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
+		dev_kfree_skb_any(skb);
+	}
+
+	spin_lock_bh(&htt->rx_ring.lock);
+	while ((skb = __skb_dequeue(&htt->rx_compl_q))) {
+		resp = (struct htt_resp *)skb->data;
+		ath10k_htt_rx_handler(htt, &resp->rx_ind);
+		dev_kfree_skb_any(skb);
+	}
+
+	while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) {
+		ath10k_htt_rx_in_ord_ind(ar, skb);
+		dev_kfree_skb_any(skb);
+	}
+	spin_unlock_bh(&htt->rx_ring.lock);
+}
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
new file mode 100644
index 0000000..1682397
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -0,0 +1,730 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/etherdevice.h>
+#include "htt.h"
+#include "mac.h"
+#include "hif.h"
+#include "txrx.h"
+#include "debug.h"
+
+void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool limit_mgmt_desc)
+{
+	if (limit_mgmt_desc)
+		htt->num_pending_mgmt_tx--;
+
+	htt->num_pending_tx--;
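+	/* Un-pause tx queues as soon as we drop back below the limit. */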
+	if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
+		ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
+}
+
+static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt,
+				      bool limit_mgmt_desc)
+{
+	spin_lock_bh(&htt->tx_lock);
+	__ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
+	spin_unlock_bh(&htt->tx_lock);
+}
+
+static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt,
+				     bool limit_mgmt_desc, bool is_probe_resp)
+{
+	struct ath10k *ar = htt->ar;
+	int ret = 0;
+
+	spin_lock_bh(&htt->tx_lock);
+
+	if (htt->num_pending_tx >= htt->max_num_pending_tx) {
+		ret = -EBUSY;
+		goto exit;
+	}
+
+	if (limit_mgmt_desc) {
+		if (is_probe_resp && (htt->num_pending_mgmt_tx >
+		    ar->hw_params.max_probe_resp_desc_thres)) {
+			ret = -EBUSY;
+			goto exit;
+		}
+		htt->num_pending_mgmt_tx++;
+	}
+
+	htt->num_pending_tx++;
+	if (htt->num_pending_tx == htt->max_num_pending_tx)
+		ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
+
+exit:
+	spin_unlock_bh(&htt->tx_lock);
+	return ret;
+}
+
+int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
+{
+	struct ath10k *ar = htt->ar;
+	int ret;
+
+	lockdep_assert_held(&htt->tx_lock);
+
+	ret = idr_alloc(&htt->pending_tx, skb, 0,
+			htt->max_num_pending_tx, GFP_ATOMIC);
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);
+
+	return ret;
+}
+
+void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
+{
+	struct ath10k *ar = htt->ar;
+
+	lockdep_assert_held(&htt->tx_lock);
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
+
+	idr_remove(&htt->pending_tx, msdu_id);
+}
+
+int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
+{
+	struct ath10k *ar = htt->ar;
+	int ret, size;
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
+		   htt->max_num_pending_tx);
+
+	spin_lock_init(&htt->tx_lock);
+	idr_init(&htt->pending_tx);
+
+	size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
+	htt->txbuf.vaddr = dma_alloc_coherent(ar->dev, size,
+						  &htt->txbuf.paddr,
+						  GFP_DMA);
+	if (!htt->txbuf.vaddr) {
+		ath10k_err(ar, "failed to alloc tx buffer\n");
+		ret = -ENOMEM;
+		goto free_idr_pending_tx;
+	}
+
+	if (!ar->hw_params.continuous_frag_desc)
+		goto skip_frag_desc_alloc;
+
+	size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
+	htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
+						  &htt->frag_desc.paddr,
+						  GFP_DMA);
+	if (!htt->frag_desc.vaddr) {
+		ath10k_warn(ar, "failed to alloc fragment desc memory\n");
+		ret = -ENOMEM;
+		goto free_txbuf;
+	}
+
+skip_frag_desc_alloc:
+	return 0;
+
+free_txbuf:
+	size = htt->max_num_pending_tx *
+			  sizeof(struct ath10k_htt_txbuf);
+	dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
+			  htt->txbuf.paddr);
+free_idr_pending_tx:
+	idr_destroy(&htt->pending_tx);
+	return ret;
+}
+
+static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
+{
+	struct ath10k *ar = ctx;
+	struct ath10k_htt *htt = &ar->htt;
+	struct htt_tx_done tx_done = {0};
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);
+
+	tx_done.discard = 1;
+	tx_done.msdu_id = msdu_id;
+
+	ath10k_txrx_tx_unref(htt, &tx_done);
+
+	return 0;
+}
+
+void ath10k_htt_tx_free(struct ath10k_htt *htt)
+{
+	int size;
+
+	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
+	idr_destroy(&htt->pending_tx);
+
+	if (htt->txbuf.vaddr) {
+		size = htt->max_num_pending_tx *
+				  sizeof(struct ath10k_htt_txbuf);
+		dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
+				  htt->txbuf.paddr);
+	}
+
+	if (htt->frag_desc.vaddr) {
+		size = htt->max_num_pending_tx *
+				  sizeof(struct htt_msdu_ext_desc);
+		dma_free_coherent(htt->ar->dev, size, htt->frag_desc.vaddr,
+				  htt->frag_desc.paddr);
+	}
+}
+
+void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
+{
+	dev_kfree_skb_any(skb);
+}
+
+void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb)
+{
+	dev_kfree_skb_any(skb);
+}
+EXPORT_SYMBOL(ath10k_htt_hif_tx_complete);
+
+int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
+{
+	struct ath10k *ar = htt->ar;
+	struct sk_buff *skb;
+	struct htt_cmd *cmd;
+	int len = 0;
+	int ret;
+
+	len += sizeof(cmd->hdr);
+	len += sizeof(cmd->ver_req);
+
+	skb = ath10k_htc_alloc_skb(ar, len);
+	if (!skb)
+		return -ENOMEM;
+
+	skb_put(skb, len);
+	cmd = (struct htt_cmd *)skb->data;
+	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;
+
+	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+	if (ret) {
+		dev_kfree_skb_any(skb);
+		return ret;
+	}
+
+	return 0;
+}
+
+int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
+{
+	struct ath10k *ar = htt->ar;
+	struct htt_stats_req *req;
+	struct sk_buff *skb;
+	struct htt_cmd *cmd;
+	int len = 0, ret;
+
+	len += sizeof(cmd->hdr);
+	len += sizeof(cmd->stats_req);
+
+	skb = ath10k_htc_alloc_skb(ar, len);
+	if (!skb)
+		return -ENOMEM;
+
+	skb_put(skb, len);
+	cmd = (struct htt_cmd *)skb->data;
+	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;
+
+	req = &cmd->stats_req;
+
+	memset(req, 0, sizeof(*req));
+
+	/* Currently we support only 8-bit masks at most so there is no need
+	 * to worry about endianness.
+	 */
+	req->upload_types[0] = mask;
+	req->reset_types[0] = mask;
+	req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
+	req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
+	req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);
+
+	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+	if (ret) {
+		ath10k_warn(ar, "failed to send htt type stats request: %d",
+			    ret);
+		dev_kfree_skb_any(skb);
+		return ret;
+	}
+
+	return 0;
+}
+
+int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
+{
+	struct ath10k *ar = htt->ar;
+	struct sk_buff *skb;
+	struct htt_cmd *cmd;
+	int ret, size;
+
+	if (!ar->hw_params.continuous_frag_desc)
+		return 0;
+
+	if (!htt->frag_desc.paddr) {
+		ath10k_warn(ar, "invalid frag desc memory\n");
+		return -EINVAL;
+	}
+
+	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg);
+	skb = ath10k_htc_alloc_skb(ar, size);
+	if (!skb)
+		return -ENOMEM;
+
+	skb_put(skb, size);
+	cmd = (struct htt_cmd *)skb->data;
+	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;
+	cmd->frag_desc_bank_cfg.info = 0;
+	cmd->frag_desc_bank_cfg.num_banks = 1;
+	cmd->frag_desc_bank_cfg.desc_size = sizeof(struct htt_msdu_ext_desc);
+	cmd->frag_desc_bank_cfg.bank_base_addrs[0] =
+				__cpu_to_le32(htt->frag_desc.paddr);
+	cmd->frag_desc_bank_cfg.bank_id[0].bank_min_id = 0;
+	cmd->frag_desc_bank_cfg.bank_id[0].bank_max_id =
+				__cpu_to_le16(htt->max_num_pending_tx - 1);
+
+	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+	if (ret) {
+		ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
+			    ret);
+		dev_kfree_skb_any(skb);
+		return ret;
+	}
+
+	return 0;
+}
+
+int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
+{
+	struct ath10k *ar = htt->ar;
+	struct sk_buff *skb;
+	struct htt_cmd *cmd;
+	struct htt_rx_ring_setup_ring *ring;
+	const int num_rx_ring = 1;
+	u16 flags;
+	u32 fw_idx;
+	int len;
+	int ret;
+
+	/*
+	 * the HW expects the buffer to be an integral number of 4-byte
+	 * "words"
+	 */
+	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
+	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);
+
+	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
+	    + (sizeof(*ring) * num_rx_ring);
+	skb = ath10k_htc_alloc_skb(ar, len);
+	if (!skb)
+		return -ENOMEM;
+
+	skb_put(skb, len);
+
+	cmd = (struct htt_cmd *)skb->data;
+	ring = &cmd->rx_setup.rings[0];
+
+	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
+	cmd->rx_setup.hdr.num_rings = 1;
+
+	/* FIXME: do we need all of this? */
+	flags = 0;
+	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
+	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
+	flags |= HTT_RX_RING_FLAGS_PPDU_START;
+	flags |= HTT_RX_RING_FLAGS_PPDU_END;
+	flags |= HTT_RX_RING_FLAGS_MPDU_START;
+	flags |= HTT_RX_RING_FLAGS_MPDU_END;
+	flags |= HTT_RX_RING_FLAGS_MSDU_START;
+	flags |= HTT_RX_RING_FLAGS_MSDU_END;
+	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
+	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
+	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
+	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
+	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
+	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
+	flags |= HTT_RX_RING_FLAGS_NULL_RX;
+	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;
+
+	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
+
+	ring->fw_idx_shadow_reg_paddr =
+		__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
+	ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
+	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
+	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
+	ring->flags = __cpu_to_le16(flags);
+	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);
+
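+	/* Rx descriptor field offsets are passed to the firmware in units of
+	 * 4-byte words, hence the division in desc_offset().
+	 */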
+#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
+
+	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
+	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
+	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
+	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
+	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
+	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
+	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
+	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
+	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
+	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
+
+#undef desc_offset
+
+	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+	if (ret) {
+		dev_kfree_skb_any(skb);
+		return ret;
+	}
+
+	return 0;
+}
+
+int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
+				u8 max_subfrms_ampdu,
+				u8 max_subfrms_amsdu)
+{
+	struct ath10k *ar = htt->ar;
+	struct htt_aggr_conf *aggr_conf;
+	struct sk_buff *skb;
+	struct htt_cmd *cmd;
+	int len;
+	int ret;
+
+	/* Firmware defaults are: amsdu = 3 and ampdu = 64 */
+
+	if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
+		return -EINVAL;
+
+	if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
+		return -EINVAL;
+
+	len = sizeof(cmd->hdr);
+	len += sizeof(cmd->aggr_conf);
+
+	skb = ath10k_htc_alloc_skb(ar, len);
+	if (!skb)
+		return -ENOMEM;
+
+	skb_put(skb, len);
+	cmd = (struct htt_cmd *)skb->data;
+	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;
+
+	aggr_conf = &cmd->aggr_conf;
+	aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
+	aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
+		   aggr_conf->max_num_amsdu_subframes,
+		   aggr_conf->max_num_ampdu_subframes);
+
+	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+	if (ret) {
+		dev_kfree_skb_any(skb);
+		return ret;
+	}
+
+	return 0;
+}
+
+int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
+{
+	struct ath10k *ar = htt->ar;
+	struct device *dev = ar->dev;
+	struct sk_buff *txdesc = NULL;
+	struct htt_cmd *cmd;
+	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
+	u8 vdev_id = skb_cb->vdev_id;
+	int len = 0;
+	int msdu_id = -1;
+	int res;
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
+	bool limit_mgmt_desc = false;
+	bool is_probe_resp = false;
+
+	if (ar->hw_params.max_probe_resp_desc_thres) {
+		limit_mgmt_desc = true;
+
+		if (ieee80211_is_probe_resp(hdr->frame_control))
+			is_probe_resp = true;
+	}
+
+	res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);
+
+	if (res)
+		goto err;
+
+	len += sizeof(cmd->hdr);
+	len += sizeof(cmd->mgmt_tx);
+
+	spin_lock_bh(&htt->tx_lock);
+	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
+	spin_unlock_bh(&htt->tx_lock);
+	if (res < 0)
+		goto err_tx_dec;
+
+	msdu_id = res;
+
+	txdesc = ath10k_htc_alloc_skb(ar, len);
+	if (!txdesc) {
+		res = -ENOMEM;
+		goto err_free_msdu_id;
+	}
+
+	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
+				       DMA_TO_DEVICE);
+	res = dma_mapping_error(dev, skb_cb->paddr);
+	if (res) {
+		res = -EIO;
+		goto err_free_txdesc;
+	}
+
+	skb_put(txdesc, len);
+	cmd = (struct htt_cmd *)txdesc->data;
+	memset(cmd, 0, len);
+
+	cmd->hdr.msg_type         = HTT_H2T_MSG_TYPE_MGMT_TX;
+	cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
+	cmd->mgmt_tx.len        = __cpu_to_le32(msdu->len);
+	cmd->mgmt_tx.desc_id    = __cpu_to_le32(msdu_id);
+	cmd->mgmt_tx.vdev_id    = __cpu_to_le32(vdev_id);
+	memcpy(cmd->mgmt_tx.hdr, msdu->data,
+	       min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));
+
+	skb_cb->htt.txbuf = NULL;
+
+	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
+	if (res)
+		goto err_unmap_msdu;
+
+	return 0;
+
+err_unmap_msdu:
+	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+err_free_txdesc:
+	dev_kfree_skb_any(txdesc);
+err_free_msdu_id:
+	spin_lock_bh(&htt->tx_lock);
+	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+	spin_unlock_bh(&htt->tx_lock);
+err_tx_dec:
+	ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
+err:
+	return res;
+}
+
+int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
+{
+	struct ath10k *ar = htt->ar;
+	struct device *dev = ar->dev;
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
+	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
+	struct ath10k_hif_sg_item sg_items[2];
+	struct htt_data_tx_desc_frag *frags;
+	u8 vdev_id = skb_cb->vdev_id;
+	u8 tid = skb_cb->htt.tid;
+	int prefetch_len;
+	int res;
+	u8 flags0 = 0;
+	u16 msdu_id, flags1 = 0;
+	u32 frags_paddr = 0;
+	struct htt_msdu_ext_desc *ext_desc = NULL;
+	bool limit_mgmt_desc = false;
+	bool is_probe_resp = false;
+
+	if (unlikely(ieee80211_is_mgmt(hdr->frame_control)) &&
+	    ar->hw_params.max_probe_resp_desc_thres) {
+		limit_mgmt_desc = true;
+
+		if (ieee80211_is_probe_resp(hdr->frame_control))
+			is_probe_resp = true;
+	}
+
+	res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);
+	if (res)
+		goto err;
+
+	spin_lock_bh(&htt->tx_lock);
+	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
+	spin_unlock_bh(&htt->tx_lock);
+	if (res < 0)
+		goto err_tx_dec;
+
+	msdu_id = res;
+
+	prefetch_len = min(htt->prefetch_len, msdu->len);
+	prefetch_len = roundup(prefetch_len, 4);
+
+	skb_cb->htt.txbuf = &htt->txbuf.vaddr[msdu_id];
+	skb_cb->htt.txbuf_paddr = htt->txbuf.paddr +
+		(sizeof(struct ath10k_htt_txbuf) * msdu_id);
+
+	if ((ieee80211_is_action(hdr->frame_control) ||
+	     ieee80211_is_deauth(hdr->frame_control) ||
+	     ieee80211_is_disassoc(hdr->frame_control)) &&
+	     ieee80211_has_protected(hdr->frame_control)) {
+		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+	} else if (!skb_cb->htt.nohwcrypt &&
+		   skb_cb->txmode == ATH10K_HW_TXRX_RAW &&
+		   ieee80211_has_protected(hdr->frame_control)) {
+		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+	}
+
+	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
+				       DMA_TO_DEVICE);
+	res = dma_mapping_error(dev, skb_cb->paddr);
+	if (res) {
+		res = -EIO;
+		goto err_free_msdu_id;
+	}
+
+	switch (skb_cb->txmode) {
+	case ATH10K_HW_TXRX_RAW:
+	case ATH10K_HW_TXRX_NATIVE_WIFI:
+		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
+		/* fall through */
+	case ATH10K_HW_TXRX_ETHERNET:
+		if (ar->hw_params.continuous_frag_desc) {
+			memset(&htt->frag_desc.vaddr[msdu_id], 0,
+			       sizeof(struct htt_msdu_ext_desc));
+			frags = (struct htt_data_tx_desc_frag *)
+				&htt->frag_desc.vaddr[msdu_id].frags;
+			ext_desc = &htt->frag_desc.vaddr[msdu_id];
+			frags[0].tword_addr.paddr_lo =
+				__cpu_to_le32(skb_cb->paddr);
+			frags[0].tword_addr.paddr_hi = 0;
+			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
+
+			frags_paddr =  htt->frag_desc.paddr +
+				(sizeof(struct htt_msdu_ext_desc) * msdu_id);
+		} else {
+			frags = skb_cb->htt.txbuf->frags;
+			frags[0].dword_addr.paddr =
+				__cpu_to_le32(skb_cb->paddr);
+			frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
+			frags[1].dword_addr.paddr = 0;
+			frags[1].dword_addr.len = 0;
+
+			frags_paddr = skb_cb->htt.txbuf_paddr;
+		}
+		flags0 |= SM(skb_cb->txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+		break;
+	case ATH10K_HW_TXRX_MGMT:
+		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
+			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
+
+		frags_paddr = skb_cb->paddr;
+		break;
+	}
+
+	/* Normally all commands go through HTC which manages tx credits for
+	 * each endpoint and notifies when tx is completed.
+	 *
+	 * HTT endpoint is creditless so there's no need to care about HTC
+	 * flags. In that case it is trivial to fill the HTC header here.
+	 *
+	 * MSDU transmission is considered completed upon HTT event. This
+	 * implies no relevant resources can be freed until after the event is
+	 * received. That's why HTC tx completion handler itself is ignored by
+	 * setting NULL to transfer_context for all sg items.
+	 *
+	 * There is simply no point in pushing HTT TX_FRM through HTC tx path
+	 * as it's a waste of resources. By bypassing HTC it is possible to
+	 * avoid extra memory allocations, compress data structures and thus
+	 * improve performance. */
+
+	skb_cb->htt.txbuf->htc_hdr.eid = htt->eid;
+	skb_cb->htt.txbuf->htc_hdr.len = __cpu_to_le16(
+			sizeof(skb_cb->htt.txbuf->cmd_hdr) +
+			sizeof(skb_cb->htt.txbuf->cmd_tx) +
+			prefetch_len);
+	skb_cb->htt.txbuf->htc_hdr.flags = 0;
+
+	if (skb_cb->htt.nohwcrypt)
+		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
+
+	if (!skb_cb->is_protected)
+		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
+
+	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
+	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
+	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
+	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
+		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
+		if (ar->hw_params.continuous_frag_desc)
+			ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
+	}
+
+	/* Prevent firmware from sending up tx inspection requests. There's
+	 * nothing ath10k can do with frames requested for inspection so force
+	 * it to simply rely on a regular tx completion with discard status.
+	 */
+	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
+
+	skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
+	skb_cb->htt.txbuf->cmd_tx.flags0 = flags0;
+	skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
+	skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
+	skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
+	skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
+	skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le16(HTT_INVALID_PEERID);
+	skb_cb->htt.txbuf->cmd_tx.freq = __cpu_to_le16(skb_cb->htt.freq);
+
+	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
+	ath10k_dbg(ar, ATH10K_DBG_HTT,
+		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
+		   flags0, flags1, msdu->len, msdu_id, frags_paddr,
+		   (u32)skb_cb->paddr, vdev_id, tid, skb_cb->htt.freq);
+	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
+			msdu->data, msdu->len);
+	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
+	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);
+
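+	/* The first sg item covers the HTC/HTT headers that live in the
+	 * pre-allocated txbuf; frags[] is the first member of the txbuf so
+	 * the header paddr is offset by sizeof(frags).
+	 */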
+	sg_items[0].transfer_id = 0;
+	sg_items[0].transfer_context = NULL;
+	sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr;
+	sg_items[0].paddr = skb_cb->htt.txbuf_paddr +
+			    sizeof(skb_cb->htt.txbuf->frags);
+	sg_items[0].len = sizeof(skb_cb->htt.txbuf->htc_hdr) +
+			  sizeof(skb_cb->htt.txbuf->cmd_hdr) +
+			  sizeof(skb_cb->htt.txbuf->cmd_tx);
+
+	sg_items[1].transfer_id = 0;
+	sg_items[1].transfer_context = NULL;
+	sg_items[1].vaddr = msdu->data;
+	sg_items[1].paddr = skb_cb->paddr;
+	sg_items[1].len = prefetch_len;
+
+	res = ath10k_hif_tx_sg(htt->ar,
+			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
+			       sg_items, ARRAY_SIZE(sg_items));
+	if (res)
+		goto err_unmap_msdu;
+
+	return 0;
+
+err_unmap_msdu:
+	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+err_free_msdu_id:
+	spin_lock_bh(&htt->tx_lock);
+	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+	spin_unlock_bh(&htt->tx_lock);
+err_tx_dec:
+	ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
+err:
+	return res;
+}
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
new file mode 100644
index 0000000..7b84d08
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/types.h>
+#include "core.h"
+#include "hw.h"
+
+const struct ath10k_hw_regs qca988x_regs = {
+	.rtc_state_cold_reset_mask	= 0x00000400,
+	.rtc_soc_base_address		= 0x00004000,
+	.rtc_wmac_base_address		= 0x00005000,
+	.soc_core_base_address		= 0x00009000,
+	.ce_wrapper_base_address	= 0x00057000,
+	.ce0_base_address		= 0x00057400,
+	.ce1_base_address		= 0x00057800,
+	.ce2_base_address		= 0x00057c00,
+	.ce3_base_address		= 0x00058000,
+	.ce4_base_address		= 0x00058400,
+	.ce5_base_address		= 0x00058800,
+	.ce6_base_address		= 0x00058c00,
+	.ce7_base_address		= 0x00059000,
+	.soc_reset_control_si0_rst_mask	= 0x00000001,
+	.soc_reset_control_ce_rst_mask	= 0x00040000,
+	.soc_chip_id_address		= 0x000000ec,
+	.scratch_3_address		= 0x00000030,
+	.fw_indicator_address		= 0x00009030,
+	.pcie_local_base_address	= 0x00080000,
+	.ce_wrap_intr_sum_host_msi_lsb	= 0x00000008,
+	.ce_wrap_intr_sum_host_msi_mask	= 0x0000ff00,
+	.pcie_intr_fw_mask		= 0x00000400,
+	.pcie_intr_ce_mask_all		= 0x0007f800,
+	.pcie_intr_clr_address		= 0x00000014,
+};
+
+const struct ath10k_hw_regs qca6174_regs = {
+	.rtc_state_cold_reset_mask		= 0x00002000,
+	.rtc_soc_base_address			= 0x00000800,
+	.rtc_wmac_base_address			= 0x00001000,
+	.soc_core_base_address			= 0x0003a000,
+	.ce_wrapper_base_address		= 0x00034000,
+	.ce0_base_address			= 0x00034400,
+	.ce1_base_address			= 0x00034800,
+	.ce2_base_address			= 0x00034c00,
+	.ce3_base_address			= 0x00035000,
+	.ce4_base_address			= 0x00035400,
+	.ce5_base_address			= 0x00035800,
+	.ce6_base_address			= 0x00035c00,
+	.ce7_base_address			= 0x00036000,
+	.soc_reset_control_si0_rst_mask		= 0x00000000,
+	.soc_reset_control_ce_rst_mask		= 0x00000001,
+	.soc_chip_id_address			= 0x000000f0,
+	.scratch_3_address			= 0x00000028,
+	.fw_indicator_address			= 0x0003a028,
+	.pcie_local_base_address		= 0x00080000,
+	.ce_wrap_intr_sum_host_msi_lsb		= 0x00000008,
+	.ce_wrap_intr_sum_host_msi_mask		= 0x0000ff00,
+	.pcie_intr_fw_mask			= 0x00000400,
+	.pcie_intr_ce_mask_all			= 0x0007f800,
+	.pcie_intr_clr_address			= 0x00000014,
+};
+
+const struct ath10k_hw_regs qca99x0_regs = {
+	.rtc_state_cold_reset_mask		= 0x00000400,
+	.rtc_soc_base_address			= 0x00080000,
+	.rtc_wmac_base_address			= 0x00000000,
+	.soc_core_base_address			= 0x00082000,
+	.ce_wrapper_base_address		= 0x0004d000,
+	.ce0_base_address			= 0x0004a000,
+	.ce1_base_address			= 0x0004a400,
+	.ce2_base_address			= 0x0004a800,
+	.ce3_base_address			= 0x0004ac00,
+	.ce4_base_address			= 0x0004b000,
+	.ce5_base_address			= 0x0004b400,
+	.ce6_base_address			= 0x0004b800,
+	.ce7_base_address			= 0x0004bc00,
+	/* Note: qca99x0 supports up to 12 Copy Engines. Other than the
+	 * addresses of CE0 and CE1, no other copy engine is referred to
+	 * directly in the code, so it is not really necessary to assign
+	 * addresses for the newly supported CEs in this address table.
+	 *	Copy Engine		Address
+	 *	CE8			0x0004c000
+	 *	CE9			0x0004c400
+	 *	CE10			0x0004c800
+	 *	CE11			0x0004cc00
+	 */
+	.soc_reset_control_si0_rst_mask		= 0x00000001,
+	.soc_reset_control_ce_rst_mask		= 0x00000100,
+	.soc_chip_id_address			= 0x000000ec,
+	.scratch_3_address			= 0x00040050,
+	.fw_indicator_address			= 0x00040050,
+	.pcie_local_base_address		= 0x00000000,
+	.ce_wrap_intr_sum_host_msi_lsb		= 0x0000000c,
+	.ce_wrap_intr_sum_host_msi_mask		= 0x00fff000,
+	.pcie_intr_fw_mask			= 0x00100000,
+	.pcie_intr_ce_mask_all			= 0x000fff00,
+	.pcie_intr_clr_address			= 0x00000010,
+};
+
+const struct ath10k_hw_values qca988x_values = {
+	.rtc_state_val_on		= 3,
+	.ce_count			= 8,
+	.msi_assign_ce_max		= 7,
+	.num_target_ce_config_wlan	= 7,
+	.ce_desc_meta_data_mask		= 0xFFFC,
+	.ce_desc_meta_data_lsb		= 2,
+};
+
+const struct ath10k_hw_values qca6174_values = {
+	.rtc_state_val_on		= 3,
+	.ce_count			= 8,
+	.msi_assign_ce_max		= 7,
+	.num_target_ce_config_wlan	= 7,
+	.ce_desc_meta_data_mask		= 0xFFFC,
+	.ce_desc_meta_data_lsb		= 2,
+};
+
+const struct ath10k_hw_values qca99x0_values = {
+	.rtc_state_val_on		= 5,
+	.ce_count			= 12,
+	.msi_assign_ce_max		= 12,
+	.num_target_ce_config_wlan	= 10,
+	.ce_desc_meta_data_mask		= 0xFFF0,
+	.ce_desc_meta_data_lsb		= 4,
+};
+
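+/* Fill in survey time/busy-time from the cumulative channel counters.
+ * cc/rcc are the current cycle-count and busy-count snapshots, cc_prev and
+ * rcc_prev the previous ones; the deltas are converted to milliseconds. On
+ * hardware with a shifted cycle counter wraparound (cc wraps at 0x7fffffff)
+ * the delta is compensated via cc_fix and the busy-time flag is dropped for
+ * that interval.
+ */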
+void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
+				u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev)
+{
+	u32 cc_fix = 0;
+
+	survey->filled |= SURVEY_INFO_TIME |
+			  SURVEY_INFO_TIME_BUSY;
+
+	if (ar->hw_params.has_shifted_cc_wraparound && cc < cc_prev) {
+		cc_fix = 0x7fffffff;
+		survey->filled &= ~SURVEY_INFO_TIME_BUSY;
+	}
+
+	cc -= cc_prev - cc_fix;
+	rcc -= rcc_prev;
+
+	survey->time = CCNT_TO_MSEC(ar, cc);
+	survey->time_busy = CCNT_TO_MSEC(ar, rcc);
+}
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
new file mode 100644
index 0000000..713c2bc
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -0,0 +1,667 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _HW_H_
+#define _HW_H_
+
+#include "targaddrs.h"
+
+#define ATH10K_FW_DIR			"ath10k"
+
+#define QCA988X_2_0_DEVICE_ID   (0x003c)
+#define QCA6164_2_1_DEVICE_ID   (0x0041)
+#define QCA6174_2_1_DEVICE_ID   (0x003e)
+#define QCA99X0_2_0_DEVICE_ID   (0x0040)
+#define QCA9377_1_0_DEVICE_ID   (0x0042)
+
+/* QCA988X 1.0 definitions (unsupported) */
+#define QCA988X_HW_1_0_CHIP_ID_REV	0x0
+
+/* QCA988X 2.0 definitions */
+#define QCA988X_HW_2_0_VERSION		0x4100016c
+#define QCA988X_HW_2_0_CHIP_ID_REV	0x2
+#define QCA988X_HW_2_0_FW_DIR		ATH10K_FW_DIR "/QCA988X/hw2.0"
+#define QCA988X_HW_2_0_FW_FILE		"firmware.bin"
+#define QCA988X_HW_2_0_OTP_FILE		"otp.bin"
+#define QCA988X_HW_2_0_BOARD_DATA_FILE	"board.bin"
+#define QCA988X_HW_2_0_PATCH_LOAD_ADDR	0x1234
+
+/* QCA6174 target BMI version signatures */
+#define QCA6174_HW_1_0_VERSION		0x05000000
+#define QCA6174_HW_1_1_VERSION		0x05000001
+#define QCA6174_HW_1_3_VERSION		0x05000003
+#define QCA6174_HW_2_1_VERSION		0x05010000
+#define QCA6174_HW_3_0_VERSION		0x05020000
+#define QCA6174_HW_3_2_VERSION		0x05030000
+
+/* QCA9377 target BMI version signatures */
+#define QCA9377_HW_1_0_DEV_VERSION	0x05020000
+#define QCA9377_HW_1_1_DEV_VERSION	0x05020001
+
+enum qca6174_pci_rev {
+	QCA6174_PCI_REV_1_1 = 0x11,
+	QCA6174_PCI_REV_1_3 = 0x13,
+	QCA6174_PCI_REV_2_0 = 0x20,
+	QCA6174_PCI_REV_3_0 = 0x30,
+};
+
+enum qca6174_chip_id_rev {
+	QCA6174_HW_1_0_CHIP_ID_REV = 0,
+	QCA6174_HW_1_1_CHIP_ID_REV = 1,
+	QCA6174_HW_1_3_CHIP_ID_REV = 2,
+	QCA6174_HW_2_1_CHIP_ID_REV = 4,
+	QCA6174_HW_2_2_CHIP_ID_REV = 5,
+	QCA6174_HW_3_0_CHIP_ID_REV = 8,
+	QCA6174_HW_3_1_CHIP_ID_REV = 9,
+	QCA6174_HW_3_2_CHIP_ID_REV = 10,
+};
+
+enum qca9377_chip_id_rev {
+	QCA9377_HW_1_0_CHIP_ID_REV = 0x0,
+	QCA9377_HW_1_1_CHIP_ID_REV = 0x1,
+};
+
+#define QCA6174_HW_2_1_FW_DIR		"ath10k/QCA6174/hw2.1"
+#define QCA6174_HW_2_1_FW_FILE		"firmware.bin"
+#define QCA6174_HW_2_1_OTP_FILE		"otp.bin"
+#define QCA6174_HW_2_1_BOARD_DATA_FILE	"board.bin"
+#define QCA6174_HW_2_1_PATCH_LOAD_ADDR	0x1234
+
+#define QCA6174_HW_3_0_FW_DIR		"ath10k/QCA6174/hw3.0"
+#define QCA6174_HW_3_0_FW_FILE		"firmware.bin"
+#define QCA6174_HW_3_0_OTP_FILE		"otp.bin"
+#define QCA6174_HW_3_0_BOARD_DATA_FILE	"board.bin"
+#define QCA6174_HW_3_0_PATCH_LOAD_ADDR	0x1234
+
+/* QCA99X0 1.0 definitions (unsupported) */
+#define QCA99X0_HW_1_0_CHIP_ID_REV     0x0
+
+/* QCA99X0 2.0 definitions */
+#define QCA99X0_HW_2_0_DEV_VERSION     0x01000000
+#define QCA99X0_HW_2_0_CHIP_ID_REV     0x1
+#define QCA99X0_HW_2_0_FW_DIR          ATH10K_FW_DIR "/QCA99X0/hw2.0"
+#define QCA99X0_HW_2_0_FW_FILE         "firmware.bin"
+#define QCA99X0_HW_2_0_OTP_FILE        "otp.bin"
+#define QCA99X0_HW_2_0_BOARD_DATA_FILE "board.bin"
+#define QCA99X0_HW_2_0_PATCH_LOAD_ADDR	0x1234
+
+/* QCA9377 1.0 definitions */
+#define QCA9377_HW_1_0_FW_DIR          ATH10K_FW_DIR "/QCA9377/hw1.0"
+#define QCA9377_HW_1_0_FW_FILE         "firmware.bin"
+#define QCA9377_HW_1_0_OTP_FILE        "otp.bin"
+#define QCA9377_HW_1_0_BOARD_DATA_FILE "board.bin"
+#define QCA9377_HW_1_0_PATCH_LOAD_ADDR	0x1234
+
+#define ATH10K_FW_API2_FILE		"firmware-2.bin"
+#define ATH10K_FW_API3_FILE		"firmware-3.bin"
+
+/* added support for ATH10K_FW_IE_WMI_OP_VERSION */
+#define ATH10K_FW_API4_FILE		"firmware-4.bin"
+
+/* HTT id conflict fix for management frames over HTT */
+#define ATH10K_FW_API5_FILE		"firmware-5.bin"
+
+#define ATH10K_FW_UTF_FILE		"utf.bin"
+#define ATH10K_FW_UTF_API2_FILE		"utf-2.bin"
+
+/* also includes the terminating null byte */
+#define ATH10K_FIRMWARE_MAGIC               "QCA-ATH10K"
+#define ATH10K_BOARD_MAGIC                  "QCA-ATH10K-BOARD"
+
+#define ATH10K_BOARD_API2_FILE         "board-2.bin"
+
+#define REG_DUMP_COUNT_QCA988X 60
+
+#define QCA988X_CAL_DATA_LEN		2116
+
+struct ath10k_fw_ie {
+	__le32 id;
+	__le32 len;
+	u8 data[0];
+};
+
+enum ath10k_fw_ie_type {
+	ATH10K_FW_IE_FW_VERSION = 0,
+	ATH10K_FW_IE_TIMESTAMP = 1,
+	ATH10K_FW_IE_FEATURES = 2,
+	ATH10K_FW_IE_FW_IMAGE = 3,
+	ATH10K_FW_IE_OTP_IMAGE = 4,
+
+	/* WMI "operations" interface version, 32 bit value. Supported from
+	 * FW API 4 and above.
+	 */
+	ATH10K_FW_IE_WMI_OP_VERSION = 5,
+
+	/* HTT "operations" interface version, 32 bit value. Supported from
+	 * FW API 5 and above.
+	 */
+	ATH10K_FW_IE_HTT_OP_VERSION = 6,
+
+	/* Code swap image for firmware binary */
+	ATH10K_FW_IE_FW_CODE_SWAP_IMAGE = 7,
+};
+
+enum ath10k_fw_wmi_op_version {
+	ATH10K_FW_WMI_OP_VERSION_UNSET = 0,
+
+	ATH10K_FW_WMI_OP_VERSION_MAIN = 1,
+	ATH10K_FW_WMI_OP_VERSION_10_1 = 2,
+	ATH10K_FW_WMI_OP_VERSION_10_2 = 3,
+	ATH10K_FW_WMI_OP_VERSION_TLV = 4,
+	ATH10K_FW_WMI_OP_VERSION_10_2_4 = 5,
+	ATH10K_FW_WMI_OP_VERSION_10_4 = 6,
+
+	/* keep last */
+	ATH10K_FW_WMI_OP_VERSION_MAX,
+};
+
+enum ath10k_fw_htt_op_version {
+	ATH10K_FW_HTT_OP_VERSION_UNSET = 0,
+
+	ATH10K_FW_HTT_OP_VERSION_MAIN = 1,
+
+	/* also used in 10.2 and 10.2.4 branches */
+	ATH10K_FW_HTT_OP_VERSION_10_1 = 2,
+
+	ATH10K_FW_HTT_OP_VERSION_TLV = 3,
+
+	ATH10K_FW_HTT_OP_VERSION_10_4 = 4,
+
+	/* keep last */
+	ATH10K_FW_HTT_OP_VERSION_MAX,
+};
+
+enum ath10k_bd_ie_type {
+	/* contains sub IEs of enum ath10k_bd_ie_board_type */
+	ATH10K_BD_IE_BOARD = 0,
+};
+
+enum ath10k_bd_ie_board_type {
+	ATH10K_BD_IE_BOARD_NAME = 0,
+	ATH10K_BD_IE_BOARD_DATA = 1,
+};
+
+enum ath10k_hw_rev {
+	ATH10K_HW_QCA988X,
+	ATH10K_HW_QCA6174,
+	ATH10K_HW_QCA99X0,
+	ATH10K_HW_QCA9377,
+};
+
+struct ath10k_hw_regs {
+	u32 rtc_state_cold_reset_mask;
+	u32 rtc_soc_base_address;
+	u32 rtc_wmac_base_address;
+	u32 soc_core_base_address;
+	u32 ce_wrapper_base_address;
+	u32 ce0_base_address;
+	u32 ce1_base_address;
+	u32 ce2_base_address;
+	u32 ce3_base_address;
+	u32 ce4_base_address;
+	u32 ce5_base_address;
+	u32 ce6_base_address;
+	u32 ce7_base_address;
+	u32 soc_reset_control_si0_rst_mask;
+	u32 soc_reset_control_ce_rst_mask;
+	u32 soc_chip_id_address;
+	u32 scratch_3_address;
+	u32 fw_indicator_address;
+	u32 pcie_local_base_address;
+	u32 ce_wrap_intr_sum_host_msi_lsb;
+	u32 ce_wrap_intr_sum_host_msi_mask;
+	u32 pcie_intr_fw_mask;
+	u32 pcie_intr_ce_mask_all;
+	u32 pcie_intr_clr_address;
+};
+
+extern const struct ath10k_hw_regs qca988x_regs;
+extern const struct ath10k_hw_regs qca6174_regs;
+extern const struct ath10k_hw_regs qca99x0_regs;
+
+struct ath10k_hw_values {
+	u32 rtc_state_val_on;
+	u8 ce_count;
+	u8 msi_assign_ce_max;
+	u8 num_target_ce_config_wlan;
+	u16 ce_desc_meta_data_mask;
+	u8 ce_desc_meta_data_lsb;
+};
+
+extern const struct ath10k_hw_values qca988x_values;
+extern const struct ath10k_hw_values qca6174_values;
+extern const struct ath10k_hw_values qca99x0_values;
+
+void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
+				u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev);
+
+#define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X)
+#define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174)
+#define QCA_REV_99X0(ar) ((ar)->hw_rev == ATH10K_HW_QCA99X0)
+#define QCA_REV_9377(ar) ((ar)->hw_rev == ATH10K_HW_QCA9377)
+
+/* Known peculiarities:
+ *  - raw appears in nwifi decap, raw and nwifi appear in ethernet decap
+ *  - raw frames have an FCS, nwifi frames don't
+ *  - ethernet frames have the 802.11 header decapped and parts (base hdr,
+ *    cipher param, llc/snap) are each aligned to 4-byte boundaries */
+enum ath10k_hw_txrx_mode {
+	ATH10K_HW_TXRX_RAW = 0,
+
+	/* Native Wifi decap mode is used to align IP frames to 4-byte
+	 * boundaries and avoid a very expensive re-alignment in mac80211.
+	 */
+	ATH10K_HW_TXRX_NATIVE_WIFI = 1,
+	ATH10K_HW_TXRX_ETHERNET = 2,
+
+	/* Valid for HTT >= 3.0. Used for management frames in TX_FRM. */
+	ATH10K_HW_TXRX_MGMT = 3,
+};
+
+enum ath10k_mcast2ucast_mode {
+	ATH10K_MCAST2UCAST_DISABLED = 0,
+	ATH10K_MCAST2UCAST_ENABLED = 1,
+};
+
+struct ath10k_pktlog_hdr {
+	__le16 flags;
+	__le16 missed_cnt;
+	__le16 log_type;
+	__le16 size;
+	__le32 timestamp;
+	u8 payload[0];
+} __packed;
+
+enum ath10k_hw_rate_ofdm {
+	ATH10K_HW_RATE_OFDM_48M = 0,
+	ATH10K_HW_RATE_OFDM_24M,
+	ATH10K_HW_RATE_OFDM_12M,
+	ATH10K_HW_RATE_OFDM_6M,
+	ATH10K_HW_RATE_OFDM_54M,
+	ATH10K_HW_RATE_OFDM_36M,
+	ATH10K_HW_RATE_OFDM_18M,
+	ATH10K_HW_RATE_OFDM_9M,
+};
+
+enum ath10k_hw_rate_cck {
+	ATH10K_HW_RATE_CCK_LP_11M = 0,
+	ATH10K_HW_RATE_CCK_LP_5_5M,
+	ATH10K_HW_RATE_CCK_LP_2M,
+	ATH10K_HW_RATE_CCK_LP_1M,
+	ATH10K_HW_RATE_CCK_SP_11M,
+	ATH10K_HW_RATE_CCK_SP_5_5M,
+	ATH10K_HW_RATE_CCK_SP_2M,
+};
+
+/* Target specific defines for MAIN firmware */
+#define TARGET_NUM_VDEVS			8
+#define TARGET_NUM_PEER_AST			2
+#define TARGET_NUM_WDS_ENTRIES			32
+#define TARGET_DMA_BURST_SIZE			0
+#define TARGET_MAC_AGGR_DELIM			0
+#define TARGET_AST_SKID_LIMIT			16
+#define TARGET_NUM_STATIONS			16
+#define TARGET_NUM_PEERS			((TARGET_NUM_STATIONS) + \
+						 (TARGET_NUM_VDEVS))
+#define TARGET_NUM_OFFLOAD_PEERS		0
+#define TARGET_NUM_OFFLOAD_REORDER_BUFS         0
+#define TARGET_NUM_PEER_KEYS			2
+#define TARGET_NUM_TIDS				((TARGET_NUM_PEERS) * 2)
+#define TARGET_TX_CHAIN_MASK			(BIT(0) | BIT(1) | BIT(2))
+#define TARGET_RX_CHAIN_MASK			(BIT(0) | BIT(1) | BIT(2))
+#define TARGET_RX_TIMEOUT_LO_PRI		100
+#define TARGET_RX_TIMEOUT_HI_PRI		40
+
+#define TARGET_SCAN_MAX_PENDING_REQS		4
+#define TARGET_BMISS_OFFLOAD_MAX_VDEV		3
+#define TARGET_ROAM_OFFLOAD_MAX_VDEV		3
+#define TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES	8
+#define TARGET_GTK_OFFLOAD_MAX_VDEV		3
+#define TARGET_NUM_MCAST_GROUPS			0
+#define TARGET_NUM_MCAST_TABLE_ELEMS		0
+#define TARGET_MCAST2UCAST_MODE			ATH10K_MCAST2UCAST_DISABLED
+#define TARGET_TX_DBG_LOG_SIZE			1024
+#define TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 0
+#define TARGET_VOW_CONFIG			0
+#define TARGET_NUM_MSDU_DESC			(1024 + 400)
+#define TARGET_MAX_FRAG_ENTRIES			0
+
+/* Target specific defines for 10.X firmware */
+#define TARGET_10X_NUM_VDEVS			16
+#define TARGET_10X_NUM_PEER_AST			2
+#define TARGET_10X_NUM_WDS_ENTRIES		32
+#define TARGET_10X_DMA_BURST_SIZE		0
+#define TARGET_10X_MAC_AGGR_DELIM		0
+#define TARGET_10X_AST_SKID_LIMIT		128
+#define TARGET_10X_NUM_STATIONS			128
+#define TARGET_10X_NUM_PEERS			((TARGET_10X_NUM_STATIONS) + \
+						 (TARGET_10X_NUM_VDEVS))
+#define TARGET_10X_NUM_OFFLOAD_PEERS		0
+#define TARGET_10X_NUM_OFFLOAD_REORDER_BUFS	0
+#define TARGET_10X_NUM_PEER_KEYS		2
+#define TARGET_10X_NUM_TIDS_MAX			256
+#define TARGET_10X_NUM_TIDS			min((TARGET_10X_NUM_TIDS_MAX), \
+						    (TARGET_10X_NUM_PEERS) * 2)
+#define TARGET_10X_TX_CHAIN_MASK		(BIT(0) | BIT(1) | BIT(2))
+#define TARGET_10X_RX_CHAIN_MASK		(BIT(0) | BIT(1) | BIT(2))
+#define TARGET_10X_RX_TIMEOUT_LO_PRI		100
+#define TARGET_10X_RX_TIMEOUT_HI_PRI		40
+#define TARGET_10X_SCAN_MAX_PENDING_REQS	4
+#define TARGET_10X_BMISS_OFFLOAD_MAX_VDEV	2
+#define TARGET_10X_ROAM_OFFLOAD_MAX_VDEV	2
+#define TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES	8
+#define TARGET_10X_GTK_OFFLOAD_MAX_VDEV		3
+#define TARGET_10X_NUM_MCAST_GROUPS		0
+#define TARGET_10X_NUM_MCAST_TABLE_ELEMS	0
+#define TARGET_10X_MCAST2UCAST_MODE		ATH10K_MCAST2UCAST_DISABLED
+#define TARGET_10X_TX_DBG_LOG_SIZE		1024
+#define TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
+#define TARGET_10X_VOW_CONFIG			0
+#define TARGET_10X_NUM_MSDU_DESC		(1024 + 400)
+#define TARGET_10X_MAX_FRAG_ENTRIES		0
+
+/* 10.2 parameters */
+#define TARGET_10_2_DMA_BURST_SIZE		0
+
+/* Target specific defines for WMI-TLV firmware */
+#define TARGET_TLV_NUM_VDEVS			4
+#define TARGET_TLV_NUM_STATIONS			32
+#define TARGET_TLV_NUM_PEERS			35
+#define TARGET_TLV_NUM_TDLS_VDEVS		1
+#define TARGET_TLV_NUM_TIDS			((TARGET_TLV_NUM_PEERS) * 2)
+#define TARGET_TLV_NUM_MSDU_DESC		(1024 + 32)
+#define TARGET_TLV_NUM_WOW_PATTERNS		22
+
+/* Diagnostic Window */
+#define CE_DIAG_PIPE	7
+
+#define NUM_TARGET_CE_CONFIG_WLAN ar->hw_values->num_target_ce_config_wlan
+
+/* Target specific defines for 10.4 firmware */
+#define TARGET_10_4_NUM_VDEVS			16
+#define TARGET_10_4_NUM_STATIONS		32
+#define TARGET_10_4_NUM_PEERS			((TARGET_10_4_NUM_STATIONS) + \
+						 (TARGET_10_4_NUM_VDEVS))
+#define TARGET_10_4_ACTIVE_PEERS		0
+
+#define TARGET_10_4_NUM_QCACHE_PEERS_MAX	512
+#define TARGET_10_4_QCACHE_ACTIVE_PEERS		50
+#define TARGET_10_4_NUM_OFFLOAD_PEERS		0
+#define TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS	0
+#define TARGET_10_4_NUM_PEER_KEYS		2
+#define TARGET_10_4_TGT_NUM_TIDS		((TARGET_10_4_NUM_PEERS) * 2)
+#define TARGET_10_4_AST_SKID_LIMIT		32
+#define TARGET_10_4_TX_CHAIN_MASK		(BIT(0) | BIT(1) | \
+						 BIT(2) | BIT(3))
+#define TARGET_10_4_RX_CHAIN_MASK		(BIT(0) | BIT(1) | \
+						 BIT(2) | BIT(3))
+
+/* 100 ms for video, best-effort, and background */
+#define TARGET_10_4_RX_TIMEOUT_LO_PRI		100
+
+/* 40 ms for voice */
+#define TARGET_10_4_RX_TIMEOUT_HI_PRI		40
+
+#define TARGET_10_4_RX_DECAP_MODE		ATH10K_HW_TXRX_NATIVE_WIFI
+#define TARGET_10_4_SCAN_MAX_REQS		4
+#define TARGET_10_4_BMISS_OFFLOAD_MAX_VDEV	3
+#define TARGET_10_4_ROAM_OFFLOAD_MAX_VDEV	3
+#define TARGET_10_4_ROAM_OFFLOAD_MAX_PROFILES   8
+
+/* Note: mcast to ucast is disabled by default */
+#define TARGET_10_4_NUM_MCAST_GROUPS		0
+#define TARGET_10_4_NUM_MCAST_TABLE_ELEMS	0
+#define TARGET_10_4_MCAST2UCAST_MODE		0
+
+#define TARGET_10_4_TX_DBG_LOG_SIZE		1024
+#define TARGET_10_4_NUM_WDS_ENTRIES		32
+#define TARGET_10_4_DMA_BURST_SIZE		0
+#define TARGET_10_4_MAC_AGGR_DELIM		0
+#define TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
+#define TARGET_10_4_VOW_CONFIG			0
+#define TARGET_10_4_GTK_OFFLOAD_MAX_VDEV	3
+#define TARGET_10_4_NUM_MSDU_DESC		(1024 + 400)
+#define TARGET_10_4_11AC_TX_MAX_FRAGS		2
+#define TARGET_10_4_MAX_PEER_EXT_STATS		16
+#define TARGET_10_4_SMART_ANT_CAP		0
+#define TARGET_10_4_BK_MIN_FREE			0
+#define TARGET_10_4_BE_MIN_FREE			0
+#define TARGET_10_4_VI_MIN_FREE			0
+#define TARGET_10_4_VO_MIN_FREE			0
+#define TARGET_10_4_RX_BATCH_MODE		1
+#define TARGET_10_4_THERMAL_THROTTLING_CONFIG	0
+#define TARGET_10_4_ATF_CONFIG			0
+#define TARGET_10_4_IPHDR_PAD_CONFIG		1
+#define TARGET_10_4_QWRAP_CONFIG		0
+
+/* Number of Copy Engines supported */
+#define CE_COUNT ar->hw_values->ce_count
+
+/*
+ * Granted MSIs are assigned as follows:
+ * The firmware uses the first MSI.
+ * The remaining MSIs, if any, are used by the Copy Engines.
+ * This mapping is known to both Target firmware and Host software.
+ * It may be changed as long as Host and Target are kept in sync.
+ */
+/* MSI for firmware (errors, etc.) */
+#define MSI_ASSIGN_FW		0
+
+/* MSIs for Copy Engines */
+#define MSI_ASSIGN_CE_INITIAL	1
+#define MSI_ASSIGN_CE_MAX	ar->hw_values->msi_assign_ce_max
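+
+/* On QCA988X, for example, msi_assign_ce_max is 7, so a full MSI grant maps
+ * MSI 0 to firmware indications and MSIs 1-7 to the Copy Engine interrupts.
+ */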
+
+/* as of IP3.7.1 */
+#define RTC_STATE_V_ON				ar->hw_values->rtc_state_val_on
+
+#define RTC_STATE_COLD_RESET_MASK		ar->regs->rtc_state_cold_reset_mask
+#define RTC_STATE_V_LSB				0
+#define RTC_STATE_V_MASK			0x00000007
+#define RTC_STATE_ADDRESS			0x0000
+#define PCIE_SOC_WAKE_V_MASK			0x00000001
+#define PCIE_SOC_WAKE_ADDRESS			0x0004
+#define PCIE_SOC_WAKE_RESET			0x00000000
+#define SOC_GLOBAL_RESET_ADDRESS		0x0008
+
+#define RTC_SOC_BASE_ADDRESS			ar->regs->rtc_soc_base_address
+#define RTC_WMAC_BASE_ADDRESS			ar->regs->rtc_wmac_base_address
+#define MAC_COEX_BASE_ADDRESS			0x00006000
+#define BT_COEX_BASE_ADDRESS			0x00007000
+#define SOC_PCIE_BASE_ADDRESS			0x00008000
+#define SOC_CORE_BASE_ADDRESS			ar->regs->soc_core_base_address
+#define WLAN_UART_BASE_ADDRESS			0x0000c000
+#define WLAN_SI_BASE_ADDRESS			0x00010000
+#define WLAN_GPIO_BASE_ADDRESS			0x00014000
+#define WLAN_ANALOG_INTF_BASE_ADDRESS		0x0001c000
+#define WLAN_MAC_BASE_ADDRESS			0x00020000
+#define EFUSE_BASE_ADDRESS			0x00030000
+#define FPGA_REG_BASE_ADDRESS			0x00039000
+#define WLAN_UART2_BASE_ADDRESS			0x00054c00
+#define CE_WRAPPER_BASE_ADDRESS			ar->regs->ce_wrapper_base_address
+#define CE0_BASE_ADDRESS			ar->regs->ce0_base_address
+#define CE1_BASE_ADDRESS			ar->regs->ce1_base_address
+#define CE2_BASE_ADDRESS			ar->regs->ce2_base_address
+#define CE3_BASE_ADDRESS			ar->regs->ce3_base_address
+#define CE4_BASE_ADDRESS			ar->regs->ce4_base_address
+#define CE5_BASE_ADDRESS			ar->regs->ce5_base_address
+#define CE6_BASE_ADDRESS			ar->regs->ce6_base_address
+#define CE7_BASE_ADDRESS			ar->regs->ce7_base_address
+#define DBI_BASE_ADDRESS			0x00060000
+#define WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS	0x0006c000
+#define PCIE_LOCAL_BASE_ADDRESS		ar->regs->pcie_local_base_address
+
+#define SOC_RESET_CONTROL_ADDRESS		0x00000000
+#define SOC_RESET_CONTROL_OFFSET		0x00000000
+#define SOC_RESET_CONTROL_SI0_RST_MASK		ar->regs->soc_reset_control_si0_rst_mask
+#define SOC_RESET_CONTROL_CE_RST_MASK		ar->regs->soc_reset_control_ce_rst_mask
+#define SOC_RESET_CONTROL_CPU_WARM_RST_MASK	0x00000040
+#define SOC_CPU_CLOCK_OFFSET			0x00000020
+#define SOC_CPU_CLOCK_STANDARD_LSB		0
+#define SOC_CPU_CLOCK_STANDARD_MASK		0x00000003
+#define SOC_CLOCK_CONTROL_OFFSET		0x00000028
+#define SOC_CLOCK_CONTROL_SI0_CLK_MASK		0x00000001
+#define SOC_SYSTEM_SLEEP_OFFSET			0x000000c4
+#define SOC_LPO_CAL_OFFSET			0x000000e0
+#define SOC_LPO_CAL_ENABLE_LSB			20
+#define SOC_LPO_CAL_ENABLE_MASK			0x00100000
+#define SOC_LF_TIMER_CONTROL0_ADDRESS		0x00000050
+#define SOC_LF_TIMER_CONTROL0_ENABLE_MASK	0x00000004
+
+#define SOC_CHIP_ID_ADDRESS			ar->regs->soc_chip_id_address
+#define SOC_CHIP_ID_REV_LSB			8
+#define SOC_CHIP_ID_REV_MASK			0x00000f00
+
+#define WLAN_RESET_CONTROL_COLD_RST_MASK	0x00000008
+#define WLAN_RESET_CONTROL_WARM_RST_MASK	0x00000004
+#define WLAN_SYSTEM_SLEEP_DISABLE_LSB		0
+#define WLAN_SYSTEM_SLEEP_DISABLE_MASK		0x00000001
+
+#define WLAN_GPIO_PIN0_ADDRESS			0x00000028
+#define WLAN_GPIO_PIN0_CONFIG_MASK		0x00007800
+#define WLAN_GPIO_PIN1_ADDRESS			0x0000002c
+#define WLAN_GPIO_PIN1_CONFIG_MASK		0x00007800
+#define WLAN_GPIO_PIN10_ADDRESS			0x00000050
+#define WLAN_GPIO_PIN11_ADDRESS			0x00000054
+#define WLAN_GPIO_PIN12_ADDRESS			0x00000058
+#define WLAN_GPIO_PIN13_ADDRESS			0x0000005c
+
+#define CLOCK_GPIO_OFFSET			0xffffffff
+#define CLOCK_GPIO_BT_CLK_OUT_EN_LSB		0
+#define CLOCK_GPIO_BT_CLK_OUT_EN_MASK		0
+
+#define SI_CONFIG_OFFSET			0x00000000
+#define SI_CONFIG_BIDIR_OD_DATA_LSB		18
+#define SI_CONFIG_BIDIR_OD_DATA_MASK		0x00040000
+#define SI_CONFIG_I2C_LSB			16
+#define SI_CONFIG_I2C_MASK			0x00010000
+#define SI_CONFIG_POS_SAMPLE_LSB		7
+#define SI_CONFIG_POS_SAMPLE_MASK		0x00000080
+#define SI_CONFIG_INACTIVE_DATA_LSB		5
+#define SI_CONFIG_INACTIVE_DATA_MASK		0x00000020
+#define SI_CONFIG_INACTIVE_CLK_LSB		4
+#define SI_CONFIG_INACTIVE_CLK_MASK		0x00000010
+#define SI_CONFIG_DIVIDER_LSB			0
+#define SI_CONFIG_DIVIDER_MASK			0x0000000f
+#define SI_CS_OFFSET				0x00000004
+#define SI_CS_DONE_ERR_MASK			0x00000400
+#define SI_CS_DONE_INT_MASK			0x00000200
+#define SI_CS_START_LSB				8
+#define SI_CS_START_MASK			0x00000100
+#define SI_CS_RX_CNT_LSB			4
+#define SI_CS_RX_CNT_MASK			0x000000f0
+#define SI_CS_TX_CNT_LSB			0
+#define SI_CS_TX_CNT_MASK			0x0000000f
+
+#define SI_TX_DATA0_OFFSET			0x00000008
+#define SI_TX_DATA1_OFFSET			0x0000000c
+#define SI_RX_DATA0_OFFSET			0x00000010
+#define SI_RX_DATA1_OFFSET			0x00000014
+
+#define CORE_CTRL_CPU_INTR_MASK			0x00002000
+#define CORE_CTRL_PCIE_REG_31_MASK		0x00000800
+#define CORE_CTRL_ADDRESS			0x0000
+#define PCIE_INTR_ENABLE_ADDRESS		0x0008
+#define PCIE_INTR_CAUSE_ADDRESS			0x000c
+#define PCIE_INTR_CLR_ADDRESS			ar->regs->pcie_intr_clr_address
+#define SCRATCH_3_ADDRESS			ar->regs->scratch_3_address
+#define CPU_INTR_ADDRESS			0x0010
+
+#define CCNT_TO_MSEC(ar, x) ((x) / ar->hw_params.channel_counters_freq_hz)
+
+/* Firmware indications to the Host via SCRATCH_3 register. */
+#define FW_INDICATOR_ADDRESS			ar->regs->fw_indicator_address
+#define FW_IND_EVENT_PENDING			1
+#define FW_IND_INITIALIZED			2
+
+/* HOST_REG interrupt from firmware */
+#define PCIE_INTR_FIRMWARE_MASK			ar->regs->pcie_intr_fw_mask
+#define PCIE_INTR_CE_MASK_ALL			ar->regs->pcie_intr_ce_mask_all
+
+#define DRAM_BASE_ADDRESS			0x00400000
+
+#define PCIE_BAR_REG_ADDRESS			0x40030
+
+#define MISSING 0
+
+#define SYSTEM_SLEEP_OFFSET			SOC_SYSTEM_SLEEP_OFFSET
+#define WLAN_SYSTEM_SLEEP_OFFSET		SOC_SYSTEM_SLEEP_OFFSET
+#define WLAN_RESET_CONTROL_OFFSET		SOC_RESET_CONTROL_OFFSET
+#define CLOCK_CONTROL_OFFSET			SOC_CLOCK_CONTROL_OFFSET
+#define CLOCK_CONTROL_SI0_CLK_MASK		SOC_CLOCK_CONTROL_SI0_CLK_MASK
+#define RESET_CONTROL_MBOX_RST_MASK		MISSING
+#define RESET_CONTROL_SI0_RST_MASK		SOC_RESET_CONTROL_SI0_RST_MASK
+#define GPIO_BASE_ADDRESS			WLAN_GPIO_BASE_ADDRESS
+#define GPIO_PIN0_OFFSET			WLAN_GPIO_PIN0_ADDRESS
+#define GPIO_PIN1_OFFSET			WLAN_GPIO_PIN1_ADDRESS
+#define GPIO_PIN0_CONFIG_MASK			WLAN_GPIO_PIN0_CONFIG_MASK
+#define GPIO_PIN1_CONFIG_MASK			WLAN_GPIO_PIN1_CONFIG_MASK
+#define SI_BASE_ADDRESS				WLAN_SI_BASE_ADDRESS
+#define SCRATCH_BASE_ADDRESS			SOC_CORE_BASE_ADDRESS
+#define LOCAL_SCRATCH_OFFSET			0x18
+#define CPU_CLOCK_OFFSET			SOC_CPU_CLOCK_OFFSET
+#define LPO_CAL_OFFSET				SOC_LPO_CAL_OFFSET
+#define GPIO_PIN10_OFFSET			WLAN_GPIO_PIN10_ADDRESS
+#define GPIO_PIN11_OFFSET			WLAN_GPIO_PIN11_ADDRESS
+#define GPIO_PIN12_OFFSET			WLAN_GPIO_PIN12_ADDRESS
+#define GPIO_PIN13_OFFSET			WLAN_GPIO_PIN13_ADDRESS
+#define CPU_CLOCK_STANDARD_LSB			SOC_CPU_CLOCK_STANDARD_LSB
+#define CPU_CLOCK_STANDARD_MASK			SOC_CPU_CLOCK_STANDARD_MASK
+#define LPO_CAL_ENABLE_LSB			SOC_LPO_CAL_ENABLE_LSB
+#define LPO_CAL_ENABLE_MASK			SOC_LPO_CAL_ENABLE_MASK
+#define ANALOG_INTF_BASE_ADDRESS		WLAN_ANALOG_INTF_BASE_ADDRESS
+#define MBOX_BASE_ADDRESS			MISSING
+#define INT_STATUS_ENABLE_ERROR_LSB		MISSING
+#define INT_STATUS_ENABLE_ERROR_MASK		MISSING
+#define INT_STATUS_ENABLE_CPU_LSB		MISSING
+#define INT_STATUS_ENABLE_CPU_MASK		MISSING
+#define INT_STATUS_ENABLE_COUNTER_LSB		MISSING
+#define INT_STATUS_ENABLE_COUNTER_MASK		MISSING
+#define INT_STATUS_ENABLE_MBOX_DATA_LSB		MISSING
+#define INT_STATUS_ENABLE_MBOX_DATA_MASK	MISSING
+#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB	MISSING
+#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK	MISSING
+#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB	MISSING
+#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK	MISSING
+#define COUNTER_INT_STATUS_ENABLE_BIT_LSB	MISSING
+#define COUNTER_INT_STATUS_ENABLE_BIT_MASK	MISSING
+#define INT_STATUS_ENABLE_ADDRESS		MISSING
+#define CPU_INT_STATUS_ENABLE_BIT_LSB		MISSING
+#define CPU_INT_STATUS_ENABLE_BIT_MASK		MISSING
+#define HOST_INT_STATUS_ADDRESS			MISSING
+#define CPU_INT_STATUS_ADDRESS			MISSING
+#define ERROR_INT_STATUS_ADDRESS		MISSING
+#define ERROR_INT_STATUS_WAKEUP_MASK		MISSING
+#define ERROR_INT_STATUS_WAKEUP_LSB		MISSING
+#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK	MISSING
+#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB	MISSING
+#define ERROR_INT_STATUS_TX_OVERFLOW_MASK	MISSING
+#define ERROR_INT_STATUS_TX_OVERFLOW_LSB	MISSING
+#define COUNT_DEC_ADDRESS			MISSING
+#define HOST_INT_STATUS_CPU_MASK		MISSING
+#define HOST_INT_STATUS_CPU_LSB			MISSING
+#define HOST_INT_STATUS_ERROR_MASK		MISSING
+#define HOST_INT_STATUS_ERROR_LSB		MISSING
+#define HOST_INT_STATUS_COUNTER_MASK		MISSING
+#define HOST_INT_STATUS_COUNTER_LSB		MISSING
+#define RX_LOOKAHEAD_VALID_ADDRESS		MISSING
+#define WINDOW_DATA_ADDRESS			MISSING
+#define WINDOW_READ_ADDR_ADDRESS		MISSING
+#define WINDOW_WRITE_ADDR_ADDRESS		MISSING
+
+#define RTC_STATE_V_GET(x) (((x) & RTC_STATE_V_MASK) >> RTC_STATE_V_LSB)
+
+#endif /* _HW_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
new file mode 100644
index 0000000..6decf4a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -0,0 +1,7388 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mac.h"
+
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+
+#include "hif.h"
+#include "core.h"
+#include "debug.h"
+#include "wmi.h"
+#include "htt.h"
+#include "txrx.h"
+#include "testmode.h"
+#include "wmi.h"
+#include "wmi-tlv.h"
+#include "wmi-ops.h"
+#include "wow.h"
+
+/*********/
+/* Rates */
+/*********/
+
+static struct ieee80211_rate ath10k_rates[] = {
+	{ .bitrate = 10,
+	  .hw_value = ATH10K_HW_RATE_CCK_LP_1M },
+	{ .bitrate = 20,
+	  .hw_value = ATH10K_HW_RATE_CCK_LP_2M,
+	  .hw_value_short = ATH10K_HW_RATE_CCK_SP_2M,
+	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+	{ .bitrate = 55,
+	  .hw_value = ATH10K_HW_RATE_CCK_LP_5_5M,
+	  .hw_value_short = ATH10K_HW_RATE_CCK_SP_5_5M,
+	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+	{ .bitrate = 110,
+	  .hw_value = ATH10K_HW_RATE_CCK_LP_11M,
+	  .hw_value_short = ATH10K_HW_RATE_CCK_SP_11M,
+	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+
+	{ .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
+	{ .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
+	{ .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
+	{ .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
+	{ .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
+	{ .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
+	{ .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
+	{ .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
+};
+
+#define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4
+
+#define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX)
+#define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - \
+			     ATH10K_MAC_FIRST_OFDM_RATE_IDX)
+#define ath10k_g_rates (ath10k_rates + 0)
+#define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
+
+static bool ath10k_mac_bitrate_is_cck(int bitrate)
+{
+	switch (bitrate) {
+	case 10:
+	case 20:
+	case 55:
+	case 110:
+		return true;
+	}
+
+	return false;
+}
+
+static u8 ath10k_mac_bitrate_to_rate(int bitrate)
+{
+	return DIV_ROUND_UP(bitrate, 5) |
+	       (ath10k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0);
+}
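+
+/* For example the 5.5 Mbps CCK rate (bitrate 55, in 100 kbps units) becomes
+ * DIV_ROUND_UP(55, 5) = 11, i.e. the rate in 500 kbps units, with BIT(7) set
+ * to flag it as a CCK rate.
+ */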
+
+u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
+			     u8 hw_rate)
+{
+	const struct ieee80211_rate *rate;
+	int i;
+
+	for (i = 0; i < sband->n_bitrates; i++) {
+		rate = &sband->bitrates[i];
+
+		if (rate->hw_value == hw_rate)
+			return i;
+		else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE &&
+			 rate->hw_value_short == hw_rate)
+			return i;
+	}
+
+	return 0;
+}
+
+u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
+			     u32 bitrate)
+{
+	int i;
+
+	for (i = 0; i < sband->n_bitrates; i++)
+		if (sband->bitrates[i].bitrate == bitrate)
+			return i;
+
+	return 0;
+}
+
+static int ath10k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss)
+{
+	switch ((mcs_map >> (2 * nss)) & 0x3) {
+	case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1;
+	case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1;
+	case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1;
+	}
+	return 0;
+}
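+
+/* For example IEEE80211_VHT_MCS_SUPPORT_0_9 yields BIT(10) - 1 = 0x3ff, a
+ * bitmask with MCS 0 through 9 set for the given spatial stream.
+ */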
+
+static u32
+ath10k_mac_max_ht_nss(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+{
+	int nss;
+
+	for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--)
+		if (ht_mcs_mask[nss])
+			return nss + 1;
+
+	return 1;
+}
+
+static u32
+ath10k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
+{
+	int nss;
+
+	for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--)
+		if (vht_mcs_mask[nss])
+			return nss + 1;
+
+	return 1;
+}
+
+/**********/
+/* Crypto */
+/**********/
+
+static int ath10k_send_key(struct ath10k_vif *arvif,
+			   struct ieee80211_key_conf *key,
+			   enum set_key_cmd cmd,
+			   const u8 *macaddr, u32 flags)
+{
+	struct ath10k *ar = arvif->ar;
+	struct wmi_vdev_install_key_arg arg = {
+		.vdev_id = arvif->vdev_id,
+		.key_idx = key->keyidx,
+		.key_len = key->keylen,
+		.key_data = key->key,
+		.key_flags = flags,
+		.macaddr = macaddr,
+	};
+
+	lockdep_assert_held(&arvif->ar->conf_mutex);
+
+	switch (key->cipher) {
+	case WLAN_CIPHER_SUITE_CCMP:
+		arg.key_cipher = WMI_CIPHER_AES_CCM;
+		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
+		break;
+	case WLAN_CIPHER_SUITE_TKIP:
+		arg.key_cipher = WMI_CIPHER_TKIP;
+		arg.key_txmic_len = 8;
+		arg.key_rxmic_len = 8;
+		break;
+	case WLAN_CIPHER_SUITE_WEP40:
+	case WLAN_CIPHER_SUITE_WEP104:
+		arg.key_cipher = WMI_CIPHER_WEP;
+		break;
+	case WLAN_CIPHER_SUITE_AES_CMAC:
+		WARN_ON(1);
+		return -EINVAL;
+	default:
+		ath10k_warn(ar, "cipher %d is not supported\n", key->cipher);
+		return -EOPNOTSUPP;
+	}
+
+	if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
+		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+
+	if (cmd == DISABLE_KEY) {
+		arg.key_cipher = WMI_CIPHER_NONE;
+		arg.key_data = NULL;
+	}
+
+	return ath10k_wmi_vdev_install_key(arvif->ar, &arg);
+}
+
+static int ath10k_install_key(struct ath10k_vif *arvif,
+			      struct ieee80211_key_conf *key,
+			      enum set_key_cmd cmd,
+			      const u8 *macaddr, u32 flags)
+{
+	struct ath10k *ar = arvif->ar;
+	int ret;
+	unsigned long time_left;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	reinit_completion(&ar->install_key_done);
+
+	if (arvif->nohwcrypt)
+		return 1;
+
+	ret = ath10k_send_key(arvif, key, cmd, macaddr, flags);
+	if (ret)
+		return ret;
+
+	time_left = wait_for_completion_timeout(&ar->install_key_done, 3 * HZ);
+	if (time_left == 0)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
+					const u8 *addr)
+{
+	struct ath10k *ar = arvif->ar;
+	struct ath10k_peer *peer;
+	int ret;
+	int i;
+	u32 flags;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	if (WARN_ON(arvif->vif->type != NL80211_IFTYPE_AP &&
+		    arvif->vif->type != NL80211_IFTYPE_ADHOC))
+		return -EINVAL;
+
+	spin_lock_bh(&ar->data_lock);
+	peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
+	spin_unlock_bh(&ar->data_lock);
+
+	if (!peer)
+		return -ENOENT;
+
+	for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) {
+		if (arvif->wep_keys[i] == NULL)
+			continue;
+
+		switch (arvif->vif->type) {
+		case NL80211_IFTYPE_AP:
+			flags = WMI_KEY_PAIRWISE;
+
+			if (arvif->def_wep_key_idx == i)
+				flags |= WMI_KEY_TX_USAGE;
+
+			ret = ath10k_install_key(arvif, arvif->wep_keys[i],
+						 SET_KEY, addr, flags);
+			if (ret < 0)
+				return ret;
+			break;
+		case NL80211_IFTYPE_ADHOC:
+			ret = ath10k_install_key(arvif, arvif->wep_keys[i],
+						 SET_KEY, addr,
+						 WMI_KEY_PAIRWISE);
+			if (ret < 0)
+				return ret;
+
+			ret = ath10k_install_key(arvif, arvif->wep_keys[i],
+						 SET_KEY, addr, WMI_KEY_GROUP);
+			if (ret < 0)
+				return ret;
+			break;
+		default:
+			WARN_ON(1);
+			return -EINVAL;
+		}
+
+		spin_lock_bh(&ar->data_lock);
+		peer->keys[i] = arvif->wep_keys[i];
+		spin_unlock_bh(&ar->data_lock);
+	}
+
+	/* In some cases (notably with static WEP IBSS with multiple keys)
+	 * multicast Tx becomes broken. Both pairwise and groupwise keys are
+	 * installed already. Using WMI_KEY_TX_USAGE in different combinations
+	 * didn't seem help. Using def_keyid vdev parameter seems to be
+	 * didn't seem to help. Using the def_keyid vdev parameter seems to be
+	 *
+	 * FIXME: Revisit. Perhaps this can be done in a less hacky way.
+	 */
+	if (arvif->vif->type != NL80211_IFTYPE_ADHOC)
+		return 0;
+
+	if (arvif->def_wep_key_idx == -1)
+		return 0;
+
+	ret = ath10k_wmi_vdev_set_param(arvif->ar,
+					arvif->vdev_id,
+					arvif->ar->wmi.vdev_param->def_keyid,
+					arvif->def_wep_key_idx);
+	if (ret) {
+		ath10k_warn(ar, "failed to re-set def wep key idx on vdev %i: %d\n",
+			    arvif->vdev_id, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
+				  const u8 *addr)
+{
+	struct ath10k *ar = arvif->ar;
+	struct ath10k_peer *peer;
+	int first_errno = 0;
+	int ret;
+	int i;
+	u32 flags = 0;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	spin_lock_bh(&ar->data_lock);
+	peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
+	spin_unlock_bh(&ar->data_lock);
+
+	if (!peer)
+		return -ENOENT;
+
+	for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
+		if (peer->keys[i] == NULL)
+			continue;
+
+		/* key flags are not required to delete the key */
+		ret = ath10k_install_key(arvif, peer->keys[i],
+					 DISABLE_KEY, addr, flags);
+		if (ret < 0 && first_errno == 0)
+			first_errno = ret;
+
+		if (ret < 0)
+			ath10k_warn(ar, "failed to remove peer wep key %d: %d\n",
+				    i, ret);
+
+		spin_lock_bh(&ar->data_lock);
+		peer->keys[i] = NULL;
+		spin_unlock_bh(&ar->data_lock);
+	}
+
+	return first_errno;
+}
+
+bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
+				    u8 keyidx)
+{
+	struct ath10k_peer *peer;
+	int i;
+
+	lockdep_assert_held(&ar->data_lock);
+
+	/* We don't know which vdev this peer belongs to,
+	 * since WMI doesn't give us that information.
+	 *
+	 * FIXME: multi-bss needs to be handled.
+	 */
+	peer = ath10k_peer_find(ar, 0, addr);
+	if (!peer)
+		return false;
+
+	for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
+		if (peer->keys[i] && peer->keys[i]->keyidx == keyidx)
+			return true;
+	}
+
+	return false;
+}
+
+static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
+				 struct ieee80211_key_conf *key)
+{
+	struct ath10k *ar = arvif->ar;
+	struct ath10k_peer *peer;
+	u8 addr[ETH_ALEN];
+	int first_errno = 0;
+	int ret;
+	int i;
+	u32 flags = 0;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	for (;;) {
+		/* ath10k_install_key() sleeps, so we can't hold data_lock the
+		 * whole time; instead we try to remove the keys incrementally */
+		spin_lock_bh(&ar->data_lock);
+		i = 0;
+		list_for_each_entry(peer, &ar->peers, list) {
+			for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
+				if (peer->keys[i] == key) {
+					ether_addr_copy(addr, peer->addr);
+					peer->keys[i] = NULL;
+					break;
+				}
+			}
+
+			if (i < ARRAY_SIZE(peer->keys))
+				break;
+		}
+		spin_unlock_bh(&ar->data_lock);
+
+		if (i == ARRAY_SIZE(peer->keys))
+			break;
+		/* key flags are not required to delete the key */
+		ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags);
+		if (ret < 0 && first_errno == 0)
+			first_errno = ret;
+
+		if (ret)
+			ath10k_warn(ar, "failed to remove key for %pM: %d\n",
+				    addr, ret);
+	}
+
+	return first_errno;
+}
+
+static int ath10k_mac_vif_update_wep_key(struct ath10k_vif *arvif,
+					 struct ieee80211_key_conf *key)
+{
+	struct ath10k *ar = arvif->ar;
+	struct ath10k_peer *peer;
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	list_for_each_entry(peer, &ar->peers, list) {
+		if (!memcmp(peer->addr, arvif->vif->addr, ETH_ALEN))
+			continue;
+
+		if (!memcmp(peer->addr, arvif->bssid, ETH_ALEN))
+			continue;
+
+		if (peer->keys[key->keyidx] == key)
+			continue;
+
+		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vif vdev %i update key %i needs update\n",
+			   arvif->vdev_id, key->keyidx);
+
+		ret = ath10k_install_peer_wep_keys(arvif, peer->addr);
+		if (ret) {
+			ath10k_warn(ar, "failed to update wep keys on vdev %i for peer %pM: %d\n",
+				    arvif->vdev_id, peer->addr, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/*********************/
+/* General utilities */
+/*********************/
+
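+/* Map a cfg80211 channel definition to the WMI phymode expected by the
+ * firmware, e.g. a 40 MHz channel on the 2 GHz band becomes MODE_11NG_HT40.
+ * Band/width combinations the firmware cannot handle fall back to
+ * MODE_UNKNOWN and trigger the WARN_ON below.
+ */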
+static inline enum wmi_phy_mode
+chan_to_phymode(const struct cfg80211_chan_def *chandef)
+{
+	enum wmi_phy_mode phymode = MODE_UNKNOWN;
+
+	switch (chandef->chan->band) {
+	case IEEE80211_BAND_2GHZ:
+		switch (chandef->width) {
+		case NL80211_CHAN_WIDTH_20_NOHT:
+			if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM)
+				phymode = MODE_11B;
+			else
+				phymode = MODE_11G;
+			break;
+		case NL80211_CHAN_WIDTH_20:
+			phymode = MODE_11NG_HT20;
+			break;
+		case NL80211_CHAN_WIDTH_40:
+			phymode = MODE_11NG_HT40;
+			break;
+		case NL80211_CHAN_WIDTH_5:
+		case NL80211_CHAN_WIDTH_10:
+		case NL80211_CHAN_WIDTH_80:
+		case NL80211_CHAN_WIDTH_80P80:
+		case NL80211_CHAN_WIDTH_160:
+			phymode = MODE_UNKNOWN;
+			break;
+		}
+		break;
+	case IEEE80211_BAND_5GHZ:
+		switch (chandef->width) {
+		case NL80211_CHAN_WIDTH_20_NOHT:
+			phymode = MODE_11A;
+			break;
+		case NL80211_CHAN_WIDTH_20:
+			phymode = MODE_11NA_HT20;
+			break;
+		case NL80211_CHAN_WIDTH_40:
+			phymode = MODE_11NA_HT40;
+			break;
+		case NL80211_CHAN_WIDTH_80:
+			phymode = MODE_11AC_VHT80;
+			break;
+		case NL80211_CHAN_WIDTH_5:
+		case NL80211_CHAN_WIDTH_10:
+		case NL80211_CHAN_WIDTH_80P80:
+		case NL80211_CHAN_WIDTH_160:
+			phymode = MODE_UNKNOWN;
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+
+	WARN_ON(phymode == MODE_UNKNOWN);
+	return phymode;
+}
+
+static u8 ath10k_parse_mpdudensity(u8 mpdudensity)
+{
+/*
+ * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
+ *   0 for no restriction
+ *   1 for 1/4 us
+ *   2 for 1/2 us
+ *   3 for 1 us
+ *   4 for 2 us
+ *   5 for 4 us
+ *   6 for 8 us
+ *   7 for 16 us
+ */
+	switch (mpdudensity) {
+	case 0:
+		return 0;
+	case 1:
+	case 2:
+	case 3:
+	/* Our lower layer calculations limit our precision to
+	   1 microsecond */
+		return 1;
+	case 4:
+		return 2;
+	case 5:
+		return 4;
+	case 6:
+		return 8;
+	case 7:
+		return 16;
+	default:
+		return 0;
+	}
+}
+
+int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
+			struct cfg80211_chan_def *def)
+{
+	struct ieee80211_chanctx_conf *conf;
+
+	rcu_read_lock();
+	conf = rcu_dereference(vif->chanctx_conf);
+	if (!conf) {
+		rcu_read_unlock();
+		return -ENOENT;
+	}
+
+	*def = conf->def;
+	rcu_read_unlock();
+
+	return 0;
+}
+
+static void ath10k_mac_num_chanctxs_iter(struct ieee80211_hw *hw,
+					 struct ieee80211_chanctx_conf *conf,
+					 void *data)
+{
+	int *num = data;
+
+	(*num)++;
+}
+
+static int ath10k_mac_num_chanctxs(struct ath10k *ar)
+{
+	int num = 0;
+
+	ieee80211_iter_chan_contexts_atomic(ar->hw,
+					    ath10k_mac_num_chanctxs_iter,
+					    &num);
+
+	return num;
+}
+
+static void
+ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
+				struct ieee80211_chanctx_conf *conf,
+				void *data)
+{
+	struct cfg80211_chan_def **def = data;
+
+	*def = &conf->def;
+}
+
+static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr,
+			      enum wmi_peer_type peer_type)
+{
+	struct ath10k_vif *arvif;
+	int num_peers = 0;
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	num_peers = ar->num_peers;
+
+	/* Each vdev consumes a peer entry as well */
+	list_for_each_entry(arvif, &ar->arvifs, list)
+		num_peers++;
+
+	if (num_peers >= ar->max_num_peers)
+		return -ENOBUFS;
+
+	ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type);
+	if (ret) {
+		ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n",
+			    addr, vdev_id, ret);
+		return ret;
+	}
+
+	ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
+	if (ret) {
+		ath10k_warn(ar, "failed to wait for created wmi peer %pM on vdev %i: %i\n",
+			    addr, vdev_id, ret);
+		return ret;
+	}
+
+	ar->num_peers++;
+
+	return 0;
+}
+
+static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
+{
+	struct ath10k *ar = arvif->ar;
+	u32 param;
+	int ret;
+
+	param = ar->wmi.pdev_param->sta_kickout_th;
+	ret = ath10k_wmi_pdev_set_param(ar, param,
+					ATH10K_KICKOUT_THRESHOLD);
+	if (ret) {
+		ath10k_warn(ar, "failed to set kickout threshold on vdev %i: %d\n",
+			    arvif->vdev_id, ret);
+		return ret;
+	}
+
+	param = ar->wmi.vdev_param->ap_keepalive_min_idle_inactive_time_secs;
+	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
+					ATH10K_KEEPALIVE_MIN_IDLE);
+	if (ret) {
+		ath10k_warn(ar, "failed to set keepalive minimum idle time on vdev %i: %d\n",
+			    arvif->vdev_id, ret);
+		return ret;
+	}
+
+	param = ar->wmi.vdev_param->ap_keepalive_max_idle_inactive_time_secs;
+	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
+					ATH10K_KEEPALIVE_MAX_IDLE);
+	if (ret) {
+		ath10k_warn(ar, "failed to set keepalive maximum idle time on vdev %i: %d\n",
+			    arvif->vdev_id, ret);
+		return ret;
+	}
+
+	param = ar->wmi.vdev_param->ap_keepalive_max_unresponsive_time_secs;
+	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
+					ATH10K_KEEPALIVE_MAX_UNRESPONSIVE);
+	if (ret) {
+		ath10k_warn(ar, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
+			    arvif->vdev_id, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
+{
+	struct ath10k *ar = arvif->ar;
+	u32 vdev_param;
+
+	vdev_param = ar->wmi.vdev_param->rts_threshold;
+	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
+}
+
+static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
+{
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	ret = ath10k_wmi_peer_delete(ar, vdev_id, addr);
+	if (ret)
+		return ret;
+
+	ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr);
+	if (ret)
+		return ret;
+
+	ar->num_peers--;
+
+	return 0;
+}
+
+static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
+{
+	struct ath10k_peer *peer, *tmp;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	spin_lock_bh(&ar->data_lock);
+	list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
+		if (peer->vdev_id != vdev_id)
+			continue;
+
+		ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n",
+			    peer->addr, vdev_id);
+
+		list_del(&peer->list);
+		kfree(peer);
+		ar->num_peers--;
+	}
+	spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath10k_peer_cleanup_all(struct ath10k *ar)
+{
+	struct ath10k_peer *peer, *tmp;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	spin_lock_bh(&ar->data_lock);
+	list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
+		list_del(&peer->list);
+		kfree(peer);
+	}
+	spin_unlock_bh(&ar->data_lock);
+
+	ar->num_peers = 0;
+	ar->num_stations = 0;
+}
+
+static int ath10k_mac_tdls_peer_update(struct ath10k *ar, u32 vdev_id,
+				       struct ieee80211_sta *sta,
+				       enum wmi_tdls_peer_state state)
+{
+	int ret;
+	struct wmi_tdls_peer_update_cmd_arg arg = {};
+	struct wmi_tdls_peer_capab_arg cap = {};
+	struct wmi_channel_arg chan_arg = {};
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	arg.vdev_id = vdev_id;
+	arg.peer_state = state;
+	ether_addr_copy(arg.addr, sta->addr);
+
+	cap.peer_max_sp = sta->max_sp;
+	cap.peer_uapsd_queues = sta->uapsd_queues;
+
+	if (state == WMI_TDLS_PEER_STATE_CONNECTED &&
+	    !sta->tdls_initiator)
+		cap.is_peer_responder = 1;
+
+	ret = ath10k_wmi_tdls_peer_update(ar, &arg, &cap, &chan_arg);
+	if (ret) {
+		ath10k_warn(ar, "failed to update tdls peer %pM on vdev %i: %i\n",
+			    arg.addr, vdev_id, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+/************************/
+/* Interface management */
+/************************/
+
+void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif)
+{
+	struct ath10k *ar = arvif->ar;
+
+	lockdep_assert_held(&ar->data_lock);
+
+	if (!arvif->beacon)
+		return;
+
+	if (!arvif->beacon_buf)
+		dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr,
+				 arvif->beacon->len, DMA_TO_DEVICE);
+
+	if (WARN_ON(arvif->beacon_state != ATH10K_BEACON_SCHEDULED &&
+		    arvif->beacon_state != ATH10K_BEACON_SENT))
+		return;
+
+	dev_kfree_skb_any(arvif->beacon);
+
+	arvif->beacon = NULL;
+	arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
+}
+
+static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif)
+{
+	struct ath10k *ar = arvif->ar;
+
+	lockdep_assert_held(&ar->data_lock);
+
+	ath10k_mac_vif_beacon_free(arvif);
+
+	if (arvif->beacon_buf) {
+		dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
+				  arvif->beacon_buf, arvif->beacon_paddr);
+		arvif->beacon_buf = NULL;
+	}
+}
+
+static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
+{
+	unsigned long time_left;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
+		return -ESHUTDOWN;
+
+	time_left = wait_for_completion_timeout(&ar->vdev_setup_done,
+						ATH10K_VDEV_SETUP_TIMEOUT_HZ);
+	if (time_left == 0)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
+{
+	struct cfg80211_chan_def *chandef = NULL;
+	struct ieee80211_channel *channel = NULL;
+	struct wmi_vdev_start_request_arg arg = {};
+	int ret = 0;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	ieee80211_iter_chan_contexts_atomic(ar->hw,
+					    ath10k_mac_get_any_chandef_iter,
+					    &chandef);
+	if (WARN_ON_ONCE(!chandef))
+		return -ENOENT;
+
+	channel = chandef->chan;
+
+	arg.vdev_id = vdev_id;
+	arg.channel.freq = channel->center_freq;
+	arg.channel.band_center_freq1 = chandef->center_freq1;
+
+	/* TODO: set this up dynamically; what about the case where we
+	 * don't have any vifs? */
+	arg.channel.mode = chan_to_phymode(chandef);
+	arg.channel.chan_radar =
+			!!(channel->flags & IEEE80211_CHAN_RADAR);
+
+	arg.channel.min_power = 0;
+	arg.channel.max_power = channel->max_power * 2;
+	arg.channel.max_reg_power = channel->max_reg_power * 2;
+	arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;
+
+	reinit_completion(&ar->vdev_setup_done);
+
+	ret = ath10k_wmi_vdev_start(ar, &arg);
+	if (ret) {
+		ath10k_warn(ar, "failed to request monitor vdev %i start: %d\n",
+			    vdev_id, ret);
+		return ret;
+	}
+
+	ret = ath10k_vdev_setup_sync(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i start: %d\n",
+			    vdev_id, ret);
+		return ret;
+	}
+
+	ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
+	if (ret) {
+		ath10k_warn(ar, "failed to put up monitor vdev %i: %d\n",
+			    vdev_id, ret);
+		goto vdev_stop;
+	}
+
+	ar->monitor_vdev_id = vdev_id;
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i started\n",
+		   ar->monitor_vdev_id);
+	return 0;
+
+vdev_stop:
+	ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
+	if (ret)
+		ath10k_warn(ar, "failed to stop monitor vdev %i after start failure: %d\n",
+			    ar->monitor_vdev_id, ret);
+
+	return ret;
+}
+
+static int ath10k_monitor_vdev_stop(struct ath10k *ar)
+{
+	int ret = 0;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
+	if (ret)
+		ath10k_warn(ar, "failed to put down monitor vdev %i: %d\n",
+			    ar->monitor_vdev_id, ret);
+
+	reinit_completion(&ar->vdev_setup_done);
+
+	ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
+	if (ret)
+		ath10k_warn(ar, "failed to request monitor vdev %i stop: %d\n",
+			    ar->monitor_vdev_id, ret);
+
+	ret = ath10k_vdev_setup_sync(ar);
+	if (ret)
+		ath10k_warn(ar, "failed to synchronize monitor vdev %i stop: %d\n",
+			    ar->monitor_vdev_id, ret);
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
+		   ar->monitor_vdev_id);
+	return ret;
+}
+
+static int ath10k_monitor_vdev_create(struct ath10k *ar)
+{
+	int bit, ret = 0;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	if (ar->free_vdev_map == 0) {
+		ath10k_warn(ar, "failed to find free vdev id for monitor vdev\n");
+		return -ENOMEM;
+	}
+
+	bit = __ffs64(ar->free_vdev_map);
+
+	ar->monitor_vdev_id = bit;
+
+	ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id,
+				     WMI_VDEV_TYPE_MONITOR,
+				     0, ar->mac_addr);
+	if (ret) {
+		ath10k_warn(ar, "failed to request monitor vdev %i creation: %d\n",
+			    ar->monitor_vdev_id, ret);
+		return ret;
+	}
+
+	ar->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
+	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
+		   ar->monitor_vdev_id);
+
+	return 0;
+}
+
+static int ath10k_monitor_vdev_delete(struct ath10k *ar)
+{
+	int ret = 0;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
+	if (ret) {
+		ath10k_warn(ar, "failed to request wmi monitor vdev %i removal: %d\n",
+			    ar->monitor_vdev_id, ret);
+		return ret;
+	}
+
+	ar->free_vdev_map |= 1LL << ar->monitor_vdev_id;
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
+		   ar->monitor_vdev_id);
+	return ret;
+}
+
+static int ath10k_monitor_start(struct ath10k *ar)
+{
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	ret = ath10k_monitor_vdev_create(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to create monitor vdev: %d\n", ret);
+		return ret;
+	}
+
+	ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id);
+	if (ret) {
+		ath10k_warn(ar, "failed to start monitor vdev: %d\n", ret);
+		ath10k_monitor_vdev_delete(ar);
+		return ret;
+	}
+
+	ar->monitor_started = true;
+	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor started\n");
+
+	return 0;
+}
+
+static int ath10k_monitor_stop(struct ath10k *ar)
+{
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	ret = ath10k_monitor_vdev_stop(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to stop monitor vdev: %d\n", ret);
+		return ret;
+	}
+
+	ret = ath10k_monitor_vdev_delete(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to delete monitor vdev: %d\n", ret);
+		return ret;
+	}
+
+	ar->monitor_started = false;
+	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopped\n");
+
+	return 0;
+}
+
+static bool ath10k_mac_monitor_vdev_is_needed(struct ath10k *ar)
+{
+	int num_ctx;
+
+	/* At least one chanctx is required to derive a channel to start
+	 * monitor vdev on.
+	 */
+	num_ctx = ath10k_mac_num_chanctxs(ar);
+	if (num_ctx == 0)
+		return false;
+
+	/* If there's already an existing special monitor interface then don't
+	 * bother creating another monitor vdev.
+	 */
+	if (ar->monitor_arvif)
+		return false;
+
+	return ar->monitor ||
+	       ar->filter_flags & FIF_OTHER_BSS ||
+	       test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+}
+
+static bool ath10k_mac_monitor_vdev_is_allowed(struct ath10k *ar)
+{
+	int num_ctx;
+
+	num_ctx = ath10k_mac_num_chanctxs(ar);
+
+	/* FIXME: Current interface combinations and cfg80211/mac80211 code
+	 * shouldn't allow this but make sure to prevent handling the following
+	 * case anyway since multi-channel DFS hasn't been tested at all.
+	 */
+	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags) && num_ctx > 1)
+		return false;
+
+	return true;
+}
+
+static int ath10k_monitor_recalc(struct ath10k *ar)
+{
+	bool needed;
+	bool allowed;
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	needed = ath10k_mac_monitor_vdev_is_needed(ar);
+	allowed = ath10k_mac_monitor_vdev_is_allowed(ar);
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC,
+		   "mac monitor recalc started? %d needed? %d allowed? %d\n",
+		   ar->monitor_started, needed, allowed);
+
+	if (WARN_ON(needed && !allowed)) {
+		if (ar->monitor_started) {
+			ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopping disallowed monitor\n");
+
+			ret = ath10k_monitor_stop(ar);
+			if (ret)
+				ath10k_warn(ar, "failed to stop disallowed monitor: %d\n",
+					    ret);
+				/* not serious */
+		}
+
+		return -EPERM;
+	}
+
+	if (needed == ar->monitor_started)
+		return 0;
+
+	if (needed)
+		return ath10k_monitor_start(ar);
+	else
+		return ath10k_monitor_stop(ar);
+}
+
+static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
+{
+	struct ath10k *ar = arvif->ar;
+	u32 vdev_param, rts_cts = 0;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	vdev_param = ar->wmi.vdev_param->enable_rtscts;
+
+	rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET);
+
+	if (arvif->num_legacy_stations > 0)
+		rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES,
+			      WMI_RTSCTS_PROFILE);
+	else
+		rts_cts |= SM(WMI_RTSCTS_FOR_SECOND_RATESERIES,
+			      WMI_RTSCTS_PROFILE);
+
+	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+					 rts_cts);
+}
+
+static int ath10k_start_cac(struct ath10k *ar)
+{
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+
+	ret = ath10k_monitor_recalc(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to start monitor (cac): %d\n", ret);
+		clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+		return ret;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n",
+		   ar->monitor_vdev_id);
+
+	return 0;
+}
+
+static int ath10k_stop_cac(struct ath10k *ar)
+{
+	lockdep_assert_held(&ar->conf_mutex);
+
+	/* CAC is not running - do nothing */
+	if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags))
+		return 0;
+
+	clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+	ath10k_monitor_stop(ar);
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac finished\n");
+
+	return 0;
+}
+
+static void ath10k_mac_has_radar_iter(struct ieee80211_hw *hw,
+				      struct ieee80211_chanctx_conf *conf,
+				      void *data)
+{
+	bool *ret = data;
+
+	if (!*ret && conf->radar_enabled)
+		*ret = true;
+}
+
+static bool ath10k_mac_has_radar_enabled(struct ath10k *ar)
+{
+	bool has_radar = false;
+
+	ieee80211_iter_chan_contexts_atomic(ar->hw,
+					    ath10k_mac_has_radar_iter,
+					    &has_radar);
+
+	return has_radar;
+}
+
+static void ath10k_recalc_radar_detection(struct ath10k *ar)
+{
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	ath10k_stop_cac(ar);
+
+	if (!ath10k_mac_has_radar_enabled(ar))
+		return;
+
+	if (ar->num_started_vdevs > 0)
+		return;
+
+	ret = ath10k_start_cac(ar);
+	if (ret) {
+		/*
+		 * Not possible to start CAC on current channel so starting
+		 * radiation is not allowed; make this channel DFS_UNAVAILABLE
+		 * by indicating that radar was detected.
+		 */
+		ath10k_warn(ar, "failed to start CAC: %d\n", ret);
+		ieee80211_radar_detected(ar->hw);
+	}
+}
+
+static int ath10k_vdev_stop(struct ath10k_vif *arvif)
+{
+	struct ath10k *ar = arvif->ar;
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	reinit_completion(&ar->vdev_setup_done);
+
+	ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
+	if (ret) {
+		ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n",
+			    arvif->vdev_id, ret);
+		return ret;
+	}
+
+	ret = ath10k_vdev_setup_sync(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to syncronise setup for vdev %i: %d\n",
+			    arvif->vdev_id, ret);
+		return ret;
+	}
+
+	WARN_ON(ar->num_started_vdevs == 0);
+
+	if (ar->num_started_vdevs != 0) {
+		ar->num_started_vdevs--;
+		ath10k_recalc_radar_detection(ar);
+	}
+
+	return ret;
+}
+
+static int ath10k_vdev_start_restart(struct ath10k_vif *arvif,
+				     const struct cfg80211_chan_def *chandef,
+				     bool restart)
+{
+	struct ath10k *ar = arvif->ar;
+	struct wmi_vdev_start_request_arg arg = {};
+	int ret = 0;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	reinit_completion(&ar->vdev_setup_done);
+
+	arg.vdev_id = arvif->vdev_id;
+	arg.dtim_period = arvif->dtim_period;
+	arg.bcn_intval = arvif->beacon_interval;
+
+	arg.channel.freq = chandef->chan->center_freq;
+	arg.channel.band_center_freq1 = chandef->center_freq1;
+	arg.channel.mode = chan_to_phymode(chandef);
+
+	arg.channel.min_power = 0;
+	arg.channel.max_power = chandef->chan->max_power * 2;
+	arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
+	arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
+
+	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+		arg.ssid = arvif->u.ap.ssid;
+		arg.ssid_len = arvif->u.ap.ssid_len;
+		arg.hidden_ssid = arvif->u.ap.hidden_ssid;
+
+		/* For now allow DFS for AP mode */
+		arg.channel.chan_radar =
+			!!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
+	} else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
+		arg.ssid = arvif->vif->bss_conf.ssid;
+		arg.ssid_len = arvif->vif->bss_conf.ssid_len;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC,
+		   "mac vdev %d start center_freq %d phymode %s\n",
+		   arg.vdev_id, arg.channel.freq,
+		   ath10k_wmi_phymode_str(arg.channel.mode));
+
+	if (restart)
+		ret = ath10k_wmi_vdev_restart(ar, &arg);
+	else
+		ret = ath10k_wmi_vdev_start(ar, &arg);
+
+	if (ret) {
+		ath10k_warn(ar, "failed to start WMI vdev %i: %d\n",
+			    arg.vdev_id, ret);
+		return ret;
+	}
+
+	ret = ath10k_vdev_setup_sync(ar);
+	if (ret) {
+		ath10k_warn(ar,
+			    "failed to synchronize setup for vdev %i restart %d: %d\n",
+			    arg.vdev_id, restart, ret);
+		return ret;
+	}
+
+	ar->num_started_vdevs++;
+	ath10k_recalc_radar_detection(ar);
+
+	return ret;
+}
+
+static int ath10k_vdev_start(struct ath10k_vif *arvif,
+			     const struct cfg80211_chan_def *def)
+{
+	return ath10k_vdev_start_restart(arvif, def, false);
+}
+
+static int ath10k_vdev_restart(struct ath10k_vif *arvif,
+			       const struct cfg80211_chan_def *def)
+{
+	return ath10k_vdev_start_restart(arvif, def, true);
+}
+
+static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif,
+				       struct sk_buff *bcn)
+{
+	struct ath10k *ar = arvif->ar;
+	struct ieee80211_mgmt *mgmt;
+	const u8 *p2p_ie;
+	int ret;
+
+	if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
+		return 0;
+
+	if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
+		return 0;
+
+	mgmt = (void *)bcn->data;
+	p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
+					 mgmt->u.beacon.variable,
+					 bcn->len - (mgmt->u.beacon.variable -
+						     bcn->data));
+	if (!p2p_ie)
+		return -ENOENT;
+
+	ret = ath10k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie);
+	if (ret) {
+		ath10k_warn(ar, "failed to submit p2p go bcn ie for vdev %i: %d\n",
+			    arvif->vdev_id, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int ath10k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui,
+				       u8 oui_type, size_t ie_offset)
+{
+	size_t len;
+	const u8 *next;
+	const u8 *end;
+	u8 *ie;
+
+	if (WARN_ON(skb->len < ie_offset))
+		return -EINVAL;
+
+	ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
+					   skb->data + ie_offset,
+					   skb->len - ie_offset);
+	if (!ie)
+		return -ENOENT;
+
+	len = ie[1] + 2;
+	end = skb->data + skb->len;
+	next = ie + len;
+
+	if (WARN_ON(next > end))
+		return -EINVAL;
+
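+	/* Move the remaining IEs over the vendor IE and trim the now unused
+	 * tail off the skb.
+	 */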
+	memmove(ie, next, end - next);
+	skb_trim(skb, skb->len - len);
+
+	return 0;
+}
+
+static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif)
+{
+	struct ath10k *ar = arvif->ar;
+	struct ieee80211_hw *hw = ar->hw;
+	struct ieee80211_vif *vif = arvif->vif;
+	struct ieee80211_mutable_offsets offs = {};
+	struct sk_buff *bcn;
+	int ret;
+
+	if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
+		return 0;
+
+	if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
+	    arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
+		return 0;
+
+	bcn = ieee80211_beacon_get_template(hw, vif, &offs);
+	if (!bcn) {
+		ath10k_warn(ar, "failed to get beacon template from mac80211\n");
+		return -EPERM;
+	}
+
+	ret = ath10k_mac_setup_bcn_p2p_ie(arvif, bcn);
+	if (ret) {
+		ath10k_warn(ar, "failed to setup p2p go bcn ie: %d\n", ret);
+		kfree_skb(bcn);
+		return ret;
+	}
+
+	/* P2P IE is inserted by firmware automatically (as configured above)
+	 * so remove it from the base beacon template to avoid duplicate P2P
+	 * IEs in beacon frames.
+	 */
+	ath10k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
+				    offsetof(struct ieee80211_mgmt,
+					     u.beacon.variable));
+
+	ret = ath10k_wmi_bcn_tmpl(ar, arvif->vdev_id, offs.tim_offset, bcn, 0,
+				  0, NULL, 0);
+	kfree_skb(bcn);
+
+	if (ret) {
+		ath10k_warn(ar, "failed to submit beacon template command: %d\n",
+			    ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif)
+{
+	struct ath10k *ar = arvif->ar;
+	struct ieee80211_hw *hw = ar->hw;
+	struct ieee80211_vif *vif = arvif->vif;
+	struct sk_buff *prb;
+	int ret;
+
+	if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
+		return 0;
+
+	if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
+		return 0;
+
+	prb = ieee80211_proberesp_get(hw, vif);
+	if (!prb) {
+		ath10k_warn(ar, "failed to get probe resp template from mac80211\n");
+		return -EPERM;
+	}
+
+	ret = ath10k_wmi_prb_tmpl(ar, arvif->vdev_id, prb);
+	kfree_skb(prb);
+
+	if (ret) {
+		ath10k_warn(ar, "failed to submit probe resp template command: %d\n",
+			    ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int ath10k_mac_vif_fix_hidden_ssid(struct ath10k_vif *arvif)
+{
+	struct ath10k *ar = arvif->ar;
+	struct cfg80211_chan_def def;
+	int ret;
+
+	/* When the vdev is originally started during assign_vif_chanctx() some
+	 * information is missing, notably the SSID. Firmware revisions with beacon
+	 * offloading require the SSID to be provided during vdev (re)start to
+	 * handle hidden SSID properly.
+	 *
+	 * Vdev restart must be done after vdev has been both started and
+	 * upped. Otherwise some firmware revisions (at least 10.2) fail to
+	 * deliver vdev restart response event causing timeouts during vdev
+	 * syncing in ath10k.
+	 *
+	 * Note: The vdev down/up and template reinstallation could be skipped
+	 * since only wmi-tlv firmware is known to have beacon offload and
+	 * wmi-tlv doesn't seem to misbehave like 10.2 wrt vdev restart
+	 * response delivery. It's probably more robust to keep it as is.
+	 */
+	if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
+		return 0;
+
+	if (WARN_ON(!arvif->is_started))
+		return -EINVAL;
+
+	if (WARN_ON(!arvif->is_up))
+		return -EINVAL;
+
+	if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
+		return -EINVAL;
+
+	ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+	if (ret) {
+		ath10k_warn(ar, "failed to bring down ap vdev %i: %d\n",
+			    arvif->vdev_id, ret);
+		return ret;
+	}
+
+	/* Vdev down resets beacon & presp templates. Reinstall them. Otherwise
+	 * firmware will crash upon vdev up.
+	 */
+
+	ret = ath10k_mac_setup_bcn_tmpl(arvif);
+	if (ret) {
+		ath10k_warn(ar, "failed to update beacon template: %d\n", ret);
+		return ret;
+	}
+
+	ret = ath10k_mac_setup_prb_tmpl(arvif);
+	if (ret) {
+		ath10k_warn(ar, "failed to update presp template: %d\n", ret);
+		return ret;
+	}
+
+	ret = ath10k_vdev_restart(arvif, &def);
+	if (ret) {
+		ath10k_warn(ar, "failed to restart ap vdev %i: %d\n",
+			    arvif->vdev_id, ret);
+		return ret;
+	}
+
+	ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+				 arvif->bssid);
+	if (ret) {
+		ath10k_warn(ar, "failed to bring up ap vdev %i: %d\n",
+			    arvif->vdev_id, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void ath10k_control_beaconing(struct ath10k_vif *arvif,
+				     struct ieee80211_bss_conf *info)
+{
+	struct ath10k *ar = arvif->ar;
+	int ret = 0;
+
+	lockdep_assert_held(&arvif->ar->conf_mutex);
+
+	if (!info->enable_beacon) {
+		ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+		if (ret)
+			ath10k_warn(ar, "failed to down vdev_id %i: %d\n",
+				    arvif->vdev_id, ret);
+
+		arvif->is_up = false;
+
+		spin_lock_bh(&arvif->ar->data_lock);
+		ath10k_mac_vif_beacon_free(arvif);
+		spin_unlock_bh(&arvif->ar->data_lock);
+
+		return;
+	}
+
+	arvif->tx_seq_no = 0x1000;
+
+	arvif->aid = 0;
+	ether_addr_copy(arvif->bssid, info->bssid);
+
+	ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+				 arvif->bssid);
+	if (ret) {
+		ath10k_warn(ar, "failed to bring up vdev %d: %i\n",
+			    arvif->vdev_id, ret);
+		return;
+	}
+
+	arvif->is_up = true;
+
+	ret = ath10k_mac_vif_fix_hidden_ssid(arvif);
+	if (ret) {
+		ath10k_warn(ar, "failed to fix hidden ssid for vdev %i, expect trouble: %d\n",
+			    arvif->vdev_id, ret);
+		return;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
+}
+
+static void ath10k_control_ibss(struct ath10k_vif *arvif,
+				struct ieee80211_bss_conf *info,
+				const u8 self_peer[ETH_ALEN])
+{
+	struct ath10k *ar = arvif->ar;
+	u32 vdev_param;
+	int ret = 0;
+
+	lockdep_assert_held(&arvif->ar->conf_mutex);
+
+	if (!info->ibss_joined) {
+		if (is_zero_ether_addr(arvif->bssid))
+			return;
+
+		eth_zero_addr(arvif->bssid);
+
+		return;
+	}
+
+	vdev_param = arvif->ar->wmi.vdev_param->atim_window;
+	ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
+					ATH10K_DEFAULT_ATIM);
+	if (ret)
+		ath10k_warn(ar, "failed to set IBSS ATIM for vdev %d: %d\n",
+			    arvif->vdev_id, ret);
+}
+
+static int ath10k_mac_vif_recalc_ps_wake_threshold(struct ath10k_vif *arvif)
+{
+	struct ath10k *ar = arvif->ar;
+	u32 param;
+	u32 value;
+	int ret;
+
+	lockdep_assert_held(&arvif->ar->conf_mutex);
+
+	if (arvif->u.sta.uapsd)
+		value = WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER;
+	else
+		value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
+
+	param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
+	ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value);
+	if (ret) {
+		ath10k_warn(ar, "failed to submit ps wake threshold %u on vdev %i: %d\n",
+			    value, arvif->vdev_id, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif)
+{
+	struct ath10k *ar = arvif->ar;
+	u32 param;
+	u32 value;
+	int ret;
+
+	lockdep_assert_held(&arvif->ar->conf_mutex);
+
+	if (arvif->u.sta.uapsd)
+		value = WMI_STA_PS_PSPOLL_COUNT_UAPSD;
+	else
+		value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
+
+	param = WMI_STA_PS_PARAM_PSPOLL_COUNT;
+	ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+					  param, value);
+	if (ret) {
+		ath10k_warn(ar, "failed to submit ps poll count %u on vdev %i: %d\n",
+			    value, arvif->vdev_id, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int ath10k_mac_num_vifs_started(struct ath10k *ar)
+{
+	struct ath10k_vif *arvif;
+	int num = 0;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	list_for_each_entry(arvif, &ar->arvifs, list)
+		if (arvif->is_started)
+			num++;
+
+	return num;
+}
+
+static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
+{
+	struct ath10k *ar = arvif->ar;
+	struct ieee80211_vif *vif = arvif->vif;
+	struct ieee80211_conf *conf = &ar->hw->conf;
+	enum wmi_sta_powersave_param param;
+	enum wmi_sta_ps_mode psmode;
+	int ret;
+	int ps_timeout;
+	bool enable_ps;
+
+	lockdep_assert_held(&arvif->ar->conf_mutex);
+
+	if (arvif->vif->type != NL80211_IFTYPE_STATION)
+		return 0;
+
+	enable_ps = arvif->ps;
+
+	if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 &&
+	    !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT,
+		      ar->fw_features)) {
+		ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n",
+			    arvif->vdev_id);
+		enable_ps = false;
+	}
+
+	if (!arvif->is_started) {
+		/* mac80211 can update vif powersave state while disconnected.
+		 * Firmware doesn't behave nicely and consumes more power than
+		 * necessary if PS is disabled on a non-started vdev. Hence
+		 * force-enable PS for non-running vdevs.
+		 */
+		psmode = WMI_STA_PS_MODE_ENABLED;
+	} else if (enable_ps) {
+		psmode = WMI_STA_PS_MODE_ENABLED;
+		param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
+
+		ps_timeout = conf->dynamic_ps_timeout;
+		if (ps_timeout == 0) {
+			/* Firmware doesn't like 0 */
+			ps_timeout = ieee80211_tu_to_usec(
+				vif->bss_conf.beacon_int) / 1000;
+		}
+
+		ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
+						  ps_timeout);
+		if (ret) {
+			ath10k_warn(ar, "failed to set inactivity time for vdev %d: %i\n",
+				    arvif->vdev_id, ret);
+			return ret;
+		}
+	} else {
+		psmode = WMI_STA_PS_MODE_DISABLED;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d psmode %s\n",
+		   arvif->vdev_id, psmode ? "enable" : "disable");
+
+	ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode);
+	if (ret) {
+		ath10k_warn(ar, "failed to set PS Mode %d for vdev %d: %d\n",
+			    psmode, arvif->vdev_id, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int ath10k_mac_vif_disable_keepalive(struct ath10k_vif *arvif)
+{
+	struct ath10k *ar = arvif->ar;
+	struct wmi_sta_keepalive_arg arg = {};
+	int ret;
+
+	lockdep_assert_held(&arvif->ar->conf_mutex);
+
+	if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+		return 0;
+
+	if (!test_bit(WMI_SERVICE_STA_KEEP_ALIVE, ar->wmi.svc_map))
+		return 0;
+
+	/* Some firmware revisions have a bug and ignore the `enabled` field.
+	 * Instead use the interval to disable the keepalive.
+	 */
+	arg.vdev_id = arvif->vdev_id;
+	arg.enabled = 1;
+	arg.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME;
+	arg.interval = WMI_STA_KEEPALIVE_INTERVAL_DISABLE;
+
+	ret = ath10k_wmi_sta_keepalive(ar, &arg);
+	if (ret) {
+		ath10k_warn(ar, "failed to submit keepalive on vdev %i: %d\n",
+			    arvif->vdev_id, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif)
+{
+	struct ath10k *ar = arvif->ar;
+	struct ieee80211_vif *vif = arvif->vif;
+	int ret;
+
+	lockdep_assert_held(&arvif->ar->conf_mutex);
+
+	if (WARN_ON(!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)))
+		return;
+
+	if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
+		return;
+
+	if (!vif->csa_active)
+		return;
+
+	if (!arvif->is_up)
+		return;
+
+	if (!ieee80211_csa_is_complete(vif)) {
+		ieee80211_csa_update_counter(vif);
+
+		ret = ath10k_mac_setup_bcn_tmpl(arvif);
+		if (ret)
+			ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
+				    ret);
+
+		ret = ath10k_mac_setup_prb_tmpl(arvif);
+		if (ret)
+			ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
+				    ret);
+	} else {
+		ieee80211_csa_finish(vif);
+	}
+}
+
+static void ath10k_mac_vif_ap_csa_work(struct work_struct *work)
+{
+	struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
+						ap_csa_work);
+	struct ath10k *ar = arvif->ar;
+
+	mutex_lock(&ar->conf_mutex);
+	ath10k_mac_vif_ap_csa_count_down(arvif);
+	mutex_unlock(&ar->conf_mutex);
+}
+
+static void ath10k_mac_handle_beacon_iter(void *data, u8 *mac,
+					  struct ieee80211_vif *vif)
+{
+	struct sk_buff *skb = data;
+	struct ieee80211_mgmt *mgmt = (void *)skb->data;
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+	if (vif->type != NL80211_IFTYPE_STATION)
+		return;
+
+	if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid))
+		return;
+
+	cancel_delayed_work(&arvif->connection_loss_work);
+}
+
+void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb)
+{
+	ieee80211_iterate_active_interfaces_atomic(ar->hw,
+						   IEEE80211_IFACE_ITER_NORMAL,
+						   ath10k_mac_handle_beacon_iter,
+						   skb);
+}
+
+static void ath10k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
+					       struct ieee80211_vif *vif)
+{
+	u32 *vdev_id = data;
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	struct ath10k *ar = arvif->ar;
+	struct ieee80211_hw *hw = ar->hw;
+
+	if (arvif->vdev_id != *vdev_id)
+		return;
+
+	if (!arvif->is_up)
+		return;
+
+	ieee80211_beacon_loss(vif);
+
+	/* Firmware doesn't report beacon loss events repeatedly. If AP probe
+	 * (done by mac80211) succeeds but beacons do not resume then it
+	 * doesn't make sense to continue operation. Queue connection loss work
+	 * which can be cancelled when beacon is received.
+	 */
+	ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work,
+				     ATH10K_CONNECTION_LOSS_HZ);
+}
+
+void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id)
+{
+	ieee80211_iterate_active_interfaces_atomic(ar->hw,
+						   IEEE80211_IFACE_ITER_NORMAL,
+						   ath10k_mac_handle_beacon_miss_iter,
+						   &vdev_id);
+}
+
+static void ath10k_mac_vif_sta_connection_loss_work(struct work_struct *work)
+{
+	struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
+						connection_loss_work.work);
+	struct ieee80211_vif *vif = arvif->vif;
+
+	if (!arvif->is_up)
+		return;
+
+	ieee80211_connection_loss(vif);
+}
+
+/**********************/
+/* Station management */
+/**********************/
+
+static u32 ath10k_peer_assoc_h_listen_intval(struct ath10k *ar,
+					     struct ieee80211_vif *vif)
+{
+	/* Some firmware revisions have unstable STA powersave when listen
+	 * interval is set too high (e.g. 5). The symptom is that firmware does
+	 * not generate NullFunc frames properly even if buffered frames have
+	 * been indicated in Beacon TIM. Firmware would seldom wake up to pull
+	 * buffered frames. Often pinging the device from AP would simply fail.
+	 *
+	 * As a workaround set it to 1.
+	 */
+	if (vif->type == NL80211_IFTYPE_STATION)
+		return 1;
+
+	return ar->hw->conf.listen_interval;
+}
+
+static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
+				      struct ieee80211_vif *vif,
+				      struct ieee80211_sta *sta,
+				      struct wmi_peer_assoc_complete_arg *arg)
+{
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	u32 aid;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	if (vif->type == NL80211_IFTYPE_STATION)
+		aid = vif->bss_conf.aid;
+	else
+		aid = sta->aid;
+
+	ether_addr_copy(arg->addr, sta->addr);
+	arg->vdev_id = arvif->vdev_id;
+	arg->peer_aid = aid;
+	arg->peer_flags |= WMI_PEER_AUTH;
+	arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif);
+	arg->peer_num_spatial_streams = 1;
+	arg->peer_caps = vif->bss_conf.assoc_capability;
+}
+
+static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
+				       struct ieee80211_vif *vif,
+				       struct wmi_peer_assoc_complete_arg *arg)
+{
+	struct ieee80211_bss_conf *info = &vif->bss_conf;
+	struct cfg80211_chan_def def;
+	struct cfg80211_bss *bss;
+	const u8 *rsnie = NULL;
+	const u8 *wpaie = NULL;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+		return;
+
+	bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0,
+			       IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
+	if (bss) {
+		const struct cfg80211_bss_ies *ies;
+
+		rcu_read_lock();
+		rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN);
+
+		ies = rcu_dereference(bss->ies);
+
+		wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+						WLAN_OUI_TYPE_MICROSOFT_WPA,
+						ies->data,
+						ies->len);
+		rcu_read_unlock();
+		cfg80211_put_bss(ar->hw->wiphy, bss);
+	}
+
+	/* FIXME: is basing this on the RSN IE/WPA IE a correct idea? */
+	if (rsnie || wpaie) {
+		ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__);
+		arg->peer_flags |= WMI_PEER_NEED_PTK_4_WAY;
+	}
+
+	if (wpaie) {
+		ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__);
+		arg->peer_flags |= WMI_PEER_NEED_GTK_2_WAY;
+	}
+}
+
+static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
+				      struct ieee80211_vif *vif,
+				      struct ieee80211_sta *sta,
+				      struct wmi_peer_assoc_complete_arg *arg)
+{
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
+	struct cfg80211_chan_def def;
+	const struct ieee80211_supported_band *sband;
+	const struct ieee80211_rate *rates;
+	enum ieee80211_band band;
+	u32 ratemask;
+	u8 rate;
+	int i;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+		return;
+
+	band = def.chan->band;
+	sband = ar->hw->wiphy->bands[band];
+	ratemask = sta->supp_rates[band];
+	ratemask &= arvif->bitrate_mask.control[band].legacy;
+	rates = sband->bitrates;
+
+	rateset->num_rates = 0;
+
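+	/* Walk the mask-filtered legacy rate bitmap and translate each
+	 * supported bitrate into a hardware rate code.
+	 */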
+	for (i = 0; i < 32; i++, ratemask >>= 1, rates++) {
+		if (!(ratemask & 1))
+			continue;
+
+		rate = ath10k_mac_bitrate_to_rate(rates->bitrate);
+		rateset->rates[rateset->num_rates] = rate;
+		rateset->num_rates++;
+	}
+}
+
+static bool
+ath10k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+{
+	int nss;
+
+	for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++)
+		if (ht_mcs_mask[nss])
+			return false;
+
+	return true;
+}
+
+static bool
+ath10k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
+{
+	int nss;
+
+	for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++)
+		if (vht_mcs_mask[nss])
+			return false;
+
+	return true;
+}
+
+static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
+				   struct ieee80211_vif *vif,
+				   struct ieee80211_sta *sta,
+				   struct wmi_peer_assoc_complete_arg *arg)
+{
+	const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	struct cfg80211_chan_def def;
+	enum ieee80211_band band;
+	const u8 *ht_mcs_mask;
+	const u16 *vht_mcs_mask;
+	int i, n;
+	u8 max_nss;
+	u32 stbc;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+		return;
+
+	if (!ht_cap->ht_supported)
+		return;
+
+	band = def.chan->band;
+	ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+	vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+	if (ath10k_peer_assoc_h_ht_masked(ht_mcs_mask) &&
+	    ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
+		return;
+
+	arg->peer_flags |= WMI_PEER_HT;
+	arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+				    ht_cap->ampdu_factor)) - 1;
+
+	arg->peer_mpdu_density =
+		ath10k_parse_mpdudensity(ht_cap->ampdu_density);
+
+	arg->peer_ht_caps = ht_cap->cap;
+	arg->peer_rate_caps |= WMI_RC_HT_FLAG;
+
+	if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
+		arg->peer_flags |= WMI_PEER_LDPC;
+
+	if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
+		arg->peer_flags |= WMI_PEER_40MHZ;
+		arg->peer_rate_caps |= WMI_RC_CW40_FLAG;
+	}
+
+	if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) {
+		if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
+			arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
+
+		if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
+			arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
+	}
+
+	if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) {
+		arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG;
+		arg->peer_flags |= WMI_PEER_STBC;
+	}
+
+	if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) {
+		stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC;
+		stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT;
+		stbc = stbc << WMI_RC_RX_STBC_FLAG_S;
+		arg->peer_rate_caps |= stbc;
+		arg->peer_flags |= WMI_PEER_STBC;
+	}
+
+	if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2])
+		arg->peer_rate_caps |= WMI_RC_TS_FLAG;
+	else if (ht_cap->mcs.rx_mask[1])
+		arg->peer_rate_caps |= WMI_RC_DS_FLAG;
+
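+	/* Collect HT MCS indexes advertised by the STA and allowed by the
+	 * configured MCS mask, tracking the highest spatial stream count seen.
+	 */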
+	for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++)
+		if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) &&
+		    (ht_mcs_mask[i / 8] & BIT(i % 8))) {
+			max_nss = (i / 8) + 1;
+			arg->peer_ht_rates.rates[n++] = i;
+		}
+
+	/*
+	 * This is a workaround for HT-enabled STAs which break the spec
+	 * and have no HT capabilities RX mask (no HT RX MCS map).
+	 *
+	 * As per spec, in section 20.3.5 Modulation and coding scheme (MCS),
+	 * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs.
+	 *
+	 * Firmware asserts if such situation occurs.
+	 */
+	if (n == 0) {
+		arg->peer_ht_rates.num_rates = 8;
+		for (i = 0; i < arg->peer_ht_rates.num_rates; i++)
+			arg->peer_ht_rates.rates[i] = i;
+	} else {
+		arg->peer_ht_rates.num_rates = n;
+		arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss);
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
+		   arg->addr,
+		   arg->peer_ht_rates.num_rates,
+		   arg->peer_num_spatial_streams);
+}
+
+static int ath10k_peer_assoc_qos_ap(struct ath10k *ar,
+				    struct ath10k_vif *arvif,
+				    struct ieee80211_sta *sta)
+{
+	u32 uapsd = 0;
+	u32 max_sp = 0;
+	int ret = 0;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	if (sta->wme && sta->uapsd_queues) {
+		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
+			   sta->uapsd_queues, sta->max_sp);
+
+		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+			uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
+				 WMI_AP_PS_UAPSD_AC3_TRIGGER_EN;
+		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
+			uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN |
+				 WMI_AP_PS_UAPSD_AC2_TRIGGER_EN;
+		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
+			uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN |
+				 WMI_AP_PS_UAPSD_AC1_TRIGGER_EN;
+		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+			uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN |
+				 WMI_AP_PS_UAPSD_AC0_TRIGGER_EN;
+
+		if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP)
+			max_sp = sta->max_sp;
+
+		ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
+						 sta->addr,
+						 WMI_AP_PS_PEER_PARAM_UAPSD,
+						 uapsd);
+		if (ret) {
+			ath10k_warn(ar, "failed to set ap ps peer param uapsd for vdev %i: %d\n",
+				    arvif->vdev_id, ret);
+			return ret;
+		}
+
+		ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
+						 sta->addr,
+						 WMI_AP_PS_PEER_PARAM_MAX_SP,
+						 max_sp);
+		if (ret) {
+			ath10k_warn(ar, "failed to set ap ps peer param max sp for vdev %i: %d\n",
+				    arvif->vdev_id, ret);
+			return ret;
+		}
+
+		/* TODO: set this up based on the STA listen interval and
+		 * beacon interval. Currently we don't know
+		 * sta->listen_interval (a mac80211 patch is required), so use
+		 * 10 seconds for now.
+		 */
+		ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr,
+						 WMI_AP_PS_PEER_PARAM_AGEOUT_TIME,
+						 10);
+		if (ret) {
+			ath10k_warn(ar, "failed to set ap ps peer param ageout time for vdev %i: %d\n",
+				    arvif->vdev_id, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static u16
+ath10k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
+			      const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX])
+{
+	int idx_limit;
+	int nss;
+	u16 mcs_map;
+	u16 mcs;
+
+	for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
+		mcs_map = ath10k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) &
+			  vht_mcs_limit[nss];
+
+		if (mcs_map)
+			idx_limit = fls(mcs_map) - 1;
+		else
+			idx_limit = -1;
+
+		switch (idx_limit) {
+		case 0: /* fall through */
+		case 1: /* fall through */
+		case 2: /* fall through */
+		case 3: /* fall through */
+		case 4: /* fall through */
+		case 5: /* fall through */
+		case 6: /* fall through */
+		default:
+			/* see ath10k_mac_can_set_bitrate_mask() */
+			WARN_ON(1);
+			/* fall through */
+		case -1:
+			mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED;
+			break;
+		case 7:
+			mcs = IEEE80211_VHT_MCS_SUPPORT_0_7;
+			break;
+		case 8:
+			mcs = IEEE80211_VHT_MCS_SUPPORT_0_8;
+			break;
+		case 9:
+			mcs = IEEE80211_VHT_MCS_SUPPORT_0_9;
+			break;
+		}
+
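+		/* Each NSS occupies a 2-bit field in the VHT MCS map; clear it
+		 * and write back the (possibly limited) MCS value.
+		 */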
+		tx_mcs_set &= ~(0x3 << (nss * 2));
+		tx_mcs_set |= mcs << (nss * 2);
+	}
+
+	return tx_mcs_set;
+}
+
+static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
+				    struct ieee80211_vif *vif,
+				    struct ieee80211_sta *sta,
+				    struct wmi_peer_assoc_complete_arg *arg)
+{
+	const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	struct cfg80211_chan_def def;
+	enum ieee80211_band band;
+	const u16 *vht_mcs_mask;
+	u8 ampdu_factor;
+
+	if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+		return;
+
+	if (!vht_cap->vht_supported)
+		return;
+
+	band = def.chan->band;
+	vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+	if (ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
+		return;
+
+	arg->peer_flags |= WMI_PEER_VHT;
+
+	if (def.chan->band == IEEE80211_BAND_2GHZ)
+		arg->peer_flags |= WMI_PEER_VHT_2G;
+
+	arg->peer_vht_caps = vht_cap->cap;
+
+	ampdu_factor = (vht_cap->cap &
+			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
+		       IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
+
+	/* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to
+	 * zero in VHT IE. Using it would result in degraded throughput.
+	 * arg->peer_max_mpdu at this point contains HT max_mpdu so keep
+	 * it if VHT max_mpdu is smaller. */
+	arg->peer_max_mpdu = max(arg->peer_max_mpdu,
+				 (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+					ampdu_factor)) - 1);
+
+	if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+		arg->peer_flags |= WMI_PEER_80MHZ;
+
+	arg->peer_vht_rates.rx_max_rate =
+		__le16_to_cpu(vht_cap->vht_mcs.rx_highest);
+	arg->peer_vht_rates.rx_mcs_set =
+		__le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
+	arg->peer_vht_rates.tx_max_rate =
+		__le16_to_cpu(vht_cap->vht_mcs.tx_highest);
+	arg->peer_vht_rates.tx_mcs_set = ath10k_peer_assoc_h_vht_limit(
+		__le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask);
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
+		   sta->addr, arg->peer_max_mpdu, arg->peer_flags);
+}
+
+static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
+				    struct ieee80211_vif *vif,
+				    struct ieee80211_sta *sta,
+				    struct wmi_peer_assoc_complete_arg *arg)
+{
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+	switch (arvif->vdev_type) {
+	case WMI_VDEV_TYPE_AP:
+		if (sta->wme)
+			arg->peer_flags |= WMI_PEER_QOS;
+
+		if (sta->wme && sta->uapsd_queues) {
+			arg->peer_flags |= WMI_PEER_APSD;
+			arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG;
+		}
+		break;
+	case WMI_VDEV_TYPE_STA:
+		if (vif->bss_conf.qos)
+			arg->peer_flags |= WMI_PEER_QOS;
+		break;
+	case WMI_VDEV_TYPE_IBSS:
+		if (sta->wme)
+			arg->peer_flags |= WMI_PEER_QOS;
+		break;
+	default:
+		break;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM qos %d\n",
+		   sta->addr, !!(arg->peer_flags & WMI_PEER_QOS));
+}
+
+static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
+{
+	return sta->supp_rates[IEEE80211_BAND_2GHZ] >>
+	       ATH10K_MAC_FIRST_OFDM_RATE_IDX;
+}
+
+static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
+					struct ieee80211_vif *vif,
+					struct ieee80211_sta *sta,
+					struct wmi_peer_assoc_complete_arg *arg)
+{
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	struct cfg80211_chan_def def;
+	enum ieee80211_band band;
+	const u8 *ht_mcs_mask;
+	const u16 *vht_mcs_mask;
+	enum wmi_phy_mode phymode = MODE_UNKNOWN;
+
+	if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+		return;
+
+	band = def.chan->band;
+	ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+	vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+	switch (band) {
+	case IEEE80211_BAND_2GHZ:
+		if (sta->vht_cap.vht_supported &&
+		    !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
+			if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+				phymode = MODE_11AC_VHT40;
+			else
+				phymode = MODE_11AC_VHT20;
+		} else if (sta->ht_cap.ht_supported &&
+			   !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
+			if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+				phymode = MODE_11NG_HT40;
+			else
+				phymode = MODE_11NG_HT20;
+		} else if (ath10k_mac_sta_has_ofdm_only(sta)) {
+			phymode = MODE_11G;
+		} else {
+			phymode = MODE_11B;
+		}
+
+		break;
+	case IEEE80211_BAND_5GHZ:
+		/*
+		 * Check VHT first.
+		 */
+		if (sta->vht_cap.vht_supported &&
+		    !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
+			if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+				phymode = MODE_11AC_VHT80;
+			else if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+				phymode = MODE_11AC_VHT40;
+			else if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
+				phymode = MODE_11AC_VHT20;
+		} else if (sta->ht_cap.ht_supported &&
+			   !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
+			if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
+				phymode = MODE_11NA_HT40;
+			else
+				phymode = MODE_11NA_HT20;
+		} else {
+			phymode = MODE_11A;
+		}
+
+		break;
+	default:
+		break;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM phymode %s\n",
+		   sta->addr, ath10k_wmi_phymode_str(phymode));
+
+	arg->peer_phymode = phymode;
+	WARN_ON(phymode == MODE_UNKNOWN);
+}
+
+static int ath10k_peer_assoc_prepare(struct ath10k *ar,
+				     struct ieee80211_vif *vif,
+				     struct ieee80211_sta *sta,
+				     struct wmi_peer_assoc_complete_arg *arg)
+{
+	lockdep_assert_held(&ar->conf_mutex);
+
+	memset(arg, 0, sizeof(*arg));
+
+	ath10k_peer_assoc_h_basic(ar, vif, sta, arg);
+	ath10k_peer_assoc_h_crypto(ar, vif, arg);
+	ath10k_peer_assoc_h_rates(ar, vif, sta, arg);
+	ath10k_peer_assoc_h_ht(ar, vif, sta, arg);
+	ath10k_peer_assoc_h_vht(ar, vif, sta, arg);
+	ath10k_peer_assoc_h_qos(ar, vif, sta, arg);
+	ath10k_peer_assoc_h_phymode(ar, vif, sta, arg);
+
+	return 0;
+}
+
+static const u32 ath10k_smps_map[] = {
+	[WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC,
+	[WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC,
+	[WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE,
+	[WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
+};
+
+static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif,
+				  const u8 *addr,
+				  const struct ieee80211_sta_ht_cap *ht_cap)
+{
+	int smps;
+
+	if (!ht_cap->ht_supported)
+		return 0;
+
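+	/* Extract the SM Power Save field from the HT capability info. */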
+	smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
+	smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+	if (smps >= ARRAY_SIZE(ath10k_smps_map))
+		return -EINVAL;
+
+	return ath10k_wmi_peer_set_param(ar, arvif->vdev_id, addr,
+					 WMI_PEER_SMPS_STATE,
+					 ath10k_smps_map[smps]);
+}
+
+static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar,
+				      struct ieee80211_vif *vif,
+				      struct ieee80211_sta_vht_cap vht_cap)
+{
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	int ret;
+	u32 param;
+	u32 value;
+
+	if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_AFTER_ASSOC)
+		return 0;
+
+	if (!(ar->vht_cap_info &
+	      (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+	       IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
+	       IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+	       IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)))
+		return 0;
+
+	param = ar->wmi.vdev_param->txbf;
+	value = 0;
+
+	if (WARN_ON(param == WMI_VDEV_PARAM_UNSUPPORTED))
+		return 0;
+
+	/* The following logic is correct. If a remote STA advertises support
+	 * for being a beamformer then we should enable ourselves as a
+	 * beamformee, and vice versa.
+	 */
+
+	if (ar->vht_cap_info &
+	    (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+	     IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
+		if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
+			value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
+
+		if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
+			value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE;
+	}
+
+	if (ar->vht_cap_info &
+	    (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+	     IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
+		if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
+			value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
+
+		if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
+			value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER;
+	}
+
+	if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFEE)
+		value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
+
+	if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFER)
+		value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
+
+	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, value);
+	if (ret) {
+		ath10k_warn(ar, "failed to submit vdev param txbf 0x%x: %d\n",
+			    value, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+/* can be called only in mac80211 callbacks due to `key_count` usage */
+static void ath10k_bss_assoc(struct ieee80211_hw *hw,
+			     struct ieee80211_vif *vif,
+			     struct ieee80211_bss_conf *bss_conf)
+{
+	struct ath10k *ar = hw->priv;
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	struct ieee80211_sta_ht_cap ht_cap;
+	struct ieee80211_sta_vht_cap vht_cap;
+	struct wmi_peer_assoc_complete_arg peer_arg;
+	struct ieee80211_sta *ap_sta;
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n",
+		   arvif->vdev_id, arvif->bssid, arvif->aid);
+
+	rcu_read_lock();
+
+	ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
+	if (!ap_sta) {
+		ath10k_warn(ar, "failed to find station entry for bss %pM vdev %i\n",
+			    bss_conf->bssid, arvif->vdev_id);
+		rcu_read_unlock();
+		return;
+	}
+
+	/* ap_sta must be accessed only within the RCU section, which must be
+	 * left before calling ath10k_setup_peer_smps() since it might sleep. */
+	ht_cap = ap_sta->ht_cap;
+	vht_cap = ap_sta->vht_cap;
+
+	ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg);
+	if (ret) {
+		ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n",
+			    bss_conf->bssid, arvif->vdev_id, ret);
+		rcu_read_unlock();
+		return;
+	}
+
+	rcu_read_unlock();
+
+	ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
+	if (ret) {
+		ath10k_warn(ar, "failed to run peer assoc for %pM vdev %i: %d\n",
+			    bss_conf->bssid, arvif->vdev_id, ret);
+		return;
+	}
+
+	ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap);
+	if (ret) {
+		ath10k_warn(ar, "failed to setup peer SMPS for vdev %i: %d\n",
+			    arvif->vdev_id, ret);
+		return;
+	}
+
+	ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
+	if (ret) {
+		ath10k_warn(ar, "failed to recalc txbf for vdev %i on bss %pM: %d\n",
+			    arvif->vdev_id, bss_conf->bssid, ret);
+		return;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC,
+		   "mac vdev %d up (associated) bssid %pM aid %d\n",
+		   arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
+
+	WARN_ON(arvif->is_up);
+
+	arvif->aid = bss_conf->aid;
+	ether_addr_copy(arvif->bssid, bss_conf->bssid);
+
+	ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
+	if (ret) {
+		ath10k_warn(ar, "failed to set vdev %d up: %d\n",
+			    arvif->vdev_id, ret);
+		return;
+	}
+
+	arvif->is_up = true;
+
+	/* Workaround: Some firmware revisions (tested with qca6174
+	 * WLAN.RM.2.0-00073) have buggy powersave state machine and must be
+	 * poked with peer param command.
+	 */
+	ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, arvif->bssid,
+					WMI_PEER_DUMMY_VAR, 1);
+	if (ret) {
+		ath10k_warn(ar, "failed to poke peer %pM param for ps workaround on vdev %i: %d\n",
+			    arvif->bssid, arvif->vdev_id, ret);
+		return;
+	}
+}
+
+static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
+				struct ieee80211_vif *vif)
+{
+	struct ath10k *ar = hw->priv;
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	struct ieee80211_sta_vht_cap vht_cap = {};
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n",
+		   arvif->vdev_id, arvif->bssid);
+
+	ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+	if (ret)
+		ath10k_warn(ar, "faield to down vdev %i: %d\n",
+			    arvif->vdev_id, ret);
+
+	arvif->def_wep_key_idx = -1;
+
+	ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
+	if (ret) {
+		ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n",
+			    arvif->vdev_id, ret);
+		return;
+	}
+
+	arvif->is_up = false;
+
+	cancel_delayed_work_sync(&arvif->connection_loss_work);
+}
+
+static int ath10k_station_assoc(struct ath10k *ar,
+				struct ieee80211_vif *vif,
+				struct ieee80211_sta *sta,
+				bool reassoc)
+{
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	struct wmi_peer_assoc_complete_arg peer_arg;
+	int ret = 0;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	ret = ath10k_peer_assoc_prepare(ar, vif, sta, &peer_arg);
+	if (ret) {
+		ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n",
+			    sta->addr, arvif->vdev_id, ret);
+		return ret;
+	}
+
+	ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
+	if (ret) {
+		ath10k_warn(ar, "failed to run peer assoc for STA %pM vdev %i: %d\n",
+			    sta->addr, arvif->vdev_id, ret);
+		return ret;
+	}
+
+	/* Re-assoc is run only to update supported rates for given station. It
+	 * doesn't make much sense to reconfigure the peer completely.
+	 */
+	if (!reassoc) {
+		ret = ath10k_setup_peer_smps(ar, arvif, sta->addr,
+					     &sta->ht_cap);
+		if (ret) {
+			ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n",
+				    arvif->vdev_id, ret);
+			return ret;
+		}
+
+		ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
+		if (ret) {
+			ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n",
+				    sta->addr, arvif->vdev_id, ret);
+			return ret;
+		}
+
+		if (!sta->wme) {
+			arvif->num_legacy_stations++;
+			ret  = ath10k_recalc_rtscts_prot(arvif);
+			if (ret) {
+				ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
+					    arvif->vdev_id, ret);
+				return ret;
+			}
+		}
+
+		/* Plumb cached keys only for static WEP */
+		if (arvif->def_wep_key_idx != -1) {
+			ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
+			if (ret) {
+				ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n",
+					    arvif->vdev_id, ret);
+				return ret;
+			}
+		}
+	}
+
+	return ret;
+}
+
+static int ath10k_station_disassoc(struct ath10k *ar,
+				   struct ieee80211_vif *vif,
+				   struct ieee80211_sta *sta)
+{
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	int ret = 0;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	if (!sta->wme) {
+		arvif->num_legacy_stations--;
+		ret = ath10k_recalc_rtscts_prot(arvif);
+		if (ret) {
+			ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
+				    arvif->vdev_id, ret);
+			return ret;
+		}
+	}
+
+	ret = ath10k_clear_peer_keys(arvif, sta->addr);
+	if (ret) {
+		ath10k_warn(ar, "failed to clear all peer wep keys for vdev %i: %d\n",
+			    arvif->vdev_id, ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+/**************/
+/* Regulatory */
+/**************/
+
+static int ath10k_update_channel_list(struct ath10k *ar)
+{
+	struct ieee80211_hw *hw = ar->hw;
+	struct ieee80211_supported_band **bands;
+	enum ieee80211_band band;
+	struct ieee80211_channel *channel;
+	struct wmi_scan_chan_list_arg arg = {0};
+	struct wmi_channel_arg *ch;
+	bool passive;
+	int len;
+	int ret;
+	int i;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	bands = hw->wiphy->bands;
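+	/* First pass: count enabled channels to size the WMI channel list. */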
+	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+		if (!bands[band])
+			continue;
+
+		for (i = 0; i < bands[band]->n_channels; i++) {
+			if (bands[band]->channels[i].flags &
+			    IEEE80211_CHAN_DISABLED)
+				continue;
+
+			arg.n_channels++;
+		}
+	}
+
+	len = sizeof(struct wmi_channel_arg) * arg.n_channels;
+	arg.channels = kzalloc(len, GFP_KERNEL);
+	if (!arg.channels)
+		return -ENOMEM;
+
+	ch = arg.channels;
+	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+		if (!bands[band])
+			continue;
+
+		for (i = 0; i < bands[band]->n_channels; i++) {
+			channel = &bands[band]->channels[i];
+
+			if (channel->flags & IEEE80211_CHAN_DISABLED)
+				continue;
+
+			ch->allow_ht   = true;
+
+			/* FIXME: when should we really allow VHT? */
+			ch->allow_vht = true;
+
+			ch->allow_ibss =
+				!(channel->flags & IEEE80211_CHAN_NO_IR);
+
+			ch->ht40plus =
+				!(channel->flags & IEEE80211_CHAN_NO_HT40PLUS);
+
+			ch->chan_radar =
+				!!(channel->flags & IEEE80211_CHAN_RADAR);
+
+			passive = channel->flags & IEEE80211_CHAN_NO_IR;
+			ch->passive = passive;
+
+			ch->freq = channel->center_freq;
+			ch->band_center_freq1 = channel->center_freq;
+			ch->min_power = 0;
+			ch->max_power = channel->max_power * 2;
+			ch->max_reg_power = channel->max_reg_power * 2;
+			ch->max_antenna_gain = channel->max_antenna_gain * 2;
+			ch->reg_class_id = 0; /* FIXME */
+
+			/* FIXME: why use only legacy modes, why not any
+			 * HT/VHT modes? Would that even make any
+			 * difference? */
+			if (channel->band == IEEE80211_BAND_2GHZ)
+				ch->mode = MODE_11G;
+			else
+				ch->mode = MODE_11A;
+
+			if (WARN_ON_ONCE(ch->mode == MODE_UNKNOWN))
+				continue;
+
+			ath10k_dbg(ar, ATH10K_DBG_WMI,
+				   "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
+				    ch - arg.channels, arg.n_channels,
+				   ch->freq, ch->max_power, ch->max_reg_power,
+				   ch->max_antenna_gain, ch->mode);
+
+			ch++;
+		}
+	}
+
+	ret = ath10k_wmi_scan_chan_list(ar, &arg);
+	kfree(arg.channels);
+
+	return ret;
+}
+
+static enum wmi_dfs_region
+ath10k_mac_get_dfs_region(enum nl80211_dfs_regions dfs_region)
+{
+	switch (dfs_region) {
+	case NL80211_DFS_UNSET:
+		return WMI_UNINIT_DFS_DOMAIN;
+	case NL80211_DFS_FCC:
+		return WMI_FCC_DFS_DOMAIN;
+	case NL80211_DFS_ETSI:
+		return WMI_ETSI_DFS_DOMAIN;
+	case NL80211_DFS_JP:
+		return WMI_MKK4_DFS_DOMAIN;
+	}
+	return WMI_UNINIT_DFS_DOMAIN;
+}
+
+static void ath10k_regd_update(struct ath10k *ar)
+{
+	struct reg_dmn_pair_mapping *regpair;
+	int ret;
+	enum wmi_dfs_region wmi_dfs_reg;
+	enum nl80211_dfs_regions nl_dfs_reg;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	ret = ath10k_update_channel_list(ar);
+	if (ret)
+		ath10k_warn(ar, "failed to update channel list: %d\n", ret);
+
+	regpair = ar->ath_common.regulatory.regpair;
+
+	if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
+		nl_dfs_reg = ar->dfs_detector->region;
+		wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg);
+	} else {
+		wmi_dfs_reg = WMI_UNINIT_DFS_DOMAIN;
+	}
+
+	/* Target allows setting up a per-band regdomain but ath_common
+	 * provides only a combined one. */
+	ret = ath10k_wmi_pdev_set_regdomain(ar,
+					    regpair->reg_domain,
+					    regpair->reg_domain, /* 2ghz */
+					    regpair->reg_domain, /* 5ghz */
+					    regpair->reg_2ghz_ctl,
+					    regpair->reg_5ghz_ctl,
+					    wmi_dfs_reg);
+	if (ret)
+		ath10k_warn(ar, "failed to set pdev regdomain: %d\n", ret);
+}
+
+static void ath10k_reg_notifier(struct wiphy *wiphy,
+				struct regulatory_request *request)
+{
+	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+	struct ath10k *ar = hw->priv;
+	bool result;
+
+	ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
+
+	if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
+		ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n",
+			   request->dfs_region);
+		result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector,
+							  request->dfs_region);
+		if (!result)
+			ath10k_warn(ar, "DFS region 0x%X not supported, will trigger radar for every pulse\n",
+				    request->dfs_region);
+	}
+
+	mutex_lock(&ar->conf_mutex);
+	if (ar->state == ATH10K_STATE_ON)
+		ath10k_regd_update(ar);
+	mutex_unlock(&ar->conf_mutex);
+}
+
+/***************/
+/* TX handlers */
+/***************/
+
+void ath10k_mac_tx_lock(struct ath10k *ar, int reason)
+{
+	lockdep_assert_held(&ar->htt.tx_lock);
+
+	WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
+	ar->tx_paused |= BIT(reason);
+	ieee80211_stop_queues(ar->hw);
+}
+
+static void ath10k_mac_tx_unlock_iter(void *data, u8 *mac,
+				      struct ieee80211_vif *vif)
+{
+	struct ath10k *ar = data;
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+	if (arvif->tx_paused)
+		return;
+
+	ieee80211_wake_queue(ar->hw, arvif->vdev_id);
+}
+
+void ath10k_mac_tx_unlock(struct ath10k *ar, int reason)
+{
+	lockdep_assert_held(&ar->htt.tx_lock);
+
+	WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
+	ar->tx_paused &= ~BIT(reason);
+
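+	/* Wake queues only once no pause reasons remain. */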
+	if (ar->tx_paused)
+		return;
+
+	ieee80211_iterate_active_interfaces_atomic(ar->hw,
+						   IEEE80211_IFACE_ITER_RESUME_ALL,
+						   ath10k_mac_tx_unlock_iter,
+						   ar);
+
+	ieee80211_wake_queue(ar->hw, ar->hw->offchannel_tx_hw_queue);
+}
+
+void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason)
+{
+	struct ath10k *ar = arvif->ar;
+
+	lockdep_assert_held(&ar->htt.tx_lock);
+
+	WARN_ON(reason >= BITS_PER_LONG);
+	arvif->tx_paused |= BIT(reason);
+	ieee80211_stop_queue(ar->hw, arvif->vdev_id);
+}
+
+void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason)
+{
+	struct ath10k *ar = arvif->ar;
+
+	lockdep_assert_held(&ar->htt.tx_lock);
+
+	WARN_ON(reason >= BITS_PER_LONG);
+	arvif->tx_paused &= ~BIT(reason);
+
+	if (ar->tx_paused)
+		return;
+
+	if (arvif->tx_paused)
+		return;
+
+	ieee80211_wake_queue(ar->hw, arvif->vdev_id);
+}
+
+static void ath10k_mac_vif_handle_tx_pause(struct ath10k_vif *arvif,
+					   enum wmi_tlv_tx_pause_id pause_id,
+					   enum wmi_tlv_tx_pause_action action)
+{
+	struct ath10k *ar = arvif->ar;
+
+	lockdep_assert_held(&ar->htt.tx_lock);
+
+	switch (action) {
+	case WMI_TLV_TX_PAUSE_ACTION_STOP:
+		ath10k_mac_vif_tx_lock(arvif, pause_id);
+		break;
+	case WMI_TLV_TX_PAUSE_ACTION_WAKE:
+		ath10k_mac_vif_tx_unlock(arvif, pause_id);
+		break;
+	default:
+		ath10k_warn(ar, "received unknown tx pause action %d on vdev %i, ignoring\n",
+			    action, arvif->vdev_id);
+		break;
+	}
+}
+
+struct ath10k_mac_tx_pause {
+	u32 vdev_id;
+	enum wmi_tlv_tx_pause_id pause_id;
+	enum wmi_tlv_tx_pause_action action;
+};
+
+static void ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac,
+					    struct ieee80211_vif *vif)
+{
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	struct ath10k_mac_tx_pause *arg = data;
+
+	if (arvif->vdev_id != arg->vdev_id)
+		return;
+
+	ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action);
+}
+
+void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id,
+				     enum wmi_tlv_tx_pause_id pause_id,
+				     enum wmi_tlv_tx_pause_action action)
+{
+	struct ath10k_mac_tx_pause arg = {
+		.vdev_id = vdev_id,
+		.pause_id = pause_id,
+		.action = action,
+	};
+
+	spin_lock_bh(&ar->htt.tx_lock);
+	ieee80211_iterate_active_interfaces_atomic(ar->hw,
+						   IEEE80211_IFACE_ITER_RESUME_ALL,
+						   ath10k_mac_handle_tx_pause_iter,
+						   &arg);
+	spin_unlock_bh(&ar->htt.tx_lock);
+}
+
+static u8 ath10k_tx_h_get_tid(struct ieee80211_hdr *hdr)
+{
+	if (ieee80211_is_mgmt(hdr->frame_control))
+		return HTT_DATA_TX_EXT_TID_MGMT;
+
+	if (!ieee80211_is_data_qos(hdr->frame_control))
+		return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
+
+	if (!is_unicast_ether_addr(ieee80211_get_DA(hdr)))
+		return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
+
+	return ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
+}
+
+static u8 ath10k_tx_h_get_vdev_id(struct ath10k *ar, struct ieee80211_vif *vif)
+{
+	if (vif)
+		return ath10k_vif_to_arvif(vif)->vdev_id;
+
+	if (ar->monitor_started)
+		return ar->monitor_vdev_id;
+
+	ath10k_warn(ar, "failed to resolve vdev id\n");
+	return 0;
+}
+
+static enum ath10k_hw_txrx_mode
+ath10k_tx_h_get_txmode(struct ath10k *ar, struct ieee80211_vif *vif,
+		       struct ieee80211_sta *sta, struct sk_buff *skb)
+{
+	const struct ieee80211_hdr *hdr = (void *)skb->data;
+	__le16 fc = hdr->frame_control;
+
+	if (!vif || vif->type == NL80211_IFTYPE_MONITOR)
+		return ATH10K_HW_TXRX_RAW;
+
+	if (ieee80211_is_mgmt(fc))
+		return ATH10K_HW_TXRX_MGMT;
+
+	/* Workaround:
+	 *
+	 * NullFunc frames are mostly used to ping if a client or AP is still
+	 * reachable and responsive. This implies tx status reports must be
+	 * accurate - otherwise either mac80211 or userspace (e.g. hostapd) can
+	 * come to a conclusion that the other end disappeared and tear down
+	 * BSS connection or it can never disconnect from BSS/client (which is
+	 * the case).
+	 *
+	 * Firmware with HTT older than 3.0 delivers incorrect tx status for
+	 * NullFunc frames to driver. However there's a HTT Mgmt Tx command
+	 * which seems to deliver correct tx reports for NullFunc frames. The
+	 * downside of using it is it ignores client powersave state so it can
+	 * end up disconnecting sleeping clients in AP mode. It should fix STA
+	 * mode though because APs don't sleep.
+	 */
+	if (ar->htt.target_version_major < 3 &&
+	    (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
+	    !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, ar->fw_features))
+		return ATH10K_HW_TXRX_MGMT;
+
+	/* Workaround:
+	 *
+	 * Some wmi-tlv firmwares for qca6174 have broken Tx key selection for
+	 * NativeWifi txmode - it selects AP key instead of peer key. It seems
+	 * to work with Ethernet txmode so use it.
+	 *
+	 * FIXME: Check if raw mode works with TDLS.
+	 */
+	if (ieee80211_is_data_present(fc) && sta && sta->tdls)
+		return ATH10K_HW_TXRX_ETHERNET;
+
+	if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
+		return ATH10K_HW_TXRX_RAW;
+
+	return ATH10K_HW_TXRX_NATIVE_WIFI;
+}
+
+static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif,
+				     struct sk_buff *skb) {
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	const u32 mask = IEEE80211_TX_INTFL_DONT_ENCRYPT |
+			 IEEE80211_TX_CTL_INJECTED;
+	if ((info->flags & mask) == mask)
+		return false;
+	if (vif)
+		return !ath10k_vif_to_arvif(vif)->nohwcrypt;
+	return true;
+}
+
+/* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS
+ * Control in the header.
+ */
+static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr = (void *)skb->data;
+	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
+	u8 *qos_ctl;
+
+	if (!ieee80211_is_data_qos(hdr->frame_control))
+		return;
+
+	qos_ctl = ieee80211_get_qos_ctl(hdr);
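+	/* Shift the part of the header preceding QoS Control forward and pull
+	 * the stale leading bytes, effectively stripping the QoS Control
+	 * field.
+	 */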
+	memmove(skb->data + IEEE80211_QOS_CTL_LEN,
+		skb->data, (void *)qos_ctl - (void *)skb->data);
+	skb_pull(skb, IEEE80211_QOS_CTL_LEN);
+
+	/* Some firmware revisions don't handle sending QoS NullFunc well.
+	 * These frames are mainly used for CQM purposes so it doesn't really
+	 * matter whether QoS NullFunc or NullFunc are sent.
+	 */
+	hdr = (void *)skb->data;
+	if (ieee80211_is_qos_nullfunc(hdr->frame_control))
+		cb->htt.tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
+
+	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
+}
+
+static void ath10k_tx_h_8023(struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr;
+	struct rfc1042_hdr *rfc1042;
+	struct ethhdr *eth;
+	size_t hdrlen;
+	u8 da[ETH_ALEN];
+	u8 sa[ETH_ALEN];
+	__be16 type;
+
+	hdr = (void *)skb->data;
+	hdrlen = ieee80211_hdrlen(hdr->frame_control);
+	rfc1042 = (void *)skb->data + hdrlen;
+
+	ether_addr_copy(da, ieee80211_get_DA(hdr));
+	ether_addr_copy(sa, ieee80211_get_SA(hdr));
+	type = rfc1042->snap_type;
+
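+	/* Strip the 802.11 and RFC 1042 headers and prepend an Ethernet
+	 * header built from the saved addresses and ethertype.
+	 */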
+	skb_pull(skb, hdrlen + sizeof(*rfc1042));
+	skb_push(skb, sizeof(*eth));
+
+	eth = (void *)skb->data;
+	ether_addr_copy(eth->h_dest, da);
+	ether_addr_copy(eth->h_source, sa);
+	eth->h_proto = type;
+}
+
+static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
+				       struct ieee80211_vif *vif,
+				       struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+	/* This case applies only to P2P_GO */
+	if (arvif->vdev_type != WMI_VDEV_TYPE_AP ||
+	    arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
+		return;
+
+	if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) {
+		spin_lock_bh(&ar->data_lock);
+		if (arvif->u.ap.noa_data)
+			if (!pskb_expand_head(skb, 0, arvif->u.ap.noa_len,
+					      GFP_ATOMIC))
+				memcpy(skb_put(skb, arvif->u.ap.noa_len),
+				       arvif->u.ap.noa_data,
+				       arvif->u.ap.noa_len);
+		spin_unlock_bh(&ar->data_lock);
+	}
+}
+
+static bool ath10k_mac_need_offchan_tx_work(struct ath10k *ar)
+{
+	/* FIXME: Not really sure when exactly the behaviour changed. At some
+	 * point new firmware stopped requiring creation of peer entries for
+	 * offchannel tx (and actually creating them causes issues with wmi-htc
+	 * tx credit replenishment and reliability). Assuming it's at least 3.4
+	 * because that's when `freq` was introduced to the TX_FRM HTT command.
+	 */
+	return !(ar->htt.target_version_major >= 3 &&
+		 ar->htt.target_version_minor >= 4);
+}
+
+static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb)
+{
+	struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
+	int ret = 0;
+
+	spin_lock_bh(&ar->data_lock);
+
+	if (skb_queue_len(q) == ATH10K_MAX_NUM_MGMT_PENDING) {
+		ath10k_warn(ar, "wmi mgmt tx queue is full\n");
+		ret = -ENOSPC;
+		goto unlock;
+	}
+
+	__skb_queue_tail(q, skb);
+	ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
+
+unlock:
+	spin_unlock_bh(&ar->data_lock);
+
+	return ret;
+}
+
+static void ath10k_mac_tx(struct ath10k *ar, struct sk_buff *skb)
+{
+	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
+	struct ath10k_htt *htt = &ar->htt;
+	int ret = 0;
+
+	switch (cb->txmode) {
+	case ATH10K_HW_TXRX_RAW:
+	case ATH10K_HW_TXRX_NATIVE_WIFI:
+	case ATH10K_HW_TXRX_ETHERNET:
+		ret = ath10k_htt_tx(htt, skb);
+		break;
+	case ATH10K_HW_TXRX_MGMT:
+		if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
+			     ar->fw_features))
+			ret = ath10k_mac_tx_wmi_mgmt(ar, skb);
+		else if (ar->htt.target_version_major >= 3)
+			ret = ath10k_htt_tx(htt, skb);
+		else
+			ret = ath10k_htt_mgmt_tx(htt, skb);
+		break;
+	}
+
+	if (ret) {
+		ath10k_warn(ar, "failed to transmit packet, dropping: %d\n",
+			    ret);
+		ieee80211_free_txskb(ar->hw, skb);
+	}
+}
+
+void ath10k_offchan_tx_purge(struct ath10k *ar)
+{
+	struct sk_buff *skb;
+
+	for (;;) {
+		skb = skb_dequeue(&ar->offchan_tx_queue);
+		if (!skb)
+			break;
+
+		ieee80211_free_txskb(ar->hw, skb);
+	}
+}
+
+void ath10k_offchan_tx_work(struct work_struct *work)
+{
+	struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work);
+	struct ath10k_peer *peer;
+	struct ieee80211_hdr *hdr;
+	struct sk_buff *skb;
+	const u8 *peer_addr;
+	int vdev_id;
+	int ret;
+	unsigned long time_left;
+	bool tmp_peer_created = false;
+
+	/* FW requirement: We must create a peer before FW will send out
+	 * an offchannel frame. Otherwise the frame will be stuck and
+	 * never transmitted. We delete the peer upon tx completion.
+	 * It is unlikely that a peer for offchannel tx will already be
+	 * present. However it may be in some rare cases so account for that.
+	 * Otherwise we might remove a legitimate peer and break stuff. */
+
+	for (;;) {
+		skb = skb_dequeue(&ar->offchan_tx_queue);
+		if (!skb)
+			break;
+
+		mutex_lock(&ar->conf_mutex);
+
+		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %p\n",
+			   skb);
+
+		hdr = (struct ieee80211_hdr *)skb->data;
+		peer_addr = ieee80211_get_DA(hdr);
+		vdev_id = ATH10K_SKB_CB(skb)->vdev_id;
+
+		spin_lock_bh(&ar->data_lock);
+		peer = ath10k_peer_find(ar, vdev_id, peer_addr);
+		spin_unlock_bh(&ar->data_lock);
+
+		if (peer)
+			/* FIXME: should this use ath10k_warn()? */
+			ath10k_dbg(ar, ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n",
+				   peer_addr, vdev_id);
+
+		if (!peer) {
+			ret = ath10k_peer_create(ar, vdev_id, peer_addr,
+						 WMI_PEER_TYPE_DEFAULT);
+			if (ret)
+				ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
+					    peer_addr, vdev_id, ret);
+			tmp_peer_created = (ret == 0);
+		}
+
+		spin_lock_bh(&ar->data_lock);
+		reinit_completion(&ar->offchan_tx_completed);
+		ar->offchan_tx_skb = skb;
+		spin_unlock_bh(&ar->data_lock);
+
+		ath10k_mac_tx(ar, skb);
+
+		time_left =
+		wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
+		if (time_left == 0)
+			ath10k_warn(ar, "timed out waiting for offchannel skb %p\n",
+				    skb);
+
+		if (!peer && tmp_peer_created) {
+			ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
+			if (ret)
+				ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n",
+					    peer_addr, vdev_id, ret);
+		}
+
+		mutex_unlock(&ar->conf_mutex);
+	}
+}
+
+void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar)
+{
+	struct sk_buff *skb;
+
+	for (;;) {
+		skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
+		if (!skb)
+			break;
+
+		ieee80211_free_txskb(ar->hw, skb);
+	}
+}
+
+void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
+{
+	struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work);
+	struct sk_buff *skb;
+	int ret;
+
+	for (;;) {
+		skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
+		if (!skb)
+			break;
+
+		ret = ath10k_wmi_mgmt_tx(ar, skb);
+		if (ret) {
+			ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n",
+				    ret);
+			ieee80211_free_txskb(ar->hw, skb);
+		}
+	}
+}
+
+/************/
+/* Scanning */
+/************/
+
+void __ath10k_scan_finish(struct ath10k *ar)
+{
+	lockdep_assert_held(&ar->data_lock);
+
+	switch (ar->scan.state) {
+	case ATH10K_SCAN_IDLE:
+		break;
+	case ATH10K_SCAN_RUNNING:
+	case ATH10K_SCAN_ABORTING:
+		if (!ar->scan.is_roc)
+			ieee80211_scan_completed(ar->hw,
+						 (ar->scan.state ==
+						  ATH10K_SCAN_ABORTING));
+		else if (ar->scan.roc_notify)
+			ieee80211_remain_on_channel_expired(ar->hw);
+		/* fall through */
+	case ATH10K_SCAN_STARTING:
+		ar->scan.state = ATH10K_SCAN_IDLE;
+		ar->scan_channel = NULL;
+		ath10k_offchan_tx_purge(ar);
+		cancel_delayed_work(&ar->scan.timeout);
+		complete_all(&ar->scan.completed);
+		break;
+	}
+}
+
+void ath10k_scan_finish(struct ath10k *ar)
+{
+	spin_lock_bh(&ar->data_lock);
+	__ath10k_scan_finish(ar);
+	spin_unlock_bh(&ar->data_lock);
+}
+
+static int ath10k_scan_stop(struct ath10k *ar)
+{
+	struct wmi_stop_scan_arg arg = {
+		.req_id = 1, /* FIXME */
+		.req_type = WMI_SCAN_STOP_ONE,
+		.u.scan_id = ATH10K_SCAN_ID,
+	};
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	ret = ath10k_wmi_stop_scan(ar, &arg);
+	if (ret) {
+		ath10k_warn(ar, "failed to stop wmi scan: %d\n", ret);
+		goto out;
+	}
+
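+	/* wait_for_completion_timeout() returns 0 on timeout and the number
+	 * of remaining jiffies on success, so normalize success to 0 here.
+	 */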
+	ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ);
+	if (ret == 0) {
+		ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n");
+		ret = -ETIMEDOUT;
+	} else if (ret > 0) {
+		ret = 0;
+	}
+
+out:
+	/* Scan state should be updated upon scan completion but in case
+	 * firmware fails to deliver the event (for whatever reason) it is
+	 * desired to clean up scan state anyway. Firmware may have just
+	 * dropped the scan completion event delivery due to transport pipe
+	 * being overrun with data and/or it can recover on its own before
+	 * the next scan request is submitted.
+	 */
+	spin_lock_bh(&ar->data_lock);
+	if (ar->scan.state != ATH10K_SCAN_IDLE)
+		__ath10k_scan_finish(ar);
+	spin_unlock_bh(&ar->data_lock);
+
+	return ret;
+}
+
+static void ath10k_scan_abort(struct ath10k *ar)
+{
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	spin_lock_bh(&ar->data_lock);
+
+	switch (ar->scan.state) {
+	case ATH10K_SCAN_IDLE:
+		/* This can happen if timeout worker kicked in and called
+		 * abortion while scan completion was being processed.
+		 */
+		break;
+	case ATH10K_SCAN_STARTING:
+	case ATH10K_SCAN_ABORTING:
+		ath10k_warn(ar, "refusing scan abortion due to invalid scan state: %s (%d)\n",
+			    ath10k_scan_state_str(ar->scan.state),
+			    ar->scan.state);
+		break;
+	case ATH10K_SCAN_RUNNING:
+		ar->scan.state = ATH10K_SCAN_ABORTING;
+		spin_unlock_bh(&ar->data_lock);
+
+		ret = ath10k_scan_stop(ar);
+		if (ret)
+			ath10k_warn(ar, "failed to abort scan: %d\n", ret);
+
+		spin_lock_bh(&ar->data_lock);
+		break;
+	}
+
+	spin_unlock_bh(&ar->data_lock);
+}
+
+void ath10k_scan_timeout_work(struct work_struct *work)
+{
+	struct ath10k *ar = container_of(work, struct ath10k,
+					 scan.timeout.work);
+
+	mutex_lock(&ar->conf_mutex);
+	ath10k_scan_abort(ar);
+	mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath10k_start_scan(struct ath10k *ar,
+			     const struct wmi_start_scan_arg *arg)
+{
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	ret = ath10k_wmi_start_scan(ar, arg);
+	if (ret)
+		return ret;
+
+	ret = wait_for_completion_timeout(&ar->scan.started, 1*HZ);
+	if (ret == 0) {
+		ret = ath10k_scan_stop(ar);
+		if (ret)
+			ath10k_warn(ar, "failed to stop scan: %d\n", ret);
+
+		return -ETIMEDOUT;
+	}
+
+	/* If we failed to start the scan, return error code at
+	 * this point.  This is probably due to some issue in the
+	 * firmware, but no need to wedge the driver due to that...
+	 */
+	spin_lock_bh(&ar->data_lock);
+	if (ar->scan.state == ATH10K_SCAN_IDLE) {
+		spin_unlock_bh(&ar->data_lock);
+		return -EINVAL;
+	}
+	spin_unlock_bh(&ar->data_lock);
+
+	return 0;
+}
+
+/**********************/
+/* mac80211 callbacks */
+/**********************/
+
+static void ath10k_tx(struct ieee80211_hw *hw,
+		      struct ieee80211_tx_control *control,
+		      struct sk_buff *skb)
+{
+	struct ath10k *ar = hw->priv;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_vif *vif = info->control.vif;
+	struct ieee80211_sta *sta = control->sta;
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	__le16 fc = hdr->frame_control;
+
+	/* We should disable CCK RATE due to P2P */
+	if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
+		ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
+
+	ATH10K_SKB_CB(skb)->htt.is_offchan = false;
+	ATH10K_SKB_CB(skb)->htt.freq = 0;
+	ATH10K_SKB_CB(skb)->htt.tid = ath10k_tx_h_get_tid(hdr);
+	ATH10K_SKB_CB(skb)->htt.nohwcrypt = !ath10k_tx_h_use_hwcrypto(vif, skb);
+	ATH10K_SKB_CB(skb)->vdev_id = ath10k_tx_h_get_vdev_id(ar, vif);
+	ATH10K_SKB_CB(skb)->txmode = ath10k_tx_h_get_txmode(ar, vif, sta, skb);
+	ATH10K_SKB_CB(skb)->is_protected = ieee80211_has_protected(fc);
+
+	switch (ATH10K_SKB_CB(skb)->txmode) {
+	case ATH10K_HW_TXRX_MGMT:
+	case ATH10K_HW_TXRX_NATIVE_WIFI:
+		ath10k_tx_h_nwifi(hw, skb);
+		ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
+		ath10k_tx_h_seq_no(vif, skb);
+		break;
+	case ATH10K_HW_TXRX_ETHERNET:
+		ath10k_tx_h_8023(skb);
+		break;
+	case ATH10K_HW_TXRX_RAW:
+		if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+			WARN_ON_ONCE(1);
+			ieee80211_free_txskb(hw, skb);
+			return;
+		}
+	}
+
+	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
+		spin_lock_bh(&ar->data_lock);
+		ATH10K_SKB_CB(skb)->htt.freq = ar->scan.roc_freq;
+		ATH10K_SKB_CB(skb)->vdev_id = ar->scan.vdev_id;
+		spin_unlock_bh(&ar->data_lock);
+
+		if (ath10k_mac_need_offchan_tx_work(ar)) {
+			ATH10K_SKB_CB(skb)->htt.freq = 0;
+			ATH10K_SKB_CB(skb)->htt.is_offchan = true;
+
+			ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n",
+				   skb);
+
+			skb_queue_tail(&ar->offchan_tx_queue, skb);
+			ieee80211_queue_work(hw, &ar->offchan_tx_work);
+			return;
+		}
+	}
+
+	ath10k_mac_tx(ar, skb);
+}
+
+/* Must not be called with conf_mutex held as workers can use that also. */
+void ath10k_drain_tx(struct ath10k *ar)
+{
+	/* make sure rcu-protected mac80211 tx path itself is drained */
+	synchronize_net();
+
+	ath10k_offchan_tx_purge(ar);
+	ath10k_mgmt_over_wmi_tx_purge(ar);
+
+	cancel_work_sync(&ar->offchan_tx_work);
+	cancel_work_sync(&ar->wmi_mgmt_tx_work);
+}
+
+void ath10k_halt(struct ath10k *ar)
+{
+	struct ath10k_vif *arvif;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+	ar->filter_flags = 0;
+	ar->monitor = false;
+	ar->monitor_arvif = NULL;
+
+	if (ar->monitor_started)
+		ath10k_monitor_stop(ar);
+
+	ar->monitor_started = false;
+	ar->tx_paused = 0;
+
+	ath10k_scan_finish(ar);
+	ath10k_peer_cleanup_all(ar);
+	ath10k_core_stop(ar);
+	ath10k_hif_power_down(ar);
+
+	spin_lock_bh(&ar->data_lock);
+	list_for_each_entry(arvif, &ar->arvifs, list)
+		ath10k_mac_vif_beacon_cleanup(arvif);
+	spin_unlock_bh(&ar->data_lock);
+}
+
+static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
+{
+	struct ath10k *ar = hw->priv;
+
+	mutex_lock(&ar->conf_mutex);
+
+	*tx_ant = ar->cfg_tx_chainmask;
+	*rx_ant = ar->cfg_rx_chainmask;
+
+	mutex_unlock(&ar->conf_mutex);
+
+	return 0;
+}
+
+static void ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg)
+{
+	/* It is not clear that allowing gaps in the chainmask
+	 * is helpful.  It probably will not do what the user
+	 * is hoping for, so warn in that case.
+	 */
+	if (cm == 15 || cm == 7 || cm == 3 || cm == 1 || cm == 0)
+		return;
+
+	ath10k_warn(ar, "mac %s antenna chainmask may be invalid: 0x%x.  Suggested values: 15, 7, 3, 1 or 0.\n",
+		    dbg, cm);
+}
+
+static int ath10k_mac_get_vht_cap_bf_sts(struct ath10k *ar)
+{
+	int nsts = ar->vht_cap_info;
+
+	nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
+	nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+
+	/* If the firmware does not report the number of supported
+	 * space-time streams to the host, assume it supports up to 4 BF STS
+	 * and return the value used in the VHT CAP field (nsts - 1).
+	 */
+	if (nsts == 0)
+		return 3;
+
+	return nsts;
+}
+
+static int ath10k_mac_get_vht_cap_bf_sound_dim(struct ath10k *ar)
+{
+	int sound_dim = ar->vht_cap_info;
+
+	sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
+	sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
+
+	/* If the sounding dimension is not advertised by the firmware,
+	 * let's use a default value of 1
+	 */
+	if (sound_dim == 0)
+		return 1;
+
+	return sound_dim;
+}
+
+static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
+{
+	struct ieee80211_sta_vht_cap vht_cap = {0};
+	u16 mcs_map;
+	u32 val;
+	int i;
+
+	vht_cap.vht_supported = 1;
+	vht_cap.cap = ar->vht_cap_info;
+
+	if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+				IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
+		val = ath10k_mac_get_vht_cap_bf_sts(ar);
+		val <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+		val &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
+
+		vht_cap.cap |= val;
+	}
+
+	if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+				IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
+		val = ath10k_mac_get_vht_cap_bf_sound_dim(ar);
+		val <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
+		val &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
+
+		vht_cap.cap |= val;
+	}
+
+	mcs_map = 0;
+	for (i = 0; i < 8; i++) {
+		if ((i < ar->num_rf_chains) && (ar->cfg_tx_chainmask & BIT(i)))
+			mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
+		else
+			mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
+	}
+
+	vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
+	vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
+
+	return vht_cap;
+}
+
+static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar)
+{
+	int i;
+	struct ieee80211_sta_ht_cap ht_cap = {0};
+
+	if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED))
+		return ht_cap;
+
+	ht_cap.ht_supported = 1;
+	ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+	ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
+	ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+	ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
+	ht_cap.cap |= WLAN_HT_CAP_SM_PS_STATIC << IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+	if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI)
+		ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
+
+	if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI)
+		ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+
+	if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) {
+		u32 smps;
+
+		smps   = WLAN_HT_CAP_SM_PS_DYNAMIC;
+		smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+		ht_cap.cap |= smps;
+	}
+
+	if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC)
+		ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
+
+	if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) {
+		u32 stbc;
+
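+		/* Extract the RX STBC stream count from the WMI capability
+		 * word and shift it into the IEEE80211 HT cap bit position.
+		 */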
+		stbc   = ar->ht_cap_info;
+		stbc  &= WMI_HT_CAP_RX_STBC;
+		stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT;
+		stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT;
+		stbc  &= IEEE80211_HT_CAP_RX_STBC;
+
+		ht_cap.cap |= stbc;
+	}
+
+	if (ar->ht_cap_info & WMI_HT_CAP_LDPC)
+		ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+
+	if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT)
+		ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT;
+
+	/* max AMSDU is implicitly taken from vht_cap_info */
+	if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
+		ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
+
+	for (i = 0; i < ar->num_rf_chains; i++) {
+		if (ar->cfg_rx_chainmask & BIT(i))
+			ht_cap.mcs.rx_mask[i] = 0xFF;
+	}
+
+	ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
+
+	return ht_cap;
+}
+
+static void ath10k_mac_setup_ht_vht_cap(struct ath10k *ar)
+{
+	struct ieee80211_supported_band *band;
+	struct ieee80211_sta_vht_cap vht_cap;
+	struct ieee80211_sta_ht_cap ht_cap;
+
+	ht_cap = ath10k_get_ht_cap(ar);
+	vht_cap = ath10k_create_vht_cap(ar);
+
+	if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
+		band = &ar->mac.sbands[IEEE80211_BAND_2GHZ];
+		band->ht_cap = ht_cap;
+
+		/* Enable the VHT support at 2.4 GHz */
+		band->vht_cap = vht_cap;
+	}
+	if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
+		band = &ar->mac.sbands[IEEE80211_BAND_5GHZ];
+		band->ht_cap = ht_cap;
+		band->vht_cap = vht_cap;
+	}
+}
+
+static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant)
+{
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	ath10k_check_chain_mask(ar, tx_ant, "tx");
+	ath10k_check_chain_mask(ar, rx_ant, "rx");
+
+	ar->cfg_tx_chainmask = tx_ant;
+	ar->cfg_rx_chainmask = rx_ant;
+
+	if ((ar->state != ATH10K_STATE_ON) &&
+	    (ar->state != ATH10K_STATE_RESTARTED))
+		return 0;
+
+	ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->tx_chain_mask,
+					tx_ant);
+	if (ret) {
+		ath10k_warn(ar, "failed to set tx-chainmask: %d, req 0x%x\n",
+			    ret, tx_ant);
+		return ret;
+	}
+
+	ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rx_chain_mask,
+					rx_ant);
+	if (ret) {
+		ath10k_warn(ar, "failed to set rx-chainmask: %d, req 0x%x\n",
+			    ret, rx_ant);
+		return ret;
+	}
+
+	/* Reload HT/VHT capability */
+	ath10k_mac_setup_ht_vht_cap(ar);
+
+	return 0;
+}
+
+static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+{
+	struct ath10k *ar = hw->priv;
+	int ret;
+
+	mutex_lock(&ar->conf_mutex);
+	ret = __ath10k_set_antenna(ar, tx_ant, rx_ant);
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+static int ath10k_start(struct ieee80211_hw *hw)
+{
+	struct ath10k *ar = hw->priv;
+	u32 burst_enable;
+	int ret = 0;
+
+	/*
+	 * This makes sense only when restarting hw. It is harmless to call
+	 * it unconditionally. This is necessary to make sure no HTT/WMI tx
+	 * commands will be submitted while restarting.
+	 */
+	ath10k_drain_tx(ar);
+
+	mutex_lock(&ar->conf_mutex);
+
+	switch (ar->state) {
+	case ATH10K_STATE_OFF:
+		ar->state = ATH10K_STATE_ON;
+		break;
+	case ATH10K_STATE_RESTARTING:
+		ath10k_halt(ar);
+		ar->state = ATH10K_STATE_RESTARTED;
+		break;
+	case ATH10K_STATE_ON:
+	case ATH10K_STATE_RESTARTED:
+	case ATH10K_STATE_WEDGED:
+		WARN_ON(1);
+		ret = -EINVAL;
+		goto err;
+	case ATH10K_STATE_UTF:
+		ret = -EBUSY;
+		goto err;
+	}
+
+	ret = ath10k_hif_power_up(ar);
+	if (ret) {
+		ath10k_err(ar, "Could not init hif: %d\n", ret);
+		goto err_off;
+	}
+
+	ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL);
+	if (ret) {
+		ath10k_err(ar, "Could not init core: %d\n", ret);
+		goto err_power_down;
+	}
+
+	ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pmf_qos, 1);
+	if (ret) {
+		ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret);
+		goto err_core_stop;
+	}
+
+	ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 1);
+	if (ret) {
+		ath10k_warn(ar, "failed to enable dynamic BW: %d\n", ret);
+		goto err_core_stop;
+	}
+
+	if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
+		ret = ath10k_wmi_adaptive_qcs(ar, true);
+		if (ret) {
+			ath10k_warn(ar, "failed to enable adaptive qcs: %d\n",
+				    ret);
+			goto err_core_stop;
+		}
+	}
+
+	if (test_bit(WMI_SERVICE_BURST, ar->wmi.svc_map)) {
+		burst_enable = ar->wmi.pdev_param->burst_enable;
+		ret = ath10k_wmi_pdev_set_param(ar, burst_enable, 0);
+		if (ret) {
+			ath10k_warn(ar, "failed to disable burst: %d\n", ret);
+			goto err_core_stop;
+		}
+	}
+
+	__ath10k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask);
+
+	/*
+	 * By default the FW sets the ARP frames' AC to voice (6). In that
+	 * case the ARP exchange does not work properly for a UAPSD enabled
+	 * AP. ARP requests which arrive with access category 0 are processed
+	 * by the network stack and sent back with access category 0, but the
+	 * FW changes the access category to 6. Setting the ARP frames' access
+	 * category to best effort (0) solves this problem.
+	 */
+
+	ret = ath10k_wmi_pdev_set_param(ar,
+					ar->wmi.pdev_param->arp_ac_override, 0);
+	if (ret) {
+		ath10k_warn(ar, "failed to set arp ac override parameter: %d\n",
+			    ret);
+		goto err_core_stop;
+	}
+
+	if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA,
+		     ar->fw_features)) {
+		ret = ath10k_wmi_pdev_enable_adaptive_cca(ar, 1,
+							  WMI_CCA_DETECT_LEVEL_AUTO,
+							  WMI_CCA_DETECT_MARGIN_AUTO);
+		if (ret) {
+			ath10k_warn(ar, "failed to enable adaptive cca: %d\n",
+				    ret);
+			goto err_core_stop;
+		}
+	}
+
+	ret = ath10k_wmi_pdev_set_param(ar,
+					ar->wmi.pdev_param->ani_enable, 1);
+	if (ret) {
+		ath10k_warn(ar, "failed to enable ani by default: %d\n",
+			    ret);
+		goto err_core_stop;
+	}
+
+	ar->ani_enabled = true;
+
+	ar->num_started_vdevs = 0;
+	ath10k_regd_update(ar);
+
+	ath10k_spectral_start(ar);
+	ath10k_thermal_set_throttling(ar);
+
+	mutex_unlock(&ar->conf_mutex);
+	return 0;
+
+err_core_stop:
+	ath10k_core_stop(ar);
+
+err_power_down:
+	ath10k_hif_power_down(ar);
+
+err_off:
+	ar->state = ATH10K_STATE_OFF;
+
+err:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+static void ath10k_stop(struct ieee80211_hw *hw)
+{
+	struct ath10k *ar = hw->priv;
+
+	ath10k_drain_tx(ar);
+
+	mutex_lock(&ar->conf_mutex);
+	if (ar->state != ATH10K_STATE_OFF) {
+		ath10k_halt(ar);
+		ar->state = ATH10K_STATE_OFF;
+	}
+	mutex_unlock(&ar->conf_mutex);
+
+	cancel_delayed_work_sync(&ar->scan.timeout);
+	cancel_work_sync(&ar->restart_work);
+}
+
+static int ath10k_config_ps(struct ath10k *ar)
+{
+	struct ath10k_vif *arvif;
+	int ret = 0;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	list_for_each_entry(arvif, &ar->arvifs, list) {
+		ret = ath10k_mac_vif_setup_ps(arvif);
+		if (ret) {
+			ath10k_warn(ar, "failed to setup powersave: %d\n", ret);
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower)
+{
+	int ret;
+	u32 param;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower %d\n", txpower);
+
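+	/* The WMI txpower limit parameters appear to be expressed in 0.5 dBm
+	 * units, hence the doubling of the dBm value below.
+	 */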
+	param = ar->wmi.pdev_param->txpower_limit2g;
+	ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
+	if (ret) {
+		ath10k_warn(ar, "failed to set 2g txpower %d: %d\n",
+			    txpower, ret);
+		return ret;
+	}
+
+	param = ar->wmi.pdev_param->txpower_limit5g;
+	ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
+	if (ret) {
+		ath10k_warn(ar, "failed to set 5g txpower %d: %d\n",
+			    txpower, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int ath10k_mac_txpower_recalc(struct ath10k *ar)
+{
+	struct ath10k_vif *arvif;
+	int ret, txpower = -1;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	list_for_each_entry(arvif, &ar->arvifs, list) {
+		WARN_ON(arvif->txpower < 0);
+
+		if (txpower == -1)
+			txpower = arvif->txpower;
+		else
+			txpower = min(txpower, arvif->txpower);
+	}
+
+	if (WARN_ON(txpower == -1))
+		return -EINVAL;
+
+	ret = ath10k_mac_txpower_setup(ar, txpower);
+	if (ret) {
+		ath10k_warn(ar, "failed to setup tx power %d: %d\n",
+			    txpower, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
+{
+	struct ath10k *ar = hw->priv;
+	struct ieee80211_conf *conf = &hw->conf;
+	int ret = 0;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (changed & IEEE80211_CONF_CHANGE_PS)
+		ath10k_config_ps(ar);
+
+	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+		ar->monitor = conf->flags & IEEE80211_CONF_MONITOR;
+		ret = ath10k_monitor_recalc(ar);
+		if (ret)
+			ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
+	}
+
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
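+/* Map a contiguous chainmask to a number of spatial streams:
+ * 0xf -> 4, 0x7 -> 3, 0x3 -> 2, anything else -> 1.
+ */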
+static u32 get_nss_from_chainmask(u16 chain_mask)
+{
+	if ((chain_mask & 0xf) == 0xf)
+		return 4;
+	else if ((chain_mask & 0x7) == 0x7)
+		return 3;
+	else if ((chain_mask & 0x3) == 0x3)
+		return 2;
+	return 1;
+}
+
+static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif)
+{
+	u32 value = 0;
+	struct ath10k *ar = arvif->ar;
+	int nsts;
+	int sound_dim;
+
+	if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_BEFORE_ASSOC)
+		return 0;
+
+	nsts = ath10k_mac_get_vht_cap_bf_sts(ar);
+	if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+				IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE))
+		value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET);
+
+	sound_dim = ath10k_mac_get_vht_cap_bf_sound_dim(ar);
+	if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+				IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))
+		value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET);
+
+	if (!value)
+		return 0;
+
+	if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
+		value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
+
+	if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
+		value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFER |
+			  WMI_VDEV_PARAM_TXBF_SU_TX_BFER);
+
+	if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
+		value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
+
+	if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
+		value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFEE |
+			  WMI_VDEV_PARAM_TXBF_SU_TX_BFEE);
+
+	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+					 ar->wmi.vdev_param->txbf, value);
+}
+
+/*
+ * TODO:
+ * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE,
+ * because we will send mgmt frames without CCK. This requirement
+ * for P2P_FIND/GO_NEG should be handled by checking CCK flag
+ * in the TX packet.
+ */
+static int ath10k_add_interface(struct ieee80211_hw *hw,
+				struct ieee80211_vif *vif)
+{
+	struct ath10k *ar = hw->priv;
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	enum wmi_sta_powersave_param param;
+	int ret = 0;
+	u32 value;
+	int bit;
+	int i;
+	u32 vdev_param;
+
+	vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
+
+	mutex_lock(&ar->conf_mutex);
+
+	memset(arvif, 0, sizeof(*arvif));
+
+	arvif->ar = ar;
+	arvif->vif = vif;
+
+	INIT_LIST_HEAD(&arvif->list);
+	INIT_WORK(&arvif->ap_csa_work, ath10k_mac_vif_ap_csa_work);
+	INIT_DELAYED_WORK(&arvif->connection_loss_work,
+			  ath10k_mac_vif_sta_connection_loss_work);
+
+	for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
+		arvif->bitrate_mask.control[i].legacy = 0xffffffff;
+		memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
+		       sizeof(arvif->bitrate_mask.control[i].ht_mcs));
+		memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
+		       sizeof(arvif->bitrate_mask.control[i].vht_mcs));
+	}
+
+	if (ar->num_peers >= ar->max_num_peers) {
+		ath10k_warn(ar, "refusing vdev creation due to insufficient peer entry resources in firmware\n");
+		ret = -ENOBUFS;
+		goto err;
+	}
+
+	if (ar->free_vdev_map == 0) {
+		ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n");
+		ret = -EBUSY;
+		goto err;
+	}
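+	/* Pick the lowest free bit in the vdev bitmap as the new vdev id. */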
+	bit = __ffs64(ar->free_vdev_map);
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac create vdev %i map %llx\n",
+		   bit, ar->free_vdev_map);
+
+	arvif->vdev_id = bit;
+	arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
+
+	switch (vif->type) {
+	case NL80211_IFTYPE_P2P_DEVICE:
+		arvif->vdev_type = WMI_VDEV_TYPE_STA;
+		arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE;
+		break;
+	case NL80211_IFTYPE_UNSPECIFIED:
+	case NL80211_IFTYPE_STATION:
+		arvif->vdev_type = WMI_VDEV_TYPE_STA;
+		if (vif->p2p)
+			arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_CLIENT;
+		break;
+	case NL80211_IFTYPE_ADHOC:
+		arvif->vdev_type = WMI_VDEV_TYPE_IBSS;
+		break;
+	case NL80211_IFTYPE_MESH_POINT:
+		if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+			ret = -EINVAL;
+			ath10k_warn(ar, "must load driver with rawmode=1 to add mesh interfaces\n");
+			goto err;
+		}
+		arvif->vdev_type = WMI_VDEV_TYPE_AP;
+		break;
+	case NL80211_IFTYPE_AP:
+		arvif->vdev_type = WMI_VDEV_TYPE_AP;
+
+		if (vif->p2p)
+			arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_GO;
+		break;
+	case NL80211_IFTYPE_MONITOR:
+		arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
+		break;
+	default:
+		WARN_ON(1);
+		break;
+	}
+
+	/* Using vdev_id as queue number will make it very easy to do per-vif
+	 * tx queue locking. This shouldn't wrap due to interface combinations
+	 * but do a modulo for correctness sake and prevent using offchannel tx
+	 * queues for regular vif tx.
+	 */
+	vif->cab_queue = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
+	for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
+		vif->hw_queue[i] = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
+
+	/* Some firmware revisions don't wait for beacon tx completion before
+	 * sending another SWBA event. This could lead to hardware using old
+	 * (freed) beacon data in some cases, e.g. tx credit starvation
+	 * combined with missed TBTT. This is very very rare.
+	 *
+	 * On non-IOMMU-enabled hosts this could be a possible security issue
+	 * because hw could beacon some random data on the air.  On
+	 * IOMMU-enabled hosts DMAR faults would occur in most cases and target
+	 * device would crash.
+	 *
+	 * Since there are no beacon tx completions (implicit or explicit)
+	 * propagated to the host, the only workaround for this is to allocate
+	 * a DMA-coherent buffer for the lifetime of a vif and use it for all
+	 * beacon tx commands. Worst case for this approach is some beacons may
+	 * become corrupted, e.g. have garbled IEs or out-of-date TIM bitmap.
+	 */
+	if (vif->type == NL80211_IFTYPE_ADHOC ||
+	    vif->type == NL80211_IFTYPE_MESH_POINT ||
+	    vif->type == NL80211_IFTYPE_AP) {
+		arvif->beacon_buf = dma_zalloc_coherent(ar->dev,
+							IEEE80211_MAX_FRAME_LEN,
+							&arvif->beacon_paddr,
+							GFP_ATOMIC);
+		if (!arvif->beacon_buf) {
+			ret = -ENOMEM;
+			ath10k_warn(ar, "failed to allocate beacon buffer: %d\n",
+				    ret);
+			goto err;
+		}
+	}
+	if (test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags))
+		arvif->nohwcrypt = true;
+
+	if (arvif->nohwcrypt &&
+	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+		ret = -EINVAL;
+		ath10k_warn(ar, "cryptmode module param needed for sw crypto\n");
+		goto err;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n",
+		   arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
+		   arvif->beacon_buf ? "single-buf" : "per-skb");
+
+	ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
+				     arvif->vdev_subtype, vif->addr);
+	if (ret) {
+		ath10k_warn(ar, "failed to create WMI vdev %i: %d\n",
+			    arvif->vdev_id, ret);
+		goto err;
+	}
+
+	ar->free_vdev_map &= ~(1LL << arvif->vdev_id);
+	list_add(&arvif->list, &ar->arvifs);
+
+	/* It makes no sense to have firmware do keepalives. mac80211 already
+	 * takes care of this with idle connection polling.
+	 */
+	ret = ath10k_mac_vif_disable_keepalive(arvif);
+	if (ret) {
+		ath10k_warn(ar, "failed to disable keepalive on vdev %i: %d\n",
+			    arvif->vdev_id, ret);
+		goto err_vdev_delete;
+	}
+
+	arvif->def_wep_key_idx = -1;
+
+	vdev_param = ar->wmi.vdev_param->tx_encap_type;
+	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+					ATH10K_HW_TXRX_NATIVE_WIFI);
+	/* 10.X firmware does not support this VDEV parameter. Do not warn */
+	if (ret && ret != -EOPNOTSUPP) {
+		ath10k_warn(ar, "failed to set vdev %i TX encapsulation: %d\n",
+			    arvif->vdev_id, ret);
+		goto err_vdev_delete;
+	}
+
+	/* Configuring the number of spatial streams for a monitor interface
+	 * causes a target assert in qca9888 and qca6174.
+	 */
+	if (ar->cfg_tx_chainmask && (vif->type != NL80211_IFTYPE_MONITOR)) {
+		u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask);
+
+		vdev_param = ar->wmi.vdev_param->nss;
+		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+						nss);
+		if (ret) {
+			ath10k_warn(ar, "failed to set vdev %i chainmask 0x%x, nss %i: %d\n",
+				    arvif->vdev_id, ar->cfg_tx_chainmask, nss,
+				    ret);
+			goto err_vdev_delete;
+		}
+	}
+
+	if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+	    arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
+		ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr,
+					 WMI_PEER_TYPE_DEFAULT);
+		if (ret) {
+			ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n",
+				    arvif->vdev_id, ret);
+			goto err_vdev_delete;
+		}
+	}
+
+	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+		ret = ath10k_mac_set_kickout(arvif);
+		if (ret) {
+			ath10k_warn(ar, "failed to set vdev %i kickout parameters: %d\n",
+				    arvif->vdev_id, ret);
+			goto err_peer_delete;
+		}
+	}
+
+	if (arvif->vdev_type == WMI_VDEV_TYPE_STA) {
+		param = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
+		value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
+		ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+						  param, value);
+		if (ret) {
+			ath10k_warn(ar, "failed to set vdev %i RX wake policy: %d\n",
+				    arvif->vdev_id, ret);
+			goto err_peer_delete;
+		}
+
+		ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
+		if (ret) {
+			ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n",
+				    arvif->vdev_id, ret);
+			goto err_peer_delete;
+		}
+
+		ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
+		if (ret) {
+			ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n",
+				    arvif->vdev_id, ret);
+			goto err_peer_delete;
+		}
+	}
+
+	ret = ath10k_mac_set_txbf_conf(arvif);
+	if (ret) {
+		ath10k_warn(ar, "failed to set txbf for vdev %d: %d\n",
+			    arvif->vdev_id, ret);
+		goto err_peer_delete;
+	}
+
+	ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
+	if (ret) {
+		ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
+			    arvif->vdev_id, ret);
+		goto err_peer_delete;
+	}
+
+	arvif->txpower = vif->bss_conf.txpower;
+	ret = ath10k_mac_txpower_recalc(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
+		goto err_peer_delete;
+	}
+
+	if (vif->type == NL80211_IFTYPE_MONITOR) {
+		ar->monitor_arvif = arvif;
+		ret = ath10k_monitor_recalc(ar);
+		if (ret) {
+			ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
+			goto err_peer_delete;
+		}
+	}
+
+	spin_lock_bh(&ar->htt.tx_lock);
+	if (!ar->tx_paused)
+		ieee80211_wake_queue(ar->hw, arvif->vdev_id);
+	spin_unlock_bh(&ar->htt.tx_lock);
+
+	mutex_unlock(&ar->conf_mutex);
+	return 0;
+
+err_peer_delete:
+	if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+	    arvif->vdev_type == WMI_VDEV_TYPE_IBSS)
+		ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr);
+
+err_vdev_delete:
+	ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
+	ar->free_vdev_map |= 1LL << arvif->vdev_id;
+	list_del(&arvif->list);
+
+err:
+	if (arvif->beacon_buf) {
+		dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
+				  arvif->beacon_buf, arvif->beacon_paddr);
+		arvif->beacon_buf = NULL;
+	}
+
+	mutex_unlock(&ar->conf_mutex);
+
+	return ret;
+}
+
+static void ath10k_mac_vif_tx_unlock_all(struct ath10k_vif *arvif)
+{
+	int i;
+
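+	/* Clear every possible tx pause reason bit for this vif. */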
+	for (i = 0; i < BITS_PER_LONG; i++)
+		ath10k_mac_vif_tx_unlock(arvif, i);
+}
+
+static void ath10k_remove_interface(struct ieee80211_hw *hw,
+				    struct ieee80211_vif *vif)
+{
+	struct ath10k *ar = hw->priv;
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	int ret;
+
+	cancel_work_sync(&arvif->ap_csa_work);
+	cancel_delayed_work_sync(&arvif->connection_loss_work);
+
+	mutex_lock(&ar->conf_mutex);
+
+	spin_lock_bh(&ar->data_lock);
+	ath10k_mac_vif_beacon_cleanup(arvif);
+	spin_unlock_bh(&ar->data_lock);
+
+	ret = ath10k_spectral_vif_stop(arvif);
+	if (ret)
+		ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n",
+			    arvif->vdev_id, ret);
+
+	ar->free_vdev_map |= 1LL << arvif->vdev_id;
+	list_del(&arvif->list);
+
+	if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+	    arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
+		ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id,
+					     vif->addr);
+		if (ret)
+			ath10k_warn(ar, "failed to submit AP/IBSS self-peer removal on vdev %i: %d\n",
+				    arvif->vdev_id, ret);
+
+		kfree(arvif->u.ap.noa_data);
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n",
+		   arvif->vdev_id);
+
+	ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
+	if (ret)
+		ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n",
+			    arvif->vdev_id, ret);
+
+	/* Some firmware revisions don't notify host about self-peer removal
+	 * until after associated vdev is deleted.
+	 */
+	if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+	    arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
+		ret = ath10k_wait_for_peer_deleted(ar, arvif->vdev_id,
+						   vif->addr);
+		if (ret)
+			ath10k_warn(ar, "failed to remove AP self-peer on vdev %i: %d\n",
+				    arvif->vdev_id, ret);
+
+		spin_lock_bh(&ar->data_lock);
+		ar->num_peers--;
+		spin_unlock_bh(&ar->data_lock);
+	}
+
+	ath10k_peer_cleanup(ar, arvif->vdev_id);
+
+	if (vif->type == NL80211_IFTYPE_MONITOR) {
+		ar->monitor_arvif = NULL;
+		ret = ath10k_monitor_recalc(ar);
+		if (ret)
+			ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
+	}
+
+	spin_lock_bh(&ar->htt.tx_lock);
+	ath10k_mac_vif_tx_unlock_all(arvif);
+	spin_unlock_bh(&ar->htt.tx_lock);
+
+	mutex_unlock(&ar->conf_mutex);
+}
+
+/*
+ * FIXME: Has to be verified.
+ */
+#define SUPPORTED_FILTERS			\
+	(FIF_ALLMULTI |				\
+	FIF_CONTROL |				\
+	FIF_PSPOLL |				\
+	FIF_OTHER_BSS |				\
+	FIF_BCN_PRBRESP_PROMISC |		\
+	FIF_PROBE_REQ |				\
+	FIF_FCSFAIL)
+
+static void ath10k_configure_filter(struct ieee80211_hw *hw,
+				    unsigned int changed_flags,
+				    unsigned int *total_flags,
+				    u64 multicast)
+{
+	struct ath10k *ar = hw->priv;
+	int ret;
+
+	mutex_lock(&ar->conf_mutex);
+
+	changed_flags &= SUPPORTED_FILTERS;
+	*total_flags &= SUPPORTED_FILTERS;
+	ar->filter_flags = *total_flags;
+
+	ret = ath10k_monitor_recalc(ar);
+	if (ret)
+		ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
+
+	mutex_unlock(&ar->conf_mutex);
+}
+
+static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
+				    struct ieee80211_vif *vif,
+				    struct ieee80211_bss_conf *info,
+				    u32 changed)
+{
+	struct ath10k *ar = hw->priv;
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	int ret = 0;
+	u32 vdev_param, pdev_param, slottime, preamble;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (changed & BSS_CHANGED_IBSS)
+		ath10k_control_ibss(arvif, info, vif->addr);
+
+	if (changed & BSS_CHANGED_BEACON_INT) {
+		arvif->beacon_interval = info->beacon_int;
+		vdev_param = ar->wmi.vdev_param->beacon_interval;
+		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+						arvif->beacon_interval);
+		ath10k_dbg(ar, ATH10K_DBG_MAC,
+			   "mac vdev %d beacon_interval %d\n",
+			   arvif->vdev_id, arvif->beacon_interval);
+
+		if (ret)
+			ath10k_warn(ar, "failed to set beacon interval for vdev %d: %i\n",
+				    arvif->vdev_id, ret);
+	}
+
+	if (changed & BSS_CHANGED_BEACON) {
+		ath10k_dbg(ar, ATH10K_DBG_MAC,
+			   "vdev %d set beacon tx mode to staggered\n",
+			   arvif->vdev_id);
+
+		pdev_param = ar->wmi.pdev_param->beacon_tx_mode;
+		ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
+						WMI_BEACON_STAGGERED_MODE);
+		if (ret)
+			ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n",
+				    arvif->vdev_id, ret);
+
+		ret = ath10k_mac_setup_bcn_tmpl(arvif);
+		if (ret)
+			ath10k_warn(ar, "failed to update beacon template: %d\n",
+				    ret);
+
+		if (ieee80211_vif_is_mesh(vif)) {
+			/* mesh doesn't use SSID but firmware needs it */
+			strncpy(arvif->u.ap.ssid, "mesh",
+				sizeof(arvif->u.ap.ssid));
+			arvif->u.ap.ssid_len = 4;
+		}
+	}
+
+	if (changed & BSS_CHANGED_AP_PROBE_RESP) {
+		ret = ath10k_mac_setup_prb_tmpl(arvif);
+		if (ret)
+			ath10k_warn(ar, "failed to setup probe resp template on vdev %i: %d\n",
+				    arvif->vdev_id, ret);
+	}
+
+	if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
+		arvif->dtim_period = info->dtim_period;
+
+		ath10k_dbg(ar, ATH10K_DBG_MAC,
+			   "mac vdev %d dtim_period %d\n",
+			   arvif->vdev_id, arvif->dtim_period);
+
+		vdev_param = ar->wmi.vdev_param->dtim_period;
+		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+						arvif->dtim_period);
+		if (ret)
+			ath10k_warn(ar, "failed to set dtim period for vdev %d: %i\n",
+				    arvif->vdev_id, ret);
+	}
+
+	if (changed & BSS_CHANGED_SSID &&
+	    vif->type == NL80211_IFTYPE_AP) {
+		arvif->u.ap.ssid_len = info->ssid_len;
+		if (info->ssid_len)
+			memcpy(arvif->u.ap.ssid, info->ssid, info->ssid_len);
+		arvif->u.ap.hidden_ssid = info->hidden_ssid;
+	}
+
+	if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid))
+		ether_addr_copy(arvif->bssid, info->bssid);
+
+	if (changed & BSS_CHANGED_BEACON_ENABLED)
+		ath10k_control_beaconing(arvif, info);
+
+	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
+		arvif->use_cts_prot = info->use_cts_prot;
+		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
+			   arvif->vdev_id, info->use_cts_prot);
+
+		ret = ath10k_recalc_rtscts_prot(arvif);
+		if (ret)
+			ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
+				    arvif->vdev_id, ret);
+
+		vdev_param = ar->wmi.vdev_param->protection_mode;
+		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+						info->use_cts_prot ? 1 : 0);
+		if (ret)
+			ath10k_warn(ar, "failed to set protection mode %d on vdev %i: %d\n",
+				    info->use_cts_prot, arvif->vdev_id, ret);
+	}
+
+	if (changed & BSS_CHANGED_ERP_SLOT) {
+		if (info->use_short_slot)
+			slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */
+		else
+			slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
+
+		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n",
+			   arvif->vdev_id, slottime);
+
+		vdev_param = ar->wmi.vdev_param->slot_time;
+		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+						slottime);
+		if (ret)
+			ath10k_warn(ar, "failed to set erp slot for vdev %d: %i\n",
+				    arvif->vdev_id, ret);
+	}
+
+	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+		if (info->use_short_preamble)
+			preamble = WMI_VDEV_PREAMBLE_SHORT;
+		else
+			preamble = WMI_VDEV_PREAMBLE_LONG;
+
+		ath10k_dbg(ar, ATH10K_DBG_MAC,
+			   "mac vdev %d preamble %d\n",
+			   arvif->vdev_id, preamble);
+
+		vdev_param = ar->wmi.vdev_param->preamble;
+		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+						preamble);
+		if (ret)
+			ath10k_warn(ar, "failed to set preamble for vdev %d: %i\n",
+				    arvif->vdev_id, ret);
+	}
+
+	if (changed & BSS_CHANGED_ASSOC) {
+		if (info->assoc) {
+			/* Workaround: Make sure monitor vdev is not running
+			 * when associating to prevent some firmware revisions
+			 * (e.g. 10.1 and 10.2) from crashing.
+			 */
+			if (ar->monitor_started)
+				ath10k_monitor_stop(ar);
+			ath10k_bss_assoc(hw, vif, info);
+			ath10k_monitor_recalc(ar);
+		} else {
+			ath10k_bss_disassoc(hw, vif);
+		}
+	}
+
+	if (changed & BSS_CHANGED_TXPOWER) {
+		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev_id %i txpower %d\n",
+			   arvif->vdev_id, info->txpower);
+
+		arvif->txpower = info->txpower;
+		ret = ath10k_mac_txpower_recalc(ar);
+		if (ret)
+			ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
+	}
+
+	if (changed & BSS_CHANGED_PS) {
+		arvif->ps = vif->bss_conf.ps;
+
+		ret = ath10k_config_ps(ar);
+		if (ret)
+			ath10k_warn(ar, "failed to setup ps on vdev %i: %d\n",
+				    arvif->vdev_id, ret);
+	}
+
+	mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath10k_hw_scan(struct ieee80211_hw *hw,
+			  struct ieee80211_vif *vif,
+			  struct ieee80211_scan_request *hw_req)
+{
+	struct ath10k *ar = hw->priv;
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	struct cfg80211_scan_request *req = &hw_req->req;
+	struct wmi_start_scan_arg arg;
+	int ret = 0;
+	int i;
+
+	mutex_lock(&ar->conf_mutex);
+
+	spin_lock_bh(&ar->data_lock);
+	switch (ar->scan.state) {
+	case ATH10K_SCAN_IDLE:
+		reinit_completion(&ar->scan.started);
+		reinit_completion(&ar->scan.completed);
+		ar->scan.state = ATH10K_SCAN_STARTING;
+		ar->scan.is_roc = false;
+		ar->scan.vdev_id = arvif->vdev_id;
+		ret = 0;
+		break;
+	case ATH10K_SCAN_STARTING:
+	case ATH10K_SCAN_RUNNING:
+	case ATH10K_SCAN_ABORTING:
+		ret = -EBUSY;
+		break;
+	}
+	spin_unlock_bh(&ar->data_lock);
+
+	if (ret)
+		goto exit;
+
+	memset(&arg, 0, sizeof(arg));
+	ath10k_wmi_start_scan_init(ar, &arg);
+	arg.vdev_id = arvif->vdev_id;
+	arg.scan_id = ATH10K_SCAN_ID;
+
+	if (req->ie_len) {
+		arg.ie_len = req->ie_len;
+		memcpy(arg.ie, req->ie, arg.ie_len);
+	}
+
+	if (req->n_ssids) {
+		arg.n_ssids = req->n_ssids;
+		for (i = 0; i < arg.n_ssids; i++) {
+			arg.ssids[i].len  = req->ssids[i].ssid_len;
+			arg.ssids[i].ssid = req->ssids[i].ssid;
+		}
+	} else {
+		arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
+	}
+
+	if (req->n_channels) {
+		arg.n_channels = req->n_channels;
+		for (i = 0; i < arg.n_channels; i++)
+			arg.channels[i] = req->channels[i]->center_freq;
+	}
+
+	ret = ath10k_start_scan(ar, &arg);
+	if (ret) {
+		ath10k_warn(ar, "failed to start hw scan: %d\n", ret);
+		spin_lock_bh(&ar->data_lock);
+		ar->scan.state = ATH10K_SCAN_IDLE;
+		spin_unlock_bh(&ar->data_lock);
+	}
+
+	/* Add a 200ms margin to account for event/command processing */
+	ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
+				     msecs_to_jiffies(arg.max_scan_time +
+						      200));
+
+exit:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw,
+				  struct ieee80211_vif *vif)
+{
+	struct ath10k *ar = hw->priv;
+
+	mutex_lock(&ar->conf_mutex);
+	ath10k_scan_abort(ar);
+	mutex_unlock(&ar->conf_mutex);
+
+	cancel_delayed_work_sync(&ar->scan.timeout);
+}
+
+static void ath10k_set_key_h_def_keyidx(struct ath10k *ar,
+					struct ath10k_vif *arvif,
+					enum set_key_cmd cmd,
+					struct ieee80211_key_conf *key)
+{
+	u32 vdev_param = arvif->ar->wmi.vdev_param->def_keyid;
+	int ret;
+
+	/* The 10.1 firmware branch requires the default key index to be set
+	 * to the group key index after installing it. Otherwise FW/HW
+	 * transmits corrupted frames with multi-vif APs. This is not required
+	 * for the main firmware branch (e.g. 636).
+	 *
+	 * This is also needed for 636 fw for IBSS-RSN to work more reliably.
+	 *
+	 * FIXME: It remains unknown if this is required for multi-vif STA
+	 * interfaces on 10.1.
+	 */
+
+	if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
+	    arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
+		return;
+
+	if (key->cipher == WLAN_CIPHER_SUITE_WEP40)
+		return;
+
+	if (key->cipher == WLAN_CIPHER_SUITE_WEP104)
+		return;
+
+	if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+		return;
+
+	if (cmd != SET_KEY)
+		return;
+
+	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+					key->keyidx);
+	if (ret)
+		ath10k_warn(ar, "failed to set vdev %i group key as default key: %d\n",
+			    arvif->vdev_id, ret);
+}
+
+static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+			  struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+			  struct ieee80211_key_conf *key)
+{
+	struct ath10k *ar = hw->priv;
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	struct ath10k_peer *peer;
+	const u8 *peer_addr;
+	bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+		      key->cipher == WLAN_CIPHER_SUITE_WEP104;
+	int ret = 0;
+	int ret2;
+	u32 flags = 0;
+	u32 flags2;
+
+	/* this one needs to be done in software */
+	if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
+		return 1;
+
+	if (arvif->nohwcrypt)
+		return 1;
+
+	if (key->keyidx > WMI_MAX_KEY_INDEX)
+		return -ENOSPC;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (sta)
+		peer_addr = sta->addr;
+	else if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
+		peer_addr = vif->bss_conf.bssid;
+	else
+		peer_addr = vif->addr;
+
+	key->hw_key_idx = key->keyidx;
+
+	if (is_wep) {
+		if (cmd == SET_KEY)
+			arvif->wep_keys[key->keyidx] = key;
+		else
+			arvif->wep_keys[key->keyidx] = NULL;
+	}
+
+	/* The peer should not disappear midway (unless FW goes awry) since we
+	 * already hold conf_mutex. We just make sure it's there now.
+	 */
+	spin_lock_bh(&ar->data_lock);
+	peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
+	spin_unlock_bh(&ar->data_lock);
+
+	if (!peer) {
+		if (cmd == SET_KEY) {
+			ath10k_warn(ar, "failed to install key for non-existent peer %pM\n",
+				    peer_addr);
+			ret = -EOPNOTSUPP;
+			goto exit;
+		} else {
+			/* if the peer doesn't exist there is no key to disable
+			 * anymore */
+			goto exit;
+		}
+	}
+
+	if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+		flags |= WMI_KEY_PAIRWISE;
+	else
+		flags |= WMI_KEY_GROUP;
+
+	if (is_wep) {
+		if (cmd == DISABLE_KEY)
+			ath10k_clear_vdev_key(arvif, key);
+
+		/* When WEP keys are uploaded it's possible that there are
+		 * stations associated already (e.g. when merging) without any
+		 * keys. Static WEP needs an explicit per-peer key upload.
+		 */
+		if (vif->type == NL80211_IFTYPE_ADHOC &&
+		    cmd == SET_KEY)
+			ath10k_mac_vif_update_wep_key(arvif, key);
+
+		/* 802.1x never sets the def_wep_key_idx so each set_key()
+		 * call changes default tx key.
+		 *
+		 * Static WEP sets def_wep_key_idx via .set_default_unicast_key
+		 * after first set_key().
+		 */
+		if (cmd == SET_KEY && arvif->def_wep_key_idx == -1)
+			flags |= WMI_KEY_TX_USAGE;
+	}
+
+	ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags);
+	if (ret) {
+		WARN_ON(ret > 0);
+		ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n",
+			    arvif->vdev_id, peer_addr, ret);
+		goto exit;
+	}
+
+	/* mac80211 sets static WEP keys as groupwise while firmware requires
+	 * them to be installed twice as both pairwise and groupwise.
+	 */
+	if (is_wep && !sta && vif->type == NL80211_IFTYPE_STATION) {
+		flags2 = flags;
+		flags2 &= ~WMI_KEY_GROUP;
+		flags2 |= WMI_KEY_PAIRWISE;
+
+		ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2);
+		if (ret) {
+			WARN_ON(ret > 0);
+			ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n",
+				    arvif->vdev_id, peer_addr, ret);
+			ret2 = ath10k_install_key(arvif, key, DISABLE_KEY,
+						  peer_addr, flags);
+			if (ret2) {
+				WARN_ON(ret2 > 0);
+				ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n",
+					    arvif->vdev_id, peer_addr, ret2);
+			}
+			goto exit;
+		}
+	}
+
+	ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key);
+
+	spin_lock_bh(&ar->data_lock);
+	peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
+	if (peer && cmd == SET_KEY)
+		peer->keys[key->keyidx] = key;
+	else if (peer && cmd == DISABLE_KEY)
+		peer->keys[key->keyidx] = NULL;
+	else if (peer == NULL)
+		/* impossible unless FW goes crazy */
+		ath10k_warn(ar, "Peer %pM disappeared!\n", peer_addr);
+	spin_unlock_bh(&ar->data_lock);
+
+exit:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw,
+					   struct ieee80211_vif *vif,
+					   int keyidx)
+{
+	struct ath10k *ar = hw->priv;
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	int ret;
+
+	mutex_lock(&arvif->ar->conf_mutex);
+
+	if (arvif->ar->state != ATH10K_STATE_ON)
+		goto unlock;
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
+		   arvif->vdev_id, keyidx);
+
+	ret = ath10k_wmi_vdev_set_param(arvif->ar,
+					arvif->vdev_id,
+					arvif->ar->wmi.vdev_param->def_keyid,
+					keyidx);
+
+	if (ret) {
+		ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n",
+			    arvif->vdev_id,
+			    ret);
+		goto unlock;
+	}
+
+	arvif->def_wep_key_idx = keyidx;
+
+unlock:
+	mutex_unlock(&arvif->ar->conf_mutex);
+}
+
+static void ath10k_sta_rc_update_wk(struct work_struct *wk)
+{
+	struct ath10k *ar;
+	struct ath10k_vif *arvif;
+	struct ath10k_sta *arsta;
+	struct ieee80211_sta *sta;
+	struct cfg80211_chan_def def;
+	enum ieee80211_band band;
+	const u8 *ht_mcs_mask;
+	const u16 *vht_mcs_mask;
+	u32 changed, bw, nss, smps;
+	int err;
+
+	arsta = container_of(wk, struct ath10k_sta, update_wk);
+	sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
+	arvif = arsta->arvif;
+	ar = arvif->ar;
+
+	if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
+		return;
+
+	band = def.chan->band;
+	ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+	vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+	spin_lock_bh(&ar->data_lock);
+
+	changed = arsta->changed;
+	arsta->changed = 0;
+
+	bw = arsta->bw;
+	nss = arsta->nss;
+	smps = arsta->smps;
+
+	spin_unlock_bh(&ar->data_lock);
+
+	mutex_lock(&ar->conf_mutex);
+
+	nss = max_t(u32, 1, nss);
+	nss = min(nss, max(ath10k_mac_max_ht_nss(ht_mcs_mask),
+			   ath10k_mac_max_vht_nss(vht_mcs_mask)));
+
+	if (changed & IEEE80211_RC_BW_CHANGED) {
+		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n",
+			   sta->addr, bw);
+
+		err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
+						WMI_PEER_CHAN_WIDTH, bw);
+		if (err)
+			ath10k_warn(ar, "failed to update STA %pM peer bw %d: %d\n",
+				    sta->addr, bw, err);
+	}
+
+	if (changed & IEEE80211_RC_NSS_CHANGED) {
+		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM nss %d\n",
+			   sta->addr, nss);
+
+		err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
+						WMI_PEER_NSS, nss);
+		if (err)
+			ath10k_warn(ar, "failed to update STA %pM nss %d: %d\n",
+				    sta->addr, nss, err);
+	}
+
+	if (changed & IEEE80211_RC_SMPS_CHANGED) {
+		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM smps %d\n",
+			   sta->addr, smps);
+
+		err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
+						WMI_PEER_SMPS_STATE, smps);
+		if (err)
+			ath10k_warn(ar, "failed to update STA %pM smps %d: %d\n",
+				    sta->addr, smps, err);
+	}
+
+	if (changed & IEEE80211_RC_SUPP_RATES_CHANGED ||
+	    changed & IEEE80211_RC_NSS_CHANGED) {
+		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n",
+			   sta->addr);
+
+		err = ath10k_station_assoc(ar, arvif->vif, sta, true);
+		if (err)
+			ath10k_warn(ar, "failed to reassociate station: %pM\n",
+				    sta->addr);
+	}
+
+	mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif,
+				       struct ieee80211_sta *sta)
+{
+	struct ath10k *ar = arvif->ar;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
+		return 0;
+
+	if (ar->num_stations >= ar->max_num_stations)
+		return -ENOBUFS;
+
+	ar->num_stations++;
+
+	return 0;
+}
+
+static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif,
+					struct ieee80211_sta *sta)
+{
+	struct ath10k *ar = arvif->ar;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
+		return;
+
+	ar->num_stations--;
+}
+
+struct ath10k_mac_tdls_iter_data {
+	u32 num_tdls_stations;
+	struct ieee80211_vif *curr_vif;
+};
+
+static void ath10k_mac_tdls_vif_stations_count_iter(void *data,
+						    struct ieee80211_sta *sta)
+{
+	struct ath10k_mac_tdls_iter_data *iter_data = data;
+	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+	struct ieee80211_vif *sta_vif = arsta->arvif->vif;
+
+	if (sta->tdls && sta_vif == iter_data->curr_vif)
+		iter_data->num_tdls_stations++;
+}
+
+static int ath10k_mac_tdls_vif_stations_count(struct ieee80211_hw *hw,
+					      struct ieee80211_vif *vif)
+{
+	struct ath10k_mac_tdls_iter_data data = {};
+
+	data.curr_vif = vif;
+
+	ieee80211_iterate_stations_atomic(hw,
+					  ath10k_mac_tdls_vif_stations_count_iter,
+					  &data);
+	return data.num_tdls_stations;
+}
+
+static void ath10k_mac_tdls_vifs_count_iter(void *data, u8 *mac,
+					    struct ieee80211_vif *vif)
+{
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	int *num_tdls_vifs = data;
+
+	if (vif->type != NL80211_IFTYPE_STATION)
+		return;
+
+	if (ath10k_mac_tdls_vif_stations_count(arvif->ar->hw, vif) > 0)
+		(*num_tdls_vifs)++;
+}
+
+static int ath10k_mac_tdls_vifs_count(struct ieee80211_hw *hw)
+{
+	int num_tdls_vifs = 0;
+
+	ieee80211_iterate_active_interfaces_atomic(hw,
+						   IEEE80211_IFACE_ITER_NORMAL,
+						   ath10k_mac_tdls_vifs_count_iter,
+						   &num_tdls_vifs);
+	return num_tdls_vifs;
+}
+
+static int ath10k_sta_state(struct ieee80211_hw *hw,
+			    struct ieee80211_vif *vif,
+			    struct ieee80211_sta *sta,
+			    enum ieee80211_sta_state old_state,
+			    enum ieee80211_sta_state new_state)
+{
+	struct ath10k *ar = hw->priv;
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+	int ret = 0;
+
+	if (old_state == IEEE80211_STA_NOTEXIST &&
+	    new_state == IEEE80211_STA_NONE) {
+		memset(arsta, 0, sizeof(*arsta));
+		arsta->arvif = arvif;
+		INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
+	}
+
+	/* cancel must be done outside the mutex to avoid deadlock */
+	if ((old_state == IEEE80211_STA_NONE &&
+	     new_state == IEEE80211_STA_NOTEXIST))
+		cancel_work_sync(&arsta->update_wk);
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (old_state == IEEE80211_STA_NOTEXIST &&
+	    new_state == IEEE80211_STA_NONE) {
+		/*
+		 * New station addition.
+		 */
+		enum wmi_peer_type peer_type = WMI_PEER_TYPE_DEFAULT;
+		u32 num_tdls_stations;
+		u32 num_tdls_vifs;
+
+		ath10k_dbg(ar, ATH10K_DBG_MAC,
+			   "mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n",
+			   arvif->vdev_id, sta->addr,
+			   ar->num_stations + 1, ar->max_num_stations,
+			   ar->num_peers + 1, ar->max_num_peers);
+
+		ret = ath10k_mac_inc_num_stations(arvif, sta);
+		if (ret) {
+			ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n",
+				    ar->max_num_stations);
+			goto exit;
+		}
+
+		if (sta->tdls)
+			peer_type = WMI_PEER_TYPE_TDLS;
+
+		ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr,
+					 peer_type);
+		if (ret) {
+			ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n",
+				    sta->addr, arvif->vdev_id, ret);
+			ath10k_mac_dec_num_stations(arvif, sta);
+			goto exit;
+		}
+
+		if (!sta->tdls)
+			goto exit;
+
+		num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif);
+		num_tdls_vifs = ath10k_mac_tdls_vifs_count(hw);
+
+		if (num_tdls_vifs >= ar->max_num_tdls_vdevs &&
+		    num_tdls_stations == 0) {
+			ath10k_warn(ar, "vdev %i exceeded maximum number of tdls vdevs %i\n",
+				    arvif->vdev_id, ar->max_num_tdls_vdevs);
+			ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
+			ath10k_mac_dec_num_stations(arvif, sta);
+			ret = -ENOBUFS;
+			goto exit;
+		}
+
+		if (num_tdls_stations == 0) {
+			/* This is the first tdls peer in current vif */
+			enum wmi_tdls_state state = WMI_TDLS_ENABLE_ACTIVE;
+
+			ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
+							      state);
+			if (ret) {
+				ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
+					    arvif->vdev_id, ret);
+				ath10k_peer_delete(ar, arvif->vdev_id,
+						   sta->addr);
+				ath10k_mac_dec_num_stations(arvif, sta);
+				goto exit;
+			}
+		}
+
+		ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
+						  WMI_TDLS_PEER_STATE_PEERING);
+		if (ret) {
+			ath10k_warn(ar,
+				    "failed to update tdls peer %pM for vdev %d when adding a new sta: %i\n",
+				    sta->addr, arvif->vdev_id, ret);
+			ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
+			ath10k_mac_dec_num_stations(arvif, sta);
+
+			if (num_tdls_stations != 0)
+				goto exit;
+			ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
+							WMI_TDLS_DISABLE);
+		}
+	} else if ((old_state == IEEE80211_STA_NONE &&
+		    new_state == IEEE80211_STA_NOTEXIST)) {
+		/*
+		 * Existing station deletion.
+		 */
+		ath10k_dbg(ar, ATH10K_DBG_MAC,
+			   "mac vdev %d peer delete %pM (sta gone)\n",
+			   arvif->vdev_id, sta->addr);
+
+		ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
+		if (ret)
+			ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n",
+				    sta->addr, arvif->vdev_id, ret);
+
+		ath10k_mac_dec_num_stations(arvif, sta);
+
+		if (!sta->tdls)
+			goto exit;
+
+		if (ath10k_mac_tdls_vif_stations_count(hw, vif))
+			goto exit;
+
+		/* This was the last tdls peer in current vif */
+		ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
+						      WMI_TDLS_DISABLE);
+		if (ret) {
+			ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
+				    arvif->vdev_id, ret);
+		}
+	} else if (old_state == IEEE80211_STA_AUTH &&
+		   new_state == IEEE80211_STA_ASSOC &&
+		   (vif->type == NL80211_IFTYPE_AP ||
+		    vif->type == NL80211_IFTYPE_MESH_POINT ||
+		    vif->type == NL80211_IFTYPE_ADHOC)) {
+		/*
+		 * New association.
+		 */
+		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM associated\n",
+			   sta->addr);
+
+		ret = ath10k_station_assoc(ar, vif, sta, false);
+		if (ret)
+			ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n",
+				    sta->addr, arvif->vdev_id, ret);
+	} else if (old_state == IEEE80211_STA_ASSOC &&
+		   new_state == IEEE80211_STA_AUTHORIZED &&
+		   sta->tdls) {
+		/*
+		 * Tdls station authorized.
+		 */
+		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac tdls sta %pM authorized\n",
+			   sta->addr);
+
+		ret = ath10k_station_assoc(ar, vif, sta, false);
+		if (ret) {
+			ath10k_warn(ar, "failed to associate tdls station %pM for vdev %i: %i\n",
+				    sta->addr, arvif->vdev_id, ret);
+			goto exit;
+		}
+
+		ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
+						  WMI_TDLS_PEER_STATE_CONNECTED);
+		if (ret)
+			ath10k_warn(ar, "failed to update tdls peer %pM for vdev %i: %i\n",
+				    sta->addr, arvif->vdev_id, ret);
+	} else if (old_state == IEEE80211_STA_ASSOC &&
+		    new_state == IEEE80211_STA_AUTH &&
+		    (vif->type == NL80211_IFTYPE_AP ||
+		     vif->type == NL80211_IFTYPE_MESH_POINT ||
+		     vif->type == NL80211_IFTYPE_ADHOC)) {
+		/*
+		 * Disassociation.
+		 */
+		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM disassociated\n",
+			   sta->addr);
+
+		ret = ath10k_station_disassoc(ar, vif, sta);
+		if (ret)
+			ath10k_warn(ar, "failed to disassociate station: %pM vdev %i: %i\n",
+				    sta->addr, arvif->vdev_id, ret);
+	}
+exit:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
+				u16 ac, bool enable)
+{
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	struct wmi_sta_uapsd_auto_trig_arg arg = {};
+	u32 prio = 0, acc = 0;
+	u32 value = 0;
+	int ret = 0;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+		return 0;
+
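+	/* Map the mac80211 access category to the corresponding WMI UAPSD
+	 * delivery/trigger enable bits, user priority and WMM AC index.
+	 */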
+	switch (ac) {
+	case IEEE80211_AC_VO:
+		value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN |
+			WMI_STA_PS_UAPSD_AC3_TRIGGER_EN;
+		prio = 7;
+		acc = 3;
+		break;
+	case IEEE80211_AC_VI:
+		value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN |
+			WMI_STA_PS_UAPSD_AC2_TRIGGER_EN;
+		prio = 5;
+		acc = 2;
+		break;
+	case IEEE80211_AC_BE:
+		value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN |
+			WMI_STA_PS_UAPSD_AC1_TRIGGER_EN;
+		prio = 2;
+		acc = 1;
+		break;
+	case IEEE80211_AC_BK:
+		value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN |
+			WMI_STA_PS_UAPSD_AC0_TRIGGER_EN;
+		prio = 0;
+		acc = 0;
+		break;
+	}
+
+	if (enable)
+		arvif->u.sta.uapsd |= value;
+	else
+		arvif->u.sta.uapsd &= ~value;
+
+	ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+					  WMI_STA_PS_PARAM_UAPSD,
+					  arvif->u.sta.uapsd);
+	if (ret) {
+		ath10k_warn(ar, "failed to set uapsd params: %d\n", ret);
+		goto exit;
+	}
+
+	if (arvif->u.sta.uapsd)
+		value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD;
+	else
+		value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
+
+	ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+					  WMI_STA_PS_PARAM_RX_WAKE_POLICY,
+					  value);
+	if (ret)
+		ath10k_warn(ar, "failed to set rx wake param: %d\n", ret);
+
+	ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
+	if (ret) {
+		ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n",
+			    arvif->vdev_id, ret);
+		return ret;
+	}
+
+	ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
+	if (ret) {
+		ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n",
+			    arvif->vdev_id, ret);
+		return ret;
+	}
+
+	if (test_bit(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, ar->wmi.svc_map) ||
+	    test_bit(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, ar->wmi.svc_map)) {
+		/* Only userspace can make an educated decision on when to
+		 * send a trigger frame. The following effectively disables
+		 * the U-APSD autotrigger in firmware (which is enabled by
+		 * default provided the autotrigger service is available).
+		 */
+
+		arg.wmm_ac = acc;
+		arg.user_priority = prio;
+		arg.service_interval = 0;
+		arg.suspend_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
+		arg.delay_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
+
+		ret = ath10k_wmi_vdev_sta_uapsd(ar, arvif->vdev_id,
+						arvif->bssid, &arg, 1);
+		if (ret) {
+			ath10k_warn(ar, "failed to set uapsd auto trigger %d\n",
+				    ret);
+			return ret;
+		}
+	}
+
+exit:
+	return ret;
+}
+
+static int ath10k_conf_tx(struct ieee80211_hw *hw,
+			  struct ieee80211_vif *vif, u16 ac,
+			  const struct ieee80211_tx_queue_params *params)
+{
+	struct ath10k *ar = hw->priv;
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	struct wmi_wmm_params_arg *p = NULL;
+	int ret;
+
+	mutex_lock(&ar->conf_mutex);
+
+	switch (ac) {
+	case IEEE80211_AC_VO:
+		p = &arvif->wmm_params.ac_vo;
+		break;
+	case IEEE80211_AC_VI:
+		p = &arvif->wmm_params.ac_vi;
+		break;
+	case IEEE80211_AC_BE:
+		p = &arvif->wmm_params.ac_be;
+		break;
+	case IEEE80211_AC_BK:
+		p = &arvif->wmm_params.ac_bk;
+		break;
+	}
+
+	if (WARN_ON(!p)) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	p->cwmin = params->cw_min;
+	p->cwmax = params->cw_max;
+	p->aifs = params->aifs;
+
+	/*
+	 * The channel time duration programmed in the HW is in absolute
+	 * microseconds, while mac80211 gives the txop in units of
+	 * 32 microseconds.
+	 */
+	p->txop = params->txop * 32;
+
+	if (ar->wmi.ops->gen_vdev_wmm_conf) {
+		ret = ath10k_wmi_vdev_wmm_conf(ar, arvif->vdev_id,
+					       &arvif->wmm_params);
+		if (ret) {
+			ath10k_warn(ar, "failed to set vdev wmm params on vdev %i: %d\n",
+				    arvif->vdev_id, ret);
+			goto exit;
+		}
+	} else {
+		/* This won't work well with multi-interface cases but it's
+		 * better than nothing.
+		 */
+		ret = ath10k_wmi_pdev_set_wmm_params(ar, &arvif->wmm_params);
+		if (ret) {
+			ath10k_warn(ar, "failed to set wmm params: %d\n", ret);
+			goto exit;
+		}
+	}
+
+	ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
+	if (ret)
+		ath10k_warn(ar, "failed to set sta uapsd: %d\n", ret);
+
+exit:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+#define ATH10K_ROC_TIMEOUT_HZ (2*HZ)
+
+static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
+				    struct ieee80211_vif *vif,
+				    struct ieee80211_channel *chan,
+				    int duration,
+				    enum ieee80211_roc_type type)
+{
+	struct ath10k *ar = hw->priv;
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	struct wmi_start_scan_arg arg;
+	int ret = 0;
+	u32 scan_time_msec;
+
+	mutex_lock(&ar->conf_mutex);
+
+	spin_lock_bh(&ar->data_lock);
+	switch (ar->scan.state) {
+	case ATH10K_SCAN_IDLE:
+		reinit_completion(&ar->scan.started);
+		reinit_completion(&ar->scan.completed);
+		reinit_completion(&ar->scan.on_channel);
+		ar->scan.state = ATH10K_SCAN_STARTING;
+		ar->scan.is_roc = true;
+		ar->scan.vdev_id = arvif->vdev_id;
+		ar->scan.roc_freq = chan->center_freq;
+		ar->scan.roc_notify = true;
+		ret = 0;
+		break;
+	case ATH10K_SCAN_STARTING:
+	case ATH10K_SCAN_RUNNING:
+	case ATH10K_SCAN_ABORTING:
+		ret = -EBUSY;
+		break;
+	}
+	spin_unlock_bh(&ar->data_lock);
+
+	if (ret)
+		goto exit;
+
+	scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2;
+
+	memset(&arg, 0, sizeof(arg));
+	ath10k_wmi_start_scan_init(ar, &arg);
+	arg.vdev_id = arvif->vdev_id;
+	arg.scan_id = ATH10K_SCAN_ID;
+	arg.n_channels = 1;
+	arg.channels[0] = chan->center_freq;
+	arg.dwell_time_active = scan_time_msec;
+	arg.dwell_time_passive = scan_time_msec;
+	arg.max_scan_time = scan_time_msec;
+	arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
+	arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
+	arg.burst_duration_ms = duration;
+
+	ret = ath10k_start_scan(ar, &arg);
+	if (ret) {
+		ath10k_warn(ar, "failed to start roc scan: %d\n", ret);
+		spin_lock_bh(&ar->data_lock);
+		ar->scan.state = ATH10K_SCAN_IDLE;
+		spin_unlock_bh(&ar->data_lock);
+		goto exit;
+	}
+
+	ret = wait_for_completion_timeout(&ar->scan.on_channel, 3*HZ);
+	if (ret == 0) {
+		ath10k_warn(ar, "failed to switch to channel for roc scan\n");
+
+		ret = ath10k_scan_stop(ar);
+		if (ret)
+			ath10k_warn(ar, "failed to stop scan: %d\n", ret);
+
+		ret = -ETIMEDOUT;
+		goto exit;
+	}
+
+	ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
+				     msecs_to_jiffies(duration));
+
+	ret = 0;
+exit:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw)
+{
+	struct ath10k *ar = hw->priv;
+
+	mutex_lock(&ar->conf_mutex);
+
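+	/* The ROC is being cancelled by mac80211 itself, so suppress the
+	 * remain-on-channel expired notification when the scan finishes.
+	 */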
+	spin_lock_bh(&ar->data_lock);
+	ar->scan.roc_notify = false;
+	spin_unlock_bh(&ar->data_lock);
+
+	ath10k_scan_abort(ar);
+
+	mutex_unlock(&ar->conf_mutex);
+
+	cancel_delayed_work_sync(&ar->scan.timeout);
+
+	return 0;
+}
+
+/*
+ * Both RTS and Fragmentation threshold are interface-specific
+ * in ath10k, but device-specific in mac80211.
+ */
+
+static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+	struct ath10k *ar = hw->priv;
+	struct ath10k_vif *arvif;
+	int ret = 0;
+
+	mutex_lock(&ar->conf_mutex);
+	list_for_each_entry(arvif, &ar->arvifs, list) {
+		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n",
+			   arvif->vdev_id, value);
+
+		ret = ath10k_mac_set_rts(arvif, value);
+		if (ret) {
+			ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
+				    arvif->vdev_id, ret);
+			break;
+		}
+	}
+	mutex_unlock(&ar->conf_mutex);
+
+	return ret;
+}
+
+static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
+{
+	/* Even though there's a WMI enum for the fragmentation threshold, no
+	 * known firmware actually implements it. Moreover, it is not possible
+	 * to leave frame fragmentation to mac80211 because firmware clears
+	 * the "more fragments" bit in the frame control field, making it
+	 * impossible for remote devices to reassemble frames.
+	 *
+	 * Hence implement a dummy callback just to say fragmentation isn't
+	 * supported. This effectively prevents mac80211 from doing frame
+	 * fragmentation in software.
+	 */
+	return -EOPNOTSUPP;
+}
+
+static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			 u32 queues, bool drop)
+{
+	struct ath10k *ar = hw->priv;
+	bool skip;
+	long time_left;
+
+	/* mac80211 doesn't care if we really xmit queued frames or not;
+	 * we'll collect those frames either way if we stop/delete vdevs */
+	if (drop)
+		return;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state == ATH10K_STATE_WEDGED)
+		goto skip;
+
+	time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({
+			bool empty;
+
+			spin_lock_bh(&ar->htt.tx_lock);
+			empty = (ar->htt.num_pending_tx == 0);
+			spin_unlock_bh(&ar->htt.tx_lock);
+
+			skip = (ar->state == ATH10K_STATE_WEDGED) ||
+			       test_bit(ATH10K_FLAG_CRASH_FLUSH,
+					&ar->dev_flags);
+
+			(empty || skip);
+		}), ATH10K_FLUSH_TIMEOUT_HZ);
+
+	if (time_left == 0 || skip)
+		ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %ld\n",
+			    skip, ar->state, time_left);
+
+skip:
+	mutex_unlock(&ar->conf_mutex);
+}
+
+/* TODO: Implement this function properly
+ * For now it is needed to reply to Probe Requests in IBSS mode.
+ * Probably we need this information from FW.
+ */
+static int ath10k_tx_last_beacon(struct ieee80211_hw *hw)
+{
+	return 1;
+}
+
+static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
+				     enum ieee80211_reconfig_type reconfig_type)
+{
+	struct ath10k *ar = hw->priv;
+
+	if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
+		return;
+
+	mutex_lock(&ar->conf_mutex);
+
+	/* If the device failed to restart it will be in a different state,
+	 * e.g. ATH10K_STATE_WEDGED */
+	if (ar->state == ATH10K_STATE_RESTARTED) {
+		ath10k_info(ar, "device successfully recovered\n");
+		ar->state = ATH10K_STATE_ON;
+		ieee80211_wake_queues(ar->hw);
+	}
+
+	mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
+			     struct survey_info *survey)
+{
+	struct ath10k *ar = hw->priv;
+	struct ieee80211_supported_band *sband;
+	struct survey_info *ar_survey = &ar->survey[idx];
+	int ret = 0;
+
+	mutex_lock(&ar->conf_mutex);
+
+	sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ];
+	if (sband && idx >= sband->n_channels) {
+		idx -= sband->n_channels;
+		sband = NULL;
+	}
+
+	if (!sband)
+		sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ];
+
+	if (!sband || idx >= sband->n_channels) {
+		ret = -ENOENT;
+		goto exit;
+	}
+
+	spin_lock_bh(&ar->data_lock);
+	memcpy(survey, ar_survey, sizeof(*survey));
+	spin_unlock_bh(&ar->data_lock);
+
+	survey->channel = &sband->channels[idx];
+
+	if (ar->rx_channel == survey->channel)
+		survey->filled |= SURVEY_INFO_IN_USE;
+
+exit:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+static bool
+ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar,
+					enum ieee80211_band band,
+					const struct cfg80211_bitrate_mask *mask)
+{
+	int num_rates = 0;
+	int i;
+
+	num_rates += hweight32(mask->control[band].legacy);
+
+	for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++)
+		num_rates += hweight8(mask->control[band].ht_mcs[i]);
+
+	for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++)
+		num_rates += hweight16(mask->control[band].vht_mcs[i]);
+
+	return num_rates == 1;
+}
+
+static bool
+ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar,
+				       enum ieee80211_band band,
+				       const struct cfg80211_bitrate_mask *mask,
+				       int *nss)
+{
+	struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
+	u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
+	u8 ht_nss_mask = 0;
+	u8 vht_nss_mask = 0;
+	int i;
+
+	if (mask->control[band].legacy)
+		return false;
+
+	for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
+		if (mask->control[band].ht_mcs[i] == 0)
+			continue;
+		else if (mask->control[band].ht_mcs[i] ==
+			 sband->ht_cap.mcs.rx_mask[i])
+			ht_nss_mask |= BIT(i);
+		else
+			return false;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
+		if (mask->control[band].vht_mcs[i] == 0)
+			continue;
+		else if (mask->control[band].vht_mcs[i] ==
+			 ath10k_mac_get_max_vht_mcs_map(vht_mcs_map, i))
+			vht_nss_mask |= BIT(i);
+		else
+			return false;
+	}
+
+	if (ht_nss_mask != vht_nss_mask)
+		return false;
+
+	if (ht_nss_mask == 0)
+		return false;
+
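+	/* Only a contiguous mask starting at the first spatial stream, i.e.
+	 * of the form 2^n - 1, can be expressed as a single NSS value.
+	 */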
+	if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask)
+		return false;
+
+	*nss = fls(ht_nss_mask);
+
+	return true;
+}
+
+static int
+ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
+					enum ieee80211_band band,
+					const struct cfg80211_bitrate_mask *mask,
+					u8 *rate, u8 *nss)
+{
+	struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
+	int rate_idx;
+	int i;
+	u16 bitrate;
+	u8 preamble;
+	u8 hw_rate;
+
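+	/* The returned rate code is packed as preamble (bits 7:6), NSS - 1
+	 * (bits 5:4) and hardware rate / MCS index (bits 3:0).
+	 */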
+	if (hweight32(mask->control[band].legacy) == 1) {
+		rate_idx = ffs(mask->control[band].legacy) - 1;
+
+		hw_rate = sband->bitrates[rate_idx].hw_value;
+		bitrate = sband->bitrates[rate_idx].bitrate;
+
+		if (ath10k_mac_bitrate_is_cck(bitrate))
+			preamble = WMI_RATE_PREAMBLE_CCK;
+		else
+			preamble = WMI_RATE_PREAMBLE_OFDM;
+
+		*nss = 1;
+		*rate = preamble << 6 |
+			(*nss - 1) << 4 |
+			hw_rate << 0;
+
+		return 0;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
+		if (hweight8(mask->control[band].ht_mcs[i]) == 1) {
+			*nss = i + 1;
+			*rate = WMI_RATE_PREAMBLE_HT << 6 |
+				(*nss - 1) << 4 |
+				(ffs(mask->control[band].ht_mcs[i]) - 1);
+
+			return 0;
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
+		if (hweight16(mask->control[band].vht_mcs[i]) == 1) {
+			*nss = i + 1;
+			*rate = WMI_RATE_PREAMBLE_VHT << 6 |
+				(*nss - 1) << 4 |
+				(ffs(mask->control[band].vht_mcs[i]) - 1);
+
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif,
+					    u8 rate, u8 nss, u8 sgi, u8 ldpc)
+{
+	struct ath10k *ar = arvif->ar;
+	u32 vdev_param;
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02hhx nss %hhu sgi %hhu\n",
+		   arvif->vdev_id, rate, nss, sgi);
+
+	vdev_param = ar->wmi.vdev_param->fixed_rate;
+	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, rate);
+	if (ret) {
+		ath10k_warn(ar, "failed to set fixed rate param 0x%02x: %d\n",
+			    rate, ret);
+		return ret;
+	}
+
+	vdev_param = ar->wmi.vdev_param->nss;
+	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, nss);
+	if (ret) {
+		ath10k_warn(ar, "failed to set nss param %d: %d\n", nss, ret);
+		return ret;
+	}
+
+	vdev_param = ar->wmi.vdev_param->sgi;
+	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, sgi);
+	if (ret) {
+		ath10k_warn(ar, "failed to set sgi param %d: %d\n", sgi, ret);
+		return ret;
+	}
+
+	vdev_param = ar->wmi.vdev_param->ldpc;
+	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, ldpc);
+	if (ret) {
+		ath10k_warn(ar, "failed to set ldpc param %d: %d\n", ldpc, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static bool
+ath10k_mac_can_set_bitrate_mask(struct ath10k *ar,
+				enum ieee80211_band band,
+				const struct cfg80211_bitrate_mask *mask)
+{
+	int i;
+	u16 vht_mcs;
+
+	/* Due to a firmware limitation in WMI_PEER_ASSOC_CMDID it is impossible
+	 * to express all VHT MCS rate masks. Effectively only the following
+	 * ranges can be used: none, 0-7, 0-8 and 0-9.
+	 */
+	for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
+		vht_mcs = mask->control[band].vht_mcs[i];
+
+		switch (vht_mcs) {
+		case 0:
+		case BIT(8) - 1:
+		case BIT(9) - 1:
+		case BIT(10) - 1:
+			break;
+		default:
+			ath10k_warn(ar, "refusing bitrate mask with missing 0-7 VHT MCS rates\n");
+			return false;
+		}
+	}
+
+	return true;
+}
+
+static void ath10k_mac_set_bitrate_mask_iter(void *data,
+					     struct ieee80211_sta *sta)
+{
+	struct ath10k_vif *arvif = data;
+	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+	struct ath10k *ar = arvif->ar;
+
+	if (arsta->arvif != arvif)
+		return;
+
+	spin_lock_bh(&ar->data_lock);
+	arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
+	spin_unlock_bh(&ar->data_lock);
+
+	ieee80211_queue_work(ar->hw, &arsta->update_wk);
+}
+
+static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
+					  struct ieee80211_vif *vif,
+					  const struct cfg80211_bitrate_mask *mask)
+{
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	struct cfg80211_chan_def def;
+	struct ath10k *ar = arvif->ar;
+	enum ieee80211_band band;
+	const u8 *ht_mcs_mask;
+	const u16 *vht_mcs_mask;
+	u8 rate;
+	u8 nss;
+	u8 sgi;
+	u8 ldpc;
+	int single_nss;
+	int ret;
+
+	if (ath10k_mac_vif_chan(vif, &def))
+		return -EPERM;
+
+	band = def.chan->band;
+	ht_mcs_mask = mask->control[band].ht_mcs;
+	vht_mcs_mask = mask->control[band].vht_mcs;
+	ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC);
+
+	sgi = mask->control[band].gi;
+	if (sgi == NL80211_TXRATE_FORCE_LGI)
+		return -EINVAL;
+
+	if (ath10k_mac_bitrate_mask_has_single_rate(ar, band, mask)) {
+		ret = ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask,
+							      &rate, &nss);
+		if (ret) {
+			ath10k_warn(ar, "failed to get single rate for vdev %i: %d\n",
+				    arvif->vdev_id, ret);
+			return ret;
+		}
+	} else if (ath10k_mac_bitrate_mask_get_single_nss(ar, band, mask,
+							  &single_nss)) {
+		rate = WMI_FIXED_RATE_NONE;
+		nss = single_nss;
+	} else {
+		rate = WMI_FIXED_RATE_NONE;
+		nss = min(ar->num_rf_chains,
+			  max(ath10k_mac_max_ht_nss(ht_mcs_mask),
+			      ath10k_mac_max_vht_nss(vht_mcs_mask)));
+
+		if (!ath10k_mac_can_set_bitrate_mask(ar, band, mask))
+			return -EINVAL;
+
+		mutex_lock(&ar->conf_mutex);
+
+		arvif->bitrate_mask = *mask;
+		ieee80211_iterate_stations_atomic(ar->hw,
+						  ath10k_mac_set_bitrate_mask_iter,
+						  arvif);
+
+		mutex_unlock(&ar->conf_mutex);
+	}
+
+	mutex_lock(&ar->conf_mutex);
+
+	ret = ath10k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc);
+	if (ret) {
+		ath10k_warn(ar, "failed to set fixed rate params on vdev %i: %d\n",
+			    arvif->vdev_id, ret);
+		goto exit;
+	}
+
+exit:
+	mutex_unlock(&ar->conf_mutex);
+
+	return ret;
+}
+
+static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
+				 struct ieee80211_vif *vif,
+				 struct ieee80211_sta *sta,
+				 u32 changed)
+{
+	struct ath10k *ar = hw->priv;
+	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+	u32 bw, smps;
+
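+	/* Record the requested changes under the data lock and defer the
+	 * actual WMI updates to the per-station update worker, since this
+	 * callback must not sleep.
+	 */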
+	spin_lock_bh(&ar->data_lock);
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC,
+		   "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
+		   sta->addr, changed, sta->bandwidth, sta->rx_nss,
+		   sta->smps_mode);
+
+	if (changed & IEEE80211_RC_BW_CHANGED) {
+		bw = WMI_PEER_CHWIDTH_20MHZ;
+
+		switch (sta->bandwidth) {
+		case IEEE80211_STA_RX_BW_20:
+			bw = WMI_PEER_CHWIDTH_20MHZ;
+			break;
+		case IEEE80211_STA_RX_BW_40:
+			bw = WMI_PEER_CHWIDTH_40MHZ;
+			break;
+		case IEEE80211_STA_RX_BW_80:
+			bw = WMI_PEER_CHWIDTH_80MHZ;
+			break;
+		case IEEE80211_STA_RX_BW_160:
+			ath10k_warn(ar, "Invalid bandwidth %d in rc update for %pM\n",
+				    sta->bandwidth, sta->addr);
+			bw = WMI_PEER_CHWIDTH_20MHZ;
+			break;
+		}
+
+		arsta->bw = bw;
+	}
+
+	if (changed & IEEE80211_RC_NSS_CHANGED)
+		arsta->nss = sta->rx_nss;
+
+	if (changed & IEEE80211_RC_SMPS_CHANGED) {
+		smps = WMI_PEER_SMPS_PS_NONE;
+
+		switch (sta->smps_mode) {
+		case IEEE80211_SMPS_AUTOMATIC:
+		case IEEE80211_SMPS_OFF:
+			smps = WMI_PEER_SMPS_PS_NONE;
+			break;
+		case IEEE80211_SMPS_STATIC:
+			smps = WMI_PEER_SMPS_STATIC;
+			break;
+		case IEEE80211_SMPS_DYNAMIC:
+			smps = WMI_PEER_SMPS_DYNAMIC;
+			break;
+		case IEEE80211_SMPS_NUM_MODES:
+			ath10k_warn(ar, "Invalid smps %d in sta rc update for %pM\n",
+				    sta->smps_mode, sta->addr);
+			smps = WMI_PEER_SMPS_PS_NONE;
+			break;
+		}
+
+		arsta->smps = smps;
+	}
+
+	arsta->changed |= changed;
+
+	spin_unlock_bh(&ar->data_lock);
+
+	ieee80211_queue_work(hw, &arsta->update_wk);
+}
+
+static u64 ath10k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+	/*
+	 * FIXME: Return 0 for the time being. Need to figure out whether the
+	 * FW has an API to fetch the 64-bit local TSF.
+	 */
+
+	return 0;
+}
+
+static int ath10k_ampdu_action(struct ieee80211_hw *hw,
+			       struct ieee80211_vif *vif,
+			       struct ieee80211_ampdu_params *params)
+{
+	struct ath10k *ar = hw->priv;
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	struct ieee80211_sta *sta = params->sta;
+	enum ieee80211_ampdu_mlme_action action = params->action;
+	u16 tid = params->tid;
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n",
+		   arvif->vdev_id, sta->addr, tid, action);
+
+	switch (action) {
+	case IEEE80211_AMPDU_RX_START:
+	case IEEE80211_AMPDU_RX_STOP:
+		/* HTT AddBa/DelBa events trigger mac80211 Rx BA session
+		 * creation/removal. Do we need to verify this?
+		 */
+		return 0;
+	case IEEE80211_AMPDU_TX_START:
+	case IEEE80211_AMPDU_TX_STOP_CONT:
+	case IEEE80211_AMPDU_TX_STOP_FLUSH:
+	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+	case IEEE80211_AMPDU_TX_OPERATIONAL:
+		/* Firmware offloads Tx aggregation entirely so deny mac80211
+		 * Tx aggregation requests.
+		 */
+		return -EOPNOTSUPP;
+	}
+
+	return -EINVAL;
+}
+
+static void
+ath10k_mac_update_rx_channel(struct ath10k *ar,
+			     struct ieee80211_chanctx_conf *ctx,
+			     struct ieee80211_vif_chanctx_switch *vifs,
+			     int n_vifs)
+{
+	struct cfg80211_chan_def *def = NULL;
+
+	/* Both locks are required because ar->rx_channel is modified. This
+	 * allows readers to hold either lock.
+	 */
+	lockdep_assert_held(&ar->conf_mutex);
+	lockdep_assert_held(&ar->data_lock);
+
+	WARN_ON(ctx && vifs);
+	WARN_ON(vifs && n_vifs != 1);
+
+	/* FIXME: This is both an optimization and a workaround. Peers and vifs
+	 * are on a linked list now. Doing a lookup peer -> vif -> chanctx for
+	 * each ppdu on Rx may reduce performance on low-end systems. It should
+	 * be possible to use tables/hashmaps to speed the lookup up (be wary
+	 * of cpu data cache line sizes though), but to keep the initial
+	 * implementation simple and less intrusive fall back to the slow
+	 * lookup only for multi-channel cases. Single-channel cases will keep
+	 * using the old channel derivation and thus performance should not be
+	 * affected much.
+	 */
+	rcu_read_lock();
+	if (!ctx && ath10k_mac_num_chanctxs(ar) == 1) {
+		ieee80211_iter_chan_contexts_atomic(ar->hw,
+						    ath10k_mac_get_any_chandef_iter,
+						    &def);
+
+		if (vifs)
+			def = &vifs[0].new_ctx->def;
+
+		ar->rx_channel = def->chan;
+	} else if ((ctx && ath10k_mac_num_chanctxs(ar) == 0) ||
+		   (ctx && (ar->state == ATH10K_STATE_RESTARTED))) {
+		/* During a driver restart due to a firmware assert, mac80211
+		 * already has a valid channel context for the given radio, so
+		 * the channel context iteration returns num_chanctx > 0. Fix
+		 * up rx_channel while the restart is in progress.
+		 */
+		ar->rx_channel = ctx->def.chan;
+	} else {
+		ar->rx_channel = NULL;
+	}
+	rcu_read_unlock();
+}
+
+static void
+ath10k_mac_update_vif_chan(struct ath10k *ar,
+			   struct ieee80211_vif_chanctx_switch *vifs,
+			   int n_vifs)
+{
+	struct ath10k_vif *arvif;
+	int ret;
+	int i;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	/* First stop monitor interface. Some FW versions crash if there's a
+	 * lone monitor interface.
+	 */
+	if (ar->monitor_started)
+		ath10k_monitor_stop(ar);
+
+	for (i = 0; i < n_vifs; i++) {
+		arvif = ath10k_vif_to_arvif(vifs[i].vif);
+
+		ath10k_dbg(ar, ATH10K_DBG_MAC,
+			   "mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d\n",
+			   arvif->vdev_id,
+			   vifs[i].old_ctx->def.chan->center_freq,
+			   vifs[i].new_ctx->def.chan->center_freq,
+			   vifs[i].old_ctx->def.width,
+			   vifs[i].new_ctx->def.width);
+
+		if (WARN_ON(!arvif->is_started))
+			continue;
+
+		if (WARN_ON(!arvif->is_up))
+			continue;
+
+		ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+		if (ret) {
+			ath10k_warn(ar, "failed to down vdev %d: %d\n",
+				    arvif->vdev_id, ret);
+			continue;
+		}
+	}
+
+	/* All relevant vdevs are downed and associated channel resources
+	 * should be available for the channel switch now.
+	 */
+
+	spin_lock_bh(&ar->data_lock);
+	ath10k_mac_update_rx_channel(ar, NULL, vifs, n_vifs);
+	spin_unlock_bh(&ar->data_lock);
+
+	for (i = 0; i < n_vifs; i++) {
+		arvif = ath10k_vif_to_arvif(vifs[i].vif);
+
+		if (WARN_ON(!arvif->is_started))
+			continue;
+
+		if (WARN_ON(!arvif->is_up))
+			continue;
+
+		ret = ath10k_mac_setup_bcn_tmpl(arvif);
+		if (ret)
+			ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
+				    ret);
+
+		ret = ath10k_mac_setup_prb_tmpl(arvif);
+		if (ret)
+			ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
+				    ret);
+
+		ret = ath10k_vdev_restart(arvif, &vifs[i].new_ctx->def);
+		if (ret) {
+			ath10k_warn(ar, "failed to restart vdev %d: %d\n",
+				    arvif->vdev_id, ret);
+			continue;
+		}
+
+		ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+					 arvif->bssid);
+		if (ret) {
+			ath10k_warn(ar, "failed to bring vdev up %d: %d\n",
+				    arvif->vdev_id, ret);
+			continue;
+		}
+	}
+
+	ath10k_monitor_recalc(ar);
+}
+
+static int
+ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw,
+			  struct ieee80211_chanctx_conf *ctx)
+{
+	struct ath10k *ar = hw->priv;
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC,
+		   "mac chanctx add freq %hu width %d ptr %p\n",
+		   ctx->def.chan->center_freq, ctx->def.width, ctx);
+
+	mutex_lock(&ar->conf_mutex);
+
+	spin_lock_bh(&ar->data_lock);
+	ath10k_mac_update_rx_channel(ar, ctx, NULL, 0);
+	spin_unlock_bh(&ar->data_lock);
+
+	ath10k_recalc_radar_detection(ar);
+	ath10k_monitor_recalc(ar);
+
+	mutex_unlock(&ar->conf_mutex);
+
+	return 0;
+}
+
+static void
+ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
+			     struct ieee80211_chanctx_conf *ctx)
+{
+	struct ath10k *ar = hw->priv;
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC,
+		   "mac chanctx remove freq %hu width %d ptr %p\n",
+		   ctx->def.chan->center_freq, ctx->def.width, ctx);
+
+	mutex_lock(&ar->conf_mutex);
+
+	spin_lock_bh(&ar->data_lock);
+	ath10k_mac_update_rx_channel(ar, NULL, NULL, 0);
+	spin_unlock_bh(&ar->data_lock);
+
+	ath10k_recalc_radar_detection(ar);
+	ath10k_monitor_recalc(ar);
+
+	mutex_unlock(&ar->conf_mutex);
+}
+
+struct ath10k_mac_change_chanctx_arg {
+	struct ieee80211_chanctx_conf *ctx;
+	struct ieee80211_vif_chanctx_switch *vifs;
+	int n_vifs;
+	int next_vif;
+};
+
+static void
+ath10k_mac_change_chanctx_cnt_iter(void *data, u8 *mac,
+				   struct ieee80211_vif *vif)
+{
+	struct ath10k_mac_change_chanctx_arg *arg = data;
+
+	if (rcu_access_pointer(vif->chanctx_conf) != arg->ctx)
+		return;
+
+	arg->n_vifs++;
+}
+
+static void
+ath10k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
+				    struct ieee80211_vif *vif)
+{
+	struct ath10k_mac_change_chanctx_arg *arg = data;
+	struct ieee80211_chanctx_conf *ctx;
+
+	ctx = rcu_access_pointer(vif->chanctx_conf);
+	if (ctx != arg->ctx)
+		return;
+
+	if (WARN_ON(arg->next_vif == arg->n_vifs))
+		return;
+
+	arg->vifs[arg->next_vif].vif = vif;
+	arg->vifs[arg->next_vif].old_ctx = ctx;
+	arg->vifs[arg->next_vif].new_ctx = ctx;
+	arg->next_vif++;
+}
+
+static void
+ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw,
+			     struct ieee80211_chanctx_conf *ctx,
+			     u32 changed)
+{
+	struct ath10k *ar = hw->priv;
+	struct ath10k_mac_change_chanctx_arg arg = { .ctx = ctx };
+
+	mutex_lock(&ar->conf_mutex);
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC,
+		   "mac chanctx change freq %hu width %d ptr %p changed %x\n",
+		   ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
+
+	/* This shouldn't really happen because channel switching should use
+	 * switch_vif_chanctx().
+	 */
+	if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL))
+		goto unlock;
+
+	if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH) {
+		ieee80211_iterate_active_interfaces_atomic(
+					hw,
+					IEEE80211_IFACE_ITER_NORMAL,
+					ath10k_mac_change_chanctx_cnt_iter,
+					&arg);
+		if (arg.n_vifs == 0)
+			goto radar;
+
+		arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]),
+				   GFP_KERNEL);
+		if (!arg.vifs)
+			goto radar;
+
+		ieee80211_iterate_active_interfaces_atomic(
+					hw,
+					IEEE80211_IFACE_ITER_NORMAL,
+					ath10k_mac_change_chanctx_fill_iter,
+					&arg);
+		ath10k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs);
+		kfree(arg.vifs);
+	}
+
+radar:
+	ath10k_recalc_radar_detection(ar);
+
+	/* FIXME: How to configure Rx chains properly? */
+
+	/* No other actions are actually necessary. Firmware maintains channel
+	 * definitions per vdev internally and there's no host-side channel
+	 * context abstraction to configure, e.g. channel width.
+	 */
+
+unlock:
+	mutex_unlock(&ar->conf_mutex);
+}
+
+static int
+ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
+				 struct ieee80211_vif *vif,
+				 struct ieee80211_chanctx_conf *ctx)
+{
+	struct ath10k *ar = hw->priv;
+	struct ath10k_vif *arvif = (void *)vif->drv_priv;
+	int ret;
+
+	mutex_lock(&ar->conf_mutex);
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC,
+		   "mac chanctx assign ptr %p vdev_id %i\n",
+		   ctx, arvif->vdev_id);
+
+	if (WARN_ON(arvif->is_started)) {
+		mutex_unlock(&ar->conf_mutex);
+		return -EBUSY;
+	}
+
+	ret = ath10k_vdev_start(arvif, &ctx->def);
+	if (ret) {
+		ath10k_warn(ar, "failed to start vdev %i addr %pM on freq %d: %d\n",
+			    arvif->vdev_id, vif->addr,
+			    ctx->def.chan->center_freq, ret);
+		goto err;
+	}
+
+	arvif->is_started = true;
+
+	ret = ath10k_mac_vif_setup_ps(arvif);
+	if (ret) {
+		ath10k_warn(ar, "failed to update vdev %i ps: %d\n",
+			    arvif->vdev_id, ret);
+		goto err_stop;
+	}
+
+	if (vif->type == NL80211_IFTYPE_MONITOR) {
+		ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, 0, vif->addr);
+		if (ret) {
+			ath10k_warn(ar, "failed to up monitor vdev %i: %d\n",
+				    arvif->vdev_id, ret);
+			goto err_stop;
+		}
+
+		arvif->is_up = true;
+	}
+
+	mutex_unlock(&ar->conf_mutex);
+	return 0;
+
+err_stop:
+	ath10k_vdev_stop(arvif);
+	arvif->is_started = false;
+	ath10k_mac_vif_setup_ps(arvif);
+
+err:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+static void
+ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
+				   struct ieee80211_vif *vif,
+				   struct ieee80211_chanctx_conf *ctx)
+{
+	struct ath10k *ar = hw->priv;
+	struct ath10k_vif *arvif = (void *)vif->drv_priv;
+	int ret;
+
+	mutex_lock(&ar->conf_mutex);
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC,
+		   "mac chanctx unassign ptr %p vdev_id %i\n",
+		   ctx, arvif->vdev_id);
+
+	WARN_ON(!arvif->is_started);
+
+	if (vif->type == NL80211_IFTYPE_MONITOR) {
+		WARN_ON(!arvif->is_up);
+
+		ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+		if (ret)
+			ath10k_warn(ar, "failed to down monitor vdev %i: %d\n",
+				    arvif->vdev_id, ret);
+
+		arvif->is_up = false;
+	}
+
+	ret = ath10k_vdev_stop(arvif);
+	if (ret)
+		ath10k_warn(ar, "failed to stop vdev %i: %d\n",
+			    arvif->vdev_id, ret);
+
+	arvif->is_started = false;
+
+	mutex_unlock(&ar->conf_mutex);
+}
+
+static int
+ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
+				 struct ieee80211_vif_chanctx_switch *vifs,
+				 int n_vifs,
+				 enum ieee80211_chanctx_switch_mode mode)
+{
+	struct ath10k *ar = hw->priv;
+
+	mutex_lock(&ar->conf_mutex);
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC,
+		   "mac chanctx switch n_vifs %d mode %d\n",
+		   n_vifs, mode);
+	ath10k_mac_update_vif_chan(ar, vifs, n_vifs);
+
+	mutex_unlock(&ar->conf_mutex);
+	return 0;
+}
+
+static const struct ieee80211_ops ath10k_ops = {
+	.tx				= ath10k_tx,
+	.start				= ath10k_start,
+	.stop				= ath10k_stop,
+	.config				= ath10k_config,
+	.add_interface			= ath10k_add_interface,
+	.remove_interface		= ath10k_remove_interface,
+	.configure_filter		= ath10k_configure_filter,
+	.bss_info_changed		= ath10k_bss_info_changed,
+	.hw_scan			= ath10k_hw_scan,
+	.cancel_hw_scan			= ath10k_cancel_hw_scan,
+	.set_key			= ath10k_set_key,
+	.set_default_unicast_key        = ath10k_set_default_unicast_key,
+	.sta_state			= ath10k_sta_state,
+	.conf_tx			= ath10k_conf_tx,
+	.remain_on_channel		= ath10k_remain_on_channel,
+	.cancel_remain_on_channel	= ath10k_cancel_remain_on_channel,
+	.set_rts_threshold		= ath10k_set_rts_threshold,
+	.set_frag_threshold		= ath10k_mac_op_set_frag_threshold,
+	.flush				= ath10k_flush,
+	.tx_last_beacon			= ath10k_tx_last_beacon,
+	.set_antenna			= ath10k_set_antenna,
+	.get_antenna			= ath10k_get_antenna,
+	.reconfig_complete		= ath10k_reconfig_complete,
+	.get_survey			= ath10k_get_survey,
+	.set_bitrate_mask		= ath10k_mac_op_set_bitrate_mask,
+	.sta_rc_update			= ath10k_sta_rc_update,
+	.get_tsf			= ath10k_get_tsf,
+	.ampdu_action			= ath10k_ampdu_action,
+	.get_et_sset_count		= ath10k_debug_get_et_sset_count,
+	.get_et_stats			= ath10k_debug_get_et_stats,
+	.get_et_strings			= ath10k_debug_get_et_strings,
+	.add_chanctx			= ath10k_mac_op_add_chanctx,
+	.remove_chanctx			= ath10k_mac_op_remove_chanctx,
+	.change_chanctx			= ath10k_mac_op_change_chanctx,
+	.assign_vif_chanctx		= ath10k_mac_op_assign_vif_chanctx,
+	.unassign_vif_chanctx		= ath10k_mac_op_unassign_vif_chanctx,
+	.switch_vif_chanctx		= ath10k_mac_op_switch_vif_chanctx,
+
+	CFG80211_TESTMODE_CMD(ath10k_tm_cmd)
+
+#ifdef CONFIG_PM
+	.suspend			= ath10k_wow_op_suspend,
+	.resume				= ath10k_wow_op_resume,
+#endif
+#ifdef CONFIG_MAC80211_DEBUGFS
+	.sta_add_debugfs		= ath10k_sta_add_debugfs,
+#endif
+};
+
+#define CHAN2G(_channel, _freq, _flags) { \
+	.band			= IEEE80211_BAND_2GHZ, \
+	.hw_value		= (_channel), \
+	.center_freq		= (_freq), \
+	.flags			= (_flags), \
+	.max_antenna_gain	= 0, \
+	.max_power		= 30, \
+}
+
+#define CHAN5G(_channel, _freq, _flags) { \
+	.band			= IEEE80211_BAND_5GHZ, \
+	.hw_value		= (_channel), \
+	.center_freq		= (_freq), \
+	.flags			= (_flags), \
+	.max_antenna_gain	= 0, \
+	.max_power		= 30, \
+}
+
+static const struct ieee80211_channel ath10k_2ghz_channels[] = {
+	CHAN2G(1, 2412, 0),
+	CHAN2G(2, 2417, 0),
+	CHAN2G(3, 2422, 0),
+	CHAN2G(4, 2427, 0),
+	CHAN2G(5, 2432, 0),
+	CHAN2G(6, 2437, 0),
+	CHAN2G(7, 2442, 0),
+	CHAN2G(8, 2447, 0),
+	CHAN2G(9, 2452, 0),
+	CHAN2G(10, 2457, 0),
+	CHAN2G(11, 2462, 0),
+	CHAN2G(12, 2467, 0),
+	CHAN2G(13, 2472, 0),
+	CHAN2G(14, 2484, 0),
+};
+
+static const struct ieee80211_channel ath10k_5ghz_channels[] = {
+	CHAN5G(36, 5180, 0),
+	CHAN5G(40, 5200, 0),
+	CHAN5G(44, 5220, 0),
+	CHAN5G(48, 5240, 0),
+	CHAN5G(52, 5260, 0),
+	CHAN5G(56, 5280, 0),
+	CHAN5G(60, 5300, 0),
+	CHAN5G(64, 5320, 0),
+	CHAN5G(100, 5500, 0),
+	CHAN5G(104, 5520, 0),
+	CHAN5G(108, 5540, 0),
+	CHAN5G(112, 5560, 0),
+	CHAN5G(116, 5580, 0),
+	CHAN5G(120, 5600, 0),
+	CHAN5G(124, 5620, 0),
+	CHAN5G(128, 5640, 0),
+	CHAN5G(132, 5660, 0),
+	CHAN5G(136, 5680, 0),
+	CHAN5G(140, 5700, 0),
+	CHAN5G(144, 5720, 0),
+	CHAN5G(149, 5745, 0),
+	CHAN5G(153, 5765, 0),
+	CHAN5G(157, 5785, 0),
+	CHAN5G(161, 5805, 0),
+	CHAN5G(165, 5825, 0),
+};
+
+struct ath10k *ath10k_mac_create(size_t priv_size)
+{
+	struct ieee80211_hw *hw;
+	struct ath10k *ar;
+
+	hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, &ath10k_ops);
+	if (!hw)
+		return NULL;
+
+	ar = hw->priv;
+	ar->hw = hw;
+
+	return ar;
+}
+
+void ath10k_mac_destroy(struct ath10k *ar)
+{
+	ieee80211_free_hw(ar->hw);
+}
+
+static const struct ieee80211_iface_limit ath10k_if_limits[] = {
+	{
+	.max	= 8,
+	.types	= BIT(NL80211_IFTYPE_STATION)
+		| BIT(NL80211_IFTYPE_P2P_CLIENT)
+	},
+	{
+	.max	= 3,
+	.types	= BIT(NL80211_IFTYPE_P2P_GO)
+	},
+	{
+	.max	= 1,
+	.types	= BIT(NL80211_IFTYPE_P2P_DEVICE)
+	},
+	{
+	.max	= 7,
+	.types	= BIT(NL80211_IFTYPE_AP)
+#ifdef CONFIG_MAC80211_MESH
+		| BIT(NL80211_IFTYPE_MESH_POINT)
+#endif
+	},
+};
+
+static const struct ieee80211_iface_limit ath10k_10x_if_limits[] = {
+	{
+	.max	= 8,
+	.types	= BIT(NL80211_IFTYPE_AP)
+#ifdef CONFIG_MAC80211_MESH
+		| BIT(NL80211_IFTYPE_MESH_POINT)
+#endif
+	},
+};
+
+static const struct ieee80211_iface_combination ath10k_if_comb[] = {
+	{
+		.limits = ath10k_if_limits,
+		.n_limits = ARRAY_SIZE(ath10k_if_limits),
+		.max_interfaces = 8,
+		.num_different_channels = 1,
+		.beacon_int_infra_match = true,
+	},
+};
+
+static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = {
+	{
+		.limits = ath10k_10x_if_limits,
+		.n_limits = ARRAY_SIZE(ath10k_10x_if_limits),
+		.max_interfaces = 8,
+		.num_different_channels = 1,
+		.beacon_int_infra_match = true,
+#ifdef CONFIG_ATH10K_DFS_CERTIFIED
+		.radar_detect_widths =	BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+					BIT(NL80211_CHAN_WIDTH_20) |
+					BIT(NL80211_CHAN_WIDTH_40) |
+					BIT(NL80211_CHAN_WIDTH_80),
+#endif
+	},
+};
+
+static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = {
+	{
+		.max = 2,
+		.types = BIT(NL80211_IFTYPE_STATION),
+	},
+	{
+		.max = 2,
+		.types = BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+			 BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+			 BIT(NL80211_IFTYPE_P2P_CLIENT) |
+			 BIT(NL80211_IFTYPE_P2P_GO),
+	},
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+	},
+};
+
+static const struct ieee80211_iface_limit ath10k_tlv_qcs_if_limit[] = {
+	{
+		.max = 2,
+		.types = BIT(NL80211_IFTYPE_STATION),
+	},
+	{
+		.max = 2,
+		.types = BIT(NL80211_IFTYPE_P2P_CLIENT),
+	},
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+			 BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+			 BIT(NL80211_IFTYPE_P2P_GO),
+	},
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+	},
+};
+
+static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = {
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_STATION),
+	},
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_ADHOC),
+	},
+};
+
+/* FIXME: This is not thoroughly tested. These combinations may over- or
+ * underestimate hw/fw capabilities.
+ */
+static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = {
+	{
+		.limits = ath10k_tlv_if_limit,
+		.num_different_channels = 1,
+		.max_interfaces = 4,
+		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
+	},
+	{
+		.limits = ath10k_tlv_if_limit_ibss,
+		.num_different_channels = 1,
+		.max_interfaces = 2,
+		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
+	},
+};
+
+static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = {
+	{
+		.limits = ath10k_tlv_if_limit,
+		.num_different_channels = 1,
+		.max_interfaces = 4,
+		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
+	},
+	{
+		.limits = ath10k_tlv_qcs_if_limit,
+		.num_different_channels = 2,
+		.max_interfaces = 4,
+		.n_limits = ARRAY_SIZE(ath10k_tlv_qcs_if_limit),
+	},
+	{
+		.limits = ath10k_tlv_if_limit_ibss,
+		.num_different_channels = 1,
+		.max_interfaces = 2,
+		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
+	},
+};
+
+static const struct ieee80211_iface_limit ath10k_10_4_if_limits[] = {
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_STATION),
+	},
+	{
+		.max	= 16,
+		.types	= BIT(NL80211_IFTYPE_AP)
+#ifdef CONFIG_MAC80211_MESH
+			| BIT(NL80211_IFTYPE_MESH_POINT)
+#endif
+	},
+};
+
+static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = {
+	{
+		.limits = ath10k_10_4_if_limits,
+		.n_limits = ARRAY_SIZE(ath10k_10_4_if_limits),
+		.max_interfaces = 16,
+		.num_different_channels = 1,
+		.beacon_int_infra_match = true,
+#ifdef CONFIG_ATH10K_DFS_CERTIFIED
+		.radar_detect_widths =	BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+					BIT(NL80211_CHAN_WIDTH_20) |
+					BIT(NL80211_CHAN_WIDTH_40) |
+					BIT(NL80211_CHAN_WIDTH_80),
+#endif
+	},
+};
+
+static void ath10k_get_arvif_iter(void *data, u8 *mac,
+				  struct ieee80211_vif *vif)
+{
+	struct ath10k_vif_iter *arvif_iter = data;
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+	if (arvif->vdev_id == arvif_iter->vdev_id)
+		arvif_iter->arvif = arvif;
+}
+
+struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id)
+{
+	struct ath10k_vif_iter arvif_iter;
+	u32 flags;
+
+	memset(&arvif_iter, 0, sizeof(struct ath10k_vif_iter));
+	arvif_iter.vdev_id = vdev_id;
+
+	flags = IEEE80211_IFACE_ITER_RESUME_ALL;
+	ieee80211_iterate_active_interfaces_atomic(ar->hw,
+						   flags,
+						   ath10k_get_arvif_iter,
+						   &arvif_iter);
+	if (!arvif_iter.arvif) {
+		ath10k_warn(ar, "No VIF found for vdev %d\n", vdev_id);
+		return NULL;
+	}
+
+	return arvif_iter.arvif;
+}
+
+int ath10k_mac_register(struct ath10k *ar)
+{
+	static const u32 cipher_suites[] = {
+		WLAN_CIPHER_SUITE_WEP40,
+		WLAN_CIPHER_SUITE_WEP104,
+		WLAN_CIPHER_SUITE_TKIP,
+		WLAN_CIPHER_SUITE_CCMP,
+		WLAN_CIPHER_SUITE_AES_CMAC,
+	};
+	struct ieee80211_supported_band *band;
+	void *channels;
+	int ret;
+
+	SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
+
+	SET_IEEE80211_DEV(ar->hw, ar->dev);
+
+	BUILD_BUG_ON((ARRAY_SIZE(ath10k_2ghz_channels) +
+		      ARRAY_SIZE(ath10k_5ghz_channels)) !=
+		     ATH10K_NUM_CHANS);
+
+	if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
+		channels = kmemdup(ath10k_2ghz_channels,
+				   sizeof(ath10k_2ghz_channels),
+				   GFP_KERNEL);
+		if (!channels) {
+			ret = -ENOMEM;
+			goto err_free;
+		}
+
+		band = &ar->mac.sbands[IEEE80211_BAND_2GHZ];
+		band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
+		band->channels = channels;
+		band->n_bitrates = ath10k_g_rates_size;
+		band->bitrates = ath10k_g_rates;
+
+		ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band;
+	}
+
+	if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
+		channels = kmemdup(ath10k_5ghz_channels,
+				   sizeof(ath10k_5ghz_channels),
+				   GFP_KERNEL);
+		if (!channels) {
+			ret = -ENOMEM;
+			goto err_free;
+		}
+
+		band = &ar->mac.sbands[IEEE80211_BAND_5GHZ];
+		band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels);
+		band->channels = channels;
+		band->n_bitrates = ath10k_a_rates_size;
+		band->bitrates = ath10k_a_rates;
+		ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = band;
+	}
+
+	ath10k_mac_setup_ht_vht_cap(ar);
+
+	ar->hw->wiphy->interface_modes =
+		BIT(NL80211_IFTYPE_STATION) |
+		BIT(NL80211_IFTYPE_AP) |
+		BIT(NL80211_IFTYPE_MESH_POINT);
+
+	ar->hw->wiphy->available_antennas_rx = ar->cfg_rx_chainmask;
+	ar->hw->wiphy->available_antennas_tx = ar->cfg_tx_chainmask;
+
+	if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->fw_features))
+		ar->hw->wiphy->interface_modes |=
+			BIT(NL80211_IFTYPE_P2P_DEVICE) |
+			BIT(NL80211_IFTYPE_P2P_CLIENT) |
+			BIT(NL80211_IFTYPE_P2P_GO);
+
+	ieee80211_hw_set(ar->hw, SIGNAL_DBM);
+	ieee80211_hw_set(ar->hw, SUPPORTS_PS);
+	ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
+	ieee80211_hw_set(ar->hw, MFP_CAPABLE);
+	ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS);
+	ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
+	ieee80211_hw_set(ar->hw, AP_LINK_PS);
+	ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
+	ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
+	ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
+	ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
+	ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF);
+	ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
+	ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
+
+	if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
+		ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
+
+	ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
+	ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+
+	if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
+		ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
+
+	if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) {
+		ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
+		ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
+	}
+
+	ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
+	ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
+
+	ar->hw->vif_data_size = sizeof(struct ath10k_vif);
+	ar->hw->sta_data_size = sizeof(struct ath10k_sta);
+
+	ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
+
+	if (test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) {
+		ar->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+
+		/* Firmware delivers WPS/P2P Probe Request frames to the driver
+		 * so that userspace (e.g. wpa_supplicant/hostapd) can generate
+		 * correct Probe Responses. This is more of a hack than a real
+		 * offload advertisement.
+		 */
+		ar->hw->wiphy->probe_resp_offload |=
+			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
+			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
+			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
+	}
+
+	if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map))
+		ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
+
+	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+	ar->hw->wiphy->max_remain_on_channel_duration = 5000;
+
+	ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
+	ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
+
+	ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
+
+	ret = ath10k_wow_init(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to init wow: %d\n", ret);
+		goto err_free;
+	}
+
+	wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+
+	/*
+	 * On LL hardware queues are managed entirely by the FW,
+	 * so we only advertise to mac80211 that we can handle the queues.
+	 */
+	ar->hw->queues = IEEE80211_MAX_QUEUES;
+
+	/* vdev_ids are used as hw queue numbers. Make sure offchan tx queue is
+	 * something that vdev_ids can't reach so that we don't stop the queue
+	 * accidentally.
+	 */
+	ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1;
+
+	switch (ar->wmi.op_version) {
+	case ATH10K_FW_WMI_OP_VERSION_MAIN:
+		ar->hw->wiphy->iface_combinations = ath10k_if_comb;
+		ar->hw->wiphy->n_iface_combinations =
+			ARRAY_SIZE(ath10k_if_comb);
+		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
+		break;
+	case ATH10K_FW_WMI_OP_VERSION_TLV:
+		if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
+			ar->hw->wiphy->iface_combinations =
+				ath10k_tlv_qcs_if_comb;
+			ar->hw->wiphy->n_iface_combinations =
+				ARRAY_SIZE(ath10k_tlv_qcs_if_comb);
+		} else {
+			ar->hw->wiphy->iface_combinations = ath10k_tlv_if_comb;
+			ar->hw->wiphy->n_iface_combinations =
+				ARRAY_SIZE(ath10k_tlv_if_comb);
+		}
+		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
+		break;
+	case ATH10K_FW_WMI_OP_VERSION_10_1:
+	case ATH10K_FW_WMI_OP_VERSION_10_2:
+	case ATH10K_FW_WMI_OP_VERSION_10_2_4:
+		ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb;
+		ar->hw->wiphy->n_iface_combinations =
+			ARRAY_SIZE(ath10k_10x_if_comb);
+		break;
+	case ATH10K_FW_WMI_OP_VERSION_10_4:
+		ar->hw->wiphy->iface_combinations = ath10k_10_4_if_comb;
+		ar->hw->wiphy->n_iface_combinations =
+			ARRAY_SIZE(ath10k_10_4_if_comb);
+		break;
+	case ATH10K_FW_WMI_OP_VERSION_UNSET:
+	case ATH10K_FW_WMI_OP_VERSION_MAX:
+		WARN_ON(1);
+		ret = -EINVAL;
+		goto err_free;
+	}
+
+	if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
+		ar->hw->netdev_features = NETIF_F_HW_CSUM;
+
+	if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) {
+		/* Init ath dfs pattern detector */
+		ar->ath_common.debug_mask = ATH_DBG_DFS;
+		ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common,
+							     NL80211_DFS_UNSET);
+
+		if (!ar->dfs_detector)
+			ath10k_warn(ar, "failed to initialise DFS pattern detector\n");
+	}
+
+	ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
+			    ath10k_reg_notifier);
+	if (ret) {
+		ath10k_err(ar, "failed to initialise regulatory: %i\n", ret);
+		goto err_dfs_detector_exit;
+	}
+
+	ar->hw->wiphy->cipher_suites = cipher_suites;
+	ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+
+	ret = ieee80211_register_hw(ar->hw);
+	if (ret) {
+		ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
+		goto err_dfs_detector_exit;
+	}
+
+	if (!ath_is_world_regd(&ar->ath_common.regulatory)) {
+		ret = regulatory_hint(ar->hw->wiphy,
+				      ar->ath_common.regulatory.alpha2);
+		if (ret)
+			goto err_unregister;
+	}
+
+	return 0;
+
+err_unregister:
+	ieee80211_unregister_hw(ar->hw);
+
+err_dfs_detector_exit:
+	if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
+		ar->dfs_detector->exit(ar->dfs_detector);
+
+err_free:
+	kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels);
+	kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels);
+
+	SET_IEEE80211_DEV(ar->hw, NULL);
+	return ret;
+}
+
+void ath10k_mac_unregister(struct ath10k *ar)
+{
+	ieee80211_unregister_hw(ar->hw);
+
+	if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
+		ar->dfs_detector->exit(ar->dfs_detector);
+
+	kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels);
+	kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels);
+
+	SET_IEEE80211_DEV(ar->hw, NULL);
+}
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
new file mode 100644
index 0000000..e3cefe4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/mac.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _MAC_H_
+#define _MAC_H_
+
+#include <net/mac80211.h>
+#include "core.h"
+
+#define WEP_KEYID_SHIFT 6
+
+enum wmi_tlv_tx_pause_id;
+enum wmi_tlv_tx_pause_action;
+
+struct ath10k_generic_iter {
+	struct ath10k *ar;
+	int ret;
+};
+
+struct rfc1042_hdr {
+	u8 llc_dsap;
+	u8 llc_ssap;
+	u8 llc_ctrl;
+	u8 snap_oui[3];
+	__be16 snap_type;
+} __packed;
+
+struct ath10k *ath10k_mac_create(size_t priv_size);
+void ath10k_mac_destroy(struct ath10k *ar);
+int ath10k_mac_register(struct ath10k *ar);
+void ath10k_mac_unregister(struct ath10k *ar);
+struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id);
+void __ath10k_scan_finish(struct ath10k *ar);
+void ath10k_scan_finish(struct ath10k *ar);
+void ath10k_scan_timeout_work(struct work_struct *work);
+void ath10k_offchan_tx_purge(struct ath10k *ar);
+void ath10k_offchan_tx_work(struct work_struct *work);
+void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar);
+void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work);
+void ath10k_halt(struct ath10k *ar);
+void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif);
+void ath10k_drain_tx(struct ath10k *ar);
+bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
+				    u8 keyidx);
+int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
+			struct cfg80211_chan_def *def);
+
+void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id);
+void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id,
+				     enum wmi_tlv_tx_pause_id pause_id,
+				     enum wmi_tlv_tx_pause_action action);
+
+u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
+			     u8 hw_rate);
+u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
+			     u32 bitrate);
+
+void ath10k_mac_tx_lock(struct ath10k *ar, int reason);
+void ath10k_mac_tx_unlock(struct ath10k *ar, int reason);
+void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason);
+void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason);
+
+static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
+{
+	return (struct ath10k_vif *)vif->drv_priv;
+}
+
+static inline void ath10k_tx_h_seq_no(struct ieee80211_vif *vif,
+				      struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
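+	/* mac80211 asks the driver to assign the sequence number here. The
+	 * sequence number occupies bits 4-15 of seq_ctrl (the low 4 bits
+	 * hold the fragment number), hence the 0x10 increment per new frame.
+	 */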
+	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
+		if (arvif->tx_seq_no == 0)
+			arvif->tx_seq_no = 0x1000;
+
+		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
+			arvif->tx_seq_no += 0x10;
+		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+		hdr->seq_ctrl |= cpu_to_le16(arvif->tx_seq_no);
+	}
+}
+
+#endif /* _MAC_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/p2p.c b/drivers/net/wireless/ath/ath10k/p2p.c
new file mode 100644
index 0000000..c0b6ffa
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/p2p.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "wmi.h"
+#include "mac.h"
+#include "p2p.h"
+
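+/* Build the WFA vendor specific IE that carries the P2P Notice of Absence
+ * attribute: EID, length, WFA OUI (3 bytes), OUI type, attribute id,
+ * 2 byte attribute length, index, CTWindow/OppPS and finally the NoA
+ * descriptors taken from the WMI event.
+ */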
+static void ath10k_p2p_noa_ie_fill(u8 *data, size_t len,
+				   const struct wmi_p2p_noa_info *noa)
+{
+	struct ieee80211_p2p_noa_attr *noa_attr;
+	u8  ctwindow_oppps = noa->ctwindow_oppps;
+	u8 ctwindow = ctwindow_oppps >> WMI_P2P_OPPPS_CTWINDOW_OFFSET;
+	bool oppps = !!(ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT);
+	__le16 *noa_attr_len;
+	u16 attr_len;
+	u8 noa_descriptors = noa->num_descriptors;
+	int i;
+
+	/* P2P IE */
+	data[0] = WLAN_EID_VENDOR_SPECIFIC;
+	data[1] = len - 2;
+	data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
+	data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
+	data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
+	data[5] = WLAN_OUI_TYPE_WFA_P2P;
+
+	/* NOA ATTR */
+	data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
+	noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
+	noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];
+
+	noa_attr->index = noa->index;
+	noa_attr->oppps_ctwindow = ctwindow;
+	if (oppps)
+		noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;
+
+	for (i = 0; i < noa_descriptors; i++) {
+		noa_attr->desc[i].count =
+			__le32_to_cpu(noa->descriptors[i].type_count);
+		noa_attr->desc[i].duration = noa->descriptors[i].duration;
+		noa_attr->desc[i].interval = noa->descriptors[i].interval;
+		noa_attr->desc[i].start_time = noa->descriptors[i].start_time;
+	}
+
+	attr_len = 2; /* index + oppps_ctwindow */
+	attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
+	*noa_attr_len = __cpu_to_le16(attr_len);
+}
+
+static size_t ath10k_p2p_noa_ie_len_compute(const struct wmi_p2p_noa_info *noa)
+{
+	size_t len = 0;
+
+	if (!noa->num_descriptors &&
+	    !(noa->ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT))
+		return 0;
+
+	len += 1 + 1 + 4; /* EID + len + OUI */
+	len += 1 + 2; /* noa attr + attr len */
+	len += 1 + 1; /* index + oppps_ctwindow */
+	len += noa->num_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
+
+	return len;
+}
+
+static void ath10k_p2p_noa_ie_assign(struct ath10k_vif *arvif, void *ie,
+				     size_t len)
+{
+	struct ath10k *ar = arvif->ar;
+
+	lockdep_assert_held(&ar->data_lock);
+
+	kfree(arvif->u.ap.noa_data);
+
+	arvif->u.ap.noa_data = ie;
+	arvif->u.ap.noa_len = len;
+}
+
+static void __ath10k_p2p_noa_update(struct ath10k_vif *arvif,
+				    const struct wmi_p2p_noa_info *noa)
+{
+	struct ath10k *ar = arvif->ar;
+	void *ie;
+	size_t len;
+
+	lockdep_assert_held(&ar->data_lock);
+
+	ath10k_p2p_noa_ie_assign(arvif, NULL, 0);
+
+	len = ath10k_p2p_noa_ie_len_compute(noa);
+	if (!len)
+		return;
+
+	ie = kmalloc(len, GFP_ATOMIC);
+	if (!ie)
+		return;
+
+	ath10k_p2p_noa_ie_fill(ie, len, noa);
+	ath10k_p2p_noa_ie_assign(arvif, ie, len);
+}
+
+void ath10k_p2p_noa_update(struct ath10k_vif *arvif,
+			   const struct wmi_p2p_noa_info *noa)
+{
+	struct ath10k *ar = arvif->ar;
+
+	spin_lock_bh(&ar->data_lock);
+	__ath10k_p2p_noa_update(arvif, noa);
+	spin_unlock_bh(&ar->data_lock);
+}
+
+struct ath10k_p2p_noa_arg {
+	u32 vdev_id;
+	const struct wmi_p2p_noa_info *noa;
+};
+
+static void ath10k_p2p_noa_update_vdev_iter(void *data, u8 *mac,
+					    struct ieee80211_vif *vif)
+{
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	struct ath10k_p2p_noa_arg *arg = data;
+
+	if (arvif->vdev_id != arg->vdev_id)
+		return;
+
+	ath10k_p2p_noa_update(arvif, arg->noa);
+}
+
+void ath10k_p2p_noa_update_by_vdev_id(struct ath10k *ar, u32 vdev_id,
+				      const struct wmi_p2p_noa_info *noa)
+{
+	struct ath10k_p2p_noa_arg arg = {
+		.vdev_id = vdev_id,
+		.noa = noa,
+	};
+
+	ieee80211_iterate_active_interfaces_atomic(ar->hw,
+						   IEEE80211_IFACE_ITER_NORMAL,
+						   ath10k_p2p_noa_update_vdev_iter,
+						   &arg);
+}
diff --git a/drivers/net/wireless/ath/ath10k/p2p.h b/drivers/net/wireless/ath/ath10k/p2p.h
new file mode 100644
index 0000000..7be616e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/p2p.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _P2P_H
+#define _P2P_H
+
+struct ath10k_vif;
+struct wmi_p2p_noa_info;
+
+void ath10k_p2p_noa_update(struct ath10k_vif *arvif,
+			   const struct wmi_p2p_noa_info *noa);
+void ath10k_p2p_noa_update_by_vdev_id(struct ath10k *ar, u32 vdev_id,
+				      const struct wmi_p2p_noa_info *noa);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
new file mode 100644
index 0000000..907fd60
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -0,0 +1,3218 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/bitops.h>
+
+#include "core.h"
+#include "debug.h"
+
+#include "targaddrs.h"
+#include "bmi.h"
+
+#include "hif.h"
+#include "htc.h"
+
+#include "ce.h"
+#include "pci.h"
+
+enum ath10k_pci_irq_mode {
+	ATH10K_PCI_IRQ_AUTO = 0,
+	ATH10K_PCI_IRQ_LEGACY = 1,
+	ATH10K_PCI_IRQ_MSI = 2,
+};
+
+enum ath10k_pci_reset_mode {
+	ATH10K_PCI_RESET_AUTO = 0,
+	ATH10K_PCI_RESET_WARM_ONLY = 1,
+};
+
+static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
+static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;
+
+module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
+MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
+
+module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
+MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
+
+/* how long to wait for the target to initialise, in ms */
+#define ATH10K_PCI_TARGET_WAIT 3000
+#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
+
+static const struct pci_device_id ath10k_pci_id_table[] = {
+	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
+	{ PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
+	{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
+	{ PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
+	{ PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */
+	{0}
+};
+
+static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
+	/* QCA988X pre 2.0 chips are not supported because they need some nasty
+	 * hacks. ath10k doesn't have them and these devices crash horribly
+	 * because of that.
+	 */
+	{ QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },
+
+	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
+	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
+	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
+	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
+	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
+
+	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
+	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
+	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
+	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
+	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
+
+	{ QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },
+
+	{ QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV },
+	{ QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV },
+};
+
+static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
+static int ath10k_pci_cold_reset(struct ath10k *ar);
+static int ath10k_pci_safe_chip_reset(struct ath10k *ar);
+static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
+static int ath10k_pci_init_irq(struct ath10k *ar);
+static int ath10k_pci_deinit_irq(struct ath10k *ar);
+static int ath10k_pci_request_irq(struct ath10k *ar);
+static void ath10k_pci_free_irq(struct ath10k *ar);
+static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
+			       struct ath10k_ce_pipe *rx_pipe,
+			       struct bmi_xfer *xfer);
+static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);
+static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
+
+static struct ce_attr host_ce_config_wlan[] = {
+	/* CE0: host->target HTC control and raw streams */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 16,
+		.src_sz_max = 256,
+		.dest_nentries = 0,
+		.send_cb = ath10k_pci_htc_tx_cb,
+	},
+
+	/* CE1: target->host HTT + HTC control */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 2048,
+		.dest_nentries = 512,
+		.recv_cb = ath10k_pci_htt_htc_rx_cb,
+	},
+
+	/* CE2: target->host WMI */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 2048,
+		.dest_nentries = 128,
+		.recv_cb = ath10k_pci_htc_rx_cb,
+	},
+
+	/* CE3: host->target WMI */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 32,
+		.src_sz_max = 2048,
+		.dest_nentries = 0,
+		.send_cb = ath10k_pci_htc_tx_cb,
+	},
+
+	/* CE4: host->target HTT */
+	{
+		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+		.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
+		.src_sz_max = 256,
+		.dest_nentries = 0,
+		.send_cb = ath10k_pci_htt_tx_cb,
+	},
+
+	/* CE5: target->host HTT (HIF->HTT) */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 512,
+		.dest_nentries = 512,
+		.recv_cb = ath10k_pci_htt_rx_cb,
+	},
+
+	/* CE6: target autonomous hif_memcpy */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 0,
+		.dest_nentries = 0,
+	},
+
+	/* CE7: ce_diag, the Diagnostic Window */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 2,
+		.src_sz_max = DIAG_TRANSFER_LIMIT,
+		.dest_nentries = 2,
+	},
+
+	/* CE8: target->host pktlog */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 2048,
+		.dest_nentries = 128,
+	},
+
+	/* CE9 target autonomous qcache memcpy */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 0,
+		.dest_nentries = 0,
+	},
+
+	/* CE10: target autonomous hif memcpy */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 0,
+		.dest_nentries = 0,
+	},
+
+	/* CE11: target autonomous hif memcpy */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 0,
+		.dest_nentries = 0,
+	},
+};
+
+/* Target firmware's Copy Engine configuration. */
+static struct ce_pipe_config target_ce_config_wlan[] = {
+	/* CE0: host->target HTC control and raw streams */
+	{
+		.pipenum = __cpu_to_le32(0),
+		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
+		.nentries = __cpu_to_le32(32),
+		.nbytes_max = __cpu_to_le32(256),
+		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
+		.reserved = __cpu_to_le32(0),
+	},
+
+	/* CE1: target->host HTT + HTC control */
+	{
+		.pipenum = __cpu_to_le32(1),
+		.pipedir = __cpu_to_le32(PIPEDIR_IN),
+		.nentries = __cpu_to_le32(32),
+		.nbytes_max = __cpu_to_le32(2048),
+		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
+		.reserved = __cpu_to_le32(0),
+	},
+
+	/* CE2: target->host WMI */
+	{
+		.pipenum = __cpu_to_le32(2),
+		.pipedir = __cpu_to_le32(PIPEDIR_IN),
+		.nentries = __cpu_to_le32(64),
+		.nbytes_max = __cpu_to_le32(2048),
+		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
+		.reserved = __cpu_to_le32(0),
+	},
+
+	/* CE3: host->target WMI */
+	{
+		.pipenum = __cpu_to_le32(3),
+		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
+		.nentries = __cpu_to_le32(32),
+		.nbytes_max = __cpu_to_le32(2048),
+		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
+		.reserved = __cpu_to_le32(0),
+	},
+
+	/* CE4: host->target HTT */
+	{
+		.pipenum = __cpu_to_le32(4),
+		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
+		.nentries = __cpu_to_le32(256),
+		.nbytes_max = __cpu_to_le32(256),
+		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
+		.reserved = __cpu_to_le32(0),
+	},
+
+	/* NB: 50% of src nentries, since tx has 2 frags */
+
+	/* CE5: target->host HTT (HIF->HTT) */
+	{
+		.pipenum = __cpu_to_le32(5),
+		.pipedir = __cpu_to_le32(PIPEDIR_IN),
+		.nentries = __cpu_to_le32(32),
+		.nbytes_max = __cpu_to_le32(512),
+		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
+		.reserved = __cpu_to_le32(0),
+	},
+
+	/* CE6: Reserved for target autonomous hif_memcpy */
+	{
+		.pipenum = __cpu_to_le32(6),
+		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+		.nentries = __cpu_to_le32(32),
+		.nbytes_max = __cpu_to_le32(4096),
+		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
+		.reserved = __cpu_to_le32(0),
+	},
+
+	/* CE7 used only by Host */
+	{
+		.pipenum = __cpu_to_le32(7),
+		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+		.nentries = __cpu_to_le32(0),
+		.nbytes_max = __cpu_to_le32(0),
+		.flags = __cpu_to_le32(0),
+		.reserved = __cpu_to_le32(0),
+	},
+
+	/* CE8 target->host pktlog */
+	{
+		.pipenum = __cpu_to_le32(8),
+		.pipedir = __cpu_to_le32(PIPEDIR_IN),
+		.nentries = __cpu_to_le32(64),
+		.nbytes_max = __cpu_to_le32(2048),
+		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+		.reserved = __cpu_to_le32(0),
+	},
+
+	/* CE9 target autonomous qcache memcpy */
+	{
+		.pipenum = __cpu_to_le32(9),
+		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+		.nentries = __cpu_to_le32(32),
+		.nbytes_max = __cpu_to_le32(2048),
+		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+		.reserved = __cpu_to_le32(0),
+	},
+
+	/* It is not necessary to send the target a wlan configuration for
+	 * CE10 & CE11 as these CEs are not actively used by the target.
+	 */
+};
+
+/*
+ * Map from service/endpoint to Copy Engine.
+ * This table is derived from the CE_PCI TABLE, above.
+ * It is passed to the Target at startup for use by firmware.
+ */
+static struct service_to_pipe target_service_to_ce_map_wlan[] = {
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
+		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
+		__cpu_to_le32(3),
+	},
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
+		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
+		__cpu_to_le32(2),
+	},
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
+		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
+		__cpu_to_le32(3),
+	},
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
+		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
+		__cpu_to_le32(2),
+	},
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
+		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
+		__cpu_to_le32(3),
+	},
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
+		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
+		__cpu_to_le32(2),
+	},
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
+		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
+		__cpu_to_le32(3),
+	},
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
+		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
+		__cpu_to_le32(2),
+	},
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
+		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
+		__cpu_to_le32(3),
+	},
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
+		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
+		__cpu_to_le32(2),
+	},
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
+		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
+		__cpu_to_le32(0),
+	},
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
+		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
+		__cpu_to_le32(1),
+	},
+	{ /* not used */
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
+		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
+		__cpu_to_le32(0),
+	},
+	{ /* not used */
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
+		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
+		__cpu_to_le32(1),
+	},
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
+		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
+		__cpu_to_le32(4),
+	},
+	{
+		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
+		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
+		__cpu_to_le32(5),
+	},
+
+	/* (Additions here) */
+
+	{ /* must be last */
+		__cpu_to_le32(0),
+		__cpu_to_le32(0),
+		__cpu_to_le32(0),
+	},
+};
+
+static bool ath10k_pci_is_awake(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+			   RTC_STATE_ADDRESS);
+
+	return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
+}
+
+static void __ath10k_pci_wake(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+	lockdep_assert_held(&ar_pci->ps_lock);
+
+	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n",
+		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);
+
+	iowrite32(PCIE_SOC_WAKE_V_MASK,
+		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+		  PCIE_SOC_WAKE_ADDRESS);
+}
+
+static void __ath10k_pci_sleep(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+	lockdep_assert_held(&ar_pci->ps_lock);
+
+	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",
+		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);
+
+	iowrite32(PCIE_SOC_WAKE_RESET,
+		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+		  PCIE_SOC_WAKE_ADDRESS);
+	ar_pci->ps_awake = false;
+}
+
+static int ath10k_pci_wake_wait(struct ath10k *ar)
+{
+	int tot_delay = 0;
+	int curr_delay = 5;
+
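+	/* Poll the RTC state, starting with a 5 us delay and backing off to
+	 * at most 50 us per iteration, until the SoC reports it is awake or
+	 * the timeout expires.
+	 */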
+	while (tot_delay < PCIE_WAKE_TIMEOUT) {
+		if (ath10k_pci_is_awake(ar)) {
+			if (tot_delay > PCIE_WAKE_LATE_US)
+				ath10k_warn(ar, "device wakeup took %d ms which is unusually long; otherwise it works normally.\n",
+					    tot_delay / 1000);
+			return 0;
+		}
+
+		udelay(curr_delay);
+		tot_delay += curr_delay;
+
+		if (curr_delay < 50)
+			curr_delay += 5;
+	}
+
+	return -ETIMEDOUT;
+}
+
+static int ath10k_pci_force_wake(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&ar_pci->ps_lock, flags);
+
+	if (!ar_pci->ps_awake) {
+		iowrite32(PCIE_SOC_WAKE_V_MASK,
+			  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+			  PCIE_SOC_WAKE_ADDRESS);
+
+		ret = ath10k_pci_wake_wait(ar);
+		if (ret == 0)
+			ar_pci->ps_awake = true;
+	}
+
+	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+
+	return ret;
+}
+
+static void ath10k_pci_force_sleep(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ar_pci->ps_lock, flags);
+
+	iowrite32(PCIE_SOC_WAKE_RESET,
+		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+		  PCIE_SOC_WAKE_ADDRESS);
+	ar_pci->ps_awake = false;
+
+	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+}
+
+static int ath10k_pci_wake(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	unsigned long flags;
+	int ret = 0;
+
+	if (ar_pci->pci_ps == 0)
+		return ret;
+
+	spin_lock_irqsave(&ar_pci->ps_lock, flags);
+
+	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
+		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);
+
+	/* This function can be called very frequently. To avoid excessive
+	 * CPU stalls from MMIO reads use a cached variable to hold the
+	 * device state.
+	 */
+	if (!ar_pci->ps_awake) {
+		__ath10k_pci_wake(ar);
+
+		ret = ath10k_pci_wake_wait(ar);
+		if (ret == 0)
+			ar_pci->ps_awake = true;
+	}
+
+	if (ret == 0) {
+		ar_pci->ps_wake_refcount++;
+		WARN_ON(ar_pci->ps_wake_refcount == 0);
+	}
+
+	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+
+	return ret;
+}
+
+static void ath10k_pci_sleep(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	unsigned long flags;
+
+	if (ar_pci->pci_ps == 0)
+		return;
+
+	spin_lock_irqsave(&ar_pci->ps_lock, flags);
+
+	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
+		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);
+
+	if (WARN_ON(ar_pci->ps_wake_refcount == 0))
+		goto skip;
+
+	ar_pci->ps_wake_refcount--;
+
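+	/* Do not put the SoC to sleep right away; arm a grace period timer
+	 * instead so frequent back-to-back register accesses do not keep
+	 * toggling the SOC_WAKE state.
+	 */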
+	mod_timer(&ar_pci->ps_timer, jiffies +
+		  msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC));
+
+skip:
+	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+}
+
+static void ath10k_pci_ps_timer(unsigned long ptr)
+{
+	struct ath10k *ar = (void *)ptr;
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ar_pci->ps_lock, flags);
+
+	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n",
+		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);
+
+	if (ar_pci->ps_wake_refcount > 0)
+		goto skip;
+
+	__ath10k_pci_sleep(ar);
+
+skip:
+	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+}
+
+static void ath10k_pci_sleep_sync(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	unsigned long flags;
+
+	if (ar_pci->pci_ps == 0) {
+		ath10k_pci_force_sleep(ar);
+		return;
+	}
+
+	del_timer_sync(&ar_pci->ps_timer);
+
+	spin_lock_irqsave(&ar_pci->ps_lock, flags);
+	WARN_ON(ar_pci->ps_wake_refcount > 0);
+	__ath10k_pci_sleep(ar);
+	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+}
+
+void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int ret;
+
+	if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) {
+		ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
+			    offset, offset + sizeof(value), ar_pci->mem_len);
+		return;
+	}
+
+	ret = ath10k_pci_wake(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
+			    value, offset, ret);
+		return;
+	}
+
+	iowrite32(value, ar_pci->mem + offset);
+	ath10k_pci_sleep(ar);
+}
+
+u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	u32 val;
+	int ret;
+
+	if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) {
+		ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
+			    offset, offset + sizeof(val), ar_pci->mem_len);
+		return 0;
+	}
+
+	ret = ath10k_pci_wake(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
+			    offset, ret);
+		return 0xffffffff;
+	}
+
+	val = ioread32(ar_pci->mem + offset);
+	ath10k_pci_sleep(ar);
+
+	return val;
+}
+
+u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
+{
+	return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
+}
+
+void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
+{
+	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
+}
+
+u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
+{
+	return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr);
+}
+
+void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
+{
+	ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val);
+}
+
+static bool ath10k_pci_irq_pending(struct ath10k *ar)
+{
+	u32 cause;
+
+	/* Check if the shared legacy irq is for us */
+	cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+				  PCIE_INTR_CAUSE_ADDRESS);
+	if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
+		return true;
+
+	return false;
+}
+
+static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
+{
+	/* IMPORTANT: INTR_CLR register has to be set after
+	 * INTR_ENABLE is set to 0, otherwise the interrupt cannot be
+	 * cleared properly. */
+	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
+			   0);
+	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
+			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
+
+	/* IMPORTANT: this extra read transaction is required to
+	 * flush the posted write buffer. */
+	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+				PCIE_INTR_ENABLE_ADDRESS);
+}
+
+static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
+{
+	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+			   PCIE_INTR_ENABLE_ADDRESS,
+			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
+
+	/* IMPORTANT: this extra read transaction is required to
+	 * flush the posted write buffer. */
+	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+				PCIE_INTR_ENABLE_ADDRESS);
+}
+
+static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+	if (ar_pci->num_msi_intrs > 1)
+		return "msi-x";
+
+	if (ar_pci->num_msi_intrs == 1)
+		return "msi";
+
+	return "legacy";
+}
+
+static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
+{
+	struct ath10k *ar = pipe->hif_ce_state;
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
+	struct sk_buff *skb;
+	dma_addr_t paddr;
+	int ret;
+
+	skb = dev_alloc_skb(pipe->buf_sz);
+	if (!skb)
+		return -ENOMEM;
+
+	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
+
+	paddr = dma_map_single(ar->dev, skb->data,
+			       skb->len + skb_tailroom(skb),
+			       DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(ar->dev, paddr))) {
+		ath10k_warn(ar, "failed to dma map pci rx buf\n");
+		dev_kfree_skb_any(skb);
+		return -EIO;
+	}
+
+	ATH10K_SKB_RXCB(skb)->paddr = paddr;
+
+	spin_lock_bh(&ar_pci->ce_lock);
+	ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
+	spin_unlock_bh(&ar_pci->ce_lock);
+	if (ret) {
+		dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
+				 DMA_FROM_DEVICE);
+		dev_kfree_skb_any(skb);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
+{
+	struct ath10k *ar = pipe->hif_ce_state;
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
+	int ret, num;
+
+	if (pipe->buf_sz == 0)
+		return;
+
+	if (!ce_pipe->dest_ring)
+		return;
+
+	spin_lock_bh(&ar_pci->ce_lock);
+	num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
+	spin_unlock_bh(&ar_pci->ce_lock);
+	while (num--) {
+		ret = __ath10k_pci_rx_post_buf(pipe);
+		if (ret) {
+			if (ret == -ENOSPC)
+				break;
+			ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
+			mod_timer(&ar_pci->rx_post_retry, jiffies +
+				  ATH10K_PCI_RX_POST_RETRY_MS);
+			break;
+		}
+	}
+}
+
+static void ath10k_pci_rx_post(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int i;
+
+	for (i = 0; i < CE_COUNT; i++)
+		ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
+}
+
+static void ath10k_pci_rx_replenish_retry(unsigned long ptr)
+{
+	struct ath10k *ar = (void *)ptr;
+
+	ath10k_pci_rx_post(ar);
+}
+
+static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+	u32 val = 0;
+
+	switch (ar->hw_rev) {
+	case ATH10K_HW_QCA988X:
+	case ATH10K_HW_QCA6174:
+	case ATH10K_HW_QCA9377:
+		val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+					  CORE_CTRL_ADDRESS) &
+		       0x7ff) << 21;
+		break;
+	case ATH10K_HW_QCA99X0:
+		val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
+		break;
+	}
+
+	val |= 0x100000 | (addr & 0xfffff);
+	return val;
+}
+
+/*
+ * Diagnostic read/write access is provided for startup/config/debug usage.
+ * Caller must guarantee proper alignment, when applicable, and that there
+ * is only a single user at any moment.
+ */
+static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
+				    int nbytes)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int ret = 0;
+	u32 buf;
+	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
+	unsigned int id;
+	unsigned int flags;
+	struct ath10k_ce_pipe *ce_diag;
+	/* Host buffer address in CE space */
+	u32 ce_data;
+	dma_addr_t ce_data_base = 0;
+	void *data_buf = NULL;
+	int i;
+
+	spin_lock_bh(&ar_pci->ce_lock);
+
+	ce_diag = ar_pci->ce_diag;
+
+	/*
+	 * Allocate a temporary bounce buffer to hold caller's data
+	 * to be DMA'ed from Target. This guarantees
+	 *   1) 4-byte alignment
+	 *   2) Buffer in DMA-able space
+	 */
+	orig_nbytes = nbytes;
+	data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
+						       orig_nbytes,
+						       &ce_data_base,
+						       GFP_ATOMIC);
+
+	if (!data_buf) {
+		ret = -ENOMEM;
+		goto done;
+	}
+	memset(data_buf, 0, orig_nbytes);
+
+	remaining_bytes = orig_nbytes;
+	ce_data = ce_data_base;
+	while (remaining_bytes) {
+		nbytes = min_t(unsigned int, remaining_bytes,
+			       DIAG_TRANSFER_LIMIT);
+
+		ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data);
+		if (ret != 0)
+			goto done;
+
+		/* Request CE to send from Target(!) address to Host buffer */
+		/*
+		 * The address supplied by the caller is in the
+		 * Target CPU virtual address space.
+		 *
+		 * In order to use this address with the diagnostic CE,
+		 * convert it from Target CPU virtual address space
+		 * to CE address space
+		 */
+		address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
+
+		ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
+					    0);
+		if (ret)
+			goto done;
+
+		i = 0;
+		while (ath10k_ce_completed_send_next_nolock(ce_diag,
+							    NULL) != 0) {
+			mdelay(1);
+			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
+				ret = -EBUSY;
+				goto done;
+			}
+		}
+
+		i = 0;
+		while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
+							    &completed_nbytes,
+							    &id, &flags) != 0) {
+			mdelay(1);
+
+			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
+				ret = -EBUSY;
+				goto done;
+			}
+		}
+
+		if (nbytes != completed_nbytes) {
+			ret = -EIO;
+			goto done;
+		}
+
+		if (buf != ce_data) {
+			ret = -EIO;
+			goto done;
+		}
+
+		remaining_bytes -= nbytes;
+		address += nbytes;
+		ce_data += nbytes;
+	}
+
+done:
+	if (ret == 0)
+		memcpy(data, data_buf, orig_nbytes);
+	else
+		ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n",
+			    address, ret);
+
+	if (data_buf)
+		dma_free_coherent(ar->dev, orig_nbytes, data_buf,
+				  ce_data_base);
+
+	spin_unlock_bh(&ar_pci->ce_lock);
+
+	return ret;
+}
+
+static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
+{
+	__le32 val = 0;
+	int ret;
+
+	ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
+	*value = __le32_to_cpu(val);
+
+	return ret;
+}
+
+static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
+				     u32 src, u32 len)
+{
+	u32 host_addr, addr;
+	int ret;
+
+	host_addr = host_interest_item_address(src);
+
+	ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
+	if (ret != 0) {
+		ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
+			    src, ret);
+		return ret;
+	}
+
+	ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
+	if (ret != 0) {
+		ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
+			    addr, len, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+#define ath10k_pci_diag_read_hi(ar, dest, src, len)		\
+	__ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)
+
+static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
+				     const void *data, int nbytes)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int ret = 0;
+	u32 buf;
+	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
+	unsigned int id;
+	unsigned int flags;
+	struct ath10k_ce_pipe *ce_diag;
+	void *data_buf = NULL;
+	u32 ce_data;	/* Host buffer address in CE space */
+	dma_addr_t ce_data_base = 0;
+	int i;
+
+	spin_lock_bh(&ar_pci->ce_lock);
+
+	ce_diag = ar_pci->ce_diag;
+
+	/*
+	 * Allocate a temporary bounce buffer to hold caller's data
+	 * to be DMA'ed to Target. This guarantees
+	 *   1) 4-byte alignment
+	 *   2) Buffer in DMA-able space
+	 */
+	orig_nbytes = nbytes;
+	data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
+						       orig_nbytes,
+						       &ce_data_base,
+						       GFP_ATOMIC);
+	if (!data_buf) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	/* Copy caller's data to allocated DMA buf */
+	memcpy(data_buf, data, orig_nbytes);
+
+	/*
+	 * The address supplied by the caller is in the
+	 * Target CPU virtual address space.
+	 *
+	 * In order to use this address with the diagnostic CE,
+	 * convert it from
+	 *    Target CPU virtual address space
+	 * to
+	 *    CE address space
+	 */
+	address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
+
+	remaining_bytes = orig_nbytes;
+	ce_data = ce_data_base;
+	while (remaining_bytes) {
+		/* FIXME: check cast */
+		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
+
+		/* Set up to receive directly into Target(!) address */
+		ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, address);
+		if (ret != 0)
+			goto done;
+
+		/*
+		 * Request CE to send caller-supplied data that
+		 * was copied to bounce buffer to Target(!) address.
+		 */
+		ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data,
+					    nbytes, 0, 0);
+		if (ret != 0)
+			goto done;
+
+		i = 0;
+		while (ath10k_ce_completed_send_next_nolock(ce_diag,
+							    NULL) != 0) {
+			mdelay(1);
+
+			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
+				ret = -EBUSY;
+				goto done;
+			}
+		}
+
+		i = 0;
+		while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
+							    &completed_nbytes,
+							    &id, &flags) != 0) {
+			mdelay(1);
+
+			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
+				ret = -EBUSY;
+				goto done;
+			}
+		}
+
+		if (nbytes != completed_nbytes) {
+			ret = -EIO;
+			goto done;
+		}
+
+		if (buf != address) {
+			ret = -EIO;
+			goto done;
+		}
+
+		remaining_bytes -= nbytes;
+		address += nbytes;
+		ce_data += nbytes;
+	}
+
+done:
+	if (data_buf) {
+		dma_free_coherent(ar->dev, orig_nbytes, data_buf,
+				  ce_data_base);
+	}
+
+	if (ret != 0)
+		ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
+			    address, ret);
+
+	spin_unlock_bh(&ar_pci->ce_lock);
+
+	return ret;
+}
+
+static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
+{
+	__le32 val = __cpu_to_le32(value);
+
+	return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
+}
+
+/* Called by lower (CE) layer when a send to Target completes. */
+static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
+{
+	struct ath10k *ar = ce_state->ar;
+	struct sk_buff_head list;
+	struct sk_buff *skb;
+
+	__skb_queue_head_init(&list);
+	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
+		/* no need to call tx completion for NULL pointers */
+		if (skb == NULL)
+			continue;
+
+		__skb_queue_tail(&list, skb);
+	}
+
+	while ((skb = __skb_dequeue(&list)))
+		ath10k_htc_tx_completion_handler(ar, skb);
+}
+
+static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
+				     void (*callback)(struct ath10k *ar,
+						      struct sk_buff *skb))
+{
+	struct ath10k *ar = ce_state->ar;
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id];
+	struct sk_buff *skb;
+	struct sk_buff_head list;
+	void *transfer_context;
+	u32 ce_data;
+	unsigned int nbytes, max_nbytes;
+	unsigned int transfer_id;
+	unsigned int flags;
+
+	__skb_queue_head_init(&list);
+	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
+					     &ce_data, &nbytes, &transfer_id,
+					     &flags) == 0) {
+		skb = transfer_context;
+		max_nbytes = skb->len + skb_tailroom(skb);
+		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
+				 max_nbytes, DMA_FROM_DEVICE);
+
+		if (unlikely(max_nbytes < nbytes)) {
+			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
+				    nbytes, max_nbytes);
+			dev_kfree_skb_any(skb);
+			continue;
+		}
+
+		skb_put(skb, nbytes);
+		__skb_queue_tail(&list, skb);
+	}
+
+	while ((skb = __skb_dequeue(&list))) {
+		ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
+			   ce_state->id, skb->len);
+		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
+				skb->data, skb->len);
+
+		callback(ar, skb);
+	}
+
+	ath10k_pci_rx_post_pipe(pipe_info);
+}
+
+/* Called by lower (CE) layer when data is received from the Target. */
+static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
+{
+	ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
+}
+
+static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
+{
+	/* CE4 polling needs to be done whenever CE pipe which transports
+	 * HTT Rx (target->host) is processed.
+	 */
+	ath10k_ce_per_engine_service(ce_state->ar, 4);
+
+	ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
+}
+
+/* Called by lower (CE) layer when a send to HTT Target completes. */
+static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
+{
+	struct ath10k *ar = ce_state->ar;
+	struct sk_buff *skb;
+
+	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
+		/* no need to call tx completion for NULL pointers */
+		if (!skb)
+			continue;
+
+		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
+				 skb->len, DMA_TO_DEVICE);
+		ath10k_htt_hif_tx_complete(ar, skb);
+	}
+}
+
+static void ath10k_pci_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
+{
+	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
+	ath10k_htt_t2h_msg_handler(ar, skb);
+}
+
+/* Called by lower (CE) layer when HTT data is received from the Target. */
+static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
+{
+	/* CE4 polling needs to be done whenever CE pipe which transports
+	 * HTT Rx (target->host) is processed.
+	 */
+	ath10k_ce_per_engine_service(ce_state->ar, 4);
+
+	ath10k_pci_process_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
+}
+
+static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
+				struct ath10k_hif_sg_item *items, int n_items)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
+	struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
+	struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
+	unsigned int nentries_mask;
+	unsigned int sw_index;
+	unsigned int write_index;
+	int err, i = 0;
+
+	spin_lock_bh(&ar_pci->ce_lock);
+
+	nentries_mask = src_ring->nentries_mask;
+	sw_index = src_ring->sw_index;
+	write_index = src_ring->write_index;
+
+	if (unlikely(CE_RING_DELTA(nentries_mask,
+				   write_index, sw_index - 1) < n_items)) {
+		err = -ENOBUFS;
+		goto err;
+	}
+
+	for (i = 0; i < n_items - 1; i++) {
+		ath10k_dbg(ar, ATH10K_DBG_PCI,
+			   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
+			   i, items[i].paddr, items[i].len, n_items);
+		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
+				items[i].vaddr, items[i].len);
+
+		err = ath10k_ce_send_nolock(ce_pipe,
+					    items[i].transfer_context,
+					    items[i].paddr,
+					    items[i].len,
+					    items[i].transfer_id,
+					    CE_SEND_FLAG_GATHER);
+		if (err)
+			goto err;
+	}
+
+	/* `i` is equal to `n_items - 1` after the for() loop */
+
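+	/* The last item is sent without CE_SEND_FLAG_GATHER which terminates
+	 * the gather sequence started by the preceding fragments.
+	 */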
+	ath10k_dbg(ar, ATH10K_DBG_PCI,
+		   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
+		   i, items[i].paddr, items[i].len, n_items);
+	ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
+			items[i].vaddr, items[i].len);
+
+	err = ath10k_ce_send_nolock(ce_pipe,
+				    items[i].transfer_context,
+				    items[i].paddr,
+				    items[i].len,
+				    items[i].transfer_id,
+				    0);
+	if (err)
+		goto err;
+
+	spin_unlock_bh(&ar_pci->ce_lock);
+	return 0;
+
+err:
+	for (; i > 0; i--)
+		__ath10k_ce_send_revert(ce_pipe);
+
+	spin_unlock_bh(&ar_pci->ce_lock);
+	return err;
+}
+
+static int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
+				    size_t buf_len)
+{
+	return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
+}
+
+static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");
+
+	return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
+}
+
+static void ath10k_pci_dump_registers(struct ath10k *ar,
+				      struct ath10k_fw_crash_data *crash_data)
+{
+	__le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
+	int i, ret;
+
+	lockdep_assert_held(&ar->data_lock);
+
+	ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
+				      hi_failure_state,
+				      REG_DUMP_COUNT_QCA988X * sizeof(__le32));
+	if (ret) {
+		ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
+		return;
+	}
+
+	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
+
+	ath10k_err(ar, "firmware register dump:\n");
+	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
+		ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
+			   i,
+			   __le32_to_cpu(reg_dump_values[i]),
+			   __le32_to_cpu(reg_dump_values[i + 1]),
+			   __le32_to_cpu(reg_dump_values[i + 2]),
+			   __le32_to_cpu(reg_dump_values[i + 3]));
+
+	if (!crash_data)
+		return;
+
+	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
+		crash_data->registers[i] = reg_dump_values[i];
+}
+
+static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
+{
+	struct ath10k_fw_crash_data *crash_data;
+	char uuid[50];
+
+	spin_lock_bh(&ar->data_lock);
+
+	ar->stats.fw_crash_counter++;
+
+	crash_data = ath10k_debug_get_new_fw_crash_data(ar);
+
+	if (crash_data)
+		scnprintf(uuid, sizeof(uuid), "%pUl", &crash_data->uuid);
+	else
+		scnprintf(uuid, sizeof(uuid), "n/a");
+
+	ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid);
+	ath10k_print_driver_info(ar);
+	ath10k_pci_dump_registers(ar, crash_data);
+
+	spin_unlock_bh(&ar->data_lock);
+
+	queue_work(ar->workqueue, &ar->restart_work);
+}
+
+static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
+					       int force)
+{
+	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");
+
+	if (!force) {
+		int resources;
+		/*
+		 * Decide whether to actually poll for completions, or just
+		 * wait for a later chance.
+		 * If there seem to be plenty of resources left, then just wait
+		 * since checking involves reading a CE register, which is a
+		 * relatively expensive operation.
+		 */
+		resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
+
+		/*
+		 * If at least 50% of the total resources are still available,
+		 * don't bother checking again yet.
+		 */
+		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
+			return;
+	}
+	ath10k_ce_per_engine_service(ar, pipe);
+}
+
+static void ath10k_pci_kill_tasklet(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int i;
+
+	tasklet_kill(&ar_pci->intr_tq);
+	tasklet_kill(&ar_pci->msi_fw_err);
+
+	for (i = 0; i < CE_COUNT; i++)
+		tasklet_kill(&ar_pci->pipe_info[i].intr);
+
+	del_timer_sync(&ar_pci->rx_post_retry);
+}
+
+static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
+					      u8 *ul_pipe, u8 *dl_pipe)
+{
+	const struct service_to_pipe *entry;
+	bool ul_set = false, dl_set = false;
+	int i;
+
+	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");
+
+	for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
+		entry = &target_service_to_ce_map_wlan[i];
+
+		if (__le32_to_cpu(entry->service_id) != service_id)
+			continue;
+
+		switch (__le32_to_cpu(entry->pipedir)) {
+		case PIPEDIR_NONE:
+			break;
+		case PIPEDIR_IN:
+			WARN_ON(dl_set);
+			*dl_pipe = __le32_to_cpu(entry->pipenum);
+			dl_set = true;
+			break;
+		case PIPEDIR_OUT:
+			WARN_ON(ul_set);
+			*ul_pipe = __le32_to_cpu(entry->pipenum);
+			ul_set = true;
+			break;
+		case PIPEDIR_INOUT:
+			WARN_ON(dl_set);
+			WARN_ON(ul_set);
+			*dl_pipe = __le32_to_cpu(entry->pipenum);
+			*ul_pipe = __le32_to_cpu(entry->pipenum);
+			dl_set = true;
+			ul_set = true;
+			break;
+		}
+	}
+
+	if (WARN_ON(!ul_set || !dl_set))
+		return -ENOENT;
+
+	return 0;
+}
+
+static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
+					    u8 *ul_pipe, u8 *dl_pipe)
+{
+	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");
+
+	(void)ath10k_pci_hif_map_service_to_pipe(ar,
+						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
+						 ul_pipe, dl_pipe);
+}
+
+static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
+{
+	u32 val;
+
+	switch (ar->hw_rev) {
+	case ATH10K_HW_QCA988X:
+	case ATH10K_HW_QCA6174:
+	case ATH10K_HW_QCA9377:
+		val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+					CORE_CTRL_ADDRESS);
+		val &= ~CORE_CTRL_PCIE_REG_31_MASK;
+		ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+				   CORE_CTRL_ADDRESS, val);
+		break;
+	case ATH10K_HW_QCA99X0:
+		/* TODO: Find appropriate register configuration for QCA99X0
+		 *  to mask irq/MSI.
+		 */
+		 break;
+	}
+}
+
+static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
+{
+	u32 val;
+
+	switch (ar->hw_rev) {
+	case ATH10K_HW_QCA988X:
+	case ATH10K_HW_QCA6174:
+	case ATH10K_HW_QCA9377:
+		val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+					CORE_CTRL_ADDRESS);
+		val |= CORE_CTRL_PCIE_REG_31_MASK;
+		ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+				   CORE_CTRL_ADDRESS, val);
+		break;
+	case ATH10K_HW_QCA99X0:
+		/* TODO: Find appropriate register configuration for QCA99X0
+		 *  to unmask irq/MSI.
+		 */
+		break;
+	}
+}
+
+static void ath10k_pci_irq_disable(struct ath10k *ar)
+{
+	ath10k_ce_disable_interrupts(ar);
+	ath10k_pci_disable_and_clear_legacy_irq(ar);
+	ath10k_pci_irq_msi_fw_mask(ar);
+}
+
+static void ath10k_pci_irq_sync(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int i;
+
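+	/* num_msi_intrs is 0 when legacy interrupts are used, so synchronize
+	 * at least the one shared interrupt line.
+	 */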
+	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
+		synchronize_irq(ar_pci->pdev->irq + i);
+}
+
+static void ath10k_pci_irq_enable(struct ath10k *ar)
+{
+	ath10k_ce_enable_interrupts(ar);
+	ath10k_pci_enable_legacy_irq(ar);
+	ath10k_pci_irq_msi_fw_unmask(ar);
+}
+
+static int ath10k_pci_hif_start(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
+
+	ath10k_pci_irq_enable(ar);
+	ath10k_pci_rx_post(ar);
+
+	pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
+				   ar_pci->link_ctl);
+
+	return 0;
+}
+
+static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
+{
+	struct ath10k *ar;
+	struct ath10k_ce_pipe *ce_pipe;
+	struct ath10k_ce_ring *ce_ring;
+	struct sk_buff *skb;
+	int i;
+
+	ar = pci_pipe->hif_ce_state;
+	ce_pipe = pci_pipe->ce_hdl;
+	ce_ring = ce_pipe->dest_ring;
+
+	if (!ce_ring)
+		return;
+
+	if (!pci_pipe->buf_sz)
+		return;
+
+	for (i = 0; i < ce_ring->nentries; i++) {
+		skb = ce_ring->per_transfer_context[i];
+		if (!skb)
+			continue;
+
+		ce_ring->per_transfer_context[i] = NULL;
+
+		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
+				 skb->len + skb_tailroom(skb),
+				 DMA_FROM_DEVICE);
+		dev_kfree_skb_any(skb);
+	}
+}
+
+static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
+{
+	struct ath10k *ar;
+	struct ath10k_pci *ar_pci;
+	struct ath10k_ce_pipe *ce_pipe;
+	struct ath10k_ce_ring *ce_ring;
+	struct sk_buff *skb;
+	int i;
+
+	ar = pci_pipe->hif_ce_state;
+	ar_pci = ath10k_pci_priv(ar);
+	ce_pipe = pci_pipe->ce_hdl;
+	ce_ring = ce_pipe->src_ring;
+
+	if (!ce_ring)
+		return;
+
+	if (!pci_pipe->buf_sz)
+		return;
+
+	for (i = 0; i < ce_ring->nentries; i++) {
+		skb = ce_ring->per_transfer_context[i];
+		if (!skb)
+			continue;
+
+		ce_ring->per_transfer_context[i] = NULL;
+
+		ath10k_htc_tx_completion_handler(ar, skb);
+	}
+}
+
+/*
+ * Cleanup residual buffers for device shutdown:
+ *    buffers that were enqueued for receive
+ *    buffers that were to be sent
+ * Note: Buffers that had completed but which were
+ * not yet processed are on a completion queue. They
+ * are handled when the completion thread shuts down.
+ */
+static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int pipe_num;
+
+	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
+		struct ath10k_pci_pipe *pipe_info;
+
+		pipe_info = &ar_pci->pipe_info[pipe_num];
+		ath10k_pci_rx_pipe_cleanup(pipe_info);
+		ath10k_pci_tx_pipe_cleanup(pipe_info);
+	}
+}
+
+static void ath10k_pci_ce_deinit(struct ath10k *ar)
+{
+	int i;
+
+	for (i = 0; i < CE_COUNT; i++)
+		ath10k_ce_deinit_pipe(ar, i);
+}
+
+static void ath10k_pci_flush(struct ath10k *ar)
+{
+	ath10k_pci_kill_tasklet(ar);
+	ath10k_pci_buffer_cleanup(ar);
+}
+
+static void ath10k_pci_hif_stop(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	unsigned long flags;
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
+
+	/* Most likely the device has HTT Rx ring configured. The only way to
+	 * prevent the device from accessing (and possible corrupting) host
+	 * memory is to reset the chip now.
+	 *
+	 * There's also no known way of masking MSI interrupts on the device.
+	 * For ranged MSI the CE-related interrupts can be masked. However
+	 * regardless how many MSI interrupts are assigned the first one
+	 * is always used for firmware indications (crashes) and cannot be
+	 * masked. To prevent the device from asserting the interrupt reset it
+	 * before proceeding with cleanup.
+	 */
+	ath10k_pci_safe_chip_reset(ar);
+
+	ath10k_pci_irq_disable(ar);
+	ath10k_pci_irq_sync(ar);
+	ath10k_pci_flush(ar);
+
+	spin_lock_irqsave(&ar_pci->ps_lock, flags);
+	WARN_ON(ar_pci->ps_wake_refcount > 0);
+	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+}
+
+static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
+					   void *req, u32 req_len,
+					   void *resp, u32 *resp_len)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
+	struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
+	struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
+	struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
+	dma_addr_t req_paddr = 0;
+	dma_addr_t resp_paddr = 0;
+	struct bmi_xfer xfer = {};
+	void *treq, *tresp = NULL;
+	int ret = 0;
+
+	might_sleep();
+
+	if (resp && !resp_len)
+		return -EINVAL;
+
+	if (resp && resp_len && *resp_len == 0)
+		return -EINVAL;
+
+	treq = kmemdup(req, req_len, GFP_KERNEL);
+	if (!treq)
+		return -ENOMEM;
+
+	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
+	ret = dma_mapping_error(ar->dev, req_paddr);
+	if (ret) {
+		ret = -EIO;
+		goto err_dma;
+	}
+
+	if (resp && resp_len) {
+		tresp = kzalloc(*resp_len, GFP_KERNEL);
+		if (!tresp) {
+			ret = -ENOMEM;
+			goto err_req;
+		}
+
+		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
+					    DMA_FROM_DEVICE);
+		ret = dma_mapping_error(ar->dev, resp_paddr);
+		if (ret) {
+			ret = -EIO;
+			goto err_req;
+		}
+
+		xfer.wait_for_resp = true;
+		xfer.resp_len = 0;
+
+		ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
+	}
+
+	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
+	if (ret)
+		goto err_resp;
+
+	ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
+	if (ret) {
+		u32 unused_buffer;
+		unsigned int unused_nbytes;
+		unsigned int unused_id;
+
+		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
+					   &unused_nbytes, &unused_id);
+	} else {
+		/* the transfer completed, we did not time out */
+		ret = 0;
+	}
+
+err_resp:
+	if (resp) {
+		u32 unused_buffer;
+
+		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
+		dma_unmap_single(ar->dev, resp_paddr,
+				 *resp_len, DMA_FROM_DEVICE);
+	}
+err_req:
+	dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
+
+	if (ret == 0 && resp_len) {
+		*resp_len = min(*resp_len, xfer.resp_len);
+		memcpy(resp, tresp, xfer.resp_len);
+	}
+err_dma:
+	kfree(treq);
+	kfree(tresp);
+
+	return ret;
+}
+
+static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
+{
+	struct bmi_xfer *xfer;
+
+	if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer))
+		return;
+
+	xfer->tx_done = true;
+}
+
+static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
+{
+	struct ath10k *ar = ce_state->ar;
+	struct bmi_xfer *xfer;
+	u32 ce_data;
+	unsigned int nbytes;
+	unsigned int transfer_id;
+	unsigned int flags;
+
+	if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
+					  &nbytes, &transfer_id, &flags))
+		return;
+
+	if (WARN_ON_ONCE(!xfer))
+		return;
+
+	if (!xfer->wait_for_resp) {
+		ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
+		return;
+	}
+
+	xfer->resp_len = nbytes;
+	xfer->rx_done = true;
+}
+
+static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
+			       struct ath10k_ce_pipe *rx_pipe,
+			       struct bmi_xfer *xfer)
+{
+	unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
+
+	while (time_before_eq(jiffies, timeout)) {
+		ath10k_pci_bmi_send_done(tx_pipe);
+		ath10k_pci_bmi_recv_data(rx_pipe);
+
+		if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp))
+			return 0;
+
+		schedule();
+	}
+
+	return -ETIMEDOUT;
+}
+
+/*
+ * Send an interrupt to the device to wake up the Target CPU
+ * so it has an opportunity to notice any changed state.
+ */
+static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
+{
+	u32 addr, val;
+
+	addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS;
+	val = ath10k_pci_read32(ar, addr);
+	val |= CORE_CTRL_CPU_INTR_MASK;
+	ath10k_pci_write32(ar, addr, val);
+
+	return 0;
+}
+
+static int ath10k_pci_get_num_banks(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+	switch (ar_pci->pdev->device) {
+	case QCA988X_2_0_DEVICE_ID:
+	case QCA99X0_2_0_DEVICE_ID:
+		return 1;
+	case QCA6164_2_1_DEVICE_ID:
+	case QCA6174_2_1_DEVICE_ID:
+		switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
+		case QCA6174_HW_1_0_CHIP_ID_REV:
+		case QCA6174_HW_1_1_CHIP_ID_REV:
+		case QCA6174_HW_2_1_CHIP_ID_REV:
+		case QCA6174_HW_2_2_CHIP_ID_REV:
+			return 3;
+		case QCA6174_HW_1_3_CHIP_ID_REV:
+			return 2;
+		case QCA6174_HW_3_0_CHIP_ID_REV:
+		case QCA6174_HW_3_1_CHIP_ID_REV:
+		case QCA6174_HW_3_2_CHIP_ID_REV:
+			return 9;
+		}
+		break;
+	case QCA9377_1_0_DEVICE_ID:
+		return 2;
+	}
+
+	ath10k_warn(ar, "unknown number of banks, assuming 1\n");
+	return 1;
+}
+
+static int ath10k_pci_init_config(struct ath10k *ar)
+{
+	u32 interconnect_targ_addr;
+	u32 pcie_state_targ_addr = 0;
+	u32 pipe_cfg_targ_addr = 0;
+	u32 svc_to_pipe_map = 0;
+	u32 pcie_config_flags = 0;
+	u32 ealloc_value;
+	u32 ealloc_targ_addr;
+	u32 flag2_value;
+	u32 flag2_targ_addr;
+	int ret = 0;
+
+	/* Download to Target the CE Config and the service-to-CE map */
+	interconnect_targ_addr =
+		host_interest_item_address(HI_ITEM(hi_interconnect_state));
+
+	/* Supply Target-side CE configuration */
+	ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
+				     &pcie_state_targ_addr);
+	if (ret != 0) {
+		ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
+		return ret;
+	}
+
+	if (pcie_state_targ_addr == 0) {
+		ret = -EIO;
+		ath10k_err(ar, "Invalid pcie state addr\n");
+		return ret;
+	}
+
+	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
+					  offsetof(struct pcie_state,
+						   pipe_cfg_addr)),
+				     &pipe_cfg_targ_addr);
+	if (ret != 0) {
+		ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
+		return ret;
+	}
+
+	if (pipe_cfg_targ_addr == 0) {
+		ret = -EIO;
+		ath10k_err(ar, "Invalid pipe cfg addr\n");
+		return ret;
+	}
+
+	ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
+					target_ce_config_wlan,
+					sizeof(struct ce_pipe_config) *
+					NUM_TARGET_CE_CONFIG_WLAN);
+
+	if (ret != 0) {
+		ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
+		return ret;
+	}
+
+	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
+					  offsetof(struct pcie_state,
+						   svc_to_pipe_map)),
+				     &svc_to_pipe_map);
+	if (ret != 0) {
+		ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
+		return ret;
+	}
+
+	if (svc_to_pipe_map == 0) {
+		ret = -EIO;
+		ath10k_err(ar, "Invalid svc_to_pipe map\n");
+		return ret;
+	}
+
+	ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
+					target_service_to_ce_map_wlan,
+					sizeof(target_service_to_ce_map_wlan));
+	if (ret != 0) {
+		ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
+		return ret;
+	}
+
+	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
+					  offsetof(struct pcie_state,
+						   config_flags)),
+				     &pcie_config_flags);
+	if (ret != 0) {
+		ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
+		return ret;
+	}
+
+	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
+
+	ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
+					   offsetof(struct pcie_state,
+						    config_flags)),
+				      pcie_config_flags);
+	if (ret != 0) {
+		ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
+		return ret;
+	}
+
+	/* configure early allocation */
+	ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
+
+	ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
+	if (ret != 0) {
+		ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
+		return ret;
+	}
+
+	/* first bank is switched to IRAM */
+	ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
+			 HI_EARLY_ALLOC_MAGIC_MASK);
+	ealloc_value |= ((ath10k_pci_get_num_banks(ar) <<
+			  HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
+			 HI_EARLY_ALLOC_IRAM_BANKS_MASK);
+
+	ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
+	if (ret != 0) {
+		ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
+		return ret;
+	}
+
+	/* Tell Target to proceed with initialization */
+	flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
+
+	ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
+	if (ret != 0) {
+		ath10k_err(ar, "Failed to get option val: %d\n", ret);
+		return ret;
+	}
+
+	flag2_value |= HI_OPTION_EARLY_CFG_DONE;
+
+	ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
+	if (ret != 0) {
+		ath10k_err(ar, "Failed to set option val: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void ath10k_pci_override_ce_config(struct ath10k *ar)
+{
+	struct ce_attr *attr;
+	struct ce_pipe_config *config;
+
+	/* For QCA6174 we're overriding the Copy Engine 5 configuration,
+	 * since it is currently used for another feature.
+	 */
+
+	/* Override Host's Copy Engine 5 configuration */
+	attr = &host_ce_config_wlan[5];
+	attr->src_sz_max = 0;
+	attr->dest_nentries = 0;
+
+	/* Override Target firmware's Copy Engine configuration */
+	config = &target_ce_config_wlan[5];
+	config->pipedir = __cpu_to_le32(PIPEDIR_OUT);
+	config->nbytes_max = __cpu_to_le32(2048);
+
+	/* Map from service/endpoint to Copy Engine */
+	target_service_to_ce_map_wlan[15].pipenum = __cpu_to_le32(1);
+}
+
+static int ath10k_pci_alloc_pipes(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct ath10k_pci_pipe *pipe;
+	int i, ret;
+
+	for (i = 0; i < CE_COUNT; i++) {
+		pipe = &ar_pci->pipe_info[i];
+		pipe->ce_hdl = &ar_pci->ce_states[i];
+		pipe->pipe_num = i;
+		pipe->hif_ce_state = ar;
+
+		ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
+		if (ret) {
+			ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
+				   i, ret);
+			return ret;
+		}
+
+		/* Last CE is Diagnostic Window */
+		if (i == CE_DIAG_PIPE) {
+			ar_pci->ce_diag = pipe->ce_hdl;
+			continue;
+		}
+
+		pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max);
+	}
+
+	return 0;
+}
+
+static void ath10k_pci_free_pipes(struct ath10k *ar)
+{
+	int i;
+
+	for (i = 0; i < CE_COUNT; i++)
+		ath10k_ce_free_pipe(ar, i);
+}
+
+static int ath10k_pci_init_pipes(struct ath10k *ar)
+{
+	int i, ret;
+
+	for (i = 0; i < CE_COUNT; i++) {
+		ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
+		if (ret) {
+			ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
+				   i, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
+{
+	return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
+	       FW_IND_EVENT_PENDING;
+}
+
+static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
+{
+	u32 val;
+
+	val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
+	val &= ~FW_IND_EVENT_PENDING;
+	ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
+}
+
+/* this function effectively clears target memory controller assert line */
+static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
+{
+	u32 val;
+
+	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
+	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
+			       val | SOC_RESET_CONTROL_SI0_RST_MASK);
+	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
+
+	msleep(10);
+
+	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
+	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
+			       val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
+	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
+
+	msleep(10);
+}
+
+static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
+{
+	u32 val;
+
+	ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
+
+	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+				SOC_RESET_CONTROL_ADDRESS);
+	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
+			   val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
+}
+
+static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
+{
+	u32 val;
+
+	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+				SOC_RESET_CONTROL_ADDRESS);
+
+	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
+			   val | SOC_RESET_CONTROL_CE_RST_MASK);
+	msleep(10);
+	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
+			   val & ~SOC_RESET_CONTROL_CE_RST_MASK);
+}
+
+static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
+{
+	u32 val;
+
+	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+				SOC_LF_TIMER_CONTROL0_ADDRESS);
+	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
+			   SOC_LF_TIMER_CONTROL0_ADDRESS,
+			   val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
+}
+
+static int ath10k_pci_warm_reset(struct ath10k *ar)
+{
+	int ret;
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");
+
+	spin_lock_bh(&ar->data_lock);
+	ar->stats.fw_warm_reset_counter++;
+	spin_unlock_bh(&ar->data_lock);
+
+	ath10k_pci_irq_disable(ar);
+
+	/* Make sure the target CPU is not doing anything dangerous, e.g. if it
+	 * were to access copy engine while host performs copy engine reset
+	 * then it is possible for the device to confuse pci-e controller to
+	 * the point of bringing host system to a complete stop (i.e. hang).
+	 */
+	ath10k_pci_warm_reset_si0(ar);
+	ath10k_pci_warm_reset_cpu(ar);
+	ath10k_pci_init_pipes(ar);
+	ath10k_pci_wait_for_target_init(ar);
+
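+	/* With the target CPU quiesced, run the actual reset sequence: stop
+	 * the low frequency timer, reset the copy engine and CPU, then
+	 * re-initialize the pipes and wait for the firmware indicator again.
+	 */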
+	ath10k_pci_warm_reset_clear_lf(ar);
+	ath10k_pci_warm_reset_ce(ar);
+	ath10k_pci_warm_reset_cpu(ar);
+	ath10k_pci_init_pipes(ar);
+
+	ret = ath10k_pci_wait_for_target_init(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
+		return ret;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");
+
+	return 0;
+}
+
+static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
+{
+	if (QCA_REV_988X(ar) || QCA_REV_6174(ar)) {
+		return ath10k_pci_warm_reset(ar);
+	} else if (QCA_REV_99X0(ar)) {
+		ath10k_pci_irq_disable(ar);
+		return ath10k_pci_qca99x0_chip_reset(ar);
+	} else {
+		return -ENOTSUPP;
+	}
+}
+
+static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
+{
+	int i, ret;
+	u32 val;
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");
+
+	/* Some hardware revisions (e.g. CUS223v2) have issues with cold reset.
+	 * It is thus preferred to use warm reset which is safer but may not be
+	 * able to recover the device from all possible fail scenarios.
+	 *
+	 * Warm reset doesn't always work on first try so attempt it a few
+	 * times before giving up.
+	 */
+	for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
+		ret = ath10k_pci_warm_reset(ar);
+		if (ret) {
+			ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n",
+				    i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
+				    ret);
+			continue;
+		}
+
+		/* FIXME: Sometimes copy engine doesn't recover after warm
+		 * reset. In most cases this needs cold reset. In some of these
+		 * cases the device is in such a state that a cold reset may
+		 * lock up the host.
+		 *
+		 * Reading any host interest register via copy engine is
+		 * sufficient to verify if device is capable of booting
+		 * firmware blob.
+		 */
+		ret = ath10k_pci_init_pipes(ar);
+		if (ret) {
+			ath10k_warn(ar, "failed to init copy engine: %d\n",
+				    ret);
+			continue;
+		}
+
+		ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
+					     &val);
+		if (ret) {
+			ath10k_warn(ar, "failed to poke copy engine: %d\n",
+				    ret);
+			continue;
+		}
+
+		ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
+		return 0;
+	}
+
+	if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
+		ath10k_warn(ar, "refusing cold reset as requested\n");
+		return -EPERM;
+	}
+
+	ret = ath10k_pci_cold_reset(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
+		return ret;
+	}
+
+	ret = ath10k_pci_wait_for_target_init(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
+			    ret);
+		return ret;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");
+
+	return 0;
+}
+
+static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
+{
+	int ret;
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");
+
+	/* FIXME: QCA6174 requires cold + warm reset to work. */
+
+	ret = ath10k_pci_cold_reset(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
+		return ret;
+	}
+
+	ret = ath10k_pci_wait_for_target_init(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
+			    ret);
+		return ret;
+	}
+
+	ret = ath10k_pci_warm_reset(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to warm reset: %d\n", ret);
+		return ret;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n");
+
+	return 0;
+}
+
+static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)
+{
+	int ret;
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n");
+
+	ret = ath10k_pci_cold_reset(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
+		return ret;
+	}
+
+	ret = ath10k_pci_wait_for_target_init(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
+			    ret);
+		return ret;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n");
+
+	return 0;
+}
+
+static int ath10k_pci_chip_reset(struct ath10k *ar)
+{
+	if (QCA_REV_988X(ar))
+		return ath10k_pci_qca988x_chip_reset(ar);
+	else if (QCA_REV_6174(ar))
+		return ath10k_pci_qca6174_chip_reset(ar);
+	else if (QCA_REV_9377(ar))
+		return ath10k_pci_qca6174_chip_reset(ar);
+	else if (QCA_REV_99X0(ar))
+		return ath10k_pci_qca99x0_chip_reset(ar);
+	else
+		return -ENOTSUPP;
+}
+
+static int ath10k_pci_hif_power_up(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int ret;
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
+
+	pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
+				  &ar_pci->link_ctl);
+	pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
+				   ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);
+
+	/*
+	 * Bring the target up cleanly.
+	 *
+	 * The target may be in an undefined state with an AUX-powered Target
+	 * and a Host in WoW mode. If the Host crashes, loses power, or is
+	 * restarted (without unloading the driver) then the Target is left
+	 * (aux) powered and running. On a subsequent driver load, the Target
+	 * is in an unexpected state. We try to catch that here in order to
+	 * reset the Target and retry the probe.
+	 */
+	ret = ath10k_pci_chip_reset(ar);
+	if (ret) {
+		if (ath10k_pci_has_fw_crashed(ar)) {
+			ath10k_warn(ar, "firmware crashed during chip reset\n");
+			ath10k_pci_fw_crashed_clear(ar);
+			ath10k_pci_fw_crashed_dump(ar);
+		}
+
+		ath10k_err(ar, "failed to reset chip: %d\n", ret);
+		goto err_sleep;
+	}
+
+	ret = ath10k_pci_init_pipes(ar);
+	if (ret) {
+		ath10k_err(ar, "failed to initialize CE: %d\n", ret);
+		goto err_sleep;
+	}
+
+	ret = ath10k_pci_init_config(ar);
+	if (ret) {
+		ath10k_err(ar, "failed to setup init config: %d\n", ret);
+		goto err_ce;
+	}
+
+	ret = ath10k_pci_wake_target_cpu(ar);
+	if (ret) {
+		ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
+		goto err_ce;
+	}
+
+	return 0;
+
+err_ce:
+	ath10k_pci_ce_deinit(ar);
+
+err_sleep:
+	return ret;
+}
+
+static void ath10k_pci_hif_power_down(struct ath10k *ar)
+{
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
+
+	/* Currently hif_power_up performs effectively a reset and hif_stop
+	 * resets the chip as well so there's no point in resetting here.
+	 */
+}
+
+#ifdef CONFIG_PM
+
+static int ath10k_pci_hif_suspend(struct ath10k *ar)
+{
+	/* The grace timer can still be counting down and ar->ps_awake be true.
+	 * It is known that the device may be asleep after resuming regardless
+	 * of the SoC powersave state before suspending. Hence make sure the
+	 * device is asleep before proceeding.
+	 */
+	ath10k_pci_sleep_sync(ar);
+
+	return 0;
+}
+
+static int ath10k_pci_hif_resume(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct pci_dev *pdev = ar_pci->pdev;
+	u32 val;
+	int ret = 0;
+
+	if (ar_pci->pci_ps == 0) {
+		ret = ath10k_pci_force_wake(ar);
+		if (ret) {
+			ath10k_err(ar, "failed to wake up target: %d\n", ret);
+			return ret;
+		}
+	}
+
+	/* Suspend/Resume resets the PCI configuration space, so we have to
+	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
+	 * from interfering with C3 CPU state. pci_restore_state won't help
+	 * here since it only restores the first 64 bytes of the PCI config header.
+	 */
+	pci_read_config_dword(pdev, 0x40, &val);
+	if ((val & 0x0000ff00) != 0)
+		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
+
+	return ret;
+}
+#endif
+
+static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
+	.tx_sg			= ath10k_pci_hif_tx_sg,
+	.diag_read		= ath10k_pci_hif_diag_read,
+	.diag_write		= ath10k_pci_diag_write_mem,
+	.exchange_bmi_msg	= ath10k_pci_hif_exchange_bmi_msg,
+	.start			= ath10k_pci_hif_start,
+	.stop			= ath10k_pci_hif_stop,
+	.map_service_to_pipe	= ath10k_pci_hif_map_service_to_pipe,
+	.get_default_pipe	= ath10k_pci_hif_get_default_pipe,
+	.send_complete_check	= ath10k_pci_hif_send_complete_check,
+	.get_free_queue_number	= ath10k_pci_hif_get_free_queue_number,
+	.power_up		= ath10k_pci_hif_power_up,
+	.power_down		= ath10k_pci_hif_power_down,
+	.read32			= ath10k_pci_read32,
+	.write32		= ath10k_pci_write32,
+#ifdef CONFIG_PM
+	.suspend		= ath10k_pci_hif_suspend,
+	.resume			= ath10k_pci_hif_resume,
+#endif
+};
+
+static void ath10k_pci_ce_tasklet(unsigned long ptr)
+{
+	struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
+	struct ath10k_pci *ar_pci = pipe->ar_pci;
+
+	ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
+}
+
+static void ath10k_msi_err_tasklet(unsigned long data)
+{
+	struct ath10k *ar = (struct ath10k *)data;
+
+	if (!ath10k_pci_has_fw_crashed(ar)) {
+		ath10k_warn(ar, "received unsolicited fw crash interrupt\n");
+		return;
+	}
+
+	ath10k_pci_irq_disable(ar);
+	ath10k_pci_fw_crashed_clear(ar);
+	ath10k_pci_fw_crashed_dump(ar);
+}
+
+/*
+ * Handler for a per-engine interrupt on a PARTICULAR CE.
+ * This is used in cases where each CE has a private MSI interrupt.
+ */
+static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
+{
+	struct ath10k *ar = arg;
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
+
+	if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
+		ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
+			    ce_id);
+		return IRQ_HANDLED;
+	}
+
+	/*
+	 * NOTE: We are able to derive ce_id from irq because we
+	 * use a one-to-one mapping for CE's 0..5.
+	 * CE's 6 & 7 do not use interrupts at all.
+	 *
+	 * This mapping must be kept in sync with the mapping
+	 * used by firmware.
+	 */
+	tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
+{
+	struct ath10k *ar = arg;
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+	tasklet_schedule(&ar_pci->msi_fw_err);
+	return IRQ_HANDLED;
+}
+
+/*
+ * Top-level interrupt handler for all PCI interrupts from a Target.
+ * When a block of MSI interrupts is allocated, this top-level handler
+ * is not used; instead, we directly call the correct sub-handler.
+ */
+static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
+{
+	struct ath10k *ar = arg;
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int ret;
+
+	if (ar_pci->pci_ps == 0) {
+		ret = ath10k_pci_force_wake(ar);
+		if (ret) {
+			ath10k_warn(ar, "failed to wake device up on irq: %d\n",
+				    ret);
+			return IRQ_NONE;
+		}
+	}
+
+	if (ar_pci->num_msi_intrs == 0) {
+		if (!ath10k_pci_irq_pending(ar))
+			return IRQ_NONE;
+
+		ath10k_pci_disable_and_clear_legacy_irq(ar);
+	}
+
+	tasklet_schedule(&ar_pci->intr_tq);
+
+	return IRQ_HANDLED;
+}
+
+static void ath10k_pci_tasklet(unsigned long data)
+{
+	struct ath10k *ar = (struct ath10k *)data;
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+	if (ath10k_pci_has_fw_crashed(ar)) {
+		ath10k_pci_irq_disable(ar);
+		ath10k_pci_fw_crashed_clear(ar);
+		ath10k_pci_fw_crashed_dump(ar);
+		return;
+	}
+
+	ath10k_ce_per_engine_service_any(ar);
+
+	/* Re-enable legacy irq that was disabled in the irq handler */
+	if (ar_pci->num_msi_intrs == 0)
+		ath10k_pci_enable_legacy_irq(ar);
+}
+
+static int ath10k_pci_request_irq_msix(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int ret, i;
+
+	ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
+			  ath10k_pci_msi_fw_handler,
+			  IRQF_SHARED, "ath10k_pci", ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to request MSI-X fw irq %d: %d\n",
+			    ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
+		return ret;
+	}
+
+	for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
+		ret = request_irq(ar_pci->pdev->irq + i,
+				  ath10k_pci_per_engine_handler,
+				  IRQF_SHARED, "ath10k_pci", ar);
+		if (ret) {
+			ath10k_warn(ar, "failed to request MSI-X ce irq %d: %d\n",
+				    ar_pci->pdev->irq + i, ret);
+
+			for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
+				free_irq(ar_pci->pdev->irq + i, ar);
+
+			free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int ath10k_pci_request_irq_msi(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int ret;
+
+	ret = request_irq(ar_pci->pdev->irq,
+			  ath10k_pci_interrupt_handler,
+			  IRQF_SHARED, "ath10k_pci", ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
+			    ar_pci->pdev->irq, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int ret;
+
+	ret = request_irq(ar_pci->pdev->irq,
+			  ath10k_pci_interrupt_handler,
+			  IRQF_SHARED, "ath10k_pci", ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
+			    ar_pci->pdev->irq, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int ath10k_pci_request_irq(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+	switch (ar_pci->num_msi_intrs) {
+	case 0:
+		return ath10k_pci_request_irq_legacy(ar);
+	case 1:
+		return ath10k_pci_request_irq_msi(ar);
+	default:
+		return ath10k_pci_request_irq_msix(ar);
+	}
+}
+
+static void ath10k_pci_free_irq(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int i;
+
+	/* There's at least one interrupt regardless of whether it's a legacy INTR,
+	 * MSI or MSI-X */
+	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
+		free_irq(ar_pci->pdev->irq + i, ar);
+}
+
+static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int i;
+
+	tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
+	tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
+		     (unsigned long)ar);
+
+	for (i = 0; i < CE_COUNT; i++) {
+		ar_pci->pipe_info[i].ar_pci = ar_pci;
+		tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
+			     (unsigned long)&ar_pci->pipe_info[i]);
+	}
+}
+
+static int ath10k_pci_init_irq(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int ret;
+
+	ath10k_pci_init_irq_tasklets(ar);
+
+	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
+		ath10k_info(ar, "limiting irq mode to: %d\n",
+			    ath10k_pci_irq_mode);
+
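+	/* num_msi_intrs selects the interrupt model used by
+	 * ath10k_pci_request_irq(): 0 = legacy interrupts, 1 = MSI,
+	 * >1 = MSI-X (one vector per copy engine plus the firmware vector).
+	 */
+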
+	/* Try MSI-X */
+	if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO) {
+		ar_pci->num_msi_intrs = MSI_ASSIGN_CE_MAX + 1;
+		ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
+					   ar_pci->num_msi_intrs);
+		if (ret > 0)
+			return 0;
+
+		/* fall-through */
+	}
+
+	/* Try MSI */
+	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
+		ar_pci->num_msi_intrs = 1;
+		ret = pci_enable_msi(ar_pci->pdev);
+		if (ret == 0)
+			return 0;
+
+		/* fall-through */
+	}
+
+	/* Try legacy irq
+	 *
+	 * A potential race occurs here: the CORE_BASE write depends on the
+	 * target correctly decoding the AXI address, but the host won't know
+	 * when the target has written BAR to CORE_CTRL.  This write might get
+	 * lost if the target has NOT written BAR.  For now, fix the race by
+	 * repeating the write in the synchronization check below. */
+	ar_pci->num_msi_intrs = 0;
+
+	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
+			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
+
+	return 0;
+}
+
+static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
+{
+	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
+			   0);
+}
+
+static int ath10k_pci_deinit_irq(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+	switch (ar_pci->num_msi_intrs) {
+	case 0:
+		ath10k_pci_deinit_irq_legacy(ar);
+		break;
+	default:
+		pci_disable_msi(ar_pci->pdev);
+		break;
+	}
+
+	return 0;
+}
+
+static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	unsigned long timeout;
+	u32 val;
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n");
+
+	timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
+
+	do {
+		val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
+
+		ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
+			   val);
+
+		/* target should never return this */
+		if (val == 0xffffffff)
+			continue;
+
+		/* the device has crashed so don't bother trying anymore */
+		if (val & FW_IND_EVENT_PENDING)
+			break;
+
+		if (val & FW_IND_INITIALIZED)
+			break;
+
+		if (ar_pci->num_msi_intrs == 0)
+			/* Fix potential race by repeating CORE_BASE writes */
+			ath10k_pci_enable_legacy_irq(ar);
+
+		mdelay(10);
+	} while (time_before(jiffies, timeout));
+
+	ath10k_pci_disable_and_clear_legacy_irq(ar);
+	ath10k_pci_irq_msi_fw_mask(ar);
+
+	if (val == 0xffffffff) {
+		ath10k_err(ar, "failed to read device register, device is gone\n");
+		return -EIO;
+	}
+
+	if (val & FW_IND_EVENT_PENDING) {
+		ath10k_warn(ar, "device has crashed during init\n");
+		return -ECOMM;
+	}
+
+	if (!(val & FW_IND_INITIALIZED)) {
+		ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
+			   val);
+		return -ETIMEDOUT;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
+	return 0;
+}
+
+static int ath10k_pci_cold_reset(struct ath10k *ar)
+{
+	u32 val;
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
+
+	spin_lock_bh(&ar->data_lock);
+
+	ar->stats.fw_cold_reset_counter++;
+
+	spin_unlock_bh(&ar->data_lock);
+
+	/* Put Target, including PCIe, into RESET. */
+	val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
+	val |= 1;
+	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
+
+	/* After writing into SOC_GLOBAL_RESET to put device into
+	 * reset and pulling out of reset pcie may not be stable
+	 * for any immediate pcie register access and cause bus error,
+	 * add delay before any pcie access request to fix this issue.
+	 */
+	msleep(20);
+
+	/* Pull Target, including PCIe, out of RESET. */
+	val &= ~1;
+	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
+
+	msleep(20);
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");
+
+	return 0;
+}
+
+static int ath10k_pci_claim(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct pci_dev *pdev = ar_pci->pdev;
+	int ret;
+
+	pci_set_drvdata(pdev, ar);
+
+	ret = pci_enable_device(pdev);
+	if (ret) {
+		ath10k_err(ar, "failed to enable pci device: %d\n", ret);
+		return ret;
+	}
+
+	ret = pci_request_region(pdev, BAR_NUM, "ath");
+	if (ret) {
+		ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
+			   ret);
+		goto err_device;
+	}
+
+	/* Target expects 32 bit DMA. Enforce it. */
+	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	if (ret) {
+		ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
+		goto err_region;
+	}
+
+	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+	if (ret) {
+		ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n",
+			   ret);
+		goto err_region;
+	}
+
+	pci_set_master(pdev);
+
+	/* Arrange for access to Target SoC registers. */
+	ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM);
+	ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
+	if (!ar_pci->mem) {
+		ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
+		ret = -EIO;
+		goto err_master;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
+	return 0;
+
+err_master:
+	pci_clear_master(pdev);
+
+err_region:
+	pci_release_region(pdev, BAR_NUM);
+
+err_device:
+	pci_disable_device(pdev);
+
+	return ret;
+}
+
+static void ath10k_pci_release(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct pci_dev *pdev = ar_pci->pdev;
+
+	pci_iounmap(pdev, ar_pci->mem);
+	pci_release_region(pdev, BAR_NUM);
+	pci_clear_master(pdev);
+	pci_disable_device(pdev);
+}
+
+static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id)
+{
+	const struct ath10k_pci_supp_chip *supp_chip;
+	int i;
+	u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV);
+
+	for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) {
+		supp_chip = &ath10k_pci_supp_chips[i];
+
+		if (supp_chip->dev_id == dev_id &&
+		    supp_chip->rev_id == rev_id)
+			return true;
+	}
+
+	return false;
+}
+
+static int ath10k_pci_probe(struct pci_dev *pdev,
+			    const struct pci_device_id *pci_dev)
+{
+	int ret = 0;
+	struct ath10k *ar;
+	struct ath10k_pci *ar_pci;
+	enum ath10k_hw_rev hw_rev;
+	u32 chip_id;
+	bool pci_ps;
+
+	switch (pci_dev->device) {
+	case QCA988X_2_0_DEVICE_ID:
+		hw_rev = ATH10K_HW_QCA988X;
+		pci_ps = false;
+		break;
+	case QCA6164_2_1_DEVICE_ID:
+	case QCA6174_2_1_DEVICE_ID:
+		hw_rev = ATH10K_HW_QCA6174;
+		pci_ps = true;
+		break;
+	case QCA99X0_2_0_DEVICE_ID:
+		hw_rev = ATH10K_HW_QCA99X0;
+		pci_ps = false;
+		break;
+	case QCA9377_1_0_DEVICE_ID:
+		hw_rev = ATH10K_HW_QCA9377;
+		pci_ps = true;
+		break;
+	default:
+		WARN_ON(1);
+		return -ENOTSUPP;
+	}
+
+	ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
+				hw_rev, &ath10k_pci_hif_ops);
+	if (!ar) {
+		dev_err(&pdev->dev, "failed to allocate core\n");
+		return -ENOMEM;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
+		   pdev->vendor, pdev->device,
+		   pdev->subsystem_vendor, pdev->subsystem_device);
+
+	ar_pci = ath10k_pci_priv(ar);
+	ar_pci->pdev = pdev;
+	ar_pci->dev = &pdev->dev;
+	ar_pci->ar = ar;
+	ar->dev_id = pci_dev->device;
+	ar_pci->pci_ps = pci_ps;
+
+	ar->id.vendor = pdev->vendor;
+	ar->id.device = pdev->device;
+	ar->id.subsystem_vendor = pdev->subsystem_vendor;
+	ar->id.subsystem_device = pdev->subsystem_device;
+
+	spin_lock_init(&ar_pci->ce_lock);
+	spin_lock_init(&ar_pci->ps_lock);
+
+	setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
+		    (unsigned long)ar);
+	setup_timer(&ar_pci->ps_timer, ath10k_pci_ps_timer,
+		    (unsigned long)ar);
+
+	ret = ath10k_pci_claim(ar);
+	if (ret) {
+		ath10k_err(ar, "failed to claim device: %d\n", ret);
+		goto err_core_destroy;
+	}
+
+	if (QCA_REV_6174(ar) || QCA_REV_9377(ar))
+		ath10k_pci_override_ce_config(ar);
+
+	ret = ath10k_pci_alloc_pipes(ar);
+	if (ret) {
+		ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
+			   ret);
+		goto err_sleep;
+	}
+
+	ath10k_pci_ce_deinit(ar);
+	ath10k_pci_irq_disable(ar);
+
+	if (ar_pci->pci_ps == 0) {
+		ret = ath10k_pci_force_wake(ar);
+		if (ret) {
+			ath10k_warn(ar, "failed to wake up device: %d\n", ret);
+			goto err_free_pipes;
+		}
+	}
+
+	ret = ath10k_pci_init_irq(ar);
+	if (ret) {
+		ath10k_err(ar, "failed to init irqs: %d\n", ret);
+		goto err_free_pipes;
+	}
+
+	ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n",
+		    ath10k_pci_get_irq_method(ar), ar_pci->num_msi_intrs,
+		    ath10k_pci_irq_mode, ath10k_pci_reset_mode);
+
+	ret = ath10k_pci_request_irq(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to request irqs: %d\n", ret);
+		goto err_deinit_irq;
+	}
+
+	ret = ath10k_pci_chip_reset(ar);
+	if (ret) {
+		ath10k_err(ar, "failed to reset chip: %d\n", ret);
+		goto err_free_irq;
+	}
+
+	chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
+	if (chip_id == 0xffffffff) {
+		ath10k_err(ar, "failed to get chip id\n");
+		ret = -ENODEV;
+		goto err_free_irq;
+	}
+
+	if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
+		ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
+			   pdev->device, chip_id);
+		ret = -ENODEV;
+		goto err_free_irq;
+	}
+
+	ret = ath10k_core_register(ar, chip_id);
+	if (ret) {
+		ath10k_err(ar, "failed to register driver core: %d\n", ret);
+		goto err_free_irq;
+	}
+
+	return 0;
+
+err_free_irq:
+	ath10k_pci_free_irq(ar);
+	ath10k_pci_kill_tasklet(ar);
+
+err_deinit_irq:
+	ath10k_pci_deinit_irq(ar);
+
+err_free_pipes:
+	ath10k_pci_free_pipes(ar);
+
+err_sleep:
+	ath10k_pci_sleep_sync(ar);
+	ath10k_pci_release(ar);
+
+err_core_destroy:
+	ath10k_core_destroy(ar);
+
+	return ret;
+}
+
+static void ath10k_pci_remove(struct pci_dev *pdev)
+{
+	struct ath10k *ar = pci_get_drvdata(pdev);
+	struct ath10k_pci *ar_pci;
+
+	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");
+
+	if (!ar)
+		return;
+
+	ar_pci = ath10k_pci_priv(ar);
+
+	if (!ar_pci)
+		return;
+
+	ath10k_core_unregister(ar);
+	ath10k_pci_free_irq(ar);
+	ath10k_pci_kill_tasklet(ar);
+	ath10k_pci_deinit_irq(ar);
+	ath10k_pci_ce_deinit(ar);
+	ath10k_pci_free_pipes(ar);
+	ath10k_pci_sleep_sync(ar);
+	ath10k_pci_release(ar);
+	ath10k_core_destroy(ar);
+}
+
+MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
+
+static struct pci_driver ath10k_pci_driver = {
+	.name = "ath10k_pci",
+	.id_table = ath10k_pci_id_table,
+	.probe = ath10k_pci_probe,
+	.remove = ath10k_pci_remove,
+};
+
+static int __init ath10k_pci_init(void)
+{
+	int ret;
+
+	ret = pci_register_driver(&ath10k_pci_driver);
+	if (ret)
+		printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
+		       ret);
+
+	return ret;
+}
+module_init(ath10k_pci_init);
+
+static void __exit ath10k_pci_exit(void)
+{
+	pci_unregister_driver(&ath10k_pci_driver);
+}
+
+module_exit(ath10k_pci_exit);
+
+MODULE_AUTHOR("Qualcomm Atheros");
+MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
+MODULE_LICENSE("Dual BSD/GPL");
+
+/* QCA988x 2.0 firmware files */
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
+
+/* QCA6174 2.1 firmware files */
+MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
+MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE);
+MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);
+MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE);
+
+/* QCA6174 3.1 firmware files */
+MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
+MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);
+MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
+MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
+
+/* QCA9377 1.0 firmware files */
+MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
+MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" QCA9377_HW_1_0_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
new file mode 100644
index 0000000..f91bf33
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -0,0 +1,262 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _PCI_H_
+#define _PCI_H_
+
+#include <linux/interrupt.h>
+
+#include "hw.h"
+#include "ce.h"
+
+/*
+ * maximum number of bytes that can be handled atomically by DiagRead/DiagWrite
+ */
+#define DIAG_TRANSFER_LIMIT 2048
+
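+/* Tracks a single in-flight BMI request/response exchange.  tx_done and
+ * rx_done are set from the copy engine completion handlers in pci.c while
+ * ath10k_pci_bmi_wait() polls for them.
+ */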
+struct bmi_xfer {
+	bool tx_done;
+	bool rx_done;
+	bool wait_for_resp;
+	u32 resp_len;
+};
+
+/*
+ * PCI-specific Target state
+ *
+ * NOTE: Structure is shared between Host software and Target firmware!
+ *
+ * Much of this may be of interest to the Host so
+ * HOST_INTEREST->hi_interconnect_state points here
+ * (and all members are 32-bit quantities in order to
+ * facilitate Host access). In particular, Host software is
+ * required to initialize pipe_cfg_addr and svc_to_pipe_map.
+ */
+struct pcie_state {
+	/* Pipe configuration Target address */
+	/* NB: ce_pipe_config[CE_COUNT] */
+	u32 pipe_cfg_addr;
+
+	/* Service to pipe map Target address */
+	/* NB: service_to_pipe[PIPE_TO_CE_MAP_CN] */
+	u32 svc_to_pipe_map;
+
+	/* number of MSI interrupts requested */
+	u32 msi_requested;
+
+	/* number of MSI interrupts granted */
+	u32 msi_granted;
+
+	/* Message Signalled Interrupt address */
+	u32 msi_addr;
+
+	/* Base data */
+	u32 msi_data;
+
+	/*
+	 * Data for firmware interrupt;
+	 * MSI data for other interrupts are
+	 * in various SoC registers
+	 */
+	u32 msi_fw_intr_data;
+
+	/* PCIE_PWR_METHOD_* */
+	u32 power_mgmt_method;
+
+	/* PCIE_CONFIG_FLAG_* */
+	u32 config_flags;
+};
+
+/* PCIE_CONFIG_FLAG definitions */
+#define PCIE_CONFIG_FLAG_ENABLE_L1  0x0000001
+
+/* Host software's Copy Engine configuration. */
+#define CE_ATTR_FLAGS 0
+
+/*
+ * Configuration information for a Copy Engine pipe.
+ * Passed from Host to Target during startup (one per CE).
+ *
+ * NOTE: Structure is shared between Host software and Target firmware!
+ */
+struct ce_pipe_config {
+	__le32 pipenum;
+	__le32 pipedir;
+	__le32 nentries;
+	__le32 nbytes_max;
+	__le32 flags;
+	__le32 reserved;
+};
+
+/*
+ * Directions for interconnect pipe configuration.
+ * These definitions may be used during configuration and are shared
+ * between Host and Target.
+ *
+ * Pipe Directions are relative to the Host, so PIPEDIR_IN means
+ * "coming IN over air through Target to Host" as with a WiFi Rx operation.
+ * Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air"
+ * as with a WiFi Tx operation. This is somewhat awkward for the "middle-man"
+ * Target since things that are "PIPEDIR_OUT" are coming IN to the Target
+ * over the interconnect.
+ */
+#define PIPEDIR_NONE    0
+#define PIPEDIR_IN      1  /* Target-->Host, WiFi Rx direction */
+#define PIPEDIR_OUT     2  /* Host->Target, WiFi Tx direction */
+#define PIPEDIR_INOUT   3  /* bidirectional */
+
+/* Establish a mapping between a service/direction and a pipe. */
+struct service_to_pipe {
+	__le32 service_id;
+	__le32 pipedir;
+	__le32 pipenum;
+};
+
+/* Per-pipe state. */
+struct ath10k_pci_pipe {
+	/* Handle of underlying Copy Engine */
+	struct ath10k_ce_pipe *ce_hdl;
+
+	/* Our pipe number; facilitates use of pipe_info ptrs. */
+	u8 pipe_num;
+
+	/* Convenience back pointer to hif_ce_state. */
+	struct ath10k *hif_ce_state;
+
+	size_t buf_sz;
+
+	/* protects compl_free and num_send_allowed */
+	spinlock_t pipe_lock;
+
+	struct ath10k_pci *ar_pci;
+	struct tasklet_struct intr;
+};
+
+struct ath10k_pci_supp_chip {
+	u32 dev_id;
+	u32 rev_id;
+};
+
+struct ath10k_pci {
+	struct pci_dev *pdev;
+	struct device *dev;
+	struct ath10k *ar;
+	void __iomem *mem;
+	size_t mem_len;
+
+	/*
+	 * Number of MSI interrupts granted, 0 --> using legacy PCI line
+	 * interrupts.
+	 */
+	int num_msi_intrs;
+
+	struct tasklet_struct intr_tq;
+	struct tasklet_struct msi_fw_err;
+
+	struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];
+
+	/* Copy Engine used for Diagnostic Accesses */
+	struct ath10k_ce_pipe *ce_diag;
+
+	/* FIXME: document what this really protects */
+	spinlock_t ce_lock;
+
+	/* Map CE id to ce_state */
+	struct ath10k_ce_pipe ce_states[CE_COUNT_MAX];
+	struct timer_list rx_post_retry;
+
+	/* Due to HW quirks it is recommended to disable ASPM during device
+	 * bootup. To do that the original PCI-E Link Control is stored before
+	 * device bootup is executed and re-programmed later.
+	 */
+	u16 link_ctl;
+
+	/* Protects ps_awake and ps_wake_refcount */
+	spinlock_t ps_lock;
+
+	/* The device has a special powersave-oriented register. When device is
+	 * considered asleep it drains less power and driver is forbidden from
+	 * accessing most MMIO registers. If host were to access them without
+	 * waking up the device might scribble over host memory or return
+	 * 0xdeadbeef readouts.
+	 */
+	unsigned long ps_wake_refcount;
+
+	/* Waking up takes some time (up to 2ms in some cases) so it can be bad
+	 * for latency. To mitigate this the device isn't immediately allowed
+	 * to sleep after all references are undone - instead there's a grace
+	 * period after which the powersave register is updated unless some
+	 * activity to/from device happened in the meantime.
+	 *
+	 * Also see comments on ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC.
+	 */
+	struct timer_list ps_timer;
+
+	/* MMIO registers are used to communicate with the device. With
+	 * intensive traffic accessing powersave register would be a bit
+	 * wasteful overhead and would needlessly stall CPU. It is far more
+	 * efficient to rely on a variable in RAM and update it only upon
+	 * powersave register state changes.
+	 */
+	bool ps_awake;
+
+	/* pci power save, disable for QCA988X and QCA99X0.
+	 * Writing 'false' to this variable avoids frequent locking
+	 * on MMIO read/write.
+	 */
+	bool pci_ps;
+};
+
+static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
+{
+	return (struct ath10k_pci *)ar->drv_priv;
+}
+
+#define ATH10K_PCI_RX_POST_RETRY_MS 50
+#define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
+#define PCIE_WAKE_TIMEOUT 30000	/* 30ms */
+#define PCIE_WAKE_LATE_US 10000	/* 10ms */
+
+#define BAR_NUM 0
+
+#define CDC_WAR_MAGIC_STR   0xceef0000
+#define CDC_WAR_DATA_CE     4
+
+/* Wait up to this many Ms for a Diagnostic Access CE operation to complete */
+#define DIAG_ACCESS_CE_TIMEOUT_MS 10
+
+void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value);
+void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val);
+void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val);
+
+u32 ath10k_pci_read32(struct ath10k *ar, u32 offset);
+u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr);
+u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr);
+
+/* QCA6174 is known to have Tx/Rx issues when SOC_WAKE register is poked too
+ * frequently. To avoid this put SoC to sleep after a very conservative grace
+ * period. Adjust with great care.
+ */
+#define ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC 60
+
+#endif /* _PCI_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
new file mode 100644
index 0000000..ca8d168
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -0,0 +1,1215 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _RX_DESC_H_
+#define _RX_DESC_H_
+
+enum rx_attention_flags {
+	RX_ATTENTION_FLAGS_FIRST_MPDU          = 1 << 0,
+	RX_ATTENTION_FLAGS_LAST_MPDU           = 1 << 1,
+	RX_ATTENTION_FLAGS_MCAST_BCAST         = 1 << 2,
+	RX_ATTENTION_FLAGS_PEER_IDX_INVALID    = 1 << 3,
+	RX_ATTENTION_FLAGS_PEER_IDX_TIMEOUT    = 1 << 4,
+	RX_ATTENTION_FLAGS_POWER_MGMT          = 1 << 5,
+	RX_ATTENTION_FLAGS_NON_QOS             = 1 << 6,
+	RX_ATTENTION_FLAGS_NULL_DATA           = 1 << 7,
+	RX_ATTENTION_FLAGS_MGMT_TYPE           = 1 << 8,
+	RX_ATTENTION_FLAGS_CTRL_TYPE           = 1 << 9,
+	RX_ATTENTION_FLAGS_MORE_DATA           = 1 << 10,
+	RX_ATTENTION_FLAGS_EOSP                = 1 << 11,
+	RX_ATTENTION_FLAGS_U_APSD_TRIGGER      = 1 << 12,
+	RX_ATTENTION_FLAGS_FRAGMENT            = 1 << 13,
+	RX_ATTENTION_FLAGS_ORDER               = 1 << 14,
+	RX_ATTENTION_FLAGS_CLASSIFICATION      = 1 << 15,
+	RX_ATTENTION_FLAGS_OVERFLOW_ERR        = 1 << 16,
+	RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR     = 1 << 17,
+	RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL = 1 << 18,
+	RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL      = 1 << 19,
+	RX_ATTENTION_FLAGS_SA_IDX_INVALID      = 1 << 20,
+	RX_ATTENTION_FLAGS_DA_IDX_INVALID      = 1 << 21,
+	RX_ATTENTION_FLAGS_SA_IDX_TIMEOUT      = 1 << 22,
+	RX_ATTENTION_FLAGS_DA_IDX_TIMEOUT      = 1 << 23,
+	RX_ATTENTION_FLAGS_ENCRYPT_REQUIRED    = 1 << 24,
+	RX_ATTENTION_FLAGS_DIRECTED            = 1 << 25,
+	RX_ATTENTION_FLAGS_BUFFER_FRAGMENT     = 1 << 26,
+	RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR     = 1 << 27,
+	RX_ATTENTION_FLAGS_TKIP_MIC_ERR        = 1 << 28,
+	RX_ATTENTION_FLAGS_DECRYPT_ERR         = 1 << 29,
+	RX_ATTENTION_FLAGS_FCS_ERR             = 1 << 30,
+	RX_ATTENTION_FLAGS_MSDU_DONE           = 1 << 31,
+};
+
+struct rx_attention {
+	__le32 flags; /* %RX_ATTENTION_FLAGS_ */
+} __packed;
+
+/*
+ * first_mpdu
+ *		Indicates the first MSDU of the PPDU.  If both first_mpdu
+ *		and last_mpdu are set in the MSDU then this is not an
+ *		A-MPDU frame but a stand-alone MPDU.  Interior MPDUs in an
+ *		A-MPDU shall have both first_mpdu and last_mpdu bits set to
+ *		0.  The PPDU start status will only be valid when this bit
+ *		is set.
+ *
+ * last_mpdu
+ *		Indicates the last MSDU of the last MPDU of the PPDU.  The
+ *		PPDU end status will only be valid when this bit is set.
+ *
+ * mcast_bcast
+ *		Multicast / broadcast indicator.  Only set when the MAC
+ *		address 1 bit 0 is set indicating mcast/bcast and the BSSID
+ *		matches one of the 4 BSSID registers. Only set when
+ *		first_msdu is set.
+ *
+ * peer_idx_invalid
+ *		Indicates no matching entries within the max search
+ *		count.  Only set when first_msdu is set.
+ *
+ * peer_idx_timeout
+ *		Indicates an unsuccessful search for the peer index due to
+ *		timeout.  Only set when first_msdu is set.
+ *
+ * power_mgmt
+ *		Power management bit set in the 802.11 header.  Only set
+ *		when first_msdu is set.
+ *
+ * non_qos
+ *		Set if the packet is not a QoS data frame.  Only set when
+ *		first_msdu is set.
+ *
+ * null_data
+ *		Set if frame type indicates either null data or QoS null
+ *		data format.  Only set when first_msdu is set.
+ *
+ * mgmt_type
+ *		Set if packet is a management packet.  Only set when
+ *		first_msdu is set.
+ *
+ * ctrl_type
+ *		Set if packet is a control packet.  Only set when first_msdu
+ *		is set.
+ *
+ * more_data
+ *		Set if more bit in frame control is set.  Only set when
+ *		first_msdu is set.
+ *
+ * eosp
+ *		Set if the EOSP (end of service period) bit in the QoS
+ *		control field is set.  Only set when first_msdu is set.
+ *
+ * u_apsd_trigger
+ *		Set if packet is U-APSD trigger.  Key table will have bits
+ *		per TID to indicate U-APSD trigger.
+ *
+ * fragment
+ *		Indicates that this is an 802.11 fragment frame.  This is
+ *		set when either the more_frag bit is set in the frame
+ *		control or the fragment number is not zero.  Only set when
+ *		first_msdu is set.
+ *
+ * order
+ *		Set if the order bit in the frame control is set.  Only set
+ *		when first_msdu is set.
+ *
+ * classification
+ *		Indicates that this status has a corresponding MSDU that
+ *		requires FW processing.  The OLE will have classification
+ *		ring mask registers which will indicate the ring(s) for
+ *		packets and descriptors which need FW attention.
+ *
+ * overflow_err
+ *		PCU Receive FIFO does not have enough space to store the
+ *		full receive packet.  Enough space is reserved in the
+ *		receive FIFO so that the status can still be written.  The
+ *		remaining packets of this MPDU's PPDU will be filtered and
+ *		no Ack response will be transmitted.
+ *
+ * msdu_length_err
+ *		Indicates that the MSDU length from the 802.3 encapsulated
+ *		length field extends beyond the MPDU boundary.
+ *
+ * tcp_udp_chksum_fail
+ *		Indicates that the computed checksum (tcp_udp_chksum) did
+ *		not match the checksum in the TCP/UDP header.
+ *
+ * ip_chksum_fail
+ *		Indicates that the computed checksum did not match the
+ *		checksum in the IP header.
+ *
+ * sa_idx_invalid
+ *		Indicates no matching entry was found in the address search
+ *		table for the source MAC address.
+ *
+ * da_idx_invalid
+ *		Indicates no matching entry was found in the address search
+ *		table for the destination MAC address.
+ *
+ * sa_idx_timeout
+ *		Indicates an unsuccessful search for the source MAC address
+ *		due to the expiring of the search timer.
+ *
+ * da_idx_timeout
+ *		Indicates an unsuccessful search for the destination MAC
+ *		address due to the expiring of the search timer.
+ *
+ * encrypt_required
+ *		Indicates that this data type frame is not encrypted even if
+ *		the policy for this MPDU requires encryption as indicated in
+ *		the peer table key type.
+ *
+ * directed
+ *		MPDU is a directed packet which means that the RA matched
+ *		our STA addresses.  In proxySTA it means that the TA matched
+ *		an entry in our address search table with the corresponding
+ *		'no_ack' bit in the address search entry cleared.
+ *
+ * buffer_fragment
+ *		Indicates that at least one of the rx buffers has been
+ *		fragmented.  If set the FW should look at the rx_frag_info
+ *		descriptor described below.
+ *
+ * mpdu_length_err
+ *		Indicates that the MPDU was prematurely terminated
+ *		resulting in a truncated MPDU.  Don't trust the MPDU length
+ *		field.
+ *
+ * tkip_mic_err
+ *		Indicates that the MPDU Michael integrity check failed
+ *
+ * decrypt_err
+ *		Indicates that the MPDU decrypt integrity check failed
+ *
+ * fcs_err
+ *		Indicates that the MPDU FCS check failed
+ *
+ * msdu_done
+ *		If set indicates that the RX packet data, RX header data, RX
+ *		PPDU start descriptor, RX MPDU start/end descriptor, RX MSDU
+ *		start/end descriptors and RX Attention descriptor are all
+ *		valid.  This bit must be in the last octet of the
+ *		descriptor.
+ */
+
+struct rx_frag_info {
+	u8 ring0_more_count;
+	u8 ring1_more_count;
+	u8 ring2_more_count;
+	u8 ring3_more_count;
+} __packed;
+
+/*
+ * ring0_more_count
+ *		Indicates the number of more buffers associated with RX DMA
+ *		ring 0.  Field is filled in by the RX_DMA.
+ *
+ * ring1_more_count
+ *		Indicates the number of more buffers associated with RX DMA
+ *		ring 1. Field is filled in by the RX_DMA.
+ *
+ * ring2_more_count
+ *		Indicates the number of more buffers associated with RX DMA
+ *		ring 2. Field is filled in by the RX_DMA.
+ *
+ * ring3_more_count
+ *		Indicates the number of more buffers associated with RX DMA
+ *		ring 3. Field is filled in by the RX_DMA.
+ */
+
+enum htt_rx_mpdu_encrypt_type {
+	HTT_RX_MPDU_ENCRYPT_WEP40            = 0,
+	HTT_RX_MPDU_ENCRYPT_WEP104           = 1,
+	HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC = 2,
+	HTT_RX_MPDU_ENCRYPT_WEP128           = 3,
+	HTT_RX_MPDU_ENCRYPT_TKIP_WPA         = 4,
+	HTT_RX_MPDU_ENCRYPT_WAPI             = 5,
+	HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2     = 6,
+	HTT_RX_MPDU_ENCRYPT_NONE             = 7,
+};
+
+#define RX_MPDU_START_INFO0_PEER_IDX_MASK     0x000007ff
+#define RX_MPDU_START_INFO0_PEER_IDX_LSB      0
+#define RX_MPDU_START_INFO0_SEQ_NUM_MASK      0x0fff0000
+#define RX_MPDU_START_INFO0_SEQ_NUM_LSB       16
+#define RX_MPDU_START_INFO0_ENCRYPT_TYPE_MASK 0xf0000000
+#define RX_MPDU_START_INFO0_ENCRYPT_TYPE_LSB  28
+#define RX_MPDU_START_INFO0_FROM_DS           (1 << 11)
+#define RX_MPDU_START_INFO0_TO_DS             (1 << 12)
+#define RX_MPDU_START_INFO0_ENCRYPTED         (1 << 13)
+#define RX_MPDU_START_INFO0_RETRY             (1 << 14)
+#define RX_MPDU_START_INFO0_TXBF_H_INFO       (1 << 15)
+
+#define RX_MPDU_START_INFO1_TID_MASK 0xf0000000
+#define RX_MPDU_START_INFO1_TID_LSB  28
+#define RX_MPDU_START_INFO1_DIRECTED (1 << 16)
+
+struct rx_mpdu_start {
+	__le32 info0;
+	union {
+		struct {
+			__le32 pn31_0;
+			__le32 info1; /* %RX_MPDU_START_INFO1_ */
+		} __packed;
+		struct {
+			u8 pn[6];
+		} __packed;
+	} __packed;
+} __packed;
+
+/*
+ * peer_idx
+ *		The index of the address search table which associated with
+ *		the peer table entry corresponding to this MPDU.  Only valid
+ *		when first_msdu is set.
+ *
+ * fr_ds
+ *		Set if the from DS bit is set in the frame control.  Only
+ *		valid when first_msdu is set.
+ *
+ * to_ds
+ *		Set if the to DS bit is set in the frame control.  Only
+ *		valid when first_msdu is set.
+ *
+ * encrypted
+ *		Protected bit from the frame control.  Only valid when
+ *		first_msdu is set.
+ *
+ * retry
+ *		Retry bit from the frame control.  Only valid when
+ *		first_msdu is set.
+ *
+ * txbf_h_info
+ *		The MPDU data will contain H information.  Primarily used
+ *		for debug.
+ *
+ * seq_num
+ *		The sequence number from the 802.11 header.  Only valid when
+ *		first_msdu is set.
+ *
+ * encrypt_type
+ *		Indicates type of decrypt cipher used (as defined in the
+ *		peer table)
+ *		0: WEP40
+ *		1: WEP104
+ *		2: TKIP without MIC
+ *		3: WEP128
+ *		4: TKIP (WPA)
+ *		5: WAPI
+ *		6: AES-CCM (WPA2)
+ *		7: No cipher
+ *		Only valid when first_msdu is set
+ *
+ * pn_31_0
+ *		Bits [31:0] of the PN number extracted from the IV field
+ *		WEP: IV = {key_id_octet, pn2, pn1, pn0}.  Only pn[23:0] is
+ *		valid.
+ *		TKIP: IV = {pn5, pn4, pn3, pn2, key_id_octet, pn0,
+ *		WEPSeed[1], pn1}.  Only pn[47:0] is valid.
+ *		AES-CCM: IV = {pn5, pn4, pn3, pn2, key_id_octet, 0x0, pn1,
+ *		pn0}.  Only pn[47:0] is valid.
+ *		WAPI: IV = {key_id_octet, 0x0, pn15, pn14, pn13, pn12, pn11,
+ *		pn10, pn9, pn8, pn7, pn6, pn5, pn4, pn3, pn2, pn1, pn0}.
+ *		The ext_wapi_pn[127:48] in the rx_msdu_misc descriptor and
+ *		pn[47:0] are valid.
+ *		Only valid when first_msdu is set.
+ *
+ * pn_47_32
+ *		Bits [47:32] of the PN number.   See description for
+ *		pn_31_0.  The remaining PN fields are in the rx_msdu_end
+ *		descriptor
+ *
+ * pn
+ *		Use this field to access the pn without worrying about
+ *		byte-order and bitmasking/bitshifting.
+ *
+ * directed
+ *		See definition in RX attention descriptor
+ *
+ * reserved_2
+ *		Reserved: HW should fill with zero.  FW should ignore.
+ *
+ * tid
+ *		The TID field in the QoS control field
+ */
+
+#define RX_MPDU_END_INFO0_RESERVED_0_MASK     0x00001fff
+#define RX_MPDU_END_INFO0_RESERVED_0_LSB      0
+#define RX_MPDU_END_INFO0_POST_DELIM_CNT_MASK 0x0fff0000
+#define RX_MPDU_END_INFO0_POST_DELIM_CNT_LSB  16
+#define RX_MPDU_END_INFO0_OVERFLOW_ERR        (1 << 13)
+#define RX_MPDU_END_INFO0_LAST_MPDU           (1 << 14)
+#define RX_MPDU_END_INFO0_POST_DELIM_ERR      (1 << 15)
+#define RX_MPDU_END_INFO0_MPDU_LENGTH_ERR     (1 << 28)
+#define RX_MPDU_END_INFO0_TKIP_MIC_ERR        (1 << 29)
+#define RX_MPDU_END_INFO0_DECRYPT_ERR         (1 << 30)
+#define RX_MPDU_END_INFO0_FCS_ERR             (1 << 31)
+
+struct rx_mpdu_end {
+	__le32 info0;
+} __packed;
+
+/*
+ * reserved_0
+ *		Reserved
+ *
+ * overflow_err
+ *		PCU Receive FIFO does not have enough space to store the
+ *		full receive packet.  Enough space is reserved in the
+ *		receive FIFO so that the status can still be written.  The
+ *		remaining packets of this MPDU's PPDU will be filtered and
+ *		no Ack response will be transmitted.
+ *
+ * last_mpdu
+ *		Indicates that this is the last MPDU of a PPDU.
+ *
+ * post_delim_err
+ *		Indicates that a delimiter FCS error occurred after this
+ *		MPDU before the next MPDU.  Only valid when last_msdu is
+ *		set.
+ *
+ * post_delim_cnt
+ *		Count of the delimiters after this MPDU.  This requires the
+ *		last MPDU to be held until all the EOF descriptors have been
+ *		received.  This may be inefficient in the future when
+ *		ML-MIMO is used.  Only valid when last_mpdu is set.
+ *
+ * mpdu_length_err
+ *		See definition in RX attention descriptor
+ *
+ * tkip_mic_err
+ *		See definition in RX attention descriptor
+ *
+ * decrypt_err
+ *		See definition in RX attention descriptor
+ *
+ * fcs_err
+ *		See definition in RX attention descriptor
+ */
+
+#define RX_MSDU_START_INFO0_MSDU_LENGTH_MASK    0x00003fff
+#define RX_MSDU_START_INFO0_MSDU_LENGTH_LSB     0
+#define RX_MSDU_START_INFO0_IP_OFFSET_MASK      0x000fc000
+#define RX_MSDU_START_INFO0_IP_OFFSET_LSB       14
+#define RX_MSDU_START_INFO0_RING_MASK_MASK      0x00f00000
+#define RX_MSDU_START_INFO0_RING_MASK_LSB       20
+#define RX_MSDU_START_INFO0_TCP_UDP_OFFSET_MASK 0x7f000000
+#define RX_MSDU_START_INFO0_TCP_UDP_OFFSET_LSB  24
+
+#define RX_MSDU_START_INFO1_MSDU_NUMBER_MASK    0x000000ff
+#define RX_MSDU_START_INFO1_MSDU_NUMBER_LSB     0
+#define RX_MSDU_START_INFO1_DECAP_FORMAT_MASK   0x00000300
+#define RX_MSDU_START_INFO1_DECAP_FORMAT_LSB    8
+#define RX_MSDU_START_INFO1_SA_IDX_MASK         0x07ff0000
+#define RX_MSDU_START_INFO1_SA_IDX_LSB          16
+#define RX_MSDU_START_INFO1_IPV4_PROTO          (1 << 10)
+#define RX_MSDU_START_INFO1_IPV6_PROTO          (1 << 11)
+#define RX_MSDU_START_INFO1_TCP_PROTO           (1 << 12)
+#define RX_MSDU_START_INFO1_UDP_PROTO           (1 << 13)
+#define RX_MSDU_START_INFO1_IP_FRAG             (1 << 14)
+#define RX_MSDU_START_INFO1_TCP_ONLY_ACK        (1 << 15)
+
+#define RX_MSDU_START_INFO2_DA_IDX_MASK         0x000007ff
+#define RX_MSDU_START_INFO2_DA_IDX_LSB          0
+#define RX_MSDU_START_INFO2_IP_PROTO_FIELD_MASK 0x00ff0000
+#define RX_MSDU_START_INFO2_IP_PROTO_FIELD_LSB  16
+#define RX_MSDU_START_INFO2_DA_BCAST_MCAST      BIT(11)
+
+/* The decapped header (rx_hdr_status) contains the following:
+ *  a) 802.11 header
+ *  [padding to 4 bytes]
+ *  b) HW crypto parameter
+ *     - 0 bytes for no security
+ *     - 4 bytes for WEP
+ *     - 8 bytes for TKIP, AES
+ *  [padding to 4 bytes]
+ *  c) A-MSDU subframe header (14 bytes) if applicable
+ *  d) LLC/SNAP (RFC1042, 8 bytes)
+ *
+ * In case of A-MSDU only the first frame in the sequence contains (a) and (b). */
+enum rx_msdu_decap_format {
+	RX_MSDU_DECAP_RAW = 0,
+
+	/* Note: QoS frames are reported as non-QoS. The rx_hdr_status in
+	 * htt_rx_desc contains the original decapped 802.11 header. */
+	RX_MSDU_DECAP_NATIVE_WIFI = 1,
+
+	/* Payload contains an ethernet header (struct ethhdr). */
+	RX_MSDU_DECAP_ETHERNET2_DIX = 2,
+
+	/* Payload contains two 48-bit addresses and 2-byte length (14 bytes
+	 * total), followed by an RFC1042 header (8 bytes). */
+	RX_MSDU_DECAP_8023_SNAP_LLC = 3
+};
+
+struct rx_msdu_start_common {
+	__le32 info0; /* %RX_MSDU_START_INFO0_ */
+	__le32 flow_id_crc;
+	__le32 info1; /* %RX_MSDU_START_INFO1_ */
+} __packed;
+
+struct rx_msdu_start_qca99x0 {
+	__le32 info2; /* %RX_MSDU_START_INFO2_ */
+} __packed;
+
+struct rx_msdu_start {
+	struct rx_msdu_start_common common;
+	union {
+		struct rx_msdu_start_qca99x0 qca99x0;
+	} __packed;
+} __packed;
+
+/*
+ * msdu_length
+ *		MSDU length in bytes after decapsulation.  This field is
+ *		also valid for MPDU frames without an A-MSDU and then still
+ *		represents the MSDU length after decapsulation
+ *
+ * ip_offset
+ *		Indicates the IP offset in bytes from the start of the
+ *		packet after decapsulation.  Only valid if ipv4_proto or
+ *		ipv6_proto is set.
+ *
+ * ring_mask
+ *		Indicates the destination RX rings for this MSDU.
+ *
+ * tcp_udp_offset
+ *		Indicates the offset in bytes to the start of TCP or UDP
+ *		header from the start of the IP header after decapsulation.
+ *		Only valid if tcp_proto or udp_proto is set.  The value 0
+ *		indicates that the offset is longer than 127 bytes.
+ *
+ * reserved_0c
+ *		Reserved: HW should fill with zero.  FW should ignore.
+ *
+ * flow_id_crc
+ *		The flow_id_crc runs CRC32 on the following information:
+ *		IPv4 option: dest_addr[31:0], src_addr [31:0], {24'b0,
+ *		protocol[7:0]}.
+ *		IPv6 option: dest_addr[127:0], src_addr [127:0], {24'b0,
+ *		next_header[7:0]}
+ *		UDP case: src_port[15:0], dest_port[15:0]
+ *		TCP case: src_port[15:0], dest_port[15:0],
+ *		{header_length[3:0], 6'b0, flags[5:0], window_size[15:0]},
+ *		{16'b0, urgent_ptr[15:0]}, all options except 32-bit
+ *		timestamp.
+ *
+ * msdu_number
+ *		Indicates the MSDU number within an MPDU.  This value is
+ *		reset to zero at the start of each MPDU.  If the number of
+ *		MSDUs exceeds 255 this number will wrap using modulo 256.
+ *
+ * decap_format
+ *		Indicates the format after decapsulation:
+ *		0: RAW: No decapsulation
+ *		1: Native WiFi
+ *		2: Ethernet 2 (DIX)
+ *		3: 802.3 (SNAP/LLC)
+ *
+ * ipv4_proto
+ *		Set if L2 layer indicates IPv4 protocol.
+ *
+ * ipv6_proto
+ *		Set if L2 layer indicates IPv6 protocol.
+ *
+ * tcp_proto
+ *		Set if the ipv4_proto or ipv6_proto are set and the IP
+ *		protocol indicates TCP.
+ *
+ * udp_proto
+ *		Set if the ipv4_proto or ipv6_proto are set and the IP
+ *		protocol indicates UDP.
+ *
+ * ip_frag
+ *		Indicates that either the IP More frag bit is set or IP frag
+ *		number is non-zero.  If set indicates that this is a
+ *		fragmented IP packet.
+ *
+ * tcp_only_ack
+ *		Set if only the TCP Ack bit is set in the TCP flags and if
+ *		the TCP payload is 0.
+ *
+ * sa_idx
+ *		The offset in the address table which matches the MAC source
+ *		address.
+ *
+ * reserved_2b
+ *		Reserved: HW should fill with zero.  FW should ignore.
+ */
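+
+/* Example helpers (illustrative sketch; assumes the MS() bitfield macro from
+ * hw.h is available): extracting the decapsulation format and MSDU length
+ * documented above from rx_msdu_start.
+ */
+static inline enum rx_msdu_decap_format
+ath10k_rx_msdu_start_decap_format(const struct rx_msdu_start *start)
+{
+	return MS(__le32_to_cpu(start->common.info1),
+		  RX_MSDU_START_INFO1_DECAP_FORMAT);
+}
+
+static inline u32
+ath10k_rx_msdu_start_msdu_len(const struct rx_msdu_start *start)
+{
+	return MS(__le32_to_cpu(start->common.info0),
+		  RX_MSDU_START_INFO0_MSDU_LENGTH);
+}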
+
+#define RX_MSDU_END_INFO0_REPORTED_MPDU_LENGTH_MASK 0x00003fff
+#define RX_MSDU_END_INFO0_REPORTED_MPDU_LENGTH_LSB  0
+#define RX_MSDU_END_INFO0_FIRST_MSDU                (1 << 14)
+#define RX_MSDU_END_INFO0_LAST_MSDU                 (1 << 15)
+#define RX_MSDU_END_INFO0_PRE_DELIM_ERR             (1 << 30)
+#define RX_MSDU_END_INFO0_RESERVED_3B               (1 << 31)
+
+struct rx_msdu_end_common {
+	__le16 ip_hdr_cksum;
+	__le16 tcp_hdr_cksum;
+	u8 key_id_octet;
+	u8 classification_filter;
+	u8 wapi_pn[10];
+	__le32 info0;
+} __packed;
+
+#define RX_MSDU_END_INFO1_TCP_FLAG_MASK     0x000001ff
+#define RX_MSDU_END_INFO1_TCP_FLAG_LSB      0
+#define RX_MSDU_END_INFO1_L3_HDR_PAD_MASK   0x00001c00
+#define RX_MSDU_END_INFO1_L3_HDR_PAD_LSB    10
+#define RX_MSDU_END_INFO1_WINDOW_SIZE_MASK  0xffff0000
+#define RX_MSDU_END_INFO1_WINDOW_SIZE_LSB   16
+#define RX_MSDU_END_INFO1_IRO_ELIGIBLE      BIT(9)
+
+#define RX_MSDU_END_INFO2_DA_OFFSET_MASK    0x0000003f
+#define RX_MSDU_END_INFO2_DA_OFFSET_LSB     0
+#define RX_MSDU_END_INFO2_SA_OFFSET_MASK    0x00000fc0
+#define RX_MSDU_END_INFO2_SA_OFFSET_LSB     6
+#define RX_MSDU_END_INFO2_TYPE_OFFSET_MASK  0x0003f000
+#define RX_MSDU_END_INFO2_TYPE_OFFSET_LSB   12
+
+struct rx_msdu_end_qca99x0 {
+	__le32 ipv6_crc;
+	__le32 tcp_seq_no;
+	__le32 tcp_ack_no;
+	__le32 info1;
+	__le32 info2;
+} __packed;
+
+struct rx_msdu_end {
+	struct rx_msdu_end_common common;
+	union {
+		struct rx_msdu_end_qca99x0 qca99x0;
+	} __packed;
+} __packed;
+
+/*
+ *ip_hdr_cksum
+ *		This can include the IP header checksum or the pseudo header
+ *		checksum used by TCP/UDP checksum.
+ *
+ *tcp_hdr_cksum
+ *		The value of the computed TCP/UDP checksum.  A mode bit
+ *		selects whether this checksum is the full checksum or the
+ *		partial checksum which does not include the pseudo header.
+ *
+ *key_id_octet
+ *		The key ID octet from the IV.  Only valid when first_msdu is
+ *		set.
+ *
+ *classification_filter
+ *		Indicates the number classification filter rule
+ *
+ *ext_wapi_pn_63_48
+ *		Extension PN (packet number) which is only used by WAPI.
+ *		This corresponds to WAPI PN bits [63:48] (pn6 and pn7).  The
+ *		WAPI PN bits [63:0] are in the pn field of the rx_mpdu_start
+ *		descriptor.
+ *
+ *ext_wapi_pn_95_64
+ *		Extension PN (packet number) which is only used by WAPI.
+ *		This corresponds to WAPI PN bits [95:64] (pn8, pn9, pn10 and
+ *		pn11).
+ *
+ *ext_wapi_pn_127_96
+ *		Extension PN (packet number) which is only used by WAPI.
+ *		This corresponds to WAPI PN bits [127:96] (pn12, pn13, pn14,
+ *		pn15).
+ *
+ *reported_mpdu_length
+ *		MPDU length before decapsulation.  Only valid when
+ *		first_msdu is set.  This field is taken directly from the
+ *		length field of the A-MPDU delimiter or the preamble length
+ *		field for non-A-MPDU frames.
+ *
+ *first_msdu
+ *		Indicates the first MSDU of an A-MSDU.  If both first_msdu and
+ *		last_msdu are set in the MSDU then this is a non-aggregated
+ *		MSDU frame: a normal MPDU.  Interior MSDUs in an A-MSDU shall
+ *		have both first_msdu and last_msdu bits set to 0.
+ *
+ *last_msdu
+ *		Indicates the last MSDU of the A-MSDU.  MPDU end status is
+ *		only valid when last_msdu is set.
+ *
+ *reserved_3a
+ *		Reserved: HW should fill with zero.  FW should ignore.
+ *
+ *pre_delim_err
+ *		Indicates that the first delimiter had an FCS failure.  Only
+ *		valid when first_mpdu and first_msdu are set.
+ *
+ *reserved_3b
+ *		Reserved: HW should fill with zero.  FW should ignore.
+ */
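+
+/* Example helper (illustrative sketch): per the first_msdu/last_msdu
+ * description above, both bits set in rx_msdu_end identify a non-aggregated
+ * (plain MPDU) frame.
+ */
+static inline bool
+ath10k_rx_msdu_end_is_non_aggregated(const struct rx_msdu_end *end)
+{
+	u32 info0 = __le32_to_cpu(end->common.info0);
+
+	return (info0 & RX_MSDU_END_INFO0_FIRST_MSDU) &&
+	       (info0 & RX_MSDU_END_INFO0_LAST_MSDU);
+}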
+
+#define RX_PPDU_START_SIG_RATE_SELECT_OFDM 0
+#define RX_PPDU_START_SIG_RATE_SELECT_CCK  1
+
+#define RX_PPDU_START_SIG_RATE_OFDM_48 0
+#define RX_PPDU_START_SIG_RATE_OFDM_24 1
+#define RX_PPDU_START_SIG_RATE_OFDM_12 2
+#define RX_PPDU_START_SIG_RATE_OFDM_6  3
+#define RX_PPDU_START_SIG_RATE_OFDM_54 4
+#define RX_PPDU_START_SIG_RATE_OFDM_36 5
+#define RX_PPDU_START_SIG_RATE_OFDM_18 6
+#define RX_PPDU_START_SIG_RATE_OFDM_9  7
+
+#define RX_PPDU_START_SIG_RATE_CCK_LP_11  0
+#define RX_PPDU_START_SIG_RATE_CCK_LP_5_5 1
+#define RX_PPDU_START_SIG_RATE_CCK_LP_2   2
+#define RX_PPDU_START_SIG_RATE_CCK_LP_1   3
+#define RX_PPDU_START_SIG_RATE_CCK_SP_11  4
+#define RX_PPDU_START_SIG_RATE_CCK_SP_5_5 5
+#define RX_PPDU_START_SIG_RATE_CCK_SP_2   6
+
+#define HTT_RX_PPDU_START_PREAMBLE_LEGACY        0x04
+#define HTT_RX_PPDU_START_PREAMBLE_HT            0x08
+#define HTT_RX_PPDU_START_PREAMBLE_HT_WITH_TXBF  0x09
+#define HTT_RX_PPDU_START_PREAMBLE_VHT           0x0C
+#define HTT_RX_PPDU_START_PREAMBLE_VHT_WITH_TXBF 0x0D
+
+#define RX_PPDU_START_INFO0_IS_GREENFIELD (1 << 0)
+
+#define RX_PPDU_START_INFO1_L_SIG_RATE_MASK    0x0000000f
+#define RX_PPDU_START_INFO1_L_SIG_RATE_LSB     0
+#define RX_PPDU_START_INFO1_L_SIG_LENGTH_MASK  0x0001ffe0
+#define RX_PPDU_START_INFO1_L_SIG_LENGTH_LSB   5
+#define RX_PPDU_START_INFO1_L_SIG_TAIL_MASK    0x00fc0000
+#define RX_PPDU_START_INFO1_L_SIG_TAIL_LSB     18
+#define RX_PPDU_START_INFO1_PREAMBLE_TYPE_MASK 0xff000000
+#define RX_PPDU_START_INFO1_PREAMBLE_TYPE_LSB  24
+#define RX_PPDU_START_INFO1_L_SIG_RATE_SELECT  (1 << 4)
+#define RX_PPDU_START_INFO1_L_SIG_PARITY       (1 << 17)
+
+#define RX_PPDU_START_INFO2_HT_SIG_VHT_SIG_A_1_MASK 0x00ffffff
+#define RX_PPDU_START_INFO2_HT_SIG_VHT_SIG_A_1_LSB  0
+
+#define RX_PPDU_START_INFO3_HT_SIG_VHT_SIG_A_2_MASK 0x00ffffff
+#define RX_PPDU_START_INFO3_HT_SIG_VHT_SIG_A_2_LSB  0
+#define RX_PPDU_START_INFO3_TXBF_H_INFO             (1 << 24)
+
+#define RX_PPDU_START_INFO4_VHT_SIG_B_MASK 0x1fffffff
+#define RX_PPDU_START_INFO4_VHT_SIG_B_LSB  0
+
+#define RX_PPDU_START_INFO5_SERVICE_MASK 0x0000ffff
+#define RX_PPDU_START_INFO5_SERVICE_LSB  0
+
+/* No idea what this flag means. It seems to be always set in rate. */
+#define RX_PPDU_START_RATE_FLAG BIT(3)
+
+enum rx_ppdu_start_rate {
+	RX_PPDU_START_RATE_OFDM_48M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_48M,
+	RX_PPDU_START_RATE_OFDM_24M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_24M,
+	RX_PPDU_START_RATE_OFDM_12M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_12M,
+	RX_PPDU_START_RATE_OFDM_6M  = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_6M,
+	RX_PPDU_START_RATE_OFDM_54M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_54M,
+	RX_PPDU_START_RATE_OFDM_36M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_36M,
+	RX_PPDU_START_RATE_OFDM_18M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_18M,
+	RX_PPDU_START_RATE_OFDM_9M  = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_9M,
+
+	RX_PPDU_START_RATE_CCK_LP_11M  = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_11M,
+	RX_PPDU_START_RATE_CCK_LP_5_5M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_5_5M,
+	RX_PPDU_START_RATE_CCK_LP_2M   = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_2M,
+	RX_PPDU_START_RATE_CCK_LP_1M   = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_1M,
+	RX_PPDU_START_RATE_CCK_SP_11M  = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_11M,
+	RX_PPDU_START_RATE_CCK_SP_5_5M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_5_5M,
+	RX_PPDU_START_RATE_CCK_SP_2M   = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_2M,
+};
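+
+/* Example helper (illustrative sketch): every rx_ppdu_start_rate value above
+ * is an ATH10K_HW_RATE_* value with RX_PPDU_START_RATE_FLAG or'ed in, so
+ * masking the flag off recovers the plain hardware rate code.
+ */
+static inline u8 ath10k_rx_ppdu_start_rate_to_hw_rate(u8 rate)
+{
+	return rate & ~RX_PPDU_START_RATE_FLAG;
+}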
+
+struct rx_ppdu_start {
+	struct {
+		u8 pri20_mhz;
+		u8 ext20_mhz;
+		u8 ext40_mhz;
+		u8 ext80_mhz;
+	} rssi_chains[4];
+	u8 rssi_comb;
+	__le16 rsvd0;
+	u8 info0; /* %RX_PPDU_START_INFO0_ */
+	__le32 info1; /* %RX_PPDU_START_INFO1_ */
+	__le32 info2; /* %RX_PPDU_START_INFO2_ */
+	__le32 info3; /* %RX_PPDU_START_INFO3_ */
+	__le32 info4; /* %RX_PPDU_START_INFO4_ */
+	__le32 info5; /* %RX_PPDU_START_INFO5_ */
+} __packed;
+
+/*
+ * rssi_chain0_pri20
+ *		RSSI of RX PPDU on chain 0 of primary 20 MHz bandwidth.
+ *		Value of 0x80 indicates invalid.
+ *
+ * rssi_chain0_sec20
+ *		RSSI of RX PPDU on chain 0 of secondary 20 MHz bandwidth.
+ *		Value of 0x80 indicates invalid.
+ *
+ * rssi_chain0_sec40
+ *		RSSI of RX PPDU on chain 0 of secondary 40 MHz bandwidth.
+ *		Value of 0x80 indicates invalid.
+ *
+ * rssi_chain0_sec80
+ *		RSSI of RX PPDU on chain 0 of secondary 80 MHz bandwidth.
+ *		Value of 0x80 indicates invalid.
+ *
+ * rssi_chain1_pri20
+ *		RSSI of RX PPDU on chain 1 of primary 20 MHz bandwidth.
+ *		Value of 0x80 indicates invalid.
+ *
+ * rssi_chain1_sec20
+ *		RSSI of RX PPDU on chain 1 of secondary 20 MHz bandwidth.
+ *		Value of 0x80 indicates invalid.
+ *
+ * rssi_chain1_sec40
+ *		RSSI of RX PPDU on chain 1 of secondary 40 MHz bandwidth.
+ *		Value of 0x80 indicates invalid.
+ *
+ * rssi_chain1_sec80
+ *		RSSI of RX PPDU on chain 1 of secondary 80 MHz bandwidth.
+ *		Value of 0x80 indicates invalid.
+ *
+ * rssi_chain2_pri20
+ *		RSSI of RX PPDU on chain 2 of primary 20 MHz bandwidth.
+ *		Value of 0x80 indicates invalid.
+ *
+ * rssi_chain2_sec20
+ *		RSSI of RX PPDU on chain 2 of secondary 20 MHz bandwidth.
+ *		Value of 0x80 indicates invalid.
+ *
+ * rssi_chain2_sec40
+ *		RSSI of RX PPDU on chain 2 of secondary 40 MHz bandwidth.
+ *		Value of 0x80 indicates invalid.
+ *
+ * rssi_chain2_sec80
+ *		RSSI of RX PPDU on chain 2 of secondary 80 MHz bandwidth.
+ *		Value of 0x80 indicates invalid.
+ *
+ * rssi_chain3_pri20
+ *		RSSI of RX PPDU on chain 3 of primary 20 MHz bandwidth.
+ *		Value of 0x80 indicates invalid.
+ *
+ * rssi_chain3_sec20
+ *		RSSI of RX PPDU on chain 3 of secondary 20 MHz bandwidth.
+ *		Value of 0x80 indicates invalid.
+ *
+ * rssi_chain3_sec40
+ *		RSSI of RX PPDU on chain 3 of secondary 40 MHz bandwidth.
+ *		Value of 0x80 indicates invalid.
+ *
+ * rssi_chain3_sec80
+ *		RSSI of RX PPDU on chain 3 of secondary 80 MHz bandwidth.
+ *		Value of 0x80 indicates invalid.
+ *
+ * rssi_comb
+ *		The combined RSSI of RX PPDU of all active chains and
+ *		bandwidths.  Value of 0x80 indicates invalid.
+ *
+ * reserved_4a
+ *		Reserved: HW should fill with 0, FW should ignore.
+ *
+ * is_greenfield
+ *		Do we really support this?
+ *
+ * reserved_4b
+ *		Reserved: HW should fill with 0, FW should ignore.
+ *
+ * l_sig_rate
+ *		If l_sig_rate_select is 0:
+ *		0x8: OFDM 48 Mbps
+ *		0x9: OFDM 24 Mbps
+ *		0xA: OFDM 12 Mbps
+ *		0xB: OFDM 6 Mbps
+ *		0xC: OFDM 54 Mbps
+ *		0xD: OFDM 36 Mbps
+ *		0xE: OFDM 18 Mbps
+ *		0xF: OFDM 9 Mbps
+ *		If l_sig_rate_select is 1:
+ *		0x8: CCK 11 Mbps long preamble
+ *		0x9: CCK 5.5 Mbps long preamble
+ *		0xA: CCK 2 Mbps long preamble
+ *		0xB: CCK 1 Mbps long preamble
+ *		0xC: CCK 11 Mbps short preamble
+ *		0xD: CCK 5.5 Mbps short preamble
+ *		0xE: CCK 2 Mbps short preamble
+ *
+ * l_sig_rate_select
+ *		Legacy signal rate select.  If set then l_sig_rate indicates
+ *		CCK rates.  If clear then l_sig_rate indicates OFDM rates.
+ *
+ * l_sig_length
+ *		Length of legacy frame in octets.
+ *
+ * l_sig_parity
+ *		Odd parity over l_sig_rate and l_sig_length
+ *
+ * l_sig_tail
+ *		Tail bits for Viterbi decoder
+ *
+ * preamble_type
+ *		Indicates the type of preamble ahead:
+ *		0x4: Legacy (OFDM/CCK)
+ *		0x8: HT
+ *		0x9: HT with TxBF
+ *		0xC: VHT
+ *		0xD: VHT with TxBF
+ *		0x80 - 0xFF: Reserved for special baseband data types such
+ *		as radar and spectral scan.
+ *
+ * ht_sig_vht_sig_a_1
+ *		If preamble_type == 0x8 or 0x9
+ *		HT-SIG (first 24 bits)
+ *		If preamble_type == 0xC or 0xD
+ *		VHT-SIG A (first 24 bits)
+ *		Else
+ *		Reserved
+ *
+ * reserved_6
+ *		Reserved: HW should fill with 0, FW should ignore.
+ *
+ * ht_sig_vht_sig_a_2
+ *		If preamble_type == 0x8 or 0x9
+ *		HT-SIG (last 24 bits)
+ *		If preamble_type == 0xC or 0xD
+ *		VHT-SIG A (last 24 bits)
+ *		Else
+ *		Reserved
+ *
+ * txbf_h_info
+ *		Indicates that the packet data carries H information which
+ *		is used for TxBF debug.
+ *
+ * reserved_7
+ *		Reserved: HW should fill with 0, FW should ignore.
+ *
+ * vht_sig_b
+ *		WiFi 1.0 and WiFi 2.0 will likely have this field set to all
+ *		0s since the BB does not plan on decoding VHT SIG-B.
+ *
+ * reserved_8
+ *		Reserved: HW should fill with 0, FW should ignore.
+ *
+ * service
+ *		Service field from BB for OFDM, HT and VHT packets.  CCK
+ *		packets will have service field of 0.
+ *
+ * reserved_9
+ *		Reserved: HW should fill with 0, FW should ignore.
+ */
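+
+/* Example helpers (illustrative sketch; assumes the MS() bitfield macro from
+ * hw.h is available): reading the l_sig_rate_select and preamble_type fields
+ * documented above from rx_ppdu_start.
+ */
+static inline bool ath10k_rx_ppdu_start_is_cck(const struct rx_ppdu_start *ppdu)
+{
+	return !!(__le32_to_cpu(ppdu->info1) &
+		  RX_PPDU_START_INFO1_L_SIG_RATE_SELECT);
+}
+
+static inline u8
+ath10k_rx_ppdu_start_preamble_type(const struct rx_ppdu_start *ppdu)
+{
+	return MS(__le32_to_cpu(ppdu->info1),
+		  RX_PPDU_START_INFO1_PREAMBLE_TYPE);
+}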
+
+#define RX_PPDU_END_FLAGS_PHY_ERR             (1 << 0)
+#define RX_PPDU_END_FLAGS_RX_LOCATION         (1 << 1)
+#define RX_PPDU_END_FLAGS_TXBF_H_INFO         (1 << 2)
+
+#define RX_PPDU_END_INFO0_RX_ANTENNA_MASK     0x00ffffff
+#define RX_PPDU_END_INFO0_RX_ANTENNA_LSB      0
+#define RX_PPDU_END_INFO0_FLAGS_TX_HT_VHT_ACK (1 << 24)
+#define RX_PPDU_END_INFO0_BB_CAPTURED_CHANNEL (1 << 25)
+
+#define RX_PPDU_END_INFO1_PEER_IDX_MASK       0x1ffc
+#define RX_PPDU_END_INFO1_PEER_IDX_LSB        2
+#define RX_PPDU_END_INFO1_BB_DATA             BIT(0)
+#define RX_PPDU_END_INFO1_PEER_IDX_VALID      BIT(1)
+#define RX_PPDU_END_INFO1_PPDU_DONE           BIT(15)
+
+struct rx_ppdu_end_common {
+	__le32 evm_p0;
+	__le32 evm_p1;
+	__le32 evm_p2;
+	__le32 evm_p3;
+	__le32 evm_p4;
+	__le32 evm_p5;
+	__le32 evm_p6;
+	__le32 evm_p7;
+	__le32 evm_p8;
+	__le32 evm_p9;
+	__le32 evm_p10;
+	__le32 evm_p11;
+	__le32 evm_p12;
+	__le32 evm_p13;
+	__le32 evm_p14;
+	__le32 evm_p15;
+	__le32 tsf_timestamp;
+	__le32 wb_timestamp;
+} __packed;
+
+struct rx_ppdu_end_qca988x {
+	u8 locationing_timestamp;
+	u8 phy_err_code;
+	__le16 flags; /* %RX_PPDU_END_FLAGS_ */
+	__le32 info0; /* %RX_PPDU_END_INFO0_ */
+	__le16 bb_length;
+	__le16 info1; /* %RX_PPDU_END_INFO1_ */
+} __packed;
+
+#define RX_PPDU_END_RTT_CORRELATION_VALUE_MASK 0x00ffffff
+#define RX_PPDU_END_RTT_CORRELATION_VALUE_LSB  0
+#define RX_PPDU_END_RTT_UNUSED_MASK            0x7f000000
+#define RX_PPDU_END_RTT_UNUSED_LSB             24
+#define RX_PPDU_END_RTT_NORMAL_MODE            BIT(31)
+
+struct rx_ppdu_end_qca6174 {
+	u8 locationing_timestamp;
+	u8 phy_err_code;
+	__le16 flags; /* %RX_PPDU_END_FLAGS_ */
+	__le32 info0; /* %RX_PPDU_END_INFO0_ */
+	__le32 rtt; /* %RX_PPDU_END_RTT_ */
+	__le16 bb_length;
+	__le16 info1; /* %RX_PPDU_END_INFO1_ */
+} __packed;
+
+#define RX_PKT_END_INFO0_RX_SUCCESS              BIT(0)
+#define RX_PKT_END_INFO0_ERR_TX_INTERRUPT_RX     BIT(3)
+#define RX_PKT_END_INFO0_ERR_OFDM_POWER_DROP     BIT(4)
+#define RX_PKT_END_INFO0_ERR_OFDM_RESTART        BIT(5)
+#define RX_PKT_END_INFO0_ERR_CCK_POWER_DROP      BIT(6)
+#define RX_PKT_END_INFO0_ERR_CCK_RESTART         BIT(7)
+
+#define RX_LOCATION_INFO_RTT_CORR_VAL_MASK       0x0001ffff
+#define RX_LOCATION_INFO_RTT_CORR_VAL_LSB        0
+#define RX_LOCATION_INFO_FAC_STATUS_MASK         0x000c0000
+#define RX_LOCATION_INFO_FAC_STATUS_LSB          18
+#define RX_LOCATION_INFO_PKT_BW_MASK             0x00700000
+#define RX_LOCATION_INFO_PKT_BW_LSB              20
+#define RX_LOCATION_INFO_RTT_TX_FRAME_PHASE_MASK 0x01800000
+#define RX_LOCATION_INFO_RTT_TX_FRAME_PHASE_LSB  23
+#define RX_LOCATION_INFO_CIR_STATUS              BIT(17)
+#define RX_LOCATION_INFO_RTT_MAC_PHY_PHASE       BIT(25)
+#define RX_LOCATION_INFO_RTT_TX_DATA_START_X     BIT(26)
+#define RX_LOCATION_INFO_HW_IFFT_MODE            BIT(30)
+#define RX_LOCATION_INFO_RX_LOCATION_VALID       BIT(31)
+
+struct rx_pkt_end {
+	__le32 info0; /* %RX_PKT_END_INFO0_ */
+	__le32 phy_timestamp_1;
+	__le32 phy_timestamp_2;
+	__le32 rx_location_info; /* %RX_LOCATION_INFO_ */
+} __packed;
+
+enum rx_phy_ppdu_end_info0 {
+	RX_PHY_PPDU_END_INFO0_ERR_RADAR           = BIT(2),
+	RX_PHY_PPDU_END_INFO0_ERR_RX_ABORT        = BIT(3),
+	RX_PHY_PPDU_END_INFO0_ERR_RX_NAP          = BIT(4),
+	RX_PHY_PPDU_END_INFO0_ERR_OFDM_TIMING     = BIT(5),
+	RX_PHY_PPDU_END_INFO0_ERR_OFDM_PARITY     = BIT(6),
+	RX_PHY_PPDU_END_INFO0_ERR_OFDM_RATE       = BIT(7),
+	RX_PHY_PPDU_END_INFO0_ERR_OFDM_LENGTH     = BIT(8),
+	RX_PHY_PPDU_END_INFO0_ERR_OFDM_RESTART    = BIT(9),
+	RX_PHY_PPDU_END_INFO0_ERR_OFDM_SERVICE    = BIT(10),
+	RX_PHY_PPDU_END_INFO0_ERR_OFDM_POWER_DROP = BIT(11),
+	RX_PHY_PPDU_END_INFO0_ERR_CCK_BLOCKER     = BIT(12),
+	RX_PHY_PPDU_END_INFO0_ERR_CCK_TIMING      = BIT(13),
+	RX_PHY_PPDU_END_INFO0_ERR_CCK_HEADER_CRC  = BIT(14),
+	RX_PHY_PPDU_END_INFO0_ERR_CCK_RATE        = BIT(15),
+	RX_PHY_PPDU_END_INFO0_ERR_CCK_LENGTH      = BIT(16),
+	RX_PHY_PPDU_END_INFO0_ERR_CCK_RESTART     = BIT(17),
+	RX_PHY_PPDU_END_INFO0_ERR_CCK_SERVICE     = BIT(18),
+	RX_PHY_PPDU_END_INFO0_ERR_CCK_POWER_DROP  = BIT(19),
+	RX_PHY_PPDU_END_INFO0_ERR_HT_CRC          = BIT(20),
+	RX_PHY_PPDU_END_INFO0_ERR_HT_LENGTH       = BIT(21),
+	RX_PHY_PPDU_END_INFO0_ERR_HT_RATE         = BIT(22),
+	RX_PHY_PPDU_END_INFO0_ERR_HT_ZLF          = BIT(23),
+	RX_PHY_PPDU_END_INFO0_ERR_FALSE_RADAR_EXT = BIT(24),
+	RX_PHY_PPDU_END_INFO0_ERR_GREEN_FIELD     = BIT(25),
+	RX_PHY_PPDU_END_INFO0_ERR_SPECTRAL_SCAN   = BIT(26),
+	RX_PHY_PPDU_END_INFO0_ERR_RX_DYN_BW       = BIT(27),
+	RX_PHY_PPDU_END_INFO0_ERR_LEG_HT_MISMATCH = BIT(28),
+	RX_PHY_PPDU_END_INFO0_ERR_VHT_CRC         = BIT(29),
+	RX_PHY_PPDU_END_INFO0_ERR_VHT_SIGA        = BIT(30),
+	RX_PHY_PPDU_END_INFO0_ERR_VHT_LSIG        = BIT(31),
+};
+
+enum rx_phy_ppdu_end_info1 {
+	RX_PHY_PPDU_END_INFO1_ERR_VHT_NDP            = BIT(0),
+	RX_PHY_PPDU_END_INFO1_ERR_VHT_NSYM           = BIT(1),
+	RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_EXT_SYM     = BIT(2),
+	RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID0    = BIT(3),
+	RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID1_62 = BIT(4),
+	RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID63   = BIT(5),
+	RX_PHY_PPDU_END_INFO1_ERR_OFDM_LDPC_DECODER  = BIT(6),
+	RX_PHY_PPDU_END_INFO1_ERR_DEFER_NAP          = BIT(7),
+	RX_PHY_PPDU_END_INFO1_ERR_FDOMAIN_TIMEOUT    = BIT(8),
+	RX_PHY_PPDU_END_INFO1_ERR_LSIG_REL_CHECK     = BIT(9),
+	RX_PHY_PPDU_END_INFO1_ERR_BT_COLLISION       = BIT(10),
+	RX_PHY_PPDU_END_INFO1_ERR_MU_FEEDBACK        = BIT(11),
+	RX_PHY_PPDU_END_INFO1_ERR_TX_INTERRUPT_RX    = BIT(12),
+	RX_PHY_PPDU_END_INFO1_ERR_RX_CBF             = BIT(13),
+};
+
+struct rx_phy_ppdu_end {
+	__le32 info0; /* %RX_PHY_PPDU_END_INFO0_ */
+	__le32 info1; /* %RX_PHY_PPDU_END_INFO1_ */
+} __packed;
+
+#define RX_PPDU_END_RX_TIMING_OFFSET_MASK          0x00000fff
+#define RX_PPDU_END_RX_TIMING_OFFSET_LSB           0
+
+#define RX_PPDU_END_RX_INFO_RX_ANTENNA_MASK        0x00ffffff
+#define RX_PPDU_END_RX_INFO_RX_ANTENNA_LSB         0
+#define RX_PPDU_END_RX_INFO_TX_HT_VHT_ACK          BIT(24)
+#define RX_PPDU_END_RX_INFO_RX_PKT_END_VALID       BIT(25)
+#define RX_PPDU_END_RX_INFO_RX_PHY_PPDU_END_VALID  BIT(26)
+#define RX_PPDU_END_RX_INFO_RX_TIMING_OFFSET_VALID BIT(27)
+#define RX_PPDU_END_RX_INFO_BB_CAPTURED_CHANNEL    BIT(28)
+#define RX_PPDU_END_RX_INFO_UNSUPPORTED_MU_NC      BIT(29)
+#define RX_PPDU_END_RX_INFO_OTP_TXBF_DISABLE       BIT(30)
+
+struct rx_ppdu_end_qca99x0 {
+	struct rx_pkt_end rx_pkt_end;
+	struct rx_phy_ppdu_end rx_phy_ppdu_end;
+	__le32 rx_timing_offset; /* %RX_PPDU_END_RX_TIMING_OFFSET_ */
+	__le32 rx_info; /* %RX_PPDU_END_RX_INFO_ */
+	__le16 bb_length;
+	__le16 info1; /* %RX_PPDU_END_INFO1_ */
+} __packed;
+
+struct rx_ppdu_end {
+	struct rx_ppdu_end_common common;
+	union {
+		struct rx_ppdu_end_qca988x qca988x;
+		struct rx_ppdu_end_qca6174 qca6174;
+		struct rx_ppdu_end_qca99x0 qca99x0;
+	} __packed;
+} __packed;
+
+/*
+ * evm_p0
+ *		EVM for pilot 0.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p1
+ *		EVM for pilot 1.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p2
+ *		EVM for pilot 2.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p3
+ *		EVM for pilot 3.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p4
+ *		EVM for pilot 4.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p5
+ *		EVM for pilot 5.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p6
+ *		EVM for pilot 6.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p7
+ *		EVM for pilot 7.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p8
+ *		EVM for pilot 8.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p9
+ *		EVM for pilot 9.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p10
+ *		EVM for pilot 10.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p11
+ *		EVM for pilot 11.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p12
+ *		EVM for pilot 12.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p13
+ *		EVM for pilot 13.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p14
+ *		EVM for pilot 14.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p15
+ *		EVM for pilot 15.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * tsf_timestamp
+ *		Receive TSF timestamp sampled on the rising edge of
+ *		rx_clear.  For PHY errors this may be the current TSF when
+ *		phy_error is asserted if the rx_clear does not assert before
+ *		the end of the PHY error.
+ *
+ * wb_timestamp
+ *		WLAN/BT timestamp is a 1 usec resolution timestamp which
+ *		does not get updated based on receive beacon like TSF.  The
+ *		same rules for capturing tsf_timestamp are used to capture
+ *		the wb_timestamp.
+ *
+ * locationing_timestamp
+ *		Timestamp used for locationing.  This timestamp is used to
+ *		indicate fractions of usec.  For example if the MAC clock is
+ *		running at 80 MHz, the timestamp will increment every 12.5
+ *		nsec.  The value starts at 0 and increments to 79 and
+ *		returns to 0 and repeats.  This information is valid for
+ *		every PPDU.  This information can be used in conjunction
+ *		with wb_timestamp to capture large delta times.
+ *
+ * phy_err_code
+ *		See section 1.10.8.1.2 for the list of the PHY error codes.
+ *
+ * phy_err
+ *		Indicates a PHY error was detected for this PPDU.
+ *
+ * rx_location
+ *		Indicates that location information was requested.
+ *
+ * txbf_h_info
+ *		Indicates that the packet data carries H information which
+ *		is used for TxBF debug.
+ *
+ * reserved_18
+ *		Reserved: HW should fill with 0, FW should ignore.
+ *
+ * rx_antenna
+ *		Receive antenna value
+ *
+ * tx_ht_vht_ack
+ *		Indicates that a HT or VHT Ack/BA frame was transmitted in
+ *		response to this receive packet.
+ *
+ * bb_captured_channel
+ *		Indicates that the BB has captured a channel dump.  FW can
+ *		then read the channel dump memory.  This may indicate that
+ *		the channel was captured either based on PCU setting the
+ *		capture_channel bit in the BB descriptor or FW setting the
+ *		capture_channel mode bit.
+ *
+ * reserved_19
+ *		Reserved: HW should fill with 0, FW should ignore.
+ *
+ * bb_length
+ *		Indicates the number of bytes of baseband information for
+ *		PPDUs where the BB descriptor preamble type is 0x80 to 0xFF
+ *		which indicates that this is not a normal PPDU but rather
+ *		contains baseband debug information.
+ *
+ * reserved_20
+ *		Reserved: HW should fill with 0, FW should ignore.
+ *
+ * ppdu_done
+ *		PPDU end status is only valid when ppdu_done bit is set.
+ *		Every time HW sets this bit in memory FW/SW must clear this
+ *		bit in memory.  FW will initialize all the ppdu_done dword
+ *		to 0.
+ */
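+
+/* Example helper (illustrative sketch): the ppdu_done handshake described
+ * above: HW sets the bit and the host must clear it after consuming the
+ * descriptor.  info1 is the per-chip rx_ppdu_end info1 word.
+ */
+static inline bool ath10k_rx_ppdu_end_is_done(__le16 info1)
+{
+	return !!(__le16_to_cpu(info1) & RX_PPDU_END_INFO1_PPDU_DONE);
+}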
+
+#define FW_RX_DESC_INFO0_DISCARD  (1 << 0)
+#define FW_RX_DESC_INFO0_FORWARD  (1 << 1)
+#define FW_RX_DESC_INFO0_INSPECT  (1 << 5)
+#define FW_RX_DESC_INFO0_EXT_MASK 0xC0
+#define FW_RX_DESC_INFO0_EXT_LSB  6
+
+struct fw_rx_desc_base {
+	u8 info0;
+} __packed;
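+
+/* Example helper (illustrative sketch): extracting the 2-bit extension field
+ * from the firmware rx descriptor flags above.
+ */
+static inline u8 ath10k_fw_rx_desc_ext(const struct fw_rx_desc_base *desc)
+{
+	return (desc->info0 & FW_RX_DESC_INFO0_EXT_MASK) >>
+	       FW_RX_DESC_INFO0_EXT_LSB;
+}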
+
+#endif /* _RX_DESC_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c
new file mode 100644
index 0000000..a0e7eeb
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/spectral.c
@@ -0,0 +1,559 @@
+/*
+ * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/relay.h>
+#include "core.h"
+#include "debug.h"
+#include "wmi-ops.h"
+
+static void send_fft_sample(struct ath10k *ar,
+			    const struct fft_sample_tlv *fft_sample_tlv)
+{
+	int length;
+
+	if (!ar->spectral.rfs_chan_spec_scan)
+		return;
+
+	length = __be16_to_cpu(fft_sample_tlv->length) +
+		 sizeof(*fft_sample_tlv);
+	relay_write(ar->spectral.rfs_chan_spec_scan, fft_sample_tlv, length);
+}
+
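+/* Find the shift (max_exp) for which the raw bin value at the peak index
+ * matches the reported peak magnitude, i.e. data[dc_pos + max_index] ==
+ * max_magnitude >> max_exp.  Returns 0 if the peak index lies outside the
+ * bins or no shift matches.
+ */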
+static uint8_t get_max_exp(s8 max_index, u16 max_magnitude, size_t bin_len,
+			   u8 *data)
+{
+	int dc_pos;
+	u8 max_exp;
+
+	dc_pos = bin_len / 2;
+
+	/* peak index outside of bins */
+	if (dc_pos < max_index || -dc_pos >= max_index)
+		return 0;
+
+	for (max_exp = 0; max_exp < 8; max_exp++) {
+		if (data[dc_pos + max_index] == (max_magnitude >> max_exp))
+			break;
+	}
+
+	/* max_exp not found */
+	if (data[dc_pos + max_index] != (max_magnitude >> max_exp))
+		return 0;
+
+	return max_exp;
+}
+
+int ath10k_spectral_process_fft(struct ath10k *ar,
+				struct wmi_phyerr_ev_arg *phyerr,
+				const struct phyerr_fft_report *fftr,
+				size_t bin_len, u64 tsf)
+{
+	struct fft_sample_ath10k *fft_sample;
+	u8 buf[sizeof(*fft_sample) + SPECTRAL_ATH10K_MAX_NUM_BINS];
+	u16 freq1, freq2, total_gain_db, base_pwr_db, length, peak_mag;
+	u32 reg0, reg1;
+	u8 chain_idx, *bins;
+	int dc_pos;
+
+	fft_sample = (struct fft_sample_ath10k *)&buf;
+
+	if (bin_len < 64 || bin_len > SPECTRAL_ATH10K_MAX_NUM_BINS)
+		return -EINVAL;
+
+	/* qca99x0 reports the bin size as 68 bytes (64 bytes + 4 bytes) in
+	 * report mode 2. The first 64 bytes carry the inband tones (-32 to
+	 * +31) and the last 4 bytes carry band edge detection data (+32),
+	 * mainly used for radar detection. Strip the last 4 bytes to keep
+	 * the bin size valid.
+	 */
+	if (bin_len == 68)
+		bin_len -= 4;
+
+	reg0 = __le32_to_cpu(fftr->reg0);
+	reg1 = __le32_to_cpu(fftr->reg1);
+
+	length = sizeof(*fft_sample) - sizeof(struct fft_sample_tlv) + bin_len;
+	fft_sample->tlv.type = ATH_FFT_SAMPLE_ATH10K;
+	fft_sample->tlv.length = __cpu_to_be16(length);
+
+	/* TODO: there might be a reason why the hardware reports 20/40/80 MHz,
+	 * but the results/plots suggest that it's actually 22/44/88 MHz.
+	 */
+	switch (phyerr->chan_width_mhz) {
+	case 20:
+		fft_sample->chan_width_mhz = 22;
+		break;
+	case 40:
+		fft_sample->chan_width_mhz = 44;
+		break;
+	case 80:
+		/* TODO: As experiments with an analogue sender and various
+		 * configurations (fft sizes of 64/128/256 and 20/40/80 MHz)
+		 * show, the particular configuration of 80 MHz/64 bins does
+		 * not match the other samples at all. Until the reason
+		 * for that is found, don't report these samples.
+		 */
+		if (bin_len == 64)
+			return -EINVAL;
+		fft_sample->chan_width_mhz = 88;
+		break;
+	default:
+		fft_sample->chan_width_mhz = phyerr->chan_width_mhz;
+	}
+
+	fft_sample->relpwr_db = MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB);
+	fft_sample->avgpwr_db = MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB);
+
+	peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG);
+	fft_sample->max_magnitude = __cpu_to_be16(peak_mag);
+	fft_sample->max_index = MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX);
+	fft_sample->rssi = phyerr->rssi_combined;
+
+	total_gain_db = MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB);
+	base_pwr_db = MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB);
+	fft_sample->total_gain_db = __cpu_to_be16(total_gain_db);
+	fft_sample->base_pwr_db = __cpu_to_be16(base_pwr_db);
+
+	freq1 = phyerr->freq1;
+	freq2 = phyerr->freq2;
+	fft_sample->freq1 = __cpu_to_be16(freq1);
+	fft_sample->freq2 = __cpu_to_be16(freq2);
+
+	chain_idx = MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX);
+
+	fft_sample->noise = __cpu_to_be16(phyerr->nf_chains[chain_idx]);
+
+	bins = (u8 *)fftr;
+	bins += sizeof(*fftr);
+
+	fft_sample->tsf = __cpu_to_be64(tsf);
+
+	/* max_exp has been directly reported by previous hardware (ath9k),
+	 * maybe it's possible to get it by other means?
+	 */
+	fft_sample->max_exp = get_max_exp(fft_sample->max_index, peak_mag,
+					  bin_len, bins);
+
+	memcpy(fft_sample->data, bins, bin_len);
+
+	/* DC value (value in the middle) is the blind spot of the spectral
+	 * sample and invalid, interpolate it.
+	 */
+	dc_pos = bin_len / 2;
+	fft_sample->data[dc_pos] = (fft_sample->data[dc_pos + 1] +
+				    fft_sample->data[dc_pos - 1]) / 2;
+
+	send_fft_sample(ar, &fft_sample->tlv);
+
+	return 0;
+}
+
+static struct ath10k_vif *ath10k_get_spectral_vdev(struct ath10k *ar)
+{
+	struct ath10k_vif *arvif;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	if (list_empty(&ar->arvifs))
+		return NULL;
+
+	/* if there already is a vif doing spectral, return that. */
+	list_for_each_entry(arvif, &ar->arvifs, list)
+		if (arvif->spectral_enabled)
+			return arvif;
+
+	/* otherwise, return the first vif. */
+	return list_first_entry(&ar->arvifs, typeof(*arvif), list);
+}
+
+static int ath10k_spectral_scan_trigger(struct ath10k *ar)
+{
+	struct ath10k_vif *arvif;
+	int res;
+	int vdev_id;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	arvif = ath10k_get_spectral_vdev(ar);
+	if (!arvif)
+		return -ENODEV;
+	vdev_id = arvif->vdev_id;
+
+	if (ar->spectral.mode == SPECTRAL_DISABLED)
+		return 0;
+
+	res = ath10k_wmi_vdev_spectral_enable(ar, vdev_id,
+					      WMI_SPECTRAL_TRIGGER_CMD_CLEAR,
+					      WMI_SPECTRAL_ENABLE_CMD_ENABLE);
+	if (res < 0)
+		return res;
+
+	res = ath10k_wmi_vdev_spectral_enable(ar, vdev_id,
+					      WMI_SPECTRAL_TRIGGER_CMD_TRIGGER,
+					      WMI_SPECTRAL_ENABLE_CMD_ENABLE);
+	if (res < 0)
+		return res;
+
+	return 0;
+}
+
+static int ath10k_spectral_scan_config(struct ath10k *ar,
+				       enum ath10k_spectral_mode mode)
+{
+	struct wmi_vdev_spectral_conf_arg arg;
+	struct ath10k_vif *arvif;
+	int vdev_id, count, res = 0;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	arvif = ath10k_get_spectral_vdev(ar);
+	if (!arvif)
+		return -ENODEV;
+
+	vdev_id = arvif->vdev_id;
+
+	arvif->spectral_enabled = (mode != SPECTRAL_DISABLED);
+	ar->spectral.mode = mode;
+
+	res = ath10k_wmi_vdev_spectral_enable(ar, vdev_id,
+					      WMI_SPECTRAL_TRIGGER_CMD_CLEAR,
+					      WMI_SPECTRAL_ENABLE_CMD_DISABLE);
+	if (res < 0) {
+		ath10k_warn(ar, "failed to disable spectral scan: %d\n", res);
+		return res;
+	}
+
+	if (mode == SPECTRAL_DISABLED)
+		return 0;
+
+	if (mode == SPECTRAL_BACKGROUND)
+		count = WMI_SPECTRAL_COUNT_DEFAULT;
+	else
+		count = max_t(u8, 1, ar->spectral.config.count);
+
+	arg.vdev_id = vdev_id;
+	arg.scan_count = count;
+	arg.scan_period = WMI_SPECTRAL_PERIOD_DEFAULT;
+	arg.scan_priority = WMI_SPECTRAL_PRIORITY_DEFAULT;
+	arg.scan_fft_size = ar->spectral.config.fft_size;
+	arg.scan_gc_ena = WMI_SPECTRAL_GC_ENA_DEFAULT;
+	arg.scan_restart_ena = WMI_SPECTRAL_RESTART_ENA_DEFAULT;
+	arg.scan_noise_floor_ref = WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT;
+	arg.scan_init_delay = WMI_SPECTRAL_INIT_DELAY_DEFAULT;
+	arg.scan_nb_tone_thr = WMI_SPECTRAL_NB_TONE_THR_DEFAULT;
+	arg.scan_str_bin_thr = WMI_SPECTRAL_STR_BIN_THR_DEFAULT;
+	arg.scan_wb_rpt_mode = WMI_SPECTRAL_WB_RPT_MODE_DEFAULT;
+	arg.scan_rssi_rpt_mode = WMI_SPECTRAL_RSSI_RPT_MODE_DEFAULT;
+	arg.scan_rssi_thr = WMI_SPECTRAL_RSSI_THR_DEFAULT;
+	arg.scan_pwr_format = WMI_SPECTRAL_PWR_FORMAT_DEFAULT;
+	arg.scan_rpt_mode = WMI_SPECTRAL_RPT_MODE_DEFAULT;
+	arg.scan_bin_scale = WMI_SPECTRAL_BIN_SCALE_DEFAULT;
+	arg.scan_dbm_adj = WMI_SPECTRAL_DBM_ADJ_DEFAULT;
+	arg.scan_chn_mask = WMI_SPECTRAL_CHN_MASK_DEFAULT;
+
+	res = ath10k_wmi_vdev_spectral_conf(ar, &arg);
+	if (res < 0) {
+		ath10k_warn(ar, "failed to configure spectral scan: %d\n", res);
+		return res;
+	}
+
+	return 0;
+}
+
+static ssize_t read_file_spec_scan_ctl(struct file *file, char __user *user_buf,
+				       size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	char *mode = "";
+	unsigned int len;
+	enum ath10k_spectral_mode spectral_mode;
+
+	mutex_lock(&ar->conf_mutex);
+	spectral_mode = ar->spectral.mode;
+	mutex_unlock(&ar->conf_mutex);
+
+	switch (spectral_mode) {
+	case SPECTRAL_DISABLED:
+		mode = "disable";
+		break;
+	case SPECTRAL_BACKGROUND:
+		mode = "background";
+		break;
+	case SPECTRAL_MANUAL:
+		mode = "manual";
+		break;
+	}
+
+	len = strlen(mode);
+	return simple_read_from_buffer(user_buf, count, ppos, mode, len);
+}
+
+static ssize_t write_file_spec_scan_ctl(struct file *file,
+					const char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	char buf[32];
+	ssize_t len;
+	int res;
+
+	len = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (strncmp("trigger", buf, 7) == 0) {
+		if (ar->spectral.mode == SPECTRAL_MANUAL ||
+		    ar->spectral.mode == SPECTRAL_BACKGROUND) {
+			/* reset the configuration to adopt possibly changed
+			 * debugfs parameters
+			 */
+			res = ath10k_spectral_scan_config(ar,
+							  ar->spectral.mode);
+			if (res < 0) {
+				ath10k_warn(ar, "failed to reconfigure spectral scan: %d\n",
+					    res);
+			}
+			res = ath10k_spectral_scan_trigger(ar);
+			if (res < 0) {
+				ath10k_warn(ar, "failed to trigger spectral scan: %d\n",
+					    res);
+			}
+		} else {
+			res = -EINVAL;
+		}
+	} else if (strncmp("background", buf, 10) == 0) {
+		res = ath10k_spectral_scan_config(ar, SPECTRAL_BACKGROUND);
+	} else if (strncmp("manual", buf, 6) == 0) {
+		res = ath10k_spectral_scan_config(ar, SPECTRAL_MANUAL);
+	} else if (strncmp("disable", buf, 7) == 0) {
+		res = ath10k_spectral_scan_config(ar, SPECTRAL_DISABLED);
+	} else {
+		res = -EINVAL;
+	}
+
+	mutex_unlock(&ar->conf_mutex);
+
+	if (res < 0)
+		return res;
+
+	return count;
+}
+
+static const struct file_operations fops_spec_scan_ctl = {
+	.read = read_file_spec_scan_ctl,
+	.write = write_file_spec_scan_ctl,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+static ssize_t read_file_spectral_count(struct file *file,
+					char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	char buf[32];
+	unsigned int len;
+	u8 spectral_count;
+
+	mutex_lock(&ar->conf_mutex);
+	spectral_count = ar->spectral.config.count;
+	mutex_unlock(&ar->conf_mutex);
+
+	len = sprintf(buf, "%d\n", spectral_count);
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_spectral_count(struct file *file,
+					 const char __user *user_buf,
+					 size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	unsigned long val;
+	char buf[32];
+	ssize_t len;
+
+	len = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+	if (kstrtoul(buf, 0, &val))
+		return -EINVAL;
+
+	if (val > 255)
+		return -EINVAL;
+
+	mutex_lock(&ar->conf_mutex);
+	ar->spectral.config.count = val;
+	mutex_unlock(&ar->conf_mutex);
+
+	return count;
+}
+
+static const struct file_operations fops_spectral_count = {
+	.read = read_file_spectral_count,
+	.write = write_file_spectral_count,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+static ssize_t read_file_spectral_bins(struct file *file,
+				       char __user *user_buf,
+				       size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	char buf[32];
+	unsigned int len, bins, fft_size, bin_scale;
+
+	mutex_lock(&ar->conf_mutex);
+
+	fft_size = ar->spectral.config.fft_size;
+	bin_scale = WMI_SPECTRAL_BIN_SCALE_DEFAULT;
+	bins = 1 << (fft_size - bin_scale);
+
+	mutex_unlock(&ar->conf_mutex);
+
+	len = sprintf(buf, "%d\n", bins);
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_spectral_bins(struct file *file,
+					const char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	unsigned long val;
+	char buf[32];
+	ssize_t len;
+
+	len = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+	if (kstrtoul(buf, 0, &val))
+		return -EINVAL;
+
+	if (val < 64 || val > SPECTRAL_ATH10K_MAX_NUM_BINS)
+		return -EINVAL;
+
+	if (!is_power_of_2(val))
+		return -EINVAL;
+
+	mutex_lock(&ar->conf_mutex);
+	ar->spectral.config.fft_size = ilog2(val);
+	ar->spectral.config.fft_size += WMI_SPECTRAL_BIN_SCALE_DEFAULT;
+	mutex_unlock(&ar->conf_mutex);
+
+	return count;
+}
+
+static const struct file_operations fops_spectral_bins = {
+	.read = read_file_spectral_bins,
+	.write = write_file_spectral_bins,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+static struct dentry *create_buf_file_handler(const char *filename,
+					      struct dentry *parent,
+					      umode_t mode,
+					      struct rchan_buf *buf,
+					      int *is_global)
+{
+	struct dentry *buf_file;
+
+	buf_file = debugfs_create_file(filename, mode, parent, buf,
+				       &relay_file_operations);
+	*is_global = 1;
+	return buf_file;
+}
+
+static int remove_buf_file_handler(struct dentry *dentry)
+{
+	debugfs_remove(dentry);
+
+	return 0;
+}
+
+static struct rchan_callbacks rfs_spec_scan_cb = {
+	.create_buf_file = create_buf_file_handler,
+	.remove_buf_file = remove_buf_file_handler,
+};
+
+int ath10k_spectral_start(struct ath10k *ar)
+{
+	struct ath10k_vif *arvif;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	list_for_each_entry(arvif, &ar->arvifs, list)
+		arvif->spectral_enabled = 0;
+
+	ar->spectral.mode = SPECTRAL_DISABLED;
+	ar->spectral.config.count = WMI_SPECTRAL_COUNT_DEFAULT;
+	ar->spectral.config.fft_size = WMI_SPECTRAL_FFT_SIZE_DEFAULT;
+
+	return 0;
+}
+
+int ath10k_spectral_vif_stop(struct ath10k_vif *arvif)
+{
+	if (!arvif->spectral_enabled)
+		return 0;
+
+	return ath10k_spectral_scan_config(arvif->ar, SPECTRAL_DISABLED);
+}
+
+int ath10k_spectral_create(struct ath10k *ar)
+{
+	/* The buffer size covers whole channels in dual bands up to 128 bins.
+	 * A scan with more than 128 bins must be run on a single band at a time.
+	 */
+	ar->spectral.rfs_chan_spec_scan = relay_open("spectral_scan",
+						     ar->debug.debugfs_phy,
+						     1140, 2500,
+						     &rfs_spec_scan_cb, NULL);
+	debugfs_create_file("spectral_scan_ctl",
+			    S_IRUSR | S_IWUSR,
+			    ar->debug.debugfs_phy, ar,
+			    &fops_spec_scan_ctl);
+	debugfs_create_file("spectral_count",
+			    S_IRUSR | S_IWUSR,
+			    ar->debug.debugfs_phy, ar,
+			    &fops_spectral_count);
+	debugfs_create_file("spectral_bins",
+			    S_IRUSR | S_IWUSR,
+			    ar->debug.debugfs_phy, ar,
+			    &fops_spectral_bins);
+
+	return 0;
+}
+
+void ath10k_spectral_destroy(struct ath10k *ar)
+{
+	if (ar->spectral.rfs_chan_spec_scan) {
+		relay_close(ar->spectral.rfs_chan_spec_scan);
+		ar->spectral.rfs_chan_spec_scan = NULL;
+	}
+}
diff --git a/drivers/net/wireless/ath/ath10k/spectral.h b/drivers/net/wireless/ath/ath10k/spectral.h
new file mode 100644
index 0000000..89b0ad7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/spectral.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef SPECTRAL_H
+#define SPECTRAL_H
+
+#include "../spectral_common.h"
+
+/**
+ * struct ath10k_spec_scan - parameters for Atheros spectral scan
+ *
+ * @count: number of scan results requested for manual mode
+ * @fft_size: number of bins to be requested = 2^(fft_size - bin_scale)
+ */
+struct ath10k_spec_scan {
+	u8 count;
+	u8 fft_size;
+};
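+
+/* Example helper (illustrative sketch): the fft_size/bins relation noted
+ * above, e.g. a bin scale of 1 and an fft_size of 7 corresponds to
+ * 1 << (7 - 1) = 64 bins.
+ */
+static inline unsigned int ath10k_spec_scan_bins(u8 fft_size, u8 bin_scale)
+{
+	return 1U << (fft_size - bin_scale);
+}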
+
+/* enum ath10k_spectral_mode:
+ *
+ * @SPECTRAL_DISABLED: spectral mode is disabled
+ * @SPECTRAL_BACKGROUND: hardware sends samples when it is not busy with
+ *	something else.
+ * @SPECTRAL_MANUAL: spectral scan is enabled, triggering for samples
+ *	is performed manually.
+ */
+enum ath10k_spectral_mode {
+	SPECTRAL_DISABLED = 0,
+	SPECTRAL_BACKGROUND,
+	SPECTRAL_MANUAL,
+};
+
+#ifdef CONFIG_ATH10K_DEBUGFS
+
+int ath10k_spectral_process_fft(struct ath10k *ar,
+				struct wmi_phyerr_ev_arg *phyerr,
+				const struct phyerr_fft_report *fftr,
+				size_t bin_len, u64 tsf);
+int ath10k_spectral_start(struct ath10k *ar);
+int ath10k_spectral_vif_stop(struct ath10k_vif *arvif);
+int ath10k_spectral_create(struct ath10k *ar);
+void ath10k_spectral_destroy(struct ath10k *ar);
+
+#else
+
+static inline int
+ath10k_spectral_process_fft(struct ath10k *ar,
+			    struct wmi_phyerr_ev_arg *phyerr,
+			    const struct phyerr_fft_report *fftr,
+			    size_t bin_len, u64 tsf)
+{
+	return 0;
+}
+
+static inline int ath10k_spectral_start(struct ath10k *ar)
+{
+	return 0;
+}
+
+static inline int ath10k_spectral_vif_stop(struct ath10k_vif *arvif)
+{
+	return 0;
+}
+
+static inline int ath10k_spectral_create(struct ath10k *ar)
+{
+	return 0;
+}
+
+static inline void ath10k_spectral_destroy(struct ath10k *ar)
+{
+}
+
+#endif /* CONFIG_ATH10K_DEBUGFS */
+
+#endif /* SPECTRAL_H */
diff --git a/drivers/net/wireless/ath/ath10k/swap.c b/drivers/net/wireless/ath/ath10k/swap.c
new file mode 100644
index 0000000..3ca3fae
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/swap.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* This file implements the code swap logic. With the code swap feature, the
+ * target can run the firmware binary with an even smaller IRAM size by using
+ * host memory to store some of the code segments.
+ */
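+
+/* Layout sketch of the swap binary parsed below (derived from swap.h): a
+ * sequence of { __le32 address; __le32 length; u8 data[length]; } records,
+ * terminated by a struct ath10k_swap_code_seg_tail carrying a 12-byte magic
+ * signature and the BMI write address.
+ */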
+
+#include "core.h"
+#include "bmi.h"
+#include "debug.h"
+
+static int ath10k_swap_code_seg_fill(struct ath10k *ar,
+				     struct ath10k_swap_code_seg_info *seg_info,
+				     const void *data, size_t data_len)
+{
+	u8 *virt_addr = seg_info->virt_address[0];
+	u8 swap_magic[ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ] = {};
+	const u8 *fw_data = data;
+	union ath10k_swap_code_seg_item *swap_item;
+	u32 length = 0;
+	u32 payload_len;
+	u32 total_payload_len = 0;
+	u32 size_left = data_len;
+
+	/* Parse the swap bin and copy the content to host allocated memory.
+	 * The format is address, length and value. The last 4 bytes are the
+	 * target write address. Currently the address field is not used.
+	 */
+	seg_info->target_addr = -1;
+	while (size_left >= sizeof(*swap_item)) {
+		swap_item = (union ath10k_swap_code_seg_item *)fw_data;
+		payload_len = __le32_to_cpu(swap_item->tlv.length);
+		if ((payload_len > size_left) ||
+		    (payload_len == 0 &&
+		     size_left != sizeof(struct ath10k_swap_code_seg_tail))) {
+			ath10k_err(ar, "refusing to parse invalid tlv length %d\n",
+				   payload_len);
+			return -EINVAL;
+		}
+
+		if (payload_len == 0) {
+			if (memcmp(swap_item->tail.magic_signature, swap_magic,
+				   ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ)) {
+				ath10k_err(ar, "refusing an invalid swap file\n");
+				return -EINVAL;
+			}
+			seg_info->target_addr =
+				__le32_to_cpu(swap_item->tail.bmi_write_addr);
+			break;
+		}
+
+		memcpy(virt_addr, swap_item->tlv.data, payload_len);
+		virt_addr += payload_len;
+		length = payload_len + sizeof(struct ath10k_swap_code_seg_tlv);
+		size_left -= length;
+		fw_data += length;
+		total_payload_len += payload_len;
+	}
+
+	if (seg_info->target_addr == -1) {
+		ath10k_err(ar, "failed to parse invalid swap file\n");
+		return -EINVAL;
+	}
+	seg_info->seg_hw_info.swap_size = __cpu_to_le32(total_payload_len);
+
+	return 0;
+}
+
+static void
+ath10k_swap_code_seg_free(struct ath10k *ar,
+			  struct ath10k_swap_code_seg_info *seg_info)
+{
+	u32 seg_size;
+
+	if (!seg_info)
+		return;
+
+	if (!seg_info->virt_address[0])
+		return;
+
+	seg_size = __le32_to_cpu(seg_info->seg_hw_info.size);
+	dma_free_coherent(ar->dev, seg_size, seg_info->virt_address[0],
+			  seg_info->paddr[0]);
+}
+
+static struct ath10k_swap_code_seg_info *
+ath10k_swap_code_seg_alloc(struct ath10k *ar, size_t swap_bin_len)
+{
+	struct ath10k_swap_code_seg_info *seg_info;
+	void *virt_addr;
+	dma_addr_t paddr;
+
+	swap_bin_len = roundup(swap_bin_len, 2);
+	if (swap_bin_len > ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX) {
+		ath10k_err(ar, "refusing code swap bin because it is too big %zu > %d\n",
+			   swap_bin_len, ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX);
+		return NULL;
+	}
+
+	seg_info = devm_kzalloc(ar->dev, sizeof(*seg_info), GFP_KERNEL);
+	if (!seg_info)
+		return NULL;
+
+	virt_addr = dma_alloc_coherent(ar->dev, swap_bin_len, &paddr,
+				       GFP_KERNEL);
+	if (!virt_addr) {
+		ath10k_err(ar, "failed to allocate dma coherent memory\n");
+		return NULL;
+	}
+
+	seg_info->seg_hw_info.bus_addr[0] = __cpu_to_le32(paddr);
+	seg_info->seg_hw_info.size = __cpu_to_le32(swap_bin_len);
+	seg_info->seg_hw_info.swap_size = __cpu_to_le32(swap_bin_len);
+	seg_info->seg_hw_info.num_segs =
+			__cpu_to_le32(ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED);
+	seg_info->seg_hw_info.size_log2 = __cpu_to_le32(ilog2(swap_bin_len));
+	seg_info->virt_address[0] = virt_addr;
+	seg_info->paddr[0] = paddr;
+
+	return seg_info;
+}
+
+int ath10k_swap_code_seg_configure(struct ath10k *ar,
+				   enum ath10k_swap_code_seg_bin_type type)
+{
+	int ret;
+	struct ath10k_swap_code_seg_info *seg_info = NULL;
+
+	switch (type) {
+	case ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW:
+		if (!ar->swap.firmware_swap_code_seg_info)
+			return 0;
+
+		ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot found firmware code swap binary\n");
+		seg_info = ar->swap.firmware_swap_code_seg_info;
+		break;
+	default:
+	case ATH10K_SWAP_CODE_SEG_BIN_TYPE_OTP:
+	case ATH10K_SWAP_CODE_SEG_BIN_TYPE_UTF:
+		ath10k_warn(ar, "ignoring unknown code swap binary type %d\n",
+			    type);
+		return 0;
+	}
+
+	ret = ath10k_bmi_write_memory(ar, seg_info->target_addr,
+				      &seg_info->seg_hw_info,
+				      sizeof(seg_info->seg_hw_info));
+	if (ret) {
+		ath10k_err(ar, "failed to write code swap segment information (%d)\n",
+			   ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+void ath10k_swap_code_seg_release(struct ath10k *ar)
+{
+	ath10k_swap_code_seg_free(ar, ar->swap.firmware_swap_code_seg_info);
+	ar->swap.firmware_codeswap_data = NULL;
+	ar->swap.firmware_codeswap_len = 0;
+	ar->swap.firmware_swap_code_seg_info = NULL;
+}
+
+int ath10k_swap_code_seg_init(struct ath10k *ar)
+{
+	int ret;
+	struct ath10k_swap_code_seg_info *seg_info;
+
+	if (!ar->swap.firmware_codeswap_len || !ar->swap.firmware_codeswap_data)
+		return 0;
+
+	seg_info = ath10k_swap_code_seg_alloc(ar,
+					      ar->swap.firmware_codeswap_len);
+	if (!seg_info) {
+		ath10k_err(ar, "failed to allocate fw code swap segment\n");
+		return -ENOMEM;
+	}
+
+	ret = ath10k_swap_code_seg_fill(ar, seg_info,
+					ar->swap.firmware_codeswap_data,
+					ar->swap.firmware_codeswap_len);
+
+	if (ret) {
+		ath10k_warn(ar, "failed to initialize fw code swap segment: %d\n",
+			    ret);
+		ath10k_swap_code_seg_free(ar, seg_info);
+		return ret;
+	}
+
+	ar->swap.firmware_swap_code_seg_info = seg_info;
+
+	return 0;
+}
diff --git a/drivers/net/wireless/ath/ath10k/swap.h b/drivers/net/wireless/ath/ath10k/swap.h
new file mode 100644
index 0000000..5c89952
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/swap.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _SWAP_H_
+#define _SWAP_H_
+
+#define ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX	(512 * 1024)
+#define ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ	12
+#define ATH10K_SWAP_CODE_SEG_NUM_MAX		16
+/* Currently only one swap segment is supported */
+#define ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED	1
+
+struct ath10k_swap_code_seg_tlv {
+	__le32 address;
+	__le32 length;
+	u8 data[0];
+} __packed;
+
+struct ath10k_swap_code_seg_tail {
+	u8 magic_signature[ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ];
+	__le32 bmi_write_addr;
+} __packed;
+
+union ath10k_swap_code_seg_item {
+	struct ath10k_swap_code_seg_tlv tlv;
+	struct ath10k_swap_code_seg_tail tail;
+} __packed;
+
+enum ath10k_swap_code_seg_bin_type {
+	 ATH10K_SWAP_CODE_SEG_BIN_TYPE_OTP,
+	 ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW,
+	 ATH10K_SWAP_CODE_SEG_BIN_TYPE_UTF,
+};
+
+struct ath10k_swap_code_seg_hw_info {
+	/* Swap binary image size */
+	__le32 swap_size;
+	__le32 num_segs;
+
+	/* Swap data size */
+	__le32 size;
+	__le32 size_log2;
+	__le32 bus_addr[ATH10K_SWAP_CODE_SEG_NUM_MAX];
+	__le64 reserved[ATH10K_SWAP_CODE_SEG_NUM_MAX];
+} __packed;
+
+struct ath10k_swap_code_seg_info {
+	struct ath10k_swap_code_seg_hw_info seg_hw_info;
+	void *virt_address[ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED];
+	u32 target_addr;
+	dma_addr_t paddr[ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED];
+};
+
+int ath10k_swap_code_seg_configure(struct ath10k *ar,
+				   enum ath10k_swap_code_seg_bin_type type);
+void ath10k_swap_code_seg_release(struct ath10k *ar);
+int ath10k_swap_code_seg_init(struct ath10k *ar);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h
new file mode 100644
index 0000000..05a421b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/targaddrs.h
@@ -0,0 +1,459 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __TARGADDRS_H__
+#define __TARGADDRS_H__
+
+#include "hw.h"
+
+/*
+ * xxx_HOST_INTEREST_ADDRESS is the address in Target RAM of the
+ * host_interest structure.  It must match the address of the _host_interest
+ * symbol (see linker script).
+ *
+ * Host Interest is shared between Host and Target in order to coordinate
+ * between the two, and is intended to remain constant (with additions only
+ * at the end) across software releases.
+ *
+ * All addresses are available here so that it's possible to
+ * write a single binary that works with all Target Types.
+ * May be used in assembler code as well as C.
+ */
+#define QCA988X_HOST_INTEREST_ADDRESS    0x00400800
+#define HOST_INTEREST_MAX_SIZE          0x200
+
+/*
+ * These are items that the Host may need to access via BMI or via the
+ * Diagnostic Window. The position of items in this structure must remain
+ * constant across firmware revisions! Types for each item must be fixed
+ * size across target and host platforms. More items may be added at the end.
+ */
+struct host_interest {
+	/*
+	 * Pointer to application-defined area, if any.
+	 * Set by Target application during startup.
+	 */
+	u32 hi_app_host_interest;			/* 0x00 */
+
+	/* Pointer to register dump area, valid after Target crash. */
+	u32 hi_failure_state;				/* 0x04 */
+
+	/* Pointer to debug logging header */
+	u32 hi_dbglog_hdr;				/* 0x08 */
+
+	u32 hi_unused0c;				/* 0x0c */
+
+	/*
+	 * General-purpose flag bits, similar to SOC_OPTION_* flags.
+	 * Can be used by application rather than by OS.
+	 */
+	u32 hi_option_flag;				/* 0x10 */
+
+	/*
+	 * Boolean that determines whether or not to
+	 * display messages on the serial port.
+	 */
+	u32 hi_serial_enable;				/* 0x14 */
+
+	/* Start address of DataSet index, if any */
+	u32 hi_dset_list_head;				/* 0x18 */
+
+	/* Override Target application start address */
+	u32 hi_app_start;				/* 0x1c */
+
+	/* Clock and voltage tuning */
+	u32 hi_skip_clock_init;				/* 0x20 */
+	u32 hi_core_clock_setting;			/* 0x24 */
+	u32 hi_cpu_clock_setting;			/* 0x28 */
+	u32 hi_system_sleep_setting;			/* 0x2c */
+	u32 hi_xtal_control_setting;			/* 0x30 */
+	u32 hi_pll_ctrl_setting_24ghz;			/* 0x34 */
+	u32 hi_pll_ctrl_setting_5ghz;			/* 0x38 */
+	u32 hi_ref_voltage_trim_setting;		/* 0x3c */
+	u32 hi_clock_info;				/* 0x40 */
+
+	/* Host uses BE CPU or not */
+	u32 hi_be;					/* 0x44 */
+
+	u32 hi_stack;	/* normal stack */			/* 0x48 */
+	u32 hi_err_stack; /* error stack */		/* 0x4c */
+	u32 hi_desired_cpu_speed_hz;			/* 0x50 */
+
+	/* Pointer to Board Data  */
+	u32 hi_board_data;				/* 0x54 */
+
+	/*
+	 * Indication of Board Data state:
+	 *    0: board data is not yet initialized.
+	 *    1: board data is initialized; unknown size
+	 *   >1: number of bytes of initialized board data
+	 */
+	u32 hi_board_data_initialized;			/* 0x58 */
+
+	u32 hi_dset_ram_index_table;			/* 0x5c */
+
+	u32 hi_desired_baud_rate;			/* 0x60 */
+	u32 hi_dbglog_config;				/* 0x64 */
+	u32 hi_end_ram_reserve_sz;			/* 0x68 */
+	u32 hi_mbox_io_block_sz;			/* 0x6c */
+
+	u32 hi_num_bpatch_streams;			/* 0x70 -- unused */
+	u32 hi_mbox_isr_yield_limit;			/* 0x74 */
+
+	u32 hi_refclk_hz;				/* 0x78 */
+	u32 hi_ext_clk_detected;			/* 0x7c */
+	u32 hi_dbg_uart_txpin;				/* 0x80 */
+	u32 hi_dbg_uart_rxpin;				/* 0x84 */
+	u32 hi_hci_uart_baud;				/* 0x88 */
+	u32 hi_hci_uart_pin_assignments;		/* 0x8C */
+
+	u32 hi_hci_uart_baud_scale_val;			/* 0x90 */
+	u32 hi_hci_uart_baud_step_val;			/* 0x94 */
+
+	u32 hi_allocram_start;				/* 0x98 */
+	u32 hi_allocram_sz;				/* 0x9c */
+	u32 hi_hci_bridge_flags;			/* 0xa0 */
+	u32 hi_hci_uart_support_pins;			/* 0xa4 */
+
+	u32 hi_hci_uart_pwr_mgmt_params;		/* 0xa8 */
+
+	/*
+	 * 0xa8 - [1]: 0 = UART FC active low, 1 = UART FC active high
+	 *        [31:16]: wakeup timeout in ms
+	 */
+	/* Pointer to extended board Data  */
+	u32 hi_board_ext_data;				/* 0xac */
+	u32 hi_board_ext_data_config;			/* 0xb0 */
+	/*
+	 * Bit [0]  :   valid
+	 * Bit [31:16]:  size
+	 */
+	/*
+	 * hi_reset_flag is used to carry out certain actions at target reset,
+	 * such as restoring app_start after a warm reset, or preserving the
+	 * host interest area, ROM data, literals, etc.
+	 */
+	u32  hi_reset_flag;				/* 0xb4 */
+	/* indicate hi_reset_flag is valid */
+	u32  hi_reset_flag_valid;			/* 0xb8 */
+	u32 hi_hci_uart_pwr_mgmt_params_ext;		/* 0xbc */
+	/* 0xbc - [31:0]: idle timeout in ms */
+	/* ACS flags */
+	u32 hi_acs_flags;				/* 0xc0 */
+	u32 hi_console_flags;				/* 0xc4 */
+	u32 hi_nvram_state;				/* 0xc8 */
+	u32 hi_option_flag2;				/* 0xcc */
+
+	/* If non-zero, override values sent to Host in WMI_READY event. */
+	u32 hi_sw_version_override;			/* 0xd0 */
+	u32 hi_abi_version_override;			/* 0xd4 */
+
+	/*
+	 * Percentage of high priority RX traffic to total expected RX traffic
+	 * applicable only to ar6004
+	 */
+	u32 hi_hp_rx_traffic_ratio;			/* 0xd8 */
+
+	/* test applications flags */
+	u32 hi_test_apps_related;			/* 0xdc */
+	/* location of test script */
+	u32 hi_ota_testscript;				/* 0xe0 */
+	/* location of CAL data */
+	u32 hi_cal_data;				/* 0xe4 */
+
+	/* Number of packet log buffers */
+	u32 hi_pktlog_num_buffers;			/* 0xe8 */
+
+	/* wow extension configuration */
+	u32 hi_wow_ext_config;				/* 0xec */
+	u32 hi_pwr_save_flags;				/* 0xf0 */
+
+	/* Spatial Multiplexing Power Save (SMPS) options */
+	u32 hi_smps_options;				/* 0xf4 */
+
+	/* Interconnect-specific state */
+	u32 hi_interconnect_state;			/* 0xf8 */
+
+	/* Coex configuration flags */
+	u32 hi_coex_config;				/* 0xfc */
+
+	/* Early allocation support */
+	u32 hi_early_alloc;				/* 0x100 */
+	/* FW swap field */
+	/*
+	 * Bits of this 32-bit word are used to pass specific swap
+	 * instructions to FW
+	 */
+	/*
+	 * Bit 0 -- AP Nart descriptor no swap. When this bit is set
+	 * FW will not swap TX descriptor. Meaning packets are formed
+	 * on the target processor.
+	 */
+	/* Bit 1 - unused */
+	u32 hi_fw_swap;					/* 0x104 */
+} __packed;
+
+#define HI_ITEM(item)  offsetof(struct host_interest, item)
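+
+/*
+ * Example: the target RAM address of a host_interest field is the per-chip
+ * *_HOST_INTEREST_ADDRESS plus HI_ITEM(field). On QCA988X, hi_board_data
+ * (offset 0x54) therefore lives at 0x00400800 + 0x54 = 0x00400854.
+ */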
+
+/* Bits defined in hi_option_flag */
+
+/* Enable timer workaround */
+#define HI_OPTION_TIMER_WAR         0x01
+/* Limit BMI command credits */
+#define HI_OPTION_BMI_CRED_LIMIT    0x02
+/* Relay Dot11 hdr to/from host */
+#define HI_OPTION_RELAY_DOT11_HDR   0x04
+/* MAC addr method: 0 - locally administered, 1 - globally unique addrs */
+#define HI_OPTION_MAC_ADDR_METHOD   0x08
+/* Firmware Bridging */
+#define HI_OPTION_FW_BRIDGE         0x10
+/* Enable CPU profiling */
+#define HI_OPTION_ENABLE_PROFILE    0x20
+/* Disable debug logging */
+#define HI_OPTION_DISABLE_DBGLOG    0x40
+/* Skip Era Tracking */
+#define HI_OPTION_SKIP_ERA_TRACKING 0x80
+/* Disable PAPRD (debug) */
+#define HI_OPTION_PAPRD_DISABLE     0x100
+#define HI_OPTION_NUM_DEV_LSB       0x200
+#define HI_OPTION_NUM_DEV_MSB       0x800
+#define HI_OPTION_DEV_MODE_LSB      0x1000
+#define HI_OPTION_DEV_MODE_MSB      0x8000000
+/* Disable LowFreq Timer Stabilization */
+#define HI_OPTION_NO_LFT_STBL       0x10000000
+/* Skip regulatory scan */
+#define HI_OPTION_SKIP_REG_SCAN     0x20000000
+/*
+ * Do regulatory scan during init before
+ * sending WMI ready event to host
+ */
+#define HI_OPTION_INIT_REG_SCAN     0x40000000
+
+/* REV6: Do not adjust memory map */
+#define HI_OPTION_SKIP_MEMMAP       0x80000000
+
+#define HI_OPTION_MAC_ADDR_METHOD_SHIFT 3
+
+/* 2 bits of hi_option_flag are used to represent 4 modes */
+#define HI_OPTION_FW_MODE_IBSS    0x0 /* IBSS Mode */
+#define HI_OPTION_FW_MODE_BSS_STA 0x1 /* STA Mode */
+#define HI_OPTION_FW_MODE_AP      0x2 /* AP Mode */
+#define HI_OPTION_FW_MODE_BT30AMP 0x3 /* BT30 AMP Mode */
+
+/* 2 bits of hi_option_flag are used to represent 4 submodes */
+#define HI_OPTION_FW_SUBMODE_NONE    0x0  /* Normal mode */
+#define HI_OPTION_FW_SUBMODE_P2PDEV  0x1  /* p2p device mode */
+#define HI_OPTION_FW_SUBMODE_P2PCLIENT 0x2 /* p2p client mode */
+#define HI_OPTION_FW_SUBMODE_P2PGO   0x3 /* p2p go mode */
+
+/* Num dev Mask */
+#define HI_OPTION_NUM_DEV_MASK    0x7
+#define HI_OPTION_NUM_DEV_SHIFT   0x9
+
+/* firmware bridging */
+#define HI_OPTION_FW_BRIDGE_SHIFT 0x04
+
+/*
+Fw Mode/SubMode Mask
+|-----------------------------------------------------------------------------|
+|  SUB   |   SUB   |   SUB   |  SUB    |         |         |         |        |
+|MODE[3] | MODE[2] | MODE[1] | MODE[0] | MODE[3] | MODE[2] | MODE[1] | MODE[0]|
+|  (2)   |   (2)   |   (2)   |   (2)   |   (2)   |   (2)   |   (2)   |   (2)  |
+|-----------------------------------------------------------------------------|
+*/
+#define HI_OPTION_FW_MODE_BITS         0x2
+#define HI_OPTION_FW_MODE_MASK         0x3
+#define HI_OPTION_FW_MODE_SHIFT        0xC
+#define HI_OPTION_ALL_FW_MODE_MASK     0xFF
+
+#define HI_OPTION_FW_SUBMODE_BITS      0x2
+#define HI_OPTION_FW_SUBMODE_MASK      0x3
+#define HI_OPTION_FW_SUBMODE_SHIFT     0x14
+#define HI_OPTION_ALL_FW_SUBMODE_MASK  0xFF00
+#define HI_OPTION_ALL_FW_SUBMODE_SHIFT 0x8
+
+/* hi_option_flag2 options */
+#define HI_OPTION_OFFLOAD_AMSDU     0x01
+#define HI_OPTION_DFS_SUPPORT       0x02 /* Enable DFS support */
+#define HI_OPTION_ENABLE_RFKILL     0x04 /* RFKill Enable Feature*/
+#define HI_OPTION_RADIO_RETENTION_DISABLE 0x08 /* Disable radio retention */
+#define HI_OPTION_EARLY_CFG_DONE    0x10 /* Early configuration is complete */
+
+#define HI_OPTION_RF_KILL_SHIFT     0x2
+#define HI_OPTION_RF_KILL_MASK      0x1
+
+/* hi_reset_flag */
+/* preserve App Start address */
+#define HI_RESET_FLAG_PRESERVE_APP_START         0x01
+/* preserve host interest */
+#define HI_RESET_FLAG_PRESERVE_HOST_INTEREST     0x02
+/* preserve ROM data */
+#define HI_RESET_FLAG_PRESERVE_ROMDATA           0x04
+#define HI_RESET_FLAG_PRESERVE_NVRAM_STATE       0x08
+#define HI_RESET_FLAG_PRESERVE_BOOT_INFO         0x10
+#define HI_RESET_FLAG_WARM_RESET	0x20
+
+/* define hi_fw_swap bits */
+#define HI_DESC_IN_FW_BIT	0x01
+
+/* indicate the reset flag is valid */
+#define HI_RESET_FLAG_IS_VALID  0x12345678
+
+/* ACS is enabled */
+#define HI_ACS_FLAGS_ENABLED        (1 << 0)
+/* Use physical WWAN device */
+#define HI_ACS_FLAGS_USE_WWAN       (1 << 1)
+/* Use test VAP */
+#define HI_ACS_FLAGS_TEST_VAP       (1 << 2)
+
+/*
+ * CONSOLE FLAGS
+ *
+ * Bit Range  Meaning
+ * ---------  --------------------------------
+ *   2..0     UART ID (0 = Default)
+ *    3       Baud Select (0 = 9600, 1 = 115200)
+ *   30..4    Reserved
+ *    31      Enable Console
+ *
+ */
+
+#define HI_CONSOLE_FLAGS_ENABLE       (1 << 31)
+#define HI_CONSOLE_FLAGS_UART_MASK    (0x7)
+#define HI_CONSOLE_FLAGS_UART_SHIFT   0
+#define HI_CONSOLE_FLAGS_BAUD_SELECT  (1 << 3)
+
+/* SM power save options */
+#define HI_SMPS_ALLOW_MASK            (0x00000001)
+#define HI_SMPS_MODE_MASK             (0x00000002)
+#define HI_SMPS_MODE_STATIC           (0x00000000)
+#define HI_SMPS_MODE_DYNAMIC          (0x00000002)
+#define HI_SMPS_DISABLE_AUTO_MODE     (0x00000004)
+#define HI_SMPS_DATA_THRESH_MASK      (0x000007f8)
+#define HI_SMPS_DATA_THRESH_SHIFT     (3)
+#define HI_SMPS_RSSI_THRESH_MASK      (0x0007f800)
+#define HI_SMPS_RSSI_THRESH_SHIFT     (11)
+#define HI_SMPS_LOWPWR_CM_MASK        (0x00380000)
+#define HI_SMPS_LOWPWR_CM_SHIFT       (15)
+#define HI_SMPS_HIPWR_CM_MASK         (0x03c00000)
+#define HI_SMPS_HIPWR_CM_SHIFT        (19)
+
+/*
+ * WOW Extension configuration
+ *
+ * Bit Range  Meaning
+ * ---------  --------------------------------
+ *   8..0     Size of each WOW pattern (max 511)
+ *   15..9    Number of patterns per list (max 127)
+ *   17..16   Number of lists (max 4)
+ *   30..18   Reserved
+ *   31       Enabled
+ *
+ *  set values (except enable) to zeros for default settings
+ */
+
+#define HI_WOW_EXT_ENABLED_MASK        (1 << 31)
+#define HI_WOW_EXT_NUM_LIST_SHIFT      16
+#define HI_WOW_EXT_NUM_LIST_MASK       (0x3 << HI_WOW_EXT_NUM_LIST_SHIFT)
+#define HI_WOW_EXT_NUM_PATTERNS_SHIFT  9
+#define HI_WOW_EXT_NUM_PATTERNS_MASK   (0x7F << HI_WOW_EXT_NUM_PATTERNS_SHIFT)
+#define HI_WOW_EXT_PATTERN_SIZE_SHIFT  0
+#define HI_WOW_EXT_PATTERN_SIZE_MASK   (0x1FF << HI_WOW_EXT_PATTERN_SIZE_SHIFT)
+
+#define HI_WOW_EXT_MAKE_CONFIG(num_lists, count, size) \
+	((((num_lists) << HI_WOW_EXT_NUM_LIST_SHIFT) & \
+		HI_WOW_EXT_NUM_LIST_MASK) | \
+	(((count) << HI_WOW_EXT_NUM_PATTERNS_SHIFT) & \
+		HI_WOW_EXT_NUM_PATTERNS_MASK) | \
+	(((size) << HI_WOW_EXT_PATTERN_SIZE_SHIFT) & \
+		HI_WOW_EXT_PATTERN_SIZE_MASK))
+
+#define HI_WOW_EXT_GET_NUM_LISTS(config) \
+	(((config) & HI_WOW_EXT_NUM_LIST_MASK) >> HI_WOW_EXT_NUM_LIST_SHIFT)
+#define HI_WOW_EXT_GET_NUM_PATTERNS(config) \
+	(((config) & HI_WOW_EXT_NUM_PATTERNS_MASK) >> \
+		HI_WOW_EXT_NUM_PATTERNS_SHIFT)
+#define HI_WOW_EXT_GET_PATTERN_SIZE(config) \
+	(((config) & HI_WOW_EXT_PATTERN_SIZE_MASK) >> \
+		HI_WOW_EXT_PATTERN_SIZE_SHIFT)
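+
+/*
+ * Worked example (illustrative values only, not firmware defaults):
+ *
+ *   cfg  = HI_WOW_EXT_MAKE_CONFIG(2, 16, 256)
+ *        = (2 << 16) | (16 << 9) | 256 = 0x22100
+ *   cfg |= HI_WOW_EXT_ENABLED_MASK;        bit 31 is set separately
+ *
+ *   HI_WOW_EXT_GET_NUM_LISTS(cfg)    == 2
+ *   HI_WOW_EXT_GET_NUM_PATTERNS(cfg) == 16
+ *   HI_WOW_EXT_GET_PATTERN_SIZE(cfg) == 256
+ */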
+
+/*
+ * Early allocation configuration
+ * Support RAM bank configuration before BMI done and this eases the memory
+ * allocation at very early stage
+ * Bit Range  Meaning
+ * ---------  ----------------------------------
+ * [0:3]      number of bank assigned to be IRAM
+ * [4:15]     reserved
+ * [16:31]    magic number
+ *
+ * Note:
+ * 1. target firmware checks the magic number and, if it matches, considers
+ *    bits [0:15] valid and uses them to calculate the end of DRAM. Early
+ *    allocation is then located in that area and may be reclaimed when
+ *    necessary
+ * 2. if no magic number is found, early allocation happens at the "_end"
+ *    symbol of ROM, which is located before the app-data and might NOT be
+ *    re-claimable. If this is adopted, the linker script should keep this
+ *    in mind to avoid data corruption.
+ */
+#define HI_EARLY_ALLOC_MAGIC		0x6d8a
+#define HI_EARLY_ALLOC_MAGIC_MASK	0xffff0000
+#define HI_EARLY_ALLOC_MAGIC_SHIFT	16
+#define HI_EARLY_ALLOC_IRAM_BANKS_MASK	0x0000000f
+#define HI_EARLY_ALLOC_IRAM_BANKS_SHIFT	0
+
+#define HI_EARLY_ALLOC_VALID() \
+	((((HOST_INTEREST->hi_early_alloc) & HI_EARLY_ALLOC_MAGIC_MASK) >> \
+	HI_EARLY_ALLOC_MAGIC_SHIFT) == (HI_EARLY_ALLOC_MAGIC))
+#define HI_EARLY_ALLOC_GET_IRAM_BANKS() \
+	(((HOST_INTEREST->hi_early_alloc) & HI_EARLY_ALLOC_IRAM_BANKS_MASK) \
+	>> HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
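+
+/*
+ * Worked example (the value is made up): early_alloc = 0x6d8a0002
+ *
+ *   magic      = (early_alloc & HI_EARLY_ALLOC_MAGIC_MASK) >> 16 = 0x6d8a (valid)
+ *   iram_banks =  early_alloc & HI_EARLY_ALLOC_IRAM_BANKS_MASK   = 2
+ */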
+
+/*power save flag bit definitions*/
+#define HI_PWR_SAVE_LPL_ENABLED   0x1
+/*b1-b3 reserved*/
+/*b4-b5 : dev0 LPL type : 0 - none
+			  1- Reduce Pwr Search
+			  2- Reduce Pwr Listen*/
+/*b6-b7 : dev1 LPL type and so on for Max 8 devices*/
+#define HI_PWR_SAVE_LPL_DEV0_LSB   4
+#define HI_PWR_SAVE_LPL_DEV_MASK   0x3
+/*power save related utility macros*/
+#define HI_LPL_ENABLED() \
+	((HOST_INTEREST->hi_pwr_save_flags & HI_PWR_SAVE_LPL_ENABLED))
+#define HI_DEV_LPL_TYPE_GET(_devix) \
+	(HOST_INTEREST->hi_pwr_save_flags & ((HI_PWR_SAVE_LPL_DEV_MASK) << \
+	 (HI_PWR_SAVE_LPL_DEV0_LSB + (_devix)*2)))
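+
+/*
+ * Note that HI_DEV_LPL_TYPE_GET() returns the field still in place (masked,
+ * not shifted down). E.g. with hi_pwr_save_flags = 0x21 (LPL enabled, dev0
+ * type 2 = Reduce Pwr Listen), HI_DEV_LPL_TYPE_GET(0) yields 0x20, and
+ * shifting right by HI_PWR_SAVE_LPL_DEV0_LSB gives the type value 2.
+ */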
+
+#define HOST_INTEREST_SMPS_IS_ALLOWED() \
+	((HOST_INTEREST->hi_smps_options & HI_SMPS_ALLOW_MASK))
+
+/* Reserve 1024 bytes for extended board data */
+#define QCA988X_BOARD_DATA_SZ     7168
+#define QCA988X_BOARD_EXT_DATA_SZ 0
+
+#define QCA6174_BOARD_DATA_SZ     8192
+#define QCA6174_BOARD_EXT_DATA_SZ 0
+
+#define QCA9377_BOARD_DATA_SZ     QCA6174_BOARD_DATA_SZ
+#define QCA9377_BOARD_EXT_DATA_SZ 0
+
+#define QCA99X0_BOARD_DATA_SZ	  12288
+#define QCA99X0_BOARD_EXT_DATA_SZ 0
+
+#endif /* __TARGADDRS_H__ */
diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c
new file mode 100644
index 0000000..1d5a2fd
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/testmode.c
@@ -0,0 +1,551 @@
+/*
+ * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "testmode.h"
+
+#include <net/netlink.h>
+#include <linux/firmware.h>
+
+#include "debug.h"
+#include "wmi.h"
+#include "hif.h"
+#include "hw.h"
+
+#include "testmode_i.h"
+
+static const struct nla_policy ath10k_tm_policy[ATH10K_TM_ATTR_MAX + 1] = {
+	[ATH10K_TM_ATTR_CMD]		= { .type = NLA_U32 },
+	[ATH10K_TM_ATTR_DATA]		= { .type = NLA_BINARY,
+					    .len = ATH10K_TM_DATA_MAX_LEN },
+	[ATH10K_TM_ATTR_WMI_CMDID]	= { .type = NLA_U32 },
+	[ATH10K_TM_ATTR_VERSION_MAJOR]	= { .type = NLA_U32 },
+	[ATH10K_TM_ATTR_VERSION_MINOR]	= { .type = NLA_U32 },
+};
+
+/* Returns true if callee consumes the skb and the skb should be discarded.
+ * Returns false if skb is not used. Does not sleep.
+ */
+bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb)
+{
+	struct sk_buff *nl_skb;
+	bool consumed;
+	int ret;
+
+	ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
+		   "testmode event wmi cmd_id %d skb %p skb->len %d\n",
+		   cmd_id, skb, skb->len);
+
+	ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
+
+	spin_lock_bh(&ar->data_lock);
+
+	if (!ar->testmode.utf_monitor) {
+		consumed = false;
+		goto out;
+	}
+
+	/* Only testmode.c should be handling events from utf firmware,
+	 * otherwise all sort of problems will arise as mac80211 operations
+	 * are not initialised.
+	 */
+	consumed = true;
+
+	nl_skb = cfg80211_testmode_alloc_event_skb(ar->hw->wiphy,
+						   2 * sizeof(u32) + skb->len,
+						   GFP_ATOMIC);
+	if (!nl_skb) {
+		ath10k_warn(ar,
+			    "failed to allocate skb for testmode wmi event\n");
+		goto out;
+	}
+
+	ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_CMD, ATH10K_TM_CMD_WMI);
+	if (ret) {
+		ath10k_warn(ar,
+			    "failed to put testmode wmi event cmd attribute: %d\n",
+			    ret);
+		kfree_skb(nl_skb);
+		goto out;
+	}
+
+	ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_WMI_CMDID, cmd_id);
+	if (ret) {
+		ath10k_warn(ar,
+			    "failed to put testmode wmi event cmd_id: %d\n",
+			    ret);
+		kfree_skb(nl_skb);
+		goto out;
+	}
+
+	ret = nla_put(nl_skb, ATH10K_TM_ATTR_DATA, skb->len, skb->data);
+	if (ret) {
+		ath10k_warn(ar,
+			    "failed to copy skb to testmode wmi event: %d\n",
+			    ret);
+		kfree_skb(nl_skb);
+		goto out;
+	}
+
+	cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
+
+out:
+	spin_unlock_bh(&ar->data_lock);
+
+	return consumed;
+}
+
+static int ath10k_tm_cmd_get_version(struct ath10k *ar, struct nlattr *tb[])
+{
+	struct sk_buff *skb;
+	int ret;
+
+	ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
+		   "testmode cmd get version_major %d version_minor %d\n",
+		   ATH10K_TESTMODE_VERSION_MAJOR,
+		   ATH10K_TESTMODE_VERSION_MINOR);
+
+	skb = cfg80211_testmode_alloc_reply_skb(ar->hw->wiphy,
+						nla_total_size(sizeof(u32)));
+	if (!skb)
+		return -ENOMEM;
+
+	ret = nla_put_u32(skb, ATH10K_TM_ATTR_VERSION_MAJOR,
+			  ATH10K_TESTMODE_VERSION_MAJOR);
+	if (ret) {
+		kfree_skb(skb);
+		return ret;
+	}
+
+	ret = nla_put_u32(skb, ATH10K_TM_ATTR_VERSION_MINOR,
+			  ATH10K_TESTMODE_VERSION_MINOR);
+	if (ret) {
+		kfree_skb(skb);
+		return ret;
+	}
+
+	return cfg80211_testmode_reply(skb);
+}
+
+static int ath10k_tm_fetch_utf_firmware_api_2(struct ath10k *ar)
+{
+	size_t len, magic_len, ie_len;
+	struct ath10k_fw_ie *hdr;
+	char filename[100];
+	__le32 *version;
+	const u8 *data;
+	int ie_id, ret;
+
+	snprintf(filename, sizeof(filename), "%s/%s",
+		 ar->hw_params.fw.dir, ATH10K_FW_UTF_API2_FILE);
+
+	/* load utf firmware image */
+	ret = request_firmware(&ar->testmode.utf, filename, ar->dev);
+	if (ret) {
+		ath10k_warn(ar, "failed to retrieve utf firmware '%s': %d\n",
+			    filename, ret);
+		return ret;
+	}
+
+	data = ar->testmode.utf->data;
+	len = ar->testmode.utf->size;
+
+	/* FIXME: call release_firmware() in error cases */
+
+	/* magic also includes the null byte, check that as well */
+	magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;
+
+	if (len < magic_len) {
+		ath10k_err(ar, "utf firmware file is too small to contain magic\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) {
+		ath10k_err(ar, "invalid firmware magic\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	/* jump over the padding */
+	magic_len = ALIGN(magic_len, 4);
+
+	len -= magic_len;
+	data += magic_len;
+
+	/* loop elements */
+	while (len > sizeof(struct ath10k_fw_ie)) {
+		hdr = (struct ath10k_fw_ie *)data;
+
+		ie_id = le32_to_cpu(hdr->id);
+		ie_len = le32_to_cpu(hdr->len);
+
+		len -= sizeof(*hdr);
+		data += sizeof(*hdr);
+
+		if (len < ie_len) {
+			ath10k_err(ar, "invalid length for FW IE %d (%zu < %zu)\n",
+				   ie_id, len, ie_len);
+			ret = -EINVAL;
+			goto err;
+		}
+
+		switch (ie_id) {
+		case ATH10K_FW_IE_FW_VERSION:
+			if (ie_len > sizeof(ar->testmode.utf_version) - 1)
+				break;
+
+			memcpy(ar->testmode.utf_version, data, ie_len);
+			ar->testmode.utf_version[ie_len] = '\0';
+
+			ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
+				   "testmode found fw utf version %s\n",
+				   ar->testmode.utf_version);
+			break;
+		case ATH10K_FW_IE_TIMESTAMP:
+			/* ignore timestamp, but don't warn about it either */
+			break;
+		case ATH10K_FW_IE_FW_IMAGE:
+			ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
+				   "testmode found fw image ie (%zd B)\n",
+				   ie_len);
+
+			ar->testmode.utf_firmware_data = data;
+			ar->testmode.utf_firmware_len = ie_len;
+			break;
+		case ATH10K_FW_IE_WMI_OP_VERSION:
+			if (ie_len != sizeof(u32))
+				break;
+			version = (__le32 *)data;
+			ar->testmode.op_version = le32_to_cpup(version);
+			ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode found fw ie wmi op version %d\n",
+				   ar->testmode.op_version);
+			break;
+		default:
+			ath10k_warn(ar, "Unknown testmode FW IE: %u\n",
+				    le32_to_cpu(hdr->id));
+			break;
+		}
+		/* jump over the padding */
+		ie_len = ALIGN(ie_len, 4);
+
+		len -= ie_len;
+		data += ie_len;
+	}
+
+	if (!ar->testmode.utf_firmware_data || !ar->testmode.utf_firmware_len) {
+		ath10k_err(ar, "No ATH10K_FW_IE_FW_IMAGE found\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	return 0;
+
+err:
+	release_firmware(ar->testmode.utf);
+
+	return ret;
+}
+
+static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar)
+{
+	char filename[100];
+	int ret;
+
+	snprintf(filename, sizeof(filename), "%s/%s",
+		 ar->hw_params.fw.dir, ATH10K_FW_UTF_FILE);
+
+	/* load utf firmware image */
+	ret = request_firmware(&ar->testmode.utf, filename, ar->dev);
+	if (ret) {
+		ath10k_warn(ar, "failed to retrieve utf firmware '%s': %d\n",
+			    filename, ret);
+		return ret;
+	}
+
+	/* FW UTF API 1 ("utf.bin") does not advertise firmware features.
+	 * Do an ugly hack where we force the firmware features to match
+	 * the 10.1 branch so that wmi.c will use the correct WMI
+	 * interface.
+	 */
+
+	ar->testmode.op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
+	ar->testmode.utf_firmware_data = ar->testmode.utf->data;
+	ar->testmode.utf_firmware_len = ar->testmode.utf->size;
+
+	return 0;
+}
+
+static int ath10k_tm_fetch_firmware(struct ath10k *ar)
+{
+	int ret;
+
+	ret = ath10k_tm_fetch_utf_firmware_api_2(ar);
+	if (ret == 0) {
+		ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode using fw utf api 2");
+		return 0;
+	}
+
+	ret = ath10k_tm_fetch_utf_firmware_api_1(ar);
+	if (ret) {
+		ath10k_err(ar, "failed to fetch utf firmware binary: %d", ret);
+		return ret;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode using utf api 1");
+
+	return 0;
+}
+
+static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
+{
+	const char *ver;
+	int ret;
+
+	ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode cmd utf start\n");
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state == ATH10K_STATE_UTF) {
+		ret = -EALREADY;
+		goto err;
+	}
+
+	/* start utf only when the driver is not in use  */
+	if (ar->state != ATH10K_STATE_OFF) {
+		ret = -EBUSY;
+		goto err;
+	}
+
+	if (WARN_ON(ar->testmode.utf != NULL)) {
+		/* utf image is already downloaded, it shouldn't be */
+		ret = -EEXIST;
+		goto err;
+	}
+
+	ret = ath10k_tm_fetch_firmware(ar);
+	if (ret) {
+		ath10k_err(ar, "failed to fetch UTF firmware: %d", ret);
+		goto err;
+	}
+
+	spin_lock_bh(&ar->data_lock);
+	ar->testmode.utf_monitor = true;
+	spin_unlock_bh(&ar->data_lock);
+	BUILD_BUG_ON(sizeof(ar->fw_features) !=
+		     sizeof(ar->testmode.orig_fw_features));
+
+	memcpy(ar->testmode.orig_fw_features, ar->fw_features,
+	       sizeof(ar->fw_features));
+	ar->testmode.orig_wmi_op_version = ar->wmi.op_version;
+	memset(ar->fw_features, 0, sizeof(ar->fw_features));
+
+	ar->wmi.op_version = ar->testmode.op_version;
+
+	ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode wmi version %d\n",
+		   ar->wmi.op_version);
+
+	ret = ath10k_hif_power_up(ar);
+	if (ret) {
+		ath10k_err(ar, "failed to power up hif (testmode): %d\n", ret);
+		ar->state = ATH10K_STATE_OFF;
+		goto err_fw_features;
+	}
+
+	ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_UTF);
+	if (ret) {
+		ath10k_err(ar, "failed to start core (testmode): %d\n", ret);
+		ar->state = ATH10K_STATE_OFF;
+		goto err_power_down;
+	}
+
+	ar->state = ATH10K_STATE_UTF;
+
+	if (strlen(ar->testmode.utf_version) > 0)
+		ver = ar->testmode.utf_version;
+	else
+		ver = "API 1";
+
+	ath10k_info(ar, "UTF firmware %s started\n", ver);
+
+	mutex_unlock(&ar->conf_mutex);
+
+	return 0;
+
+err_power_down:
+	ath10k_hif_power_down(ar);
+
+err_fw_features:
+	/* return the original firmware features */
+	memcpy(ar->fw_features, ar->testmode.orig_fw_features,
+	       sizeof(ar->fw_features));
+	ar->wmi.op_version = ar->testmode.orig_wmi_op_version;
+
+	release_firmware(ar->testmode.utf);
+	ar->testmode.utf = NULL;
+
+err:
+	mutex_unlock(&ar->conf_mutex);
+
+	return ret;
+}
+
+static void __ath10k_tm_cmd_utf_stop(struct ath10k *ar)
+{
+	lockdep_assert_held(&ar->conf_mutex);
+
+	ath10k_core_stop(ar);
+	ath10k_hif_power_down(ar);
+
+	spin_lock_bh(&ar->data_lock);
+
+	ar->testmode.utf_monitor = false;
+
+	spin_unlock_bh(&ar->data_lock);
+
+	/* return the original firmware features */
+	memcpy(ar->fw_features, ar->testmode.orig_fw_features,
+	       sizeof(ar->fw_features));
+	ar->wmi.op_version = ar->testmode.orig_wmi_op_version;
+
+	release_firmware(ar->testmode.utf);
+	ar->testmode.utf = NULL;
+
+	ar->state = ATH10K_STATE_OFF;
+}
+
+static int ath10k_tm_cmd_utf_stop(struct ath10k *ar, struct nlattr *tb[])
+{
+	int ret;
+
+	ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode cmd utf stop\n");
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state != ATH10K_STATE_UTF) {
+		ret = -ENETDOWN;
+		goto out;
+	}
+
+	__ath10k_tm_cmd_utf_stop(ar);
+
+	ret = 0;
+
+	ath10k_info(ar, "UTF firmware stopped\n");
+
+out:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+static int ath10k_tm_cmd_wmi(struct ath10k *ar, struct nlattr *tb[])
+{
+	struct sk_buff *skb;
+	int ret, buf_len;
+	u32 cmd_id;
+	void *buf;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state != ATH10K_STATE_UTF) {
+		ret = -ENETDOWN;
+		goto out;
+	}
+
+	if (!tb[ATH10K_TM_ATTR_DATA]) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (!tb[ATH10K_TM_ATTR_WMI_CMDID]) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	buf = nla_data(tb[ATH10K_TM_ATTR_DATA]);
+	buf_len = nla_len(tb[ATH10K_TM_ATTR_DATA]);
+	cmd_id = nla_get_u32(tb[ATH10K_TM_ATTR_WMI_CMDID]);
+
+	ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
+		   "testmode cmd wmi cmd_id %d buf %p buf_len %d\n",
+		   cmd_id, buf, buf_len);
+
+	ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", buf, buf_len);
+
+	skb = ath10k_wmi_alloc_skb(ar, buf_len);
+	if (!skb) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	memcpy(skb->data, buf, buf_len);
+
+	ret = ath10k_wmi_cmd_send(ar, skb, cmd_id);
+	if (ret) {
+		ath10k_warn(ar, "failed to transmit wmi command (testmode): %d\n",
+			    ret);
+		goto out;
+	}
+
+	ret = 0;
+
+out:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+int ath10k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		  void *data, int len)
+{
+	struct ath10k *ar = hw->priv;
+	struct nlattr *tb[ATH10K_TM_ATTR_MAX + 1];
+	int ret;
+
+	ret = nla_parse(tb, ATH10K_TM_ATTR_MAX, data, len,
+			ath10k_tm_policy);
+	if (ret)
+		return ret;
+
+	if (!tb[ATH10K_TM_ATTR_CMD])
+		return -EINVAL;
+
+	switch (nla_get_u32(tb[ATH10K_TM_ATTR_CMD])) {
+	case ATH10K_TM_CMD_GET_VERSION:
+		return ath10k_tm_cmd_get_version(ar, tb);
+	case ATH10K_TM_CMD_UTF_START:
+		return ath10k_tm_cmd_utf_start(ar, tb);
+	case ATH10K_TM_CMD_UTF_STOP:
+		return ath10k_tm_cmd_utf_stop(ar, tb);
+	case ATH10K_TM_CMD_WMI:
+		return ath10k_tm_cmd_wmi(ar, tb);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+void ath10k_testmode_destroy(struct ath10k *ar)
+{
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state != ATH10K_STATE_UTF) {
+		/* utf firmware is not running, nothing to do */
+		goto out;
+	}
+
+	__ath10k_tm_cmd_utf_stop(ar);
+
+out:
+	mutex_unlock(&ar->conf_mutex);
+}
diff --git a/drivers/net/wireless/ath/ath10k/testmode.h b/drivers/net/wireless/ath/ath10k/testmode.h
new file mode 100644
index 0000000..9cdd150
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/testmode.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+
+#ifdef CONFIG_NL80211_TESTMODE
+
+void ath10k_testmode_destroy(struct ath10k *ar);
+
+bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb);
+int ath10k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		  void *data, int len);
+
+#else
+
+static inline void ath10k_testmode_destroy(struct ath10k *ar)
+{
+}
+
+static inline bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id,
+				       struct sk_buff *skb)
+{
+	return false;
+}
+
+static inline int ath10k_tm_cmd(struct ieee80211_hw *hw,
+				struct ieee80211_vif *vif,
+				void *data, int len)
+{
+	return 0;
+}
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/testmode_i.h b/drivers/net/wireless/ath/ath10k/testmode_i.h
new file mode 100644
index 0000000..ba81bf6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/testmode_i.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* "API" level of the ath10k testmode interface. Bump it after every
+ * incompatible interface change.
+ */
+#define ATH10K_TESTMODE_VERSION_MAJOR 1
+
+/* Bump this after every _compatible_ interface change, for example
+ * addition of a new command or an attribute.
+ */
+#define ATH10K_TESTMODE_VERSION_MINOR 0
+
+#define ATH10K_TM_DATA_MAX_LEN		5000
+
+enum ath10k_tm_attr {
+	__ATH10K_TM_ATTR_INVALID	= 0,
+	ATH10K_TM_ATTR_CMD		= 1,
+	ATH10K_TM_ATTR_DATA		= 2,
+	ATH10K_TM_ATTR_WMI_CMDID	= 3,
+	ATH10K_TM_ATTR_VERSION_MAJOR	= 4,
+	ATH10K_TM_ATTR_VERSION_MINOR	= 5,
+
+	/* keep last */
+	__ATH10K_TM_ATTR_AFTER_LAST,
+	ATH10K_TM_ATTR_MAX		= __ATH10K_TM_ATTR_AFTER_LAST - 1,
+};
+
+/* All ath10k testmode interface commands specified in
+ * ATH10K_TM_ATTR_CMD
+ */
+enum ath10k_tm_cmd {
+	/* Returns the supported ath10k testmode interface version in
+	 * ATH10K_TM_ATTR_VERSION_MAJOR and ATH10K_TM_ATTR_VERSION_MINOR.
+	 * Always guaranteed to work. User space uses this to verify it's
+	 * using the correct version of the testmode interface.
+	 */
+	ATH10K_TM_CMD_GET_VERSION = 0,
+
+	/* Boots the UTF firmware, the netdev interface must be down at the
+	 * time.
+	 */
+	/* Boots the UTF firmware; the netdev interface must be down at the
+
+	/* Shuts down the UTF firmware and puts the driver back into OFF
+	 * state.
+	 */
+	ATH10K_TM_CMD_UTF_STOP = 2,
+
+	/* The command used to transmit a WMI command to the firmware and
+	 * the event used to receive WMI events from the firmware. The
+	 * buffer carries only the WMI payload, without the struct
+	 * wmi_cmd_hdr header. The command id is provided with
+	 * ATH10K_TM_ATTR_WMI_CMDID and the payload in ATH10K_TM_ATTR_DATA.
+	 */
+	ATH10K_TM_CMD_WMI = 3,
+};
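
These attributes and commands are carried inside nl80211's testmode container (NL80211_CMD_TESTMODE with the ath10k attributes nested under NL80211_ATTR_TESTDATA); since this is a driver-internal header, a user-space tool mirrors the ATH10K_TM_* values locally. Below is a minimal, hypothetical libnl-3 sketch of packing a raw WMI command for ATH10K_TM_CMD_WMI. The function and variable names are assumptions, error handling and socket setup are omitted, and this is not the driver's own test tool.

  #include <stdint.h>
  #include <netlink/genl/genl.h>
  #include <netlink/msg.h>
  #include <netlink/attr.h>
  #include <linux/nl80211.h>

  /* Assumed to be resolved elsewhere, e.g. via genl_ctrl_resolve() and
   * if_nametoindex(); the ATH10K_TM_* values are mirrored from testmode_i.h. */
  extern int nl80211_family_id;
  extern int ifindex;

  static struct nl_msg *build_tm_wmi_msg(uint32_t wmi_cmd_id,
                                         const void *payload, int payload_len)
  {
          struct nl_msg *msg = nlmsg_alloc();
          struct nlattr *tmdata;

          if (!msg)
                  return NULL;

          genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, nl80211_family_id, 0, 0,
                      NL80211_CMD_TESTMODE, 0);
          nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);

          /* The ath10k attributes are nested inside NL80211_ATTR_TESTDATA. */
          tmdata = nla_nest_start(msg, NL80211_ATTR_TESTDATA);
          nla_put_u32(msg, ATH10K_TM_ATTR_CMD, ATH10K_TM_CMD_WMI);
          nla_put_u32(msg, ATH10K_TM_ATTR_WMI_CMDID, wmi_cmd_id);
          /* Raw WMI payload only, without struct wmi_cmd_hdr. */
          nla_put(msg, ATH10K_TM_ATTR_DATA, payload_len, payload);
          nla_nest_end(msg, tmdata);

          return msg;	/* caller sends with nl_send_auto() and frees it */
  }
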
diff --git a/drivers/net/wireless/ath/ath10k/thermal.c b/drivers/net/wireless/ath/ath10k/thermal.c
new file mode 100644
index 0000000..60fe562
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/thermal.c
@@ -0,0 +1,220 @@
+/*
+ * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/device.h>
+#include <linux/sysfs.h>
+#include <linux/thermal.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include "core.h"
+#include "debug.h"
+#include "wmi-ops.h"
+
+static int
+ath10k_thermal_get_max_throttle_state(struct thermal_cooling_device *cdev,
+				      unsigned long *state)
+{
+	*state = ATH10K_THERMAL_THROTTLE_MAX;
+
+	return 0;
+}
+
+static int
+ath10k_thermal_get_cur_throttle_state(struct thermal_cooling_device *cdev,
+				      unsigned long *state)
+{
+	struct ath10k *ar = cdev->devdata;
+
+	mutex_lock(&ar->conf_mutex);
+	*state = ar->thermal.throttle_state;
+	mutex_unlock(&ar->conf_mutex);
+
+	return 0;
+}
+
+static int
+ath10k_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev,
+				      unsigned long throttle_state)
+{
+	struct ath10k *ar = cdev->devdata;
+
+	if (throttle_state > ATH10K_THERMAL_THROTTLE_MAX) {
+		ath10k_warn(ar, "throttle state %ld exceeds the limit %d\n",
+			    throttle_state, ATH10K_THERMAL_THROTTLE_MAX);
+		return -EINVAL;
+	}
+	mutex_lock(&ar->conf_mutex);
+	ar->thermal.throttle_state = throttle_state;
+	ath10k_thermal_set_throttling(ar);
+	mutex_unlock(&ar->conf_mutex);
+	return 0;
+}
+
+static struct thermal_cooling_device_ops ath10k_thermal_ops = {
+	.get_max_state = ath10k_thermal_get_max_throttle_state,
+	.get_cur_state = ath10k_thermal_get_cur_throttle_state,
+	.set_cur_state = ath10k_thermal_set_cur_throttle_state,
+};
+
+static ssize_t ath10k_thermal_show_temp(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct ath10k *ar = dev_get_drvdata(dev);
+	int ret, temperature;
+	unsigned long time_left;
+
+	mutex_lock(&ar->conf_mutex);
+
+	/* Can't get temperature when the card is off */
+	if (ar->state != ATH10K_STATE_ON) {
+		ret = -ENETDOWN;
+		goto out;
+	}
+
+	reinit_completion(&ar->thermal.wmi_sync);
+	ret = ath10k_wmi_pdev_get_temperature(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to read temperature %d\n", ret);
+		goto out;
+	}
+
+	if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) {
+		ret = -ESHUTDOWN;
+		goto out;
+	}
+
+	time_left = wait_for_completion_timeout(&ar->thermal.wmi_sync,
+						ATH10K_THERMAL_SYNC_TIMEOUT_HZ);
+	if (!time_left) {
+		ath10k_warn(ar, "failed to synchronize thermal read\n");
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	spin_lock_bh(&ar->data_lock);
+	temperature = ar->thermal.temperature;
+	spin_unlock_bh(&ar->data_lock);
+
+	/* display in millidegrees Celsius */
+	ret = snprintf(buf, PAGE_SIZE, "%d\n", temperature * 1000);
+out:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+void ath10k_thermal_event_temperature(struct ath10k *ar, int temperature)
+{
+	spin_lock_bh(&ar->data_lock);
+	ar->thermal.temperature = temperature;
+	spin_unlock_bh(&ar->data_lock);
+	complete(&ar->thermal.wmi_sync);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, ath10k_thermal_show_temp,
+			  NULL, 0);
+
+static struct attribute *ath10k_hwmon_attrs[] = {
+	&sensor_dev_attr_temp1_input.dev_attr.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(ath10k_hwmon);
+
+void ath10k_thermal_set_throttling(struct ath10k *ar)
+{
+	u32 period, duration, enabled;
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
+		return;
+
+	if (ar->state != ATH10K_STATE_ON)
+		return;
+
+	period = ar->thermal.quiet_period;
+	duration = (period * ar->thermal.throttle_state) / 100;
+	enabled = duration ? 1 : 0;
+
+	ret = ath10k_wmi_pdev_set_quiet_mode(ar, period, duration,
+					     ATH10K_QUIET_START_OFFSET,
+					     enabled);
+	if (ret) {
+		ath10k_warn(ar, "failed to set quiet mode period %u duration %u enabled %u ret %d\n",
+			    period, duration, enabled, ret);
+	}
+}
+
+int ath10k_thermal_register(struct ath10k *ar)
+{
+	struct thermal_cooling_device *cdev;
+	struct device *hwmon_dev;
+	int ret;
+
+	cdev = thermal_cooling_device_register("ath10k_thermal", ar,
+					       &ath10k_thermal_ops);
+
+	if (IS_ERR(cdev)) {
+		ath10k_err(ar, "failed to setup thermal device result: %ld\n",
+			   PTR_ERR(cdev));
+		return -EINVAL;
+	}
+
+	ret = sysfs_create_link(&ar->dev->kobj, &cdev->device.kobj,
+				"cooling_device");
+	if (ret) {
+		ath10k_err(ar, "failed to create cooling device symlink\n");
+		goto err_cooling_destroy;
+	}
+
+	ar->thermal.cdev = cdev;
+	ar->thermal.quiet_period = ATH10K_QUIET_PERIOD_DEFAULT;
+
+	/* Do not register hwmon device when temperature reading is not
+	 * supported by firmware
+	 */
+	if (ar->wmi.op_version != ATH10K_FW_WMI_OP_VERSION_10_2_4)
+		return 0;
+
+	/* Avoid a linking error on devm_hwmon_device_register_with_groups();
+	 * linux/hwmon.h seems to be missing proper stubs when CONFIG_HWMON
+	 * is disabled. */
+	if (!config_enabled(CONFIG_HWMON))
+		return 0;
+
+	hwmon_dev = devm_hwmon_device_register_with_groups(ar->dev,
+							   "ath10k_hwmon", ar,
+							   ath10k_hwmon_groups);
+	if (IS_ERR(hwmon_dev)) {
+		ath10k_err(ar, "failed to register hwmon device: %ld\n",
+			   PTR_ERR(hwmon_dev));
+		ret = -EINVAL;
+		goto err_remove_link;
+	}
+	return 0;
+
+err_remove_link:
+	sysfs_remove_link(&ar->dev->kobj, "cooling_device");
+err_cooling_destroy:
+	thermal_cooling_device_unregister(cdev);
+	return ret;
+}
+
+void ath10k_thermal_unregister(struct ath10k *ar)
+{
+	sysfs_remove_link(&ar->dev->kobj, "cooling_device");
+	thermal_cooling_device_unregister(ar->thermal.cdev);
+}
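
ath10k_thermal_set_throttling() maps the cooling-device state (0..ATH10K_THERMAL_THROTTLE_MAX) onto the firmware's quiet-mode duty cycle: the state is the percentage of each quiet period that the radio stays silent. A worked example, assuming the driver headers are included; the state value 40 is illustrative only.

  /* Illustrative duty-cycle arithmetic mirroring the function above. */
  u32 period   = ATH10K_QUIET_PERIOD_DEFAULT;	/* 100 */
  u32 state    = 40;				/* cur_state set via the thermal core */
  u32 duration = (period * state) / 100;	/* 40: quiet for 40 of every 100 units */
  u32 enabled  = duration ? 1 : 0;		/* state 0 disables quiet mode */
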
diff --git a/drivers/net/wireless/ath/ath10k/thermal.h b/drivers/net/wireless/ath/ath10k/thermal.h
new file mode 100644
index 0000000..b610ea5
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/thermal.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef _THERMAL_
+#define _THERMAL_
+
+#define ATH10K_QUIET_PERIOD_DEFAULT     100
+#define ATH10K_QUIET_PERIOD_MIN         25
+#define ATH10K_QUIET_START_OFFSET       10
+#define ATH10K_HWMON_NAME_LEN           15
+#define ATH10K_THERMAL_SYNC_TIMEOUT_HZ (5*HZ)
+#define ATH10K_THERMAL_THROTTLE_MAX     100
+
+struct ath10k_thermal {
+	struct thermal_cooling_device *cdev;
+	struct completion wmi_sync;
+
+	/* protected by conf_mutex */
+	u32 throttle_state;
+	u32 quiet_period;
+	/* temperature value in degrees Celsius
+	 * protected by data_lock
+	 */
+	int temperature;
+};
+
+#ifdef CONFIG_THERMAL
+int ath10k_thermal_register(struct ath10k *ar);
+void ath10k_thermal_unregister(struct ath10k *ar);
+void ath10k_thermal_event_temperature(struct ath10k *ar, int temperature);
+void ath10k_thermal_set_throttling(struct ath10k *ar);
+#else
+static inline int ath10k_thermal_register(struct ath10k *ar)
+{
+	return 0;
+}
+
+static inline void ath10k_thermal_unregister(struct ath10k *ar)
+{
+}
+
+static inline void ath10k_thermal_event_temperature(struct ath10k *ar,
+						    int temperature)
+{
+}
+
+static inline void ath10k_thermal_set_throttling(struct ath10k *ar)
+{
+}
+
+#endif
+#endif /* _THERMAL_ */
diff --git a/drivers/net/wireless/ath/ath10k/trace.c b/drivers/net/wireless/ath/ath10k/trace.c
new file mode 100644
index 0000000..4a31e2c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/trace.c
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
new file mode 100644
index 0000000..71bdb36
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -0,0 +1,539 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#if !defined(_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+
+#include <linux/tracepoint.h>
+#include "core.h"
+
+#if !defined(_TRACE_H_)
+static inline u32 ath10k_frm_hdr_len(const void *buf, size_t len)
+{
+	const struct ieee80211_hdr *hdr = buf;
+
+	/* In some rare cases (e.g. fcs error) device reports frame buffer
+	 * shorter than what frame header implies (e.g. len = 0). The buffer
+	 * can still be accessed so do a simple min() to guarantee caller
+	 * doesn't get value greater than len.
+	 */
+	return min_t(u32, len, ieee80211_hdrlen(hdr->frame_control));
+}
+#endif
+
+#define _TRACE_H_
+
+/* create empty functions when tracing is disabled */
+#if !defined(CONFIG_ATH10K_TRACING)
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(...)
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(evt_class, name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#endif /* !CONFIG_ATH10K_TRACING */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ath10k
+
+#define ATH10K_MSG_MAX 400
+
+DECLARE_EVENT_CLASS(ath10k_log_event,
+	TP_PROTO(struct ath10k *ar, struct va_format *vaf),
+	TP_ARGS(ar, vaf),
+	TP_STRUCT__entry(
+		__string(device, dev_name(ar->dev))
+		__string(driver, dev_driver_string(ar->dev))
+		__dynamic_array(char, msg, ATH10K_MSG_MAX)
+	),
+	TP_fast_assign(
+		__assign_str(device, dev_name(ar->dev));
+		__assign_str(driver, dev_driver_string(ar->dev));
+		WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+				       ATH10K_MSG_MAX,
+				       vaf->fmt,
+				       *vaf->va) >= ATH10K_MSG_MAX);
+	),
+	TP_printk(
+		"%s %s %s",
+		__get_str(driver),
+		__get_str(device),
+		__get_str(msg)
+	)
+);
+
+DEFINE_EVENT(ath10k_log_event, ath10k_log_err,
+	     TP_PROTO(struct ath10k *ar, struct va_format *vaf),
+	     TP_ARGS(ar, vaf)
+);
+
+DEFINE_EVENT(ath10k_log_event, ath10k_log_warn,
+	     TP_PROTO(struct ath10k *ar, struct va_format *vaf),
+	     TP_ARGS(ar, vaf)
+);
+
+DEFINE_EVENT(ath10k_log_event, ath10k_log_info,
+	     TP_PROTO(struct ath10k *ar, struct va_format *vaf),
+	     TP_ARGS(ar, vaf)
+);
+
+TRACE_EVENT(ath10k_log_dbg,
+	TP_PROTO(struct ath10k *ar, unsigned int level, struct va_format *vaf),
+	TP_ARGS(ar, level, vaf),
+	TP_STRUCT__entry(
+		__string(device, dev_name(ar->dev))
+		__string(driver, dev_driver_string(ar->dev))
+		__field(unsigned int, level)
+		__dynamic_array(char, msg, ATH10K_MSG_MAX)
+	),
+	TP_fast_assign(
+		__assign_str(device, dev_name(ar->dev));
+		__assign_str(driver, dev_driver_string(ar->dev));
+		__entry->level = level;
+		WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+				       ATH10K_MSG_MAX,
+				       vaf->fmt,
+				       *vaf->va) >= ATH10K_MSG_MAX);
+	),
+	TP_printk(
+		"%s %s %s",
+		__get_str(driver),
+		__get_str(device),
+		__get_str(msg)
+	)
+);
+
+TRACE_EVENT(ath10k_log_dbg_dump,
+	TP_PROTO(struct ath10k *ar, const char *msg, const char *prefix,
+		 const void *buf, size_t buf_len),
+
+	TP_ARGS(ar, msg, prefix, buf, buf_len),
+
+	TP_STRUCT__entry(
+		__string(device, dev_name(ar->dev))
+		__string(driver, dev_driver_string(ar->dev))
+		__string(msg, msg)
+		__string(prefix, prefix)
+		__field(size_t, buf_len)
+		__dynamic_array(u8, buf, buf_len)
+	),
+
+	TP_fast_assign(
+		__assign_str(device, dev_name(ar->dev));
+		__assign_str(driver, dev_driver_string(ar->dev));
+		__assign_str(msg, msg);
+		__assign_str(prefix, prefix);
+		__entry->buf_len = buf_len;
+		memcpy(__get_dynamic_array(buf), buf, buf_len);
+	),
+
+	TP_printk(
+		"%s %s %s/%s\n",
+		__get_str(driver),
+		__get_str(device),
+		__get_str(prefix),
+		__get_str(msg)
+	)
+);
+
+TRACE_EVENT(ath10k_wmi_cmd,
+	TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len,
+		 int ret),
+
+	TP_ARGS(ar, id, buf, buf_len, ret),
+
+	TP_STRUCT__entry(
+		__string(device, dev_name(ar->dev))
+		__string(driver, dev_driver_string(ar->dev))
+		__field(unsigned int, id)
+		__field(size_t, buf_len)
+		__dynamic_array(u8, buf, buf_len)
+		__field(int, ret)
+	),
+
+	TP_fast_assign(
+		__assign_str(device, dev_name(ar->dev));
+		__assign_str(driver, dev_driver_string(ar->dev));
+		__entry->id = id;
+		__entry->buf_len = buf_len;
+		__entry->ret = ret;
+		memcpy(__get_dynamic_array(buf), buf, buf_len);
+	),
+
+	TP_printk(
+		"%s %s id %d len %zu ret %d",
+		__get_str(driver),
+		__get_str(device),
+		__entry->id,
+		__entry->buf_len,
+		__entry->ret
+	)
+);
+
+TRACE_EVENT(ath10k_wmi_event,
+	TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len),
+
+	TP_ARGS(ar, id, buf, buf_len),
+
+	TP_STRUCT__entry(
+		__string(device, dev_name(ar->dev))
+		__string(driver, dev_driver_string(ar->dev))
+		__field(unsigned int, id)
+		__field(size_t, buf_len)
+		__dynamic_array(u8, buf, buf_len)
+	),
+
+	TP_fast_assign(
+		__assign_str(device, dev_name(ar->dev));
+		__assign_str(driver, dev_driver_string(ar->dev));
+		__entry->id = id;
+		__entry->buf_len = buf_len;
+		memcpy(__get_dynamic_array(buf), buf, buf_len);
+	),
+
+	TP_printk(
+		"%s %s id %d len %zu",
+		__get_str(driver),
+		__get_str(device),
+		__entry->id,
+		__entry->buf_len
+	)
+);
+
+TRACE_EVENT(ath10k_htt_stats,
+	TP_PROTO(struct ath10k *ar, const void *buf, size_t buf_len),
+
+	TP_ARGS(ar, buf, buf_len),
+
+	TP_STRUCT__entry(
+		__string(device, dev_name(ar->dev))
+		__string(driver, dev_driver_string(ar->dev))
+		__field(size_t, buf_len)
+		__dynamic_array(u8, buf, buf_len)
+	),
+
+	TP_fast_assign(
+		__assign_str(device, dev_name(ar->dev));
+		__assign_str(driver, dev_driver_string(ar->dev));
+		__entry->buf_len = buf_len;
+		memcpy(__get_dynamic_array(buf), buf, buf_len);
+	),
+
+	TP_printk(
+		"%s %s len %zu",
+		__get_str(driver),
+		__get_str(device),
+		__entry->buf_len
+	)
+);
+
+TRACE_EVENT(ath10k_wmi_dbglog,
+	TP_PROTO(struct ath10k *ar, const void *buf, size_t buf_len),
+
+	TP_ARGS(ar, buf, buf_len),
+
+	TP_STRUCT__entry(
+		__string(device, dev_name(ar->dev))
+		__string(driver, dev_driver_string(ar->dev))
+		__field(size_t, buf_len)
+		__dynamic_array(u8, buf, buf_len)
+	),
+
+	TP_fast_assign(
+		__assign_str(device, dev_name(ar->dev));
+		__assign_str(driver, dev_driver_string(ar->dev));
+		__entry->buf_len = buf_len;
+		memcpy(__get_dynamic_array(buf), buf, buf_len);
+	),
+
+	TP_printk(
+		"%s %s len %zu",
+		__get_str(driver),
+		__get_str(device),
+		__entry->buf_len
+	)
+);
+
+TRACE_EVENT(ath10k_htt_pktlog,
+	    TP_PROTO(struct ath10k *ar, const void *buf, u16 buf_len),
+
+	TP_ARGS(ar, buf, buf_len),
+
+	TP_STRUCT__entry(
+		__string(device, dev_name(ar->dev))
+		__string(driver, dev_driver_string(ar->dev))
+		__field(u16, buf_len)
+		__dynamic_array(u8, pktlog, buf_len)
+	),
+
+	TP_fast_assign(
+		__assign_str(device, dev_name(ar->dev));
+		__assign_str(driver, dev_driver_string(ar->dev));
+		__entry->buf_len = buf_len;
+		memcpy(__get_dynamic_array(pktlog), buf, buf_len);
+	),
+
+	TP_printk(
+		"%s %s size %hu",
+		__get_str(driver),
+		__get_str(device),
+		__entry->buf_len
+	 )
+);
+
+TRACE_EVENT(ath10k_htt_tx,
+	    TP_PROTO(struct ath10k *ar, u16 msdu_id, u16 msdu_len,
+		     u8 vdev_id, u8 tid),
+
+	TP_ARGS(ar, msdu_id, msdu_len, vdev_id, tid),
+
+	TP_STRUCT__entry(
+		__string(device, dev_name(ar->dev))
+		__string(driver, dev_driver_string(ar->dev))
+		__field(u16, msdu_id)
+		__field(u16, msdu_len)
+		__field(u8, vdev_id)
+		__field(u8, tid)
+	),
+
+	TP_fast_assign(
+		__assign_str(device, dev_name(ar->dev));
+		__assign_str(driver, dev_driver_string(ar->dev));
+		__entry->msdu_id = msdu_id;
+		__entry->msdu_len = msdu_len;
+		__entry->vdev_id = vdev_id;
+		__entry->tid = tid;
+	),
+
+	TP_printk(
+		"%s %s msdu_id %d msdu_len %d vdev_id %d tid %d",
+		__get_str(driver),
+		__get_str(device),
+		__entry->msdu_id,
+		__entry->msdu_len,
+		__entry->vdev_id,
+		__entry->tid
+	 )
+);
+
+TRACE_EVENT(ath10k_txrx_tx_unref,
+	    TP_PROTO(struct ath10k *ar, u16 msdu_id),
+
+	TP_ARGS(ar, msdu_id),
+
+	TP_STRUCT__entry(
+		__string(device, dev_name(ar->dev))
+		__string(driver, dev_driver_string(ar->dev))
+		__field(u16, msdu_id)
+	),
+
+	TP_fast_assign(
+		__assign_str(device, dev_name(ar->dev));
+		__assign_str(driver, dev_driver_string(ar->dev));
+		__entry->msdu_id = msdu_id;
+	),
+
+	TP_printk(
+		"%s %s msdu_id %d",
+		__get_str(driver),
+		__get_str(device),
+		__entry->msdu_id
+	 )
+);
+
+DECLARE_EVENT_CLASS(ath10k_hdr_event,
+		    TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+
+	TP_ARGS(ar, data, len),
+
+	TP_STRUCT__entry(
+		__string(device, dev_name(ar->dev))
+		__string(driver, dev_driver_string(ar->dev))
+		__field(size_t, len)
+		__dynamic_array(u8, data, ath10k_frm_hdr_len(data, len))
+	),
+
+	TP_fast_assign(
+		__assign_str(device, dev_name(ar->dev));
+		__assign_str(driver, dev_driver_string(ar->dev));
+		__entry->len = ath10k_frm_hdr_len(data, len);
+		memcpy(__get_dynamic_array(data), data, __entry->len);
+	),
+
+	TP_printk(
+		"%s %s len %zu\n",
+		__get_str(driver),
+		__get_str(device),
+		__entry->len
+	)
+);
+
+DECLARE_EVENT_CLASS(ath10k_payload_event,
+		    TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+
+	TP_ARGS(ar, data, len),
+
+	TP_STRUCT__entry(
+		__string(device, dev_name(ar->dev))
+		__string(driver, dev_driver_string(ar->dev))
+		__field(size_t, len)
+		__dynamic_array(u8, payload, (len -
+					      ath10k_frm_hdr_len(data, len)))
+	),
+
+	TP_fast_assign(
+		__assign_str(device, dev_name(ar->dev));
+		__assign_str(driver, dev_driver_string(ar->dev));
+		__entry->len = len - ath10k_frm_hdr_len(data, len);
+		memcpy(__get_dynamic_array(payload),
+		       data + ath10k_frm_hdr_len(data, len), __entry->len);
+	),
+
+	TP_printk(
+		"%s %s len %zu\n",
+		__get_str(driver),
+		__get_str(device),
+		__entry->len
+	)
+);
+
+DEFINE_EVENT(ath10k_hdr_event, ath10k_tx_hdr,
+	     TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+	     TP_ARGS(ar, data, len)
+);
+
+DEFINE_EVENT(ath10k_payload_event, ath10k_tx_payload,
+	     TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+	     TP_ARGS(ar, data, len)
+);
+
+DEFINE_EVENT(ath10k_hdr_event, ath10k_rx_hdr,
+	     TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+	     TP_ARGS(ar, data, len)
+);
+
+DEFINE_EVENT(ath10k_payload_event, ath10k_rx_payload,
+	     TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+	     TP_ARGS(ar, data, len)
+);
+
+TRACE_EVENT(ath10k_htt_rx_desc,
+	    TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+
+	TP_ARGS(ar, data, len),
+
+	TP_STRUCT__entry(
+		__string(device, dev_name(ar->dev))
+		__string(driver, dev_driver_string(ar->dev))
+		__field(u16, len)
+		__dynamic_array(u8, rxdesc, len)
+	),
+
+	TP_fast_assign(
+		__assign_str(device, dev_name(ar->dev));
+		__assign_str(driver, dev_driver_string(ar->dev));
+		__entry->len = len;
+		memcpy(__get_dynamic_array(rxdesc), data, len);
+	),
+
+	TP_printk(
+		"%s %s rxdesc len %d",
+		__get_str(driver),
+		__get_str(device),
+		__entry->len
+	 )
+);
+
+TRACE_EVENT(ath10k_wmi_diag_container,
+	    TP_PROTO(struct ath10k *ar,
+		     u8 type,
+		     u32 timestamp,
+		     u32 code,
+		     u16 len,
+		     const void *data),
+
+	TP_ARGS(ar, type, timestamp, code, len, data),
+
+	TP_STRUCT__entry(
+		__string(device, dev_name(ar->dev))
+		__string(driver, dev_driver_string(ar->dev))
+		__field(u8, type)
+		__field(u32, timestamp)
+		__field(u32, code)
+		__field(u16, len)
+		__dynamic_array(u8, data, len)
+	),
+
+	TP_fast_assign(
+		__assign_str(device, dev_name(ar->dev));
+		__assign_str(driver, dev_driver_string(ar->dev));
+		__entry->type = type;
+		__entry->timestamp = timestamp;
+		__entry->code = code;
+		__entry->len = len;
+		memcpy(__get_dynamic_array(data), data, len);
+	),
+
+	TP_printk(
+		"%s %s diag container type %hhu timestamp %u code %u len %d",
+		__get_str(driver),
+		__get_str(device),
+		__entry->type,
+		__entry->timestamp,
+		__entry->code,
+		__entry->len
+	)
+);
+
+TRACE_EVENT(ath10k_wmi_diag,
+	    TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+
+	TP_ARGS(ar, data, len),
+
+	TP_STRUCT__entry(
+		__string(device, dev_name(ar->dev))
+		__string(driver, dev_driver_string(ar->dev))
+		__field(u16, len)
+		__dynamic_array(u8, data, len)
+	),
+
+	TP_fast_assign(
+		__assign_str(device, dev_name(ar->dev));
+		__assign_str(driver, dev_driver_string(ar->dev));
+		__entry->len = len;
+		memcpy(__get_dynamic_array(data), data, len);
+	),
+
+	TP_printk(
+		"%s %s tlv diag len %d",
+		__get_str(driver),
+		__get_str(device),
+		__entry->len
+	)
+);
+
+#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ */
+
+/* we don't want to use include/trace/events */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
new file mode 100644
index 0000000..6d1105a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "txrx.h"
+#include "htt.h"
+#include "mac.h"
+#include "debug.h"
+
+static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb)
+{
+	if (!ATH10K_SKB_CB(skb)->htt.is_offchan)
+		return;
+
+	/* If the original wait_for_completion() timed out before
+	 * {data,mgmt}_tx_completed() was called then we could complete
+	 * offchan_tx_completed for a different skb. Prevent this by using
+	 * offchan_tx_skb. */
+	spin_lock_bh(&ar->data_lock);
+	if (ar->offchan_tx_skb != skb) {
+		ath10k_warn(ar, "completed old offchannel frame\n");
+		goto out;
+	}
+
+	complete(&ar->offchan_tx_completed);
+	ar->offchan_tx_skb = NULL; /* just for sanity */
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb);
+out:
+	spin_unlock_bh(&ar->data_lock);
+}
+
+void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
+			  const struct htt_tx_done *tx_done)
+{
+	struct ath10k *ar = htt->ar;
+	struct device *dev = ar->dev;
+	struct ieee80211_tx_info *info;
+	struct ath10k_skb_cb *skb_cb;
+	struct sk_buff *msdu;
+	struct ieee80211_hdr *hdr;
+	__le16 fc;
+	bool limit_mgmt_desc = false;
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT,
+		   "htt tx completion msdu_id %u discard %d no_ack %d success %d\n",
+		   tx_done->msdu_id, !!tx_done->discard,
+		   !!tx_done->no_ack, !!tx_done->success);
+
+	if (tx_done->msdu_id >= htt->max_num_pending_tx) {
+		ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n",
+			    tx_done->msdu_id);
+		return;
+	}
+
+	spin_lock_bh(&htt->tx_lock);
+	msdu = idr_find(&htt->pending_tx, tx_done->msdu_id);
+	if (!msdu) {
+		ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n",
+			    tx_done->msdu_id);
+		spin_unlock_bh(&htt->tx_lock);
+		return;
+	}
+
+	hdr = (struct ieee80211_hdr *)msdu->data;
+	fc = hdr->frame_control;
+
+	if (unlikely(ieee80211_is_mgmt(fc)) &&
+	    ar->hw_params.max_probe_resp_desc_thres)
+		limit_mgmt_desc = true;
+
+	ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
+	__ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
+	if (htt->num_pending_tx == 0)
+		wake_up(&htt->empty_tx_wq);
+	spin_unlock_bh(&htt->tx_lock);
+
+	skb_cb = ATH10K_SKB_CB(msdu);
+	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+
+	ath10k_report_offchan_tx(htt->ar, msdu);
+
+	info = IEEE80211_SKB_CB(msdu);
+	memset(&info->status, 0, sizeof(info->status));
+	trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id);
+
+	if (tx_done->discard) {
+		ieee80211_free_txskb(htt->ar->hw, msdu);
+		return;
+	}
+
+	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+		info->flags |= IEEE80211_TX_STAT_ACK;
+
+	if (tx_done->no_ack)
+		info->flags &= ~IEEE80211_TX_STAT_ACK;
+
+	if (tx_done->success && (info->flags & IEEE80211_TX_CTL_NO_ACK))
+		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+
+	ieee80211_tx_status(htt->ar->hw, msdu);
+	/* we do not own the msdu anymore */
+}
+
+struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
+				     const u8 *addr)
+{
+	struct ath10k_peer *peer;
+
+	lockdep_assert_held(&ar->data_lock);
+
+	list_for_each_entry(peer, &ar->peers, list) {
+		if (peer->vdev_id != vdev_id)
+			continue;
+		if (memcmp(peer->addr, addr, ETH_ALEN))
+			continue;
+
+		return peer;
+	}
+
+	return NULL;
+}
+
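+/* A single peer can have several firmware peer ids mapped to it; they are
+ * tracked in the peer->peer_ids bitmap, so look the peer up by bit.
+ */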
+struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id)
+{
+	struct ath10k_peer *peer;
+
+	lockdep_assert_held(&ar->data_lock);
+
+	list_for_each_entry(peer, &ar->peers, list)
+		if (test_bit(peer_id, peer->peer_ids))
+			return peer;
+
+	return NULL;
+}
+
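+/* Wait up to 3 seconds for the firmware peer map/unmap events to bring the
+ * peer list into the expected state. The wait also completes early when a
+ * crash flush is in progress.
+ */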
+static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id,
+				       const u8 *addr, bool expect_mapped)
+{
+	long time_left;
+
+	time_left = wait_event_timeout(ar->peer_mapping_wq, ({
+			bool mapped;
+
+			spin_lock_bh(&ar->data_lock);
+			mapped = !!ath10k_peer_find(ar, vdev_id, addr);
+			spin_unlock_bh(&ar->data_lock);
+
+			(mapped == expect_mapped ||
+			 test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags));
+		}), 3*HZ);
+
+	if (time_left == 0)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id, const u8 *addr)
+{
+	return ath10k_wait_for_peer_common(ar, vdev_id, addr, true);
+}
+
+int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id, const u8 *addr)
+{
+	return ath10k_wait_for_peer_common(ar, vdev_id, addr, false);
+}
+
+void ath10k_peer_map_event(struct ath10k_htt *htt,
+			   struct htt_peer_map_event *ev)
+{
+	struct ath10k *ar = htt->ar;
+	struct ath10k_peer *peer;
+
+	spin_lock_bh(&ar->data_lock);
+	peer = ath10k_peer_find(ar, ev->vdev_id, ev->addr);
+	if (!peer) {
+		peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
+		if (!peer)
+			goto exit;
+
+		peer->vdev_id = ev->vdev_id;
+		ether_addr_copy(peer->addr, ev->addr);
+		list_add(&peer->list, &ar->peers);
+		wake_up(&ar->peer_mapping_wq);
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n",
+		   ev->vdev_id, ev->addr, ev->peer_id);
+
+	set_bit(ev->peer_id, peer->peer_ids);
+exit:
+	spin_unlock_bh(&ar->data_lock);
+}
+
+void ath10k_peer_unmap_event(struct ath10k_htt *htt,
+			     struct htt_peer_unmap_event *ev)
+{
+	struct ath10k *ar = htt->ar;
+	struct ath10k_peer *peer;
+
+	spin_lock_bh(&ar->data_lock);
+	peer = ath10k_peer_find_by_id(ar, ev->peer_id);
+	if (!peer) {
+		ath10k_warn(ar, "peer-unmap-event: unknown peer id %d\n",
+			    ev->peer_id);
+		goto exit;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
+		   peer->vdev_id, peer->addr, ev->peer_id);
+
+	clear_bit(ev->peer_id, peer->peer_ids);
+
+	if (bitmap_empty(peer->peer_ids, ATH10K_MAX_NUM_PEER_IDS)) {
+		list_del(&peer->list);
+		kfree(peer);
+		wake_up(&ar->peer_mapping_wq);
+	}
+
+exit:
+	spin_unlock_bh(&ar->data_lock);
+}
diff --git a/drivers/net/wireless/ath/ath10k/txrx.h b/drivers/net/wireless/ath/ath10k/txrx.h
new file mode 100644
index 0000000..a90e09f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/txrx.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef _TXRX_H_
+#define _TXRX_H_
+
+#include "htt.h"
+
+void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
+			  const struct htt_tx_done *tx_done);
+
+struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
+				     const u8 *addr);
+struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id);
+int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id,
+				 const u8 *addr);
+int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id,
+				 const u8 *addr);
+
+void ath10k_peer_map_event(struct ath10k_htt *htt,
+			   struct htt_peer_map_event *ev);
+void ath10k_peer_unmap_event(struct ath10k_htt *htt,
+			     struct htt_peer_unmap_event *ev);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
new file mode 100644
index 0000000..cfed580
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -0,0 +1,1336 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _WMI_OPS_H_
+#define _WMI_OPS_H_
+
+struct ath10k;
+struct sk_buff;
+
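+/* Per-firmware WMI backend. Each implementation fills in this table; the
+ * inline wrappers below dispatch through ar->wmi.ops and return -EOPNOTSUPP
+ * for commands a backend does not provide.
+ */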
+struct wmi_ops {
+	void (*rx)(struct ath10k *ar, struct sk_buff *skb);
+	void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);
+
+	int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
+			 struct wmi_scan_ev_arg *arg);
+	int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
+			    struct wmi_mgmt_rx_ev_arg *arg);
+	int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
+			    struct wmi_ch_info_ev_arg *arg);
+	int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
+			       struct wmi_vdev_start_ev_arg *arg);
+	int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
+			      struct wmi_peer_kick_ev_arg *arg);
+	int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
+			 struct wmi_swba_ev_arg *arg);
+	int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
+			       struct wmi_phyerr_hdr_arg *arg);
+	int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
+			   int left_len, struct wmi_phyerr_ev_arg *arg);
+	int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
+			    struct wmi_svc_rdy_ev_arg *arg);
+	int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
+			struct wmi_rdy_ev_arg *arg);
+	int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
+			     struct ath10k_fw_stats *stats);
+	int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
+			    struct wmi_roam_ev_arg *arg);
+	int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
+			      struct wmi_wow_ev_arg *arg);
+	enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);
+
+	struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
+	struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
+	struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
+					   u16 rd5g, u16 ctl2g, u16 ctl5g,
+					   enum wmi_dfs_region dfs_reg);
+	struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
+					      u32 value);
+	struct sk_buff *(*gen_init)(struct ath10k *ar);
+	struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
+					  const struct wmi_start_scan_arg *arg);
+	struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
+					 const struct wmi_stop_scan_arg *arg);
+	struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
+					   enum wmi_vdev_type type,
+					   enum wmi_vdev_subtype subtype,
+					   const u8 macaddr[ETH_ALEN]);
+	struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
+	struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
+					  const struct wmi_vdev_start_request_arg *arg,
+					  bool restart);
+	struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
+	struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
+				       const u8 *bssid);
+	struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
+	struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
+					      u32 param_id, u32 param_value);
+	struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
+						const struct wmi_vdev_install_key_arg *arg);
+	struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
+						  const struct wmi_vdev_spectral_conf_arg *arg);
+	struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
+						    u32 trigger, u32 enable);
+	struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
+					     const struct wmi_wmm_params_all_arg *arg);
+	struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
+					   const u8 peer_addr[ETH_ALEN],
+					   enum wmi_peer_type peer_type);
+	struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
+					   const u8 peer_addr[ETH_ALEN]);
+	struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
+					  const u8 peer_addr[ETH_ALEN],
+					  u32 tid_bitmap);
+	struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
+					      const u8 *peer_addr,
+					      enum wmi_peer_param param_id,
+					      u32 param_value);
+	struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
+					  const struct wmi_peer_assoc_complete_arg *arg);
+	struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
+					  enum wmi_sta_ps_mode psmode);
+	struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
+					  enum wmi_sta_powersave_param param_id,
+					  u32 value);
+	struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
+					 const u8 *mac,
+					 enum wmi_ap_ps_peer_param param_id,
+					 u32 value);
+	struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
+					      const struct wmi_scan_chan_list_arg *arg);
+	struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
+					  const void *bcn, size_t bcn_len,
+					  u32 bcn_paddr, bool dtim_zero,
+					  bool deliver_cab);
+	struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
+					    const struct wmi_wmm_params_all_arg *arg);
+	struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
+	struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
+					     enum wmi_force_fw_hang_type type,
+					     u32 delay_ms);
+	struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
+	struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable,
+					  u32 log_level);
+	struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
+	struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
+	struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
+						   u32 period, u32 duration,
+						   u32 next_offset,
+						   u32 enabled);
+	struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
+	struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
+						const u8 *mac);
+	struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
+					  const u8 *mac, u32 tid, u32 buf_size);
+	struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
+					      const u8 *mac, u32 tid,
+					      u32 status);
+	struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
+					  const u8 *mac, u32 tid, u32 initiator,
+					  u32 reason);
+	struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
+					u32 tim_ie_offset, struct sk_buff *bcn,
+					u32 prb_caps, u32 prb_erp,
+					void *prb_ies, size_t prb_ies_len);
+	struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
+					struct sk_buff *bcn);
+	struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
+					     const u8 *p2p_ie);
+	struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
+					      const u8 peer_addr[ETH_ALEN],
+					      const struct wmi_sta_uapsd_auto_trig_arg *args,
+					      u32 num_ac);
+	struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
+					     const struct wmi_sta_keepalive_arg *arg);
+	struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
+	struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
+						    enum wmi_wow_wakeup_event event,
+						    u32 enable);
+	struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
+	struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
+					       u32 pattern_id,
+					       const u8 *pattern,
+					       const u8 *mask,
+					       int pattern_len,
+					       int pattern_offset);
+	struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
+					       u32 pattern_id);
+	struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
+						    u32 vdev_id,
+						    enum wmi_tdls_state state);
+	struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
+						const struct wmi_tdls_peer_update_cmd_arg *arg,
+						const struct wmi_tdls_peer_capab_arg *cap,
+						const struct wmi_channel_arg *chan);
+	struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
+	struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
+						   u32 param);
+	void (*fw_stats_fill)(struct ath10k *ar,
+			      struct ath10k_fw_stats *fw_stats,
+			      char *buf);
+	struct sk_buff *(*gen_pdev_enable_adaptive_cca)(struct ath10k *ar,
+							u8 enable,
+							u32 detect_level,
+							u32 detect_margin);
+};
+
+int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
+
+static inline int
+ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+	if (WARN_ON_ONCE(!ar->wmi.ops->rx))
+		return -EOPNOTSUPP;
+
+	ar->wmi.ops->rx(ar, skb);
+	return 0;
+}
+
+static inline int
+ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
+		   size_t len)
+{
+	if (!ar->wmi.ops->map_svc)
+		return -EOPNOTSUPP;
+
+	ar->wmi.ops->map_svc(in, out, len);
+	return 0;
+}
+
+static inline int
+ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
+		     struct wmi_scan_ev_arg *arg)
+{
+	if (!ar->wmi.ops->pull_scan)
+		return -EOPNOTSUPP;
+
+	return ar->wmi.ops->pull_scan(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
+			struct wmi_mgmt_rx_ev_arg *arg)
+{
+	if (!ar->wmi.ops->pull_mgmt_rx)
+		return -EOPNOTSUPP;
+
+	return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
+			struct wmi_ch_info_ev_arg *arg)
+{
+	if (!ar->wmi.ops->pull_ch_info)
+		return -EOPNOTSUPP;
+
+	return ar->wmi.ops->pull_ch_info(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
+			   struct wmi_vdev_start_ev_arg *arg)
+{
+	if (!ar->wmi.ops->pull_vdev_start)
+		return -EOPNOTSUPP;
+
+	return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
+			  struct wmi_peer_kick_ev_arg *arg)
+{
+	if (!ar->wmi.ops->pull_peer_kick)
+		return -EOPNOTSUPP;
+
+	return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
+		     struct wmi_swba_ev_arg *arg)
+{
+	if (!ar->wmi.ops->pull_swba)
+		return -EOPNOTSUPP;
+
+	return ar->wmi.ops->pull_swba(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
+			   struct wmi_phyerr_hdr_arg *arg)
+{
+	if (!ar->wmi.ops->pull_phyerr_hdr)
+		return -EOPNOTSUPP;
+
+	return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
+		       int left_len, struct wmi_phyerr_ev_arg *arg)
+{
+	if (!ar->wmi.ops->pull_phyerr)
+		return -EOPNOTSUPP;
+
+	return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
+}
+
+static inline int
+ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
+			struct wmi_svc_rdy_ev_arg *arg)
+{
+	if (!ar->wmi.ops->pull_svc_rdy)
+		return -EOPNOTSUPP;
+
+	return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
+		    struct wmi_rdy_ev_arg *arg)
+{
+	if (!ar->wmi.ops->pull_rdy)
+		return -EOPNOTSUPP;
+
+	return ar->wmi.ops->pull_rdy(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
+			 struct ath10k_fw_stats *stats)
+{
+	if (!ar->wmi.ops->pull_fw_stats)
+		return -EOPNOTSUPP;
+
+	return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
+}
+
+static inline int
+ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
+			struct wmi_roam_ev_arg *arg)
+{
+	if (!ar->wmi.ops->pull_roam_ev)
+		return -EOPNOTSUPP;
+
+	return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
+			  struct wmi_wow_ev_arg *arg)
+{
+	if (!ar->wmi.ops->pull_wow_event)
+		return -EOPNOTSUPP;
+
+	return ar->wmi.ops->pull_wow_event(ar, skb, arg);
+}
+
+static inline enum wmi_txbf_conf
+ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
+{
+	if (!ar->wmi.ops->get_txbf_conf_scheme)
+		return WMI_TXBF_CONF_UNSUPPORTED;
+
+	return ar->wmi.ops->get_txbf_conf_scheme(ar);
+}
+
+static inline int
+ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
+	struct sk_buff *skb;
+	int ret;
+
+	if (!ar->wmi.ops->gen_mgmt_tx)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid);
+	if (ret)
+		return ret;
+
+	/* FIXME There's no ACK event for Management Tx. This probably
+	 * shouldn't be called here either. */
+	info->flags |= IEEE80211_TX_STAT_ACK;
+	ieee80211_tx_status_irqsafe(ar->hw, msdu);
+
+	return 0;
+}
+
+static inline int
+ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
+			      u16 ctl2g, u16 ctl5g,
+			      enum wmi_dfs_region dfs_reg)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_pdev_set_rd)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
+					   dfs_reg);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->pdev_set_regdomain_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_pdev_suspend)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_resume_target(struct ath10k *ar)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_pdev_resume)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_pdev_resume(ar);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_pdev_set_param)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
+}
+
+static inline int
+ath10k_wmi_cmd_init(struct ath10k *ar)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_init)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_init(ar);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
+}
+
+static inline int
+ath10k_wmi_start_scan(struct ath10k *ar,
+		      const struct wmi_start_scan_arg *arg)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_start_scan)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_start_scan(ar, arg);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
+}
+
+static inline int
+ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_stop_scan)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_stop_scan(ar, arg);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
+		       enum wmi_vdev_type type,
+		       enum wmi_vdev_subtype subtype,
+		       const u8 macaddr[ETH_ALEN])
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_vdev_create)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_vdev_delete)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_start(struct ath10k *ar,
+		      const struct wmi_vdev_start_request_arg *arg)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_vdev_start)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->vdev_start_request_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_restart(struct ath10k *ar,
+			const struct wmi_vdev_start_request_arg *arg)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_vdev_start)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->vdev_restart_request_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_vdev_stop)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_vdev_up)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_vdev_down)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
+			  u32 param_value)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_vdev_set_param)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
+					      param_value);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_install_key(struct ath10k *ar,
+			    const struct wmi_vdev_install_key_arg *arg)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_vdev_install_key)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->vdev_install_key_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
+			      const struct wmi_vdev_spectral_conf_arg *arg)
+{
+	struct sk_buff *skb;
+	u32 cmd_id;
+
+	if (!ar->wmi.ops->gen_vdev_spectral_conf)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
+	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
+				u32 enable)
+{
+	struct sk_buff *skb;
+	u32 cmd_id;
+
+	if (!ar->wmi.ops->gen_vdev_spectral_enable)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
+						    enable);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
+	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
+			  const u8 peer_addr[ETH_ALEN],
+			  const struct wmi_sta_uapsd_auto_trig_arg *args,
+			  u32 num_ac)
+{
+	struct sk_buff *skb;
+	u32 cmd_id;
+
+	if (!ar->wmi.ops->gen_vdev_sta_uapsd)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
+					      num_ac);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
+	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
+			 const struct wmi_wmm_params_all_arg *arg)
+{
+	struct sk_buff *skb;
+	u32 cmd_id;
+
+	if (!ar->wmi.ops->gen_vdev_wmm_conf)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
+	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
+		       const u8 peer_addr[ETH_ALEN],
+		       enum wmi_peer_type peer_type)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_peer_create)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
+}
+
+static inline int
+ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
+		       const u8 peer_addr[ETH_ALEN])
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_peer_delete)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
+}
+
+static inline int
+ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
+		      const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_peer_flush)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
+}
+
+static inline int
+ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
+			  enum wmi_peer_param param_id, u32 param_value)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_peer_set_param)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
+					      param_value);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
+}
+
+static inline int
+ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
+		      enum wmi_sta_ps_mode psmode)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_set_psmode)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->sta_powersave_mode_cmdid);
+}
+
+static inline int
+ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
+			    enum wmi_sta_powersave_param param_id, u32 value)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_set_sta_ps)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->sta_powersave_param_cmdid);
+}
+
+static inline int
+ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+			   enum wmi_ap_ps_peer_param param_id, u32 value)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_set_ap_ps)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->ap_ps_peer_param_cmdid);
+}
+
+static inline int
+ath10k_wmi_scan_chan_list(struct ath10k *ar,
+			  const struct wmi_scan_chan_list_arg *arg)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_scan_chan_list)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
+}
+
+static inline int
+ath10k_wmi_peer_assoc(struct ath10k *ar,
+		      const struct wmi_peer_assoc_complete_arg *arg)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_peer_assoc)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
+}
+
+static inline int
+ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
+				  const void *bcn, size_t bcn_len,
+				  u32 bcn_paddr, bool dtim_zero,
+				  bool deliver_cab)
+{
+	struct sk_buff *skb;
+	int ret;
+
+	if (!ar->wmi.ops->gen_beacon_dma)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
+					  dtim_zero, deliver_cab);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	ret = ath10k_wmi_cmd_send_nowait(ar, skb,
+					 ar->wmi.cmd->pdev_send_bcn_cmdid);
+	if (ret) {
+		dev_kfree_skb(skb);
+		return ret;
+	}
+
+	return 0;
+}
+
+static inline int
+ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
+			       const struct wmi_wmm_params_all_arg *arg)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_pdev_set_wmm)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->pdev_set_wmm_params_cmdid);
+}
+
+static inline int
+ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_request_stats)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
+}
+
+static inline int
+ath10k_wmi_force_fw_hang(struct ath10k *ar,
+			 enum wmi_force_fw_hang_type type, u32 delay_ms)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_force_fw_hang)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
+}
+
+static inline int
+ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable, u32 log_level)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_dbglog_cfg)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_pktlog_enable)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_pktlog_disable)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_pktlog_disable(ar);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->pdev_pktlog_disable_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
+			       u32 next_offset, u32 enabled)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
+						   next_offset, enabled);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_pdev_get_temperature)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->pdev_get_temperature_cmdid);
+}
+
+static inline int
+ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_addba_clear_resp)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->addba_clear_resp_cmdid);
+}
+
+static inline int
+ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+		      u32 tid, u32 buf_size)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_addba_send)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->addba_send_cmdid);
+}
+
+static inline int
+ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+			  u32 tid, u32 status)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_addba_set_resp)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->addba_set_resp_cmdid);
+}
+
+static inline int
+ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+		      u32 tid, u32 initiator, u32 reason)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_delba_send)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
+					  reason);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->delba_send_cmdid);
+}
+
+static inline int
+ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
+		    struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
+		    void *prb_ies, size_t prb_ies_len)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_bcn_tmpl)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
+					prb_caps, prb_erp, prb_ies,
+					prb_ies_len);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
+}
+
+static inline int
+ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_prb_tmpl)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
+}
+
+static inline int
+ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
+}
+
+static inline int
+ath10k_wmi_sta_keepalive(struct ath10k *ar,
+			 const struct wmi_sta_keepalive_arg *arg)
+{
+	struct sk_buff *skb;
+	u32 cmd_id;
+
+	if (!ar->wmi.ops->gen_sta_keepalive)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
+	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_wow_enable(struct ath10k *ar)
+{
+	struct sk_buff *skb;
+	u32 cmd_id;
+
+	if (!ar->wmi.ops->gen_wow_enable)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_wow_enable(ar);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	cmd_id = ar->wmi.cmd->wow_enable_cmdid;
+	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
+				enum wmi_wow_wakeup_event event,
+				u32 enable)
+{
+	struct sk_buff *skb;
+	u32 cmd_id;
+
+	if (!ar->wmi.ops->gen_wow_add_wakeup_event)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
+	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
+{
+	struct sk_buff *skb;
+	u32 cmd_id;
+
+	if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
+	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
+			   const u8 *pattern, const u8 *mask,
+			   int pattern_len, int pattern_offset)
+{
+	struct sk_buff *skb;
+	u32 cmd_id;
+
+	if (!ar->wmi.ops->gen_wow_add_pattern)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
+					       pattern, mask, pattern_len,
+					       pattern_offset);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
+	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
+{
+	struct sk_buff *skb;
+	u32 cmd_id;
+
+	if (!ar->wmi.ops->gen_wow_del_pattern)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
+	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
+				enum wmi_tdls_state state)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_update_fw_tdls_state)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
+}
+
+static inline int
+ath10k_wmi_tdls_peer_update(struct ath10k *ar,
+			    const struct wmi_tdls_peer_update_cmd_arg *arg,
+			    const struct wmi_tdls_peer_capab_arg *cap,
+			    const struct wmi_channel_arg *chan)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_tdls_peer_update)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->tdls_peer_update_cmdid);
+}
+
+static inline int
+ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_adaptive_qcs)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_pdev_get_tpc_config)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);
+
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->pdev_get_tpc_config_cmdid);
+}
+
+static inline int
+ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
+			 char *buf)
+{
+	if (!ar->wmi.ops->fw_stats_fill)
+		return -EOPNOTSUPP;
+
+	ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);
+	return 0;
+}
+
+static inline int
+ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
+				    u32 detect_level, u32 detect_margin)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_pdev_enable_adaptive_cca)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable,
+							detect_level,
+							detect_margin);
+
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
+}
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
new file mode 100644
index 0000000..6fbd17b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -0,0 +1,3498 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include "core.h"
+#include "debug.h"
+#include "mac.h"
+#include "hw.h"
+#include "wmi.h"
+#include "wmi-ops.h"
+#include "wmi-tlv.h"
+#include "p2p.h"
+#include "testmode.h"
+
+/***************/
+/* TLV helpers */
+/***************/
+
+struct wmi_tlv_policy {
+	size_t min_len;
+};
+
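+/* Minimum payload length for each known TLV tag; ath10k_wmi_tlv_iter()
+ * rejects any TLV whose length is below its policy entry.
+ */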
+static const struct wmi_tlv_policy wmi_tlv_policies[] = {
+	[WMI_TLV_TAG_ARRAY_BYTE]
+		= { .min_len = 0 },
+	[WMI_TLV_TAG_ARRAY_UINT32]
+		= { .min_len = 0 },
+	[WMI_TLV_TAG_STRUCT_SCAN_EVENT]
+		= { .min_len = sizeof(struct wmi_scan_event) },
+	[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR]
+		= { .min_len = sizeof(struct wmi_tlv_mgmt_rx_ev) },
+	[WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT]
+		= { .min_len = sizeof(struct wmi_chan_info_event) },
+	[WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT]
+		= { .min_len = sizeof(struct wmi_vdev_start_response_event) },
+	[WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT]
+		= { .min_len = sizeof(struct wmi_peer_sta_kickout_event) },
+	[WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT]
+		= { .min_len = sizeof(struct wmi_host_swba_event) },
+	[WMI_TLV_TAG_STRUCT_TIM_INFO]
+		= { .min_len = sizeof(struct wmi_tim_info) },
+	[WMI_TLV_TAG_STRUCT_P2P_NOA_INFO]
+		= { .min_len = sizeof(struct wmi_p2p_noa_info) },
+	[WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT]
+		= { .min_len = sizeof(struct wmi_tlv_svc_rdy_ev) },
+	[WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES]
+		= { .min_len = sizeof(struct hal_reg_capabilities) },
+	[WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ]
+		= { .min_len = sizeof(struct wlan_host_mem_req) },
+	[WMI_TLV_TAG_STRUCT_READY_EVENT]
+		= { .min_len = sizeof(struct wmi_tlv_rdy_ev) },
+	[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT]
+		= { .min_len = sizeof(struct wmi_tlv_bcn_tx_status_ev) },
+	[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT]
+		= { .min_len = sizeof(struct wmi_tlv_diag_data_ev) },
+	[WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT]
+		= { .min_len = sizeof(struct wmi_tlv_p2p_noa_ev) },
+	[WMI_TLV_TAG_STRUCT_ROAM_EVENT]
+		= { .min_len = sizeof(struct wmi_tlv_roam_ev) },
+	[WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO]
+		= { .min_len = sizeof(struct wmi_tlv_wow_event_info) },
+	[WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT]
+		= { .min_len = sizeof(struct wmi_tlv_tx_pause_ev) },
+};
+
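+/* Walk a buffer of TLVs, validating each header and its minimum length
+ * against wmi_tlv_policies, and call @iter for every element. Parsing
+ * stops at the first malformed TLV.
+ */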
+static int
+ath10k_wmi_tlv_iter(struct ath10k *ar, const void *ptr, size_t len,
+		    int (*iter)(struct ath10k *ar, u16 tag, u16 len,
+				const void *ptr, void *data),
+		    void *data)
+{
+	const void *begin = ptr;
+	const struct wmi_tlv *tlv;
+	u16 tlv_tag, tlv_len;
+	int ret;
+
+	while (len > 0) {
+		if (len < sizeof(*tlv)) {
+			ath10k_dbg(ar, ATH10K_DBG_WMI,
+				   "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
+				   ptr - begin, len, sizeof(*tlv));
+			return -EINVAL;
+		}
+
+		tlv = ptr;
+		tlv_tag = __le16_to_cpu(tlv->tag);
+		tlv_len = __le16_to_cpu(tlv->len);
+		ptr += sizeof(*tlv);
+		len -= sizeof(*tlv);
+
+		if (tlv_len > len) {
+			ath10k_dbg(ar, ATH10K_DBG_WMI,
+				   "wmi tlv parse failure of tag %hu at byte %zd (%zu bytes left, %hu expected)\n",
+				   tlv_tag, ptr - begin, len, tlv_len);
+			return -EINVAL;
+		}
+
+		if (tlv_tag < ARRAY_SIZE(wmi_tlv_policies) &&
+		    wmi_tlv_policies[tlv_tag].min_len &&
+		    wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
+			ath10k_dbg(ar, ATH10K_DBG_WMI,
+				   "wmi tlv parse failure of tag %hu at byte %zd (%hu bytes is less than min length %zu)\n",
+				   tlv_tag, ptr - begin, tlv_len,
+				   wmi_tlv_policies[tlv_tag].min_len);
+			return -EINVAL;
+		}
+
+		ret = iter(ar, tlv_tag, tlv_len, ptr, data);
+		if (ret)
+			return ret;
+
+		ptr += tlv_len;
+		len -= tlv_len;
+	}
+
+	return 0;
+}
+
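+/* Iterator callback: record a pointer to each tag's payload in the
+ * caller-provided tb[] lookup table, indexed by tag.
+ */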
+static int ath10k_wmi_tlv_iter_parse(struct ath10k *ar, u16 tag, u16 len,
+				     const void *ptr, void *data)
+{
+	const void **tb = data;
+
+	if (tag < WMI_TLV_TAG_MAX)
+		tb[tag] = ptr;
+
+	return 0;
+}
+
+static int ath10k_wmi_tlv_parse(struct ath10k *ar, const void **tb,
+				const void *ptr, size_t len)
+{
+	return ath10k_wmi_tlv_iter(ar, ptr, len, ath10k_wmi_tlv_iter_parse,
+				   (void *)tb);
+}
+
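+/* Parse a TLV blob into a freshly allocated tag-to-pointer lookup table.
+ * The caller owns the returned array and must kfree() it.
+ */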
+static const void **
+ath10k_wmi_tlv_parse_alloc(struct ath10k *ar, const void *ptr,
+			   size_t len, gfp_t gfp)
+{
+	const void **tb;
+	int ret;
+
+	tb = kcalloc(WMI_TLV_TAG_MAX, sizeof(*tb), gfp);
+	if (!tb)
+		return ERR_PTR(-ENOMEM);
+
+	ret = ath10k_wmi_tlv_parse(ar, tb, ptr, len);
+	if (ret) {
+		kfree(tb);
+		return ERR_PTR(ret);
+	}
+
+	return tb;
+}
+
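+/* @ptr points just past a struct wmi_tlv header; step back one header to
+ * read the element's length.
+ */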
+static u16 ath10k_wmi_tlv_len(const void *ptr)
+{
+	return __le16_to_cpu((((const struct wmi_tlv *)ptr) - 1)->len);
+}
+
+/**************/
+/* TLV events */
+/**************/
+static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar,
+					      struct sk_buff *skb)
+{
+	const void **tb;
+	const struct wmi_tlv_bcn_tx_status_ev *ev;
+	struct ath10k_vif *arvif;
+	u32 vdev_id, tx_status;
+	int ret;
+
+	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+		return ret;
+	}
+
+	ev = tb[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT];
+	if (!ev) {
+		kfree(tb);
+		return -EPROTO;
+	}
+
+	tx_status = __le32_to_cpu(ev->tx_status);
+	vdev_id = __le32_to_cpu(ev->vdev_id);
+
+	switch (tx_status) {
+	case WMI_TLV_BCN_TX_STATUS_OK:
+		break;
+	case WMI_TLV_BCN_TX_STATUS_XRETRY:
+	case WMI_TLV_BCN_TX_STATUS_DROP:
+	case WMI_TLV_BCN_TX_STATUS_FILTERED:
+		/* FIXME: It's probably worth telling mac80211 to stop the
+		 * interface as it is crippled.
+		 */
+		ath10k_warn(ar, "received bcn tmpl tx status on vdev %i: %d\n",
+			    vdev_id, tx_status);
+		break;
+	}
+
+	arvif = ath10k_get_arvif(ar, vdev_id);
+	if (arvif && arvif->is_up && arvif->vif->csa_active)
+		ieee80211_queue_work(ar->hw, &arvif->ap_csa_work);
+
+	kfree(tb);
+	return 0;
+}
+
+static int ath10k_wmi_tlv_event_diag_data(struct ath10k *ar,
+					  struct sk_buff *skb)
+{
+	const void **tb;
+	const struct wmi_tlv_diag_data_ev *ev;
+	const struct wmi_tlv_diag_item *item;
+	const void *data;
+	int ret, num_items, len;
+
+	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+		return ret;
+	}
+
+	ev = tb[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT];
+	data = tb[WMI_TLV_TAG_ARRAY_BYTE];
+	if (!ev || !data) {
+		kfree(tb);
+		return -EPROTO;
+	}
+
+	num_items = __le32_to_cpu(ev->num_items);
+	len = ath10k_wmi_tlv_len(data);
+
+	while (num_items--) {
+		if (len == 0)
+			break;
+		if (len < sizeof(*item)) {
+			ath10k_warn(ar, "failed to parse diag data: can't fit item header\n");
+			break;
+		}
+
+		item = data;
+
+		if (len < sizeof(*item) + __le16_to_cpu(item->len)) {
+			ath10k_warn(ar, "failed to parse diag data: item is too long\n");
+			break;
+		}
+
+		trace_ath10k_wmi_diag_container(ar,
+						item->type,
+						__le32_to_cpu(item->timestamp),
+						__le32_to_cpu(item->code),
+						__le16_to_cpu(item->len),
+						item->payload);
+
+		len -= sizeof(*item);
+		len -= roundup(__le16_to_cpu(item->len), 4);
+
+		data += sizeof(*item);
+		data += roundup(__le16_to_cpu(item->len), 4);
+	}
+
+	if (num_items != -1 || len != 0)
+		ath10k_warn(ar, "failed to parse diag data event: num_items %d len %d\n",
+			    num_items, len);
+
+	kfree(tb);
+	return 0;
+}
+
+static int ath10k_wmi_tlv_event_diag(struct ath10k *ar,
+				     struct sk_buff *skb)
+{
+	const void **tb;
+	const void *data;
+	int ret, len;
+
+	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+		return ret;
+	}
+
+	data = tb[WMI_TLV_TAG_ARRAY_BYTE];
+	if (!data) {
+		kfree(tb);
+		return -EPROTO;
+	}
+	len = ath10k_wmi_tlv_len(data);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv diag event len %d\n", len);
+	trace_ath10k_wmi_diag(ar, data, len);
+
+	kfree(tb);
+	return 0;
+}
+
+static int ath10k_wmi_tlv_event_p2p_noa(struct ath10k *ar,
+					struct sk_buff *skb)
+{
+	const void **tb;
+	const struct wmi_tlv_p2p_noa_ev *ev;
+	const struct wmi_p2p_noa_info *noa;
+	int ret, vdev_id;
+
+	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+		return ret;
+	}
+
+	ev = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT];
+	noa = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_INFO];
+
+	if (!ev || !noa) {
+		kfree(tb);
+		return -EPROTO;
+	}
+
+	vdev_id = __le32_to_cpu(ev->vdev_id);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi tlv p2p noa vdev_id %i descriptors %hhu\n",
+		   vdev_id, noa->num_descriptors);
+
+	ath10k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);
+	kfree(tb);
+	return 0;
+}
+
+static int ath10k_wmi_tlv_event_tx_pause(struct ath10k *ar,
+					 struct sk_buff *skb)
+{
+	const void **tb;
+	const struct wmi_tlv_tx_pause_ev *ev;
+	int ret, vdev_id;
+	u32 pause_id, action, vdev_map, peer_id, tid_map;
+
+	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+		return ret;
+	}
+
+	ev = tb[WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT];
+	if (!ev) {
+		kfree(tb);
+		return -EPROTO;
+	}
+
+	pause_id = __le32_to_cpu(ev->pause_id);
+	action = __le32_to_cpu(ev->action);
+	vdev_map = __le32_to_cpu(ev->vdev_map);
+	peer_id = __le32_to_cpu(ev->peer_id);
+	tid_map = __le32_to_cpu(ev->tid_map);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi tlv tx pause pause_id %u action %u vdev_map 0x%08x peer_id %u tid_map 0x%08x\n",
+		   pause_id, action, vdev_map, peer_id, tid_map);
+
+	switch (pause_id) {
+	case WMI_TLV_TX_PAUSE_ID_MCC:
+	case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA:
+	case WMI_TLV_TX_PAUSE_ID_P2P_GO_PS:
+	case WMI_TLV_TX_PAUSE_ID_AP_PS:
+	case WMI_TLV_TX_PAUSE_ID_IBSS_PS:
+		for (vdev_id = 0; vdev_map; vdev_id++) {
+			if (!(vdev_map & BIT(vdev_id)))
+				continue;
+
+			vdev_map &= ~BIT(vdev_id);
+			ath10k_mac_handle_tx_pause_vdev(ar, vdev_id, pause_id,
+							action);
+		}
+		break;
+	case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS:
+	case WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD:
+	case WMI_TLV_TX_PAUSE_ID_STA_ADD_BA:
+	case WMI_TLV_TX_PAUSE_ID_HOST:
+		ath10k_dbg(ar, ATH10K_DBG_MAC,
+			   "mac ignoring unsupported tx pause id %d\n",
+			   pause_id);
+		break;
+	default:
+		ath10k_dbg(ar, ATH10K_DBG_MAC,
+			   "mac ignoring unknown tx pause vdev %d\n",
+			   pause_id);
+		break;
+	}
+
+	kfree(tb);
+	return 0;
+}
+
+/***********/
+/* TLV ops */
+/***********/
+
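+/* Top-level WMI-TLV event dispatcher. It strips the wmi_cmd_hdr, gives
+ * testmode a chance to consume the event and then routes the skb to the
+ * per-event handlers below. Most branches fall through to dev_kfree_skb();
+ * the mgmt_rx and service_ready handlers return early and keep the skb.
+ */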
+static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+	struct wmi_cmd_hdr *cmd_hdr;
+	enum wmi_tlv_event_id id;
+	bool consumed;
+
+	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
+
+	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
+		goto out;
+
+	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
+
+	consumed = ath10k_tm_event_wmi(ar, id, skb);
+
+	/* The ready event must be handled normally even in UTF mode so that
+	 * we know the UTF firmware has booted; all other events are simply
+	 * passed through to testmode.
+	 */
+	if (consumed && id != WMI_TLV_READY_EVENTID) {
+		ath10k_dbg(ar, ATH10K_DBG_WMI,
+			   "wmi tlv testmode consumed 0x%x\n", id);
+		goto out;
+	}
+
+	switch (id) {
+	case WMI_TLV_MGMT_RX_EVENTID:
+		ath10k_wmi_event_mgmt_rx(ar, skb);
+		/* mgmt_rx() owns the skb now! */
+		return;
+	case WMI_TLV_SCAN_EVENTID:
+		ath10k_wmi_event_scan(ar, skb);
+		break;
+	case WMI_TLV_CHAN_INFO_EVENTID:
+		ath10k_wmi_event_chan_info(ar, skb);
+		break;
+	case WMI_TLV_ECHO_EVENTID:
+		ath10k_wmi_event_echo(ar, skb);
+		break;
+	case WMI_TLV_DEBUG_MESG_EVENTID:
+		ath10k_wmi_event_debug_mesg(ar, skb);
+		break;
+	case WMI_TLV_UPDATE_STATS_EVENTID:
+		ath10k_wmi_event_update_stats(ar, skb);
+		break;
+	case WMI_TLV_VDEV_START_RESP_EVENTID:
+		ath10k_wmi_event_vdev_start_resp(ar, skb);
+		break;
+	case WMI_TLV_VDEV_STOPPED_EVENTID:
+		ath10k_wmi_event_vdev_stopped(ar, skb);
+		break;
+	case WMI_TLV_PEER_STA_KICKOUT_EVENTID:
+		ath10k_wmi_event_peer_sta_kickout(ar, skb);
+		break;
+	case WMI_TLV_HOST_SWBA_EVENTID:
+		ath10k_wmi_event_host_swba(ar, skb);
+		break;
+	case WMI_TLV_TBTTOFFSET_UPDATE_EVENTID:
+		ath10k_wmi_event_tbttoffset_update(ar, skb);
+		break;
+	case WMI_TLV_PHYERR_EVENTID:
+		ath10k_wmi_event_phyerr(ar, skb);
+		break;
+	case WMI_TLV_ROAM_EVENTID:
+		ath10k_wmi_event_roam(ar, skb);
+		break;
+	case WMI_TLV_PROFILE_MATCH:
+		ath10k_wmi_event_profile_match(ar, skb);
+		break;
+	case WMI_TLV_DEBUG_PRINT_EVENTID:
+		ath10k_wmi_event_debug_print(ar, skb);
+		break;
+	case WMI_TLV_PDEV_QVIT_EVENTID:
+		ath10k_wmi_event_pdev_qvit(ar, skb);
+		break;
+	case WMI_TLV_WLAN_PROFILE_DATA_EVENTID:
+		ath10k_wmi_event_wlan_profile_data(ar, skb);
+		break;
+	case WMI_TLV_RTT_MEASUREMENT_REPORT_EVENTID:
+		ath10k_wmi_event_rtt_measurement_report(ar, skb);
+		break;
+	case WMI_TLV_TSF_MEASUREMENT_REPORT_EVENTID:
+		ath10k_wmi_event_tsf_measurement_report(ar, skb);
+		break;
+	case WMI_TLV_RTT_ERROR_REPORT_EVENTID:
+		ath10k_wmi_event_rtt_error_report(ar, skb);
+		break;
+	case WMI_TLV_WOW_WAKEUP_HOST_EVENTID:
+		ath10k_wmi_event_wow_wakeup_host(ar, skb);
+		break;
+	case WMI_TLV_DCS_INTERFERENCE_EVENTID:
+		ath10k_wmi_event_dcs_interference(ar, skb);
+		break;
+	case WMI_TLV_PDEV_TPC_CONFIG_EVENTID:
+		ath10k_wmi_event_pdev_tpc_config(ar, skb);
+		break;
+	case WMI_TLV_PDEV_FTM_INTG_EVENTID:
+		ath10k_wmi_event_pdev_ftm_intg(ar, skb);
+		break;
+	case WMI_TLV_GTK_OFFLOAD_STATUS_EVENTID:
+		ath10k_wmi_event_gtk_offload_status(ar, skb);
+		break;
+	case WMI_TLV_GTK_REKEY_FAIL_EVENTID:
+		ath10k_wmi_event_gtk_rekey_fail(ar, skb);
+		break;
+	case WMI_TLV_TX_DELBA_COMPLETE_EVENTID:
+		ath10k_wmi_event_delba_complete(ar, skb);
+		break;
+	case WMI_TLV_TX_ADDBA_COMPLETE_EVENTID:
+		ath10k_wmi_event_addba_complete(ar, skb);
+		break;
+	case WMI_TLV_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
+		ath10k_wmi_event_vdev_install_key_complete(ar, skb);
+		break;
+	case WMI_TLV_SERVICE_READY_EVENTID:
+		ath10k_wmi_event_service_ready(ar, skb);
+		return;
+	case WMI_TLV_READY_EVENTID:
+		ath10k_wmi_event_ready(ar, skb);
+		break;
+	case WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID:
+		ath10k_wmi_tlv_event_bcn_tx_status(ar, skb);
+		break;
+	case WMI_TLV_DIAG_DATA_CONTAINER_EVENTID:
+		ath10k_wmi_tlv_event_diag_data(ar, skb);
+		break;
+	case WMI_TLV_DIAG_EVENTID:
+		ath10k_wmi_tlv_event_diag(ar, skb);
+		break;
+	case WMI_TLV_P2P_NOA_EVENTID:
+		ath10k_wmi_tlv_event_p2p_noa(ar, skb);
+		break;
+	case WMI_TLV_TX_PAUSE_EVENTID:
+		ath10k_wmi_tlv_event_tx_pause(ar, skb);
+		break;
+	default:
+		ath10k_warn(ar, "Unknown eventid: %d\n", id);
+		break;
+	}
+
+out:
+	dev_kfree_skb(skb);
+}
+
+static int ath10k_wmi_tlv_op_pull_scan_ev(struct ath10k *ar,
+					  struct sk_buff *skb,
+					  struct wmi_scan_ev_arg *arg)
+{
+	const void **tb;
+	const struct wmi_scan_event *ev;
+	int ret;
+
+	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+		return ret;
+	}
+
+	ev = tb[WMI_TLV_TAG_STRUCT_SCAN_EVENT];
+	if (!ev) {
+		kfree(tb);
+		return -EPROTO;
+	}
+
+	arg->event_type = ev->event_type;
+	arg->reason = ev->reason;
+	arg->channel_freq = ev->channel_freq;
+	arg->scan_req_id = ev->scan_req_id;
+	arg->scan_id = ev->scan_id;
+	arg->vdev_id = ev->vdev_id;
+
+	kfree(tb);
+	return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar,
+					     struct sk_buff *skb,
+					     struct wmi_mgmt_rx_ev_arg *arg)
+{
+	const void **tb;
+	const struct wmi_tlv_mgmt_rx_ev *ev;
+	const u8 *frame;
+	u32 msdu_len;
+	int ret;
+
+	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+		return ret;
+	}
+
+	ev = tb[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR];
+	frame = tb[WMI_TLV_TAG_ARRAY_BYTE];
+
+	if (!ev || !frame) {
+		kfree(tb);
+		return -EPROTO;
+	}
+
+	arg->channel = ev->channel;
+	arg->buf_len = ev->buf_len;
+	arg->status = ev->status;
+	arg->snr = ev->snr;
+	arg->phy_mode = ev->phy_mode;
+	arg->rate = ev->rate;
+
+	msdu_len = __le32_to_cpu(arg->buf_len);
+
+	if (skb->len < (frame - skb->data) + msdu_len) {
+		kfree(tb);
+		return -EPROTO;
+	}
+
+	/* shift the sk_buff to point to `frame` */
+	skb_trim(skb, 0);
+	skb_put(skb, frame - skb->data);
+	skb_pull(skb, frame - skb->data);
+	skb_put(skb, msdu_len);
+
+	kfree(tb);
+	return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_ch_info_ev(struct ath10k *ar,
+					     struct sk_buff *skb,
+					     struct wmi_ch_info_ev_arg *arg)
+{
+	const void **tb;
+	const struct wmi_chan_info_event *ev;
+	int ret;
+
+	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+		return ret;
+	}
+
+	ev = tb[WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT];
+	if (!ev) {
+		kfree(tb);
+		return -EPROTO;
+	}
+
+	arg->err_code = ev->err_code;
+	arg->freq = ev->freq;
+	arg->cmd_flags = ev->cmd_flags;
+	arg->noise_floor = ev->noise_floor;
+	arg->rx_clear_count = ev->rx_clear_count;
+	arg->cycle_count = ev->cycle_count;
+
+	kfree(tb);
+	return 0;
+}
+
+static int
+ath10k_wmi_tlv_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb,
+				     struct wmi_vdev_start_ev_arg *arg)
+{
+	const void **tb;
+	const struct wmi_vdev_start_response_event *ev;
+	int ret;
+
+	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+		return ret;
+	}
+
+	ev = tb[WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT];
+	if (!ev) {
+		kfree(tb);
+		return -EPROTO;
+	}
+
+	skb_pull(skb, sizeof(*ev));
+	arg->vdev_id = ev->vdev_id;
+	arg->req_id = ev->req_id;
+	arg->resp_type = ev->resp_type;
+	arg->status = ev->status;
+
+	kfree(tb);
+	return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_peer_kick_ev(struct ath10k *ar,
+					       struct sk_buff *skb,
+					       struct wmi_peer_kick_ev_arg *arg)
+{
+	const void **tb;
+	const struct wmi_peer_sta_kickout_event *ev;
+	int ret;
+
+	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+		return ret;
+	}
+
+	ev = tb[WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT];
+	if (!ev) {
+		kfree(tb);
+		return -EPROTO;
+	}
+
+	arg->mac_addr = ev->peer_macaddr.addr;
+
+	kfree(tb);
+	return 0;
+}
+
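+/* Host SWBA (beacon) events carry a fixed event struct followed by two
+ * TLV arrays: per-vdev TIM info and per-vdev P2P NoA info. The iterator
+ * callbacks below collect them into wmi_tlv_swba_parse so the pull op can
+ * cross-check the counts against the vdev bitmap.
+ */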
+struct wmi_tlv_swba_parse {
+	const struct wmi_host_swba_event *ev;
+	bool tim_done;
+	bool noa_done;
+	size_t n_tim;
+	size_t n_noa;
+	struct wmi_swba_ev_arg *arg;
+};
+
+static int ath10k_wmi_tlv_swba_tim_parse(struct ath10k *ar, u16 tag, u16 len,
+					 const void *ptr, void *data)
+{
+	struct wmi_tlv_swba_parse *swba = data;
+	struct wmi_tim_info_arg *tim_info_arg;
+	const struct wmi_tim_info *tim_info_ev = ptr;
+
+	if (tag != WMI_TLV_TAG_STRUCT_TIM_INFO)
+		return -EPROTO;
+
+	if (swba->n_tim >= ARRAY_SIZE(swba->arg->tim_info))
+		return -ENOBUFS;
+
+	if (__le32_to_cpu(tim_info_ev->tim_len) >
+	     sizeof(tim_info_ev->tim_bitmap)) {
+		ath10k_warn(ar, "refusing to parse invalid swba structure\n");
+		return -EPROTO;
+	}
+
+	tim_info_arg = &swba->arg->tim_info[swba->n_tim];
+	tim_info_arg->tim_len = tim_info_ev->tim_len;
+	tim_info_arg->tim_mcast = tim_info_ev->tim_mcast;
+	tim_info_arg->tim_bitmap = tim_info_ev->tim_bitmap;
+	tim_info_arg->tim_changed = tim_info_ev->tim_changed;
+	tim_info_arg->tim_num_ps_pending = tim_info_ev->tim_num_ps_pending;
+
+	swba->n_tim++;
+
+	return 0;
+}
+
+static int ath10k_wmi_tlv_swba_noa_parse(struct ath10k *ar, u16 tag, u16 len,
+					 const void *ptr, void *data)
+{
+	struct wmi_tlv_swba_parse *swba = data;
+
+	if (tag != WMI_TLV_TAG_STRUCT_P2P_NOA_INFO)
+		return -EPROTO;
+
+	if (swba->n_noa >= ARRAY_SIZE(swba->arg->noa_info))
+		return -ENOBUFS;
+
+	swba->arg->noa_info[swba->n_noa++] = ptr;
+	return 0;
+}
+
+static int ath10k_wmi_tlv_swba_parse(struct ath10k *ar, u16 tag, u16 len,
+				     const void *ptr, void *data)
+{
+	struct wmi_tlv_swba_parse *swba = data;
+	int ret;
+
+	switch (tag) {
+	case WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT:
+		swba->ev = ptr;
+		break;
+	case WMI_TLV_TAG_ARRAY_STRUCT:
+		if (!swba->tim_done) {
+			swba->tim_done = true;
+			ret = ath10k_wmi_tlv_iter(ar, ptr, len,
+						  ath10k_wmi_tlv_swba_tim_parse,
+						  swba);
+			if (ret)
+				return ret;
+		} else if (!swba->noa_done) {
+			swba->noa_done = true;
+			ret = ath10k_wmi_tlv_iter(ar, ptr, len,
+						  ath10k_wmi_tlv_swba_noa_parse,
+						  swba);
+			if (ret)
+				return ret;
+		}
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_swba_ev(struct ath10k *ar,
+					  struct sk_buff *skb,
+					  struct wmi_swba_ev_arg *arg)
+{
+	struct wmi_tlv_swba_parse swba = { .arg = arg };
+	u32 map;
+	size_t n_vdevs;
+	int ret;
+
+	ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
+				  ath10k_wmi_tlv_swba_parse, &swba);
+	if (ret) {
+		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+		return ret;
+	}
+
+	if (!swba.ev)
+		return -EPROTO;
+
+	arg->vdev_map = swba.ev->vdev_map;
+
+	for (map = __le32_to_cpu(arg->vdev_map), n_vdevs = 0; map; map >>= 1)
+		if (map & BIT(0))
+			n_vdevs++;
+
+	if (n_vdevs != swba.n_tim ||
+	    n_vdevs != swba.n_noa)
+		return -EPROTO;
+
+	return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_phyerr_ev_hdr(struct ath10k *ar,
+						struct sk_buff *skb,
+						struct wmi_phyerr_hdr_arg *arg)
+{
+	const void **tb;
+	const struct wmi_tlv_phyerr_ev *ev;
+	const void *phyerrs;
+	int ret;
+
+	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+		return ret;
+	}
+
+	ev = tb[WMI_TLV_TAG_STRUCT_COMB_PHYERR_RX_HDR];
+	phyerrs = tb[WMI_TLV_TAG_ARRAY_BYTE];
+
+	if (!ev || !phyerrs) {
+		kfree(tb);
+		return -EPROTO;
+	}
+
+	arg->num_phyerrs  = __le32_to_cpu(ev->num_phyerrs);
+	arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
+	arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
+	arg->buf_len = __le32_to_cpu(ev->buf_len);
+	arg->phyerrs = phyerrs;
+
+	kfree(tb);
+	return 0;
+}
+
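+/* WMI-TLV ABI version expected from firmware. VER0 packs the major version
+ * in the top 8 bits and the minor version in the lower 24 bits; the NS*
+ * words are fixed namespace magic values. The service ready handler below
+ * rejects firmware whose ABI words do not match these exactly.
+ */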
+#define WMI_TLV_ABI_VER_NS0 0x5F414351
+#define WMI_TLV_ABI_VER_NS1 0x00004C4D
+#define WMI_TLV_ABI_VER_NS2 0x00000000
+#define WMI_TLV_ABI_VER_NS3 0x00000000
+
+#define WMI_TLV_ABI_VER0_MAJOR 1
+#define WMI_TLV_ABI_VER0_MINOR 0
+#define WMI_TLV_ABI_VER0 ((((WMI_TLV_ABI_VER0_MAJOR) << 24) & 0xFF000000) | \
+			  (((WMI_TLV_ABI_VER0_MINOR) <<  0) & 0x00FFFFFF))
+#define WMI_TLV_ABI_VER1 53
+
+static int
+ath10k_wmi_tlv_parse_mem_reqs(struct ath10k *ar, u16 tag, u16 len,
+			      const void *ptr, void *data)
+{
+	struct wmi_svc_rdy_ev_arg *arg = data;
+	int i;
+
+	if (tag != WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ)
+		return -EPROTO;
+
+	for (i = 0; i < ARRAY_SIZE(arg->mem_reqs); i++) {
+		if (!arg->mem_reqs[i]) {
+			arg->mem_reqs[i] = ptr;
+			return 0;
+		}
+	}
+
+	return -ENOMEM;
+}
+
+static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
+					     struct sk_buff *skb,
+					     struct wmi_svc_rdy_ev_arg *arg)
+{
+	const void **tb;
+	const struct hal_reg_capabilities *reg;
+	const struct wmi_tlv_svc_rdy_ev *ev;
+	const __le32 *svc_bmap;
+	const struct wlan_host_mem_req *mem_reqs;
+	int ret;
+
+	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+		return ret;
+	}
+
+	ev = tb[WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT];
+	reg = tb[WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES];
+	svc_bmap = tb[WMI_TLV_TAG_ARRAY_UINT32];
+	mem_reqs = tb[WMI_TLV_TAG_ARRAY_STRUCT];
+
+	if (!ev || !reg || !svc_bmap || !mem_reqs) {
+		kfree(tb);
+		return -EPROTO;
+	}
+
+	/* This is an internal ABI compatibility check for WMI TLV so check it
+	 * here instead of the generic WMI code.
+	 */
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi tlv abi 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x\n",
+		   __le32_to_cpu(ev->abi.abi_ver0), WMI_TLV_ABI_VER0,
+		   __le32_to_cpu(ev->abi.abi_ver_ns0), WMI_TLV_ABI_VER_NS0,
+		   __le32_to_cpu(ev->abi.abi_ver_ns1), WMI_TLV_ABI_VER_NS1,
+		   __le32_to_cpu(ev->abi.abi_ver_ns2), WMI_TLV_ABI_VER_NS2,
+		   __le32_to_cpu(ev->abi.abi_ver_ns3), WMI_TLV_ABI_VER_NS3);
+
+	if (__le32_to_cpu(ev->abi.abi_ver0) != WMI_TLV_ABI_VER0 ||
+	    __le32_to_cpu(ev->abi.abi_ver_ns0) != WMI_TLV_ABI_VER_NS0 ||
+	    __le32_to_cpu(ev->abi.abi_ver_ns1) != WMI_TLV_ABI_VER_NS1 ||
+	    __le32_to_cpu(ev->abi.abi_ver_ns2) != WMI_TLV_ABI_VER_NS2 ||
+	    __le32_to_cpu(ev->abi.abi_ver_ns3) != WMI_TLV_ABI_VER_NS3) {
+		kfree(tb);
+		return -ENOTSUPP;
+	}
+
+	arg->min_tx_power = ev->hw_min_tx_power;
+	arg->max_tx_power = ev->hw_max_tx_power;
+	arg->ht_cap = ev->ht_cap_info;
+	arg->vht_cap = ev->vht_cap_info;
+	arg->sw_ver0 = ev->abi.abi_ver0;
+	arg->sw_ver1 = ev->abi.abi_ver1;
+	arg->fw_build = ev->fw_build_vers;
+	arg->phy_capab = ev->phy_capability;
+	arg->num_rf_chains = ev->num_rf_chains;
+	arg->eeprom_rd = reg->eeprom_rd;
+	arg->num_mem_reqs = ev->num_mem_reqs;
+	arg->service_map = svc_bmap;
+	arg->service_map_len = ath10k_wmi_tlv_len(svc_bmap);
+
+	ret = ath10k_wmi_tlv_iter(ar, mem_reqs, ath10k_wmi_tlv_len(mem_reqs),
+				  ath10k_wmi_tlv_parse_mem_reqs, arg);
+	if (ret) {
+		kfree(tb);
+		ath10k_warn(ar, "failed to parse mem_reqs tlv: %d\n", ret);
+		return ret;
+	}
+
+	kfree(tb);
+	return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_rdy_ev(struct ath10k *ar,
+					 struct sk_buff *skb,
+					 struct wmi_rdy_ev_arg *arg)
+{
+	const void **tb;
+	const struct wmi_tlv_rdy_ev *ev;
+	int ret;
+
+	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+		return ret;
+	}
+
+	ev = tb[WMI_TLV_TAG_STRUCT_READY_EVENT];
+	if (!ev) {
+		kfree(tb);
+		return -EPROTO;
+	}
+
+	arg->sw_version = ev->abi.abi_ver0;
+	arg->abi_version = ev->abi.abi_ver1;
+	arg->status = ev->status;
+	arg->mac_addr = ev->mac_addr.addr;
+
+	kfree(tb);
+	return 0;
+}
+
+static void ath10k_wmi_tlv_pull_vdev_stats(const struct wmi_tlv_vdev_stats *src,
+					   struct ath10k_fw_stats_vdev *dst)
+{
+	int i;
+
+	dst->vdev_id = __le32_to_cpu(src->vdev_id);
+	dst->beacon_snr = __le32_to_cpu(src->beacon_snr);
+	dst->data_snr = __le32_to_cpu(src->data_snr);
+	dst->num_rx_frames = __le32_to_cpu(src->num_rx_frames);
+	dst->num_rts_fail = __le32_to_cpu(src->num_rts_fail);
+	dst->num_rts_success = __le32_to_cpu(src->num_rts_success);
+	dst->num_rx_err = __le32_to_cpu(src->num_rx_err);
+	dst->num_rx_discard = __le32_to_cpu(src->num_rx_discard);
+	dst->num_tx_not_acked = __le32_to_cpu(src->num_tx_not_acked);
+
+	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames); i++)
+		dst->num_tx_frames[i] =
+			__le32_to_cpu(src->num_tx_frames[i]);
+
+	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_retries); i++)
+		dst->num_tx_frames_retries[i] =
+			__le32_to_cpu(src->num_tx_frames_retries[i]);
+
+	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_failures); i++)
+		dst->num_tx_frames_failures[i] =
+			__le32_to_cpu(src->num_tx_frames_failures[i]);
+
+	for (i = 0; i < ARRAY_SIZE(src->tx_rate_history); i++)
+		dst->tx_rate_history[i] =
+			__le32_to_cpu(src->tx_rate_history[i]);
+
+	for (i = 0; i < ARRAY_SIZE(src->beacon_rssi_history); i++)
+		dst->beacon_rssi_history[i] =
+			__le32_to_cpu(src->beacon_rssi_history[i]);
+}
+
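+/* Firmware stats arrive as a STATS_EVENT struct plus an ARRAY_BYTE blob
+ * in which pdev, vdev and peer stats records are packed back-to-back in
+ * that order; each record is copied into a freshly allocated host struct
+ * and appended to the corresponding list in ath10k_fw_stats.
+ */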
+static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
+					   struct sk_buff *skb,
+					   struct ath10k_fw_stats *stats)
+{
+	const void **tb;
+	const struct wmi_tlv_stats_ev *ev;
+	const void *data;
+	u32 num_pdev_stats;
+	u32 num_vdev_stats;
+	u32 num_peer_stats;
+	u32 num_bcnflt_stats;
+	u32 num_chan_stats;
+	size_t data_len;
+	int ret;
+	int i;
+
+	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+		return ret;
+	}
+
+	ev = tb[WMI_TLV_TAG_STRUCT_STATS_EVENT];
+	data = tb[WMI_TLV_TAG_ARRAY_BYTE];
+
+	if (!ev || !data) {
+		kfree(tb);
+		return -EPROTO;
+	}
+
+	data_len = ath10k_wmi_tlv_len(data);
+	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
+	num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
+	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+	num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats);
+	num_chan_stats = __le32_to_cpu(ev->num_chan_stats);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi tlv stats update pdev %i vdev %i peer %i bcnflt %i chan %i\n",
+		   num_pdev_stats, num_vdev_stats, num_peer_stats,
+		   num_bcnflt_stats, num_chan_stats);
+
+	for (i = 0; i < num_pdev_stats; i++) {
+		const struct wmi_pdev_stats *src;
+		struct ath10k_fw_stats_pdev *dst;
+
+		src = data;
+		if (data_len < sizeof(*src)) {
+			kfree(tb);
+			return -EPROTO;
+		}
+
+		data += sizeof(*src);
+		data_len -= sizeof(*src);
+
+		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+		if (!dst)
+			continue;
+
+		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
+		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
+		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+		list_add_tail(&dst->list, &stats->pdevs);
+	}
+
+	for (i = 0; i < num_vdev_stats; i++) {
+		const struct wmi_tlv_vdev_stats *src;
+		struct ath10k_fw_stats_vdev *dst;
+
+		src = data;
+		if (data_len < sizeof(*src)) {
+			kfree(tb);
+			return -EPROTO;
+		}
+
+		data += sizeof(*src);
+		data_len -= sizeof(*src);
+
+		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+		if (!dst)
+			continue;
+
+		ath10k_wmi_tlv_pull_vdev_stats(src, dst);
+		list_add_tail(&dst->list, &stats->vdevs);
+	}
+
+	for (i = 0; i < num_peer_stats; i++) {
+		const struct wmi_10x_peer_stats *src;
+		struct ath10k_fw_stats_peer *dst;
+
+		src = data;
+		if (data_len < sizeof(*src)) {
+			kfree(tb);
+			return -EPROTO;
+		}
+
+		data += sizeof(*src);
+		data_len -= sizeof(*src);
+
+		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+		if (!dst)
+			continue;
+
+		ath10k_wmi_pull_peer_stats(&src->old, dst);
+		dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
+		list_add_tail(&dst->list, &stats->peers);
+	}
+
+	kfree(tb);
+	return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_roam_ev(struct ath10k *ar,
+					  struct sk_buff *skb,
+					  struct wmi_roam_ev_arg *arg)
+{
+	const void **tb;
+	const struct wmi_tlv_roam_ev *ev;
+	int ret;
+
+	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+		return ret;
+	}
+
+	ev = tb[WMI_TLV_TAG_STRUCT_ROAM_EVENT];
+	if (!ev) {
+		kfree(tb);
+		return -EPROTO;
+	}
+
+	arg->vdev_id = ev->vdev_id;
+	arg->reason = ev->reason;
+	arg->rssi = ev->rssi;
+
+	kfree(tb);
+	return 0;
+}
+
+static int
+ath10k_wmi_tlv_op_pull_wow_ev(struct ath10k *ar, struct sk_buff *skb,
+			      struct wmi_wow_ev_arg *arg)
+{
+	const void **tb;
+	const struct wmi_tlv_wow_event_info *ev;
+	int ret;
+
+	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+		return ret;
+	}
+
+	ev = tb[WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO];
+	if (!ev) {
+		kfree(tb);
+		return -EPROTO;
+	}
+
+	arg->vdev_id = __le32_to_cpu(ev->vdev_id);
+	arg->flag = __le32_to_cpu(ev->flag);
+	arg->wake_reason = __le32_to_cpu(ev->wake_reason);
+	arg->data_len = __le32_to_cpu(ev->data_len);
+
+	kfree(tb);
+	return 0;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_pdev_suspend(struct ath10k *ar, u32 opt)
+{
+	struct wmi_tlv_pdev_suspend *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	tlv = (void *)skb->data;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SUSPEND_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->opt = __cpu_to_le32(opt);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev suspend\n");
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_pdev_resume(struct ath10k *ar)
+{
+	struct wmi_tlv_resume_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	tlv = (void *)skb->data;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_RESUME_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->reserved = __cpu_to_le32(0);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev resume\n");
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_pdev_set_rd(struct ath10k *ar,
+				  u16 rd, u16 rd2g, u16 rd5g,
+				  u16 ctl2g, u16 ctl5g,
+				  enum wmi_dfs_region dfs_reg)
+{
+	struct wmi_tlv_pdev_set_rd_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	tlv = (void *)skb->data;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_REGDOMAIN_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->regd = __cpu_to_le32(rd);
+	cmd->regd_2ghz = __cpu_to_le32(rd2g);
+	cmd->regd_5ghz = __cpu_to_le32(rd5g);
+	cmd->conform_limit_2ghz = __cpu_to_le32(ctl2g);
+	cmd->conform_limit_5ghz = __cpu_to_le32(ctl5g);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set rd\n");
+	return skb;
+}
+
+static enum wmi_txbf_conf ath10k_wmi_tlv_txbf_conf_scheme(struct ath10k *ar)
+{
+	return WMI_TXBF_CONF_AFTER_ASSOC;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_pdev_set_param(struct ath10k *ar, u32 param_id,
+				     u32 param_value)
+{
+	struct wmi_tlv_pdev_set_param_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	tlv = (void *)skb->data;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_PARAM_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->param_id = __cpu_to_le32(param_id);
+	cmd->param_value = __cpu_to_le32(param_value);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set param\n");
+	return skb;
+}
+
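+/* The init command is built as three back-to-back TLVs: the init cmd
+ * itself, the resource config and an array-of-struct TLV carrying the
+ * host memory chunks. The buffer length is computed up front and each
+ * sub-TLV is written sequentially through 'ptr'.
+ */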
+static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
+{
+	struct sk_buff *skb;
+	struct wmi_tlv *tlv;
+	struct wmi_tlv_init_cmd *cmd;
+	struct wmi_tlv_resource_config *cfg;
+	struct wmi_host_mem_chunks *chunks;
+	size_t len, chunks_len;
+	void *ptr;
+
+	chunks_len = ar->wmi.num_mem_chunks * sizeof(struct host_memory_chunk);
+	len = (sizeof(*tlv) + sizeof(*cmd)) +
+	      (sizeof(*tlv) + sizeof(*cfg)) +
+	      (sizeof(*tlv) + chunks_len);
+
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ptr = skb->data;
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_INIT_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*cmd);
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESOURCE_CONFIG);
+	tlv->len = __cpu_to_le16(sizeof(*cfg));
+	cfg = (void *)tlv->value;
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*cfg);
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+	tlv->len = __cpu_to_le16(chunks_len);
+	chunks = (void *)tlv->value;
+
+	ptr += sizeof(*tlv);
+	ptr += chunks_len;
+
+	cmd->abi.abi_ver0 = __cpu_to_le32(WMI_TLV_ABI_VER0);
+	cmd->abi.abi_ver1 = __cpu_to_le32(WMI_TLV_ABI_VER1);
+	cmd->abi.abi_ver_ns0 = __cpu_to_le32(WMI_TLV_ABI_VER_NS0);
+	cmd->abi.abi_ver_ns1 = __cpu_to_le32(WMI_TLV_ABI_VER_NS1);
+	cmd->abi.abi_ver_ns2 = __cpu_to_le32(WMI_TLV_ABI_VER_NS2);
+	cmd->abi.abi_ver_ns3 = __cpu_to_le32(WMI_TLV_ABI_VER_NS3);
+	cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
+
+	cfg->num_vdevs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
+	cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS);
+
+	if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) {
+		cfg->num_offload_peers = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
+		cfg->num_offload_reorder_bufs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
+	} else {
+		cfg->num_offload_peers = __cpu_to_le32(0);
+		cfg->num_offload_reorder_bufs = __cpu_to_le32(0);
+	}
+
+	cfg->num_peer_keys = __cpu_to_le32(2);
+	cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS);
+	cfg->ast_skid_limit = __cpu_to_le32(0x10);
+	cfg->tx_chain_mask = __cpu_to_le32(0x7);
+	cfg->rx_chain_mask = __cpu_to_le32(0x7);
+	cfg->rx_timeout_pri[0] = __cpu_to_le32(0x64);
+	cfg->rx_timeout_pri[1] = __cpu_to_le32(0x64);
+	cfg->rx_timeout_pri[2] = __cpu_to_le32(0x64);
+	cfg->rx_timeout_pri[3] = __cpu_to_le32(0x28);
+	cfg->rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
+	cfg->scan_max_pending_reqs = __cpu_to_le32(4);
+	cfg->bmiss_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
+	cfg->roam_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
+	cfg->roam_offload_max_ap_profiles = __cpu_to_le32(8);
+	cfg->num_mcast_groups = __cpu_to_le32(0);
+	cfg->num_mcast_table_elems = __cpu_to_le32(0);
+	cfg->mcast2ucast_mode = __cpu_to_le32(0);
+	cfg->tx_dbg_log_size = __cpu_to_le32(0x400);
+	cfg->num_wds_entries = __cpu_to_le32(0x20);
+	cfg->dma_burst_size = __cpu_to_le32(0);
+	cfg->mac_aggr_delim = __cpu_to_le32(0);
+	cfg->rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(0);
+	cfg->vow_config = __cpu_to_le32(0);
+	cfg->gtk_offload_max_vdev = __cpu_to_le32(2);
+	cfg->num_msdu_desc = __cpu_to_le32(TARGET_TLV_NUM_MSDU_DESC);
+	cfg->max_frag_entries = __cpu_to_le32(2);
+	cfg->num_tdls_vdevs = __cpu_to_le32(TARGET_TLV_NUM_TDLS_VDEVS);
+	cfg->num_tdls_conn_table_entries = __cpu_to_le32(0x20);
+	cfg->beacon_tx_offload_max_vdev = __cpu_to_le32(2);
+	cfg->num_multicast_filter_entries = __cpu_to_le32(5);
+	cfg->num_wow_filters = __cpu_to_le32(ar->wow.max_num_patterns);
+	cfg->num_keep_alive_pattern = __cpu_to_le32(6);
+	cfg->keep_alive_pattern_size = __cpu_to_le32(0);
+	cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
+	cfg->max_tdls_concurrent_buffer_sta = __cpu_to_le32(1);
+
+	ath10k_wmi_put_host_mem_chunks(ar, chunks);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv init\n");
+	return skb;
+}
+
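+/* Start-scan is a fixed cmd TLV followed by a channel list (ARRAY_UINT32),
+ * an SSID list and a BSSID list (ARRAY_FIXED_STRUCT) and the extra IEs
+ * (ARRAY_BYTE), written sequentially after the command.
+ */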
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
+				 const struct wmi_start_scan_arg *arg)
+{
+	struct wmi_tlv_start_scan_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	size_t len, chan_len, ssid_len, bssid_len, ie_len;
+	__le32 *chans;
+	struct wmi_ssid *ssids;
+	struct wmi_mac_addr *addrs;
+	void *ptr;
+	int i, ret;
+
+	ret = ath10k_wmi_start_scan_verify(arg);
+	if (ret)
+		return ERR_PTR(ret);
+
+	chan_len = arg->n_channels * sizeof(__le32);
+	ssid_len = arg->n_ssids * sizeof(struct wmi_ssid);
+	bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr);
+	ie_len = roundup(arg->ie_len, 4);
+	len = (sizeof(*tlv) + sizeof(*cmd)) +
+	      (arg->n_channels ? sizeof(*tlv) + chan_len : 0) +
+	      (arg->n_ssids ? sizeof(*tlv) + ssid_len : 0) +
+	      (arg->n_bssids ? sizeof(*tlv) + bssid_len : 0) +
+	      (arg->ie_len ? sizeof(*tlv) + ie_len : 0);
+
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ptr = (void *)skb->data;
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_START_SCAN_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+
+	ath10k_wmi_put_start_scan_common(&cmd->common, arg);
+	cmd->burst_duration_ms = __cpu_to_le32(arg->burst_duration_ms);
+	cmd->num_channels = __cpu_to_le32(arg->n_channels);
+	cmd->num_ssids = __cpu_to_le32(arg->n_ssids);
+	cmd->num_bssids = __cpu_to_le32(arg->n_bssids);
+	cmd->ie_len = __cpu_to_le32(arg->ie_len);
+	cmd->num_probes = __cpu_to_le32(3);
+
+	/* FIXME: There are some scan flag inconsistencies across firmwares,
+	 * e.g. WMI-TLV inverts the logic behind the following flag.
+	 */
+	cmd->common.scan_ctrl_flags ^= __cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*cmd);
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
+	tlv->len = __cpu_to_le16(chan_len);
+	chans = (void *)tlv->value;
+	for (i = 0; i < arg->n_channels; i++)
+		chans[i] = __cpu_to_le32(arg->channels[i]);
+
+	ptr += sizeof(*tlv);
+	ptr += chan_len;
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT);
+	tlv->len = __cpu_to_le16(ssid_len);
+	ssids = (void *)tlv->value;
+	for (i = 0; i < arg->n_ssids; i++) {
+		ssids[i].ssid_len = __cpu_to_le32(arg->ssids[i].len);
+		memcpy(ssids[i].ssid, arg->ssids[i].ssid, arg->ssids[i].len);
+	}
+
+	ptr += sizeof(*tlv);
+	ptr += ssid_len;
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT);
+	tlv->len = __cpu_to_le16(bssid_len);
+	addrs = (void *)tlv->value;
+	for (i = 0; i < arg->n_bssids; i++)
+		ether_addr_copy(addrs[i].addr, arg->bssids[i].bssid);
+
+	ptr += sizeof(*tlv);
+	ptr += bssid_len;
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+	tlv->len = __cpu_to_le16(ie_len);
+	memcpy(tlv->value, arg->ie, arg->ie_len);
+
+	ptr += sizeof(*tlv);
+	ptr += ie_len;
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv start scan\n");
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_stop_scan(struct ath10k *ar,
+				const struct wmi_stop_scan_arg *arg)
+{
+	struct wmi_stop_scan_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	u32 scan_id;
+	u32 req_id;
+
+	if (arg->req_id > 0xFFF)
+		return ERR_PTR(-EINVAL);
+	if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
+		return ERR_PTR(-EINVAL);
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	scan_id = arg->u.scan_id;
+	scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
+
+	req_id = arg->req_id;
+	req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
+
+	tlv = (void *)skb->data;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STOP_SCAN_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->req_type = __cpu_to_le32(arg->req_type);
+	cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id);
+	cmd->scan_id = __cpu_to_le32(scan_id);
+	cmd->scan_req_id = __cpu_to_le32(req_id);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv stop scan\n");
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_create(struct ath10k *ar,
+				  u32 vdev_id,
+				  enum wmi_vdev_type vdev_type,
+				  enum wmi_vdev_subtype vdev_subtype,
+				  const u8 mac_addr[ETH_ALEN])
+{
+	struct wmi_vdev_create_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	tlv = (void *)skb->data;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_CREATE_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	cmd->vdev_type = __cpu_to_le32(vdev_type);
+	cmd->vdev_subtype = __cpu_to_le32(vdev_subtype);
+	ether_addr_copy(cmd->vdev_macaddr.addr, mac_addr);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev create\n");
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id)
+{
+	struct wmi_vdev_delete_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	tlv = (void *)skb->data;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DELETE_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev delete\n");
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_start(struct ath10k *ar,
+				 const struct wmi_vdev_start_request_arg *arg,
+				 bool restart)
+{
+	struct wmi_tlv_vdev_start_cmd *cmd;
+	struct wmi_channel *ch;
+	struct wmi_p2p_noa_descriptor *noa;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	size_t len;
+	void *ptr;
+	u32 flags = 0;
+
+	if (WARN_ON(arg->hidden_ssid && !arg->ssid))
+		return ERR_PTR(-EINVAL);
+	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
+		return ERR_PTR(-EINVAL);
+
+	len = (sizeof(*tlv) + sizeof(*cmd)) +
+	      (sizeof(*tlv) + sizeof(*ch)) +
+	      (sizeof(*tlv) + 0);
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	if (arg->hidden_ssid)
+		flags |= WMI_VDEV_START_HIDDEN_SSID;
+	if (arg->pmf_enabled)
+		flags |= WMI_VDEV_START_PMF_ENABLED;
+
+	ptr = (void *)skb->data;
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_START_REQUEST_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+	cmd->bcn_intval = __cpu_to_le32(arg->bcn_intval);
+	cmd->dtim_period = __cpu_to_le32(arg->dtim_period);
+	cmd->flags = __cpu_to_le32(flags);
+	cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate);
+	cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power);
+	cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack);
+
+	if (arg->ssid) {
+		cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
+		memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
+	}
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*cmd);
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
+	tlv->len = __cpu_to_le16(sizeof(*ch));
+	ch = (void *)tlv->value;
+	ath10k_wmi_put_wmi_channel(ch, &arg->channel);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*ch);
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+	tlv->len = 0;
+	noa = (void *)tlv->value;
+
+	/* Note: This is a nested TLV containing:
+	 * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
+	 */
+
+	ptr += sizeof(*tlv);
+	ptr += 0;
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev start\n");
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id)
+{
+	struct wmi_vdev_stop_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	tlv = (void *)skb->data;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_STOP_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev stop\n");
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
+			      const u8 *bssid)
+
+{
+	struct wmi_vdev_up_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	tlv = (void *)skb->data;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_UP_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	cmd->vdev_assoc_id = __cpu_to_le32(aid);
+	ether_addr_copy(cmd->vdev_bssid.addr, bssid);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev up\n");
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id)
+{
+	struct wmi_vdev_down_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	tlv = (void *)skb->data;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DOWN_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev down\n");
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id,
+				     u32 param_id, u32 param_value)
+{
+	struct wmi_vdev_set_param_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	tlv = (void *)skb->data;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_PARAM_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	cmd->param_id = __cpu_to_le32(param_id);
+	cmd->param_value = __cpu_to_le32(param_value);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev set param\n");
+	return skb;
+}
+
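+/* The install-key command carries the key material in a trailing
+ * ARRAY_BYTE TLV padded up to a 4-byte boundary; WMI_CIPHER_NONE must not
+ * have key data and every other cipher must.
+ */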
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_install_key(struct ath10k *ar,
+				       const struct wmi_vdev_install_key_arg *arg)
+{
+	struct wmi_vdev_install_key_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	size_t len;
+	void *ptr;
+
+	if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
+		return ERR_PTR(-EINVAL);
+	if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
+		return ERR_PTR(-EINVAL);
+
+	len = sizeof(*tlv) + sizeof(*cmd) +
+	      sizeof(*tlv) + roundup(arg->key_len, sizeof(__le32));
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ptr = (void *)skb->data;
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_INSTALL_KEY_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+	cmd->key_idx = __cpu_to_le32(arg->key_idx);
+	cmd->key_flags = __cpu_to_le32(arg->key_flags);
+	cmd->key_cipher = __cpu_to_le32(arg->key_cipher);
+	cmd->key_len = __cpu_to_le32(arg->key_len);
+	cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
+	cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);
+
+	if (arg->macaddr)
+		ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*cmd);
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+	tlv->len = __cpu_to_le16(roundup(arg->key_len, sizeof(__le32)));
+	if (arg->key_data)
+		memcpy(tlv->value, arg->key_data, arg->key_len);
+
+	ptr += sizeof(*tlv);
+	ptr += roundup(arg->key_len, sizeof(__le32));
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev install key\n");
+	return skb;
+}
+
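+/* Helper that appends one STA_UAPSD_AUTO_TRIG_PARAM TLV at 'ptr' and
+ * returns the advanced pointer, so callers can chain one entry per AC.
+ */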
+static void *ath10k_wmi_tlv_put_uapsd_ac(struct ath10k *ar, void *ptr,
+					 const struct wmi_sta_uapsd_auto_trig_arg *arg)
+{
+	struct wmi_sta_uapsd_auto_trig_param *ac;
+	struct wmi_tlv *tlv;
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_PARAM);
+	tlv->len = __cpu_to_le16(sizeof(*ac));
+	ac = (void *)tlv->value;
+
+	ac->wmm_ac = __cpu_to_le32(arg->wmm_ac);
+	ac->user_priority = __cpu_to_le32(arg->user_priority);
+	ac->service_interval = __cpu_to_le32(arg->service_interval);
+	ac->suspend_interval = __cpu_to_le32(arg->suspend_interval);
+	ac->delay_interval = __cpu_to_le32(arg->delay_interval);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi tlv vdev sta uapsd auto trigger ac %d prio %d svc int %d susp int %d delay int %d\n",
+		   ac->wmm_ac, ac->user_priority, ac->service_interval,
+		   ac->suspend_interval, ac->delay_interval);
+
+	return ptr + sizeof(*tlv) + sizeof(*ac);
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
+				     const u8 peer_addr[ETH_ALEN],
+				     const struct wmi_sta_uapsd_auto_trig_arg *args,
+				     u32 num_ac)
+{
+	struct wmi_sta_uapsd_auto_trig_cmd_fixed_param *cmd;
+	struct wmi_sta_uapsd_auto_trig_param *ac;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	size_t len;
+	size_t ac_tlv_len;
+	void *ptr;
+	int i;
+
+	ac_tlv_len = num_ac * (sizeof(*tlv) + sizeof(*ac));
+	len = sizeof(*tlv) + sizeof(*cmd) +
+	      sizeof(*tlv) + ac_tlv_len;
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ptr = (void *)skb->data;
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	cmd->num_ac = __cpu_to_le32(num_ac);
+	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*cmd);
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+	tlv->len = __cpu_to_le16(ac_tlv_len);
+	ac = (void *)tlv->value;
+
+	ptr += sizeof(*tlv);
+	for (i = 0; i < num_ac; i++)
+		ptr = ath10k_wmi_tlv_put_uapsd_ac(ar, ptr, &args[i]);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev sta uapsd auto trigger\n");
+	return skb;
+}
+
+static void *ath10k_wmi_tlv_put_wmm(void *ptr,
+				    const struct wmi_wmm_params_arg *arg)
+{
+	struct wmi_wmm_params *wmm;
+	struct wmi_tlv *tlv;
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WMM_PARAMS);
+	tlv->len = __cpu_to_le16(sizeof(*wmm));
+	wmm = (void *)tlv->value;
+	ath10k_wmi_set_wmm_param(wmm, arg);
+
+	return ptr + sizeof(*tlv) + sizeof(*wmm);
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
+				    const struct wmi_wmm_params_all_arg *arg)
+{
+	struct wmi_tlv_vdev_set_wmm_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	size_t len;
+	void *ptr;
+
+	len = sizeof(*tlv) + sizeof(*cmd);
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ptr = (void *)skb->data;
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_WMM_PARAMS_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+	ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[0].params, &arg->ac_be);
+	ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[1].params, &arg->ac_bk);
+	ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[2].params, &arg->ac_vi);
+	ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[3].params, &arg->ac_vo);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev wmm conf\n");
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_sta_keepalive(struct ath10k *ar,
+				    const struct wmi_sta_keepalive_arg *arg)
+{
+	struct wmi_tlv_sta_keepalive_cmd *cmd;
+	struct wmi_sta_keepalive_arp_resp *arp;
+	struct sk_buff *skb;
+	struct wmi_tlv *tlv;
+	void *ptr;
+	size_t len;
+
+	len = sizeof(*tlv) + sizeof(*cmd) +
+	      sizeof(*tlv) + sizeof(*arp);
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ptr = (void *)skb->data;
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALIVE_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+	cmd->enabled = __cpu_to_le32(arg->enabled);
+	cmd->method = __cpu_to_le32(arg->method);
+	cmd->interval = __cpu_to_le32(arg->interval);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*cmd);
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALVE_ARP_RESPONSE);
+	tlv->len = __cpu_to_le16(sizeof(*arp));
+	arp = (void *)tlv->value;
+
+	arp->src_ip4_addr = arg->src_ip4_addr;
+	arp->dest_ip4_addr = arg->dest_ip4_addr;
+	ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv sta keepalive vdev %d enabled %d method %d interval %d\n",
+		   arg->vdev_id, arg->enabled, arg->method, arg->interval);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
+				  const u8 peer_addr[ETH_ALEN],
+				  enum wmi_peer_type peer_type)
+{
+	struct wmi_tlv_peer_create_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	tlv = (void *)skb->data;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_CREATE_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	cmd->peer_type = __cpu_to_le32(peer_type);
+	ether_addr_copy(cmd->peer_addr.addr, peer_addr);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer create\n");
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id,
+				  const u8 peer_addr[ETH_ALEN])
+{
+	struct wmi_peer_delete_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	tlv = (void *)skb->data;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_DELETE_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer delete\n");
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id,
+				 const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
+{
+	struct wmi_peer_flush_tids_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	tlv = (void *)skb->data;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_FLUSH_TIDS_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
+	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer flush\n");
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
+				     const u8 *peer_addr,
+				     enum wmi_peer_param param_id,
+				     u32 param_value)
+{
+	struct wmi_peer_set_param_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	tlv = (void *)skb->data;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_SET_PARAM_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	cmd->param_id = __cpu_to_le32(param_id);
+	cmd->param_value = __cpu_to_le32(param_value);
+	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer set param\n");
+	return skb;
+}
+
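+/* Peer assoc is a fixed cmd TLV followed by legacy and HT rate arrays
+ * (ARRAY_BYTE, padded to 4 bytes) and a VHT rate set struct TLV.
+ */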
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_peer_assoc(struct ath10k *ar,
+				 const struct wmi_peer_assoc_complete_arg *arg)
+{
+	struct wmi_tlv_peer_assoc_cmd *cmd;
+	struct wmi_vht_rate_set *vht_rate;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	size_t len, legacy_rate_len, ht_rate_len;
+	void *ptr;
+
+	if (arg->peer_mpdu_density > 16)
+		return ERR_PTR(-EINVAL);
+	if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
+		return ERR_PTR(-EINVAL);
+	if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
+		return ERR_PTR(-EINVAL);
+
+	legacy_rate_len = roundup(arg->peer_legacy_rates.num_rates,
+				  sizeof(__le32));
+	ht_rate_len = roundup(arg->peer_ht_rates.num_rates, sizeof(__le32));
+	len = (sizeof(*tlv) + sizeof(*cmd)) +
+	      (sizeof(*tlv) + legacy_rate_len) +
+	      (sizeof(*tlv) + ht_rate_len) +
+	      (sizeof(*tlv) + sizeof(*vht_rate));
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ptr = (void *)skb->data;
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_ASSOC_COMPLETE_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+
+	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+	cmd->new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
+	cmd->assoc_id = __cpu_to_le32(arg->peer_aid);
+	cmd->flags = __cpu_to_le32(arg->peer_flags);
+	cmd->caps = __cpu_to_le32(arg->peer_caps);
+	cmd->listen_intval = __cpu_to_le32(arg->peer_listen_intval);
+	cmd->ht_caps = __cpu_to_le32(arg->peer_ht_caps);
+	cmd->max_mpdu = __cpu_to_le32(arg->peer_max_mpdu);
+	cmd->mpdu_density = __cpu_to_le32(arg->peer_mpdu_density);
+	cmd->rate_caps = __cpu_to_le32(arg->peer_rate_caps);
+	cmd->nss = __cpu_to_le32(arg->peer_num_spatial_streams);
+	cmd->vht_caps = __cpu_to_le32(arg->peer_vht_caps);
+	cmd->phy_mode = __cpu_to_le32(arg->peer_phymode);
+	cmd->num_legacy_rates = __cpu_to_le32(arg->peer_legacy_rates.num_rates);
+	cmd->num_ht_rates = __cpu_to_le32(arg->peer_ht_rates.num_rates);
+	ether_addr_copy(cmd->mac_addr.addr, arg->addr);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*cmd);
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+	tlv->len = __cpu_to_le16(legacy_rate_len);
+	memcpy(tlv->value, arg->peer_legacy_rates.rates,
+	       arg->peer_legacy_rates.num_rates);
+
+	ptr += sizeof(*tlv);
+	ptr += legacy_rate_len;
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+	tlv->len = __cpu_to_le16(ht_rate_len);
+	memcpy(tlv->value, arg->peer_ht_rates.rates,
+	       arg->peer_ht_rates.num_rates);
+
+	ptr += sizeof(*tlv);
+	ptr += ht_rate_len;
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VHT_RATE_SET);
+	tlv->len = __cpu_to_le16(sizeof(*vht_rate));
+	vht_rate = (void *)tlv->value;
+
+	vht_rate->rx_max_rate = __cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
+	vht_rate->rx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
+	vht_rate->tx_max_rate = __cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
+	vht_rate->tx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*vht_rate);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer assoc\n");
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id,
+				 enum wmi_sta_ps_mode psmode)
+{
+	struct wmi_sta_powersave_mode_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	tlv = (void *)skb->data;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_MODE_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	cmd->sta_ps_mode = __cpu_to_le32(psmode);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set psmode\n");
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id,
+				 enum wmi_sta_powersave_param param_id,
+				 u32 param_value)
+{
+	struct wmi_sta_powersave_param_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	tlv = (void *)skb->data;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_PARAM_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	cmd->param_id = __cpu_to_le32(param_id);
+	cmd->param_value = __cpu_to_le32(param_value);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set sta ps\n");
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+				enum wmi_ap_ps_peer_param param_id, u32 value)
+{
+	struct wmi_ap_ps_peer_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+
+	if (!mac)
+		return ERR_PTR(-EINVAL);
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	tlv = (void *)skb->data;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_AP_PS_PEER_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	cmd->param_id = __cpu_to_le32(param_id);
+	cmd->param_value = __cpu_to_le32(value);
+	ether_addr_copy(cmd->peer_macaddr.addr, mac);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv ap ps param\n");
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_scan_chan_list(struct ath10k *ar,
+				     const struct wmi_scan_chan_list_arg *arg)
+{
+	struct wmi_tlv_scan_chan_list_cmd *cmd;
+	struct wmi_channel *ci;
+	struct wmi_channel_arg *ch;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	size_t chans_len, len;
+	int i;
+	void *ptr, *chans;
+
+	chans_len = arg->n_channels * (sizeof(*tlv) + sizeof(*ci));
+	len = (sizeof(*tlv) + sizeof(*cmd)) +
+	      (sizeof(*tlv) + chans_len);
+
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ptr = (void *)skb->data;
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_SCAN_CHAN_LIST_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*cmd);
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+	tlv->len = __cpu_to_le16(chans_len);
+	chans = (void *)tlv->value;
+
+	for (i = 0; i < arg->n_channels; i++) {
+		ch = &arg->channels[i];
+
+		tlv = chans;
+		tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
+		tlv->len = __cpu_to_le16(sizeof(*ci));
+		ci = (void *)tlv->value;
+
+		ath10k_wmi_put_wmi_channel(ci, ch);
+
+		chans += sizeof(*tlv);
+		chans += sizeof(*ci);
+	}
+
+	ptr += sizeof(*tlv);
+	ptr += chans_len;
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv scan chan list\n");
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id,
+				 const void *bcn, size_t bcn_len,
+				 u32 bcn_paddr, bool dtim_zero,
+				 bool deliver_cab)
+
+{
+	struct wmi_bcn_tx_ref_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	struct ieee80211_hdr *hdr;
+	u16 fc;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	hdr = (struct ieee80211_hdr *)bcn;
+	fc = le16_to_cpu(hdr->frame_control);
+
+	tlv = (void *)skb->data;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_SEND_FROM_HOST_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	cmd->data_len = __cpu_to_le32(bcn_len);
+	cmd->data_ptr = __cpu_to_le32(bcn_paddr);
+	cmd->msdu_id = 0;
+	cmd->frame_control = __cpu_to_le32(fc);
+	cmd->flags = 0;
+
+	if (dtim_zero)
+		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);
+
+	if (deliver_cab)
+		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv beacon dma\n");
+	return skb;
+}
+
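+/* Pdev WMM config: an (empty) cmd TLV followed by four WMM_PARAMS TLVs,
+ * one per access category in BE, BK, VI, VO order.
+ */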
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_pdev_set_wmm(struct ath10k *ar,
+				   const struct wmi_wmm_params_all_arg *arg)
+{
+	struct wmi_tlv_pdev_set_wmm_cmd *cmd;
+	struct wmi_wmm_params *wmm;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	size_t len;
+	void *ptr;
+
+	len = (sizeof(*tlv) + sizeof(*cmd)) +
+	      (4 * (sizeof(*tlv) + sizeof(*wmm)));
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ptr = (void *)skb->data;
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_WMM_PARAMS_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+
+	/* nothing to set here */
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*cmd);
+
+	ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_be);
+	ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_bk);
+	ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vi);
+	ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vo);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set wmm\n");
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
+{
+	struct wmi_request_stats_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	tlv = (void *)skb->data;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->stats_id = __cpu_to_le32(stats_mask);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request stats\n");
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar,
+				    enum wmi_force_fw_hang_type type,
+				    u32 delay_ms)
+{
+	struct wmi_force_fw_hang_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	tlv = (void *)skb->data;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_FORCE_FW_HANG_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->type = __cpu_to_le32(type);
+	cmd->delay_ms = __cpu_to_le32(delay_ms);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv force fw hang\n");
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable,
+				 u32 log_level)
+{
+	struct wmi_tlv_dbglog_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	size_t len, bmap_len;
+	u32 value;
+	void *ptr;
+
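+	/* A non-zero module_enable requests verbose logging for that module;
+	 * otherwise all modules are set to warning level. The log_level
+	 * argument is currently ignored.
+	 */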
+	if (module_enable) {
+		value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(
+				module_enable,
+				WMI_TLV_DBGLOG_LOG_LEVEL_VERBOSE);
+	} else {
+		value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(
+				WMI_TLV_DBGLOG_ALL_MODULES,
+				WMI_TLV_DBGLOG_LOG_LEVEL_WARN);
+	}
+
+	bmap_len = 0;
+	len = sizeof(*tlv) + sizeof(*cmd) + sizeof(*tlv) + bmap_len;
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ptr = (void *)skb->data;
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_DEBUG_LOG_CONFIG_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->param = __cpu_to_le32(WMI_TLV_DBGLOG_PARAM_LOG_LEVEL);
+	cmd->value = __cpu_to_le32(value);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*cmd);
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
+	tlv->len = __cpu_to_le16(bmap_len);
+
+	/* nothing to do here */
+
+	ptr += sizeof(*tlv);
+	ptr += bmap_len;
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv dbglog value 0x%08x\n", value);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_pktlog_enable(struct ath10k *ar, u32 filter)
+{
+	struct wmi_tlv_pktlog_enable *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	void *ptr;
+	size_t len;
+
+	len = sizeof(*tlv) + sizeof(*cmd);
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ptr = (void *)skb->data;
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_ENABLE_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->filter = __cpu_to_le32(filter);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*cmd);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog enable filter 0x%08x\n",
+		   filter);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_pktlog_disable(struct ath10k *ar)
+{
+	struct wmi_tlv_pktlog_disable *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	void *ptr;
+	size_t len;
+
+	len = sizeof(*tlv) + sizeof(*cmd);
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ptr = (void *)skb->data;
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_DISABLE_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*cmd);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog disable\n");
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_bcn_tmpl(struct ath10k *ar, u32 vdev_id,
+			       u32 tim_ie_offset, struct sk_buff *bcn,
+			       u32 prb_caps, u32 prb_erp, void *prb_ies,
+			       size_t prb_ies_len)
+{
+	struct wmi_tlv_bcn_tmpl_cmd *cmd;
+	struct wmi_tlv_bcn_prb_info *info;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	void *ptr;
+	size_t len;
+
+	if (WARN_ON(prb_ies_len > 0 && !prb_ies))
+		return ERR_PTR(-EINVAL);
+
+	len = sizeof(*tlv) + sizeof(*cmd) +
+	      sizeof(*tlv) + sizeof(*info) + prb_ies_len +
+	      sizeof(*tlv) + roundup(bcn->len, 4);
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ptr = (void *)skb->data;
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_TMPL_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	cmd->tim_ie_offset = __cpu_to_le32(tim_ie_offset);
+	cmd->buf_len = __cpu_to_le32(bcn->len);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*cmd);
+
+	/* FIXME: prb_ies_len should probably be aligned to a 4-byte boundary,
+	 * but then it would be impossible to pass the original IE length.
+	 * This chunk is not used yet, so if setting the probe response
+	 * template causes beaconing problems or crashes the firmware, look
+	 * here.
+	 */
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
+	tlv->len = __cpu_to_le16(sizeof(*info) + prb_ies_len);
+	info = (void *)tlv->value;
+	info->caps = __cpu_to_le32(prb_caps);
+	info->erp = __cpu_to_le32(prb_erp);
+	memcpy(info->ies, prb_ies, prb_ies_len);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*info);
+	ptr += prb_ies_len;
+
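+	/* Beacon payload, padded up to a 4-byte boundary. */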
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+	tlv->len = __cpu_to_le16(roundup(bcn->len, 4));
+	memcpy(tlv->value, bcn->data, bcn->len);
+
+	/* FIXME: Adjust TSF? */
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv bcn tmpl vdev_id %i\n",
+		   vdev_id);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_prb_tmpl(struct ath10k *ar, u32 vdev_id,
+			       struct sk_buff *prb)
+{
+	struct wmi_tlv_prb_tmpl_cmd *cmd;
+	struct wmi_tlv_bcn_prb_info *info;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	void *ptr;
+	size_t len;
+
+	len = sizeof(*tlv) + sizeof(*cmd) +
+	      sizeof(*tlv) + sizeof(*info) +
+	      sizeof(*tlv) + roundup(prb->len, 4);
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ptr = (void *)skb->data;
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PRB_TMPL_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	cmd->buf_len = __cpu_to_le32(prb->len);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*cmd);
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
+	tlv->len = __cpu_to_le16(sizeof(*info));
+	info = (void *)tlv->value;
+	info->caps = 0;
+	info->erp = 0;
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*info);
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+	tlv->len = __cpu_to_le16(roundup(prb->len, 4));
+	memcpy(tlv->value, prb->data, prb->len);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv prb tmpl vdev_id %i\n",
+		   vdev_id);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id,
+				    const u8 *p2p_ie)
+{
+	struct wmi_tlv_p2p_go_bcn_ie *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	void *ptr;
+	size_t len;
+
+	len = sizeof(*tlv) + sizeof(*cmd) +
+	      sizeof(*tlv) + roundup(p2p_ie[1] + 2, 4);
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ptr = (void *)skb->data;
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_P2P_GO_SET_BEACON_IE);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	cmd->ie_len = __cpu_to_le32(p2p_ie[1] + 2);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*cmd);
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+	tlv->len = __cpu_to_le16(roundup(p2p_ie[1] + 2, 4));
+	memcpy(tlv->value, p2p_ie, p2p_ie[1] + 2);
+
+	ptr += sizeof(*tlv);
+	ptr += roundup(p2p_ie[1] + 2, 4);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv p2p go bcn ie for vdev %i\n",
+		   vdev_id);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
+					   enum wmi_tdls_state state)
+{
+	struct wmi_tdls_set_state_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	void *ptr;
+	size_t len;
+	/* Set to a bitmask of options from enum wmi_tlv_tdls_options;
+	 * for now none of them are enabled.
+	 */
+	u32 options = 0;
+
+	len = sizeof(*tlv) + sizeof(*cmd);
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ptr = (void *)skb->data;
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_SET_STATE_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+
+	cmd = (void *)tlv->value;
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	cmd->state = __cpu_to_le32(state);
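+	/* The remaining thresholds and timeouts are fixed values chosen by
+	 * the driver.
+	 */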
+	cmd->notification_interval_ms = __cpu_to_le32(5000);
+	cmd->tx_discovery_threshold = __cpu_to_le32(100);
+	cmd->tx_teardown_threshold = __cpu_to_le32(5);
+	cmd->rssi_teardown_threshold = __cpu_to_le32(-75);
+	cmd->rssi_delta = __cpu_to_le32(-20);
+	cmd->tdls_options = __cpu_to_le32(options);
+	cmd->tdls_peer_traffic_ind_window = __cpu_to_le32(2);
+	cmd->tdls_peer_traffic_response_timeout_ms = __cpu_to_le32(5000);
+	cmd->tdls_puapsd_mask = __cpu_to_le32(0xf);
+	cmd->tdls_puapsd_inactivity_time_ms = __cpu_to_le32(0);
+	cmd->tdls_puapsd_rx_frame_threshold = __cpu_to_le32(10);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*cmd);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv update fw tdls state %d for vdev %i\n",
+		   state, vdev_id);
+	return skb;
+}
+
+static u32 ath10k_wmi_tlv_prepare_peer_qos(u8 uapsd_queues, u8 sp)
+{
+	u32 peer_qos = 0;
+
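+	/* Translate mac80211 U-APSD queue flags and the max service period
+	 * into the firmware's TDLS peer QoS word.
+	 */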
+	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VO;
+	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
+		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VI;
+	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
+		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BK;
+	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BE;
+
+	peer_qos |= SM(sp, WMI_TLV_TDLS_PEER_SP);
+
+	return peer_qos;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_tdls_peer_update(struct ath10k *ar,
+				       const struct wmi_tdls_peer_update_cmd_arg *arg,
+				       const struct wmi_tdls_peer_capab_arg *cap,
+				       const struct wmi_channel_arg *chan_arg)
+{
+	struct wmi_tdls_peer_update_cmd *cmd;
+	struct wmi_tdls_peer_capab *peer_cap;
+	struct wmi_channel *chan;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	u32 peer_qos;
+	void *ptr;
+	int len;
+	int i;
+
+	len = sizeof(*tlv) + sizeof(*cmd) +
+	      sizeof(*tlv) + sizeof(*peer_cap) +
+	      sizeof(*tlv) + cap->peer_chan_len * sizeof(*chan);
+
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ptr = (void *)skb->data;
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_UPDATE_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+
+	cmd = (void *)tlv->value;
+	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+	ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
+	cmd->peer_state = __cpu_to_le32(arg->peer_state);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*cmd);
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_CAPABILITIES);
+	tlv->len = __cpu_to_le16(sizeof(*peer_cap));
+	peer_cap = (void *)tlv->value;
+	peer_qos = ath10k_wmi_tlv_prepare_peer_qos(cap->peer_uapsd_queues,
+						   cap->peer_max_sp);
+	peer_cap->peer_qos = __cpu_to_le32(peer_qos);
+	peer_cap->buff_sta_support = __cpu_to_le32(cap->buff_sta_support);
+	peer_cap->off_chan_support = __cpu_to_le32(cap->off_chan_support);
+	peer_cap->peer_curr_operclass = __cpu_to_le32(cap->peer_curr_operclass);
+	peer_cap->self_curr_operclass = __cpu_to_le32(cap->self_curr_operclass);
+	peer_cap->peer_chan_len = __cpu_to_le32(cap->peer_chan_len);
+	peer_cap->peer_operclass_len = __cpu_to_le32(cap->peer_operclass_len);
+
+	for (i = 0; i < WMI_TDLS_MAX_SUPP_OPER_CLASSES; i++)
+		peer_cap->peer_operclass[i] = cap->peer_operclass[i];
+
+	peer_cap->is_peer_responder = __cpu_to_le32(cap->is_peer_responder);
+	peer_cap->pref_offchan_num = __cpu_to_le32(cap->pref_offchan_num);
+	peer_cap->pref_offchan_bw = __cpu_to_le32(cap->pref_offchan_bw);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*peer_cap);
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+	tlv->len = __cpu_to_le16(cap->peer_chan_len * sizeof(*chan));
+
+	ptr += sizeof(*tlv);
+
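+	/* Each peer channel is described by its own STRUCT_CHANNEL TLV. */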
+	for (i = 0; i < cap->peer_chan_len; i++) {
+		tlv = ptr;
+		tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
+		tlv->len = __cpu_to_le16(sizeof(*chan));
+		chan = (void *)tlv->value;
+		ath10k_wmi_put_wmi_channel(chan, &chan_arg[i]);
+
+		ptr += sizeof(*tlv);
+		ptr += sizeof(*chan);
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi tlv tdls peer update vdev %i state %d n_chans %u\n",
+		   arg->vdev_id, arg->peer_state, cap->peer_chan_len);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_wow_enable(struct ath10k *ar)
+{
+	struct wmi_tlv_wow_enable_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	size_t len;
+
+	len = sizeof(*tlv) + sizeof(*cmd);
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	tlv = (struct wmi_tlv *)skb->data;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ENABLE_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+
+	cmd->enable = __cpu_to_le32(1);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow enable\n");
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_wow_add_wakeup_event(struct ath10k *ar,
+					   u32 vdev_id,
+					   enum wmi_wow_wakeup_event event,
+					   u32 enable)
+{
+	struct wmi_tlv_wow_add_del_event_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	size_t len;
+
+	len = sizeof(*tlv) + sizeof(*cmd);
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	tlv = (struct wmi_tlv *)skb->data;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_DEL_EVT_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	cmd->is_add = __cpu_to_le32(enable);
+	cmd->event_bitmap = __cpu_to_le32(1 << event);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
+		   wow_wakeup_event(event), enable, vdev_id);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_gen_wow_host_wakeup_ind(struct ath10k *ar)
+{
+	struct wmi_tlv_wow_host_wakeup_ind *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	size_t len;
+
+	len = sizeof(*tlv) + sizeof(*cmd);
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	tlv = (struct wmi_tlv *)skb->data;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_HOSTWAKEUP_FROM_SLEEP_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow host wakeup ind\n");
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_wow_add_pattern(struct ath10k *ar, u32 vdev_id,
+				      u32 pattern_id, const u8 *pattern,
+				      const u8 *bitmask, int pattern_len,
+				      int pattern_offset)
+{
+	struct wmi_tlv_wow_add_pattern_cmd *cmd;
+	struct wmi_tlv_wow_bitmap_pattern *bitmap;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	void *ptr;
+	size_t len;
+
+	len = sizeof(*tlv) + sizeof(*cmd) +
+	      sizeof(*tlv) +			/* array struct */
+	      sizeof(*tlv) + sizeof(*bitmap) +  /* bitmap */
+	      sizeof(*tlv) +			/* empty ipv4 sync */
+	      sizeof(*tlv) +			/* empty ipv6 sync */
+	      sizeof(*tlv) +			/* empty magic */
+	      sizeof(*tlv) +			/* empty info timeout */
+	      sizeof(*tlv) + sizeof(u32);	/* ratelimit interval */
+
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	/* cmd */
+	ptr = (void *)skb->data;
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_PATTERN_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	cmd->pattern_id = __cpu_to_le32(pattern_id);
+	cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*cmd);
+
+	/* bitmap */
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+	tlv->len = __cpu_to_le16(sizeof(*tlv) + sizeof(*bitmap));
+
+	ptr += sizeof(*tlv);
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_BITMAP_PATTERN_T);
+	tlv->len = __cpu_to_le16(sizeof(*bitmap));
+	bitmap = (void *)tlv->value;
+
+	memcpy(bitmap->patternbuf, pattern, pattern_len);
+	memcpy(bitmap->bitmaskbuf, bitmask, pattern_len);
+	bitmap->pattern_offset = __cpu_to_le32(pattern_offset);
+	bitmap->pattern_len = __cpu_to_le32(pattern_len);
+	bitmap->bitmask_len = __cpu_to_le32(pattern_len);
+	bitmap->pattern_id = __cpu_to_le32(pattern_id);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*bitmap);
+
+	/* ipv4 sync */
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+	tlv->len = __cpu_to_le16(0);
+
+	ptr += sizeof(*tlv);
+
+	/* ipv6 sync */
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+	tlv->len = __cpu_to_le16(0);
+
+	ptr += sizeof(*tlv);
+
+	/* magic */
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+	tlv->len = __cpu_to_le16(0);
+
+	ptr += sizeof(*tlv);
+
+	/* pattern info timeout */
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
+	tlv->len = __cpu_to_le16(0);
+
+	ptr += sizeof(*tlv);
+
+	/* ratelimit interval */
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
+	tlv->len = __cpu_to_le16(sizeof(u32));
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d, pattern_offset %d\n",
+		   vdev_id, pattern_id, pattern_offset);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_wow_del_pattern(struct ath10k *ar, u32 vdev_id,
+				      u32 pattern_id)
+{
+	struct wmi_tlv_wow_del_pattern_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	size_t len;
+
+	len = sizeof(*tlv) + sizeof(*cmd);
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	tlv = (struct wmi_tlv *)skb->data;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_DEL_PATTERN_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	cmd->pattern_id = __cpu_to_le32(pattern_id);
+	cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
+		   vdev_id, pattern_id);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_adaptive_qcs(struct ath10k *ar, bool enable)
+{
+	struct wmi_tlv_adaptive_qcs *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	void *ptr;
+	size_t len;
+
+	len = sizeof(*tlv) + sizeof(*cmd);
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ptr = (void *)skb->data;
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESMGR_ADAPTIVE_OCS_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->enable = __cpu_to_le32(enable ? 1 : 0);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*cmd);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv adaptive qcs %d\n", enable);
+	return skb;
+}
+
+/****************/
+/* TLV mappings */
+/****************/
+
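+/* These maps translate the driver's generic command and parameter IDs into
+ * the numbering used by TLV-capable firmware; entries the firmware does not
+ * support are mapped to the *_UNSUPPORTED placeholders.
+ */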
+static struct wmi_cmd_map wmi_tlv_cmd_map = {
+	.init_cmdid = WMI_TLV_INIT_CMDID,
+	.start_scan_cmdid = WMI_TLV_START_SCAN_CMDID,
+	.stop_scan_cmdid = WMI_TLV_STOP_SCAN_CMDID,
+	.scan_chan_list_cmdid = WMI_TLV_SCAN_CHAN_LIST_CMDID,
+	.scan_sch_prio_tbl_cmdid = WMI_TLV_SCAN_SCH_PRIO_TBL_CMDID,
+	.pdev_set_regdomain_cmdid = WMI_TLV_PDEV_SET_REGDOMAIN_CMDID,
+	.pdev_set_channel_cmdid = WMI_TLV_PDEV_SET_CHANNEL_CMDID,
+	.pdev_set_param_cmdid = WMI_TLV_PDEV_SET_PARAM_CMDID,
+	.pdev_pktlog_enable_cmdid = WMI_TLV_PDEV_PKTLOG_ENABLE_CMDID,
+	.pdev_pktlog_disable_cmdid = WMI_TLV_PDEV_PKTLOG_DISABLE_CMDID,
+	.pdev_set_wmm_params_cmdid = WMI_TLV_PDEV_SET_WMM_PARAMS_CMDID,
+	.pdev_set_ht_cap_ie_cmdid = WMI_TLV_PDEV_SET_HT_CAP_IE_CMDID,
+	.pdev_set_vht_cap_ie_cmdid = WMI_TLV_PDEV_SET_VHT_CAP_IE_CMDID,
+	.pdev_set_dscp_tid_map_cmdid = WMI_TLV_PDEV_SET_DSCP_TID_MAP_CMDID,
+	.pdev_set_quiet_mode_cmdid = WMI_TLV_PDEV_SET_QUIET_MODE_CMDID,
+	.pdev_green_ap_ps_enable_cmdid = WMI_TLV_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+	.pdev_get_tpc_config_cmdid = WMI_TLV_PDEV_GET_TPC_CONFIG_CMDID,
+	.pdev_set_base_macaddr_cmdid = WMI_TLV_PDEV_SET_BASE_MACADDR_CMDID,
+	.vdev_create_cmdid = WMI_TLV_VDEV_CREATE_CMDID,
+	.vdev_delete_cmdid = WMI_TLV_VDEV_DELETE_CMDID,
+	.vdev_start_request_cmdid = WMI_TLV_VDEV_START_REQUEST_CMDID,
+	.vdev_restart_request_cmdid = WMI_TLV_VDEV_RESTART_REQUEST_CMDID,
+	.vdev_up_cmdid = WMI_TLV_VDEV_UP_CMDID,
+	.vdev_stop_cmdid = WMI_TLV_VDEV_STOP_CMDID,
+	.vdev_down_cmdid = WMI_TLV_VDEV_DOWN_CMDID,
+	.vdev_set_param_cmdid = WMI_TLV_VDEV_SET_PARAM_CMDID,
+	.vdev_install_key_cmdid = WMI_TLV_VDEV_INSTALL_KEY_CMDID,
+	.peer_create_cmdid = WMI_TLV_PEER_CREATE_CMDID,
+	.peer_delete_cmdid = WMI_TLV_PEER_DELETE_CMDID,
+	.peer_flush_tids_cmdid = WMI_TLV_PEER_FLUSH_TIDS_CMDID,
+	.peer_set_param_cmdid = WMI_TLV_PEER_SET_PARAM_CMDID,
+	.peer_assoc_cmdid = WMI_TLV_PEER_ASSOC_CMDID,
+	.peer_add_wds_entry_cmdid = WMI_TLV_PEER_ADD_WDS_ENTRY_CMDID,
+	.peer_remove_wds_entry_cmdid = WMI_TLV_PEER_REMOVE_WDS_ENTRY_CMDID,
+	.peer_mcast_group_cmdid = WMI_TLV_PEER_MCAST_GROUP_CMDID,
+	.bcn_tx_cmdid = WMI_TLV_BCN_TX_CMDID,
+	.pdev_send_bcn_cmdid = WMI_TLV_PDEV_SEND_BCN_CMDID,
+	.bcn_tmpl_cmdid = WMI_TLV_BCN_TMPL_CMDID,
+	.bcn_filter_rx_cmdid = WMI_TLV_BCN_FILTER_RX_CMDID,
+	.prb_req_filter_rx_cmdid = WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
+	.mgmt_tx_cmdid = WMI_TLV_MGMT_TX_CMDID,
+	.prb_tmpl_cmdid = WMI_TLV_PRB_TMPL_CMDID,
+	.addba_clear_resp_cmdid = WMI_TLV_ADDBA_CLEAR_RESP_CMDID,
+	.addba_send_cmdid = WMI_TLV_ADDBA_SEND_CMDID,
+	.addba_status_cmdid = WMI_TLV_ADDBA_STATUS_CMDID,
+	.delba_send_cmdid = WMI_TLV_DELBA_SEND_CMDID,
+	.addba_set_resp_cmdid = WMI_TLV_ADDBA_SET_RESP_CMDID,
+	.send_singleamsdu_cmdid = WMI_TLV_SEND_SINGLEAMSDU_CMDID,
+	.sta_powersave_mode_cmdid = WMI_TLV_STA_POWERSAVE_MODE_CMDID,
+	.sta_powersave_param_cmdid = WMI_TLV_STA_POWERSAVE_PARAM_CMDID,
+	.sta_mimo_ps_mode_cmdid = WMI_TLV_STA_MIMO_PS_MODE_CMDID,
+	.pdev_dfs_enable_cmdid = WMI_TLV_PDEV_DFS_ENABLE_CMDID,
+	.pdev_dfs_disable_cmdid = WMI_TLV_PDEV_DFS_DISABLE_CMDID,
+	.roam_scan_mode = WMI_TLV_ROAM_SCAN_MODE,
+	.roam_scan_rssi_threshold = WMI_TLV_ROAM_SCAN_RSSI_THRESHOLD,
+	.roam_scan_period = WMI_TLV_ROAM_SCAN_PERIOD,
+	.roam_scan_rssi_change_threshold =
+				WMI_TLV_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+	.roam_ap_profile = WMI_TLV_ROAM_AP_PROFILE,
+	.ofl_scan_add_ap_profile = WMI_TLV_OFL_SCAN_ADD_AP_PROFILE,
+	.ofl_scan_remove_ap_profile = WMI_TLV_OFL_SCAN_REMOVE_AP_PROFILE,
+	.ofl_scan_period = WMI_TLV_OFL_SCAN_PERIOD,
+	.p2p_dev_set_device_info = WMI_TLV_P2P_DEV_SET_DEVICE_INFO,
+	.p2p_dev_set_discoverability = WMI_TLV_P2P_DEV_SET_DISCOVERABILITY,
+	.p2p_go_set_beacon_ie = WMI_TLV_P2P_GO_SET_BEACON_IE,
+	.p2p_go_set_probe_resp_ie = WMI_TLV_P2P_GO_SET_PROBE_RESP_IE,
+	.p2p_set_vendor_ie_data_cmdid = WMI_TLV_P2P_SET_VENDOR_IE_DATA_CMDID,
+	.ap_ps_peer_param_cmdid = WMI_TLV_AP_PS_PEER_PARAM_CMDID,
+	.ap_ps_peer_uapsd_coex_cmdid = WMI_TLV_AP_PS_PEER_UAPSD_COEX_CMDID,
+	.peer_rate_retry_sched_cmdid = WMI_TLV_PEER_RATE_RETRY_SCHED_CMDID,
+	.wlan_profile_trigger_cmdid = WMI_TLV_WLAN_PROFILE_TRIGGER_CMDID,
+	.wlan_profile_set_hist_intvl_cmdid =
+				WMI_TLV_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+	.wlan_profile_get_profile_data_cmdid =
+				WMI_TLV_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+	.wlan_profile_enable_profile_id_cmdid =
+				WMI_TLV_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+	.wlan_profile_list_profile_id_cmdid =
+				WMI_TLV_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+	.pdev_suspend_cmdid = WMI_TLV_PDEV_SUSPEND_CMDID,
+	.pdev_resume_cmdid = WMI_TLV_PDEV_RESUME_CMDID,
+	.add_bcn_filter_cmdid = WMI_TLV_ADD_BCN_FILTER_CMDID,
+	.rmv_bcn_filter_cmdid = WMI_TLV_RMV_BCN_FILTER_CMDID,
+	.wow_add_wake_pattern_cmdid = WMI_TLV_WOW_ADD_WAKE_PATTERN_CMDID,
+	.wow_del_wake_pattern_cmdid = WMI_TLV_WOW_DEL_WAKE_PATTERN_CMDID,
+	.wow_enable_disable_wake_event_cmdid =
+				WMI_TLV_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+	.wow_enable_cmdid = WMI_TLV_WOW_ENABLE_CMDID,
+	.wow_hostwakeup_from_sleep_cmdid =
+				WMI_TLV_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+	.rtt_measreq_cmdid = WMI_TLV_RTT_MEASREQ_CMDID,
+	.rtt_tsf_cmdid = WMI_TLV_RTT_TSF_CMDID,
+	.vdev_spectral_scan_configure_cmdid = WMI_TLV_SPECTRAL_SCAN_CONF_CMDID,
+	.vdev_spectral_scan_enable_cmdid = WMI_TLV_SPECTRAL_SCAN_ENABLE_CMDID,
+	.request_stats_cmdid = WMI_TLV_REQUEST_STATS_CMDID,
+	.set_arp_ns_offload_cmdid = WMI_TLV_SET_ARP_NS_OFFLOAD_CMDID,
+	.network_list_offload_config_cmdid =
+				WMI_TLV_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
+	.gtk_offload_cmdid = WMI_TLV_GTK_OFFLOAD_CMDID,
+	.csa_offload_enable_cmdid = WMI_TLV_CSA_OFFLOAD_ENABLE_CMDID,
+	.csa_offload_chanswitch_cmdid = WMI_TLV_CSA_OFFLOAD_CHANSWITCH_CMDID,
+	.chatter_set_mode_cmdid = WMI_TLV_CHATTER_SET_MODE_CMDID,
+	.peer_tid_addba_cmdid = WMI_TLV_PEER_TID_ADDBA_CMDID,
+	.peer_tid_delba_cmdid = WMI_TLV_PEER_TID_DELBA_CMDID,
+	.sta_dtim_ps_method_cmdid = WMI_TLV_STA_DTIM_PS_METHOD_CMDID,
+	.sta_uapsd_auto_trig_cmdid = WMI_TLV_STA_UAPSD_AUTO_TRIG_CMDID,
+	.sta_keepalive_cmd = WMI_TLV_STA_KEEPALIVE_CMDID,
+	.echo_cmdid = WMI_TLV_ECHO_CMDID,
+	.pdev_utf_cmdid = WMI_TLV_PDEV_UTF_CMDID,
+	.dbglog_cfg_cmdid = WMI_TLV_DBGLOG_CFG_CMDID,
+	.pdev_qvit_cmdid = WMI_TLV_PDEV_QVIT_CMDID,
+	.pdev_ftm_intg_cmdid = WMI_TLV_PDEV_FTM_INTG_CMDID,
+	.vdev_set_keepalive_cmdid = WMI_TLV_VDEV_SET_KEEPALIVE_CMDID,
+	.vdev_get_keepalive_cmdid = WMI_TLV_VDEV_GET_KEEPALIVE_CMDID,
+	.force_fw_hang_cmdid = WMI_TLV_FORCE_FW_HANG_CMDID,
+	.gpio_config_cmdid = WMI_TLV_GPIO_CONFIG_CMDID,
+	.gpio_output_cmdid = WMI_TLV_GPIO_OUTPUT_CMDID,
+	.pdev_get_temperature_cmdid = WMI_TLV_CMD_UNSUPPORTED,
+	.vdev_set_wmm_params_cmdid = WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID,
+	.tdls_set_state_cmdid = WMI_TLV_TDLS_SET_STATE_CMDID,
+	.tdls_peer_update_cmdid = WMI_TLV_TDLS_PEER_UPDATE_CMDID,
+	.adaptive_qcs_cmdid = WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID,
+	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+	.nan_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
+};
+
+static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
+	.tx_chain_mask = WMI_TLV_PDEV_PARAM_TX_CHAIN_MASK,
+	.rx_chain_mask = WMI_TLV_PDEV_PARAM_RX_CHAIN_MASK,
+	.txpower_limit2g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT2G,
+	.txpower_limit5g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT5G,
+	.txpower_scale = WMI_TLV_PDEV_PARAM_TXPOWER_SCALE,
+	.beacon_gen_mode = WMI_TLV_PDEV_PARAM_BEACON_GEN_MODE,
+	.beacon_tx_mode = WMI_TLV_PDEV_PARAM_BEACON_TX_MODE,
+	.resmgr_offchan_mode = WMI_TLV_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+	.protection_mode = WMI_TLV_PDEV_PARAM_PROTECTION_MODE,
+	.dynamic_bw = WMI_TLV_PDEV_PARAM_DYNAMIC_BW,
+	.non_agg_sw_retry_th = WMI_TLV_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+	.agg_sw_retry_th = WMI_TLV_PDEV_PARAM_AGG_SW_RETRY_TH,
+	.sta_kickout_th = WMI_TLV_PDEV_PARAM_STA_KICKOUT_TH,
+	.ac_aggrsize_scaling = WMI_TLV_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+	.ltr_enable = WMI_TLV_PDEV_PARAM_LTR_ENABLE,
+	.ltr_ac_latency_be = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BE,
+	.ltr_ac_latency_bk = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BK,
+	.ltr_ac_latency_vi = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VI,
+	.ltr_ac_latency_vo = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VO,
+	.ltr_ac_latency_timeout = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+	.ltr_sleep_override = WMI_TLV_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+	.ltr_rx_override = WMI_TLV_PDEV_PARAM_LTR_RX_OVERRIDE,
+	.ltr_tx_activity_timeout = WMI_TLV_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+	.l1ss_enable = WMI_TLV_PDEV_PARAM_L1SS_ENABLE,
+	.dsleep_enable = WMI_TLV_PDEV_PARAM_DSLEEP_ENABLE,
+	.pcielp_txbuf_flush = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+	.pcielp_txbuf_watermark = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+	.pcielp_txbuf_tmo_en = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+	.pcielp_txbuf_tmo_value = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+	.pdev_stats_update_period = WMI_TLV_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+	.vdev_stats_update_period = WMI_TLV_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+	.peer_stats_update_period = WMI_TLV_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+	.bcnflt_stats_update_period =
+				WMI_TLV_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+	.pmf_qos = WMI_TLV_PDEV_PARAM_PMF_QOS,
+	.arp_ac_override = WMI_TLV_PDEV_PARAM_ARP_AC_OVERRIDE,
+	.dcs = WMI_TLV_PDEV_PARAM_DCS,
+	.ani_enable = WMI_TLV_PDEV_PARAM_ANI_ENABLE,
+	.ani_poll_period = WMI_TLV_PDEV_PARAM_ANI_POLL_PERIOD,
+	.ani_listen_period = WMI_TLV_PDEV_PARAM_ANI_LISTEN_PERIOD,
+	.ani_ofdm_level = WMI_TLV_PDEV_PARAM_ANI_OFDM_LEVEL,
+	.ani_cck_level = WMI_TLV_PDEV_PARAM_ANI_CCK_LEVEL,
+	.dyntxchain = WMI_TLV_PDEV_PARAM_DYNTXCHAIN,
+	.proxy_sta = WMI_TLV_PDEV_PARAM_PROXY_STA,
+	.idle_ps_config = WMI_TLV_PDEV_PARAM_IDLE_PS_CONFIG,
+	.power_gating_sleep = WMI_TLV_PDEV_PARAM_POWER_GATING_SLEEP,
+	.fast_channel_reset = WMI_TLV_PDEV_PARAM_UNSUPPORTED,
+	.burst_dur = WMI_TLV_PDEV_PARAM_BURST_DUR,
+	.burst_enable = WMI_TLV_PDEV_PARAM_BURST_ENABLE,
+	.cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
+	.aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
+	.rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+	.smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
+	.igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
+	.igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
+	.rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+	.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+	.peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+	.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+	.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
+	.mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
+	.noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
+	.noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+	.dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
+	.atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
+	.atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
+	.ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
+	.mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
+	.sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
+	.signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
+	.signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
+	.enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
+	.enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
+	.cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+	.rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
+	.pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+	.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
+	.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+	.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+};
+
+static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
+	.rts_threshold = WMI_TLV_VDEV_PARAM_RTS_THRESHOLD,
+	.fragmentation_threshold = WMI_TLV_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+	.beacon_interval = WMI_TLV_VDEV_PARAM_BEACON_INTERVAL,
+	.listen_interval = WMI_TLV_VDEV_PARAM_LISTEN_INTERVAL,
+	.multicast_rate = WMI_TLV_VDEV_PARAM_MULTICAST_RATE,
+	.mgmt_tx_rate = WMI_TLV_VDEV_PARAM_MGMT_TX_RATE,
+	.slot_time = WMI_TLV_VDEV_PARAM_SLOT_TIME,
+	.preamble = WMI_TLV_VDEV_PARAM_PREAMBLE,
+	.swba_time = WMI_TLV_VDEV_PARAM_SWBA_TIME,
+	.wmi_vdev_stats_update_period = WMI_TLV_VDEV_STATS_UPDATE_PERIOD,
+	.wmi_vdev_pwrsave_ageout_time = WMI_TLV_VDEV_PWRSAVE_AGEOUT_TIME,
+	.wmi_vdev_host_swba_interval = WMI_TLV_VDEV_HOST_SWBA_INTERVAL,
+	.dtim_period = WMI_TLV_VDEV_PARAM_DTIM_PERIOD,
+	.wmi_vdev_oc_scheduler_air_time_limit =
+				WMI_TLV_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+	.wds = WMI_TLV_VDEV_PARAM_WDS,
+	.atim_window = WMI_TLV_VDEV_PARAM_ATIM_WINDOW,
+	.bmiss_count_max = WMI_TLV_VDEV_PARAM_BMISS_COUNT_MAX,
+	.bmiss_first_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FIRST_BCNT,
+	.bmiss_final_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FINAL_BCNT,
+	.feature_wmm = WMI_TLV_VDEV_PARAM_FEATURE_WMM,
+	.chwidth = WMI_TLV_VDEV_PARAM_CHWIDTH,
+	.chextoffset = WMI_TLV_VDEV_PARAM_CHEXTOFFSET,
+	.disable_htprotection =	WMI_TLV_VDEV_PARAM_DISABLE_HTPROTECTION,
+	.sta_quickkickout = WMI_TLV_VDEV_PARAM_STA_QUICKKICKOUT,
+	.mgmt_rate = WMI_TLV_VDEV_PARAM_MGMT_RATE,
+	.protection_mode = WMI_TLV_VDEV_PARAM_PROTECTION_MODE,
+	.fixed_rate = WMI_TLV_VDEV_PARAM_FIXED_RATE,
+	.sgi = WMI_TLV_VDEV_PARAM_SGI,
+	.ldpc = WMI_TLV_VDEV_PARAM_LDPC,
+	.tx_stbc = WMI_TLV_VDEV_PARAM_TX_STBC,
+	.rx_stbc = WMI_TLV_VDEV_PARAM_RX_STBC,
+	.intra_bss_fwd = WMI_TLV_VDEV_PARAM_INTRA_BSS_FWD,
+	.def_keyid = WMI_TLV_VDEV_PARAM_DEF_KEYID,
+	.nss = WMI_TLV_VDEV_PARAM_NSS,
+	.bcast_data_rate = WMI_TLV_VDEV_PARAM_BCAST_DATA_RATE,
+	.mcast_data_rate = WMI_TLV_VDEV_PARAM_MCAST_DATA_RATE,
+	.mcast_indicate = WMI_TLV_VDEV_PARAM_MCAST_INDICATE,
+	.dhcp_indicate = WMI_TLV_VDEV_PARAM_DHCP_INDICATE,
+	.unknown_dest_indicate = WMI_TLV_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+	.ap_keepalive_min_idle_inactive_time_secs =
+		WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+	.ap_keepalive_max_idle_inactive_time_secs =
+		WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+	.ap_keepalive_max_unresponsive_time_secs =
+		WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+	.ap_enable_nawds = WMI_TLV_VDEV_PARAM_AP_ENABLE_NAWDS,
+	.mcast2ucast_set = WMI_TLV_VDEV_PARAM_UNSUPPORTED,
+	.enable_rtscts = WMI_TLV_VDEV_PARAM_ENABLE_RTSCTS,
+	.txbf = WMI_TLV_VDEV_PARAM_TXBF,
+	.packet_powersave = WMI_TLV_VDEV_PARAM_PACKET_POWERSAVE,
+	.drop_unencry = WMI_TLV_VDEV_PARAM_DROP_UNENCRY,
+	.tx_encap_type = WMI_TLV_VDEV_PARAM_TX_ENCAP_TYPE,
+	.ap_detect_out_of_sync_sleeping_sta_time_secs =
+					WMI_TLV_VDEV_PARAM_UNSUPPORTED,
+	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
+	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
+	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
+	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
+	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
+	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
+	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
+	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
+	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+};
+
+static const struct wmi_ops wmi_tlv_ops = {
+	.rx = ath10k_wmi_tlv_op_rx,
+	.map_svc = wmi_tlv_svc_map,
+
+	.pull_scan = ath10k_wmi_tlv_op_pull_scan_ev,
+	.pull_mgmt_rx = ath10k_wmi_tlv_op_pull_mgmt_rx_ev,
+	.pull_ch_info = ath10k_wmi_tlv_op_pull_ch_info_ev,
+	.pull_vdev_start = ath10k_wmi_tlv_op_pull_vdev_start_ev,
+	.pull_peer_kick = ath10k_wmi_tlv_op_pull_peer_kick_ev,
+	.pull_swba = ath10k_wmi_tlv_op_pull_swba_ev,
+	.pull_phyerr_hdr = ath10k_wmi_tlv_op_pull_phyerr_ev_hdr,
+	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
+	.pull_svc_rdy = ath10k_wmi_tlv_op_pull_svc_rdy_ev,
+	.pull_rdy = ath10k_wmi_tlv_op_pull_rdy_ev,
+	.pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
+	.pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev,
+	.pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev,
+	.get_txbf_conf_scheme = ath10k_wmi_tlv_txbf_conf_scheme,
+
+	.gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
+	.gen_pdev_resume = ath10k_wmi_tlv_op_gen_pdev_resume,
+	.gen_pdev_set_rd = ath10k_wmi_tlv_op_gen_pdev_set_rd,
+	.gen_pdev_set_param = ath10k_wmi_tlv_op_gen_pdev_set_param,
+	.gen_init = ath10k_wmi_tlv_op_gen_init,
+	.gen_start_scan = ath10k_wmi_tlv_op_gen_start_scan,
+	.gen_stop_scan = ath10k_wmi_tlv_op_gen_stop_scan,
+	.gen_vdev_create = ath10k_wmi_tlv_op_gen_vdev_create,
+	.gen_vdev_delete = ath10k_wmi_tlv_op_gen_vdev_delete,
+	.gen_vdev_start = ath10k_wmi_tlv_op_gen_vdev_start,
+	.gen_vdev_stop = ath10k_wmi_tlv_op_gen_vdev_stop,
+	.gen_vdev_up = ath10k_wmi_tlv_op_gen_vdev_up,
+	.gen_vdev_down = ath10k_wmi_tlv_op_gen_vdev_down,
+	.gen_vdev_set_param = ath10k_wmi_tlv_op_gen_vdev_set_param,
+	.gen_vdev_install_key = ath10k_wmi_tlv_op_gen_vdev_install_key,
+	.gen_vdev_wmm_conf = ath10k_wmi_tlv_op_gen_vdev_wmm_conf,
+	.gen_peer_create = ath10k_wmi_tlv_op_gen_peer_create,
+	.gen_peer_delete = ath10k_wmi_tlv_op_gen_peer_delete,
+	.gen_peer_flush = ath10k_wmi_tlv_op_gen_peer_flush,
+	.gen_peer_set_param = ath10k_wmi_tlv_op_gen_peer_set_param,
+	.gen_peer_assoc = ath10k_wmi_tlv_op_gen_peer_assoc,
+	.gen_set_psmode = ath10k_wmi_tlv_op_gen_set_psmode,
+	.gen_set_sta_ps = ath10k_wmi_tlv_op_gen_set_sta_ps,
+	.gen_set_ap_ps = ath10k_wmi_tlv_op_gen_set_ap_ps,
+	.gen_scan_chan_list = ath10k_wmi_tlv_op_gen_scan_chan_list,
+	.gen_beacon_dma = ath10k_wmi_tlv_op_gen_beacon_dma,
+	.gen_pdev_set_wmm = ath10k_wmi_tlv_op_gen_pdev_set_wmm,
+	.gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats,
+	.gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang,
+	/* .gen_mgmt_tx = not implemented; HTT is used */
+	.gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg,
+	.gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable,
+	.gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable,
+	/* .gen_pdev_set_quiet_mode not implemented */
+	/* .gen_pdev_get_temperature not implemented */
+	/* .gen_addba_clear_resp not implemented */
+	/* .gen_addba_send not implemented */
+	/* .gen_addba_set_resp not implemented */
+	/* .gen_delba_send not implemented */
+	.gen_bcn_tmpl = ath10k_wmi_tlv_op_gen_bcn_tmpl,
+	.gen_prb_tmpl = ath10k_wmi_tlv_op_gen_prb_tmpl,
+	.gen_p2p_go_bcn_ie = ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie,
+	.gen_vdev_sta_uapsd = ath10k_wmi_tlv_op_gen_vdev_sta_uapsd,
+	.gen_sta_keepalive = ath10k_wmi_tlv_op_gen_sta_keepalive,
+	.gen_wow_enable = ath10k_wmi_tlv_op_gen_wow_enable,
+	.gen_wow_add_wakeup_event = ath10k_wmi_tlv_op_gen_wow_add_wakeup_event,
+	.gen_wow_host_wakeup_ind = ath10k_wmi_tlv_gen_wow_host_wakeup_ind,
+	.gen_wow_add_pattern = ath10k_wmi_tlv_op_gen_wow_add_pattern,
+	.gen_wow_del_pattern = ath10k_wmi_tlv_op_gen_wow_del_pattern,
+	.gen_update_fw_tdls_state = ath10k_wmi_tlv_op_gen_update_fw_tdls_state,
+	.gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update,
+	.gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs,
+	.fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
+};
+
+/************/
+/* TLV init */
+/************/
+
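+/* Hook up the TLV command map, parameter maps and op table for firmware
+ * that speaks the TLV flavour of WMI.
+ */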
+void ath10k_wmi_tlv_attach(struct ath10k *ar)
+{
+	ar->wmi.cmd = &wmi_tlv_cmd_map;
+	ar->wmi.vdev_param = &wmi_tlv_vdev_param_map;
+	ar->wmi.pdev_param = &wmi_tlv_pdev_param_map;
+	ar->wmi.ops = &wmi_tlv_ops;
+}
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
new file mode 100644
index 0000000..ad655c4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -0,0 +1,1627 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef _WMI_TLV_H
+#define _WMI_TLV_H
+
+#define WMI_TLV_CMD(grp_id) (((grp_id) << 12) | 0x1)
+#define WMI_TLV_EV(grp_id) (((grp_id) << 12) | 0x1)
+#define WMI_TLV_CMD_UNSUPPORTED 0
+#define WMI_TLV_PDEV_PARAM_UNSUPPORTED 0
+#define WMI_TLV_VDEV_PARAM_UNSUPPORTED 0
+
+enum wmi_tlv_grp_id {
+	WMI_TLV_GRP_START = 0x3,
+	WMI_TLV_GRP_SCAN = WMI_TLV_GRP_START,
+	WMI_TLV_GRP_PDEV,
+	WMI_TLV_GRP_VDEV,
+	WMI_TLV_GRP_PEER,
+	WMI_TLV_GRP_MGMT,
+	WMI_TLV_GRP_BA_NEG,
+	WMI_TLV_GRP_STA_PS,
+	WMI_TLV_GRP_DFS,
+	WMI_TLV_GRP_ROAM,
+	WMI_TLV_GRP_OFL_SCAN,
+	WMI_TLV_GRP_P2P,
+	WMI_TLV_GRP_AP_PS,
+	WMI_TLV_GRP_RATECTL,
+	WMI_TLV_GRP_PROFILE,
+	WMI_TLV_GRP_SUSPEND,
+	WMI_TLV_GRP_BCN_FILTER,
+	WMI_TLV_GRP_WOW,
+	WMI_TLV_GRP_RTT,
+	WMI_TLV_GRP_SPECTRAL,
+	WMI_TLV_GRP_STATS,
+	WMI_TLV_GRP_ARP_NS_OFL,
+	WMI_TLV_GRP_NLO_OFL,
+	WMI_TLV_GRP_GTK_OFL,
+	WMI_TLV_GRP_CSA_OFL,
+	WMI_TLV_GRP_CHATTER,
+	WMI_TLV_GRP_TID_ADDBA,
+	WMI_TLV_GRP_MISC,
+	WMI_TLV_GRP_GPIO,
+	WMI_TLV_GRP_FWTEST,
+	WMI_TLV_GRP_TDLS,
+	WMI_TLV_GRP_RESMGR,
+	WMI_TLV_GRP_STA_SMPS,
+	WMI_TLV_GRP_WLAN_HB,
+	WMI_TLV_GRP_RMC,
+	WMI_TLV_GRP_MHF_OFL,
+	WMI_TLV_GRP_LOCATION_SCAN,
+	WMI_TLV_GRP_OEM,
+	WMI_TLV_GRP_NAN,
+	WMI_TLV_GRP_COEX,
+	WMI_TLV_GRP_OBSS_OFL,
+	WMI_TLV_GRP_LPI,
+	WMI_TLV_GRP_EXTSCAN,
+	WMI_TLV_GRP_DHCP_OFL,
+	WMI_TLV_GRP_IPA,
+	WMI_TLV_GRP_MDNS_OFL,
+	WMI_TLV_GRP_SAP_OFL,
+};
+
+enum wmi_tlv_cmd_id {
+	WMI_TLV_INIT_CMDID = 0x1,
+	WMI_TLV_START_SCAN_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_SCAN),
+	WMI_TLV_STOP_SCAN_CMDID,
+	WMI_TLV_SCAN_CHAN_LIST_CMDID,
+	WMI_TLV_SCAN_SCH_PRIO_TBL_CMDID,
+	WMI_TLV_SCAN_UPDATE_REQUEST_CMDID,
+	WMI_TLV_SCAN_PROB_REQ_OUI_CMDID,
+	WMI_TLV_PDEV_SET_REGDOMAIN_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_PDEV),
+	WMI_TLV_PDEV_SET_CHANNEL_CMDID,
+	WMI_TLV_PDEV_SET_PARAM_CMDID,
+	WMI_TLV_PDEV_PKTLOG_ENABLE_CMDID,
+	WMI_TLV_PDEV_PKTLOG_DISABLE_CMDID,
+	WMI_TLV_PDEV_SET_WMM_PARAMS_CMDID,
+	WMI_TLV_PDEV_SET_HT_CAP_IE_CMDID,
+	WMI_TLV_PDEV_SET_VHT_CAP_IE_CMDID,
+	WMI_TLV_PDEV_SET_DSCP_TID_MAP_CMDID,
+	WMI_TLV_PDEV_SET_QUIET_MODE_CMDID,
+	WMI_TLV_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+	WMI_TLV_PDEV_GET_TPC_CONFIG_CMDID,
+	WMI_TLV_PDEV_SET_BASE_MACADDR_CMDID,
+	WMI_TLV_PDEV_DUMP_CMDID,
+	WMI_TLV_PDEV_SET_LED_CONFIG_CMDID,
+	WMI_TLV_PDEV_GET_TEMPERATURE_CMDID,
+	WMI_TLV_PDEV_SET_LED_FLASHING_CMDID,
+	WMI_TLV_VDEV_CREATE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_VDEV),
+	WMI_TLV_VDEV_DELETE_CMDID,
+	WMI_TLV_VDEV_START_REQUEST_CMDID,
+	WMI_TLV_VDEV_RESTART_REQUEST_CMDID,
+	WMI_TLV_VDEV_UP_CMDID,
+	WMI_TLV_VDEV_STOP_CMDID,
+	WMI_TLV_VDEV_DOWN_CMDID,
+	WMI_TLV_VDEV_SET_PARAM_CMDID,
+	WMI_TLV_VDEV_INSTALL_KEY_CMDID,
+	WMI_TLV_VDEV_WNM_SLEEPMODE_CMDID,
+	WMI_TLV_VDEV_WMM_ADDTS_CMDID,
+	WMI_TLV_VDEV_WMM_DELTS_CMDID,
+	WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID,
+	WMI_TLV_VDEV_SET_GTX_PARAMS_CMDID,
+	WMI_TLV_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMDID,
+	WMI_TLV_VDEV_PLMREQ_START_CMDID,
+	WMI_TLV_VDEV_PLMREQ_STOP_CMDID,
+	WMI_TLV_PEER_CREATE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_PEER),
+	WMI_TLV_PEER_DELETE_CMDID,
+	WMI_TLV_PEER_FLUSH_TIDS_CMDID,
+	WMI_TLV_PEER_SET_PARAM_CMDID,
+	WMI_TLV_PEER_ASSOC_CMDID,
+	WMI_TLV_PEER_ADD_WDS_ENTRY_CMDID,
+	WMI_TLV_PEER_REMOVE_WDS_ENTRY_CMDID,
+	WMI_TLV_PEER_MCAST_GROUP_CMDID,
+	WMI_TLV_PEER_INFO_REQ_CMDID,
+	WMI_TLV_PEER_GET_ESTIMATED_LINKSPEED_CMDID,
+	WMI_TLV_BCN_TX_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_MGMT),
+	WMI_TLV_PDEV_SEND_BCN_CMDID,
+	WMI_TLV_BCN_TMPL_CMDID,
+	WMI_TLV_BCN_FILTER_RX_CMDID,
+	WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
+	WMI_TLV_MGMT_TX_CMDID,
+	WMI_TLV_PRB_TMPL_CMDID,
+	WMI_TLV_ADDBA_CLEAR_RESP_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_BA_NEG),
+	WMI_TLV_ADDBA_SEND_CMDID,
+	WMI_TLV_ADDBA_STATUS_CMDID,
+	WMI_TLV_DELBA_SEND_CMDID,
+	WMI_TLV_ADDBA_SET_RESP_CMDID,
+	WMI_TLV_SEND_SINGLEAMSDU_CMDID,
+	WMI_TLV_STA_POWERSAVE_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_STA_PS),
+	WMI_TLV_STA_POWERSAVE_PARAM_CMDID,
+	WMI_TLV_STA_MIMO_PS_MODE_CMDID,
+	WMI_TLV_PDEV_DFS_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_DFS),
+	WMI_TLV_PDEV_DFS_DISABLE_CMDID,
+	WMI_TLV_DFS_PHYERR_FILTER_ENA_CMDID,
+	WMI_TLV_DFS_PHYERR_FILTER_DIS_CMDID,
+	WMI_TLV_ROAM_SCAN_MODE = WMI_TLV_CMD(WMI_TLV_GRP_ROAM),
+	WMI_TLV_ROAM_SCAN_RSSI_THRESHOLD,
+	WMI_TLV_ROAM_SCAN_PERIOD,
+	WMI_TLV_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+	WMI_TLV_ROAM_AP_PROFILE,
+	WMI_TLV_ROAM_CHAN_LIST,
+	WMI_TLV_ROAM_SCAN_CMD,
+	WMI_TLV_ROAM_SYNCH_COMPLETE,
+	WMI_TLV_ROAM_SET_RIC_REQUEST_CMDID,
+	WMI_TLV_ROAM_INVOKE_CMDID,
+	WMI_TLV_OFL_SCAN_ADD_AP_PROFILE = WMI_TLV_CMD(WMI_TLV_GRP_OFL_SCAN),
+	WMI_TLV_OFL_SCAN_REMOVE_AP_PROFILE,
+	WMI_TLV_OFL_SCAN_PERIOD,
+	WMI_TLV_P2P_DEV_SET_DEVICE_INFO = WMI_TLV_CMD(WMI_TLV_GRP_P2P),
+	WMI_TLV_P2P_DEV_SET_DISCOVERABILITY,
+	WMI_TLV_P2P_GO_SET_BEACON_IE,
+	WMI_TLV_P2P_GO_SET_PROBE_RESP_IE,
+	WMI_TLV_P2P_SET_VENDOR_IE_DATA_CMDID,
+	WMI_TLV_P2P_DISC_OFFLOAD_CONFIG_CMDID,
+	WMI_TLV_P2P_DISC_OFFLOAD_APPIE_CMDID,
+	WMI_TLV_P2P_DISC_OFFLOAD_PATTERN_CMDID,
+	WMI_TLV_P2P_SET_OPPPS_PARAM_CMDID,
+	WMI_TLV_AP_PS_PEER_PARAM_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_AP_PS),
+	WMI_TLV_AP_PS_PEER_UAPSD_COEX_CMDID,
+	WMI_TLV_PEER_RATE_RETRY_SCHED_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_RATECTL),
+	WMI_TLV_WLAN_PROFILE_TRIGGER_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_PROFILE),
+	WMI_TLV_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+	WMI_TLV_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+	WMI_TLV_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+	WMI_TLV_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+	WMI_TLV_PDEV_SUSPEND_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_SUSPEND),
+	WMI_TLV_PDEV_RESUME_CMDID,
+	WMI_TLV_ADD_BCN_FILTER_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_BCN_FILTER),
+	WMI_TLV_RMV_BCN_FILTER_CMDID,
+	WMI_TLV_WOW_ADD_WAKE_PATTERN_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_WOW),
+	WMI_TLV_WOW_DEL_WAKE_PATTERN_CMDID,
+	WMI_TLV_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+	WMI_TLV_WOW_ENABLE_CMDID,
+	WMI_TLV_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+	WMI_TLV_WOW_ACER_IOAC_ADD_KEEPALIVE_CMDID,
+	WMI_TLV_WOW_ACER_IOAC_DEL_KEEPALIVE_CMDID,
+	WMI_TLV_WOW_ACER_IOAC_ADD_WAKE_PATTERN_CMDID,
+	WMI_TLV_WOW_ACER_IOAC_DEL_WAKE_PATTERN_CMDID,
+	WMI_TLV_D0_WOW_ENABLE_DISABLE_CMDID,
+	WMI_TLV_EXTWOW_ENABLE_CMDID,
+	WMI_TLV_EXTWOW_SET_APP_TYPE1_PARAMS_CMDID,
+	WMI_TLV_EXTWOW_SET_APP_TYPE2_PARAMS_CMDID,
+	WMI_TLV_RTT_MEASREQ_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_RTT),
+	WMI_TLV_RTT_TSF_CMDID,
+	WMI_TLV_SPECTRAL_SCAN_CONF_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_SPECTRAL),
+	WMI_TLV_SPECTRAL_SCAN_ENABLE_CMDID,
+	WMI_TLV_REQUEST_STATS_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_STATS),
+	WMI_TLV_MCC_SCHED_TRAFFIC_STATS_CMDID,
+	WMI_TLV_REQUEST_STATS_EXT_CMDID,
+	WMI_TLV_REQUEST_LINK_STATS_CMDID,
+	WMI_TLV_START_LINK_STATS_CMDID,
+	WMI_TLV_CLEAR_LINK_STATS_CMDID,
+	WMI_TLV_SET_ARP_NS_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_ARP_NS_OFL),
+	WMI_TLV_ADD_PROACTIVE_ARP_RSP_PATTERN_CMDID,
+	WMI_TLV_DEL_PROACTIVE_ARP_RSP_PATTERN_CMDID,
+	WMI_TLV_NETWORK_LIST_OFFLOAD_CONFIG_CMDID =
+			WMI_TLV_CMD(WMI_TLV_GRP_NLO_OFL),
+	WMI_TLV_APFIND_CMDID,
+	WMI_TLV_GTK_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_GTK_OFL),
+	WMI_TLV_CSA_OFFLOAD_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_CSA_OFL),
+	WMI_TLV_CSA_OFFLOAD_CHANSWITCH_CMDID,
+	WMI_TLV_CHATTER_SET_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_CHATTER),
+	WMI_TLV_CHATTER_ADD_COALESCING_FILTER_CMDID,
+	WMI_TLV_CHATTER_DELETE_COALESCING_FILTER_CMDID,
+	WMI_TLV_CHATTER_COALESCING_QUERY_CMDID,
+	WMI_TLV_PEER_TID_ADDBA_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_TID_ADDBA),
+	WMI_TLV_PEER_TID_DELBA_CMDID,
+	WMI_TLV_STA_DTIM_PS_METHOD_CMDID,
+	WMI_TLV_STA_UAPSD_AUTO_TRIG_CMDID,
+	WMI_TLV_STA_KEEPALIVE_CMDID,
+	WMI_TLV_BA_REQ_SSN_CMDID,
+	WMI_TLV_ECHO_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_MISC),
+	WMI_TLV_PDEV_UTF_CMDID,
+	WMI_TLV_DBGLOG_CFG_CMDID,
+	WMI_TLV_PDEV_QVIT_CMDID,
+	WMI_TLV_PDEV_FTM_INTG_CMDID,
+	WMI_TLV_VDEV_SET_KEEPALIVE_CMDID,
+	WMI_TLV_VDEV_GET_KEEPALIVE_CMDID,
+	WMI_TLV_FORCE_FW_HANG_CMDID,
+	WMI_TLV_SET_MCASTBCAST_FILTER_CMDID,
+	WMI_TLV_THERMAL_MGMT_CMDID,
+	WMI_TLV_HOST_AUTO_SHUTDOWN_CFG_CMDID,
+	WMI_TLV_TPC_CHAINMASK_CONFIG_CMDID,
+	WMI_TLV_SET_ANTENNA_DIVERSITY_CMDID,
+	WMI_TLV_GPIO_CONFIG_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_GPIO),
+	WMI_TLV_GPIO_OUTPUT_CMDID,
+	WMI_TLV_TXBF_CMDID,
+	WMI_TLV_FWTEST_VDEV_MCC_SET_TBTT_MODE_CMDID =
+			WMI_TLV_CMD(WMI_TLV_GRP_FWTEST),
+	WMI_TLV_FWTEST_P2P_SET_NOA_PARAM_CMDID,
+	WMI_TLV_UNIT_TEST_CMDID,
+	WMI_TLV_TDLS_SET_STATE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_TDLS),
+	WMI_TLV_TDLS_PEER_UPDATE_CMDID,
+	WMI_TLV_TDLS_SET_OFFCHAN_MODE_CMDID,
+	WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_RESMGR),
+	WMI_TLV_RESMGR_SET_CHAN_TIME_QUOTA_CMDID,
+	WMI_TLV_RESMGR_SET_CHAN_LATENCY_CMDID,
+	WMI_TLV_STA_SMPS_FORCE_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_STA_SMPS),
+	WMI_TLV_STA_SMPS_PARAM_CMDID,
+	WMI_TLV_HB_SET_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_WLAN_HB),
+	WMI_TLV_HB_SET_TCP_PARAMS_CMDID,
+	WMI_TLV_HB_SET_TCP_PKT_FILTER_CMDID,
+	WMI_TLV_HB_SET_UDP_PARAMS_CMDID,
+	WMI_TLV_HB_SET_UDP_PKT_FILTER_CMDID,
+	WMI_TLV_RMC_SET_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_RMC),
+	WMI_TLV_RMC_SET_ACTION_PERIOD_CMDID,
+	WMI_TLV_RMC_CONFIG_CMDID,
+	WMI_TLV_MHF_OFFLOAD_SET_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_MHF_OFL),
+	WMI_TLV_MHF_OFFLOAD_PLUMB_ROUTING_TBL_CMDID,
+	WMI_TLV_BATCH_SCAN_ENABLE_CMDID =
+			WMI_TLV_CMD(WMI_TLV_GRP_LOCATION_SCAN),
+	WMI_TLV_BATCH_SCAN_DISABLE_CMDID,
+	WMI_TLV_BATCH_SCAN_TRIGGER_RESULT_CMDID,
+	WMI_TLV_OEM_REQ_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_OEM),
+	WMI_TLV_NAN_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_NAN),
+	WMI_TLV_MODEM_POWER_STATE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_COEX),
+	WMI_TLV_CHAN_AVOID_UPDATE_CMDID,
+	WMI_TLV_OBSS_SCAN_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_OBSS_OFL),
+	WMI_TLV_OBSS_SCAN_DISABLE_CMDID,
+	WMI_TLV_LPI_MGMT_SNOOPING_CONFIG_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_LPI),
+	WMI_TLV_LPI_START_SCAN_CMDID,
+	WMI_TLV_LPI_STOP_SCAN_CMDID,
+	WMI_TLV_EXTSCAN_START_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_EXTSCAN),
+	WMI_TLV_EXTSCAN_STOP_CMDID,
+	WMI_TLV_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMDID,
+	WMI_TLV_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMDID,
+	WMI_TLV_EXTSCAN_GET_CACHED_RESULTS_CMDID,
+	WMI_TLV_EXTSCAN_GET_WLAN_CHANGE_RESULTS_CMDID,
+	WMI_TLV_EXTSCAN_SET_CAPABILITIES_CMDID,
+	WMI_TLV_EXTSCAN_GET_CAPABILITIES_CMDID,
+	WMI_TLV_SET_DHCP_SERVER_OFFLOAD_CMDID =
+			WMI_TLV_CMD(WMI_TLV_GRP_DHCP_OFL),
+	WMI_TLV_IPA_OFFLOAD_ENABLE_DISABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_IPA),
+	WMI_TLV_MDNS_OFFLOAD_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_MDNS_OFL),
+	WMI_TLV_MDNS_SET_FQDN_CMDID,
+	WMI_TLV_MDNS_SET_RESPONSE_CMDID,
+	WMI_TLV_MDNS_GET_STATS_CMDID,
+	WMI_TLV_SAP_OFL_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_SAP_OFL),
+};
+
+enum wmi_tlv_event_id {
+	WMI_TLV_SERVICE_READY_EVENTID = 0x1,
+	WMI_TLV_READY_EVENTID,
+	WMI_TLV_SCAN_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_SCAN),
+	WMI_TLV_PDEV_TPC_CONFIG_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_PDEV),
+	WMI_TLV_CHAN_INFO_EVENTID,
+	WMI_TLV_PHYERR_EVENTID,
+	WMI_TLV_PDEV_DUMP_EVENTID,
+	WMI_TLV_TX_PAUSE_EVENTID,
+	WMI_TLV_DFS_RADAR_EVENTID,
+	WMI_TLV_PDEV_L1SS_TRACK_EVENTID,
+	WMI_TLV_PDEV_TEMPERATURE_EVENTID,
+	WMI_TLV_VDEV_START_RESP_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_VDEV),
+	WMI_TLV_VDEV_STOPPED_EVENTID,
+	WMI_TLV_VDEV_INSTALL_KEY_COMPLETE_EVENTID,
+	WMI_TLV_VDEV_MCC_BCN_INTERVAL_CHANGE_REQ_EVENTID,
+	WMI_TLV_PEER_STA_KICKOUT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_PEER),
+	WMI_TLV_PEER_INFO_EVENTID,
+	WMI_TLV_PEER_TX_FAIL_CNT_THR_EVENTID,
+	WMI_TLV_PEER_ESTIMATED_LINKSPEED_EVENTID,
+	WMI_TLV_PEER_STATE_EVENTID,
+	WMI_TLV_MGMT_RX_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_MGMT),
+	WMI_TLV_HOST_SWBA_EVENTID,
+	WMI_TLV_TBTTOFFSET_UPDATE_EVENTID,
+	WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID,
+	WMI_TLV_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID,
+	WMI_TLV_TX_DELBA_COMPLETE_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_BA_NEG),
+	WMI_TLV_TX_ADDBA_COMPLETE_EVENTID,
+	WMI_TLV_BA_RSP_SSN_EVENTID,
+	WMI_TLV_AGGR_STATE_TRIG_EVENTID,
+	WMI_TLV_ROAM_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_ROAM),
+	WMI_TLV_PROFILE_MATCH,
+	WMI_TLV_ROAM_SYNCH_EVENTID,
+	WMI_TLV_P2P_DISC_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_P2P),
+	WMI_TLV_P2P_NOA_EVENTID,
+	WMI_TLV_PDEV_RESUME_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_SUSPEND),
+	WMI_TLV_WOW_WAKEUP_HOST_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_WOW),
+	WMI_TLV_D0_WOW_DISABLE_ACK_EVENTID,
+	WMI_TLV_RTT_MEASUREMENT_REPORT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_RTT),
+	WMI_TLV_TSF_MEASUREMENT_REPORT_EVENTID,
+	WMI_TLV_RTT_ERROR_REPORT_EVENTID,
+	WMI_TLV_STATS_EXT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_STATS),
+	WMI_TLV_IFACE_LINK_STATS_EVENTID,
+	WMI_TLV_PEER_LINK_STATS_EVENTID,
+	WMI_TLV_RADIO_LINK_STATS_EVENTID,
+	WMI_TLV_NLO_MATCH_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_NLO_OFL),
+	WMI_TLV_NLO_SCAN_COMPLETE_EVENTID,
+	WMI_TLV_APFIND_EVENTID,
+	WMI_TLV_GTK_OFFLOAD_STATUS_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_GTK_OFL),
+	WMI_TLV_GTK_REKEY_FAIL_EVENTID,
+	WMI_TLV_CSA_HANDLING_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_CSA_OFL),
+	WMI_TLV_CHATTER_PC_QUERY_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_CHATTER),
+	WMI_TLV_ECHO_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_MISC),
+	WMI_TLV_PDEV_UTF_EVENTID,
+	WMI_TLV_DEBUG_MESG_EVENTID,
+	WMI_TLV_UPDATE_STATS_EVENTID,
+	WMI_TLV_DEBUG_PRINT_EVENTID,
+	WMI_TLV_DCS_INTERFERENCE_EVENTID,
+	WMI_TLV_PDEV_QVIT_EVENTID,
+	WMI_TLV_WLAN_PROFILE_DATA_EVENTID,
+	WMI_TLV_PDEV_FTM_INTG_EVENTID,
+	WMI_TLV_WLAN_FREQ_AVOID_EVENTID,
+	WMI_TLV_VDEV_GET_KEEPALIVE_EVENTID,
+	WMI_TLV_THERMAL_MGMT_EVENTID,
+	WMI_TLV_DIAG_DATA_CONTAINER_EVENTID,
+	WMI_TLV_HOST_AUTO_SHUTDOWN_EVENTID,
+	WMI_TLV_UPDATE_WHAL_MIB_STATS_EVENTID,
+	WMI_TLV_UPDATE_VDEV_RATE_STATS_EVENTID,
+	WMI_TLV_DIAG_EVENTID,
+	WMI_TLV_GPIO_INPUT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_GPIO),
+	WMI_TLV_UPLOADH_EVENTID,
+	WMI_TLV_CAPTUREH_EVENTID,
+	WMI_TLV_RFKILL_STATE_CHANGE_EVENTID,
+	WMI_TLV_TDLS_PEER_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_TDLS),
+	WMI_TLV_BATCH_SCAN_ENABLED_EVENTID =
+			WMI_TLV_EV(WMI_TLV_GRP_LOCATION_SCAN),
+	WMI_TLV_BATCH_SCAN_RESULT_EVENTID,
+	WMI_TLV_OEM_CAPABILITY_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_OEM),
+	WMI_TLV_OEM_MEASUREMENT_REPORT_EVENTID,
+	WMI_TLV_OEM_ERROR_REPORT_EVENTID,
+	WMI_TLV_NAN_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_NAN),
+	WMI_TLV_LPI_RESULT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_LPI),
+	WMI_TLV_LPI_STATUS_EVENTID,
+	WMI_TLV_LPI_HANDOFF_EVENTID,
+	WMI_TLV_EXTSCAN_START_STOP_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_EXTSCAN),
+	WMI_TLV_EXTSCAN_OPERATION_EVENTID,
+	WMI_TLV_EXTSCAN_TABLE_USAGE_EVENTID,
+	WMI_TLV_EXTSCAN_CACHED_RESULTS_EVENTID,
+	WMI_TLV_EXTSCAN_WLAN_CHANGE_RESULTS_EVENTID,
+	WMI_TLV_EXTSCAN_HOTLIST_MATCH_EVENTID,
+	WMI_TLV_EXTSCAN_CAPABILITIES_EVENTID,
+	WMI_TLV_MDNS_STATS_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_MDNS_OFL),
+	WMI_TLV_SAP_OFL_ADD_STA_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_SAP_OFL),
+	WMI_TLV_SAP_OFL_DEL_STA_EVENTID,
+};
+
+enum wmi_tlv_pdev_param {
+	WMI_TLV_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+	WMI_TLV_PDEV_PARAM_RX_CHAIN_MASK,
+	WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT2G,
+	WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT5G,
+	WMI_TLV_PDEV_PARAM_TXPOWER_SCALE,
+	WMI_TLV_PDEV_PARAM_BEACON_GEN_MODE,
+	WMI_TLV_PDEV_PARAM_BEACON_TX_MODE,
+	WMI_TLV_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+	WMI_TLV_PDEV_PARAM_PROTECTION_MODE,
+	WMI_TLV_PDEV_PARAM_DYNAMIC_BW,
+	WMI_TLV_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+	WMI_TLV_PDEV_PARAM_AGG_SW_RETRY_TH,
+	WMI_TLV_PDEV_PARAM_STA_KICKOUT_TH,
+	WMI_TLV_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+	WMI_TLV_PDEV_PARAM_LTR_ENABLE,
+	WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BE,
+	WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BK,
+	WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VI,
+	WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VO,
+	WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+	WMI_TLV_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+	WMI_TLV_PDEV_PARAM_LTR_RX_OVERRIDE,
+	WMI_TLV_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+	WMI_TLV_PDEV_PARAM_L1SS_ENABLE,
+	WMI_TLV_PDEV_PARAM_DSLEEP_ENABLE,
+	WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+	WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+	WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+	WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+	WMI_TLV_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+	WMI_TLV_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+	WMI_TLV_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+	WMI_TLV_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+	WMI_TLV_PDEV_PARAM_PMF_QOS,
+	WMI_TLV_PDEV_PARAM_ARP_AC_OVERRIDE,
+	WMI_TLV_PDEV_PARAM_DCS,
+	WMI_TLV_PDEV_PARAM_ANI_ENABLE,
+	WMI_TLV_PDEV_PARAM_ANI_POLL_PERIOD,
+	WMI_TLV_PDEV_PARAM_ANI_LISTEN_PERIOD,
+	WMI_TLV_PDEV_PARAM_ANI_OFDM_LEVEL,
+	WMI_TLV_PDEV_PARAM_ANI_CCK_LEVEL,
+	WMI_TLV_PDEV_PARAM_DYNTXCHAIN,
+	WMI_TLV_PDEV_PARAM_PROXY_STA,
+	WMI_TLV_PDEV_PARAM_IDLE_PS_CONFIG,
+	WMI_TLV_PDEV_PARAM_POWER_GATING_SLEEP,
+	WMI_TLV_PDEV_PARAM_RFKILL_ENABLE,
+	WMI_TLV_PDEV_PARAM_BURST_DUR,
+	WMI_TLV_PDEV_PARAM_BURST_ENABLE,
+	WMI_TLV_PDEV_PARAM_HW_RFKILL_CONFIG,
+	WMI_TLV_PDEV_PARAM_LOW_POWER_RF_ENABLE,
+	WMI_TLV_PDEV_PARAM_L1SS_TRACK,
+	WMI_TLV_PDEV_PARAM_HYST_EN,
+	WMI_TLV_PDEV_PARAM_POWER_COLLAPSE_ENABLE,
+	WMI_TLV_PDEV_PARAM_LED_SYS_STATE,
+	WMI_TLV_PDEV_PARAM_LED_ENABLE,
+	WMI_TLV_PDEV_PARAM_AUDIO_OVER_WLAN_LATENCY,
+	WMI_TLV_PDEV_PARAM_AUDIO_OVER_WLAN_ENABLE,
+	WMI_TLV_PDEV_PARAM_WHAL_MIB_STATS_UPDATE_ENABLE,
+	WMI_TLV_PDEV_PARAM_VDEV_RATE_STATS_UPDATE_PERIOD,
+	WMI_TLV_PDEV_PARAM_TXPOWER_REASON_NONE,
+	WMI_TLV_PDEV_PARAM_TXPOWER_REASON_SAR,
+	WMI_TLV_PDEV_PARAM_TXPOWER_REASON_MAX,
+};
+
+enum wmi_tlv_vdev_param {
+	WMI_TLV_VDEV_PARAM_RTS_THRESHOLD = 0x1,
+	WMI_TLV_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+	WMI_TLV_VDEV_PARAM_BEACON_INTERVAL,
+	WMI_TLV_VDEV_PARAM_LISTEN_INTERVAL,
+	WMI_TLV_VDEV_PARAM_MULTICAST_RATE,
+	WMI_TLV_VDEV_PARAM_MGMT_TX_RATE,
+	WMI_TLV_VDEV_PARAM_SLOT_TIME,
+	WMI_TLV_VDEV_PARAM_PREAMBLE,
+	WMI_TLV_VDEV_PARAM_SWBA_TIME,
+	WMI_TLV_VDEV_STATS_UPDATE_PERIOD,
+	WMI_TLV_VDEV_PWRSAVE_AGEOUT_TIME,
+	WMI_TLV_VDEV_HOST_SWBA_INTERVAL,
+	WMI_TLV_VDEV_PARAM_DTIM_PERIOD,
+	WMI_TLV_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+	WMI_TLV_VDEV_PARAM_WDS,
+	WMI_TLV_VDEV_PARAM_ATIM_WINDOW,
+	WMI_TLV_VDEV_PARAM_BMISS_COUNT_MAX,
+	WMI_TLV_VDEV_PARAM_BMISS_FIRST_BCNT,
+	WMI_TLV_VDEV_PARAM_BMISS_FINAL_BCNT,
+	WMI_TLV_VDEV_PARAM_FEATURE_WMM,
+	WMI_TLV_VDEV_PARAM_CHWIDTH,
+	WMI_TLV_VDEV_PARAM_CHEXTOFFSET,
+	WMI_TLV_VDEV_PARAM_DISABLE_HTPROTECTION,
+	WMI_TLV_VDEV_PARAM_STA_QUICKKICKOUT,
+	WMI_TLV_VDEV_PARAM_MGMT_RATE,
+	WMI_TLV_VDEV_PARAM_PROTECTION_MODE,
+	WMI_TLV_VDEV_PARAM_FIXED_RATE,
+	WMI_TLV_VDEV_PARAM_SGI,
+	WMI_TLV_VDEV_PARAM_LDPC,
+	WMI_TLV_VDEV_PARAM_TX_STBC,
+	WMI_TLV_VDEV_PARAM_RX_STBC,
+	WMI_TLV_VDEV_PARAM_INTRA_BSS_FWD,
+	WMI_TLV_VDEV_PARAM_DEF_KEYID,
+	WMI_TLV_VDEV_PARAM_NSS,
+	WMI_TLV_VDEV_PARAM_BCAST_DATA_RATE,
+	WMI_TLV_VDEV_PARAM_MCAST_DATA_RATE,
+	WMI_TLV_VDEV_PARAM_MCAST_INDICATE,
+	WMI_TLV_VDEV_PARAM_DHCP_INDICATE,
+	WMI_TLV_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+	WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+	WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+	WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+	WMI_TLV_VDEV_PARAM_AP_ENABLE_NAWDS,
+	WMI_TLV_VDEV_PARAM_ENABLE_RTSCTS,
+	WMI_TLV_VDEV_PARAM_TXBF,
+	WMI_TLV_VDEV_PARAM_PACKET_POWERSAVE,
+	WMI_TLV_VDEV_PARAM_DROP_UNENCRY,
+	WMI_TLV_VDEV_PARAM_TX_ENCAP_TYPE,
+	WMI_TLV_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+	WMI_TLV_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
+	WMI_TLV_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
+	WMI_TLV_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
+	WMI_TLV_VDEV_PARAM_EARLY_RX_SLOP_STEP,
+	WMI_TLV_VDEV_PARAM_EARLY_RX_INIT_SLOP,
+	WMI_TLV_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
+	WMI_TLV_VDEV_PARAM_TX_PWRLIMIT,
+	WMI_TLV_VDEV_PARAM_SNR_NUM_FOR_CAL,
+	WMI_TLV_VDEV_PARAM_ROAM_FW_OFFLOAD,
+	WMI_TLV_VDEV_PARAM_ENABLE_RMC,
+	WMI_TLV_VDEV_PARAM_IBSS_MAX_BCN_LOST_MS,
+	WMI_TLV_VDEV_PARAM_MAX_RATE,
+	WMI_TLV_VDEV_PARAM_EARLY_RX_DRIFT_SAMPLE,
+	WMI_TLV_VDEV_PARAM_SET_IBSS_TX_FAIL_CNT_THR,
+	WMI_TLV_VDEV_PARAM_EBT_RESYNC_TIMEOUT,
+	WMI_TLV_VDEV_PARAM_AGGR_TRIG_EVENT_ENABLE,
+	WMI_TLV_VDEV_PARAM_IS_IBSS_POWER_SAVE_ALLOWED,
+	WMI_TLV_VDEV_PARAM_IS_POWER_COLLAPSE_ALLOWED,
+	WMI_TLV_VDEV_PARAM_IS_AWAKE_ON_TXRX_ENABLED,
+	WMI_TLV_VDEV_PARAM_INACTIVITY_CNT,
+	WMI_TLV_VDEV_PARAM_TXSP_END_INACTIVITY_TIME_MS,
+	WMI_TLV_VDEV_PARAM_DTIM_POLICY,
+	WMI_TLV_VDEV_PARAM_IBSS_PS_WARMUP_TIME_SECS,
+	WMI_TLV_VDEV_PARAM_IBSS_PS_1RX_CHAIN_IN_ATIM_WINDOW_ENABLE,
+};
+
+enum wmi_tlv_tag {
+	WMI_TLV_TAG_LAST_RESERVED = 15,
+
+	WMI_TLV_TAG_FIRST_ARRAY_ENUM,
+	WMI_TLV_TAG_ARRAY_UINT32 = WMI_TLV_TAG_FIRST_ARRAY_ENUM,
+	WMI_TLV_TAG_ARRAY_BYTE,
+	WMI_TLV_TAG_ARRAY_STRUCT,
+	WMI_TLV_TAG_ARRAY_FIXED_STRUCT,
+	WMI_TLV_TAG_LAST_ARRAY_ENUM = 31,
+
+	WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT,
+	WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES,
+	WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ,
+	WMI_TLV_TAG_STRUCT_READY_EVENT,
+	WMI_TLV_TAG_STRUCT_SCAN_EVENT,
+	WMI_TLV_TAG_STRUCT_PDEV_TPC_CONFIG_EVENT,
+	WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT,
+	WMI_TLV_TAG_STRUCT_COMB_PHYERR_RX_HDR,
+	WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT,
+	WMI_TLV_TAG_STRUCT_VDEV_STOPPED_EVENT,
+	WMI_TLV_TAG_STRUCT_VDEV_INSTALL_KEY_COMPLETE_EVENT,
+	WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT,
+	WMI_TLV_TAG_STRUCT_MGMT_RX_HDR,
+	WMI_TLV_TAG_STRUCT_TBTT_OFFSET_EVENT,
+	WMI_TLV_TAG_STRUCT_TX_DELBA_COMPLETE_EVENT,
+	WMI_TLV_TAG_STRUCT_TX_ADDBA_COMPLETE_EVENT,
+	WMI_TLV_TAG_STRUCT_ROAM_EVENT,
+	WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO,
+	WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO_SECTION_BITMAP,
+	WMI_TLV_TAG_STRUCT_RTT_EVENT_HEADER,
+	WMI_TLV_TAG_STRUCT_RTT_ERROR_REPORT_EVENT,
+	WMI_TLV_TAG_STRUCT_RTT_MEAS_EVENT,
+	WMI_TLV_TAG_STRUCT_ECHO_EVENT,
+	WMI_TLV_TAG_STRUCT_FTM_INTG_EVENT,
+	WMI_TLV_TAG_STRUCT_VDEV_GET_KEEPALIVE_EVENT,
+	WMI_TLV_TAG_STRUCT_GPIO_INPUT_EVENT,
+	WMI_TLV_TAG_STRUCT_CSA_EVENT,
+	WMI_TLV_TAG_STRUCT_GTK_OFFLOAD_STATUS_EVENT,
+	WMI_TLV_TAG_STRUCT_IGTK_INFO,
+	WMI_TLV_TAG_STRUCT_DCS_INTERFERENCE_EVENT,
+	WMI_TLV_TAG_STRUCT_ATH_DCS_CW_INT,
+	WMI_TLV_TAG_STRUCT_ATH_DCS_WLAN_INT_STAT,
+	WMI_TLV_TAG_STRUCT_WLAN_PROFILE_CTX_T,
+	WMI_TLV_TAG_STRUCT_WLAN_PROFILE_T,
+	WMI_TLV_TAG_STRUCT_PDEV_QVIT_EVENT,
+	WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT,
+	WMI_TLV_TAG_STRUCT_TIM_INFO,
+	WMI_TLV_TAG_STRUCT_P2P_NOA_INFO,
+	WMI_TLV_TAG_STRUCT_STATS_EVENT,
+	WMI_TLV_TAG_STRUCT_AVOID_FREQ_RANGES_EVENT,
+	WMI_TLV_TAG_STRUCT_AVOID_FREQ_RANGE_DESC,
+	WMI_TLV_TAG_STRUCT_GTK_REKEY_FAIL_EVENT,
+	WMI_TLV_TAG_STRUCT_INIT_CMD,
+	WMI_TLV_TAG_STRUCT_RESOURCE_CONFIG,
+	WMI_TLV_TAG_STRUCT_WLAN_HOST_MEMORY_CHUNK,
+	WMI_TLV_TAG_STRUCT_START_SCAN_CMD,
+	WMI_TLV_TAG_STRUCT_STOP_SCAN_CMD,
+	WMI_TLV_TAG_STRUCT_SCAN_CHAN_LIST_CMD,
+	WMI_TLV_TAG_STRUCT_CHANNEL,
+	WMI_TLV_TAG_STRUCT_PDEV_SET_REGDOMAIN_CMD,
+	WMI_TLV_TAG_STRUCT_PDEV_SET_PARAM_CMD,
+	WMI_TLV_TAG_STRUCT_PDEV_SET_WMM_PARAMS_CMD,
+	WMI_TLV_TAG_STRUCT_WMM_PARAMS,
+	WMI_TLV_TAG_STRUCT_PDEV_SET_QUIET_CMD,
+	WMI_TLV_TAG_STRUCT_VDEV_CREATE_CMD,
+	WMI_TLV_TAG_STRUCT_VDEV_DELETE_CMD,
+	WMI_TLV_TAG_STRUCT_VDEV_START_REQUEST_CMD,
+	WMI_TLV_TAG_STRUCT_P2P_NOA_DESCRIPTOR,
+	WMI_TLV_TAG_STRUCT_P2P_GO_SET_BEACON_IE,
+	WMI_TLV_TAG_STRUCT_GTK_OFFLOAD_CMD,
+	WMI_TLV_TAG_STRUCT_VDEV_UP_CMD,
+	WMI_TLV_TAG_STRUCT_VDEV_STOP_CMD,
+	WMI_TLV_TAG_STRUCT_VDEV_DOWN_CMD,
+	WMI_TLV_TAG_STRUCT_VDEV_SET_PARAM_CMD,
+	WMI_TLV_TAG_STRUCT_VDEV_INSTALL_KEY_CMD,
+	WMI_TLV_TAG_STRUCT_PEER_CREATE_CMD,
+	WMI_TLV_TAG_STRUCT_PEER_DELETE_CMD,
+	WMI_TLV_TAG_STRUCT_PEER_FLUSH_TIDS_CMD,
+	WMI_TLV_TAG_STRUCT_PEER_SET_PARAM_CMD,
+	WMI_TLV_TAG_STRUCT_PEER_ASSOC_COMPLETE_CMD,
+	WMI_TLV_TAG_STRUCT_VHT_RATE_SET,
+	WMI_TLV_TAG_STRUCT_BCN_TMPL_CMD,
+	WMI_TLV_TAG_STRUCT_PRB_TMPL_CMD,
+	WMI_TLV_TAG_STRUCT_BCN_PRB_INFO,
+	WMI_TLV_TAG_STRUCT_PEER_TID_ADDBA_CMD,
+	WMI_TLV_TAG_STRUCT_PEER_TID_DELBA_CMD,
+	WMI_TLV_TAG_STRUCT_STA_POWERSAVE_MODE_CMD,
+	WMI_TLV_TAG_STRUCT_STA_POWERSAVE_PARAM_CMD,
+	WMI_TLV_TAG_STRUCT_STA_DTIM_PS_METHOD_CMD,
+	WMI_TLV_TAG_STRUCT_ROAM_SCAN_MODE,
+	WMI_TLV_TAG_STRUCT_ROAM_SCAN_RSSI_THRESHOLD,
+	WMI_TLV_TAG_STRUCT_ROAM_SCAN_PERIOD,
+	WMI_TLV_TAG_STRUCT_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+	WMI_TLV_TAG_STRUCT_PDEV_SUSPEND_CMD,
+	WMI_TLV_TAG_STRUCT_PDEV_RESUME_CMD,
+	WMI_TLV_TAG_STRUCT_ADD_BCN_FILTER_CMD,
+	WMI_TLV_TAG_STRUCT_RMV_BCN_FILTER_CMD,
+	WMI_TLV_TAG_STRUCT_WOW_ENABLE_CMD,
+	WMI_TLV_TAG_STRUCT_WOW_HOSTWAKEUP_FROM_SLEEP_CMD,
+	WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_CMD,
+	WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_PARAM,
+	WMI_TLV_TAG_STRUCT_SET_ARP_NS_OFFLOAD_CMD,
+	WMI_TLV_TAG_STRUCT_ARP_OFFLOAD_TUPLE,
+	WMI_TLV_TAG_STRUCT_NS_OFFLOAD_TUPLE,
+	WMI_TLV_TAG_STRUCT_FTM_INTG_CMD,
+	WMI_TLV_TAG_STRUCT_STA_KEEPALIVE_CMD,
+	WMI_TLV_TAG_STRUCT_STA_KEEPALVE_ARP_RESPONSE,
+	WMI_TLV_TAG_STRUCT_P2P_SET_VENDOR_IE_DATA_CMD,
+	WMI_TLV_TAG_STRUCT_AP_PS_PEER_CMD,
+	WMI_TLV_TAG_STRUCT_PEER_RATE_RETRY_SCHED_CMD,
+	WMI_TLV_TAG_STRUCT_WLAN_PROFILE_TRIGGER_CMD,
+	WMI_TLV_TAG_STRUCT_WLAN_PROFILE_SET_HIST_INTVL_CMD,
+	WMI_TLV_TAG_STRUCT_WLAN_PROFILE_GET_PROF_DATA_CMD,
+	WMI_TLV_TAG_STRUCT_WLAN_PROFILE_ENABLE_PROFILE_ID_CMD,
+	WMI_TLV_TAG_STRUCT_WOW_DEL_PATTERN_CMD,
+	WMI_TLV_TAG_STRUCT_WOW_ADD_DEL_EVT_CMD,
+	WMI_TLV_TAG_STRUCT_RTT_MEASREQ_HEAD,
+	WMI_TLV_TAG_STRUCT_RTT_MEASREQ_BODY,
+	WMI_TLV_TAG_STRUCT_RTT_TSF_CMD,
+	WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_CONFIGURE_CMD,
+	WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_ENABLE_CMD,
+	WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD,
+	WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD,
+	WMI_TLV_TAG_STRUCT_NLO_CONFIGURED_PARAMETERS,
+	WMI_TLV_TAG_STRUCT_CSA_OFFLOAD_ENABLE_CMD,
+	WMI_TLV_TAG_STRUCT_CSA_OFFLOAD_CHANSWITCH_CMD,
+	WMI_TLV_TAG_STRUCT_CHATTER_SET_MODE_CMD,
+	WMI_TLV_TAG_STRUCT_ECHO_CMD,
+	WMI_TLV_TAG_STRUCT_VDEV_SET_KEEPALIVE_CMD,
+	WMI_TLV_TAG_STRUCT_VDEV_GET_KEEPALIVE_CMD,
+	WMI_TLV_TAG_STRUCT_FORCE_FW_HANG_CMD,
+	WMI_TLV_TAG_STRUCT_GPIO_CONFIG_CMD,
+	WMI_TLV_TAG_STRUCT_GPIO_OUTPUT_CMD,
+	WMI_TLV_TAG_STRUCT_PEER_ADD_WDS_ENTRY_CMD,
+	WMI_TLV_TAG_STRUCT_PEER_REMOVE_WDS_ENTRY_CMD,
+	WMI_TLV_TAG_STRUCT_BCN_TX_HDR,
+	WMI_TLV_TAG_STRUCT_BCN_SEND_FROM_HOST_CMD,
+	WMI_TLV_TAG_STRUCT_MGMT_TX_HDR,
+	WMI_TLV_TAG_STRUCT_ADDBA_CLEAR_RESP_CMD,
+	WMI_TLV_TAG_STRUCT_ADDBA_SEND_CMD,
+	WMI_TLV_TAG_STRUCT_DELBA_SEND_CMD,
+	WMI_TLV_TAG_STRUCT_ADDBA_SETRESPONSE_CMD,
+	WMI_TLV_TAG_STRUCT_SEND_SINGLEAMSDU_CMD,
+	WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_ENABLE_CMD,
+	WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_DISABLE_CMD,
+	WMI_TLV_TAG_STRUCT_PDEV_SET_HT_IE_CMD,
+	WMI_TLV_TAG_STRUCT_PDEV_SET_VHT_IE_CMD,
+	WMI_TLV_TAG_STRUCT_PDEV_SET_DSCP_TID_MAP_CMD,
+	WMI_TLV_TAG_STRUCT_PDEV_GREEN_AP_PS_ENABLE_CMD,
+	WMI_TLV_TAG_STRUCT_PDEV_GET_TPC_CONFIG_CMD,
+	WMI_TLV_TAG_STRUCT_PDEV_SET_BASE_MACADDR_CMD,
+	WMI_TLV_TAG_STRUCT_PEER_MCAST_GROUP_CMD,
+	WMI_TLV_TAG_STRUCT_ROAM_AP_PROFILE,
+	WMI_TLV_TAG_STRUCT_AP_PROFILE,
+	WMI_TLV_TAG_STRUCT_SCAN_SCH_PRIORITY_TABLE_CMD,
+	WMI_TLV_TAG_STRUCT_PDEV_DFS_ENABLE_CMD,
+	WMI_TLV_TAG_STRUCT_PDEV_DFS_DISABLE_CMD,
+	WMI_TLV_TAG_STRUCT_WOW_ADD_PATTERN_CMD,
+	WMI_TLV_TAG_STRUCT_WOW_BITMAP_PATTERN_T,
+	WMI_TLV_TAG_STRUCT_WOW_IPV4_SYNC_PATTERN_T,
+	WMI_TLV_TAG_STRUCT_WOW_IPV6_SYNC_PATTERN_T,
+	WMI_TLV_TAG_STRUCT_WOW_MAGIC_PATTERN_CMD,
+	WMI_TLV_TAG_STRUCT_SCAN_UPDATE_REQUEST_CMD,
+	WMI_TLV_TAG_STRUCT_CHATTER_PKT_COALESCING_FILTER,
+	WMI_TLV_TAG_STRUCT_CHATTER_COALESCING_ADD_FILTER_CMD,
+	WMI_TLV_TAG_STRUCT_CHATTER_COALESCING_DELETE_FILTER_CMD,
+	WMI_TLV_TAG_STRUCT_CHATTER_COALESCING_QUERY_CMD,
+	WMI_TLV_TAG_STRUCT_TXBF_CMD,
+	WMI_TLV_TAG_STRUCT_DEBUG_LOG_CONFIG_CMD,
+	WMI_TLV_TAG_STRUCT_NLO_EVENT,
+	WMI_TLV_TAG_STRUCT_CHATTER_QUERY_REPLY_EVENT,
+	WMI_TLV_TAG_STRUCT_UPLOAD_H_HDR,
+	WMI_TLV_TAG_STRUCT_CAPTURE_H_EVENT_HDR,
+	WMI_TLV_TAG_STRUCT_VDEV_WNM_SLEEPMODE_CMD,
+	WMI_TLV_TAG_STRUCT_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMD,
+	WMI_TLV_TAG_STRUCT_VDEV_WMM_ADDTS_CMD,
+	WMI_TLV_TAG_STRUCT_VDEV_WMM_DELTS_CMD,
+	WMI_TLV_TAG_STRUCT_VDEV_SET_WMM_PARAMS_CMD,
+	WMI_TLV_TAG_STRUCT_TDLS_SET_STATE_CMD,
+	WMI_TLV_TAG_STRUCT_TDLS_PEER_UPDATE_CMD,
+	WMI_TLV_TAG_STRUCT_TDLS_PEER_EVENT,
+	WMI_TLV_TAG_STRUCT_TDLS_PEER_CAPABILITIES,
+	WMI_TLV_TAG_STRUCT_VDEV_MCC_SET_TBTT_MODE_CMD,
+	WMI_TLV_TAG_STRUCT_ROAM_CHAN_LIST,
+	WMI_TLV_TAG_STRUCT_VDEV_MCC_BCN_INTVL_CHANGE_EVENT,
+	WMI_TLV_TAG_STRUCT_RESMGR_ADAPTIVE_OCS_CMD,
+	WMI_TLV_TAG_STRUCT_RESMGR_SET_CHAN_TIME_QUOTA_CMD,
+	WMI_TLV_TAG_STRUCT_RESMGR_SET_CHAN_LATENCY_CMD,
+	WMI_TLV_TAG_STRUCT_BA_REQ_SSN_CMD,
+	WMI_TLV_TAG_STRUCT_BA_RSP_SSN_EVENT,
+	WMI_TLV_TAG_STRUCT_STA_SMPS_FORCE_MODE_CMD,
+	WMI_TLV_TAG_STRUCT_SET_MCASTBCAST_FILTER_CMD,
+	WMI_TLV_TAG_STRUCT_P2P_SET_OPPPS_CMD,
+	WMI_TLV_TAG_STRUCT_P2P_SET_NOA_CMD,
+	WMI_TLV_TAG_STRUCT_BA_REQ_SSN_CMD_SUB_STRUCT_PARAM,
+	WMI_TLV_TAG_STRUCT_BA_REQ_SSN_EVENT_SUB_STRUCT_PARAM,
+	WMI_TLV_TAG_STRUCT_STA_SMPS_PARAM_CMD,
+	WMI_TLV_TAG_STRUCT_VDEV_SET_GTX_PARAMS_CMD,
+	WMI_TLV_TAG_STRUCT_MCC_SCHED_TRAFFIC_STATS_CMD,
+	WMI_TLV_TAG_STRUCT_MCC_SCHED_STA_TRAFFIC_STATS,
+	WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT,
+	WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT,
+	WMI_TLV_TAG_STRUCT_HB_SET_ENABLE_CMD,
+	WMI_TLV_TAG_STRUCT_HB_SET_TCP_PARAMS_CMD,
+	WMI_TLV_TAG_STRUCT_HB_SET_TCP_PKT_FILTER_CMD,
+	WMI_TLV_TAG_STRUCT_HB_SET_UDP_PARAMS_CMD,
+	WMI_TLV_TAG_STRUCT_HB_SET_UDP_PKT_FILTER_CMD,
+	WMI_TLV_TAG_STRUCT_HB_IND_EVENT,
+	WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT,
+	WMI_TLV_TAG_STRUCT_RFKILL_EVENT,
+	WMI_TLV_TAG_STRUCT_DFS_RADAR_EVENT,
+	WMI_TLV_TAG_STRUCT_DFS_PHYERR_FILTER_ENA_CMD,
+	WMI_TLV_TAG_STRUCT_DFS_PHYERR_FILTER_DIS_CMD,
+	WMI_TLV_TAG_STRUCT_BATCH_SCAN_RESULT_SCAN_LIST,
+	WMI_TLV_TAG_STRUCT_BATCH_SCAN_RESULT_NETWORK_INFO,
+	WMI_TLV_TAG_STRUCT_BATCH_SCAN_ENABLE_CMD,
+	WMI_TLV_TAG_STRUCT_BATCH_SCAN_DISABLE_CMD,
+	WMI_TLV_TAG_STRUCT_BATCH_SCAN_TRIGGER_RESULT_CMD,
+	WMI_TLV_TAG_STRUCT_BATCH_SCAN_ENABLED_EVENT,
+	WMI_TLV_TAG_STRUCT_BATCH_SCAN_RESULT_EVENT,
+	WMI_TLV_TAG_STRUCT_VDEV_PLMREQ_START_CMD,
+	WMI_TLV_TAG_STRUCT_VDEV_PLMREQ_STOP_CMD,
+	WMI_TLV_TAG_STRUCT_THERMAL_MGMT_CMD,
+	WMI_TLV_TAG_STRUCT_THERMAL_MGMT_EVENT,
+	WMI_TLV_TAG_STRUCT_PEER_INFO_REQ_CMD,
+	WMI_TLV_TAG_STRUCT_PEER_INFO_EVENT,
+	WMI_TLV_TAG_STRUCT_PEER_INFO,
+	WMI_TLV_TAG_STRUCT_PEER_TX_FAIL_CNT_THR_EVENT,
+	WMI_TLV_TAG_STRUCT_RMC_SET_MODE_CMD,
+	WMI_TLV_TAG_STRUCT_RMC_SET_ACTION_PERIOD_CMD,
+	WMI_TLV_TAG_STRUCT_RMC_CONFIG_CMD,
+	WMI_TLV_TAG_STRUCT_MHF_OFFLOAD_SET_MODE_CMD,
+	WMI_TLV_TAG_STRUCT_MHF_OFFLOAD_PLUMB_ROUTING_TABLE_CMD,
+	WMI_TLV_TAG_STRUCT_ADD_PROACTIVE_ARP_RSP_PATTERN_CMD,
+	WMI_TLV_TAG_STRUCT_DEL_PROACTIVE_ARP_RSP_PATTERN_CMD,
+	WMI_TLV_TAG_STRUCT_NAN_CMD_PARAM,
+	WMI_TLV_TAG_STRUCT_NAN_EVENT_HDR,
+	WMI_TLV_TAG_STRUCT_PDEV_L1SS_TRACK_EVENT,
+	WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT,
+	WMI_TLV_TAG_STRUCT_MODEM_POWER_STATE_CMD_PARAM,
+	WMI_TLV_TAG_STRUCT_PEER_GET_ESTIMATED_LINKSPEED_CMD,
+	WMI_TLV_TAG_STRUCT_PEER_ESTIMATED_LINKSPEED_EVENT,
+	WMI_TLV_TAG_STRUCT_AGGR_STATE_TRIG_EVENT,
+	WMI_TLV_TAG_STRUCT_MHF_OFFLOAD_ROUTING_TABLE_ENTRY,
+	WMI_TLV_TAG_STRUCT_ROAM_SCAN_CMD,
+	WMI_TLV_TAG_STRUCT_REQ_STATS_EXT_CMD,
+	WMI_TLV_TAG_STRUCT_STATS_EXT_EVENT,
+	WMI_TLV_TAG_STRUCT_OBSS_SCAN_ENABLE_CMD,
+	WMI_TLV_TAG_STRUCT_OBSS_SCAN_DISABLE_CMD,
+	WMI_TLV_TAG_STRUCT_OFFLOAD_PRB_RSP_TX_STATUS_EVENT,
+	WMI_TLV_TAG_STRUCT_PDEV_SET_LED_CONFIG_CMD,
+	WMI_TLV_TAG_STRUCT_HOST_AUTO_SHUTDOWN_CFG_CMD,
+	WMI_TLV_TAG_STRUCT_HOST_AUTO_SHUTDOWN_EVENT,
+	WMI_TLV_TAG_STRUCT_UPDATE_WHAL_MIB_STATS_EVENT,
+	WMI_TLV_TAG_STRUCT_CHAN_AVOID_UPDATE_CMD_PARAM,
+	WMI_TLV_TAG_STRUCT_WOW_ACER_IOAC_PKT_PATTERN_T,
+	WMI_TLV_TAG_STRUCT_WOW_ACER_IOAC_TMR_PATTERN_T,
+	WMI_TLV_TAG_STRUCT_WOW_IOAC_ADD_KEEPALIVE_CMD,
+	WMI_TLV_TAG_STRUCT_WOW_IOAC_DEL_KEEPALIVE_CMD,
+	WMI_TLV_TAG_STRUCT_WOW_IOAC_KEEPALIVE_T,
+	WMI_TLV_TAG_STRUCT_WOW_ACER_IOAC_ADD_PATTERN_CMD,
+	WMI_TLV_TAG_STRUCT_WOW_ACER_IOAC_DEL_PATTERN_CMD,
+	WMI_TLV_TAG_STRUCT_START_LINK_STATS_CMD,
+	WMI_TLV_TAG_STRUCT_CLEAR_LINK_STATS_CMD,
+	WMI_TLV_TAG_STRUCT_REQUEST_LINK_STATS_CMD,
+	WMI_TLV_TAG_STRUCT_IFACE_LINK_STATS_EVENT,
+	WMI_TLV_TAG_STRUCT_RADIO_LINK_STATS_EVENT,
+	WMI_TLV_TAG_STRUCT_PEER_STATS_EVENT,
+	WMI_TLV_TAG_STRUCT_CHANNEL_STATS,
+	WMI_TLV_TAG_STRUCT_RADIO_LINK_STATS,
+	WMI_TLV_TAG_STRUCT_RATE_STATS,
+	WMI_TLV_TAG_STRUCT_PEER_LINK_STATS,
+	WMI_TLV_TAG_STRUCT_WMM_AC_STATS,
+	WMI_TLV_TAG_STRUCT_IFACE_LINK_STATS,
+	WMI_TLV_TAG_STRUCT_LPI_MGMT_SNOOPING_CONFIG_CMD,
+	WMI_TLV_TAG_STRUCT_LPI_START_SCAN_CMD,
+	WMI_TLV_TAG_STRUCT_LPI_STOP_SCAN_CMD,
+	WMI_TLV_TAG_STRUCT_LPI_RESULT_EVENT,
+	WMI_TLV_TAG_STRUCT_PEER_STATE_EVENT,
+	WMI_TLV_TAG_STRUCT_EXTSCAN_BUCKET_CMD,
+	WMI_TLV_TAG_STRUCT_EXTSCAN_BUCKET_CHANNEL_EVENT,
+	WMI_TLV_TAG_STRUCT_EXTSCAN_START_CMD,
+	WMI_TLV_TAG_STRUCT_EXTSCAN_STOP_CMD,
+	WMI_TLV_TAG_STRUCT_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMD,
+	WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_CHANGE_BSSID_PARAM_CMD,
+	WMI_TLV_TAG_STRUCT_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMD,
+	WMI_TLV_TAG_STRUCT_EXTSCAN_GET_CACHED_RESULTS_CMD,
+	WMI_TLV_TAG_STRUCT_EXTSCAN_GET_WLAN_CHANGE_RESULTS_CMD,
+	WMI_TLV_TAG_STRUCT_EXTSCAN_SET_CAPABILITIES_CMD,
+	WMI_TLV_TAG_STRUCT_EXTSCAN_GET_CAPABILITIES_CMD,
+	WMI_TLV_TAG_STRUCT_EXTSCAN_OPERATION_EVENT,
+	WMI_TLV_TAG_STRUCT_EXTSCAN_START_STOP_EVENT,
+	WMI_TLV_TAG_STRUCT_EXTSCAN_TABLE_USAGE_EVENT,
+	WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_DESCRIPTOR_EVENT,
+	WMI_TLV_TAG_STRUCT_EXTSCAN_RSSI_INFO_EVENT,
+	WMI_TLV_TAG_STRUCT_EXTSCAN_CACHED_RESULTS_EVENT,
+	WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_CHANGE_RESULTS_EVENT,
+	WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_CHANGE_RESULT_BSSID_EVENT,
+	WMI_TLV_TAG_STRUCT_EXTSCAN_HOTLIST_MATCH_EVENT,
+	WMI_TLV_TAG_STRUCT_EXTSCAN_CAPABILITIES_EVENT,
+	WMI_TLV_TAG_STRUCT_EXTSCAN_CACHE_CAPABILITIES_EVENT,
+	WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_CHANGE_MONITOR_CAPABILITIES_EVENT,
+	WMI_TLV_TAG_STRUCT_EXTSCAN_HOTLIST_MONITOR_CAPABILITIES_EVENT,
+	WMI_TLV_TAG_STRUCT_D0_WOW_ENABLE_DISABLE_CMD,
+	WMI_TLV_TAG_STRUCT_D0_WOW_DISABLE_ACK_EVENT,
+	WMI_TLV_TAG_STRUCT_UNIT_TEST_CMD,
+	WMI_TLV_TAG_STRUCT_ROAM_OFFLOAD_TLV_PARAM,
+	WMI_TLV_TAG_STRUCT_ROAM_11I_OFFLOAD_TLV_PARAM,
+	WMI_TLV_TAG_STRUCT_ROAM_11R_OFFLOAD_TLV_PARAM,
+	WMI_TLV_TAG_STRUCT_ROAM_ESE_OFFLOAD_TLV_PARAM,
+	WMI_TLV_TAG_STRUCT_ROAM_SYNCH_EVENT,
+	WMI_TLV_TAG_STRUCT_ROAM_SYNCH_COMPLETE,
+	WMI_TLV_TAG_STRUCT_EXTWOW_ENABLE_CMD,
+	WMI_TLV_TAG_STRUCT_EXTWOW_SET_APP_TYPE1_PARAMS_CMD,
+	WMI_TLV_TAG_STRUCT_EXTWOW_SET_APP_TYPE2_PARAMS_CMD,
+	WMI_TLV_TAG_STRUCT_LPI_STATUS_EVENT,
+	WMI_TLV_TAG_STRUCT_LPI_HANDOFF_EVENT,
+	WMI_TLV_TAG_STRUCT_VDEV_RATE_STATS_EVENT,
+	WMI_TLV_TAG_STRUCT_VDEV_RATE_HT_INFO,
+	WMI_TLV_TAG_STRUCT_RIC_REQUEST,
+	WMI_TLV_TAG_STRUCT_PDEV_GET_TEMPERATURE_CMD,
+	WMI_TLV_TAG_STRUCT_PDEV_TEMPERATURE_EVENT,
+	WMI_TLV_TAG_STRUCT_SET_DHCP_SERVER_OFFLOAD_CMD,
+	WMI_TLV_TAG_STRUCT_TPC_CHAINMASK_CONFIG_CMD,
+	WMI_TLV_TAG_STRUCT_RIC_TSPEC,
+	WMI_TLV_TAG_STRUCT_TPC_CHAINMASK_CONFIG,
+	WMI_TLV_TAG_STRUCT_IPA_OFFLOAD_CMD,
+	WMI_TLV_TAG_STRUCT_SCAN_PROB_REQ_OUI_CMD,
+	WMI_TLV_TAG_STRUCT_KEY_MATERIAL,
+	WMI_TLV_TAG_STRUCT_TDLS_SET_OFFCHAN_MODE_CMD,
+	WMI_TLV_TAG_STRUCT_SET_LED_FLASHING_CMD,
+	WMI_TLV_TAG_STRUCT_MDNS_OFFLOAD_CMD,
+	WMI_TLV_TAG_STRUCT_MDNS_SET_FQDN_CMD,
+	WMI_TLV_TAG_STRUCT_MDNS_SET_RESP_CMD,
+	WMI_TLV_TAG_STRUCT_MDNS_GET_STATS_CMD,
+	WMI_TLV_TAG_STRUCT_MDNS_STATS_EVENT,
+	WMI_TLV_TAG_STRUCT_ROAM_INVOKE_CMD,
+	WMI_TLV_TAG_STRUCT_PDEV_RESUME_EVENT,
+	WMI_TLV_TAG_STRUCT_PDEV_SET_ANTENNA_DIVERSITY_CMD,
+	WMI_TLV_TAG_STRUCT_SAP_OFL_ENABLE_CMD,
+	WMI_TLV_TAG_STRUCT_SAP_OFL_ADD_STA_EVENT,
+	WMI_TLV_TAG_STRUCT_SAP_OFL_DEL_STA_EVENT,
+	WMI_TLV_TAG_STRUCT_APFIND_CMD_PARAM,
+	WMI_TLV_TAG_STRUCT_APFIND_EVENT_HDR,
+
+	WMI_TLV_TAG_MAX
+};
+
+enum wmi_tlv_service {
+	WMI_TLV_SERVICE_BEACON_OFFLOAD = 0,
+	WMI_TLV_SERVICE_SCAN_OFFLOAD,
+	WMI_TLV_SERVICE_ROAM_SCAN_OFFLOAD,
+	WMI_TLV_SERVICE_BCN_MISS_OFFLOAD,
+	WMI_TLV_SERVICE_STA_PWRSAVE,
+	WMI_TLV_SERVICE_STA_ADVANCED_PWRSAVE,
+	WMI_TLV_SERVICE_AP_UAPSD,
+	WMI_TLV_SERVICE_AP_DFS,
+	WMI_TLV_SERVICE_11AC,
+	WMI_TLV_SERVICE_BLOCKACK,
+	WMI_TLV_SERVICE_PHYERR,
+	WMI_TLV_SERVICE_BCN_FILTER,
+	WMI_TLV_SERVICE_RTT,
+	WMI_TLV_SERVICE_WOW,
+	WMI_TLV_SERVICE_RATECTRL_CACHE,
+	WMI_TLV_SERVICE_IRAM_TIDS,
+	WMI_TLV_SERVICE_ARPNS_OFFLOAD,
+	WMI_TLV_SERVICE_NLO,
+	WMI_TLV_SERVICE_GTK_OFFLOAD,
+	WMI_TLV_SERVICE_SCAN_SCH,
+	WMI_TLV_SERVICE_CSA_OFFLOAD,
+	WMI_TLV_SERVICE_CHATTER,
+	WMI_TLV_SERVICE_COEX_FREQAVOID,
+	WMI_TLV_SERVICE_PACKET_POWER_SAVE,
+	WMI_TLV_SERVICE_FORCE_FW_HANG,
+	WMI_TLV_SERVICE_GPIO,
+	WMI_TLV_SERVICE_STA_DTIM_PS_MODULATED_DTIM,
+	WMI_TLV_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+	WMI_TLV_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+	WMI_TLV_SERVICE_STA_KEEP_ALIVE,
+	WMI_TLV_SERVICE_TX_ENCAP,
+	WMI_TLV_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
+	WMI_TLV_SERVICE_EARLY_RX,
+	WMI_TLV_SERVICE_STA_SMPS,
+	WMI_TLV_SERVICE_FWTEST,
+	WMI_TLV_SERVICE_STA_WMMAC,
+	WMI_TLV_SERVICE_TDLS,
+	WMI_TLV_SERVICE_BURST,
+	WMI_TLV_SERVICE_MCC_BCN_INTERVAL_CHANGE,
+	WMI_TLV_SERVICE_ADAPTIVE_OCS,
+	WMI_TLV_SERVICE_BA_SSN_SUPPORT,
+	WMI_TLV_SERVICE_FILTER_IPSEC_NATKEEPALIVE,
+	WMI_TLV_SERVICE_WLAN_HB,
+	WMI_TLV_SERVICE_LTE_ANT_SHARE_SUPPORT,
+	WMI_TLV_SERVICE_BATCH_SCAN,
+	WMI_TLV_SERVICE_QPOWER,
+	WMI_TLV_SERVICE_PLMREQ,
+	WMI_TLV_SERVICE_THERMAL_MGMT,
+	WMI_TLV_SERVICE_RMC,
+	WMI_TLV_SERVICE_MHF_OFFLOAD,
+	WMI_TLV_SERVICE_COEX_SAR,
+	WMI_TLV_SERVICE_BCN_TXRATE_OVERRIDE,
+	WMI_TLV_SERVICE_NAN,
+	WMI_TLV_SERVICE_L1SS_STAT,
+	WMI_TLV_SERVICE_ESTIMATE_LINKSPEED,
+	WMI_TLV_SERVICE_OBSS_SCAN,
+	WMI_TLV_SERVICE_TDLS_OFFCHAN,
+	WMI_TLV_SERVICE_TDLS_UAPSD_BUFFER_STA,
+	WMI_TLV_SERVICE_TDLS_UAPSD_SLEEP_STA,
+	WMI_TLV_SERVICE_IBSS_PWRSAVE,
+	WMI_TLV_SERVICE_LPASS,
+	WMI_TLV_SERVICE_EXTSCAN,
+	WMI_TLV_SERVICE_D0WOW,
+	WMI_TLV_SERVICE_HSOFFLOAD,
+	WMI_TLV_SERVICE_ROAM_HO_OFFLOAD,
+	WMI_TLV_SERVICE_RX_FULL_REORDER,
+	WMI_TLV_SERVICE_DHCP_OFFLOAD,
+	WMI_TLV_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT,
+	WMI_TLV_SERVICE_MDNS_OFFLOAD,
+	WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD,
+};
+
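+/*
+ * Evaluates true if service id 'svc_id' is within 'len' and the corresponding
+ * flag is set in the firmware-provided __le32 service bitmap 'wmi_svc_bmap'.
+ */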
+#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
+	((svc_id) < (len) && \
+	 __le32_to_cpu((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \
+	 BIT((svc_id)%(sizeof(u32))))
+
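+/*
+ * Copy one service flag: if TLV service 'x' is advertised in the firmware
+ * service bitmap, set the corresponding ath10k service 'y' in the host
+ * bitmap. Note that the macro expands the caller's local variables 'in' and
+ * 'out' directly rather than taking them as parameters.
+ */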
+#define SVCMAP(x, y, len) \
+	do { \
+		if (WMI_SERVICE_IS_ENABLED((in), (x), (len))) \
+			__set_bit(y, out); \
+	} while (0)
+
+static inline void
+wmi_tlv_svc_map(const __le32 *in, unsigned long *out, size_t len)
+{
+	SVCMAP(WMI_TLV_SERVICE_BEACON_OFFLOAD,
+	       WMI_SERVICE_BEACON_OFFLOAD, len);
+	SVCMAP(WMI_TLV_SERVICE_SCAN_OFFLOAD,
+	       WMI_SERVICE_SCAN_OFFLOAD, len);
+	SVCMAP(WMI_TLV_SERVICE_ROAM_SCAN_OFFLOAD,
+	       WMI_SERVICE_ROAM_SCAN_OFFLOAD, len);
+	SVCMAP(WMI_TLV_SERVICE_BCN_MISS_OFFLOAD,
+	       WMI_SERVICE_BCN_MISS_OFFLOAD, len);
+	SVCMAP(WMI_TLV_SERVICE_STA_PWRSAVE,
+	       WMI_SERVICE_STA_PWRSAVE, len);
+	SVCMAP(WMI_TLV_SERVICE_STA_ADVANCED_PWRSAVE,
+	       WMI_SERVICE_STA_ADVANCED_PWRSAVE, len);
+	SVCMAP(WMI_TLV_SERVICE_AP_UAPSD,
+	       WMI_SERVICE_AP_UAPSD, len);
+	SVCMAP(WMI_TLV_SERVICE_AP_DFS,
+	       WMI_SERVICE_AP_DFS, len);
+	SVCMAP(WMI_TLV_SERVICE_11AC,
+	       WMI_SERVICE_11AC, len);
+	SVCMAP(WMI_TLV_SERVICE_BLOCKACK,
+	       WMI_SERVICE_BLOCKACK, len);
+	SVCMAP(WMI_TLV_SERVICE_PHYERR,
+	       WMI_SERVICE_PHYERR, len);
+	SVCMAP(WMI_TLV_SERVICE_BCN_FILTER,
+	       WMI_SERVICE_BCN_FILTER, len);
+	SVCMAP(WMI_TLV_SERVICE_RTT,
+	       WMI_SERVICE_RTT, len);
+	SVCMAP(WMI_TLV_SERVICE_WOW,
+	       WMI_SERVICE_WOW, len);
+	SVCMAP(WMI_TLV_SERVICE_RATECTRL_CACHE,
+	       WMI_SERVICE_RATECTRL_CACHE, len);
+	SVCMAP(WMI_TLV_SERVICE_IRAM_TIDS,
+	       WMI_SERVICE_IRAM_TIDS, len);
+	SVCMAP(WMI_TLV_SERVICE_ARPNS_OFFLOAD,
+	       WMI_SERVICE_ARPNS_OFFLOAD, len);
+	SVCMAP(WMI_TLV_SERVICE_NLO,
+	       WMI_SERVICE_NLO, len);
+	SVCMAP(WMI_TLV_SERVICE_GTK_OFFLOAD,
+	       WMI_SERVICE_GTK_OFFLOAD, len);
+	SVCMAP(WMI_TLV_SERVICE_SCAN_SCH,
+	       WMI_SERVICE_SCAN_SCH, len);
+	SVCMAP(WMI_TLV_SERVICE_CSA_OFFLOAD,
+	       WMI_SERVICE_CSA_OFFLOAD, len);
+	SVCMAP(WMI_TLV_SERVICE_CHATTER,
+	       WMI_SERVICE_CHATTER, len);
+	SVCMAP(WMI_TLV_SERVICE_COEX_FREQAVOID,
+	       WMI_SERVICE_COEX_FREQAVOID, len);
+	SVCMAP(WMI_TLV_SERVICE_PACKET_POWER_SAVE,
+	       WMI_SERVICE_PACKET_POWER_SAVE, len);
+	SVCMAP(WMI_TLV_SERVICE_FORCE_FW_HANG,
+	       WMI_SERVICE_FORCE_FW_HANG, len);
+	SVCMAP(WMI_TLV_SERVICE_GPIO,
+	       WMI_SERVICE_GPIO, len);
+	SVCMAP(WMI_TLV_SERVICE_STA_DTIM_PS_MODULATED_DTIM,
+	       WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM, len);
+	SVCMAP(WMI_TLV_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+	       WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, len);
+	SVCMAP(WMI_TLV_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+	       WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, len);
+	SVCMAP(WMI_TLV_SERVICE_STA_KEEP_ALIVE,
+	       WMI_SERVICE_STA_KEEP_ALIVE, len);
+	SVCMAP(WMI_TLV_SERVICE_TX_ENCAP,
+	       WMI_SERVICE_TX_ENCAP, len);
+	SVCMAP(WMI_TLV_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
+	       WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC, len);
+	SVCMAP(WMI_TLV_SERVICE_EARLY_RX,
+	       WMI_SERVICE_EARLY_RX, len);
+	SVCMAP(WMI_TLV_SERVICE_STA_SMPS,
+	       WMI_SERVICE_STA_SMPS, len);
+	SVCMAP(WMI_TLV_SERVICE_FWTEST,
+	       WMI_SERVICE_FWTEST, len);
+	SVCMAP(WMI_TLV_SERVICE_STA_WMMAC,
+	       WMI_SERVICE_STA_WMMAC, len);
+	SVCMAP(WMI_TLV_SERVICE_TDLS,
+	       WMI_SERVICE_TDLS, len);
+	SVCMAP(WMI_TLV_SERVICE_BURST,
+	       WMI_SERVICE_BURST, len);
+	SVCMAP(WMI_TLV_SERVICE_MCC_BCN_INTERVAL_CHANGE,
+	       WMI_SERVICE_MCC_BCN_INTERVAL_CHANGE, len);
+	SVCMAP(WMI_TLV_SERVICE_ADAPTIVE_OCS,
+	       WMI_SERVICE_ADAPTIVE_OCS, len);
+	SVCMAP(WMI_TLV_SERVICE_BA_SSN_SUPPORT,
+	       WMI_SERVICE_BA_SSN_SUPPORT, len);
+	SVCMAP(WMI_TLV_SERVICE_FILTER_IPSEC_NATKEEPALIVE,
+	       WMI_SERVICE_FILTER_IPSEC_NATKEEPALIVE, len);
+	SVCMAP(WMI_TLV_SERVICE_WLAN_HB,
+	       WMI_SERVICE_WLAN_HB, len);
+	SVCMAP(WMI_TLV_SERVICE_LTE_ANT_SHARE_SUPPORT,
+	       WMI_SERVICE_LTE_ANT_SHARE_SUPPORT, len);
+	SVCMAP(WMI_TLV_SERVICE_BATCH_SCAN,
+	       WMI_SERVICE_BATCH_SCAN, len);
+	SVCMAP(WMI_TLV_SERVICE_QPOWER,
+	       WMI_SERVICE_QPOWER, len);
+	SVCMAP(WMI_TLV_SERVICE_PLMREQ,
+	       WMI_SERVICE_PLMREQ, len);
+	SVCMAP(WMI_TLV_SERVICE_THERMAL_MGMT,
+	       WMI_SERVICE_THERMAL_MGMT, len);
+	SVCMAP(WMI_TLV_SERVICE_RMC,
+	       WMI_SERVICE_RMC, len);
+	SVCMAP(WMI_TLV_SERVICE_MHF_OFFLOAD,
+	       WMI_SERVICE_MHF_OFFLOAD, len);
+	SVCMAP(WMI_TLV_SERVICE_COEX_SAR,
+	       WMI_SERVICE_COEX_SAR, len);
+	SVCMAP(WMI_TLV_SERVICE_BCN_TXRATE_OVERRIDE,
+	       WMI_SERVICE_BCN_TXRATE_OVERRIDE, len);
+	SVCMAP(WMI_TLV_SERVICE_NAN,
+	       WMI_SERVICE_NAN, len);
+	SVCMAP(WMI_TLV_SERVICE_L1SS_STAT,
+	       WMI_SERVICE_L1SS_STAT, len);
+	SVCMAP(WMI_TLV_SERVICE_ESTIMATE_LINKSPEED,
+	       WMI_SERVICE_ESTIMATE_LINKSPEED, len);
+	SVCMAP(WMI_TLV_SERVICE_OBSS_SCAN,
+	       WMI_SERVICE_OBSS_SCAN, len);
+	SVCMAP(WMI_TLV_SERVICE_TDLS_OFFCHAN,
+	       WMI_SERVICE_TDLS_OFFCHAN, len);
+	SVCMAP(WMI_TLV_SERVICE_TDLS_UAPSD_BUFFER_STA,
+	       WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, len);
+	SVCMAP(WMI_TLV_SERVICE_TDLS_UAPSD_SLEEP_STA,
+	       WMI_SERVICE_TDLS_UAPSD_SLEEP_STA, len);
+	SVCMAP(WMI_TLV_SERVICE_IBSS_PWRSAVE,
+	       WMI_SERVICE_IBSS_PWRSAVE, len);
+	SVCMAP(WMI_TLV_SERVICE_LPASS,
+	       WMI_SERVICE_LPASS, len);
+	SVCMAP(WMI_TLV_SERVICE_EXTSCAN,
+	       WMI_SERVICE_EXTSCAN, len);
+	SVCMAP(WMI_TLV_SERVICE_D0WOW,
+	       WMI_SERVICE_D0WOW, len);
+	SVCMAP(WMI_TLV_SERVICE_HSOFFLOAD,
+	       WMI_SERVICE_HSOFFLOAD, len);
+	SVCMAP(WMI_TLV_SERVICE_ROAM_HO_OFFLOAD,
+	       WMI_SERVICE_ROAM_HO_OFFLOAD, len);
+	SVCMAP(WMI_TLV_SERVICE_RX_FULL_REORDER,
+	       WMI_SERVICE_RX_FULL_REORDER, len);
+	SVCMAP(WMI_TLV_SERVICE_DHCP_OFFLOAD,
+	       WMI_SERVICE_DHCP_OFFLOAD, len);
+	SVCMAP(WMI_TLV_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT,
+	       WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT, len);
+	SVCMAP(WMI_TLV_SERVICE_MDNS_OFFLOAD,
+	       WMI_SERVICE_MDNS_OFFLOAD, len);
+	SVCMAP(WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD,
+	       WMI_SERVICE_SAP_AUTH_OFFLOAD, len);
+}
+
+#undef SVCMAP
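+
+/*
+ * Minimal usage sketch (hypothetical caller and variable names, not part of
+ * this header):
+ *
+ *	unsigned long svc[BITS_TO_LONGS(WMI_SERVICE_MAX)] = {};
+ *
+ *	wmi_tlv_svc_map(fw_svc_bitmap, svc, fw_svc_bitmap_len);
+ *	if (test_bit(WMI_SERVICE_BEACON_OFFLOAD, svc))
+ *		... beacon offload is advertised by the firmware ...
+ */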
+
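+/*
+ * Generic header preceding every WMI TLV element: a 16-bit value length, a
+ * 16-bit tag (enum wmi_tlv_tag) and the value bytes themselves.
+ */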
+struct wmi_tlv {
+	__le16 len;
+	__le16 tag;
+	u8 value[0];
+} __packed;
+
+#define WMI_TLV_MGMT_RX_NUM_RSSI 4
+
+struct wmi_tlv_mgmt_rx_ev {
+	__le32 channel;
+	__le32 snr;
+	__le32 rate;
+	__le32 phy_mode;
+	__le32 buf_len;
+	__le32 status;
+	__le32 rssi[WMI_TLV_MGMT_RX_NUM_RSSI];
+} __packed;
+
+struct wmi_tlv_abi_version {
+	__le32 abi_ver0;
+	__le32 abi_ver1;
+	__le32 abi_ver_ns0;
+	__le32 abi_ver_ns1;
+	__le32 abi_ver_ns2;
+	__le32 abi_ver_ns3;
+} __packed;
+
+enum wmi_tlv_hw_bd_id {
+	WMI_TLV_HW_BD_LEGACY = 0,
+	WMI_TLV_HW_BD_QCA6174 = 1,
+	WMI_TLV_HW_BD_QCA2582 = 2,
+};
+
+struct wmi_tlv_hw_bd_info {
+	u8 rev;
+	u8 project_id;
+	u8 custom_id;
+	u8 reference_design_id;
+} __packed;
+
+struct wmi_tlv_svc_rdy_ev {
+	__le32 fw_build_vers;
+	struct wmi_tlv_abi_version abi;
+	__le32 phy_capability;
+	__le32 max_frag_entry;
+	__le32 num_rf_chains;
+	__le32 ht_cap_info;
+	__le32 vht_cap_info;
+	__le32 vht_supp_mcs;
+	__le32 hw_min_tx_power;
+	__le32 hw_max_tx_power;
+	__le32 sys_cap_info;
+	__le32 min_pkt_size_enable;
+	__le32 max_bcn_ie_size;
+	__le32 num_mem_reqs;
+	__le32 max_num_scan_chans;
+	__le32 hw_bd_id; /* 0 means hw_bd_info is invalid */
+	struct wmi_tlv_hw_bd_info hw_bd_info[5];
+} __packed;
+
+struct wmi_tlv_rdy_ev {
+	struct wmi_tlv_abi_version abi;
+	struct wmi_mac_addr mac_addr;
+	__le32 status;
+} __packed;
+
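+/*
+ * Target resource configuration sent along with the TLV INIT command (see
+ * struct wmi_tlv_init_cmd below).
+ */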
+struct wmi_tlv_resource_config {
+	__le32 num_vdevs;
+	__le32 num_peers;
+	__le32 num_offload_peers;
+	__le32 num_offload_reorder_bufs;
+	__le32 num_peer_keys;
+	__le32 num_tids;
+	__le32 ast_skid_limit;
+	__le32 tx_chain_mask;
+	__le32 rx_chain_mask;
+	__le32 rx_timeout_pri[4];
+	__le32 rx_decap_mode;
+	__le32 scan_max_pending_reqs;
+	__le32 bmiss_offload_max_vdev;
+	__le32 roam_offload_max_vdev;
+	__le32 roam_offload_max_ap_profiles;
+	__le32 num_mcast_groups;
+	__le32 num_mcast_table_elems;
+	__le32 mcast2ucast_mode;
+	__le32 tx_dbg_log_size;
+	__le32 num_wds_entries;
+	__le32 dma_burst_size;
+	__le32 mac_aggr_delim;
+	__le32 rx_skip_defrag_timeout_dup_detection_check;
+	__le32 vow_config;
+	__le32 gtk_offload_max_vdev;
+	__le32 num_msdu_desc;
+	__le32 max_frag_entries;
+	__le32 num_tdls_vdevs;
+	__le32 num_tdls_conn_table_entries;
+	__le32 beacon_tx_offload_max_vdev;
+	__le32 num_multicast_filter_entries;
+	__le32 num_wow_filters;
+	__le32 num_keep_alive_pattern;
+	__le32 keep_alive_pattern_size;
+	__le32 max_tdls_concurrent_sleep_sta;
+	__le32 max_tdls_concurrent_buffer_sta;
+} __packed;
+
+struct wmi_tlv_init_cmd {
+	struct wmi_tlv_abi_version abi;
+	__le32 num_host_mem_chunks;
+} __packed;
+
+struct wmi_tlv_pdev_set_param_cmd {
+	__le32 pdev_id; /* not used yet */
+	__le32 param_id;
+	__le32 param_value;
+} __packed;
+
+struct wmi_tlv_pdev_set_rd_cmd {
+	__le32 pdev_id; /* not used yet */
+	__le32 regd;
+	__le32 regd_2ghz;
+	__le32 regd_5ghz;
+	__le32 conform_limit_2ghz;
+	__le32 conform_limit_5ghz;
+} __packed;
+
+struct wmi_tlv_scan_chan_list_cmd {
+	__le32 num_scan_chans;
+} __packed;
+
+struct wmi_tlv_start_scan_cmd {
+	struct wmi_start_scan_common common;
+	__le32 burst_duration_ms;
+	__le32 num_channels;
+	__le32 num_bssids;
+	__le32 num_ssids;
+	__le32 ie_len;
+	__le32 num_probes;
+} __packed;
+
+struct wmi_tlv_vdev_start_cmd {
+	__le32 vdev_id;
+	__le32 requestor_id;
+	__le32 bcn_intval;
+	__le32 dtim_period;
+	__le32 flags;
+	struct wmi_ssid ssid;
+	__le32 bcn_tx_rate;
+	__le32 bcn_tx_power;
+	__le32 num_noa_descr;
+	__le32 disable_hw_ack;
+} __packed;
+
+enum {
+	WMI_TLV_PEER_TYPE_DEFAULT = 0, /* generic / non-BSS / self-peer */
+	WMI_TLV_PEER_TYPE_BSS = 1,
+	WMI_TLV_PEER_TYPE_TDLS = 2,
+	WMI_TLV_PEER_TYPE_HOST_MAX = 127,
+	WMI_TLV_PEER_TYPE_ROAMOFFLOAD_TMP = 128,
+};
+
+struct wmi_tlv_peer_create_cmd {
+	__le32 vdev_id;
+	struct wmi_mac_addr peer_addr;
+	__le32 peer_type;
+} __packed;
+
+struct wmi_tlv_peer_assoc_cmd {
+	struct wmi_mac_addr mac_addr;
+	__le32 vdev_id;
+	__le32 new_assoc;
+	__le32 assoc_id;
+	__le32 flags;
+	__le32 caps;
+	__le32 listen_intval;
+	__le32 ht_caps;
+	__le32 max_mpdu;
+	__le32 mpdu_density;
+	__le32 rate_caps;
+	__le32 nss;
+	__le32 vht_caps;
+	__le32 phy_mode;
+	__le32 ht_info[2];
+	__le32 num_legacy_rates;
+	__le32 num_ht_rates;
+} __packed;
+
+struct wmi_tlv_pdev_suspend {
+	__le32 pdev_id; /* not used yet */
+	__le32 opt;
+} __packed;
+
+struct wmi_tlv_pdev_set_wmm_cmd {
+	__le32 pdev_id; /* not used yet */
+	__le32 dg_type; /* purpose unknown */
+} __packed;
+
+struct wmi_tlv_vdev_wmm_params {
+	__le32 dummy;
+	struct wmi_wmm_params params;
+} __packed;
+
+struct wmi_tlv_vdev_set_wmm_cmd {
+	__le32 vdev_id;
+	struct wmi_tlv_vdev_wmm_params vdev_wmm_params[4];
+} __packed;
+
+struct wmi_tlv_phyerr_ev {
+	__le32 num_phyerrs;
+	__le32 tsf_l32;
+	__le32 tsf_u32;
+	__le32 buf_len;
+} __packed;
+
+enum wmi_tlv_dbglog_param {
+	WMI_TLV_DBGLOG_PARAM_LOG_LEVEL = 1,
+	WMI_TLV_DBGLOG_PARAM_VDEV_ENABLE,
+	WMI_TLV_DBGLOG_PARAM_VDEV_DISABLE,
+	WMI_TLV_DBGLOG_PARAM_VDEV_ENABLE_BITMAP,
+	WMI_TLV_DBGLOG_PARAM_VDEV_DISABLE_BITMAP,
+};
+
+enum wmi_tlv_dbglog_log_level {
+	WMI_TLV_DBGLOG_LOG_LEVEL_VERBOSE = 0,
+	WMI_TLV_DBGLOG_LOG_LEVEL_INFO,
+	WMI_TLV_DBGLOG_LOG_LEVEL_INFO_LVL_1,
+	WMI_TLV_DBGLOG_LOG_LEVEL_INFO_LVL_2,
+	WMI_TLV_DBGLOG_LOG_LEVEL_WARN,
+	WMI_TLV_DBGLOG_LOG_LEVEL_ERR,
+};
+
+#define WMI_TLV_DBGLOG_BITMAP_MAX_IDS 512
+#define WMI_TLV_DBGLOG_BITMAP_MAX_WORDS (WMI_TLV_DBGLOG_BITMAP_MAX_IDS / \
+					 sizeof(__le32))
+#define WMI_TLV_DBGLOG_ALL_MODULES 0xffff
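+/*
+ * Pack a module id (upper 16 bits) and a log level (low byte) into the
+ * single 'value' word carried by struct wmi_tlv_dbglog_cmd.
+ */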
+#define WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(module_id, log_level) \
+		(((module_id << 16) & 0xffff0000) | \
+		 ((log_level <<  0) & 0x000000ff))
+
+struct wmi_tlv_dbglog_cmd {
+	__le32 param;
+	__le32 value;
+} __packed;
+
+struct wmi_tlv_resume_cmd {
+	__le32 reserved;
+} __packed;
+
+struct wmi_tlv_req_stats_cmd {
+	__le32 stats_id; /* wmi_stats_id */
+	__le32 vdev_id;
+	struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_tlv_vdev_stats {
+	__le32 vdev_id;
+	__le32 beacon_snr;
+	__le32 data_snr;
+	__le32 num_tx_frames[4]; /* per-AC */
+	__le32 num_rx_frames;
+	__le32 num_tx_frames_retries[4];
+	__le32 num_tx_frames_failures[4];
+	__le32 num_rts_fail;
+	__le32 num_rts_success;
+	__le32 num_rx_err;
+	__le32 num_rx_discard;
+	__le32 num_tx_not_acked;
+	__le32 tx_rate_history[10];
+	__le32 beacon_rssi_history[10];
+} __packed;
+
+struct wmi_tlv_pktlog_enable {
+	__le32 reserved;
+	__le32 filter;
+} __packed;
+
+struct wmi_tlv_pktlog_disable {
+	__le32 reserved;
+} __packed;
+
+enum wmi_tlv_bcn_tx_status {
+	WMI_TLV_BCN_TX_STATUS_OK,
+	WMI_TLV_BCN_TX_STATUS_XRETRY,
+	WMI_TLV_BCN_TX_STATUS_DROP,
+	WMI_TLV_BCN_TX_STATUS_FILTERED,
+};
+
+struct wmi_tlv_bcn_tx_status_ev {
+	__le32 vdev_id;
+	__le32 tx_status;
+} __packed;
+
+struct wmi_tlv_bcn_prb_info {
+	__le32 caps;
+	__le32 erp;
+	u8 ies[0];
+} __packed;
+
+struct wmi_tlv_bcn_tmpl_cmd {
+	__le32 vdev_id;
+	__le32 tim_ie_offset;
+	__le32 buf_len;
+} __packed;
+
+struct wmi_tlv_prb_tmpl_cmd {
+	__le32 vdev_id;
+	__le32 buf_len;
+} __packed;
+
+struct wmi_tlv_p2p_go_bcn_ie {
+	__le32 vdev_id;
+	__le32 ie_len;
+} __packed;
+
+enum wmi_tlv_diag_item_type {
+	WMI_TLV_DIAG_ITEM_TYPE_FW_EVENT,
+	WMI_TLV_DIAG_ITEM_TYPE_FW_LOG,
+	WMI_TLV_DIAG_ITEM_TYPE_FW_DEBUG_MSG,
+};
+
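+/*
+ * Variable-length diagnostic record: 'type' is one of the
+ * WMI_TLV_DIAG_ITEM_TYPE_* values above, 'len' is the length of the
+ * trailing payload in bytes.
+ */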
+struct wmi_tlv_diag_item {
+	u8 type;
+	u8 reserved;
+	__le16 len;
+	__le32 timestamp;
+	__le32 code;
+	u8 payload[0];
+} __packed;
+
+struct wmi_tlv_diag_data_ev {
+	__le32 num_items;
+} __packed;
+
+struct wmi_tlv_sta_keepalive_cmd {
+	__le32 vdev_id;
+	__le32 enabled;
+	__le32 method; /* WMI_STA_KEEPALIVE_METHOD_ */
+	__le32 interval; /* in seconds */
+} __packed;
+
+struct wmi_tlv_stats_ev {
+	__le32 stats_id; /* WMI_STAT_ */
+	__le32 num_pdev_stats;
+	__le32 num_vdev_stats;
+	__le32 num_peer_stats;
+	__le32 num_bcnflt_stats;
+	__le32 num_chan_stats;
+} __packed;
+
+struct wmi_tlv_p2p_noa_ev {
+	__le32 vdev_id;
+} __packed;
+
+struct wmi_tlv_roam_ev {
+	__le32 vdev_id;
+	__le32 reason;
+	__le32 rssi;
+} __packed;
+
+struct wmi_tlv_wow_add_del_event_cmd {
+	__le32 vdev_id;
+	__le32 is_add;
+	__le32 event_bitmap;
+} __packed;
+
+struct wmi_tlv_wow_enable_cmd {
+	__le32 enable;
+} __packed;
+
+struct wmi_tlv_wow_host_wakeup_ind {
+	__le32 reserved;
+} __packed;
+
+struct wmi_tlv_wow_event_info {
+	__le32 vdev_id;
+	__le32 flag;
+	__le32 wake_reason;
+	__le32 data_len;
+} __packed;
+
+enum wmi_tlv_pattern_type {
+	WOW_PATTERN_MIN = 0,
+	WOW_BITMAP_PATTERN = WOW_PATTERN_MIN,
+	WOW_IPV4_SYNC_PATTERN,
+	WOW_IPV6_SYNC_PATTERN,
+	WOW_WILD_CARD_PATTERN,
+	WOW_TIMER_PATTERN,
+	WOW_MAGIC_PATTERN,
+	WOW_IPV6_RA_PATTERN,
+	WOW_IOAC_PKT_PATTERN,
+	WOW_IOAC_TMR_PATTERN,
+	WOW_PATTERN_MAX
+};
+
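+/*
+ * Bitmap WoW patterns use fixed-size 148 byte pattern/mask buffers;
+ * pattern_len and bitmask_len below give the number of bytes actually used.
+ */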
+#define WOW_DEFAULT_BITMAP_PATTERN_SIZE		148
+#define WOW_DEFAULT_BITMASK_SIZE		148
+
+struct wmi_tlv_wow_bitmap_pattern {
+	u8 patternbuf[WOW_DEFAULT_BITMAP_PATTERN_SIZE];
+	u8 bitmaskbuf[WOW_DEFAULT_BITMASK_SIZE];
+	__le32 pattern_offset;
+	__le32 pattern_len;
+	__le32 bitmask_len;
+	__le32 pattern_id;
+} __packed;
+
+struct wmi_tlv_wow_add_pattern_cmd {
+	__le32 vdev_id;
+	__le32 pattern_id;
+	__le32 pattern_type;
+} __packed;
+
+struct wmi_tlv_wow_del_pattern_cmd {
+	__le32 vdev_id;
+	__le32 pattern_id;
+	__le32 pattern_type;
+} __packed;
+
+/* TDLS Options */
+enum wmi_tlv_tdls_options {
+	WMI_TLV_TDLS_OFFCHAN_EN = BIT(0),
+	WMI_TLV_TDLS_BUFFER_STA_EN = BIT(1),
+	WMI_TLV_TDLS_SLEEP_STA_EN = BIT(2),
+};
+
+struct wmi_tdls_set_state_cmd {
+	__le32 vdev_id;
+	__le32 state;
+	__le32 notification_interval_ms;
+	__le32 tx_discovery_threshold;
+	__le32 tx_teardown_threshold;
+	__le32 rssi_teardown_threshold;
+	__le32 rssi_delta;
+	__le32 tdls_options;
+	__le32 tdls_peer_traffic_ind_window;
+	__le32 tdls_peer_traffic_response_timeout_ms;
+	__le32 tdls_puapsd_mask;
+	__le32 tdls_puapsd_inactivity_time_ms;
+	__le32 tdls_puapsd_rx_frame_threshold;
+} __packed;
+
+struct wmi_tdls_peer_update_cmd {
+	__le32 vdev_id;
+	struct wmi_mac_addr peer_macaddr;
+	__le32 peer_state;
+} __packed;
+
+enum {
+	WMI_TLV_TDLS_PEER_QOS_AC_VO = BIT(0),
+	WMI_TLV_TDLS_PEER_QOS_AC_VI = BIT(1),
+	WMI_TLV_TDLS_PEER_QOS_AC_BK = BIT(2),
+	WMI_TLV_TDLS_PEER_QOS_AC_BE = BIT(3),
+};
+
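+/* The service period (SP) field of peer_qos occupies bits 5-6 (mask 0x60). */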
+#define WMI_TLV_TDLS_PEER_SP_MASK	0x60
+#define WMI_TLV_TDLS_PEER_SP_LSB	5
+
+struct wmi_tdls_peer_capab {
+	__le32 peer_qos;
+	__le32 buff_sta_support;
+	__le32 off_chan_support;
+	__le32 peer_curr_operclass;
+	__le32 self_curr_operclass;
+	__le32 peer_chan_len;
+	__le32 peer_operclass_len;
+	u8 peer_operclass[WMI_TDLS_MAX_SUPP_OPER_CLASSES];
+	__le32 is_peer_responder;
+	__le32 pref_offchan_num;
+	__le32 pref_offchan_bw;
+} __packed;
+
+struct wmi_tlv_adaptive_qcs {
+	__le32 enable;
+} __packed;
+
+/**
+ * enum wmi_tlv_tx_pause_id - firmware tx queue pause reason types
+ *
+ * @WMI_TLV_TX_PAUSE_ID_MCC: used by the multi-channel firmware scheduler.
+ *		Only vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_AP_PEER_PS: peer in AP mode is asleep.
+ *		Only peer_id is valid.
+ * @WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD: Only peer_id and tid_map are valid.
+ * @WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA: Only vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_P2P_GO_PS: Only vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_STA_ADD_BA: Only peer_id and tid_map are valid.
+ * @WMI_TLV_TX_PAUSE_ID_AP_PS: When all peers are asleep in AP mode. Only
+ *		vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_IBSS_PS: When all peers are asleep in IBSS mode. Only
+ *		vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_HOST: Host itself requested tx pause.
+ */
+enum wmi_tlv_tx_pause_id {
+	WMI_TLV_TX_PAUSE_ID_MCC = 1,
+	WMI_TLV_TX_PAUSE_ID_AP_PEER_PS = 2,
+	WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD = 3,
+	WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA = 4,
+	WMI_TLV_TX_PAUSE_ID_P2P_GO_PS = 5,
+	WMI_TLV_TX_PAUSE_ID_STA_ADD_BA = 6,
+	WMI_TLV_TX_PAUSE_ID_AP_PS = 7,
+	WMI_TLV_TX_PAUSE_ID_IBSS_PS = 8,
+	WMI_TLV_TX_PAUSE_ID_HOST = 21,
+};
+
+enum wmi_tlv_tx_pause_action {
+	WMI_TLV_TX_PAUSE_ACTION_STOP,
+	WMI_TLV_TX_PAUSE_ACTION_WAKE,
+};
+
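+/*
+ * Payload of the firmware tx pause event (WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT).
+ * Which of vdev_map, peer_id and tid_map are meaningful depends on pause_id,
+ * see the enum wmi_tlv_tx_pause_id documentation above.
+ */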
+struct wmi_tlv_tx_pause_ev {
+	__le32 pause_id;
+	__le32 action;
+	__le32 vdev_map;
+	__le32 peer_id;
+	__le32 tid_map;
+} __packed;
+
+void ath10k_wmi_tlv_attach(struct ath10k *ar);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
new file mode 100644
index 0000000..7569db0
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -0,0 +1,7634 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+#include <linux/ctype.h>
+
+#include "core.h"
+#include "htc.h"
+#include "debug.h"
+#include "wmi.h"
+#include "wmi-tlv.h"
+#include "mac.h"
+#include "testmode.h"
+#include "wmi-ops.h"
+#include "p2p.h"
+#include "hw.h"
+
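+/*
+ * Per-firmware-interface command id maps: each table translates the abstract
+ * fields of struct wmi_cmd_map into the numeric command ids understood by a
+ * given firmware branch. Entries set to WMI_CMD_UNSUPPORTED mark commands
+ * that the branch does not implement.
+ */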
+/* MAIN firmware WMI command map */
+static struct wmi_cmd_map wmi_cmd_map = {
+	.init_cmdid = WMI_INIT_CMDID,
+	.start_scan_cmdid = WMI_START_SCAN_CMDID,
+	.stop_scan_cmdid = WMI_STOP_SCAN_CMDID,
+	.scan_chan_list_cmdid = WMI_SCAN_CHAN_LIST_CMDID,
+	.scan_sch_prio_tbl_cmdid = WMI_SCAN_SCH_PRIO_TBL_CMDID,
+	.pdev_set_regdomain_cmdid = WMI_PDEV_SET_REGDOMAIN_CMDID,
+	.pdev_set_channel_cmdid = WMI_PDEV_SET_CHANNEL_CMDID,
+	.pdev_set_param_cmdid = WMI_PDEV_SET_PARAM_CMDID,
+	.pdev_pktlog_enable_cmdid = WMI_PDEV_PKTLOG_ENABLE_CMDID,
+	.pdev_pktlog_disable_cmdid = WMI_PDEV_PKTLOG_DISABLE_CMDID,
+	.pdev_set_wmm_params_cmdid = WMI_PDEV_SET_WMM_PARAMS_CMDID,
+	.pdev_set_ht_cap_ie_cmdid = WMI_PDEV_SET_HT_CAP_IE_CMDID,
+	.pdev_set_vht_cap_ie_cmdid = WMI_PDEV_SET_VHT_CAP_IE_CMDID,
+	.pdev_set_dscp_tid_map_cmdid = WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
+	.pdev_set_quiet_mode_cmdid = WMI_PDEV_SET_QUIET_MODE_CMDID,
+	.pdev_green_ap_ps_enable_cmdid = WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+	.pdev_get_tpc_config_cmdid = WMI_PDEV_GET_TPC_CONFIG_CMDID,
+	.pdev_set_base_macaddr_cmdid = WMI_PDEV_SET_BASE_MACADDR_CMDID,
+	.vdev_create_cmdid = WMI_VDEV_CREATE_CMDID,
+	.vdev_delete_cmdid = WMI_VDEV_DELETE_CMDID,
+	.vdev_start_request_cmdid = WMI_VDEV_START_REQUEST_CMDID,
+	.vdev_restart_request_cmdid = WMI_VDEV_RESTART_REQUEST_CMDID,
+	.vdev_up_cmdid = WMI_VDEV_UP_CMDID,
+	.vdev_stop_cmdid = WMI_VDEV_STOP_CMDID,
+	.vdev_down_cmdid = WMI_VDEV_DOWN_CMDID,
+	.vdev_set_param_cmdid = WMI_VDEV_SET_PARAM_CMDID,
+	.vdev_install_key_cmdid = WMI_VDEV_INSTALL_KEY_CMDID,
+	.peer_create_cmdid = WMI_PEER_CREATE_CMDID,
+	.peer_delete_cmdid = WMI_PEER_DELETE_CMDID,
+	.peer_flush_tids_cmdid = WMI_PEER_FLUSH_TIDS_CMDID,
+	.peer_set_param_cmdid = WMI_PEER_SET_PARAM_CMDID,
+	.peer_assoc_cmdid = WMI_PEER_ASSOC_CMDID,
+	.peer_add_wds_entry_cmdid = WMI_PEER_ADD_WDS_ENTRY_CMDID,
+	.peer_remove_wds_entry_cmdid = WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
+	.peer_mcast_group_cmdid = WMI_PEER_MCAST_GROUP_CMDID,
+	.bcn_tx_cmdid = WMI_BCN_TX_CMDID,
+	.pdev_send_bcn_cmdid = WMI_PDEV_SEND_BCN_CMDID,
+	.bcn_tmpl_cmdid = WMI_BCN_TMPL_CMDID,
+	.bcn_filter_rx_cmdid = WMI_BCN_FILTER_RX_CMDID,
+	.prb_req_filter_rx_cmdid = WMI_PRB_REQ_FILTER_RX_CMDID,
+	.mgmt_tx_cmdid = WMI_MGMT_TX_CMDID,
+	.prb_tmpl_cmdid = WMI_PRB_TMPL_CMDID,
+	.addba_clear_resp_cmdid = WMI_ADDBA_CLEAR_RESP_CMDID,
+	.addba_send_cmdid = WMI_ADDBA_SEND_CMDID,
+	.addba_status_cmdid = WMI_ADDBA_STATUS_CMDID,
+	.delba_send_cmdid = WMI_DELBA_SEND_CMDID,
+	.addba_set_resp_cmdid = WMI_ADDBA_SET_RESP_CMDID,
+	.send_singleamsdu_cmdid = WMI_SEND_SINGLEAMSDU_CMDID,
+	.sta_powersave_mode_cmdid = WMI_STA_POWERSAVE_MODE_CMDID,
+	.sta_powersave_param_cmdid = WMI_STA_POWERSAVE_PARAM_CMDID,
+	.sta_mimo_ps_mode_cmdid = WMI_STA_MIMO_PS_MODE_CMDID,
+	.pdev_dfs_enable_cmdid = WMI_PDEV_DFS_ENABLE_CMDID,
+	.pdev_dfs_disable_cmdid = WMI_PDEV_DFS_DISABLE_CMDID,
+	.roam_scan_mode = WMI_ROAM_SCAN_MODE,
+	.roam_scan_rssi_threshold = WMI_ROAM_SCAN_RSSI_THRESHOLD,
+	.roam_scan_period = WMI_ROAM_SCAN_PERIOD,
+	.roam_scan_rssi_change_threshold = WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+	.roam_ap_profile = WMI_ROAM_AP_PROFILE,
+	.ofl_scan_add_ap_profile = WMI_OFL_SCAN_ADD_AP_PROFILE,
+	.ofl_scan_remove_ap_profile = WMI_OFL_SCAN_REMOVE_AP_PROFILE,
+	.ofl_scan_period = WMI_OFL_SCAN_PERIOD,
+	.p2p_dev_set_device_info = WMI_P2P_DEV_SET_DEVICE_INFO,
+	.p2p_dev_set_discoverability = WMI_P2P_DEV_SET_DISCOVERABILITY,
+	.p2p_go_set_beacon_ie = WMI_P2P_GO_SET_BEACON_IE,
+	.p2p_go_set_probe_resp_ie = WMI_P2P_GO_SET_PROBE_RESP_IE,
+	.p2p_set_vendor_ie_data_cmdid = WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
+	.ap_ps_peer_param_cmdid = WMI_AP_PS_PEER_PARAM_CMDID,
+	.ap_ps_peer_uapsd_coex_cmdid = WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
+	.peer_rate_retry_sched_cmdid = WMI_PEER_RATE_RETRY_SCHED_CMDID,
+	.wlan_profile_trigger_cmdid = WMI_WLAN_PROFILE_TRIGGER_CMDID,
+	.wlan_profile_set_hist_intvl_cmdid =
+				WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+	.wlan_profile_get_profile_data_cmdid =
+				WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+	.wlan_profile_enable_profile_id_cmdid =
+				WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+	.wlan_profile_list_profile_id_cmdid =
+				WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+	.pdev_suspend_cmdid = WMI_PDEV_SUSPEND_CMDID,
+	.pdev_resume_cmdid = WMI_PDEV_RESUME_CMDID,
+	.add_bcn_filter_cmdid = WMI_ADD_BCN_FILTER_CMDID,
+	.rmv_bcn_filter_cmdid = WMI_RMV_BCN_FILTER_CMDID,
+	.wow_add_wake_pattern_cmdid = WMI_WOW_ADD_WAKE_PATTERN_CMDID,
+	.wow_del_wake_pattern_cmdid = WMI_WOW_DEL_WAKE_PATTERN_CMDID,
+	.wow_enable_disable_wake_event_cmdid =
+				WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+	.wow_enable_cmdid = WMI_WOW_ENABLE_CMDID,
+	.wow_hostwakeup_from_sleep_cmdid = WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+	.rtt_measreq_cmdid = WMI_RTT_MEASREQ_CMDID,
+	.rtt_tsf_cmdid = WMI_RTT_TSF_CMDID,
+	.vdev_spectral_scan_configure_cmdid =
+				WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+	.vdev_spectral_scan_enable_cmdid = WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+	.request_stats_cmdid = WMI_REQUEST_STATS_CMDID,
+	.set_arp_ns_offload_cmdid = WMI_SET_ARP_NS_OFFLOAD_CMDID,
+	.network_list_offload_config_cmdid =
+				WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
+	.gtk_offload_cmdid = WMI_GTK_OFFLOAD_CMDID,
+	.csa_offload_enable_cmdid = WMI_CSA_OFFLOAD_ENABLE_CMDID,
+	.csa_offload_chanswitch_cmdid = WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
+	.chatter_set_mode_cmdid = WMI_CHATTER_SET_MODE_CMDID,
+	.peer_tid_addba_cmdid = WMI_PEER_TID_ADDBA_CMDID,
+	.peer_tid_delba_cmdid = WMI_PEER_TID_DELBA_CMDID,
+	.sta_dtim_ps_method_cmdid = WMI_STA_DTIM_PS_METHOD_CMDID,
+	.sta_uapsd_auto_trig_cmdid = WMI_STA_UAPSD_AUTO_TRIG_CMDID,
+	.sta_keepalive_cmd = WMI_STA_KEEPALIVE_CMD,
+	.echo_cmdid = WMI_ECHO_CMDID,
+	.pdev_utf_cmdid = WMI_PDEV_UTF_CMDID,
+	.dbglog_cfg_cmdid = WMI_DBGLOG_CFG_CMDID,
+	.pdev_qvit_cmdid = WMI_PDEV_QVIT_CMDID,
+	.pdev_ftm_intg_cmdid = WMI_PDEV_FTM_INTG_CMDID,
+	.vdev_set_keepalive_cmdid = WMI_VDEV_SET_KEEPALIVE_CMDID,
+	.vdev_get_keepalive_cmdid = WMI_VDEV_GET_KEEPALIVE_CMDID,
+	.force_fw_hang_cmdid = WMI_FORCE_FW_HANG_CMDID,
+	.gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID,
+	.gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID,
+	.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED,
+	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+	.nan_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
+	.mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
+	.set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
+};
+
+/* 10.X firmware WMI command map */
+static struct wmi_cmd_map wmi_10x_cmd_map = {
+	.init_cmdid = WMI_10X_INIT_CMDID,
+	.start_scan_cmdid = WMI_10X_START_SCAN_CMDID,
+	.stop_scan_cmdid = WMI_10X_STOP_SCAN_CMDID,
+	.scan_chan_list_cmdid = WMI_10X_SCAN_CHAN_LIST_CMDID,
+	.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_regdomain_cmdid = WMI_10X_PDEV_SET_REGDOMAIN_CMDID,
+	.pdev_set_channel_cmdid = WMI_10X_PDEV_SET_CHANNEL_CMDID,
+	.pdev_set_param_cmdid = WMI_10X_PDEV_SET_PARAM_CMDID,
+	.pdev_pktlog_enable_cmdid = WMI_10X_PDEV_PKTLOG_ENABLE_CMDID,
+	.pdev_pktlog_disable_cmdid = WMI_10X_PDEV_PKTLOG_DISABLE_CMDID,
+	.pdev_set_wmm_params_cmdid = WMI_10X_PDEV_SET_WMM_PARAMS_CMDID,
+	.pdev_set_ht_cap_ie_cmdid = WMI_10X_PDEV_SET_HT_CAP_IE_CMDID,
+	.pdev_set_vht_cap_ie_cmdid = WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID,
+	.pdev_set_dscp_tid_map_cmdid = WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID,
+	.pdev_set_quiet_mode_cmdid = WMI_10X_PDEV_SET_QUIET_MODE_CMDID,
+	.pdev_green_ap_ps_enable_cmdid = WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+	.pdev_get_tpc_config_cmdid = WMI_10X_PDEV_GET_TPC_CONFIG_CMDID,
+	.pdev_set_base_macaddr_cmdid = WMI_10X_PDEV_SET_BASE_MACADDR_CMDID,
+	.vdev_create_cmdid = WMI_10X_VDEV_CREATE_CMDID,
+	.vdev_delete_cmdid = WMI_10X_VDEV_DELETE_CMDID,
+	.vdev_start_request_cmdid = WMI_10X_VDEV_START_REQUEST_CMDID,
+	.vdev_restart_request_cmdid = WMI_10X_VDEV_RESTART_REQUEST_CMDID,
+	.vdev_up_cmdid = WMI_10X_VDEV_UP_CMDID,
+	.vdev_stop_cmdid = WMI_10X_VDEV_STOP_CMDID,
+	.vdev_down_cmdid = WMI_10X_VDEV_DOWN_CMDID,
+	.vdev_set_param_cmdid = WMI_10X_VDEV_SET_PARAM_CMDID,
+	.vdev_install_key_cmdid = WMI_10X_VDEV_INSTALL_KEY_CMDID,
+	.peer_create_cmdid = WMI_10X_PEER_CREATE_CMDID,
+	.peer_delete_cmdid = WMI_10X_PEER_DELETE_CMDID,
+	.peer_flush_tids_cmdid = WMI_10X_PEER_FLUSH_TIDS_CMDID,
+	.peer_set_param_cmdid = WMI_10X_PEER_SET_PARAM_CMDID,
+	.peer_assoc_cmdid = WMI_10X_PEER_ASSOC_CMDID,
+	.peer_add_wds_entry_cmdid = WMI_10X_PEER_ADD_WDS_ENTRY_CMDID,
+	.peer_remove_wds_entry_cmdid = WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID,
+	.peer_mcast_group_cmdid = WMI_10X_PEER_MCAST_GROUP_CMDID,
+	.bcn_tx_cmdid = WMI_10X_BCN_TX_CMDID,
+	.pdev_send_bcn_cmdid = WMI_10X_PDEV_SEND_BCN_CMDID,
+	.bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
+	.bcn_filter_rx_cmdid = WMI_10X_BCN_FILTER_RX_CMDID,
+	.prb_req_filter_rx_cmdid = WMI_10X_PRB_REQ_FILTER_RX_CMDID,
+	.mgmt_tx_cmdid = WMI_10X_MGMT_TX_CMDID,
+	.prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
+	.addba_clear_resp_cmdid = WMI_10X_ADDBA_CLEAR_RESP_CMDID,
+	.addba_send_cmdid = WMI_10X_ADDBA_SEND_CMDID,
+	.addba_status_cmdid = WMI_10X_ADDBA_STATUS_CMDID,
+	.delba_send_cmdid = WMI_10X_DELBA_SEND_CMDID,
+	.addba_set_resp_cmdid = WMI_10X_ADDBA_SET_RESP_CMDID,
+	.send_singleamsdu_cmdid = WMI_10X_SEND_SINGLEAMSDU_CMDID,
+	.sta_powersave_mode_cmdid = WMI_10X_STA_POWERSAVE_MODE_CMDID,
+	.sta_powersave_param_cmdid = WMI_10X_STA_POWERSAVE_PARAM_CMDID,
+	.sta_mimo_ps_mode_cmdid = WMI_10X_STA_MIMO_PS_MODE_CMDID,
+	.pdev_dfs_enable_cmdid = WMI_10X_PDEV_DFS_ENABLE_CMDID,
+	.pdev_dfs_disable_cmdid = WMI_10X_PDEV_DFS_DISABLE_CMDID,
+	.roam_scan_mode = WMI_10X_ROAM_SCAN_MODE,
+	.roam_scan_rssi_threshold = WMI_10X_ROAM_SCAN_RSSI_THRESHOLD,
+	.roam_scan_period = WMI_10X_ROAM_SCAN_PERIOD,
+	.roam_scan_rssi_change_threshold =
+				WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+	.roam_ap_profile = WMI_10X_ROAM_AP_PROFILE,
+	.ofl_scan_add_ap_profile = WMI_10X_OFL_SCAN_ADD_AP_PROFILE,
+	.ofl_scan_remove_ap_profile = WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE,
+	.ofl_scan_period = WMI_10X_OFL_SCAN_PERIOD,
+	.p2p_dev_set_device_info = WMI_10X_P2P_DEV_SET_DEVICE_INFO,
+	.p2p_dev_set_discoverability = WMI_10X_P2P_DEV_SET_DISCOVERABILITY,
+	.p2p_go_set_beacon_ie = WMI_10X_P2P_GO_SET_BEACON_IE,
+	.p2p_go_set_probe_resp_ie = WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
+	.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
+	.ap_ps_peer_param_cmdid = WMI_10X_AP_PS_PEER_PARAM_CMDID,
+	.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_rate_retry_sched_cmdid = WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
+	.wlan_profile_trigger_cmdid = WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
+	.wlan_profile_set_hist_intvl_cmdid =
+				WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+	.wlan_profile_get_profile_data_cmdid =
+				WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+	.wlan_profile_enable_profile_id_cmdid =
+				WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+	.wlan_profile_list_profile_id_cmdid =
+				WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+	.pdev_suspend_cmdid = WMI_10X_PDEV_SUSPEND_CMDID,
+	.pdev_resume_cmdid = WMI_10X_PDEV_RESUME_CMDID,
+	.add_bcn_filter_cmdid = WMI_10X_ADD_BCN_FILTER_CMDID,
+	.rmv_bcn_filter_cmdid = WMI_10X_RMV_BCN_FILTER_CMDID,
+	.wow_add_wake_pattern_cmdid = WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID,
+	.wow_del_wake_pattern_cmdid = WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID,
+	.wow_enable_disable_wake_event_cmdid =
+				WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+	.wow_enable_cmdid = WMI_10X_WOW_ENABLE_CMDID,
+	.wow_hostwakeup_from_sleep_cmdid =
+				WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+	.rtt_measreq_cmdid = WMI_10X_RTT_MEASREQ_CMDID,
+	.rtt_tsf_cmdid = WMI_10X_RTT_TSF_CMDID,
+	.vdev_spectral_scan_configure_cmdid =
+				WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+	.vdev_spectral_scan_enable_cmdid =
+				WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+	.request_stats_cmdid = WMI_10X_REQUEST_STATS_CMDID,
+	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
+	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
+	.gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
+	.csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
+	.csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
+	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
+	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
+	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
+	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
+	.echo_cmdid = WMI_10X_ECHO_CMDID,
+	.pdev_utf_cmdid = WMI_10X_PDEV_UTF_CMDID,
+	.dbglog_cfg_cmdid = WMI_10X_DBGLOG_CFG_CMDID,
+	.pdev_qvit_cmdid = WMI_10X_PDEV_QVIT_CMDID,
+	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+	.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
+	.gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID,
+	.gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID,
+	.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED,
+	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+	.nan_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
+	.mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
+	.set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
+};
+
+/* 10.2.4 WMI cmd track */
+static struct wmi_cmd_map wmi_10_2_4_cmd_map = {
+	.init_cmdid = WMI_10_2_INIT_CMDID,
+	.start_scan_cmdid = WMI_10_2_START_SCAN_CMDID,
+	.stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
+	.scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
+	.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
+	.pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
+	.pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
+	.pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
+	.pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
+	.pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
+	.pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
+	.pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
+	.pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
+	.pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+	.pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
+	.pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
+	.vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID,
+	.vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID,
+	.vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID,
+	.vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
+	.vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID,
+	.vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID,
+	.vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID,
+	.vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID,
+	.vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID,
+	.peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID,
+	.peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID,
+	.peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID,
+	.peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID,
+	.peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID,
+	.peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
+	.peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
+	.peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID,
+	.bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID,
+	.pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID,
+	.bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
+	.bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID,
+	.prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
+	.mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID,
+	.prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
+	.addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
+	.addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID,
+	.addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID,
+	.delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID,
+	.addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID,
+	.send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID,
+	.sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID,
+	.sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
+	.sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID,
+	.pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID,
+	.pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID,
+	.roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE,
+	.roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
+	.roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD,
+	.roam_scan_rssi_change_threshold =
+				WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+	.roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE,
+	.ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
+	.ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
+	.ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD,
+	.p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
+	.p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
+	.p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE,
+	.p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
+	.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
+	.ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID,
+	.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
+	.wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
+	.wlan_profile_set_hist_intvl_cmdid =
+				WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+	.wlan_profile_get_profile_data_cmdid =
+				WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+	.wlan_profile_enable_profile_id_cmdid =
+				WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+	.wlan_profile_list_profile_id_cmdid =
+				WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+	.pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID,
+	.pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID,
+	.add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID,
+	.rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID,
+	.wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
+	.wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
+	.wow_enable_disable_wake_event_cmdid =
+				WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+	.wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID,
+	.wow_hostwakeup_from_sleep_cmdid =
+				WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+	.rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID,
+	.rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID,
+	.vdev_spectral_scan_configure_cmdid =
+				WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+	.vdev_spectral_scan_enable_cmdid =
+				WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+	.request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID,
+	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
+	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
+	.gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
+	.csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
+	.csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
+	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
+	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
+	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
+	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
+	.echo_cmdid = WMI_10_2_ECHO_CMDID,
+	.pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID,
+	.dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID,
+	.pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID,
+	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+	.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
+	.gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
+	.gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
+	.pdev_get_temperature_cmdid = WMI_10_2_PDEV_GET_TEMPERATURE_CMDID,
+	.pdev_enable_adaptive_cca_cmdid = WMI_10_2_SET_CCA_PARAMS,
+	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+	.nan_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
+	.mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
+	.set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
+};
+
+/* 10.4 WMI cmd track */
+static struct wmi_cmd_map wmi_10_4_cmd_map = {
+	.init_cmdid = WMI_10_4_INIT_CMDID,
+	.start_scan_cmdid = WMI_10_4_START_SCAN_CMDID,
+	.stop_scan_cmdid = WMI_10_4_STOP_SCAN_CMDID,
+	.scan_chan_list_cmdid = WMI_10_4_SCAN_CHAN_LIST_CMDID,
+	.scan_sch_prio_tbl_cmdid = WMI_10_4_SCAN_SCH_PRIO_TBL_CMDID,
+	.pdev_set_regdomain_cmdid = WMI_10_4_PDEV_SET_REGDOMAIN_CMDID,
+	.pdev_set_channel_cmdid = WMI_10_4_PDEV_SET_CHANNEL_CMDID,
+	.pdev_set_param_cmdid = WMI_10_4_PDEV_SET_PARAM_CMDID,
+	.pdev_pktlog_enable_cmdid = WMI_10_4_PDEV_PKTLOG_ENABLE_CMDID,
+	.pdev_pktlog_disable_cmdid = WMI_10_4_PDEV_PKTLOG_DISABLE_CMDID,
+	.pdev_set_wmm_params_cmdid = WMI_10_4_PDEV_SET_WMM_PARAMS_CMDID,
+	.pdev_set_ht_cap_ie_cmdid = WMI_10_4_PDEV_SET_HT_CAP_IE_CMDID,
+	.pdev_set_vht_cap_ie_cmdid = WMI_10_4_PDEV_SET_VHT_CAP_IE_CMDID,
+	.pdev_set_dscp_tid_map_cmdid = WMI_10_4_PDEV_SET_DSCP_TID_MAP_CMDID,
+	.pdev_set_quiet_mode_cmdid = WMI_10_4_PDEV_SET_QUIET_MODE_CMDID,
+	.pdev_green_ap_ps_enable_cmdid = WMI_10_4_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+	.pdev_get_tpc_config_cmdid = WMI_10_4_PDEV_GET_TPC_CONFIG_CMDID,
+	.pdev_set_base_macaddr_cmdid = WMI_10_4_PDEV_SET_BASE_MACADDR_CMDID,
+	.vdev_create_cmdid = WMI_10_4_VDEV_CREATE_CMDID,
+	.vdev_delete_cmdid = WMI_10_4_VDEV_DELETE_CMDID,
+	.vdev_start_request_cmdid = WMI_10_4_VDEV_START_REQUEST_CMDID,
+	.vdev_restart_request_cmdid = WMI_10_4_VDEV_RESTART_REQUEST_CMDID,
+	.vdev_up_cmdid = WMI_10_4_VDEV_UP_CMDID,
+	.vdev_stop_cmdid = WMI_10_4_VDEV_STOP_CMDID,
+	.vdev_down_cmdid = WMI_10_4_VDEV_DOWN_CMDID,
+	.vdev_set_param_cmdid = WMI_10_4_VDEV_SET_PARAM_CMDID,
+	.vdev_install_key_cmdid = WMI_10_4_VDEV_INSTALL_KEY_CMDID,
+	.peer_create_cmdid = WMI_10_4_PEER_CREATE_CMDID,
+	.peer_delete_cmdid = WMI_10_4_PEER_DELETE_CMDID,
+	.peer_flush_tids_cmdid = WMI_10_4_PEER_FLUSH_TIDS_CMDID,
+	.peer_set_param_cmdid = WMI_10_4_PEER_SET_PARAM_CMDID,
+	.peer_assoc_cmdid = WMI_10_4_PEER_ASSOC_CMDID,
+	.peer_add_wds_entry_cmdid = WMI_10_4_PEER_ADD_WDS_ENTRY_CMDID,
+	.peer_remove_wds_entry_cmdid = WMI_10_4_PEER_REMOVE_WDS_ENTRY_CMDID,
+	.peer_mcast_group_cmdid = WMI_10_4_PEER_MCAST_GROUP_CMDID,
+	.bcn_tx_cmdid = WMI_10_4_BCN_TX_CMDID,
+	.pdev_send_bcn_cmdid = WMI_10_4_PDEV_SEND_BCN_CMDID,
+	.bcn_tmpl_cmdid = WMI_10_4_BCN_PRB_TMPL_CMDID,
+	.bcn_filter_rx_cmdid = WMI_10_4_BCN_FILTER_RX_CMDID,
+	.prb_req_filter_rx_cmdid = WMI_10_4_PRB_REQ_FILTER_RX_CMDID,
+	.mgmt_tx_cmdid = WMI_10_4_MGMT_TX_CMDID,
+	.prb_tmpl_cmdid = WMI_10_4_PRB_TMPL_CMDID,
+	.addba_clear_resp_cmdid = WMI_10_4_ADDBA_CLEAR_RESP_CMDID,
+	.addba_send_cmdid = WMI_10_4_ADDBA_SEND_CMDID,
+	.addba_status_cmdid = WMI_10_4_ADDBA_STATUS_CMDID,
+	.delba_send_cmdid = WMI_10_4_DELBA_SEND_CMDID,
+	.addba_set_resp_cmdid = WMI_10_4_ADDBA_SET_RESP_CMDID,
+	.send_singleamsdu_cmdid = WMI_10_4_SEND_SINGLEAMSDU_CMDID,
+	.sta_powersave_mode_cmdid = WMI_10_4_STA_POWERSAVE_MODE_CMDID,
+	.sta_powersave_param_cmdid = WMI_10_4_STA_POWERSAVE_PARAM_CMDID,
+	.sta_mimo_ps_mode_cmdid = WMI_10_4_STA_MIMO_PS_MODE_CMDID,
+	.pdev_dfs_enable_cmdid = WMI_10_4_PDEV_DFS_ENABLE_CMDID,
+	.pdev_dfs_disable_cmdid = WMI_10_4_PDEV_DFS_DISABLE_CMDID,
+	.roam_scan_mode = WMI_10_4_ROAM_SCAN_MODE,
+	.roam_scan_rssi_threshold = WMI_10_4_ROAM_SCAN_RSSI_THRESHOLD,
+	.roam_scan_period = WMI_10_4_ROAM_SCAN_PERIOD,
+	.roam_scan_rssi_change_threshold =
+				WMI_10_4_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+	.roam_ap_profile = WMI_10_4_ROAM_AP_PROFILE,
+	.ofl_scan_add_ap_profile = WMI_10_4_OFL_SCAN_ADD_AP_PROFILE,
+	.ofl_scan_remove_ap_profile = WMI_10_4_OFL_SCAN_REMOVE_AP_PROFILE,
+	.ofl_scan_period = WMI_10_4_OFL_SCAN_PERIOD,
+	.p2p_dev_set_device_info = WMI_10_4_P2P_DEV_SET_DEVICE_INFO,
+	.p2p_dev_set_discoverability = WMI_10_4_P2P_DEV_SET_DISCOVERABILITY,
+	.p2p_go_set_beacon_ie = WMI_10_4_P2P_GO_SET_BEACON_IE,
+	.p2p_go_set_probe_resp_ie = WMI_10_4_P2P_GO_SET_PROBE_RESP_IE,
+	.p2p_set_vendor_ie_data_cmdid = WMI_10_4_P2P_SET_VENDOR_IE_DATA_CMDID,
+	.ap_ps_peer_param_cmdid = WMI_10_4_AP_PS_PEER_PARAM_CMDID,
+	.ap_ps_peer_uapsd_coex_cmdid = WMI_10_4_AP_PS_PEER_UAPSD_COEX_CMDID,
+	.peer_rate_retry_sched_cmdid = WMI_10_4_PEER_RATE_RETRY_SCHED_CMDID,
+	.wlan_profile_trigger_cmdid = WMI_10_4_WLAN_PROFILE_TRIGGER_CMDID,
+	.wlan_profile_set_hist_intvl_cmdid =
+				WMI_10_4_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+	.wlan_profile_get_profile_data_cmdid =
+				WMI_10_4_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+	.wlan_profile_enable_profile_id_cmdid =
+				WMI_10_4_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+	.wlan_profile_list_profile_id_cmdid =
+				WMI_10_4_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+	.pdev_suspend_cmdid = WMI_10_4_PDEV_SUSPEND_CMDID,
+	.pdev_resume_cmdid = WMI_10_4_PDEV_RESUME_CMDID,
+	.add_bcn_filter_cmdid = WMI_10_4_ADD_BCN_FILTER_CMDID,
+	.rmv_bcn_filter_cmdid = WMI_10_4_RMV_BCN_FILTER_CMDID,
+	.wow_add_wake_pattern_cmdid = WMI_10_4_WOW_ADD_WAKE_PATTERN_CMDID,
+	.wow_del_wake_pattern_cmdid = WMI_10_4_WOW_DEL_WAKE_PATTERN_CMDID,
+	.wow_enable_disable_wake_event_cmdid =
+				WMI_10_4_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+	.wow_enable_cmdid = WMI_10_4_WOW_ENABLE_CMDID,
+	.wow_hostwakeup_from_sleep_cmdid =
+				WMI_10_4_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+	.rtt_measreq_cmdid = WMI_10_4_RTT_MEASREQ_CMDID,
+	.rtt_tsf_cmdid = WMI_10_4_RTT_TSF_CMDID,
+	.vdev_spectral_scan_configure_cmdid =
+				WMI_10_4_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+	.vdev_spectral_scan_enable_cmdid =
+				WMI_10_4_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+	.request_stats_cmdid = WMI_10_4_REQUEST_STATS_CMDID,
+	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
+	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
+	.gtk_offload_cmdid = WMI_10_4_GTK_OFFLOAD_CMDID,
+	.csa_offload_enable_cmdid = WMI_10_4_CSA_OFFLOAD_ENABLE_CMDID,
+	.csa_offload_chanswitch_cmdid = WMI_10_4_CSA_OFFLOAD_CHANSWITCH_CMDID,
+	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
+	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
+	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
+	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
+	.echo_cmdid = WMI_10_4_ECHO_CMDID,
+	.pdev_utf_cmdid = WMI_10_4_PDEV_UTF_CMDID,
+	.dbglog_cfg_cmdid = WMI_10_4_DBGLOG_CFG_CMDID,
+	.pdev_qvit_cmdid = WMI_10_4_PDEV_QVIT_CMDID,
+	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_set_keepalive_cmdid = WMI_10_4_VDEV_SET_KEEPALIVE_CMDID,
+	.vdev_get_keepalive_cmdid = WMI_10_4_VDEV_GET_KEEPALIVE_CMDID,
+	.force_fw_hang_cmdid = WMI_10_4_FORCE_FW_HANG_CMDID,
+	.gpio_config_cmdid = WMI_10_4_GPIO_CONFIG_CMDID,
+	.gpio_output_cmdid = WMI_10_4_GPIO_OUTPUT_CMDID,
+	.pdev_get_temperature_cmdid = WMI_10_4_PDEV_GET_TEMPERATURE_CMDID,
+	.vdev_set_wmm_params_cmdid = WMI_CMD_UNSUPPORTED,
+	.tdls_set_state_cmdid = WMI_CMD_UNSUPPORTED,
+	.tdls_peer_update_cmdid = WMI_CMD_UNSUPPORTED,
+	.adaptive_qcs_cmdid = WMI_CMD_UNSUPPORTED,
+	.scan_update_request_cmdid = WMI_10_4_SCAN_UPDATE_REQUEST_CMDID,
+	.vdev_standby_response_cmdid = WMI_10_4_VDEV_STANDBY_RESPONSE_CMDID,
+	.vdev_resume_response_cmdid = WMI_10_4_VDEV_RESUME_RESPONSE_CMDID,
+	.wlan_peer_caching_add_peer_cmdid =
+			WMI_10_4_WLAN_PEER_CACHING_ADD_PEER_CMDID,
+	.wlan_peer_caching_evict_peer_cmdid =
+			WMI_10_4_WLAN_PEER_CACHING_EVICT_PEER_CMDID,
+	.wlan_peer_caching_restore_peer_cmdid =
+			WMI_10_4_WLAN_PEER_CACHING_RESTORE_PEER_CMDID,
+	.wlan_peer_caching_print_all_peers_info_cmdid =
+			WMI_10_4_WLAN_PEER_CACHING_PRINT_ALL_PEERS_INFO_CMDID,
+	.peer_update_wds_entry_cmdid = WMI_10_4_PEER_UPDATE_WDS_ENTRY_CMDID,
+	.peer_add_proxy_sta_entry_cmdid =
+			WMI_10_4_PEER_ADD_PROXY_STA_ENTRY_CMDID,
+	.rtt_keepalive_cmdid = WMI_10_4_RTT_KEEPALIVE_CMDID,
+	.oem_req_cmdid = WMI_10_4_OEM_REQ_CMDID,
+	.nan_cmdid = WMI_10_4_NAN_CMDID,
+	.vdev_ratemask_cmdid = WMI_10_4_VDEV_RATEMASK_CMDID,
+	.qboost_cfg_cmdid = WMI_10_4_QBOOST_CFG_CMDID,
+	.pdev_smart_ant_enable_cmdid = WMI_10_4_PDEV_SMART_ANT_ENABLE_CMDID,
+	.pdev_smart_ant_set_rx_antenna_cmdid =
+			WMI_10_4_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
+	.peer_smart_ant_set_tx_antenna_cmdid =
+			WMI_10_4_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
+	.peer_smart_ant_set_train_info_cmdid =
+			WMI_10_4_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
+	.peer_smart_ant_set_node_config_ops_cmdid =
+			WMI_10_4_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
+	.pdev_set_antenna_switch_table_cmdid =
+			WMI_10_4_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
+	.pdev_set_ctl_table_cmdid = WMI_10_4_PDEV_SET_CTL_TABLE_CMDID,
+	.pdev_set_mimogain_table_cmdid = WMI_10_4_PDEV_SET_MIMOGAIN_TABLE_CMDID,
+	.pdev_ratepwr_table_cmdid = WMI_10_4_PDEV_RATEPWR_TABLE_CMDID,
+	.pdev_ratepwr_chainmsk_table_cmdid =
+			WMI_10_4_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID,
+	.pdev_fips_cmdid = WMI_10_4_PDEV_FIPS_CMDID,
+	.tt_set_conf_cmdid = WMI_10_4_TT_SET_CONF_CMDID,
+	.fwtest_cmdid = WMI_10_4_FWTEST_CMDID,
+	.vdev_atf_request_cmdid = WMI_10_4_VDEV_ATF_REQUEST_CMDID,
+	.peer_atf_request_cmdid = WMI_10_4_PEER_ATF_REQUEST_CMDID,
+	.pdev_get_ani_cck_config_cmdid = WMI_10_4_PDEV_GET_ANI_CCK_CONFIG_CMDID,
+	.pdev_get_ani_ofdm_config_cmdid =
+			WMI_10_4_PDEV_GET_ANI_OFDM_CONFIG_CMDID,
+	.pdev_reserve_ast_entry_cmdid = WMI_10_4_PDEV_RESERVE_AST_ENTRY_CMDID,
+	.pdev_get_nfcal_power_cmdid = WMI_10_4_PDEV_GET_NFCAL_POWER_CMDID,
+	.pdev_get_tpc_cmdid = WMI_10_4_PDEV_GET_TPC_CMDID,
+	.pdev_get_ast_info_cmdid = WMI_10_4_PDEV_GET_AST_INFO_CMDID,
+	.vdev_set_dscp_tid_map_cmdid = WMI_10_4_VDEV_SET_DSCP_TID_MAP_CMDID,
+	.pdev_get_info_cmdid = WMI_10_4_PDEV_GET_INFO_CMDID,
+	.vdev_get_info_cmdid = WMI_10_4_VDEV_GET_INFO_CMDID,
+	.vdev_filter_neighbor_rx_packets_cmdid =
+			WMI_10_4_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID,
+	.mu_cal_start_cmdid = WMI_10_4_MU_CAL_START_CMDID,
+	.set_cca_params_cmdid = WMI_10_4_SET_CCA_PARAMS_CMDID,
+	.pdev_bss_chan_info_request_cmdid =
+			WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
+};
+
+/* MAIN WMI VDEV param map */
+static struct wmi_vdev_param_map wmi_vdev_param_map = {
+	.rts_threshold = WMI_VDEV_PARAM_RTS_THRESHOLD,
+	.fragmentation_threshold = WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+	.beacon_interval = WMI_VDEV_PARAM_BEACON_INTERVAL,
+	.listen_interval = WMI_VDEV_PARAM_LISTEN_INTERVAL,
+	.multicast_rate = WMI_VDEV_PARAM_MULTICAST_RATE,
+	.mgmt_tx_rate = WMI_VDEV_PARAM_MGMT_TX_RATE,
+	.slot_time = WMI_VDEV_PARAM_SLOT_TIME,
+	.preamble = WMI_VDEV_PARAM_PREAMBLE,
+	.swba_time = WMI_VDEV_PARAM_SWBA_TIME,
+	.wmi_vdev_stats_update_period = WMI_VDEV_STATS_UPDATE_PERIOD,
+	.wmi_vdev_pwrsave_ageout_time = WMI_VDEV_PWRSAVE_AGEOUT_TIME,
+	.wmi_vdev_host_swba_interval = WMI_VDEV_HOST_SWBA_INTERVAL,
+	.dtim_period = WMI_VDEV_PARAM_DTIM_PERIOD,
+	.wmi_vdev_oc_scheduler_air_time_limit =
+					WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+	.wds = WMI_VDEV_PARAM_WDS,
+	.atim_window = WMI_VDEV_PARAM_ATIM_WINDOW,
+	.bmiss_count_max = WMI_VDEV_PARAM_BMISS_COUNT_MAX,
+	.bmiss_first_bcnt = WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
+	.bmiss_final_bcnt = WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
+	.feature_wmm = WMI_VDEV_PARAM_FEATURE_WMM,
+	.chwidth = WMI_VDEV_PARAM_CHWIDTH,
+	.chextoffset = WMI_VDEV_PARAM_CHEXTOFFSET,
+	.disable_htprotection = WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
+	.sta_quickkickout = WMI_VDEV_PARAM_STA_QUICKKICKOUT,
+	.mgmt_rate = WMI_VDEV_PARAM_MGMT_RATE,
+	.protection_mode = WMI_VDEV_PARAM_PROTECTION_MODE,
+	.fixed_rate = WMI_VDEV_PARAM_FIXED_RATE,
+	.sgi = WMI_VDEV_PARAM_SGI,
+	.ldpc = WMI_VDEV_PARAM_LDPC,
+	.tx_stbc = WMI_VDEV_PARAM_TX_STBC,
+	.rx_stbc = WMI_VDEV_PARAM_RX_STBC,
+	.intra_bss_fwd = WMI_VDEV_PARAM_INTRA_BSS_FWD,
+	.def_keyid = WMI_VDEV_PARAM_DEF_KEYID,
+	.nss = WMI_VDEV_PARAM_NSS,
+	.bcast_data_rate = WMI_VDEV_PARAM_BCAST_DATA_RATE,
+	.mcast_data_rate = WMI_VDEV_PARAM_MCAST_DATA_RATE,
+	.mcast_indicate = WMI_VDEV_PARAM_MCAST_INDICATE,
+	.dhcp_indicate = WMI_VDEV_PARAM_DHCP_INDICATE,
+	.unknown_dest_indicate = WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+	.ap_keepalive_min_idle_inactive_time_secs =
+			WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+	.ap_keepalive_max_idle_inactive_time_secs =
+			WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+	.ap_keepalive_max_unresponsive_time_secs =
+			WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+	.ap_enable_nawds = WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
+	.mcast2ucast_set = WMI_VDEV_PARAM_UNSUPPORTED,
+	.enable_rtscts = WMI_VDEV_PARAM_ENABLE_RTSCTS,
+	.txbf = WMI_VDEV_PARAM_TXBF,
+	.packet_powersave = WMI_VDEV_PARAM_PACKET_POWERSAVE,
+	.drop_unencry = WMI_VDEV_PARAM_DROP_UNENCRY,
+	.tx_encap_type = WMI_VDEV_PARAM_TX_ENCAP_TYPE,
+	.ap_detect_out_of_sync_sleeping_sta_time_secs =
+					WMI_VDEV_PARAM_UNSUPPORTED,
+	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
+	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
+	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
+	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
+	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
+	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
+	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
+	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
+	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+};
+
+/* 10.X WMI VDEV param map */
+static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
+	.rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
+	.fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+	.beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
+	.listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
+	.multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
+	.mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
+	.slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
+	.preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
+	.swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
+	.wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
+	.wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
+	.wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
+	.dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
+	.wmi_vdev_oc_scheduler_air_time_limit =
+				WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+	.wds = WMI_10X_VDEV_PARAM_WDS,
+	.atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
+	.bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
+	.bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
+	.bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
+	.feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
+	.chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
+	.chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
+	.disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
+	.sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
+	.mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
+	.protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
+	.fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
+	.sgi = WMI_10X_VDEV_PARAM_SGI,
+	.ldpc = WMI_10X_VDEV_PARAM_LDPC,
+	.tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
+	.rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
+	.intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
+	.def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
+	.nss = WMI_10X_VDEV_PARAM_NSS,
+	.bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
+	.mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
+	.mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
+	.dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
+	.unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+	.ap_keepalive_min_idle_inactive_time_secs =
+		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+	.ap_keepalive_max_idle_inactive_time_secs =
+		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+	.ap_keepalive_max_unresponsive_time_secs =
+		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+	.ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
+	.mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
+	.enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
+	.txbf = WMI_VDEV_PARAM_UNSUPPORTED,
+	.packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
+	.drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
+	.tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+	.ap_detect_out_of_sync_sleeping_sta_time_secs =
+		WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
+	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
+	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
+	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
+	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
+	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
+	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
+	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
+	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+};
+
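+/* 10.2.4 WMI VDEV param map */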
+static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
+	.rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
+	.fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+	.beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
+	.listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
+	.multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
+	.mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
+	.slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
+	.preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
+	.swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
+	.wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
+	.wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
+	.wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
+	.dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
+	.wmi_vdev_oc_scheduler_air_time_limit =
+				WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+	.wds = WMI_10X_VDEV_PARAM_WDS,
+	.atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
+	.bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
+	.bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
+	.bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
+	.feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
+	.chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
+	.chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
+	.disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
+	.sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
+	.mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
+	.protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
+	.fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
+	.sgi = WMI_10X_VDEV_PARAM_SGI,
+	.ldpc = WMI_10X_VDEV_PARAM_LDPC,
+	.tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
+	.rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
+	.intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
+	.def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
+	.nss = WMI_10X_VDEV_PARAM_NSS,
+	.bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
+	.mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
+	.mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
+	.dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
+	.unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+	.ap_keepalive_min_idle_inactive_time_secs =
+		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+	.ap_keepalive_max_idle_inactive_time_secs =
+		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+	.ap_keepalive_max_unresponsive_time_secs =
+		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+	.ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
+	.mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
+	.enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
+	.txbf = WMI_VDEV_PARAM_UNSUPPORTED,
+	.packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
+	.drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
+	.tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+	.ap_detect_out_of_sync_sleeping_sta_time_secs =
+		WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
+	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
+	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
+	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
+	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
+	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
+	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
+	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
+	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+};
+
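+/* 10.4 WMI VDEV param map */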
+static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = {
+	.rts_threshold = WMI_10_4_VDEV_PARAM_RTS_THRESHOLD,
+	.fragmentation_threshold = WMI_10_4_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+	.beacon_interval = WMI_10_4_VDEV_PARAM_BEACON_INTERVAL,
+	.listen_interval = WMI_10_4_VDEV_PARAM_LISTEN_INTERVAL,
+	.multicast_rate = WMI_10_4_VDEV_PARAM_MULTICAST_RATE,
+	.mgmt_tx_rate = WMI_10_4_VDEV_PARAM_MGMT_TX_RATE,
+	.slot_time = WMI_10_4_VDEV_PARAM_SLOT_TIME,
+	.preamble = WMI_10_4_VDEV_PARAM_PREAMBLE,
+	.swba_time = WMI_10_4_VDEV_PARAM_SWBA_TIME,
+	.wmi_vdev_stats_update_period = WMI_10_4_VDEV_STATS_UPDATE_PERIOD,
+	.wmi_vdev_pwrsave_ageout_time = WMI_10_4_VDEV_PWRSAVE_AGEOUT_TIME,
+	.wmi_vdev_host_swba_interval = WMI_10_4_VDEV_HOST_SWBA_INTERVAL,
+	.dtim_period = WMI_10_4_VDEV_PARAM_DTIM_PERIOD,
+	.wmi_vdev_oc_scheduler_air_time_limit =
+	       WMI_10_4_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+	.wds = WMI_10_4_VDEV_PARAM_WDS,
+	.atim_window = WMI_10_4_VDEV_PARAM_ATIM_WINDOW,
+	.bmiss_count_max = WMI_10_4_VDEV_PARAM_BMISS_COUNT_MAX,
+	.bmiss_first_bcnt = WMI_10_4_VDEV_PARAM_BMISS_FIRST_BCNT,
+	.bmiss_final_bcnt = WMI_10_4_VDEV_PARAM_BMISS_FINAL_BCNT,
+	.feature_wmm = WMI_10_4_VDEV_PARAM_FEATURE_WMM,
+	.chwidth = WMI_10_4_VDEV_PARAM_CHWIDTH,
+	.chextoffset = WMI_10_4_VDEV_PARAM_CHEXTOFFSET,
+	.disable_htprotection = WMI_10_4_VDEV_PARAM_DISABLE_HTPROTECTION,
+	.sta_quickkickout = WMI_10_4_VDEV_PARAM_STA_QUICKKICKOUT,
+	.mgmt_rate = WMI_10_4_VDEV_PARAM_MGMT_RATE,
+	.protection_mode = WMI_10_4_VDEV_PARAM_PROTECTION_MODE,
+	.fixed_rate = WMI_10_4_VDEV_PARAM_FIXED_RATE,
+	.sgi = WMI_10_4_VDEV_PARAM_SGI,
+	.ldpc = WMI_10_4_VDEV_PARAM_LDPC,
+	.tx_stbc = WMI_10_4_VDEV_PARAM_TX_STBC,
+	.rx_stbc = WMI_10_4_VDEV_PARAM_RX_STBC,
+	.intra_bss_fwd = WMI_10_4_VDEV_PARAM_INTRA_BSS_FWD,
+	.def_keyid = WMI_10_4_VDEV_PARAM_DEF_KEYID,
+	.nss = WMI_10_4_VDEV_PARAM_NSS,
+	.bcast_data_rate = WMI_10_4_VDEV_PARAM_BCAST_DATA_RATE,
+	.mcast_data_rate = WMI_10_4_VDEV_PARAM_MCAST_DATA_RATE,
+	.mcast_indicate = WMI_10_4_VDEV_PARAM_MCAST_INDICATE,
+	.dhcp_indicate = WMI_10_4_VDEV_PARAM_DHCP_INDICATE,
+	.unknown_dest_indicate = WMI_10_4_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+	.ap_keepalive_min_idle_inactive_time_secs =
+	       WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+	.ap_keepalive_max_idle_inactive_time_secs =
+	       WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+	.ap_keepalive_max_unresponsive_time_secs =
+	       WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+	.ap_enable_nawds = WMI_10_4_VDEV_PARAM_AP_ENABLE_NAWDS,
+	.mcast2ucast_set = WMI_10_4_VDEV_PARAM_MCAST2UCAST_SET,
+	.enable_rtscts = WMI_10_4_VDEV_PARAM_ENABLE_RTSCTS,
+	.txbf = WMI_10_4_VDEV_PARAM_TXBF,
+	.packet_powersave = WMI_10_4_VDEV_PARAM_PACKET_POWERSAVE,
+	.drop_unencry = WMI_10_4_VDEV_PARAM_DROP_UNENCRY,
+	.tx_encap_type = WMI_10_4_VDEV_PARAM_TX_ENCAP_TYPE,
+	.ap_detect_out_of_sync_sleeping_sta_time_secs =
+	       WMI_10_4_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+	.rc_num_retries = WMI_10_4_VDEV_PARAM_RC_NUM_RETRIES,
+	.cabq_maxdur = WMI_10_4_VDEV_PARAM_CABQ_MAXDUR,
+	.mfptest_set = WMI_10_4_VDEV_PARAM_MFPTEST_SET,
+	.rts_fixed_rate = WMI_10_4_VDEV_PARAM_RTS_FIXED_RATE,
+	.vht_sgimask = WMI_10_4_VDEV_PARAM_VHT_SGIMASK,
+	.vht80_ratemask = WMI_10_4_VDEV_PARAM_VHT80_RATEMASK,
+	.early_rx_adjust_enable = WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
+	.early_rx_tgt_bmiss_num = WMI_10_4_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
+	.early_rx_bmiss_sample_cycle =
+	       WMI_10_4_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
+	.early_rx_slop_step = WMI_10_4_VDEV_PARAM_EARLY_RX_SLOP_STEP,
+	.early_rx_init_slop = WMI_10_4_VDEV_PARAM_EARLY_RX_INIT_SLOP,
+	.early_rx_adjust_pause = WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
+	.proxy_sta = WMI_10_4_VDEV_PARAM_PROXY_STA,
+	.meru_vc = WMI_10_4_VDEV_PARAM_MERU_VC,
+	.rx_decap_type = WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
+	.bw_nss_ratemask = WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
+};
+
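+/* MAIN WMI PDEV param map */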
+static struct wmi_pdev_param_map wmi_pdev_param_map = {
+	.tx_chain_mask = WMI_PDEV_PARAM_TX_CHAIN_MASK,
+	.rx_chain_mask = WMI_PDEV_PARAM_RX_CHAIN_MASK,
+	.txpower_limit2g = WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
+	.txpower_limit5g = WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
+	.txpower_scale = WMI_PDEV_PARAM_TXPOWER_SCALE,
+	.beacon_gen_mode = WMI_PDEV_PARAM_BEACON_GEN_MODE,
+	.beacon_tx_mode = WMI_PDEV_PARAM_BEACON_TX_MODE,
+	.resmgr_offchan_mode = WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+	.protection_mode = WMI_PDEV_PARAM_PROTECTION_MODE,
+	.dynamic_bw = WMI_PDEV_PARAM_DYNAMIC_BW,
+	.non_agg_sw_retry_th = WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+	.agg_sw_retry_th = WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
+	.sta_kickout_th = WMI_PDEV_PARAM_STA_KICKOUT_TH,
+	.ac_aggrsize_scaling = WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+	.ltr_enable = WMI_PDEV_PARAM_LTR_ENABLE,
+	.ltr_ac_latency_be = WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
+	.ltr_ac_latency_bk = WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
+	.ltr_ac_latency_vi = WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
+	.ltr_ac_latency_vo = WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
+	.ltr_ac_latency_timeout = WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+	.ltr_sleep_override = WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+	.ltr_rx_override = WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
+	.ltr_tx_activity_timeout = WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+	.l1ss_enable = WMI_PDEV_PARAM_L1SS_ENABLE,
+	.dsleep_enable = WMI_PDEV_PARAM_DSLEEP_ENABLE,
+	.pcielp_txbuf_flush = WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+	.pcielp_txbuf_watermark = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+	.pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+	.pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+	.pdev_stats_update_period = WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+	.vdev_stats_update_period = WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+	.peer_stats_update_period = WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+	.bcnflt_stats_update_period = WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+	.pmf_qos = WMI_PDEV_PARAM_PMF_QOS,
+	.arp_ac_override = WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
+	.dcs = WMI_PDEV_PARAM_DCS,
+	.ani_enable = WMI_PDEV_PARAM_ANI_ENABLE,
+	.ani_poll_period = WMI_PDEV_PARAM_ANI_POLL_PERIOD,
+	.ani_listen_period = WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
+	.ani_ofdm_level = WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
+	.ani_cck_level = WMI_PDEV_PARAM_ANI_CCK_LEVEL,
+	.dyntxchain = WMI_PDEV_PARAM_DYNTXCHAIN,
+	.proxy_sta = WMI_PDEV_PARAM_PROXY_STA,
+	.idle_ps_config = WMI_PDEV_PARAM_IDLE_PS_CONFIG,
+	.power_gating_sleep = WMI_PDEV_PARAM_POWER_GATING_SLEEP,
+	.fast_channel_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+	.burst_dur = WMI_PDEV_PARAM_UNSUPPORTED,
+	.burst_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+	.cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
+	.aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
+	.rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+	.smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
+	.igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
+	.igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
+	.rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+	.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+	.peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+	.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+	.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
+	.mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
+	.noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
+	.noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+	.dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
+	.atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
+	.atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
+	.ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
+	.mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
+	.sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
+	.signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
+	.signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
+	.enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
+	.enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
+	.cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+	.rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
+	.pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+	.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
+	.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+	.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+};
+
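+/* 10.X WMI PDEV param map */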
+static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
+	.tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
+	.rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
+	.txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
+	.txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
+	.txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
+	.beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
+	.beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
+	.resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+	.protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
+	.dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
+	.non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+	.agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
+	.sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
+	.ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+	.ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
+	.ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
+	.ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
+	.ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
+	.ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
+	.ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+	.ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+	.ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
+	.ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+	.l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
+	.dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
+	.pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
+	.pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
+	.pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
+	.pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
+	.pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+	.vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+	.peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+	.bcnflt_stats_update_period =
+				WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+	.pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
+	.arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
+	.dcs = WMI_10X_PDEV_PARAM_DCS,
+	.ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
+	.ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
+	.ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
+	.ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
+	.ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
+	.dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
+	.proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
+	.idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
+	.power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
+	.fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
+	.burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
+	.burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
+	.cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
+	.aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
+	.rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+	.smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
+	.igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
+	.igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
+	.rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+	.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+	.peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+	.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+	.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
+	.mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
+	.noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
+	.noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+	.dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
+	.atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
+	.atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
+	.ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
+	.mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
+	.sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
+	.signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
+	.signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
+	.enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
+	.enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
+	.cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+	.rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
+	.pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+	.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
+	.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+	.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+};
+
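+/* 10.2.4 WMI PDEV param map */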
+static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
+	.tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
+	.rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
+	.txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
+	.txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
+	.txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
+	.beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
+	.beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
+	.resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+	.protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
+	.dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
+	.non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+	.agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
+	.sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
+	.ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+	.ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
+	.ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
+	.ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
+	.ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
+	.ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
+	.ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+	.ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+	.ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
+	.ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+	.l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
+	.dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
+	.pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
+	.pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
+	.pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
+	.pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
+	.pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+	.vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+	.peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+	.bcnflt_stats_update_period =
+				WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+	.pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
+	.arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
+	.dcs = WMI_10X_PDEV_PARAM_DCS,
+	.ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
+	.ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
+	.ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
+	.ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
+	.ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
+	.dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
+	.proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
+	.idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
+	.power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
+	.fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
+	.burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
+	.burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
+	.cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
+	.aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
+	.rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+	.smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
+	.igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
+	.igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
+	.rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+	.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+	.peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+	.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+	.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
+	.mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
+	.noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
+	.noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+	.dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
+	.atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
+	.atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
+	.ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
+	.mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
+	.sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
+	.signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
+	.signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
+	.enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
+	.enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
+	.cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+	.rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
+	.pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+	.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
+	.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+	.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+};
+
+/* firmware 10.2 specific mappings */
+static struct wmi_cmd_map wmi_10_2_cmd_map = {
+	.init_cmdid = WMI_10_2_INIT_CMDID,
+	.start_scan_cmdid = WMI_10_2_START_SCAN_CMDID,
+	.stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
+	.scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
+	.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
+	.pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
+	.pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
+	.pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
+	.pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
+	.pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
+	.pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
+	.pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
+	.pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
+	.pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+	.pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
+	.pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
+	.vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID,
+	.vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID,
+	.vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID,
+	.vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
+	.vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID,
+	.vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID,
+	.vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID,
+	.vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID,
+	.vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID,
+	.peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID,
+	.peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID,
+	.peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID,
+	.peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID,
+	.peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID,
+	.peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
+	.peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
+	.peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID,
+	.bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID,
+	.pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID,
+	.bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
+	.bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID,
+	.prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
+	.mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID,
+	.prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
+	.addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
+	.addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID,
+	.addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID,
+	.delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID,
+	.addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID,
+	.send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID,
+	.sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID,
+	.sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
+	.sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID,
+	.pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID,
+	.pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID,
+	.roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE,
+	.roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
+	.roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD,
+	.roam_scan_rssi_change_threshold =
+				WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+	.roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE,
+	.ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
+	.ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
+	.ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD,
+	.p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
+	.p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
+	.p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE,
+	.p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
+	.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
+	.ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID,
+	.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
+	.wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
+	.wlan_profile_set_hist_intvl_cmdid =
+				WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+	.wlan_profile_get_profile_data_cmdid =
+				WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+	.wlan_profile_enable_profile_id_cmdid =
+				WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+	.wlan_profile_list_profile_id_cmdid =
+				WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+	.pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID,
+	.pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID,
+	.add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID,
+	.rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID,
+	.wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
+	.wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
+	.wow_enable_disable_wake_event_cmdid =
+				WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+	.wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID,
+	.wow_hostwakeup_from_sleep_cmdid =
+				WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+	.rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID,
+	.rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID,
+	.vdev_spectral_scan_configure_cmdid =
+				WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+	.vdev_spectral_scan_enable_cmdid =
+				WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+	.request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID,
+	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
+	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
+	.gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
+	.csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
+	.csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
+	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
+	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
+	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
+	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
+	.echo_cmdid = WMI_10_2_ECHO_CMDID,
+	.pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID,
+	.dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID,
+	.pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID,
+	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+	.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
+	.gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
+	.gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
+	.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED,
+	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+	.nan_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
+};
+
+static struct wmi_pdev_param_map wmi_10_4_pdev_param_map = {
+	.tx_chain_mask = WMI_10_4_PDEV_PARAM_TX_CHAIN_MASK,
+	.rx_chain_mask = WMI_10_4_PDEV_PARAM_RX_CHAIN_MASK,
+	.txpower_limit2g = WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT2G,
+	.txpower_limit5g = WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT5G,
+	.txpower_scale = WMI_10_4_PDEV_PARAM_TXPOWER_SCALE,
+	.beacon_gen_mode = WMI_10_4_PDEV_PARAM_BEACON_GEN_MODE,
+	.beacon_tx_mode = WMI_10_4_PDEV_PARAM_BEACON_TX_MODE,
+	.resmgr_offchan_mode = WMI_10_4_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+	.protection_mode = WMI_10_4_PDEV_PARAM_PROTECTION_MODE,
+	.dynamic_bw = WMI_10_4_PDEV_PARAM_DYNAMIC_BW,
+	.non_agg_sw_retry_th = WMI_10_4_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+	.agg_sw_retry_th = WMI_10_4_PDEV_PARAM_AGG_SW_RETRY_TH,
+	.sta_kickout_th = WMI_10_4_PDEV_PARAM_STA_KICKOUT_TH,
+	.ac_aggrsize_scaling = WMI_10_4_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+	.ltr_enable = WMI_10_4_PDEV_PARAM_LTR_ENABLE,
+	.ltr_ac_latency_be = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BE,
+	.ltr_ac_latency_bk = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BK,
+	.ltr_ac_latency_vi = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VI,
+	.ltr_ac_latency_vo = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VO,
+	.ltr_ac_latency_timeout = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+	.ltr_sleep_override = WMI_10_4_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+	.ltr_rx_override = WMI_10_4_PDEV_PARAM_LTR_RX_OVERRIDE,
+	.ltr_tx_activity_timeout = WMI_10_4_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+	.l1ss_enable = WMI_10_4_PDEV_PARAM_L1SS_ENABLE,
+	.dsleep_enable = WMI_10_4_PDEV_PARAM_DSLEEP_ENABLE,
+	.pcielp_txbuf_flush = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+	.pcielp_txbuf_watermark = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+	.pcielp_txbuf_tmo_en = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+	.pcielp_txbuf_tmo_value = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+	.pdev_stats_update_period =
+			WMI_10_4_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+	.vdev_stats_update_period =
+			WMI_10_4_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+	.peer_stats_update_period =
+			WMI_10_4_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+	.bcnflt_stats_update_period =
+			WMI_10_4_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+	.pmf_qos = WMI_10_4_PDEV_PARAM_PMF_QOS,
+	.arp_ac_override = WMI_10_4_PDEV_PARAM_ARP_AC_OVERRIDE,
+	.dcs = WMI_10_4_PDEV_PARAM_DCS,
+	.ani_enable = WMI_10_4_PDEV_PARAM_ANI_ENABLE,
+	.ani_poll_period = WMI_10_4_PDEV_PARAM_ANI_POLL_PERIOD,
+	.ani_listen_period = WMI_10_4_PDEV_PARAM_ANI_LISTEN_PERIOD,
+	.ani_ofdm_level = WMI_10_4_PDEV_PARAM_ANI_OFDM_LEVEL,
+	.ani_cck_level = WMI_10_4_PDEV_PARAM_ANI_CCK_LEVEL,
+	.dyntxchain = WMI_10_4_PDEV_PARAM_DYNTXCHAIN,
+	.proxy_sta = WMI_10_4_PDEV_PARAM_PROXY_STA,
+	.idle_ps_config = WMI_10_4_PDEV_PARAM_IDLE_PS_CONFIG,
+	.power_gating_sleep = WMI_10_4_PDEV_PARAM_POWER_GATING_SLEEP,
+	.fast_channel_reset = WMI_10_4_PDEV_PARAM_FAST_CHANNEL_RESET,
+	.burst_dur = WMI_10_4_PDEV_PARAM_BURST_DUR,
+	.burst_enable = WMI_10_4_PDEV_PARAM_BURST_ENABLE,
+	.cal_period = WMI_10_4_PDEV_PARAM_CAL_PERIOD,
+	.aggr_burst = WMI_10_4_PDEV_PARAM_AGGR_BURST,
+	.rx_decap_mode = WMI_10_4_PDEV_PARAM_RX_DECAP_MODE,
+	.smart_antenna_default_antenna =
+			WMI_10_4_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
+	.igmpmld_override = WMI_10_4_PDEV_PARAM_IGMPMLD_OVERRIDE,
+	.igmpmld_tid = WMI_10_4_PDEV_PARAM_IGMPMLD_TID,
+	.antenna_gain = WMI_10_4_PDEV_PARAM_ANTENNA_GAIN,
+	.rx_filter = WMI_10_4_PDEV_PARAM_RX_FILTER,
+	.set_mcast_to_ucast_tid = WMI_10_4_PDEV_SET_MCAST_TO_UCAST_TID,
+	.proxy_sta_mode = WMI_10_4_PDEV_PARAM_PROXY_STA_MODE,
+	.set_mcast2ucast_mode = WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_MODE,
+	.set_mcast2ucast_buffer = WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
+	.remove_mcast2ucast_buffer =
+			WMI_10_4_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
+	.peer_sta_ps_statechg_enable =
+			WMI_10_4_PDEV_PEER_STA_PS_STATECHG_ENABLE,
+	.igmpmld_ac_override = WMI_10_4_PDEV_PARAM_IGMPMLD_AC_OVERRIDE,
+	.block_interbss = WMI_10_4_PDEV_PARAM_BLOCK_INTERBSS,
+	.set_disable_reset_cmdid = WMI_10_4_PDEV_PARAM_SET_DISABLE_RESET_CMDID,
+	.set_msdu_ttl_cmdid = WMI_10_4_PDEV_PARAM_SET_MSDU_TTL_CMDID,
+	.set_ppdu_duration_cmdid = WMI_10_4_PDEV_PARAM_SET_PPDU_DURATION_CMDID,
+	.txbf_sound_period_cmdid = WMI_10_4_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID,
+	.set_promisc_mode_cmdid = WMI_10_4_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
+	.set_burst_mode_cmdid = WMI_10_4_PDEV_PARAM_SET_BURST_MODE_CMDID,
+	.en_stats = WMI_10_4_PDEV_PARAM_EN_STATS,
+	.mu_group_policy = WMI_10_4_PDEV_PARAM_MU_GROUP_POLICY,
+	.noise_detection = WMI_10_4_PDEV_PARAM_NOISE_DETECTION,
+	.noise_threshold = WMI_10_4_PDEV_PARAM_NOISE_THRESHOLD,
+	.dpd_enable = WMI_10_4_PDEV_PARAM_DPD_ENABLE,
+	.set_mcast_bcast_echo = WMI_10_4_PDEV_PARAM_SET_MCAST_BCAST_ECHO,
+	.atf_strict_sch = WMI_10_4_PDEV_PARAM_ATF_STRICT_SCH,
+	.atf_sched_duration = WMI_10_4_PDEV_PARAM_ATF_SCHED_DURATION,
+	.ant_plzn = WMI_10_4_PDEV_PARAM_ANT_PLZN,
+	.mgmt_retry_limit = WMI_10_4_PDEV_PARAM_MGMT_RETRY_LIMIT,
+	.sensitivity_level = WMI_10_4_PDEV_PARAM_SENSITIVITY_LEVEL,
+	.signed_txpower_2g = WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_2G,
+	.signed_txpower_5g = WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_5G,
+	.enable_per_tid_amsdu = WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMSDU,
+	.enable_per_tid_ampdu = WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMPDU,
+	.cca_threshold = WMI_10_4_PDEV_PARAM_CCA_THRESHOLD,
+	.rts_fixed_rate = WMI_10_4_PDEV_PARAM_RTS_FIXED_RATE,
+	.pdev_reset = WMI_10_4_PDEV_PARAM_PDEV_RESET,
+	.wapi_mbssid_offset = WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET,
+	.arp_srcaddr = WMI_10_4_PDEV_PARAM_ARP_SRCADDR,
+	.arp_dstaddr = WMI_10_4_PDEV_PARAM_ARP_DSTADDR,
+};
+
+void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
+				const struct wmi_channel_arg *arg)
+{
+	u32 flags = 0;
+
+	memset(ch, 0, sizeof(*ch));
+
+	if (arg->passive)
+		flags |= WMI_CHAN_FLAG_PASSIVE;
+	if (arg->allow_ibss)
+		flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
+	if (arg->allow_ht)
+		flags |= WMI_CHAN_FLAG_ALLOW_HT;
+	if (arg->allow_vht)
+		flags |= WMI_CHAN_FLAG_ALLOW_VHT;
+	if (arg->ht40plus)
+		flags |= WMI_CHAN_FLAG_HT40_PLUS;
+	if (arg->chan_radar)
+		flags |= WMI_CHAN_FLAG_DFS;
+
+	ch->mhz = __cpu_to_le32(arg->freq);
+	ch->band_center_freq1 = __cpu_to_le32(arg->band_center_freq1);
+	ch->band_center_freq2 = 0;
+	ch->min_power = arg->min_power;
+	ch->max_power = arg->max_power;
+	ch->reg_power = arg->max_reg_power;
+	ch->antenna_max = arg->max_antenna_gain;
+
+	/* mode & flags share storage */
+	ch->mode = arg->mode;
+	ch->flags |= __cpu_to_le32(flags);
+}
+
+int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
+{
+	unsigned long time_left;
+
+	time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
+						WMI_SERVICE_READY_TIMEOUT_HZ);
+	if (!time_left)
+		return -ETIMEDOUT;
+	return 0;
+}
+
+int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
+{
+	unsigned long time_left;
+
+	time_left = wait_for_completion_timeout(&ar->wmi.unified_ready,
+						WMI_UNIFIED_READY_TIMEOUT_HZ);
+	if (!time_left)
+		return -ETIMEDOUT;
+	return 0;
+}
+
+struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len)
+{
+	struct sk_buff *skb;
+	u32 round_len = roundup(len, 4);
+
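+	/* WMI command buffers are padded to a 4-byte boundary; the extra
+	 * headroom is for the WMI command header that gets pushed later in
+	 * ath10k_wmi_cmd_send_nowait().
+	 */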
+	skb = ath10k_htc_alloc_skb(ar, WMI_SKB_HEADROOM + round_len);
+	if (!skb)
+		return NULL;
+
+	skb_reserve(skb, WMI_SKB_HEADROOM);
+	if (!IS_ALIGNED((unsigned long)skb->data, 4))
+		ath10k_warn(ar, "Unaligned WMI skb\n");
+
+	skb_put(skb, round_len);
+	memset(skb->data, 0, round_len);
+
+	return skb;
+}
+
+static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
+{
+	dev_kfree_skb(skb);
+}
+
+int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
+			       u32 cmd_id)
+{
+	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
+	struct wmi_cmd_hdr *cmd_hdr;
+	int ret;
+	u32 cmd = 0;
+
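+	/* Prepend the WMI command header in front of the payload; headroom
+	 * for it was reserved in ath10k_wmi_alloc_skb().
+	 */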
+	if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
+		return -ENOMEM;
+
+	cmd |= SM(cmd_id, WMI_CMD_HDR_CMD_ID);
+
+	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+	cmd_hdr->cmd_id = __cpu_to_le32(cmd);
+
+	memset(skb_cb, 0, sizeof(*skb_cb));
+	ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
+	trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len, ret);
+
+	if (ret)
+		goto err_pull;
+
+	return 0;
+
+err_pull:
+	skb_pull(skb, sizeof(struct wmi_cmd_hdr));
+	return ret;
+}
+
+static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
+{
+	struct ath10k *ar = arvif->ar;
+	struct ath10k_skb_cb *cb;
+	struct sk_buff *bcn;
+	int ret;
+
+	spin_lock_bh(&ar->data_lock);
+
+	bcn = arvif->beacon;
+
+	if (!bcn)
+		goto unlock;
+
+	cb = ATH10K_SKB_CB(bcn);
+
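+	/* Beacon state moves SCHEDULED -> SENDING -> SENT. The data_lock is
+	 * dropped around the WMI send below, so SENDING marks the buffer as
+	 * in flight for the concurrent SWBA handler.
+	 */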
+	switch (arvif->beacon_state) {
+	case ATH10K_BEACON_SENDING:
+	case ATH10K_BEACON_SENT:
+		break;
+	case ATH10K_BEACON_SCHEDULED:
+		arvif->beacon_state = ATH10K_BEACON_SENDING;
+		spin_unlock_bh(&ar->data_lock);
+
+		ret = ath10k_wmi_beacon_send_ref_nowait(arvif->ar,
+							arvif->vdev_id,
+							bcn->data, bcn->len,
+							cb->paddr,
+							cb->bcn.dtim_zero,
+							cb->bcn.deliver_cab);
+
+		spin_lock_bh(&ar->data_lock);
+
+		if (ret == 0)
+			arvif->beacon_state = ATH10K_BEACON_SENT;
+		else
+			arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
+	}
+
+unlock:
+	spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
+				       struct ieee80211_vif *vif)
+{
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+	ath10k_wmi_tx_beacon_nowait(arvif);
+}
+
+static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar)
+{
+	ieee80211_iterate_active_interfaces_atomic(ar->hw,
+						   IEEE80211_IFACE_ITER_NORMAL,
+						   ath10k_wmi_tx_beacons_iter,
+						   NULL);
+}
+
+static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar)
+{
+	/* try to send pending beacons first. they take priority */
+	ath10k_wmi_tx_beacons_nowait(ar);
+
+	wake_up(&ar->wmi.tx_credits_wq);
+}
+
+int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
+{
+	int ret = -EOPNOTSUPP;
+
+	might_sleep();
+
+	if (cmd_id == WMI_CMD_UNSUPPORTED) {
+		ath10k_warn(ar, "wmi command %d is not supported by firmware\n",
+			    cmd_id);
+		return ret;
+	}
+
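+	/* HTC TX credits may be exhausted; keep retrying (flushing pending
+	 * beacons first, as they take priority) until the send stops
+	 * returning -EAGAIN or the 3 second timeout expires.
+	 */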
+	wait_event_timeout(ar->wmi.tx_credits_wq, ({
+		/* try to send pending beacons first. they take priority */
+		ath10k_wmi_tx_beacons_nowait(ar);
+
+		ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id);
+
+		if (ret && test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
+			ret = -ESHUTDOWN;
+
+		(ret != -EAGAIN);
+	}), 3*HZ);
+
+	if (ret)
+		dev_kfree_skb_any(skb);
+
+	return ret;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
+{
+	struct wmi_mgmt_tx_cmd *cmd;
+	struct ieee80211_hdr *hdr;
+	struct sk_buff *skb;
+	int len;
+	u32 buf_len = msdu->len;
+	u16 fc;
+
+	hdr = (struct ieee80211_hdr *)msdu->data;
+	fc = le16_to_cpu(hdr->frame_control);
+
+	if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
+		return ERR_PTR(-EINVAL);
+
+	len = sizeof(cmd->hdr) + msdu->len;
+
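+	/* Protected robust management frames (action/deauth/disassoc) are
+	 * encrypted by firmware, so leave room for the CCMP MIC that will be
+	 * appended.
+	 */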
+	if ((ieee80211_is_action(hdr->frame_control) ||
+	     ieee80211_is_deauth(hdr->frame_control) ||
+	     ieee80211_is_disassoc(hdr->frame_control)) &&
+	     ieee80211_has_protected(hdr->frame_control)) {
+		len += IEEE80211_CCMP_MIC_LEN;
+		buf_len += IEEE80211_CCMP_MIC_LEN;
+	}
+
+	len = round_up(len, 4);
+
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_mgmt_tx_cmd *)skb->data;
+
+	cmd->hdr.vdev_id = __cpu_to_le32(ATH10K_SKB_CB(msdu)->vdev_id);
+	cmd->hdr.tx_rate = 0;
+	cmd->hdr.tx_power = 0;
+	cmd->hdr.buf_len = __cpu_to_le32(buf_len);
+
+	ether_addr_copy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr));
+	memcpy(cmd->buf, msdu->data, msdu->len);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
+		   msdu, skb->len, fc & IEEE80211_FCTL_FTYPE,
+		   fc & IEEE80211_FCTL_STYPE);
+	trace_ath10k_tx_hdr(ar, skb->data, skb->len);
+	trace_ath10k_tx_payload(ar, skb->data, skb->len);
+
+	return skb;
+}
+
+static void ath10k_wmi_event_scan_started(struct ath10k *ar)
+{
+	lockdep_assert_held(&ar->data_lock);
+
+	switch (ar->scan.state) {
+	case ATH10K_SCAN_IDLE:
+	case ATH10K_SCAN_RUNNING:
+	case ATH10K_SCAN_ABORTING:
+		ath10k_warn(ar, "received scan started event in an invalid scan state: %s (%d)\n",
+			    ath10k_scan_state_str(ar->scan.state),
+			    ar->scan.state);
+		break;
+	case ATH10K_SCAN_STARTING:
+		ar->scan.state = ATH10K_SCAN_RUNNING;
+
+		if (ar->scan.is_roc)
+			ieee80211_ready_on_channel(ar->hw);
+
+		complete(&ar->scan.started);
+		break;
+	}
+}
+
+static void ath10k_wmi_event_scan_start_failed(struct ath10k *ar)
+{
+	lockdep_assert_held(&ar->data_lock);
+
+	switch (ar->scan.state) {
+	case ATH10K_SCAN_IDLE:
+	case ATH10K_SCAN_RUNNING:
+	case ATH10K_SCAN_ABORTING:
+		ath10k_warn(ar, "received scan start failed event in an invalid scan state: %s (%d)\n",
+			    ath10k_scan_state_str(ar->scan.state),
+			    ar->scan.state);
+		break;
+	case ATH10K_SCAN_STARTING:
+		complete(&ar->scan.started);
+		__ath10k_scan_finish(ar);
+		break;
+	}
+}
+
+static void ath10k_wmi_event_scan_completed(struct ath10k *ar)
+{
+	lockdep_assert_held(&ar->data_lock);
+
+	switch (ar->scan.state) {
+	case ATH10K_SCAN_IDLE:
+	case ATH10K_SCAN_STARTING:
+		/* One suspected reason scan can be completed while starting is
+		 * if firmware fails to deliver all scan events to the host,
+		 * e.g. when transport pipe is full. This has been observed
+		 * with spectral scan phyerr events starving wmi transport
+		 * pipe. In such a case the "scan completed" event should be
+		 * (and is) ignored by the host as it may just be the
+		 * firmware's scan state machine recovering.
+		 */
+		ath10k_warn(ar, "received scan completed event in an invalid scan state: %s (%d)\n",
+			    ath10k_scan_state_str(ar->scan.state),
+			    ar->scan.state);
+		break;
+	case ATH10K_SCAN_RUNNING:
+	case ATH10K_SCAN_ABORTING:
+		__ath10k_scan_finish(ar);
+		break;
+	}
+}
+
+static void ath10k_wmi_event_scan_bss_chan(struct ath10k *ar)
+{
+	lockdep_assert_held(&ar->data_lock);
+
+	switch (ar->scan.state) {
+	case ATH10K_SCAN_IDLE:
+	case ATH10K_SCAN_STARTING:
+		ath10k_warn(ar, "received scan bss chan event in an invalid scan state: %s (%d)\n",
+			    ath10k_scan_state_str(ar->scan.state),
+			    ar->scan.state);
+		break;
+	case ATH10K_SCAN_RUNNING:
+	case ATH10K_SCAN_ABORTING:
+		ar->scan_channel = NULL;
+		break;
+	}
+}
+
+static void ath10k_wmi_event_scan_foreign_chan(struct ath10k *ar, u32 freq)
+{
+	lockdep_assert_held(&ar->data_lock);
+
+	switch (ar->scan.state) {
+	case ATH10K_SCAN_IDLE:
+	case ATH10K_SCAN_STARTING:
+		ath10k_warn(ar, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
+			    ath10k_scan_state_str(ar->scan.state),
+			    ar->scan.state);
+		break;
+	case ATH10K_SCAN_RUNNING:
+	case ATH10K_SCAN_ABORTING:
+		ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
+
+		if (ar->scan.is_roc && ar->scan.roc_freq == freq)
+			complete(&ar->scan.on_channel);
+		break;
+	}
+}
+
+static const char *
+ath10k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
+			       enum wmi_scan_completion_reason reason)
+{
+	switch (type) {
+	case WMI_SCAN_EVENT_STARTED:
+		return "started";
+	case WMI_SCAN_EVENT_COMPLETED:
+		switch (reason) {
+		case WMI_SCAN_REASON_COMPLETED:
+			return "completed";
+		case WMI_SCAN_REASON_CANCELLED:
+			return "completed [cancelled]";
+		case WMI_SCAN_REASON_PREEMPTED:
+			return "completed [preempted]";
+		case WMI_SCAN_REASON_TIMEDOUT:
+			return "completed [timedout]";
+		case WMI_SCAN_REASON_INTERNAL_FAILURE:
+			return "completed [internal err]";
+		case WMI_SCAN_REASON_MAX:
+			break;
+		}
+		return "completed [unknown]";
+	case WMI_SCAN_EVENT_BSS_CHANNEL:
+		return "bss channel";
+	case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
+		return "foreign channel";
+	case WMI_SCAN_EVENT_DEQUEUED:
+		return "dequeued";
+	case WMI_SCAN_EVENT_PREEMPTED:
+		return "preempted";
+	case WMI_SCAN_EVENT_START_FAILED:
+		return "start failed";
+	case WMI_SCAN_EVENT_RESTARTED:
+		return "restarted";
+	case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT:
+		return "foreign channel exit";
+	default:
+		return "unknown";
+	}
+}
+
+static int ath10k_wmi_op_pull_scan_ev(struct ath10k *ar, struct sk_buff *skb,
+				      struct wmi_scan_ev_arg *arg)
+{
+	struct wmi_scan_event *ev = (void *)skb->data;
+
+	if (skb->len < sizeof(*ev))
+		return -EPROTO;
+
+	skb_pull(skb, sizeof(*ev));
+	arg->event_type = ev->event_type;
+	arg->reason = ev->reason;
+	arg->channel_freq = ev->channel_freq;
+	arg->scan_req_id = ev->scan_req_id;
+	arg->scan_id = ev->scan_id;
+	arg->vdev_id = ev->vdev_id;
+
+	return 0;
+}
+
+int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
+{
+	struct wmi_scan_ev_arg arg = {};
+	enum wmi_scan_event_type event_type;
+	enum wmi_scan_completion_reason reason;
+	u32 freq;
+	u32 req_id;
+	u32 scan_id;
+	u32 vdev_id;
+	int ret;
+
+	ret = ath10k_wmi_pull_scan(ar, skb, &arg);
+	if (ret) {
+		ath10k_warn(ar, "failed to parse scan event: %d\n", ret);
+		return ret;
+	}
+
+	event_type = __le32_to_cpu(arg.event_type);
+	reason = __le32_to_cpu(arg.reason);
+	freq = __le32_to_cpu(arg.channel_freq);
+	req_id = __le32_to_cpu(arg.scan_req_id);
+	scan_id = __le32_to_cpu(arg.scan_id);
+	vdev_id = __le32_to_cpu(arg.vdev_id);
+
+	spin_lock_bh(&ar->data_lock);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
+		   ath10k_wmi_event_scan_type_str(event_type, reason),
+		   event_type, reason, freq, req_id, scan_id, vdev_id,
+		   ath10k_scan_state_str(ar->scan.state), ar->scan.state);
+
+	switch (event_type) {
+	case WMI_SCAN_EVENT_STARTED:
+		ath10k_wmi_event_scan_started(ar);
+		break;
+	case WMI_SCAN_EVENT_COMPLETED:
+		ath10k_wmi_event_scan_completed(ar);
+		break;
+	case WMI_SCAN_EVENT_BSS_CHANNEL:
+		ath10k_wmi_event_scan_bss_chan(ar);
+		break;
+	case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
+		ath10k_wmi_event_scan_foreign_chan(ar, freq);
+		break;
+	case WMI_SCAN_EVENT_START_FAILED:
+		ath10k_warn(ar, "received scan start failure event\n");
+		ath10k_wmi_event_scan_start_failed(ar);
+		break;
+	case WMI_SCAN_EVENT_DEQUEUED:
+	case WMI_SCAN_EVENT_PREEMPTED:
+	case WMI_SCAN_EVENT_RESTARTED:
+	case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT:
+	default:
+		break;
+	}
+
+	spin_unlock_bh(&ar->data_lock);
+	return 0;
+}
+
+static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode)
+{
+	enum ieee80211_band band;
+
+	switch (phy_mode) {
+	case MODE_11A:
+	case MODE_11NA_HT20:
+	case MODE_11NA_HT40:
+	case MODE_11AC_VHT20:
+	case MODE_11AC_VHT40:
+	case MODE_11AC_VHT80:
+		band = IEEE80211_BAND_5GHZ;
+		break;
+	case MODE_11G:
+	case MODE_11B:
+	case MODE_11GONLY:
+	case MODE_11NG_HT20:
+	case MODE_11NG_HT40:
+	case MODE_11AC_VHT20_2G:
+	case MODE_11AC_VHT40_2G:
+	case MODE_11AC_VHT80_2G:
+	default:
+		band = IEEE80211_BAND_2GHZ;
+	}
+
+	return band;
+}
+
+/* If keys are configured, HW decrypts all frames
+ * with protected bit set. Mark such frames as decrypted.
+ */
+static void ath10k_wmi_handle_wep_reauth(struct ath10k *ar,
+					 struct sk_buff *skb,
+					 struct ieee80211_rx_status *status)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	unsigned int hdrlen;
+	bool peer_key;
+	u8 *addr, keyidx;
+
+	if (!ieee80211_is_auth(hdr->frame_control) ||
+	    !ieee80211_has_protected(hdr->frame_control))
+		return;
+
+	hdrlen = ieee80211_hdrlen(hdr->frame_control);
+	if (skb->len < (hdrlen + IEEE80211_WEP_IV_LEN))
+		return;
+
+	keyidx = skb->data[hdrlen + (IEEE80211_WEP_IV_LEN - 1)] >> WEP_KEYID_SHIFT;
+	addr = ieee80211_get_SA(hdr);
+
+	spin_lock_bh(&ar->data_lock);
+	peer_key = ath10k_mac_is_peer_wep_key_set(ar, addr, keyidx);
+	spin_unlock_bh(&ar->data_lock);
+
+	if (peer_key) {
+		ath10k_dbg(ar, ATH10K_DBG_MAC,
+			   "mac wep key present for peer %pM\n", addr);
+		status->flag |= RX_FLAG_DECRYPTED;
+	}
+}
+
+static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb,
+					 struct wmi_mgmt_rx_ev_arg *arg)
+{
+	struct wmi_mgmt_rx_event_v1 *ev_v1;
+	struct wmi_mgmt_rx_event_v2 *ev_v2;
+	struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
+	size_t pull_len;
+	u32 msdu_len;
+
+	if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) {
+		ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
+		ev_hdr = &ev_v2->hdr.v1;
+		pull_len = sizeof(*ev_v2);
+	} else {
+		ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data;
+		ev_hdr = &ev_v1->hdr;
+		pull_len = sizeof(*ev_v1);
+	}
+
+	if (skb->len < pull_len)
+		return -EPROTO;
+
+	skb_pull(skb, pull_len);
+	arg->channel = ev_hdr->channel;
+	arg->buf_len = ev_hdr->buf_len;
+	arg->status = ev_hdr->status;
+	arg->snr = ev_hdr->snr;
+	arg->phy_mode = ev_hdr->phy_mode;
+	arg->rate = ev_hdr->rate;
+
+	msdu_len = __le32_to_cpu(arg->buf_len);
+	if (skb->len < msdu_len)
+		return -EPROTO;
+
+	/* the WMI buffer might've ended up being padded to 4 bytes due to HTC
+	 * trailer with credit update. Trim the excess garbage.
+	 */
+	skb_trim(skb, msdu_len);
+
+	return 0;
+}
+
+static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar,
+					      struct sk_buff *skb,
+					      struct wmi_mgmt_rx_ev_arg *arg)
+{
+	struct wmi_10_4_mgmt_rx_event *ev;
+	struct wmi_10_4_mgmt_rx_hdr *ev_hdr;
+	size_t pull_len;
+	u32 msdu_len;
+
+	ev = (struct wmi_10_4_mgmt_rx_event *)skb->data;
+	ev_hdr = &ev->hdr;
+	pull_len = sizeof(*ev);
+
+	if (skb->len < pull_len)
+		return -EPROTO;
+
+	skb_pull(skb, pull_len);
+	arg->channel = ev_hdr->channel;
+	arg->buf_len = ev_hdr->buf_len;
+	arg->status = ev_hdr->status;
+	arg->snr = ev_hdr->snr;
+	arg->phy_mode = ev_hdr->phy_mode;
+	arg->rate = ev_hdr->rate;
+
+	msdu_len = __le32_to_cpu(arg->buf_len);
+	if (skb->len < msdu_len)
+		return -EPROTO;
+
+	/* Make sure bytes added for padding are removed. */
+	skb_trim(skb, msdu_len);
+
+	return 0;
+}
+
+int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+	struct wmi_mgmt_rx_ev_arg arg = {};
+	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+	struct ieee80211_hdr *hdr;
+	struct ieee80211_supported_band *sband;
+	u32 rx_status;
+	u32 channel;
+	u32 phy_mode;
+	u32 snr;
+	u32 rate;
+	u32 buf_len;
+	u16 fc;
+	int ret;
+
+	ret = ath10k_wmi_pull_mgmt_rx(ar, skb, &arg);
+	if (ret) {
+		ath10k_warn(ar, "failed to parse mgmt rx event: %d\n", ret);
+		dev_kfree_skb(skb);
+		return ret;
+	}
+
+	channel = __le32_to_cpu(arg.channel);
+	buf_len = __le32_to_cpu(arg.buf_len);
+	rx_status = __le32_to_cpu(arg.status);
+	snr = __le32_to_cpu(arg.snr);
+	phy_mode = __le32_to_cpu(arg.phy_mode);
+	rate = __le32_to_cpu(arg.rate);
+
+	memset(status, 0, sizeof(*status));
+
+	ath10k_dbg(ar, ATH10K_DBG_MGMT,
+		   "event mgmt rx status %08x\n", rx_status);
+
+	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
+		dev_kfree_skb(skb);
+		return 0;
+	}
+
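+	/* Drop frames that firmware flagged as failing decryption, missing a
+	 * key cache entry or having a bad CRC.
+	 */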
+	if (rx_status & WMI_RX_STATUS_ERR_DECRYPT) {
+		dev_kfree_skb(skb);
+		return 0;
+	}
+
+	if (rx_status & WMI_RX_STATUS_ERR_KEY_CACHE_MISS) {
+		dev_kfree_skb(skb);
+		return 0;
+	}
+
+	if (rx_status & WMI_RX_STATUS_ERR_CRC) {
+		dev_kfree_skb(skb);
+		return 0;
+	}
+
+	if (rx_status & WMI_RX_STATUS_ERR_MIC)
+		status->flag |= RX_FLAG_MMIC_ERROR;
+
+	/* Hardware can Rx CCK rates on 5GHz. In that case phy_mode is set to
+	 * MODE_11B. This means phy_mode is not a reliable source for the band
+	 * of mgmt rx.
+	 */
+	if (channel >= 1 && channel <= 14) {
+		status->band = IEEE80211_BAND_2GHZ;
+	} else if (channel >= 36 && channel <= 165) {
+		status->band = IEEE80211_BAND_5GHZ;
+	} else {
+		/* Shouldn't happen unless list of advertised channels to
+		 * mac80211 has been changed.
+		 */
+		WARN_ON_ONCE(1);
+		dev_kfree_skb(skb);
+		return 0;
+	}
+
+	if (phy_mode == MODE_11B && status->band == IEEE80211_BAND_5GHZ)
+		ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
+
+	sband = &ar->mac.sbands[status->band];
+
+	status->freq = ieee80211_channel_to_frequency(channel, status->band);
+	status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
+	status->rate_idx = ath10k_mac_bitrate_to_idx(sband, rate / 100);
+
+	hdr = (struct ieee80211_hdr *)skb->data;
+	fc = le16_to_cpu(hdr->frame_control);
+
+	ath10k_wmi_handle_wep_reauth(ar, skb, status);
+
+	/* FW delivers WEP Shared Auth frames with the Protected bit set and an
+	 * encrypted payload. However, in case of PMF it delivers decrypted
+	 * frames with the Protected bit set.
+	 */
+	if (ieee80211_has_protected(hdr->frame_control) &&
+	    !ieee80211_is_auth(hdr->frame_control)) {
+		status->flag |= RX_FLAG_DECRYPTED;
+
+		if (!ieee80211_is_action(hdr->frame_control) &&
+		    !ieee80211_is_deauth(hdr->frame_control) &&
+		    !ieee80211_is_disassoc(hdr->frame_control)) {
+			status->flag |= RX_FLAG_IV_STRIPPED |
+					RX_FLAG_MMIC_STRIPPED;
+			hdr->frame_control = __cpu_to_le16(fc &
+					~IEEE80211_FCTL_PROTECTED);
+		}
+	}
+
+	if (ieee80211_is_beacon(hdr->frame_control))
+		ath10k_mac_handle_beacon(ar, skb);
+
+	ath10k_dbg(ar, ATH10K_DBG_MGMT,
+		   "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
+		   skb, skb->len,
+		   fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
+
+	ath10k_dbg(ar, ATH10K_DBG_MGMT,
+		   "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
+		   status->freq, status->band, status->signal,
+		   status->rate_idx);
+
+	ieee80211_rx(ar->hw, skb);
+	return 0;
+}
+
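+/* Map a center frequency to a flat index across all channels advertised to
+ * mac80211. If the frequency is not found, the returned index equals the
+ * total channel count, so callers must bounds-check it (see
+ * ath10k_wmi_event_chan_info()).
+ */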
+static int freq_to_idx(struct ath10k *ar, int freq)
+{
+	struct ieee80211_supported_band *sband;
+	int band, ch, idx = 0;
+
+	for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
+		sband = ar->hw->wiphy->bands[band];
+		if (!sband)
+			continue;
+
+		for (ch = 0; ch < sband->n_channels; ch++, idx++)
+			if (sband->channels[ch].center_freq == freq)
+				goto exit;
+	}
+
+exit:
+	return idx;
+}
+
+static int ath10k_wmi_op_pull_ch_info_ev(struct ath10k *ar, struct sk_buff *skb,
+					 struct wmi_ch_info_ev_arg *arg)
+{
+	struct wmi_chan_info_event *ev = (void *)skb->data;
+
+	if (skb->len < sizeof(*ev))
+		return -EPROTO;
+
+	skb_pull(skb, sizeof(*ev));
+	arg->err_code = ev->err_code;
+	arg->freq = ev->freq;
+	arg->cmd_flags = ev->cmd_flags;
+	arg->noise_floor = ev->noise_floor;
+	arg->rx_clear_count = ev->rx_clear_count;
+	arg->cycle_count = ev->cycle_count;
+
+	return 0;
+}
+
+static int ath10k_wmi_10_4_op_pull_ch_info_ev(struct ath10k *ar,
+					      struct sk_buff *skb,
+					      struct wmi_ch_info_ev_arg *arg)
+{
+	struct wmi_10_4_chan_info_event *ev = (void *)skb->data;
+
+	if (skb->len < sizeof(*ev))
+		return -EPROTO;
+
+	skb_pull(skb, sizeof(*ev));
+	arg->err_code = ev->err_code;
+	arg->freq = ev->freq;
+	arg->cmd_flags = ev->cmd_flags;
+	arg->noise_floor = ev->noise_floor;
+	arg->rx_clear_count = ev->rx_clear_count;
+	arg->cycle_count = ev->cycle_count;
+	arg->chan_tx_pwr_range = ev->chan_tx_pwr_range;
+	arg->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
+	arg->rx_frame_count = ev->rx_frame_count;
+
+	return 0;
+}
+
+void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
+{
+	struct wmi_ch_info_ev_arg arg = {};
+	struct survey_info *survey;
+	u32 err_code, freq, cmd_flags, noise_floor, rx_clear_count, cycle_count;
+	int idx, ret;
+
+	ret = ath10k_wmi_pull_ch_info(ar, skb, &arg);
+	if (ret) {
+		ath10k_warn(ar, "failed to parse chan info event: %d\n", ret);
+		return;
+	}
+
+	err_code = __le32_to_cpu(arg.err_code);
+	freq = __le32_to_cpu(arg.freq);
+	cmd_flags = __le32_to_cpu(arg.cmd_flags);
+	noise_floor = __le32_to_cpu(arg.noise_floor);
+	rx_clear_count = __le32_to_cpu(arg.rx_clear_count);
+	cycle_count = __le32_to_cpu(arg.cycle_count);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n",
+		   err_code, freq, cmd_flags, noise_floor, rx_clear_count,
+		   cycle_count);
+
+	spin_lock_bh(&ar->data_lock);
+
+	switch (ar->scan.state) {
+	case ATH10K_SCAN_IDLE:
+	case ATH10K_SCAN_STARTING:
+		ath10k_warn(ar, "received chan info event without a scan request, ignoring\n");
+		goto exit;
+	case ATH10K_SCAN_RUNNING:
+	case ATH10K_SCAN_ABORTING:
+		break;
+	}
+
+	idx = freq_to_idx(ar, freq);
+	if (idx >= ARRAY_SIZE(ar->survey)) {
+		ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n",
+			    freq, idx);
+		goto exit;
+	}
+
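+	/* Survey results are filled in only when a chan info event with the
+	 * COMPLETE flag follows one without it, i.e. after a full dwell on
+	 * the channel has been observed.
+	 */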
+	if (cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
+		if (ar->ch_info_can_report_survey) {
+			survey = &ar->survey[idx];
+			survey->noise = noise_floor;
+			survey->filled = SURVEY_INFO_NOISE_DBM;
+
+			ath10k_hw_fill_survey_time(ar,
+						   survey,
+						   cycle_count,
+						   rx_clear_count,
+						   ar->survey_last_cycle_count,
+						   ar->survey_last_rx_clear_count);
+		}
+
+		ar->ch_info_can_report_survey = false;
+	} else {
+		ar->ch_info_can_report_survey = true;
+	}
+
+	if (!(cmd_flags & WMI_CHAN_INFO_FLAG_PRE_COMPLETE)) {
+		ar->survey_last_rx_clear_count = rx_clear_count;
+		ar->survey_last_cycle_count = cycle_count;
+	}
+
+exit:
+	spin_unlock_bh(&ar->data_lock);
+}
+
+void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
+{
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n");
+}
+
+int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
+{
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug mesg len %d\n",
+		   skb->len);
+
+	trace_ath10k_wmi_dbglog(ar, skb->data, skb->len);
+
+	return 0;
+}
+
+void ath10k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
+				     struct ath10k_fw_stats_pdev *dst)
+{
+	dst->ch_noise_floor = __le32_to_cpu(src->chan_nf);
+	dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count);
+	dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count);
+	dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count);
+	dst->cycle_count = __le32_to_cpu(src->cycle_count);
+	dst->phy_err_count = __le32_to_cpu(src->phy_err_count);
+	dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr);
+}
+
+void ath10k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
+				   struct ath10k_fw_stats_pdev *dst)
+{
+	dst->comp_queued = __le32_to_cpu(src->comp_queued);
+	dst->comp_delivered = __le32_to_cpu(src->comp_delivered);
+	dst->msdu_enqued = __le32_to_cpu(src->msdu_enqued);
+	dst->mpdu_enqued = __le32_to_cpu(src->mpdu_enqued);
+	dst->wmm_drop = __le32_to_cpu(src->wmm_drop);
+	dst->local_enqued = __le32_to_cpu(src->local_enqued);
+	dst->local_freed = __le32_to_cpu(src->local_freed);
+	dst->hw_queued = __le32_to_cpu(src->hw_queued);
+	dst->hw_reaped = __le32_to_cpu(src->hw_reaped);
+	dst->underrun = __le32_to_cpu(src->underrun);
+	dst->tx_abort = __le32_to_cpu(src->tx_abort);
+	dst->mpdus_requed = __le32_to_cpu(src->mpdus_requed);
+	dst->tx_ko = __le32_to_cpu(src->tx_ko);
+	dst->data_rc = __le32_to_cpu(src->data_rc);
+	dst->self_triggers = __le32_to_cpu(src->self_triggers);
+	dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
+	dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
+	dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
+	dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
+	dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
+	dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
+	dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
+}
+
+static void
+ath10k_wmi_10_4_pull_pdev_stats_tx(const struct wmi_10_4_pdev_stats_tx *src,
+				   struct ath10k_fw_stats_pdev *dst)
+{
+	dst->comp_queued = __le32_to_cpu(src->comp_queued);
+	dst->comp_delivered = __le32_to_cpu(src->comp_delivered);
+	dst->msdu_enqued = __le32_to_cpu(src->msdu_enqued);
+	dst->mpdu_enqued = __le32_to_cpu(src->mpdu_enqued);
+	dst->wmm_drop = __le32_to_cpu(src->wmm_drop);
+	dst->local_enqued = __le32_to_cpu(src->local_enqued);
+	dst->local_freed = __le32_to_cpu(src->local_freed);
+	dst->hw_queued = __le32_to_cpu(src->hw_queued);
+	dst->hw_reaped = __le32_to_cpu(src->hw_reaped);
+	dst->underrun = __le32_to_cpu(src->underrun);
+	dst->tx_abort = __le32_to_cpu(src->tx_abort);
+	dst->mpdus_requed = __le32_to_cpu(src->mpdus_requed);
+	dst->tx_ko = __le32_to_cpu(src->tx_ko);
+	dst->data_rc = __le32_to_cpu(src->data_rc);
+	dst->self_triggers = __le32_to_cpu(src->self_triggers);
+	dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
+	dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
+	dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
+	dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
+	dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
+	dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
+	dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
+	dst->hw_paused = __le32_to_cpu(src->hw_paused);
+	dst->seq_posted = __le32_to_cpu(src->seq_posted);
+	dst->seq_failed_queueing =
+		__le32_to_cpu(src->seq_failed_queueing);
+	dst->seq_completed = __le32_to_cpu(src->seq_completed);
+	dst->seq_restarted = __le32_to_cpu(src->seq_restarted);
+	dst->mu_seq_posted = __le32_to_cpu(src->mu_seq_posted);
+	dst->mpdus_sw_flush = __le32_to_cpu(src->mpdus_sw_flush);
+	dst->mpdus_hw_filter = __le32_to_cpu(src->mpdus_hw_filter);
+	dst->mpdus_truncated = __le32_to_cpu(src->mpdus_truncated);
+	dst->mpdus_ack_failed = __le32_to_cpu(src->mpdus_ack_failed);
+	dst->mpdus_expired = __le32_to_cpu(src->mpdus_expired);
+}
+
+void ath10k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
+				   struct ath10k_fw_stats_pdev *dst)
+{
+	dst->mid_ppdu_route_change = __le32_to_cpu(src->mid_ppdu_route_change);
+	dst->status_rcvd = __le32_to_cpu(src->status_rcvd);
+	dst->r0_frags = __le32_to_cpu(src->r0_frags);
+	dst->r1_frags = __le32_to_cpu(src->r1_frags);
+	dst->r2_frags = __le32_to_cpu(src->r2_frags);
+	dst->r3_frags = __le32_to_cpu(src->r3_frags);
+	dst->htt_msdus = __le32_to_cpu(src->htt_msdus);
+	dst->htt_mpdus = __le32_to_cpu(src->htt_mpdus);
+	dst->loc_msdus = __le32_to_cpu(src->loc_msdus);
+	dst->loc_mpdus = __le32_to_cpu(src->loc_mpdus);
+	dst->oversize_amsdu = __le32_to_cpu(src->oversize_amsdu);
+	dst->phy_errs = __le32_to_cpu(src->phy_errs);
+	dst->phy_err_drop = __le32_to_cpu(src->phy_err_drop);
+	dst->mpdu_errs = __le32_to_cpu(src->mpdu_errs);
+}
+
+void ath10k_wmi_pull_pdev_stats_extra(const struct wmi_pdev_stats_extra *src,
+				      struct ath10k_fw_stats_pdev *dst)
+{
+	dst->ack_rx_bad = __le32_to_cpu(src->ack_rx_bad);
+	dst->rts_bad = __le32_to_cpu(src->rts_bad);
+	dst->rts_good = __le32_to_cpu(src->rts_good);
+	dst->fcs_bad = __le32_to_cpu(src->fcs_bad);
+	dst->no_beacons = __le32_to_cpu(src->no_beacons);
+	dst->mib_int_count = __le32_to_cpu(src->mib_int_count);
+}
+
+void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src,
+				struct ath10k_fw_stats_peer *dst)
+{
+	ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
+	dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
+	dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
+}
+
+static int ath10k_wmi_main_op_pull_fw_stats(struct ath10k *ar,
+					    struct sk_buff *skb,
+					    struct ath10k_fw_stats *stats)
+{
+	const struct wmi_stats_event *ev = (void *)skb->data;
+	u32 num_pdev_stats, num_vdev_stats, num_peer_stats;
+	int i;
+
+	if (!skb_pull(skb, sizeof(*ev)))
+		return -EPROTO;
+
+	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
+	num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
+	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+
+	for (i = 0; i < num_pdev_stats; i++) {
+		const struct wmi_pdev_stats *src;
+		struct ath10k_fw_stats_pdev *dst;
+
+		src = (void *)skb->data;
+		if (!skb_pull(skb, sizeof(*src)))
+			return -EPROTO;
+
+		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+		if (!dst)
+			continue;
+
+		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
+		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
+		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+
+		list_add_tail(&dst->list, &stats->pdevs);
+	}
+
+	/* fw doesn't implement vdev stats */
+
+	for (i = 0; i < num_peer_stats; i++) {
+		const struct wmi_peer_stats *src;
+		struct ath10k_fw_stats_peer *dst;
+
+		src = (void *)skb->data;
+		if (!skb_pull(skb, sizeof(*src)))
+			return -EPROTO;
+
+		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+		if (!dst)
+			continue;
+
+		ath10k_wmi_pull_peer_stats(src, dst);
+		list_add_tail(&dst->list, &stats->peers);
+	}
+
+	return 0;
+}
+
+static int ath10k_wmi_10x_op_pull_fw_stats(struct ath10k *ar,
+					   struct sk_buff *skb,
+					   struct ath10k_fw_stats *stats)
+{
+	const struct wmi_stats_event *ev = (void *)skb->data;
+	u32 num_pdev_stats, num_vdev_stats, num_peer_stats;
+	int i;
+
+	if (!skb_pull(skb, sizeof(*ev)))
+		return -EPROTO;
+
+	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
+	num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
+	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+
+	for (i = 0; i < num_pdev_stats; i++) {
+		const struct wmi_10x_pdev_stats *src;
+		struct ath10k_fw_stats_pdev *dst;
+
+		src = (void *)skb->data;
+		if (!skb_pull(skb, sizeof(*src)))
+			return -EPROTO;
+
+		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+		if (!dst)
+			continue;
+
+		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
+		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
+		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+		ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
+
+		list_add_tail(&dst->list, &stats->pdevs);
+	}
+
+	/* fw doesn't implement vdev stats */
+
+	for (i = 0; i < num_peer_stats; i++) {
+		const struct wmi_10x_peer_stats *src;
+		struct ath10k_fw_stats_peer *dst;
+
+		src = (void *)skb->data;
+		if (!skb_pull(skb, sizeof(*src)))
+			return -EPROTO;
+
+		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+		if (!dst)
+			continue;
+
+		ath10k_wmi_pull_peer_stats(&src->old, dst);
+
+		dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
+
+		list_add_tail(&dst->list, &stats->peers);
+	}
+
+	return 0;
+}
+
+static int ath10k_wmi_10_2_op_pull_fw_stats(struct ath10k *ar,
+					    struct sk_buff *skb,
+					    struct ath10k_fw_stats *stats)
+{
+	const struct wmi_10_2_stats_event *ev = (void *)skb->data;
+	u32 num_pdev_stats;
+	u32 num_pdev_ext_stats;
+	u32 num_vdev_stats;
+	u32 num_peer_stats;
+	int i;
+
+	if (!skb_pull(skb, sizeof(*ev)))
+		return -EPROTO;
+
+	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
+	num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
+	num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
+	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+
+	for (i = 0; i < num_pdev_stats; i++) {
+		const struct wmi_10_2_pdev_stats *src;
+		struct ath10k_fw_stats_pdev *dst;
+
+		src = (void *)skb->data;
+		if (!skb_pull(skb, sizeof(*src)))
+			return -EPROTO;
+
+		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+		if (!dst)
+			continue;
+
+		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
+		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
+		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+		ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
+		/* FIXME: expose 10.2 specific values */
+
+		list_add_tail(&dst->list, &stats->pdevs);
+	}
+
+	for (i = 0; i < num_pdev_ext_stats; i++) {
+		const struct wmi_10_2_pdev_ext_stats *src;
+
+		src = (void *)skb->data;
+		if (!skb_pull(skb, sizeof(*src)))
+			return -EPROTO;
+
+		/* FIXME: expose values to userspace
+		 *
+		 * Note: Even though this loop seems to do nothing it is
+		 * required to parse following sub-structures properly.
+		 */
+	}
+
+	/* fw doesn't implement vdev stats */
+
+	for (i = 0; i < num_peer_stats; i++) {
+		const struct wmi_10_2_peer_stats *src;
+		struct ath10k_fw_stats_peer *dst;
+
+		src = (void *)skb->data;
+		if (!skb_pull(skb, sizeof(*src)))
+			return -EPROTO;
+
+		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+		if (!dst)
+			continue;
+
+		ath10k_wmi_pull_peer_stats(&src->old, dst);
+
+		dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
+		/* FIXME: expose 10.2 specific values */
+
+		list_add_tail(&dst->list, &stats->peers);
+	}
+
+	return 0;
+}
+
+static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar,
+					      struct sk_buff *skb,
+					      struct ath10k_fw_stats *stats)
+{
+	const struct wmi_10_2_stats_event *ev = (void *)skb->data;
+	u32 num_pdev_stats;
+	u32 num_pdev_ext_stats;
+	u32 num_vdev_stats;
+	u32 num_peer_stats;
+	int i;
+
+	if (!skb_pull(skb, sizeof(*ev)))
+		return -EPROTO;
+
+	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
+	num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
+	num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
+	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+
+	for (i = 0; i < num_pdev_stats; i++) {
+		const struct wmi_10_2_pdev_stats *src;
+		struct ath10k_fw_stats_pdev *dst;
+
+		src = (void *)skb->data;
+		if (!skb_pull(skb, sizeof(*src)))
+			return -EPROTO;
+
+		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+		if (!dst)
+			continue;
+
+		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
+		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
+		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+		ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
+		/* FIXME: expose 10.2 specific values */
+
+		list_add_tail(&dst->list, &stats->pdevs);
+	}
+
+	for (i = 0; i < num_pdev_ext_stats; i++) {
+		const struct wmi_10_2_pdev_ext_stats *src;
+
+		src = (void *)skb->data;
+		if (!skb_pull(skb, sizeof(*src)))
+			return -EPROTO;
+
+		/* FIXME: expose values to userspace
+		 *
+		 * Note: Even though this loop seems to do nothing it is
+		 * required to parse following sub-structures properly.
+		 */
+	}
+
+	/* fw doesn't implement vdev stats */
+
+	for (i = 0; i < num_peer_stats; i++) {
+		const struct wmi_10_2_4_peer_stats *src;
+		struct ath10k_fw_stats_peer *dst;
+
+		src = (void *)skb->data;
+		if (!skb_pull(skb, sizeof(*src)))
+			return -EPROTO;
+
+		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+		if (!dst)
+			continue;
+
+		ath10k_wmi_pull_peer_stats(&src->common.old, dst);
+
+		dst->peer_rx_rate = __le32_to_cpu(src->common.peer_rx_rate);
+		/* FIXME: expose 10.2 specific values */
+
+		list_add_tail(&dst->list, &stats->peers);
+	}
+
+	return 0;
+}
+
+static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar,
+					    struct sk_buff *skb,
+					    struct ath10k_fw_stats *stats)
+{
+	const struct wmi_10_2_stats_event *ev = (void *)skb->data;
+	u32 num_pdev_stats;
+	u32 num_pdev_ext_stats;
+	u32 num_vdev_stats;
+	u32 num_peer_stats;
+	int i;
+
+	if (!skb_pull(skb, sizeof(*ev)))
+		return -EPROTO;
+
+	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
+	num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
+	num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
+	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+
+	for (i = 0; i < num_pdev_stats; i++) {
+		const struct wmi_10_4_pdev_stats *src;
+		struct ath10k_fw_stats_pdev *dst;
+
+		src = (void *)skb->data;
+		if (!skb_pull(skb, sizeof(*src)))
+			return -EPROTO;
+
+		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+		if (!dst)
+			continue;
+
+		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
+		ath10k_wmi_10_4_pull_pdev_stats_tx(&src->tx, dst);
+		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+		dst->rx_ovfl_errs = __le32_to_cpu(src->rx_ovfl_errs);
+		ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
+
+		list_add_tail(&dst->list, &stats->pdevs);
+	}
+
+	for (i = 0; i < num_pdev_ext_stats; i++) {
+		const struct wmi_10_2_pdev_ext_stats *src;
+
+		src = (void *)skb->data;
+		if (!skb_pull(skb, sizeof(*src)))
+			return -EPROTO;
+
+		/* FIXME: expose values to userspace
+		 *
+		 * Note: Even though this loop seems to do nothing it is
+		 * required to parse following sub-structures properly.
+		 */
+	}
+
+	/* fw doesn't implement vdev stats */
+
+	for (i = 0; i < num_peer_stats; i++) {
+		const struct wmi_10_4_peer_stats *src;
+		struct ath10k_fw_stats_peer *dst;
+
+		src = (void *)skb->data;
+		if (!skb_pull(skb, sizeof(*src)))
+			return -EPROTO;
+
+		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+		if (!dst)
+			continue;
+
+		ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
+		dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
+		dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
+		dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
+		/* FIXME: expose 10.4 specific values */
+
+		list_add_tail(&dst->list, &stats->peers);
+	}
+
+	return 0;
+}
+
+void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb)
+{
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");
+	ath10k_debug_fw_stats_process(ar, skb);
+}
+
+static int
+ath10k_wmi_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb,
+				 struct wmi_vdev_start_ev_arg *arg)
+{
+	struct wmi_vdev_start_response_event *ev = (void *)skb->data;
+
+	if (skb->len < sizeof(*ev))
+		return -EPROTO;
+
+	skb_pull(skb, sizeof(*ev));
+	arg->vdev_id = ev->vdev_id;
+	arg->req_id = ev->req_id;
+	arg->resp_type = ev->resp_type;
+	arg->status = ev->status;
+
+	return 0;
+}
+
+void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb)
+{
+	struct wmi_vdev_start_ev_arg arg = {};
+	int ret;
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");
+
+	ret = ath10k_wmi_pull_vdev_start(ar, skb, &arg);
+	if (ret) {
+		ath10k_warn(ar, "failed to parse vdev start event: %d\n", ret);
+		return;
+	}
+
+	if (WARN_ON(__le32_to_cpu(arg.status)))
+		return;
+
+	complete(&ar->vdev_setup_done);
+}
+
+void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, struct sk_buff *skb)
+{
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n");
+	complete(&ar->vdev_setup_done);
+}
+
+static int
+ath10k_wmi_op_pull_peer_kick_ev(struct ath10k *ar, struct sk_buff *skb,
+				struct wmi_peer_kick_ev_arg *arg)
+{
+	struct wmi_peer_sta_kickout_event *ev = (void *)skb->data;
+
+	if (skb->len < sizeof(*ev))
+		return -EPROTO;
+
+	skb_pull(skb, sizeof(*ev));
+	arg->mac_addr = ev->peer_macaddr.addr;
+
+	return 0;
+}
+
+void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, struct sk_buff *skb)
+{
+	struct wmi_peer_kick_ev_arg arg = {};
+	struct ieee80211_sta *sta;
+	int ret;
+
+	ret = ath10k_wmi_pull_peer_kick(ar, skb, &arg);
+	if (ret) {
+		ath10k_warn(ar, "failed to parse peer kickout event: %d\n",
+			    ret);
+		return;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event peer sta kickout %pM\n",
+		   arg.mac_addr);
+
+	rcu_read_lock();
+
+	sta = ieee80211_find_sta_by_ifaddr(ar->hw, arg.mac_addr, NULL);
+	if (!sta) {
+		ath10k_warn(ar, "Spurious quick kickout for STA %pM\n",
+			    arg.mac_addr);
+		goto exit;
+	}
+
+	ieee80211_report_low_ack(sta, 10);
+
+exit:
+	rcu_read_unlock();
+}
+
+/*
+ * FIXME
+ *
+ * We don't report the sleep state of connected stations to
+ * mac80211. Due to this mac80211 can't fill in the TIM IE
+ * correctly.
+ *
+ * I know of no way of getting nullfunc frames that contain
+ * sleep transition from connected stations - these do not
+ * seem to be sent from the target to the host. There also
+ * doesn't seem to be a dedicated event for that. So the
+ * only way left to do this would be to read tim_bitmap
+ * during SWBA.
+ *
+ * We could probably try using tim_bitmap from SWBA to tell
+ * mac80211 which stations are asleep and which are not. The
+ * problem here is calling mac80211 functions so many times
+ * could take too long and make us miss the time to submit
+ * the beacon to the target.
+ *
+ * So as a workaround we try to extend the TIM IE if there
+ * is unicast traffic buffered for stations with aid > 7 and
+ * fill it in ourselves.
+ */
+static void ath10k_wmi_update_tim(struct ath10k *ar,
+				  struct ath10k_vif *arvif,
+				  struct sk_buff *bcn,
+				  const struct wmi_tim_info_arg *tim_info)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data;
+	struct ieee80211_tim_ie *tim;
+	u8 *ies, *ie;
+	u8 ie_len, pvm_len;
+	__le32 t;
+	u32 v, tim_len;
+
+	/* When FW reports 0 in tim_len, ensure at least the first byte
+	 * in tim_bitmap is considered for the pvm calculation.
+	 */
+	tim_len = tim_info->tim_len ? __le32_to_cpu(tim_info->tim_len) : 1;
+
+	/* If the next SWBA has tim_changed cleared, the tim_bitmap is garbage;
+	 * we must copy the bitmap upon change and reuse it later.
+	 */
+	if (__le32_to_cpu(tim_info->tim_changed)) {
+		int i;
+
+		if (sizeof(arvif->u.ap.tim_bitmap) < tim_len) {
+			ath10k_warn(ar, "SWBA TIM field is too big (%u), truncating it to %zu",
+				    tim_len, sizeof(arvif->u.ap.tim_bitmap));
+			tim_len = sizeof(arvif->u.ap.tim_bitmap);
+		}
+
+		for (i = 0; i < tim_len; i++) {
+			t = tim_info->tim_bitmap[i / 4];
+			v = __le32_to_cpu(t);
+			arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF;
+		}
+
+		/* FW reports either length 0 or a length based on the max
+		 * supported number of stations, so we calculate it on our own.
+		 */
+		arvif->u.ap.tim_len = 0;
+		for (i = 0; i < tim_len; i++)
+			if (arvif->u.ap.tim_bitmap[i])
+				arvif->u.ap.tim_len = i;
+
+		arvif->u.ap.tim_len++;
+	}
+
+	ies = bcn->data;
+	ies += ieee80211_hdrlen(hdr->frame_control);
+	ies += 12; /* fixed parameters */
+
+	ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies,
+				    (u8 *)skb_tail_pointer(bcn) - ies);
+	if (!ie) {
+		if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
+			ath10k_warn(ar, "no tim ie found\n");
+		return;
+	}
+
+	tim = (void *)ie + 2;
+	ie_len = ie[1];
+	pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */
+
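+	/* If the partial virtual bitmap in the template beacon is smaller than
+	 * what firmware reported, grow the TIM IE in place and shift the
+	 * following IEs towards the tail.
+	 */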
+	if (pvm_len < arvif->u.ap.tim_len) {
+		int expand_size = tim_len - pvm_len;
+		int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len);
+		void *next_ie = ie + 2 + ie_len;
+
+		if (skb_put(bcn, expand_size)) {
+			memmove(next_ie + expand_size, next_ie, move_size);
+
+			ie[1] += expand_size;
+			ie_len += expand_size;
+			pvm_len += expand_size;
+		} else {
+			ath10k_warn(ar, "tim expansion failed\n");
+		}
+	}
+
+	if (pvm_len > tim_len) {
+		ath10k_warn(ar, "tim pvm length is too great (%d)\n", pvm_len);
+		return;
+	}
+
+	tim->bitmap_ctrl = !!__le32_to_cpu(tim_info->tim_mcast);
+	memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len);
+
+	if (tim->dtim_count == 0) {
+		ATH10K_SKB_CB(bcn)->bcn.dtim_zero = true;
+
+		if (__le32_to_cpu(tim_info->tim_mcast) == 1)
+			ATH10K_SKB_CB(bcn)->bcn.deliver_cab = true;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
+		   tim->dtim_count, tim->dtim_period,
+		   tim->bitmap_ctrl, pvm_len);
+}
+
+static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
+				  struct sk_buff *bcn,
+				  const struct wmi_p2p_noa_info *noa)
+{
+	if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
+		return;
+
+	ath10k_dbg(ar, ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
+
+	if (noa->changed & WMI_P2P_NOA_CHANGED_BIT)
+		ath10k_p2p_noa_update(arvif, noa);
+
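+	/* Append the cached P2P NoA attribute to the beacon, growing the skb
+	 * tailroom first.
+	 */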
+	if (arvif->u.ap.noa_data)
+		if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC))
+			memcpy(skb_put(bcn, arvif->u.ap.noa_len),
+			       arvif->u.ap.noa_data,
+			       arvif->u.ap.noa_len);
+}
+
+static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb,
+				      struct wmi_swba_ev_arg *arg)
+{
+	struct wmi_host_swba_event *ev = (void *)skb->data;
+	u32 map;
+	size_t i;
+
+	if (skb->len < sizeof(*ev))
+		return -EPROTO;
+
+	skb_pull(skb, sizeof(*ev));
+	arg->vdev_map = ev->vdev_map;
+
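+	/* vdev_map is a bitmap of vdevs due for a beacon; bcn_info entries
+	 * are packed, so the index i only advances for set bits.
+	 */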
+	for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
+		if (!(map & BIT(0)))
+			continue;
+
+		/* If this happens there were some changes in firmware and
+		 * ath10k should update the max size of tim_info array.
+		 */
+		if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
+			break;
+
+		if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
+		     sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
+			ath10k_warn(ar, "refusing to parse invalid swba structure\n");
+			return -EPROTO;
+		}
+
+		arg->tim_info[i].tim_len = ev->bcn_info[i].tim_info.tim_len;
+		arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
+		arg->tim_info[i].tim_bitmap =
+				ev->bcn_info[i].tim_info.tim_bitmap;
+		arg->tim_info[i].tim_changed =
+				ev->bcn_info[i].tim_info.tim_changed;
+		arg->tim_info[i].tim_num_ps_pending =
+				ev->bcn_info[i].tim_info.tim_num_ps_pending;
+
+		arg->noa_info[i] = &ev->bcn_info[i].p2p_noa_info;
+		i++;
+	}
+
+	return 0;
+}
+
+static int ath10k_wmi_10_4_op_pull_swba_ev(struct ath10k *ar,
+					   struct sk_buff *skb,
+					   struct wmi_swba_ev_arg *arg)
+{
+	struct wmi_10_4_host_swba_event *ev = (void *)skb->data;
+	u32 map, tim_len;
+	size_t i;
+
+	if (skb->len < sizeof(*ev))
+		return -EPROTO;
+
+	skb_pull(skb, sizeof(*ev));
+	arg->vdev_map = ev->vdev_map;
+
+	for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
+		if (!(map & BIT(0)))
+			continue;
+
+		/* If this happens there were some changes in firmware and
+		 * ath10k should update the max size of tim_info array.
+		 */
+		if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
+			break;
+
+		if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
+		      sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
+			ath10k_warn(ar, "refusing to parse invalid swba structure\n");
+			return -EPROTO;
+		}
+
+		tim_len = __le32_to_cpu(ev->bcn_info[i].tim_info.tim_len);
+		if (tim_len) {
+			/* Exclude 4 byte guard length */
+			tim_len -= 4;
+			arg->tim_info[i].tim_len = __cpu_to_le32(tim_len);
+		} else {
+			arg->tim_info[i].tim_len = 0;
+		}
+
+		arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
+		arg->tim_info[i].tim_bitmap =
+				ev->bcn_info[i].tim_info.tim_bitmap;
+		arg->tim_info[i].tim_changed =
+				ev->bcn_info[i].tim_info.tim_changed;
+		arg->tim_info[i].tim_num_ps_pending =
+				ev->bcn_info[i].tim_info.tim_num_ps_pending;
+
+		/* 10.4 firmware doesn't have p2p support. notice of absence
+		 * info can be ignored for now.
+		 */
+
+		i++;
+	}
+
+	return 0;
+}
+
+static enum wmi_txbf_conf ath10k_wmi_10_4_txbf_conf_scheme(struct ath10k *ar)
+{
+	return WMI_TXBF_CONF_BEFORE_ASSOC;
+}
+
+void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
+{
+	struct wmi_swba_ev_arg arg = {};
+	u32 map;
+	int i = -1;
+	const struct wmi_tim_info_arg *tim_info;
+	const struct wmi_p2p_noa_info *noa_info;
+	struct ath10k_vif *arvif;
+	struct sk_buff *bcn;
+	dma_addr_t paddr;
+	int ret, vdev_id = 0;
+
+	ret = ath10k_wmi_pull_swba(ar, skb, &arg);
+	if (ret) {
+		ath10k_warn(ar, "failed to parse swba event: %d\n", ret);
+		return;
+	}
+
+	map = __le32_to_cpu(arg.vdev_map);
+
+	ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n",
+		   map);
+
+	for (; map; map >>= 1, vdev_id++) {
+		if (!(map & 0x1))
+			continue;
+
+		i++;
+
+		if (i >= WMI_MAX_AP_VDEV) {
+			ath10k_warn(ar, "swba has corrupted vdev map\n");
+			break;
+		}
+
+		tim_info = &arg.tim_info[i];
+		noa_info = arg.noa_info[i];
+
+		ath10k_dbg(ar, ATH10K_DBG_MGMT,
+			   "mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n",
+			   i,
+			   __le32_to_cpu(tim_info->tim_len),
+			   __le32_to_cpu(tim_info->tim_mcast),
+			   __le32_to_cpu(tim_info->tim_changed),
+			   __le32_to_cpu(tim_info->tim_num_ps_pending),
+			   __le32_to_cpu(tim_info->tim_bitmap[3]),
+			   __le32_to_cpu(tim_info->tim_bitmap[2]),
+			   __le32_to_cpu(tim_info->tim_bitmap[1]),
+			   __le32_to_cpu(tim_info->tim_bitmap[0]));
+
+		/* TODO: Only the first 4 words of tim_bitmap are dumped.
+		 * Extend debug code to dump the full tim_bitmap.
+		 */
+
+		arvif = ath10k_get_arvif(ar, vdev_id);
+		if (arvif == NULL) {
+			ath10k_warn(ar, "no vif for vdev_id %d found\n",
+				    vdev_id);
+			continue;
+		}
+
+		/* There are no completions for beacons, so wait for the next
+		 * SWBA before telling mac80211 to decrement the CSA counter.
+		 *
+		 * Once the CSA countdown is complete, stop sending beacons
+		 * until the actual channel switch is done.
+		 */
+		if (arvif->vif->csa_active &&
+		    ieee80211_csa_is_complete(arvif->vif)) {
+			ieee80211_csa_finish(arvif->vif);
+			continue;
+		}
+
+		bcn = ieee80211_beacon_get(ar->hw, arvif->vif);
+		if (!bcn) {
+			ath10k_warn(ar, "could not get mac80211 beacon\n");
+			continue;
+		}
+
+		ath10k_tx_h_seq_no(arvif->vif, bcn);
+		ath10k_wmi_update_tim(ar, arvif, bcn, tim_info);
+		ath10k_wmi_update_noa(ar, arvif, bcn, noa_info);
+
+		spin_lock_bh(&ar->data_lock);
+
+		if (arvif->beacon) {
+			switch (arvif->beacon_state) {
+			case ATH10K_BEACON_SENT:
+				break;
+			case ATH10K_BEACON_SCHEDULED:
+				ath10k_warn(ar, "SWBA overrun on vdev %d, skipped old beacon\n",
+					    arvif->vdev_id);
+				break;
+			case ATH10K_BEACON_SENDING:
+				ath10k_warn(ar, "SWBA overrun on vdev %d, skipped new beacon\n",
+					    arvif->vdev_id);
+				dev_kfree_skb(bcn);
+				goto skip;
+			}
+
+			ath10k_mac_vif_beacon_free(arvif);
+		}
+
+		if (!arvif->beacon_buf) {
+			paddr = dma_map_single(arvif->ar->dev, bcn->data,
+					       bcn->len, DMA_TO_DEVICE);
+			ret = dma_mapping_error(arvif->ar->dev, paddr);
+			if (ret) {
+				ath10k_warn(ar, "failed to map beacon: %d\n",
+					    ret);
+				dev_kfree_skb_any(bcn);
+				ret = -EIO;
+				goto skip;
+			}
+
+			ATH10K_SKB_CB(bcn)->paddr = paddr;
+		} else {
+			if (bcn->len > IEEE80211_MAX_FRAME_LEN) {
+				ath10k_warn(ar, "trimming beacon %d -> %d bytes!\n",
+					    bcn->len, IEEE80211_MAX_FRAME_LEN);
+				skb_trim(bcn, IEEE80211_MAX_FRAME_LEN);
+			}
+			memcpy(arvif->beacon_buf, bcn->data, bcn->len);
+			ATH10K_SKB_CB(bcn)->paddr = arvif->beacon_paddr;
+		}
+
+		arvif->beacon = bcn;
+		arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
+
+		trace_ath10k_tx_hdr(ar, bcn->data, bcn->len);
+		trace_ath10k_tx_payload(ar, bcn->data, bcn->len);
+
+skip:
+		spin_unlock_bh(&ar->data_lock);
+	}
+
+	ath10k_wmi_tx_beacons_nowait(ar);
+}
+
+void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb)
+{
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");
+}
+
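+/* Decode a radar pulse summary: the pulse duration, RSSI and chirp flag are
+ * combined with a 64-bit TSF (upper bits taken from the event, lower 32 bits
+ * from the phyerr timestamp) into a pulse_event and fed to the DFS pattern
+ * detector. A detected pattern is reported to mac80211 unless radar
+ * reporting has been blocked via debugfs.
+ */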
+static void ath10k_dfs_radar_report(struct ath10k *ar,
+				    struct wmi_phyerr_ev_arg *phyerr,
+				    const struct phyerr_radar_report *rr,
+				    u64 tsf)
+{
+	u32 reg0, reg1, tsf32l;
+	struct ieee80211_channel *ch;
+	struct pulse_event pe;
+	u64 tsf64;
+	u8 rssi, width;
+
+	reg0 = __le32_to_cpu(rr->reg0);
+	reg1 = __le32_to_cpu(rr->reg1);
+
+	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+		   "wmi phyerr radar report chirp %d max_width %d agc_total_gain %d pulse_delta_diff %d\n",
+		   MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP),
+		   MS(reg0, RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH),
+		   MS(reg0, RADAR_REPORT_REG0_AGC_TOTAL_GAIN),
+		   MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_DIFF));
+	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+		   "wmi phyerr radar report pulse_delta_peak %d pulse_sidx %d fft_valid %d agc_mb_gain %d subchan_mask %d\n",
+		   MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_PEAK),
+		   MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX),
+		   MS(reg1, RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID),
+		   MS(reg1, RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN),
+		   MS(reg1, RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK));
+	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+		   "wmi phyerr radar report pulse_tsf_offset 0x%X pulse_dur: %d\n",
+		   MS(reg1, RADAR_REPORT_REG1_PULSE_TSF_OFFSET),
+		   MS(reg1, RADAR_REPORT_REG1_PULSE_DUR));
+
+	if (!ar->dfs_detector)
+		return;
+
+	spin_lock_bh(&ar->data_lock);
+	ch = ar->rx_channel;
+	spin_unlock_bh(&ar->data_lock);
+
+	if (!ch) {
+		ath10k_warn(ar, "failed to derive channel for radar pulse, treating as radar\n");
+		goto radar_detected;
+	}
+
+	/* report event to DFS pattern detector */
+	tsf32l = phyerr->tsf_timestamp;
+	tsf64 = tsf & (~0xFFFFFFFFULL);
+	tsf64 |= tsf32l;
+
+	width = MS(reg1, RADAR_REPORT_REG1_PULSE_DUR);
+	rssi = phyerr->rssi_combined;
+
+	/* The hardware stores this as an 8-bit signed value;
+	 * clamp negative readings to zero.
+	 */
+	if (rssi & 0x80)
+		rssi = 0;
+
+	pe.ts = tsf64;
+	pe.freq = ch->center_freq;
+	pe.width = width;
+	pe.rssi = rssi;
+	pe.chirp = (MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP) != 0);
+	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+		   "dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n",
+		   pe.freq, pe.width, pe.rssi, pe.ts);
+
+	ATH10K_DFS_STAT_INC(ar, pulses_detected);
+
+	if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe)) {
+		ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+			   "dfs no pulse pattern detected, yet\n");
+		return;
+	}
+
+radar_detected:
+	ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs radar detected\n");
+	ATH10K_DFS_STAT_INC(ar, radar_detected);
+
+	/* Radar event reporting can be suppressed via the debugfs file
+	 * dfs_block_radar_events.
+	 */
+	if (ar->dfs_block_radar_events) {
+		ath10k_info(ar, "DFS Radar detected, but ignored as requested\n");
+		return;
+	}
+
+	ieee80211_radar_detected(ar->hw);
+}
+
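+/* Sanity check the search FFT report that accompanies a radar pulse. Pulses
+ * with a suspicious RSSI/peak magnitude combination are counted as discarded
+ * and cause the rest of the phyerr to be dropped, reducing false radar
+ * detections.
+ */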
+static int ath10k_dfs_fft_report(struct ath10k *ar,
+				 struct wmi_phyerr_ev_arg *phyerr,
+				 const struct phyerr_fft_report *fftr,
+				 u64 tsf)
+{
+	u32 reg0, reg1;
+	u8 rssi, peak_mag;
+
+	reg0 = __le32_to_cpu(fftr->reg0);
+	reg1 = __le32_to_cpu(fftr->reg1);
+	rssi = phyerr->rssi_combined;
+
+	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+		   "wmi phyerr fft report total_gain_db %d base_pwr_db %d fft_chn_idx %d peak_sidx %d\n",
+		   MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB),
+		   MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB),
+		   MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX),
+		   MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX));
+	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+		   "wmi phyerr fft report rel_pwr_db %d avgpwr_db %d peak_mag %d num_store_bin %d\n",
+		   MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB),
+		   MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB),
+		   MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG),
+		   MS(reg1, SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB));
+
+	peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG);
+
+	/* false event detection */
+	if (rssi == DFS_RSSI_POSSIBLY_FALSE &&
+	    peak_mag < 2 * DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE) {
+		ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs false pulse detected\n");
+		ATH10K_DFS_STAT_INC(ar, pulses_discarded);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
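+/* Walk the TLV-encoded buffer of a radar phyerr. Radar pulse summaries are
+ * passed to the pattern detector while search FFT reports are only used to
+ * filter out likely false pulses; a truncated TLV aborts the parse.
+ */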
+void ath10k_wmi_event_dfs(struct ath10k *ar,
+			  struct wmi_phyerr_ev_arg *phyerr,
+			  u64 tsf)
+{
+	int buf_len, tlv_len, res, i = 0;
+	const struct phyerr_tlv *tlv;
+	const struct phyerr_radar_report *rr;
+	const struct phyerr_fft_report *fftr;
+	const u8 *tlv_buf;
+
+	buf_len = phyerr->buf_len;
+	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+		   "wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n",
+		   phyerr->phy_err_code, phyerr->rssi_combined,
+		   phyerr->tsf_timestamp, tsf, buf_len);
+
+	/* Skip event if DFS disabled */
+	if (!config_enabled(CONFIG_ATH10K_DFS_CERTIFIED))
+		return;
+
+	ATH10K_DFS_STAT_INC(ar, pulses_total);
+
+	while (i < buf_len) {
+		if (i + sizeof(*tlv) > buf_len) {
+			ath10k_warn(ar, "too short buf for tlv header (%d)\n",
+				    i);
+			return;
+		}
+
+		tlv = (struct phyerr_tlv *)&phyerr->buf[i];
+		tlv_len = __le16_to_cpu(tlv->len);
+		tlv_buf = &phyerr->buf[i + sizeof(*tlv)];
+		ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+			   "wmi event dfs tlv_len %d tlv_tag 0x%02X tlv_sig 0x%02X\n",
+			   tlv_len, tlv->tag, tlv->sig);
+
+		switch (tlv->tag) {
+		case PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY:
+			if (i + sizeof(*tlv) + sizeof(*rr) > buf_len) {
+				ath10k_warn(ar, "too short radar pulse summary (%d)\n",
+					    i);
+				return;
+			}
+
+			rr = (struct phyerr_radar_report *)tlv_buf;
+			ath10k_dfs_radar_report(ar, phyerr, rr, tsf);
+			break;
+		case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
+			if (i + sizeof(*tlv) + sizeof(*fftr) > buf_len) {
+				ath10k_warn(ar, "too short fft report (%d)\n",
+					    i);
+				return;
+			}
+
+			fftr = (struct phyerr_fft_report *)tlv_buf;
+			res = ath10k_dfs_fft_report(ar, phyerr, fftr, tsf);
+			if (res)
+				return;
+			break;
+		}
+
+		i += sizeof(*tlv) + tlv_len;
+	}
+}
+
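+/* Spectral scan phyerrs carry one or more search FFT report TLVs; each one
+ * is length-checked and handed to ath10k_spectral_process_fft() for further
+ * processing.
+ */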
+void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
+				    struct wmi_phyerr_ev_arg *phyerr,
+				    u64 tsf)
+{
+	int buf_len, tlv_len, res, i = 0;
+	struct phyerr_tlv *tlv;
+	const void *tlv_buf;
+	const struct phyerr_fft_report *fftr;
+	size_t fftr_len;
+
+	buf_len = phyerr->buf_len;
+
+	while (i < buf_len) {
+		if (i + sizeof(*tlv) > buf_len) {
+			ath10k_warn(ar, "failed to parse phyerr tlv header at byte %d\n",
+				    i);
+			return;
+		}
+
+		tlv = (struct phyerr_tlv *)&phyerr->buf[i];
+		tlv_len = __le16_to_cpu(tlv->len);
+		tlv_buf = &phyerr->buf[i + sizeof(*tlv)];
+
+		if (i + sizeof(*tlv) + tlv_len > buf_len) {
+			ath10k_warn(ar, "failed to parse phyerr tlv payload at byte %d\n",
+				    i);
+			return;
+		}
+
+		switch (tlv->tag) {
+		case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
+			if (sizeof(*fftr) > tlv_len) {
+				ath10k_warn(ar, "failed to parse fft report at byte %d\n",
+					    i);
+				return;
+			}
+
+			fftr_len = tlv_len - sizeof(*fftr);
+			fftr = tlv_buf;
+			res = ath10k_spectral_process_fft(ar, phyerr,
+							  fftr, fftr_len,
+							  tsf);
+			if (res < 0) {
+				ath10k_dbg(ar, ATH10K_DBG_WMI, "failed to process fft report: %d\n",
+					   res);
+				return;
+			}
+			break;
+		}
+
+		i += sizeof(*tlv) + tlv_len;
+	}
+}
+
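+/* Pull the common phyerr event header: number of bundled phyerrs and the
+ * split 64-bit TSF. The payload following the header is left for the
+ * per-phyerr parser.
+ */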
+static int ath10k_wmi_op_pull_phyerr_ev_hdr(struct ath10k *ar,
+					    struct sk_buff *skb,
+					    struct wmi_phyerr_hdr_arg *arg)
+{
+	struct wmi_phyerr_event *ev = (void *)skb->data;
+
+	if (skb->len < sizeof(*ev))
+		return -EPROTO;
+
+	arg->num_phyerrs = __le32_to_cpu(ev->num_phyerrs);
+	arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
+	arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
+	arg->buf_len = skb->len - sizeof(*ev);
+	arg->phyerrs = ev->phyerrs;
+
+	return 0;
+}
+
+static int ath10k_wmi_10_4_op_pull_phyerr_ev_hdr(struct ath10k *ar,
+						 struct sk_buff *skb,
+						 struct wmi_phyerr_hdr_arg *arg)
+{
+	struct wmi_10_4_phyerr_event *ev = (void *)skb->data;
+
+	if (skb->len < sizeof(*ev))
+		return -EPROTO;
+
+	/* 10.4 firmware always reports only one phyerr */
+	arg->num_phyerrs = 1;
+
+	arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
+	arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
+	arg->buf_len = skb->len;
+	arg->phyerrs = skb->data;
+
+	return 0;
+}
+
+int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar,
+				 const void *phyerr_buf,
+				 int left_len,
+				 struct wmi_phyerr_ev_arg *arg)
+{
+	const struct wmi_phyerr *phyerr = phyerr_buf;
+	int i;
+
+	if (left_len < sizeof(*phyerr)) {
+		ath10k_warn(ar, "wrong phyerr event head len %d (need: >=%zd)\n",
+			    left_len, sizeof(*phyerr));
+		return -EINVAL;
+	}
+
+	arg->tsf_timestamp = __le32_to_cpu(phyerr->tsf_timestamp);
+	arg->freq1 = __le16_to_cpu(phyerr->freq1);
+	arg->freq2 = __le16_to_cpu(phyerr->freq2);
+	arg->rssi_combined = phyerr->rssi_combined;
+	arg->chan_width_mhz = phyerr->chan_width_mhz;
+	arg->buf_len = __le32_to_cpu(phyerr->buf_len);
+	arg->buf = phyerr->buf;
+	arg->hdr_len = sizeof(*phyerr);
+
+	for (i = 0; i < 4; i++)
+		arg->nf_chains[i] = __le16_to_cpu(phyerr->nf_chains[i]);
+
+	switch (phyerr->phy_err_code) {
+	case PHY_ERROR_GEN_SPECTRAL_SCAN:
+		arg->phy_err_code = PHY_ERROR_SPECTRAL_SCAN;
+		break;
+	case PHY_ERROR_GEN_FALSE_RADAR_EXT:
+		arg->phy_err_code = PHY_ERROR_FALSE_RADAR_EXT;
+		break;
+	case PHY_ERROR_GEN_RADAR:
+		arg->phy_err_code = PHY_ERROR_RADAR;
+		break;
+	default:
+		arg->phy_err_code = PHY_ERROR_UNKNOWN;
+		break;
+	}
+
+	return 0;
+}
+
+static int ath10k_wmi_10_4_op_pull_phyerr_ev(struct ath10k *ar,
+					     const void *phyerr_buf,
+					     int left_len,
+					     struct wmi_phyerr_ev_arg *arg)
+{
+	const struct wmi_10_4_phyerr_event *phyerr = phyerr_buf;
+	u32 phy_err_mask;
+	int i;
+
+	if (left_len < sizeof(*phyerr)) {
+		ath10k_warn(ar, "wrong phyerr event head len %d (need: >=%zd)\n",
+			    left_len, sizeof(*phyerr));
+		return -EINVAL;
+	}
+
+	arg->tsf_timestamp = __le32_to_cpu(phyerr->tsf_timestamp);
+	arg->freq1 = __le16_to_cpu(phyerr->freq1);
+	arg->freq2 = __le16_to_cpu(phyerr->freq2);
+	arg->rssi_combined = phyerr->rssi_combined;
+	arg->chan_width_mhz = phyerr->chan_width_mhz;
+	arg->buf_len = __le32_to_cpu(phyerr->buf_len);
+	arg->buf = phyerr->buf;
+	arg->hdr_len = sizeof(*phyerr);
+
+	for (i = 0; i < 4; i++)
+		arg->nf_chains[i] = __le16_to_cpu(phyerr->nf_chains[i]);
+
+	phy_err_mask = __le32_to_cpu(phyerr->phy_err_mask[0]);
+
+	if (phy_err_mask & PHY_ERROR_10_4_SPECTRAL_SCAN_MASK)
+		arg->phy_err_code = PHY_ERROR_SPECTRAL_SCAN;
+	else if (phy_err_mask & PHY_ERROR_10_4_RADAR_MASK)
+		arg->phy_err_code = PHY_ERROR_RADAR;
+	else
+		arg->phy_err_code = PHY_ERROR_UNKNOWN;
+
+	return 0;
+}
+
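+/* Top-level phyerr dispatcher: reassemble the 64-bit TSF from the header
+ * (tsf = tsf_u32 << 32 | tsf_l32), then iterate over the bundled phyerrs,
+ * length-checking each one and routing it to the DFS and/or spectral scan
+ * handlers based on its error code.
+ */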
+void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
+{
+	struct wmi_phyerr_hdr_arg hdr_arg = {};
+	struct wmi_phyerr_ev_arg phyerr_arg = {};
+	const void *phyerr;
+	u32 count, i, buf_len, phy_err_code;
+	u64 tsf;
+	int left_len, ret;
+
+	ATH10K_DFS_STAT_INC(ar, phy_errors);
+
+	ret = ath10k_wmi_pull_phyerr_hdr(ar, skb, &hdr_arg);
+	if (ret) {
+		ath10k_warn(ar, "failed to parse phyerr event hdr: %d\n", ret);
+		return;
+	}
+
+	/* Check number of included events */
+	count = hdr_arg.num_phyerrs;
+
+	left_len = hdr_arg.buf_len;
+
+	tsf = hdr_arg.tsf_u32;
+	tsf <<= 32;
+	tsf |= hdr_arg.tsf_l32;
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi event phyerr count %d tsf64 0x%llX\n",
+		   count, tsf);
+
+	phyerr = hdr_arg.phyerrs;
+	for (i = 0; i < count; i++) {
+		ret = ath10k_wmi_pull_phyerr(ar, phyerr, left_len, &phyerr_arg);
+		if (ret) {
+			ath10k_warn(ar, "failed to parse phyerr event (%d)\n",
+				    i);
+			return;
+		}
+
+		left_len -= phyerr_arg.hdr_len;
+		buf_len = phyerr_arg.buf_len;
+		phy_err_code = phyerr_arg.phy_err_code;
+
+		if (left_len < buf_len) {
+			ath10k_warn(ar, "single event (%d) wrong buf len\n", i);
+			return;
+		}
+
+		left_len -= buf_len;
+
+		switch (phy_err_code) {
+		case PHY_ERROR_RADAR:
+			ath10k_wmi_event_dfs(ar, &phyerr_arg, tsf);
+			break;
+		case PHY_ERROR_SPECTRAL_SCAN:
+			ath10k_wmi_event_spectral_scan(ar, &phyerr_arg, tsf);
+			break;
+		case PHY_ERROR_FALSE_RADAR_EXT:
+			ath10k_wmi_event_dfs(ar, &phyerr_arg, tsf);
+			ath10k_wmi_event_spectral_scan(ar, &phyerr_arg, tsf);
+			break;
+		default:
+			break;
+		}
+
+		phyerr = phyerr + phyerr_arg.hdr_len + buf_len;
+	}
+}
+
+void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
+{
+	struct wmi_roam_ev_arg arg = {};
+	int ret;
+	u32 vdev_id;
+	u32 reason;
+	s32 rssi;
+
+	ret = ath10k_wmi_pull_roam_ev(ar, skb, &arg);
+	if (ret) {
+		ath10k_warn(ar, "failed to parse roam event: %d\n", ret);
+		return;
+	}
+
+	vdev_id = __le32_to_cpu(arg.vdev_id);
+	reason = __le32_to_cpu(arg.reason);
+	rssi = __le32_to_cpu(arg.rssi);
+	rssi += WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT;
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi roam event vdev %u reason 0x%08x rssi %d\n",
+		   vdev_id, reason, rssi);
+
+	if (reason >= WMI_ROAM_REASON_MAX)
+		ath10k_warn(ar, "ignoring unknown roam event reason %d on vdev %i\n",
+			    reason, vdev_id);
+
+	switch (reason) {
+	case WMI_ROAM_REASON_BEACON_MISS:
+		ath10k_mac_handle_beacon_miss(ar, vdev_id);
+		break;
+	case WMI_ROAM_REASON_BETTER_AP:
+	case WMI_ROAM_REASON_LOW_RSSI:
+	case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
+	case WMI_ROAM_REASON_HO_FAILED:
+		ath10k_warn(ar, "ignoring not implemented roam event reason %d on vdev %i\n",
+			    reason, vdev_id);
+		break;
+	}
+}
+
+void ath10k_wmi_event_profile_match(struct ath10k *ar, struct sk_buff *skb)
+{
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n");
+}
+
+void ath10k_wmi_event_debug_print(struct ath10k *ar, struct sk_buff *skb)
+{
+	char buf[101], c;
+	int i;
+
+	for (i = 0; i < sizeof(buf) - 1; i++) {
+		if (i >= skb->len)
+			break;
+
+		c = skb->data[i];
+
+		if (c == '\0')
+			break;
+
+		if (isascii(c) && isprint(c))
+			buf[i] = c;
+		else
+			buf[i] = '.';
+	}
+
+	if (i == sizeof(buf) - 1)
+		ath10k_warn(ar, "wmi debug print truncated: %d\n", skb->len);
+
+	/* for some reason the debug prints end with \n, remove that */
+	if (i && skb->data[i - 1] == '\n')
+		i--;
+
+	/* the last byte is always reserved for the null character */
+	buf[i] = '\0';
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI_PRINT, "wmi print '%s'\n", buf);
+}
+
+void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
+{
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n");
+}
+
+void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, struct sk_buff *skb)
+{
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n");
+}
+
+void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
+					     struct sk_buff *skb)
+{
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n");
+}
+
+void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
+					     struct sk_buff *skb)
+{
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n");
+}
+
+void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, struct sk_buff *skb)
+{
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n");
+}
+
+void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb)
+{
+	struct wmi_wow_ev_arg ev = {};
+	int ret;
+
+	complete(&ar->wow.wakeup_completed);
+
+	ret = ath10k_wmi_pull_wow_event(ar, skb, &ev);
+	if (ret) {
+		ath10k_warn(ar, "failed to parse wow wakeup event: %d\n", ret);
+		return;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wow wakeup host reason %s\n",
+		   wow_reason(ev.wake_reason));
+}
+
+void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb)
+{
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n");
+}
+
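+/* Compute the reported TX power for one rate/chain combination: start from
+ * the per-rate value capped by the regulatory limit for that chain count,
+ * then apply the STBC/TXBF/CDD limits when more chains than spatial streams
+ * are in use (and the preamble isn't CCK).
+ */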
+static u8 ath10k_tpc_config_get_rate(struct ath10k *ar,
+				     struct wmi_pdev_tpc_config_event *ev,
+				     u32 rate_idx, u32 num_chains,
+				     u32 rate_code, u8 type)
+{
+	u8 tpc, num_streams, preamble, ch, stm_idx;
+
+	num_streams = ATH10K_HW_NSS(rate_code);
+	preamble = ATH10K_HW_PREAMBLE(rate_code);
+	ch = num_chains - 1;
+
+	tpc = min_t(u8, ev->rates_array[rate_idx], ev->max_reg_allow_pow[ch]);
+
+	if (__le32_to_cpu(ev->num_tx_chain) <= 1)
+		goto out;
+
+	if (preamble == WMI_RATE_PREAMBLE_CCK)
+		goto out;
+
+	stm_idx = num_streams - 1;
+	if (num_chains <= num_streams)
+		goto out;
+
+	switch (type) {
+	case WMI_TPC_TABLE_TYPE_STBC:
+		tpc = min_t(u8, tpc,
+			    ev->max_reg_allow_pow_agstbc[ch - 1][stm_idx]);
+		break;
+	case WMI_TPC_TABLE_TYPE_TXBF:
+		tpc = min_t(u8, tpc,
+			    ev->max_reg_allow_pow_agtxbf[ch - 1][stm_idx]);
+		break;
+	case WMI_TPC_TABLE_TYPE_CDD:
+		tpc = min_t(u8, tpc,
+			    ev->max_reg_allow_pow_agcdd[ch - 1][stm_idx]);
+		break;
+	default:
+		ath10k_warn(ar, "unknown wmi tpc table type: %d\n", type);
+		tpc = 0;
+		break;
+	}
+
+out:
+	return tpc;
+}
+
+static void ath10k_tpc_config_disp_tables(struct ath10k *ar,
+					  struct wmi_pdev_tpc_config_event *ev,
+					  struct ath10k_tpc_stats *tpc_stats,
+					  u8 *rate_code, u16 *pream_table, u8 type)
+{
+	u32 i, j, pream_idx, flags;
+	u8 tpc[WMI_TPC_TX_N_CHAIN];
+	char tpc_value[WMI_TPC_TX_N_CHAIN * WMI_TPC_BUF_SIZE];
+	char buff[WMI_TPC_BUF_SIZE];
+
+	flags = __le32_to_cpu(ev->flags);
+
+	switch (type) {
+	case WMI_TPC_TABLE_TYPE_CDD:
+		if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD)) {
+			ath10k_dbg(ar, ATH10K_DBG_WMI, "CDD not supported\n");
+			tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
+			return;
+		}
+		break;
+	case WMI_TPC_TABLE_TYPE_STBC:
+		if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_STBC)) {
+			ath10k_dbg(ar, ATH10K_DBG_WMI, "STBC not supported\n");
+			tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
+			return;
+		}
+		break;
+	case WMI_TPC_TABLE_TYPE_TXBF:
+		if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_TXBF)) {
+			ath10k_dbg(ar, ATH10K_DBG_WMI, "TXBF not supported\n");
+			tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
+			return;
+		}
+		break;
+	default:
+		ath10k_dbg(ar, ATH10K_DBG_WMI,
+			   "invalid table type in wmi tpc event: %d\n", type);
+		return;
+	}
+
+	pream_idx = 0;
+	for (i = 0; i < __le32_to_cpu(ev->rate_max); i++) {
+		memset(tpc_value, 0, sizeof(tpc_value));
+		memset(buff, 0, sizeof(buff));
+		if (i == pream_table[pream_idx])
+			pream_idx++;
+
+		for (j = 0; j < WMI_TPC_TX_N_CHAIN; j++) {
+			if (j >= __le32_to_cpu(ev->num_tx_chain))
+				break;
+
+			tpc[j] = ath10k_tpc_config_get_rate(ar, ev, i, j + 1,
+							    rate_code[i],
+							    type);
+			snprintf(buff, sizeof(buff), "%8d ", tpc[j]);
+			strncat(tpc_value, buff, strlen(buff));
+		}
+		tpc_stats->tpc_table[type].pream_idx[i] = pream_idx;
+		tpc_stats->tpc_table[type].rate_code[i] = rate_code[i];
+		memcpy(tpc_stats->tpc_table[type].tpc_value[i],
+		       tpc_value, sizeof(tpc_value));
+	}
+}
+
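+/* TPC config event handler: rebuild the firmware's rate code table (CCK,
+ * OFDM, HT20/40 and VHT20/40/80 per chain) locally, convert the event
+ * fields and render the CDD/STBC/TXBF power tables for the debug
+ * interface.
+ */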
+void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
+{
+	u32 i, j, pream_idx, num_tx_chain;
+	u8 rate_code[WMI_TPC_RATE_MAX], rate_idx;
+	u16 pream_table[WMI_TPC_PREAM_TABLE_MAX];
+	struct wmi_pdev_tpc_config_event *ev;
+	struct ath10k_tpc_stats *tpc_stats;
+
+	ev = (struct wmi_pdev_tpc_config_event *)skb->data;
+
+	tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
+	if (!tpc_stats)
+		return;
+
+	/* Create the rate code table based on the chains supported */
+	rate_idx = 0;
+	pream_idx = 0;
+
+	/* Fill CCK rate code */
+	for (i = 0; i < 4; i++) {
+		rate_code[rate_idx] =
+			ATH10K_HW_RATECODE(i, 0, WMI_RATE_PREAMBLE_CCK);
+		rate_idx++;
+	}
+	pream_table[pream_idx] = rate_idx;
+	pream_idx++;
+
+	/* Fill OFDM rate code */
+	for (i = 0; i < 8; i++) {
+		rate_code[rate_idx] =
+			ATH10K_HW_RATECODE(i, 0, WMI_RATE_PREAMBLE_OFDM);
+		rate_idx++;
+	}
+	pream_table[pream_idx] = rate_idx;
+	pream_idx++;
+
+	num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
+
+	/* Fill HT20 rate code */
+	for (i = 0; i < num_tx_chain; i++) {
+		for (j = 0; j < 8; j++) {
+			rate_code[rate_idx] =
+			ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_HT);
+			rate_idx++;
+		}
+	}
+	pream_table[pream_idx] = rate_idx;
+	pream_idx++;
+
+	/* Fill HT40 rate code */
+	for (i = 0; i < num_tx_chain; i++) {
+		for (j = 0; j < 8; j++) {
+			rate_code[rate_idx] =
+			ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_HT);
+			rate_idx++;
+		}
+	}
+	pream_table[pream_idx] = rate_idx;
+	pream_idx++;
+
+	/* Fill VHT20 rate code */
+	for (i = 0; i < num_tx_chain; i++) {
+		for (j = 0; j < 10; j++) {
+			rate_code[rate_idx] =
+			ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT);
+			rate_idx++;
+		}
+	}
+	pream_table[pream_idx] = rate_idx;
+	pream_idx++;
+
+	/* Fill VHT40 rate code */
+	for (i = 0; i < num_tx_chain; i++) {
+		for (j = 0; j < 10; j++) {
+			rate_code[rate_idx] =
+			ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT);
+			rate_idx++;
+		}
+	}
+	pream_table[pream_idx] = rate_idx;
+	pream_idx++;
+
+	/* Fill VHT80 rate code */
+	for (i = 0; i < num_tx_chain; i++) {
+		for (j = 0; j < 10; j++) {
+			rate_code[rate_idx] =
+			ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT);
+			rate_idx++;
+		}
+	}
+	pream_table[pream_idx] = rate_idx;
+	pream_idx++;
+
+	rate_code[rate_idx++] =
+		ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_CCK);
+	rate_code[rate_idx++] =
+		ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM);
+	rate_code[rate_idx++] =
+		ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_CCK);
+	rate_code[rate_idx++] =
+		ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM);
+	rate_code[rate_idx++] =
+		ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM);
+
+	pream_table[pream_idx] = ATH10K_TPC_PREAM_TABLE_END;
+
+	tpc_stats->chan_freq = __le32_to_cpu(ev->chan_freq);
+	tpc_stats->phy_mode = __le32_to_cpu(ev->phy_mode);
+	tpc_stats->ctl = __le32_to_cpu(ev->ctl);
+	tpc_stats->reg_domain = __le32_to_cpu(ev->reg_domain);
+	tpc_stats->twice_antenna_gain = a_sle32_to_cpu(ev->twice_antenna_gain);
+	tpc_stats->twice_antenna_reduction =
+		__le32_to_cpu(ev->twice_antenna_reduction);
+	tpc_stats->power_limit = __le32_to_cpu(ev->power_limit);
+	tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power);
+	tpc_stats->num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
+	tpc_stats->rate_max = __le32_to_cpu(ev->rate_max);
+
+	ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
+				      rate_code, pream_table,
+				      WMI_TPC_TABLE_TYPE_CDD);
+	ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
+				      rate_code, pream_table,
+				      WMI_TPC_TABLE_TYPE_STBC);
+	ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
+				      rate_code, pream_table,
+				      WMI_TPC_TABLE_TYPE_TXBF);
+
+	ath10k_debug_tpc_stats_process(ar, tpc_stats);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi event tpc config channel %d mode %d ctl %d regd %d gain %d %d limit %d max_power %d tx_chains %d rates %d\n",
+		   __le32_to_cpu(ev->chan_freq),
+		   __le32_to_cpu(ev->phy_mode),
+		   __le32_to_cpu(ev->ctl),
+		   __le32_to_cpu(ev->reg_domain),
+		   a_sle32_to_cpu(ev->twice_antenna_gain),
+		   __le32_to_cpu(ev->twice_antenna_reduction),
+		   __le32_to_cpu(ev->power_limit),
+		   __le32_to_cpu(ev->twice_max_rd_power) / 2,
+		   __le32_to_cpu(ev->num_tx_chain),
+		   __le32_to_cpu(ev->rate_max));
+}
+
+void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb)
+{
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
+}
+
+void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar, struct sk_buff *skb)
+{
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n");
+}
+
+void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, struct sk_buff *skb)
+{
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n");
+}
+
+void ath10k_wmi_event_delba_complete(struct ath10k *ar, struct sk_buff *skb)
+{
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n");
+}
+
+void ath10k_wmi_event_addba_complete(struct ath10k *ar, struct sk_buff *skb)
+{
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n");
+}
+
+void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
+						struct sk_buff *skb)
+{
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
+}
+
+void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, struct sk_buff *skb)
+{
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n");
+}
+
+void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, struct sk_buff *skb)
+{
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n");
+}
+
+void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb)
+{
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n");
+}
+
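+/* Allocate one DMA-coherent memory chunk on behalf of the firmware and
+ * record its address, length and request id so it can be advertised in the
+ * WMI init command.
+ */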
+static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
+				     u32 num_units, u32 unit_len)
+{
+	dma_addr_t paddr;
+	u32 pool_size;
+	int idx = ar->wmi.num_mem_chunks;
+
+	pool_size = num_units * round_up(unit_len, 4);
+
+	if (!pool_size)
+		return -EINVAL;
+
+	ar->wmi.mem_chunks[idx].vaddr = dma_alloc_coherent(ar->dev,
+							   pool_size,
+							   &paddr,
+							   GFP_KERNEL);
+	if (!ar->wmi.mem_chunks[idx].vaddr) {
+		ath10k_warn(ar, "failed to allocate memory chunk\n");
+		return -ENOMEM;
+	}
+
+	memset(ar->wmi.mem_chunks[idx].vaddr, 0, pool_size);
+
+	ar->wmi.mem_chunks[idx].paddr = paddr;
+	ar->wmi.mem_chunks[idx].len = pool_size;
+	ar->wmi.mem_chunks[idx].req_id = req_id;
+	ar->wmi.num_mem_chunks++;
+
+	return 0;
+}
+
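+/* Check whether the chunks allocated for a previous service ready event
+ * still satisfy the current memory requests (same request ids and pool
+ * sizes). If so, the existing allocation can be reused instead of being
+ * freed and redone.
+ */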
+static bool
+ath10k_wmi_is_host_mem_allocated(struct ath10k *ar,
+				 const struct wlan_host_mem_req **mem_reqs,
+				 u32 num_mem_reqs)
+{
+	u32 req_id, num_units, unit_size, num_unit_info;
+	u32 pool_size;
+	int i, j;
+	bool found;
+
+	if (ar->wmi.num_mem_chunks != num_mem_reqs)
+		return false;
+
+	for (i = 0; i < num_mem_reqs; ++i) {
+		req_id = __le32_to_cpu(mem_reqs[i]->req_id);
+		num_units = __le32_to_cpu(mem_reqs[i]->num_units);
+		unit_size = __le32_to_cpu(mem_reqs[i]->unit_size);
+		num_unit_info = __le32_to_cpu(mem_reqs[i]->num_unit_info);
+
+		if (num_unit_info & NUM_UNITS_IS_NUM_ACTIVE_PEERS) {
+			if (ar->num_active_peers)
+				num_units = ar->num_active_peers + 1;
+			else
+				num_units = ar->max_num_peers + 1;
+		} else if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) {
+			num_units = ar->max_num_peers + 1;
+		} else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) {
+			num_units = ar->max_num_vdevs + 1;
+		}
+
+		found = false;
+		for (j = 0; j < ar->wmi.num_mem_chunks; j++) {
+			if (ar->wmi.mem_chunks[j].req_id == req_id) {
+				pool_size = num_units * round_up(unit_size, 4);
+				if (ar->wmi.mem_chunks[j].len == pool_size) {
+					found = true;
+					break;
+				}
+			}
+		}
+		if (!found)
+			return false;
+	}
+
+	return true;
+}
+
+static int
+ath10k_wmi_main_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
+				   struct wmi_svc_rdy_ev_arg *arg)
+{
+	struct wmi_service_ready_event *ev;
+	size_t i, n;
+
+	if (skb->len < sizeof(*ev))
+		return -EPROTO;
+
+	ev = (void *)skb->data;
+	skb_pull(skb, sizeof(*ev));
+	arg->min_tx_power = ev->hw_min_tx_power;
+	arg->max_tx_power = ev->hw_max_tx_power;
+	arg->ht_cap = ev->ht_cap_info;
+	arg->vht_cap = ev->vht_cap_info;
+	arg->sw_ver0 = ev->sw_version;
+	arg->sw_ver1 = ev->sw_version_1;
+	arg->phy_capab = ev->phy_capability;
+	arg->num_rf_chains = ev->num_rf_chains;
+	arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
+	arg->num_mem_reqs = ev->num_mem_reqs;
+	arg->service_map = ev->wmi_service_bitmap;
+	arg->service_map_len = sizeof(ev->wmi_service_bitmap);
+
+	n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
+		  ARRAY_SIZE(arg->mem_reqs));
+	for (i = 0; i < n; i++)
+		arg->mem_reqs[i] = &ev->mem_reqs[i];
+
+	if (skb->len <
+	    __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
+		return -EPROTO;
+
+	return 0;
+}
+
+static int
+ath10k_wmi_10x_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
+				  struct wmi_svc_rdy_ev_arg *arg)
+{
+	struct wmi_10x_service_ready_event *ev;
+	int i, n;
+
+	if (skb->len < sizeof(*ev))
+		return -EPROTO;
+
+	ev = (void *)skb->data;
+	skb_pull(skb, sizeof(*ev));
+	arg->min_tx_power = ev->hw_min_tx_power;
+	arg->max_tx_power = ev->hw_max_tx_power;
+	arg->ht_cap = ev->ht_cap_info;
+	arg->vht_cap = ev->vht_cap_info;
+	arg->sw_ver0 = ev->sw_version;
+	arg->phy_capab = ev->phy_capability;
+	arg->num_rf_chains = ev->num_rf_chains;
+	arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
+	arg->num_mem_reqs = ev->num_mem_reqs;
+	arg->service_map = ev->wmi_service_bitmap;
+	arg->service_map_len = sizeof(ev->wmi_service_bitmap);
+
+	n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
+		  ARRAY_SIZE(arg->mem_reqs));
+	for (i = 0; i < n; i++)
+		arg->mem_reqs[i] = &ev->mem_reqs[i];
+
+	if (skb->len <
+	    __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
+		return -EPROTO;
+
+	return 0;
+}
+
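+/* Deferred service ready processing (runs on the auxiliary workqueue):
+ * parse the capability and service bitmap, derive the firmware version and
+ * chain masks, size the peer/vdev tables and (re)allocate the host memory
+ * chunks requested by the firmware before completing the service_ready
+ * wait.
+ */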
+static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
+{
+	struct ath10k *ar = container_of(work, struct ath10k, svc_rdy_work);
+	struct sk_buff *skb = ar->svc_rdy_skb;
+	struct wmi_svc_rdy_ev_arg arg = {};
+	u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
+	int ret;
+	bool allocated;
+
+	if (!skb) {
+		ath10k_warn(ar, "invalid service ready event skb\n");
+		return;
+	}
+
+	ret = ath10k_wmi_pull_svc_rdy(ar, skb, &arg);
+	if (ret) {
+		ath10k_warn(ar, "failed to parse service ready: %d\n", ret);
+		return;
+	}
+
+	memset(&ar->wmi.svc_map, 0, sizeof(ar->wmi.svc_map));
+	ath10k_wmi_map_svc(ar, arg.service_map, ar->wmi.svc_map,
+			   arg.service_map_len);
+
+	ar->hw_min_tx_power = __le32_to_cpu(arg.min_tx_power);
+	ar->hw_max_tx_power = __le32_to_cpu(arg.max_tx_power);
+	ar->ht_cap_info = __le32_to_cpu(arg.ht_cap);
+	ar->vht_cap_info = __le32_to_cpu(arg.vht_cap);
+	ar->fw_version_major =
+		(__le32_to_cpu(arg.sw_ver0) & 0xff000000) >> 24;
+	ar->fw_version_minor = (__le32_to_cpu(arg.sw_ver0) & 0x00ffffff);
+	ar->fw_version_release =
+		(__le32_to_cpu(arg.sw_ver1) & 0xffff0000) >> 16;
+	ar->fw_version_build = (__le32_to_cpu(arg.sw_ver1) & 0x0000ffff);
+	ar->phy_capability = __le32_to_cpu(arg.phy_capab);
+	ar->num_rf_chains = __le32_to_cpu(arg.num_rf_chains);
+	ar->ath_common.regulatory.current_rd = __le32_to_cpu(arg.eeprom_rd);
+
+	ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
+			arg.service_map, arg.service_map_len);
+
+	/* only manually set fw features when not using FW IE format */
+	if (ar->fw_api == 1 && ar->fw_version_build > 636)
+		set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features);
+
+	if (ar->num_rf_chains > ar->max_spatial_stream) {
+		ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
+			    ar->num_rf_chains, ar->max_spatial_stream);
+		ar->num_rf_chains = ar->max_spatial_stream;
+	}
+
+	if (!ar->cfg_tx_chainmask) {
+		ar->cfg_tx_chainmask = (1 << ar->num_rf_chains) - 1;
+		ar->cfg_rx_chainmask = (1 << ar->num_rf_chains) - 1;
+	}
+
+	if (strlen(ar->hw->wiphy->fw_version) == 0) {
+		snprintf(ar->hw->wiphy->fw_version,
+			 sizeof(ar->hw->wiphy->fw_version),
+			 "%u.%u.%u.%u",
+			 ar->fw_version_major,
+			 ar->fw_version_minor,
+			 ar->fw_version_release,
+			 ar->fw_version_build);
+	}
+
+	num_mem_reqs = __le32_to_cpu(arg.num_mem_reqs);
+	if (num_mem_reqs > WMI_MAX_MEM_REQS) {
+		ath10k_warn(ar, "requested memory chunks number (%d) exceeds the limit\n",
+			    num_mem_reqs);
+		return;
+	}
+
+	if (test_bit(WMI_SERVICE_PEER_CACHING, ar->wmi.svc_map)) {
+		ar->max_num_peers = TARGET_10_4_NUM_QCACHE_PEERS_MAX +
+				    TARGET_10_4_NUM_VDEVS;
+		ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS +
+				       TARGET_10_4_NUM_VDEVS;
+		ar->num_tids = ar->num_active_peers * 2;
+		ar->max_num_stations = TARGET_10_4_NUM_QCACHE_PEERS_MAX;
+	}
+
+	/* TODO: Adjust max peer count for cases like WMI_SERVICE_RATECTRL_CACHE
+	 * and WMI_SERVICE_IRAM_TIDS, etc.
+	 */
+
+	allocated = ath10k_wmi_is_host_mem_allocated(ar, arg.mem_reqs,
+						     num_mem_reqs);
+	if (allocated)
+		goto skip_mem_alloc;
+
+	/* Either this event is received during boot time or there is a change
+	 * in memory requirement from firmware when compared to last request.
+	 * Free any old memory and do a fresh allocation based on the current
+	 * memory requirement.
+	 */
+	ath10k_wmi_free_host_mem(ar);
+
+	for (i = 0; i < num_mem_reqs; ++i) {
+		req_id = __le32_to_cpu(arg.mem_reqs[i]->req_id);
+		num_units = __le32_to_cpu(arg.mem_reqs[i]->num_units);
+		unit_size = __le32_to_cpu(arg.mem_reqs[i]->unit_size);
+		num_unit_info = __le32_to_cpu(arg.mem_reqs[i]->num_unit_info);
+
+		if (num_unit_info & NUM_UNITS_IS_NUM_ACTIVE_PEERS) {
+			if (ar->num_active_peers)
+				num_units = ar->num_active_peers + 1;
+			else
+				num_units = ar->max_num_peers + 1;
+		} else if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) {
+			/* The number of units to allocate is the number of
+			 * peers plus one extra for the self peer on the
+			 * target. This must stay in sync with the target or
+			 * the two sides can diverge.
+			 */
+			num_units = ar->max_num_peers + 1;
+		} else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) {
+			num_units = ar->max_num_vdevs + 1;
+		}
+
+		ath10k_dbg(ar, ATH10K_DBG_WMI,
+			   "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n",
+			   req_id,
+			   __le32_to_cpu(arg.mem_reqs[i]->num_units),
+			   num_unit_info,
+			   unit_size,
+			   num_units);
+
+		ret = ath10k_wmi_alloc_host_mem(ar, req_id, num_units,
+						unit_size);
+		if (ret)
+			return;
+	}
+
+skip_mem_alloc:
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi event service ready min_tx_power 0x%08x max_tx_power 0x%08x ht_cap 0x%08x vht_cap 0x%08x sw_ver0 0x%08x sw_ver1 0x%08x fw_build 0x%08x phy_capab 0x%08x num_rf_chains 0x%08x eeprom_rd 0x%08x num_mem_reqs 0x%08x\n",
+		   __le32_to_cpu(arg.min_tx_power),
+		   __le32_to_cpu(arg.max_tx_power),
+		   __le32_to_cpu(arg.ht_cap),
+		   __le32_to_cpu(arg.vht_cap),
+		   __le32_to_cpu(arg.sw_ver0),
+		   __le32_to_cpu(arg.sw_ver1),
+		   __le32_to_cpu(arg.fw_build),
+		   __le32_to_cpu(arg.phy_capab),
+		   __le32_to_cpu(arg.num_rf_chains),
+		   __le32_to_cpu(arg.eeprom_rd),
+		   __le32_to_cpu(arg.num_mem_reqs));
+
+	dev_kfree_skb(skb);
+	ar->svc_rdy_skb = NULL;
+	complete(&ar->wmi.service_ready);
+}
+
+void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
+{
+	ar->svc_rdy_skb = skb;
+	queue_work(ar->workqueue_aux, &ar->svc_rdy_work);
+}
+
+static int ath10k_wmi_op_pull_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
+				     struct wmi_rdy_ev_arg *arg)
+{
+	struct wmi_ready_event *ev = (void *)skb->data;
+
+	if (skb->len < sizeof(*ev))
+		return -EPROTO;
+
+	skb_pull(skb, sizeof(*ev));
+	arg->sw_version = ev->sw_version;
+	arg->abi_version = ev->abi_version;
+	arg->status = ev->status;
+	arg->mac_addr = ev->mac_addr.addr;
+
+	return 0;
+}
+
+static int ath10k_wmi_op_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
+				      struct wmi_roam_ev_arg *arg)
+{
+	struct wmi_roam_ev *ev = (void *)skb->data;
+
+	if (skb->len < sizeof(*ev))
+		return -EPROTO;
+
+	skb_pull(skb, sizeof(*ev));
+	arg->vdev_id = ev->vdev_id;
+	arg->reason = ev->reason;
+
+	return 0;
+}
+
+int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
+{
+	struct wmi_rdy_ev_arg arg = {};
+	int ret;
+
+	ret = ath10k_wmi_pull_rdy(ar, skb, &arg);
+	if (ret) {
+		ath10k_warn(ar, "failed to parse ready event: %d\n", ret);
+		return ret;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d\n",
+		   __le32_to_cpu(arg.sw_version),
+		   __le32_to_cpu(arg.abi_version),
+		   arg.mac_addr,
+		   __le32_to_cpu(arg.status));
+
+	ether_addr_copy(ar->mac_addr, arg.mac_addr);
+	complete(&ar->wmi.unified_ready);
+	return 0;
+}
+
+static int ath10k_wmi_event_temperature(struct ath10k *ar, struct sk_buff *skb)
+{
+	const struct wmi_pdev_temperature_event *ev;
+
+	ev = (struct wmi_pdev_temperature_event *)skb->data;
+	if (WARN_ON(skb->len < sizeof(*ev)))
+		return -EPROTO;
+
+	ath10k_thermal_event_temperature(ar, __le32_to_cpu(ev->temperature));
+	return 0;
+}
+
+static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+	struct wmi_cmd_hdr *cmd_hdr;
+	enum wmi_event_id id;
+
+	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
+
+	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
+		goto out;
+
+	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
+
+	switch (id) {
+	case WMI_MGMT_RX_EVENTID:
+		ath10k_wmi_event_mgmt_rx(ar, skb);
+		/* mgmt_rx() owns the skb now! */
+		return;
+	case WMI_SCAN_EVENTID:
+		ath10k_wmi_event_scan(ar, skb);
+		break;
+	case WMI_CHAN_INFO_EVENTID:
+		ath10k_wmi_event_chan_info(ar, skb);
+		break;
+	case WMI_ECHO_EVENTID:
+		ath10k_wmi_event_echo(ar, skb);
+		break;
+	case WMI_DEBUG_MESG_EVENTID:
+		ath10k_wmi_event_debug_mesg(ar, skb);
+		break;
+	case WMI_UPDATE_STATS_EVENTID:
+		ath10k_wmi_event_update_stats(ar, skb);
+		break;
+	case WMI_VDEV_START_RESP_EVENTID:
+		ath10k_wmi_event_vdev_start_resp(ar, skb);
+		break;
+	case WMI_VDEV_STOPPED_EVENTID:
+		ath10k_wmi_event_vdev_stopped(ar, skb);
+		break;
+	case WMI_PEER_STA_KICKOUT_EVENTID:
+		ath10k_wmi_event_peer_sta_kickout(ar, skb);
+		break;
+	case WMI_HOST_SWBA_EVENTID:
+		ath10k_wmi_event_host_swba(ar, skb);
+		break;
+	case WMI_TBTTOFFSET_UPDATE_EVENTID:
+		ath10k_wmi_event_tbttoffset_update(ar, skb);
+		break;
+	case WMI_PHYERR_EVENTID:
+		ath10k_wmi_event_phyerr(ar, skb);
+		break;
+	case WMI_ROAM_EVENTID:
+		ath10k_wmi_event_roam(ar, skb);
+		break;
+	case WMI_PROFILE_MATCH:
+		ath10k_wmi_event_profile_match(ar, skb);
+		break;
+	case WMI_DEBUG_PRINT_EVENTID:
+		ath10k_wmi_event_debug_print(ar, skb);
+		break;
+	case WMI_PDEV_QVIT_EVENTID:
+		ath10k_wmi_event_pdev_qvit(ar, skb);
+		break;
+	case WMI_WLAN_PROFILE_DATA_EVENTID:
+		ath10k_wmi_event_wlan_profile_data(ar, skb);
+		break;
+	case WMI_RTT_MEASUREMENT_REPORT_EVENTID:
+		ath10k_wmi_event_rtt_measurement_report(ar, skb);
+		break;
+	case WMI_TSF_MEASUREMENT_REPORT_EVENTID:
+		ath10k_wmi_event_tsf_measurement_report(ar, skb);
+		break;
+	case WMI_RTT_ERROR_REPORT_EVENTID:
+		ath10k_wmi_event_rtt_error_report(ar, skb);
+		break;
+	case WMI_WOW_WAKEUP_HOST_EVENTID:
+		ath10k_wmi_event_wow_wakeup_host(ar, skb);
+		break;
+	case WMI_DCS_INTERFERENCE_EVENTID:
+		ath10k_wmi_event_dcs_interference(ar, skb);
+		break;
+	case WMI_PDEV_TPC_CONFIG_EVENTID:
+		ath10k_wmi_event_pdev_tpc_config(ar, skb);
+		break;
+	case WMI_PDEV_FTM_INTG_EVENTID:
+		ath10k_wmi_event_pdev_ftm_intg(ar, skb);
+		break;
+	case WMI_GTK_OFFLOAD_STATUS_EVENTID:
+		ath10k_wmi_event_gtk_offload_status(ar, skb);
+		break;
+	case WMI_GTK_REKEY_FAIL_EVENTID:
+		ath10k_wmi_event_gtk_rekey_fail(ar, skb);
+		break;
+	case WMI_TX_DELBA_COMPLETE_EVENTID:
+		ath10k_wmi_event_delba_complete(ar, skb);
+		break;
+	case WMI_TX_ADDBA_COMPLETE_EVENTID:
+		ath10k_wmi_event_addba_complete(ar, skb);
+		break;
+	case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
+		ath10k_wmi_event_vdev_install_key_complete(ar, skb);
+		break;
+	case WMI_SERVICE_READY_EVENTID:
+		ath10k_wmi_event_service_ready(ar, skb);
+		return;
+	case WMI_READY_EVENTID:
+		ath10k_wmi_event_ready(ar, skb);
+		break;
+	default:
+		ath10k_warn(ar, "Unknown eventid: %d\n", id);
+		break;
+	}
+
+out:
+	dev_kfree_skb(skb);
+}
+
+static void ath10k_wmi_10_1_op_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+	struct wmi_cmd_hdr *cmd_hdr;
+	enum wmi_10x_event_id id;
+	bool consumed;
+
+	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
+
+	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
+		goto out;
+
+	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
+
+	consumed = ath10k_tm_event_wmi(ar, id, skb);
+
+	/* The ready event must be handled normally in UTF mode as well so
+	 * that we know the UTF firmware has booted; all other WMI events
+	 * are simply passed through to testmode.
+	 */
+	if (consumed && id != WMI_10X_READY_EVENTID) {
+		ath10k_dbg(ar, ATH10K_DBG_WMI,
+			   "wmi testmode consumed 0x%x\n", id);
+		goto out;
+	}
+
+	switch (id) {
+	case WMI_10X_MGMT_RX_EVENTID:
+		ath10k_wmi_event_mgmt_rx(ar, skb);
+		/* mgmt_rx() owns the skb now! */
+		return;
+	case WMI_10X_SCAN_EVENTID:
+		ath10k_wmi_event_scan(ar, skb);
+		break;
+	case WMI_10X_CHAN_INFO_EVENTID:
+		ath10k_wmi_event_chan_info(ar, skb);
+		break;
+	case WMI_10X_ECHO_EVENTID:
+		ath10k_wmi_event_echo(ar, skb);
+		break;
+	case WMI_10X_DEBUG_MESG_EVENTID:
+		ath10k_wmi_event_debug_mesg(ar, skb);
+		break;
+	case WMI_10X_UPDATE_STATS_EVENTID:
+		ath10k_wmi_event_update_stats(ar, skb);
+		break;
+	case WMI_10X_VDEV_START_RESP_EVENTID:
+		ath10k_wmi_event_vdev_start_resp(ar, skb);
+		break;
+	case WMI_10X_VDEV_STOPPED_EVENTID:
+		ath10k_wmi_event_vdev_stopped(ar, skb);
+		break;
+	case WMI_10X_PEER_STA_KICKOUT_EVENTID:
+		ath10k_wmi_event_peer_sta_kickout(ar, skb);
+		break;
+	case WMI_10X_HOST_SWBA_EVENTID:
+		ath10k_wmi_event_host_swba(ar, skb);
+		break;
+	case WMI_10X_TBTTOFFSET_UPDATE_EVENTID:
+		ath10k_wmi_event_tbttoffset_update(ar, skb);
+		break;
+	case WMI_10X_PHYERR_EVENTID:
+		ath10k_wmi_event_phyerr(ar, skb);
+		break;
+	case WMI_10X_ROAM_EVENTID:
+		ath10k_wmi_event_roam(ar, skb);
+		break;
+	case WMI_10X_PROFILE_MATCH:
+		ath10k_wmi_event_profile_match(ar, skb);
+		break;
+	case WMI_10X_DEBUG_PRINT_EVENTID:
+		ath10k_wmi_event_debug_print(ar, skb);
+		break;
+	case WMI_10X_PDEV_QVIT_EVENTID:
+		ath10k_wmi_event_pdev_qvit(ar, skb);
+		break;
+	case WMI_10X_WLAN_PROFILE_DATA_EVENTID:
+		ath10k_wmi_event_wlan_profile_data(ar, skb);
+		break;
+	case WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID:
+		ath10k_wmi_event_rtt_measurement_report(ar, skb);
+		break;
+	case WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID:
+		ath10k_wmi_event_tsf_measurement_report(ar, skb);
+		break;
+	case WMI_10X_RTT_ERROR_REPORT_EVENTID:
+		ath10k_wmi_event_rtt_error_report(ar, skb);
+		break;
+	case WMI_10X_WOW_WAKEUP_HOST_EVENTID:
+		ath10k_wmi_event_wow_wakeup_host(ar, skb);
+		break;
+	case WMI_10X_DCS_INTERFERENCE_EVENTID:
+		ath10k_wmi_event_dcs_interference(ar, skb);
+		break;
+	case WMI_10X_PDEV_TPC_CONFIG_EVENTID:
+		ath10k_wmi_event_pdev_tpc_config(ar, skb);
+		break;
+	case WMI_10X_INST_RSSI_STATS_EVENTID:
+		ath10k_wmi_event_inst_rssi_stats(ar, skb);
+		break;
+	case WMI_10X_VDEV_STANDBY_REQ_EVENTID:
+		ath10k_wmi_event_vdev_standby_req(ar, skb);
+		break;
+	case WMI_10X_VDEV_RESUME_REQ_EVENTID:
+		ath10k_wmi_event_vdev_resume_req(ar, skb);
+		break;
+	case WMI_10X_SERVICE_READY_EVENTID:
+		ath10k_wmi_event_service_ready(ar, skb);
+		return;
+	case WMI_10X_READY_EVENTID:
+		ath10k_wmi_event_ready(ar, skb);
+		break;
+	case WMI_10X_PDEV_UTF_EVENTID:
+		/* ignore utf events */
+		break;
+	default:
+		ath10k_warn(ar, "Unknown eventid: %d\n", id);
+		break;
+	}
+
+out:
+	dev_kfree_skb(skb);
+}
+
+static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+	struct wmi_cmd_hdr *cmd_hdr;
+	enum wmi_10_2_event_id id;
+
+	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
+
+	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
+		goto out;
+
+	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
+
+	switch (id) {
+	case WMI_10_2_MGMT_RX_EVENTID:
+		ath10k_wmi_event_mgmt_rx(ar, skb);
+		/* mgmt_rx() owns the skb now! */
+		return;
+	case WMI_10_2_SCAN_EVENTID:
+		ath10k_wmi_event_scan(ar, skb);
+		break;
+	case WMI_10_2_CHAN_INFO_EVENTID:
+		ath10k_wmi_event_chan_info(ar, skb);
+		break;
+	case WMI_10_2_ECHO_EVENTID:
+		ath10k_wmi_event_echo(ar, skb);
+		break;
+	case WMI_10_2_DEBUG_MESG_EVENTID:
+		ath10k_wmi_event_debug_mesg(ar, skb);
+		break;
+	case WMI_10_2_UPDATE_STATS_EVENTID:
+		ath10k_wmi_event_update_stats(ar, skb);
+		break;
+	case WMI_10_2_VDEV_START_RESP_EVENTID:
+		ath10k_wmi_event_vdev_start_resp(ar, skb);
+		break;
+	case WMI_10_2_VDEV_STOPPED_EVENTID:
+		ath10k_wmi_event_vdev_stopped(ar, skb);
+		break;
+	case WMI_10_2_PEER_STA_KICKOUT_EVENTID:
+		ath10k_wmi_event_peer_sta_kickout(ar, skb);
+		break;
+	case WMI_10_2_HOST_SWBA_EVENTID:
+		ath10k_wmi_event_host_swba(ar, skb);
+		break;
+	case WMI_10_2_TBTTOFFSET_UPDATE_EVENTID:
+		ath10k_wmi_event_tbttoffset_update(ar, skb);
+		break;
+	case WMI_10_2_PHYERR_EVENTID:
+		ath10k_wmi_event_phyerr(ar, skb);
+		break;
+	case WMI_10_2_ROAM_EVENTID:
+		ath10k_wmi_event_roam(ar, skb);
+		break;
+	case WMI_10_2_PROFILE_MATCH:
+		ath10k_wmi_event_profile_match(ar, skb);
+		break;
+	case WMI_10_2_DEBUG_PRINT_EVENTID:
+		ath10k_wmi_event_debug_print(ar, skb);
+		break;
+	case WMI_10_2_PDEV_QVIT_EVENTID:
+		ath10k_wmi_event_pdev_qvit(ar, skb);
+		break;
+	case WMI_10_2_WLAN_PROFILE_DATA_EVENTID:
+		ath10k_wmi_event_wlan_profile_data(ar, skb);
+		break;
+	case WMI_10_2_RTT_MEASUREMENT_REPORT_EVENTID:
+		ath10k_wmi_event_rtt_measurement_report(ar, skb);
+		break;
+	case WMI_10_2_TSF_MEASUREMENT_REPORT_EVENTID:
+		ath10k_wmi_event_tsf_measurement_report(ar, skb);
+		break;
+	case WMI_10_2_RTT_ERROR_REPORT_EVENTID:
+		ath10k_wmi_event_rtt_error_report(ar, skb);
+		break;
+	case WMI_10_2_WOW_WAKEUP_HOST_EVENTID:
+		ath10k_wmi_event_wow_wakeup_host(ar, skb);
+		break;
+	case WMI_10_2_DCS_INTERFERENCE_EVENTID:
+		ath10k_wmi_event_dcs_interference(ar, skb);
+		break;
+	case WMI_10_2_PDEV_TPC_CONFIG_EVENTID:
+		ath10k_wmi_event_pdev_tpc_config(ar, skb);
+		break;
+	case WMI_10_2_INST_RSSI_STATS_EVENTID:
+		ath10k_wmi_event_inst_rssi_stats(ar, skb);
+		break;
+	case WMI_10_2_VDEV_STANDBY_REQ_EVENTID:
+		ath10k_wmi_event_vdev_standby_req(ar, skb);
+		break;
+	case WMI_10_2_VDEV_RESUME_REQ_EVENTID:
+		ath10k_wmi_event_vdev_resume_req(ar, skb);
+		break;
+	case WMI_10_2_SERVICE_READY_EVENTID:
+		ath10k_wmi_event_service_ready(ar, skb);
+		return;
+	case WMI_10_2_READY_EVENTID:
+		ath10k_wmi_event_ready(ar, skb);
+		break;
+	case WMI_10_2_PDEV_TEMPERATURE_EVENTID:
+		ath10k_wmi_event_temperature(ar, skb);
+		break;
+	case WMI_10_2_RTT_KEEPALIVE_EVENTID:
+	case WMI_10_2_GPIO_INPUT_EVENTID:
+	case WMI_10_2_PEER_RATECODE_LIST_EVENTID:
+	case WMI_10_2_GENERIC_BUFFER_EVENTID:
+	case WMI_10_2_MCAST_BUF_RELEASE_EVENTID:
+	case WMI_10_2_MCAST_LIST_AGEOUT_EVENTID:
+	case WMI_10_2_WDS_PEER_EVENTID:
+		ath10k_dbg(ar, ATH10K_DBG_WMI,
+			   "received event id %d not implemented\n", id);
+		break;
+	default:
+		ath10k_warn(ar, "Unknown eventid: %d\n", id);
+		break;
+	}
+
+out:
+	dev_kfree_skb(skb);
+}
+
+static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+	struct wmi_cmd_hdr *cmd_hdr;
+	enum wmi_10_4_event_id id;
+
+	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
+
+	if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
+		goto out;
+
+	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
+
+	switch (id) {
+	case WMI_10_4_MGMT_RX_EVENTID:
+		ath10k_wmi_event_mgmt_rx(ar, skb);
+		/* mgmt_rx() owns the skb now! */
+		return;
+	case WMI_10_4_ECHO_EVENTID:
+		ath10k_wmi_event_echo(ar, skb);
+		break;
+	case WMI_10_4_DEBUG_MESG_EVENTID:
+		ath10k_wmi_event_debug_mesg(ar, skb);
+		break;
+	case WMI_10_4_SERVICE_READY_EVENTID:
+		ath10k_wmi_event_service_ready(ar, skb);
+		return;
+	case WMI_10_4_SCAN_EVENTID:
+		ath10k_wmi_event_scan(ar, skb);
+		break;
+	case WMI_10_4_CHAN_INFO_EVENTID:
+		ath10k_wmi_event_chan_info(ar, skb);
+		break;
+	case WMI_10_4_PHYERR_EVENTID:
+		ath10k_wmi_event_phyerr(ar, skb);
+		break;
+	case WMI_10_4_READY_EVENTID:
+		ath10k_wmi_event_ready(ar, skb);
+		break;
+	case WMI_10_4_PEER_STA_KICKOUT_EVENTID:
+		ath10k_wmi_event_peer_sta_kickout(ar, skb);
+		break;
+	case WMI_10_4_HOST_SWBA_EVENTID:
+		ath10k_wmi_event_host_swba(ar, skb);
+		break;
+	case WMI_10_4_TBTTOFFSET_UPDATE_EVENTID:
+		ath10k_wmi_event_tbttoffset_update(ar, skb);
+		break;
+	case WMI_10_4_DEBUG_PRINT_EVENTID:
+		ath10k_wmi_event_debug_print(ar, skb);
+		break;
+	case WMI_10_4_VDEV_START_RESP_EVENTID:
+		ath10k_wmi_event_vdev_start_resp(ar, skb);
+		break;
+	case WMI_10_4_VDEV_STOPPED_EVENTID:
+		ath10k_wmi_event_vdev_stopped(ar, skb);
+		break;
+	case WMI_10_4_WOW_WAKEUP_HOST_EVENTID:
+		ath10k_dbg(ar, ATH10K_DBG_WMI,
+			   "received event id %d not implemented\n", id);
+		break;
+	case WMI_10_4_UPDATE_STATS_EVENTID:
+		ath10k_wmi_event_update_stats(ar, skb);
+		break;
+	default:
+		ath10k_warn(ar, "Unknown eventid: %d\n", id);
+		break;
+	}
+
+out:
+	dev_kfree_skb(skb);
+}
+
+static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+	int ret;
+
+	ret = ath10k_wmi_rx(ar, skb);
+	if (ret)
+		ath10k_warn(ar, "failed to process wmi rx: %d\n", ret);
+}
+
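+/* Connect the WMI control service over HTC and remember the endpoint id
+ * used for all subsequent WMI command submissions.
+ */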
+int ath10k_wmi_connect(struct ath10k *ar)
+{
+	int status;
+	struct ath10k_htc_svc_conn_req conn_req;
+	struct ath10k_htc_svc_conn_resp conn_resp;
+
+	memset(&conn_req, 0, sizeof(conn_req));
+	memset(&conn_resp, 0, sizeof(conn_resp));
+
+	/* these fields are the same for all service endpoints */
+	conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete;
+	conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx;
+	conn_req.ep_ops.ep_tx_credits = ath10k_wmi_op_ep_tx_credits;
+
+	/* connect to control service */
+	conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
+
+	status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
+	if (status) {
+		ath10k_warn(ar, "failed to connect to WMI CONTROL service status: %d\n",
+			    status);
+		return status;
+	}
+
+	ar->wmi.eid = conn_resp.eid;
+	return 0;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
+			      u16 ctl2g, u16 ctl5g,
+			      enum wmi_dfs_region dfs_reg)
+{
+	struct wmi_pdev_set_regdomain_cmd *cmd;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
+	cmd->reg_domain = __cpu_to_le32(rd);
+	cmd->reg_domain_2G = __cpu_to_le32(rd2g);
+	cmd->reg_domain_5G = __cpu_to_le32(rd5g);
+	cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
+	cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",
+		   rd, rd2g, rd5g, ctl2g, ctl5g);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_10x_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16
+				  rd5g, u16 ctl2g, u16 ctl5g,
+				  enum wmi_dfs_region dfs_reg)
+{
+	struct wmi_pdev_set_regdomain_cmd_10x *cmd;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_pdev_set_regdomain_cmd_10x *)skb->data;
+	cmd->reg_domain = __cpu_to_le32(rd);
+	cmd->reg_domain_2G = __cpu_to_le32(rd2g);
+	cmd->reg_domain_5G = __cpu_to_le32(rd5g);
+	cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
+	cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
+	cmd->dfs_domain = __cpu_to_le32(dfs_reg);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n",
+		   rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_pdev_suspend(struct ath10k *ar, u32 suspend_opt)
+{
+	struct wmi_pdev_suspend_cmd *cmd;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
+	cmd->suspend_opt = __cpu_to_le32(suspend_opt);
+
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_pdev_resume(struct ath10k *ar)
+{
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, 0);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
+{
+	struct wmi_pdev_set_param_cmd *cmd;
+	struct sk_buff *skb;
+
+	if (id == WMI_PDEV_PARAM_UNSUPPORTED) {
+		ath10k_warn(ar, "pdev param %d not supported by firmware\n",
+			    id);
+		return ERR_PTR(-EOPNOTSUPP);
+	}
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
+	cmd->param_id    = __cpu_to_le32(id);
+	cmd->param_value = __cpu_to_le32(value);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
+		   id, value);
+	return skb;
+}
+
+void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar,
+				    struct wmi_host_mem_chunks *chunks)
+{
+	struct host_memory_chunk *chunk;
+	int i;
+
+	chunks->count = __cpu_to_le32(ar->wmi.num_mem_chunks);
+
+	for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
+		chunk = &chunks->items[i];
+		chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
+		chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len);
+		chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
+
+		ath10k_dbg(ar, ATH10K_DBG_WMI,
+			   "wmi chunk %d len %d requested, addr 0x%llx\n",
+			   i,
+			   ar->wmi.mem_chunks[i].len,
+			   (unsigned long long)ar->wmi.mem_chunks[i].paddr);
+	}
+}
+
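+/* Build the WMI init command for main firmware: fill the resource config
+ * with the TARGET_* defaults and append the previously allocated host
+ * memory chunks.
+ */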
+static struct sk_buff *ath10k_wmi_op_gen_init(struct ath10k *ar)
+{
+	struct wmi_init_cmd *cmd;
+	struct sk_buff *buf;
+	struct wmi_resource_config config = {};
+	u32 len, val;
+
+	config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
+	config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS);
+	config.num_offload_peers = __cpu_to_le32(TARGET_NUM_OFFLOAD_PEERS);
+
+	config.num_offload_reorder_bufs =
+		__cpu_to_le32(TARGET_NUM_OFFLOAD_REORDER_BUFS);
+
+	config.num_peer_keys = __cpu_to_le32(TARGET_NUM_PEER_KEYS);
+	config.num_tids = __cpu_to_le32(TARGET_NUM_TIDS);
+	config.ast_skid_limit = __cpu_to_le32(TARGET_AST_SKID_LIMIT);
+	config.tx_chain_mask = __cpu_to_le32(TARGET_TX_CHAIN_MASK);
+	config.rx_chain_mask = __cpu_to_le32(TARGET_RX_CHAIN_MASK);
+	config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
+	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
+	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
+	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI);
+	config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
+	config.scan_max_pending_reqs =
+		__cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS);
+
+	config.bmiss_offload_max_vdev =
+		__cpu_to_le32(TARGET_BMISS_OFFLOAD_MAX_VDEV);
+
+	config.roam_offload_max_vdev =
+		__cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_VDEV);
+
+	config.roam_offload_max_ap_profiles =
+		__cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES);
+
+	config.num_mcast_groups = __cpu_to_le32(TARGET_NUM_MCAST_GROUPS);
+	config.num_mcast_table_elems =
+		__cpu_to_le32(TARGET_NUM_MCAST_TABLE_ELEMS);
+
+	config.mcast2ucast_mode = __cpu_to_le32(TARGET_MCAST2UCAST_MODE);
+	config.tx_dbg_log_size = __cpu_to_le32(TARGET_TX_DBG_LOG_SIZE);
+	config.num_wds_entries = __cpu_to_le32(TARGET_NUM_WDS_ENTRIES);
+	config.dma_burst_size = __cpu_to_le32(TARGET_DMA_BURST_SIZE);
+	config.mac_aggr_delim = __cpu_to_le32(TARGET_MAC_AGGR_DELIM);
+
+	val = TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
+	config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
+
+	config.vow_config = __cpu_to_le32(TARGET_VOW_CONFIG);
+
+	config.gtk_offload_max_vdev =
+		__cpu_to_le32(TARGET_GTK_OFFLOAD_MAX_VDEV);
+
+	config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC);
+	config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES);
+
+	len = sizeof(*cmd) +
+	      (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
+
+	buf = ath10k_wmi_alloc_skb(ar, len);
+	if (!buf)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_init_cmd *)buf->data;
+
+	memcpy(&cmd->resource_config, &config, sizeof(config));
+	ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init\n");
+	return buf;
+}
+
+static struct sk_buff *ath10k_wmi_10_1_op_gen_init(struct ath10k *ar)
+{
+	struct wmi_init_cmd_10x *cmd;
+	struct sk_buff *buf;
+	struct wmi_resource_config_10x config = {};
+	u32 len, val;
+
+	config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
+	config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
+	config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
+	config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
+	config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
+	config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
+	config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
+	config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
+	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
+	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
+	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
+	config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
+	config.scan_max_pending_reqs =
+		__cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
+
+	config.bmiss_offload_max_vdev =
+		__cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);
+
+	config.roam_offload_max_vdev =
+		__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);
+
+	config.roam_offload_max_ap_profiles =
+		__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);
+
+	config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
+	config.num_mcast_table_elems =
+		__cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);
+
+	config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
+	config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
+	config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
+	config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE);
+	config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
+
+	val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
+	config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
+
+	config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);
+
+	config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
+	config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
+
+	len = sizeof(*cmd) +
+	      (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
+
+	buf = ath10k_wmi_alloc_skb(ar, len);
+	if (!buf)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_init_cmd_10x *)buf->data;
+
+	memcpy(&cmd->resource_config, &config, sizeof(config));
+	ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10x\n");
+	return buf;
+}
+
+static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
+{
+	struct wmi_init_cmd_10_2 *cmd;
+	struct sk_buff *buf;
+	struct wmi_resource_config_10x config = {};
+	u32 len, val, features;
+
+	config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
+	config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
+	config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
+	config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
+	config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
+	config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
+	config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
+	config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
+	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
+	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
+	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
+	config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
+
+	config.scan_max_pending_reqs =
+		__cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
+
+	config.bmiss_offload_max_vdev =
+		__cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);
+
+	config.roam_offload_max_vdev =
+		__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);
+
+	config.roam_offload_max_ap_profiles =
+		__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);
+
+	config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
+	config.num_mcast_table_elems =
+		__cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);
+
+	config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
+	config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
+	config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
+	config.dma_burst_size = __cpu_to_le32(TARGET_10_2_DMA_BURST_SIZE);
+	config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
+
+	val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
+	config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
+
+	config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);
+
+	config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
+	config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
+
+	len = sizeof(*cmd) +
+	      (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
+
+	buf = ath10k_wmi_alloc_skb(ar, len);
+	if (!buf)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_init_cmd_10_2 *)buf->data;
+
+	features = WMI_10_2_RX_BATCH_MODE;
+	if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map))
+		features |= WMI_10_2_COEX_GPIO;
+	cmd->resource_config.feature_mask = __cpu_to_le32(features);
+
+	memcpy(&cmd->resource_config.common, &config, sizeof(config));
+	ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.2\n");
+	return buf;
+}
+
+static struct sk_buff *ath10k_wmi_10_4_op_gen_init(struct ath10k *ar)
+{
+	struct wmi_init_cmd_10_4 *cmd;
+	struct sk_buff *buf;
+	struct wmi_resource_config_10_4 config = {};
+	u32 len;
+
+	config.num_vdevs = __cpu_to_le32(ar->max_num_vdevs);
+	config.num_peers = __cpu_to_le32(ar->max_num_peers);
+	config.num_active_peers = __cpu_to_le32(ar->num_active_peers);
+	config.num_tids = __cpu_to_le32(ar->num_tids);
+
+	config.num_offload_peers = __cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_PEERS);
+	config.num_offload_reorder_buffs =
+			__cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS);
+	config.num_peer_keys  = __cpu_to_le32(TARGET_10_4_NUM_PEER_KEYS);
+	config.ast_skid_limit = __cpu_to_le32(TARGET_10_4_AST_SKID_LIMIT);
+	config.tx_chain_mask  = __cpu_to_le32(TARGET_10_4_TX_CHAIN_MASK);
+	config.rx_chain_mask  = __cpu_to_le32(TARGET_10_4_RX_CHAIN_MASK);
+
+	config.rx_timeout_pri[0] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
+	config.rx_timeout_pri[1] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
+	config.rx_timeout_pri[2] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
+	config.rx_timeout_pri[3] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_HI_PRI);
+
+	config.rx_decap_mode	    = __cpu_to_le32(ar->wmi.rx_decap_mode);
+	config.scan_max_pending_req = __cpu_to_le32(TARGET_10_4_SCAN_MAX_REQS);
+	config.bmiss_offload_max_vdev =
+			__cpu_to_le32(TARGET_10_4_BMISS_OFFLOAD_MAX_VDEV);
+	config.roam_offload_max_vdev  =
+			__cpu_to_le32(TARGET_10_4_ROAM_OFFLOAD_MAX_VDEV);
+	config.roam_offload_max_ap_profiles =
+			__cpu_to_le32(TARGET_10_4_ROAM_OFFLOAD_MAX_PROFILES);
+	config.num_mcast_groups = __cpu_to_le32(TARGET_10_4_NUM_MCAST_GROUPS);
+	config.num_mcast_table_elems =
+			__cpu_to_le32(TARGET_10_4_NUM_MCAST_TABLE_ELEMS);
+
+	config.mcast2ucast_mode = __cpu_to_le32(TARGET_10_4_MCAST2UCAST_MODE);
+	config.tx_dbg_log_size  = __cpu_to_le32(TARGET_10_4_TX_DBG_LOG_SIZE);
+	config.num_wds_entries  = __cpu_to_le32(TARGET_10_4_NUM_WDS_ENTRIES);
+	config.dma_burst_size   = __cpu_to_le32(TARGET_10_4_DMA_BURST_SIZE);
+	config.mac_aggr_delim   = __cpu_to_le32(TARGET_10_4_MAC_AGGR_DELIM);
+
+	config.rx_skip_defrag_timeout_dup_detection_check =
+	  __cpu_to_le32(TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK);
+
+	config.vow_config = __cpu_to_le32(TARGET_10_4_VOW_CONFIG);
+	config.gtk_offload_max_vdev =
+			__cpu_to_le32(TARGET_10_4_GTK_OFFLOAD_MAX_VDEV);
+	config.num_msdu_desc = __cpu_to_le32(TARGET_10_4_NUM_MSDU_DESC);
+	config.max_frag_entries = __cpu_to_le32(TARGET_10_4_11AC_TX_MAX_FRAGS);
+	config.max_peer_ext_stats =
+			__cpu_to_le32(TARGET_10_4_MAX_PEER_EXT_STATS);
+	config.smart_ant_cap = __cpu_to_le32(TARGET_10_4_SMART_ANT_CAP);
+
+	config.bk_minfree = __cpu_to_le32(TARGET_10_4_BK_MIN_FREE);
+	config.be_minfree = __cpu_to_le32(TARGET_10_4_BE_MIN_FREE);
+	config.vi_minfree = __cpu_to_le32(TARGET_10_4_VI_MIN_FREE);
+	config.vo_minfree = __cpu_to_le32(TARGET_10_4_VO_MIN_FREE);
+
+	config.rx_batchmode = __cpu_to_le32(TARGET_10_4_RX_BATCH_MODE);
+	config.tt_support =
+			__cpu_to_le32(TARGET_10_4_THERMAL_THROTTLING_CONFIG);
+	config.atf_config = __cpu_to_le32(TARGET_10_4_ATF_CONFIG);
+	config.iphdr_pad_config = __cpu_to_le32(TARGET_10_4_IPHDR_PAD_CONFIG);
+	config.qwrap_config = __cpu_to_le32(TARGET_10_4_QWRAP_CONFIG);
+
+	len = sizeof(*cmd) +
+	      (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
+
+	buf = ath10k_wmi_alloc_skb(ar, len);
+	if (!buf)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_init_cmd_10_4 *)buf->data;
+	memcpy(&cmd->resource_config, &config, sizeof(config));
+	ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.4\n");
+	return buf;
+}
+
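+/* Sanity check a scan request before it is packed into a WMI command. */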
+int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg)
+{
+	if (arg->ie_len && !arg->ie)
+		return -EINVAL;
+	if (arg->n_channels && !arg->channels)
+		return -EINVAL;
+	if (arg->n_ssids && !arg->ssids)
+		return -EINVAL;
+	if (arg->n_bssids && !arg->bssids)
+		return -EINVAL;
+
+	if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN)
+		return -EINVAL;
+	if (arg->n_channels > ARRAY_SIZE(arg->channels))
+		return -EINVAL;
+	if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID)
+		return -EINVAL;
+	if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID)
+		return -EINVAL;
+
+	return 0;
+}
+
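+/* Compute the size of the optional TLVs appended to a start scan command. */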
+static size_t
+ath10k_wmi_start_scan_tlvs_len(const struct wmi_start_scan_arg *arg)
+{
+	int len = 0;
+
+	if (arg->ie_len) {
+		len += sizeof(struct wmi_ie_data);
+		len += roundup(arg->ie_len, 4);
+	}
+
+	if (arg->n_channels) {
+		len += sizeof(struct wmi_chan_list);
+		len += sizeof(__le32) * arg->n_channels;
+	}
+
+	if (arg->n_ssids) {
+		len += sizeof(struct wmi_ssid_list);
+		len += sizeof(struct wmi_ssid) * arg->n_ssids;
+	}
+
+	if (arg->n_bssids) {
+		len += sizeof(struct wmi_bssid_list);
+		len += sizeof(struct wmi_mac_addr) * arg->n_bssids;
+	}
+
+	return len;
+}
+
+void ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
+				      const struct wmi_start_scan_arg *arg)
+{
+	u32 scan_id;
+	u32 scan_req_id;
+
+	scan_id  = WMI_HOST_SCAN_REQ_ID_PREFIX;
+	scan_id |= arg->scan_id;
+
+	scan_req_id  = WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
+	scan_req_id |= arg->scan_req_id;
+
+	cmn->scan_id            = __cpu_to_le32(scan_id);
+	cmn->scan_req_id        = __cpu_to_le32(scan_req_id);
+	cmn->vdev_id            = __cpu_to_le32(arg->vdev_id);
+	cmn->scan_priority      = __cpu_to_le32(arg->scan_priority);
+	cmn->notify_scan_events = __cpu_to_le32(arg->notify_scan_events);
+	cmn->dwell_time_active  = __cpu_to_le32(arg->dwell_time_active);
+	cmn->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive);
+	cmn->min_rest_time      = __cpu_to_le32(arg->min_rest_time);
+	cmn->max_rest_time      = __cpu_to_le32(arg->max_rest_time);
+	cmn->repeat_probe_time  = __cpu_to_le32(arg->repeat_probe_time);
+	cmn->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time);
+	cmn->idle_time          = __cpu_to_le32(arg->idle_time);
+	cmn->max_scan_time      = __cpu_to_le32(arg->max_scan_time);
+	cmn->probe_delay        = __cpu_to_le32(arg->probe_delay);
+	cmn->scan_ctrl_flags    = __cpu_to_le32(arg->scan_ctrl_flags);
+}
+
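+/*
+ * Pack the optional scan TLVs (channel, SSID, BSSID and IE lists). The
+ * total packed size must match ath10k_wmi_start_scan_tlvs_len().
+ */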
+static void
+ath10k_wmi_put_start_scan_tlvs(struct wmi_start_scan_tlvs *tlvs,
+			       const struct wmi_start_scan_arg *arg)
+{
+	struct wmi_ie_data *ie;
+	struct wmi_chan_list *channels;
+	struct wmi_ssid_list *ssids;
+	struct wmi_bssid_list *bssids;
+	void *ptr = tlvs->tlvs;
+	int i;
+
+	if (arg->n_channels) {
+		channels = ptr;
+		channels->tag = __cpu_to_le32(WMI_CHAN_LIST_TAG);
+		channels->num_chan = __cpu_to_le32(arg->n_channels);
+
+		for (i = 0; i < arg->n_channels; i++)
+			channels->channel_list[i].freq =
+				__cpu_to_le16(arg->channels[i]);
+
+		ptr += sizeof(*channels);
+		ptr += sizeof(__le32) * arg->n_channels;
+	}
+
+	if (arg->n_ssids) {
+		ssids = ptr;
+		ssids->tag = __cpu_to_le32(WMI_SSID_LIST_TAG);
+		ssids->num_ssids = __cpu_to_le32(arg->n_ssids);
+
+		for (i = 0; i < arg->n_ssids; i++) {
+			ssids->ssids[i].ssid_len =
+				__cpu_to_le32(arg->ssids[i].len);
+			memcpy(&ssids->ssids[i].ssid,
+			       arg->ssids[i].ssid,
+			       arg->ssids[i].len);
+		}
+
+		ptr += sizeof(*ssids);
+		ptr += sizeof(struct wmi_ssid) * arg->n_ssids;
+	}
+
+	if (arg->n_bssids) {
+		bssids = ptr;
+		bssids->tag = __cpu_to_le32(WMI_BSSID_LIST_TAG);
+		bssids->num_bssid = __cpu_to_le32(arg->n_bssids);
+
+		for (i = 0; i < arg->n_bssids; i++)
+			memcpy(&bssids->bssid_list[i],
+			       arg->bssids[i].bssid,
+			       ETH_ALEN);
+
+		ptr += sizeof(*bssids);
+		ptr += sizeof(struct wmi_mac_addr) * arg->n_bssids;
+	}
+
+	if (arg->ie_len) {
+		ie = ptr;
+		ie->tag = __cpu_to_le32(WMI_IE_TAG);
+		ie->ie_len = __cpu_to_le32(arg->ie_len);
+		memcpy(ie->ie_data, arg->ie, arg->ie_len);
+
+		ptr += sizeof(*ie);
+		ptr += roundup(arg->ie_len, 4);
+	}
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_start_scan(struct ath10k *ar,
+			     const struct wmi_start_scan_arg *arg)
+{
+	struct wmi_start_scan_cmd *cmd;
+	struct sk_buff *skb;
+	size_t len;
+	int ret;
+
+	ret = ath10k_wmi_start_scan_verify(arg);
+	if (ret)
+		return ERR_PTR(ret);
+
+	len = sizeof(*cmd) + ath10k_wmi_start_scan_tlvs_len(arg);
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_start_scan_cmd *)skb->data;
+
+	ath10k_wmi_put_start_scan_common(&cmd->common, arg);
+	ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
+
+	cmd->burst_duration_ms = __cpu_to_le32(0);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi start scan\n");
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_10x_op_gen_start_scan(struct ath10k *ar,
+				 const struct wmi_start_scan_arg *arg)
+{
+	struct wmi_10x_start_scan_cmd *cmd;
+	struct sk_buff *skb;
+	size_t len;
+	int ret;
+
+	ret = ath10k_wmi_start_scan_verify(arg);
+	if (ret)
+		return ERR_PTR(ret);
+
+	len = sizeof(*cmd) + ath10k_wmi_start_scan_tlvs_len(arg);
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_10x_start_scan_cmd *)skb->data;
+
+	ath10k_wmi_put_start_scan_common(&cmd->common, arg);
+	ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi 10x start scan\n");
+	return skb;
+}
+
+void ath10k_wmi_start_scan_init(struct ath10k *ar,
+				struct wmi_start_scan_arg *arg)
+{
+	/* set up commonly used values */
+	arg->scan_req_id = 1;
+	arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
+	arg->dwell_time_active = 50;
+	arg->dwell_time_passive = 150;
+	arg->min_rest_time = 50;
+	arg->max_rest_time = 500;
+	arg->repeat_probe_time = 0;
+	arg->probe_spacing_time = 0;
+	arg->idle_time = 0;
+	arg->max_scan_time = 20000;
+	arg->probe_delay = 5;
+	arg->notify_scan_events = WMI_SCAN_EVENT_STARTED
+		| WMI_SCAN_EVENT_COMPLETED
+		| WMI_SCAN_EVENT_BSS_CHANNEL
+		| WMI_SCAN_EVENT_FOREIGN_CHANNEL
+		| WMI_SCAN_EVENT_DEQUEUED;
+	arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
+	arg->n_bssids = 1;
+	arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF";
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_stop_scan(struct ath10k *ar,
+			    const struct wmi_stop_scan_arg *arg)
+{
+	struct wmi_stop_scan_cmd *cmd;
+	struct sk_buff *skb;
+	u32 scan_id;
+	u32 req_id;
+
+	if (arg->req_id > 0xFFF)
+		return ERR_PTR(-EINVAL);
+	if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
+		return ERR_PTR(-EINVAL);
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
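+	/* Tag host-originated IDs with the WMI host prefixes, mirroring
+	 * ath10k_wmi_put_start_scan_common().
+	 */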
+	scan_id = arg->u.scan_id;
+	scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
+
+	req_id = arg->req_id;
+	req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
+
+	cmd = (struct wmi_stop_scan_cmd *)skb->data;
+	cmd->req_type    = __cpu_to_le32(arg->req_type);
+	cmd->vdev_id     = __cpu_to_le32(arg->u.vdev_id);
+	cmd->scan_id     = __cpu_to_le32(scan_id);
+	cmd->scan_req_id = __cpu_to_le32(req_id);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
+		   arg->req_id, arg->req_type, arg->u.scan_id);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_create(struct ath10k *ar, u32 vdev_id,
+			      enum wmi_vdev_type type,
+			      enum wmi_vdev_subtype subtype,
+			      const u8 macaddr[ETH_ALEN])
+{
+	struct wmi_vdev_create_cmd *cmd;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_vdev_create_cmd *)skb->data;
+	cmd->vdev_id      = __cpu_to_le32(vdev_id);
+	cmd->vdev_type    = __cpu_to_le32(type);
+	cmd->vdev_subtype = __cpu_to_le32(subtype);
+	ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
+		   vdev_id, type, subtype, macaddr);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id)
+{
+	struct wmi_vdev_delete_cmd *cmd;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_vdev_delete_cmd *)skb->data;
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "WMI vdev delete id %d\n", vdev_id);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_start(struct ath10k *ar,
+			     const struct wmi_vdev_start_request_arg *arg,
+			     bool restart)
+{
+	struct wmi_vdev_start_request_cmd *cmd;
+	struct sk_buff *skb;
+	const char *cmdname;
+	u32 flags = 0;
+
+	if (WARN_ON(arg->hidden_ssid && !arg->ssid))
+		return ERR_PTR(-EINVAL);
+	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
+		return ERR_PTR(-EINVAL);
+
+	if (restart)
+		cmdname = "restart";
+	else
+		cmdname = "start";
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	if (arg->hidden_ssid)
+		flags |= WMI_VDEV_START_HIDDEN_SSID;
+	if (arg->pmf_enabled)
+		flags |= WMI_VDEV_START_PMF_ENABLED;
+
+	cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
+	cmd->vdev_id         = __cpu_to_le32(arg->vdev_id);
+	cmd->disable_hw_ack  = __cpu_to_le32(arg->disable_hw_ack);
+	cmd->beacon_interval = __cpu_to_le32(arg->bcn_intval);
+	cmd->dtim_period     = __cpu_to_le32(arg->dtim_period);
+	cmd->flags           = __cpu_to_le32(flags);
+	cmd->bcn_tx_rate     = __cpu_to_le32(arg->bcn_tx_rate);
+	cmd->bcn_tx_power    = __cpu_to_le32(arg->bcn_tx_power);
+
+	if (arg->ssid) {
+		cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
+		memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
+	}
+
+	ath10k_wmi_put_wmi_channel(&cmd->chan, &arg->channel);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, ch_flags: 0x%0X, max_power: %d\n",
+		   cmdname, arg->vdev_id,
+		   flags, arg->channel.freq, arg->channel.mode,
+		   cmd->chan.flags, arg->channel.max_power);
+
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id)
+{
+	struct wmi_vdev_stop_cmd *cmd;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_vdev_stop_cmd *)skb->data;
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
+			  const u8 *bssid)
+{
+	struct wmi_vdev_up_cmd *cmd;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_vdev_up_cmd *)skb->data;
+	cmd->vdev_id       = __cpu_to_le32(vdev_id);
+	cmd->vdev_assoc_id = __cpu_to_le32(aid);
+	ether_addr_copy(cmd->vdev_bssid.addr, bssid);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
+		   vdev_id, aid, bssid);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id)
+{
+	struct wmi_vdev_down_cmd *cmd;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_vdev_down_cmd *)skb->data;
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi mgmt vdev down id 0x%x\n", vdev_id);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id,
+				 u32 param_id, u32 param_value)
+{
+	struct wmi_vdev_set_param_cmd *cmd;
+	struct sk_buff *skb;
+
+	if (param_id == WMI_VDEV_PARAM_UNSUPPORTED) {
+		ath10k_dbg(ar, ATH10K_DBG_WMI,
+			   "vdev param %d not supported by firmware\n",
+			    param_id);
+		return ERR_PTR(-EOPNOTSUPP);
+	}
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
+	cmd->vdev_id     = __cpu_to_le32(vdev_id);
+	cmd->param_id    = __cpu_to_le32(param_id);
+	cmd->param_value = __cpu_to_le32(param_value);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi vdev id 0x%x set param %d value %d\n",
+		   vdev_id, param_id, param_value);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_install_key(struct ath10k *ar,
+				   const struct wmi_vdev_install_key_arg *arg)
+{
+	struct wmi_vdev_install_key_cmd *cmd;
+	struct sk_buff *skb;
+
+	if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
+		return ERR_PTR(-EINVAL);
+	if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
+		return ERR_PTR(-EINVAL);
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + arg->key_len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
+	cmd->vdev_id       = __cpu_to_le32(arg->vdev_id);
+	cmd->key_idx       = __cpu_to_le32(arg->key_idx);
+	cmd->key_flags     = __cpu_to_le32(arg->key_flags);
+	cmd->key_cipher    = __cpu_to_le32(arg->key_cipher);
+	cmd->key_len       = __cpu_to_le32(arg->key_len);
+	cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
+	cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);
+
+	if (arg->macaddr)
+		ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
+	if (arg->key_data)
+		memcpy(cmd->key_data, arg->key_data, arg->key_len);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi vdev install key idx %d cipher %d len %d\n",
+		   arg->key_idx, arg->key_cipher, arg->key_len);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_spectral_conf(struct ath10k *ar,
+				     const struct wmi_vdev_spectral_conf_arg *arg)
+{
+	struct wmi_vdev_spectral_conf_cmd *cmd;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_vdev_spectral_conf_cmd *)skb->data;
+	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+	cmd->scan_count = __cpu_to_le32(arg->scan_count);
+	cmd->scan_period = __cpu_to_le32(arg->scan_period);
+	cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
+	cmd->scan_fft_size = __cpu_to_le32(arg->scan_fft_size);
+	cmd->scan_gc_ena = __cpu_to_le32(arg->scan_gc_ena);
+	cmd->scan_restart_ena = __cpu_to_le32(arg->scan_restart_ena);
+	cmd->scan_noise_floor_ref = __cpu_to_le32(arg->scan_noise_floor_ref);
+	cmd->scan_init_delay = __cpu_to_le32(arg->scan_init_delay);
+	cmd->scan_nb_tone_thr = __cpu_to_le32(arg->scan_nb_tone_thr);
+	cmd->scan_str_bin_thr = __cpu_to_le32(arg->scan_str_bin_thr);
+	cmd->scan_wb_rpt_mode = __cpu_to_le32(arg->scan_wb_rpt_mode);
+	cmd->scan_rssi_rpt_mode = __cpu_to_le32(arg->scan_rssi_rpt_mode);
+	cmd->scan_rssi_thr = __cpu_to_le32(arg->scan_rssi_thr);
+	cmd->scan_pwr_format = __cpu_to_le32(arg->scan_pwr_format);
+	cmd->scan_rpt_mode = __cpu_to_le32(arg->scan_rpt_mode);
+	cmd->scan_bin_scale = __cpu_to_le32(arg->scan_bin_scale);
+	cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj);
+	cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask);
+
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id,
+				       u32 trigger, u32 enable)
+{
+	struct wmi_vdev_spectral_enable_cmd *cmd;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_vdev_spectral_enable_cmd *)skb->data;
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	cmd->trigger_cmd = __cpu_to_le32(trigger);
+	cmd->enable_cmd = __cpu_to_le32(enable);
+
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
+			      const u8 peer_addr[ETH_ALEN],
+			      enum wmi_peer_type peer_type)
+{
+	struct wmi_peer_create_cmd *cmd;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_peer_create_cmd *)skb->data;
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi peer create vdev_id %d peer_addr %pM\n",
+		   vdev_id, peer_addr);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id,
+			      const u8 peer_addr[ETH_ALEN])
+{
+	struct wmi_peer_delete_cmd *cmd;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_peer_delete_cmd *)skb->data;
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi peer delete vdev_id %d peer_addr %pM\n",
+		   vdev_id, peer_addr);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id,
+			     const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
+{
+	struct wmi_peer_flush_tids_cmd *cmd;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
+	cmd->vdev_id         = __cpu_to_le32(vdev_id);
+	cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
+	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",
+		   vdev_id, peer_addr, tid_bitmap);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
+				 const u8 *peer_addr,
+				 enum wmi_peer_param param_id,
+				 u32 param_value)
+{
+	struct wmi_peer_set_param_cmd *cmd;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_peer_set_param_cmd *)skb->data;
+	cmd->vdev_id     = __cpu_to_le32(vdev_id);
+	cmd->param_id    = __cpu_to_le32(param_id);
+	cmd->param_value = __cpu_to_le32(param_value);
+	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi vdev %d peer 0x%pM set param %d value %d\n",
+		   "wmi vdev %d peer %pM set param %d value %d\n",
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id,
+			     enum wmi_sta_ps_mode psmode)
+{
+	struct wmi_sta_powersave_mode_cmd *cmd;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_sta_powersave_mode_cmd *)skb->data;
+	cmd->vdev_id     = __cpu_to_le32(vdev_id);
+	cmd->sta_ps_mode = __cpu_to_le32(psmode);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi set powersave id 0x%x mode %d\n",
+		   vdev_id, psmode);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id,
+			     enum wmi_sta_powersave_param param_id,
+			     u32 value)
+{
+	struct wmi_sta_powersave_param_cmd *cmd;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
+	cmd->vdev_id     = __cpu_to_le32(vdev_id);
+	cmd->param_id    = __cpu_to_le32(param_id);
+	cmd->param_value = __cpu_to_le32(value);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi sta ps param vdev_id 0x%x param %d value %d\n",
+		   vdev_id, param_id, value);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+			    enum wmi_ap_ps_peer_param param_id, u32 value)
+{
+	struct wmi_ap_ps_peer_cmd *cmd;
+	struct sk_buff *skb;
+
+	if (!mac)
+		return ERR_PTR(-EINVAL);
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	cmd->param_id = __cpu_to_le32(param_id);
+	cmd->param_value = __cpu_to_le32(value);
+	ether_addr_copy(cmd->peer_macaddr.addr, mac);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
+		   vdev_id, param_id, value, mac);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_scan_chan_list(struct ath10k *ar,
+				 const struct wmi_scan_chan_list_arg *arg)
+{
+	struct wmi_scan_chan_list_cmd *cmd;
+	struct sk_buff *skb;
+	struct wmi_channel_arg *ch;
+	struct wmi_channel *ci;
+	int len;
+	int i;
+
+	len = sizeof(*cmd) + arg->n_channels * sizeof(struct wmi_channel);
+
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
+	cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
+
+	for (i = 0; i < arg->n_channels; i++) {
+		ch = &arg->channels[i];
+		ci = &cmd->chan_info[i];
+
+		ath10k_wmi_put_wmi_channel(ci, ch);
+	}
+
+	return skb;
+}
+
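+/* Fill the peer assoc fields shared by all firmware command layouts. */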
+static void
+ath10k_wmi_peer_assoc_fill(struct ath10k *ar, void *buf,
+			   const struct wmi_peer_assoc_complete_arg *arg)
+{
+	struct wmi_common_peer_assoc_complete_cmd *cmd = buf;
+
+	cmd->vdev_id            = __cpu_to_le32(arg->vdev_id);
+	cmd->peer_new_assoc     = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
+	cmd->peer_associd       = __cpu_to_le32(arg->peer_aid);
+	cmd->peer_flags         = __cpu_to_le32(arg->peer_flags);
+	cmd->peer_caps          = __cpu_to_le32(arg->peer_caps);
+	cmd->peer_listen_intval = __cpu_to_le32(arg->peer_listen_intval);
+	cmd->peer_ht_caps       = __cpu_to_le32(arg->peer_ht_caps);
+	cmd->peer_max_mpdu      = __cpu_to_le32(arg->peer_max_mpdu);
+	cmd->peer_mpdu_density  = __cpu_to_le32(arg->peer_mpdu_density);
+	cmd->peer_rate_caps     = __cpu_to_le32(arg->peer_rate_caps);
+	cmd->peer_nss           = __cpu_to_le32(arg->peer_num_spatial_streams);
+	cmd->peer_vht_caps      = __cpu_to_le32(arg->peer_vht_caps);
+	cmd->peer_phymode       = __cpu_to_le32(arg->peer_phymode);
+
+	ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
+
+	cmd->peer_legacy_rates.num_rates =
+		__cpu_to_le32(arg->peer_legacy_rates.num_rates);
+	memcpy(cmd->peer_legacy_rates.rates, arg->peer_legacy_rates.rates,
+	       arg->peer_legacy_rates.num_rates);
+
+	cmd->peer_ht_rates.num_rates =
+		__cpu_to_le32(arg->peer_ht_rates.num_rates);
+	memcpy(cmd->peer_ht_rates.rates, arg->peer_ht_rates.rates,
+	       arg->peer_ht_rates.num_rates);
+
+	cmd->peer_vht_rates.rx_max_rate =
+		__cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
+	cmd->peer_vht_rates.rx_mcs_set =
+		__cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
+	cmd->peer_vht_rates.tx_max_rate =
+		__cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
+	cmd->peer_vht_rates.tx_mcs_set =
+		__cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
+}
+
+static void
+ath10k_wmi_peer_assoc_fill_main(struct ath10k *ar, void *buf,
+				const struct wmi_peer_assoc_complete_arg *arg)
+{
+	struct wmi_main_peer_assoc_complete_cmd *cmd = buf;
+
+	ath10k_wmi_peer_assoc_fill(ar, buf, arg);
+	memset(cmd->peer_ht_info, 0, sizeof(cmd->peer_ht_info));
+}
+
+static void
+ath10k_wmi_peer_assoc_fill_10_1(struct ath10k *ar, void *buf,
+				const struct wmi_peer_assoc_complete_arg *arg)
+{
+	ath10k_wmi_peer_assoc_fill(ar, buf, arg);
+}
+
+static void
+ath10k_wmi_peer_assoc_fill_10_2(struct ath10k *ar, void *buf,
+				const struct wmi_peer_assoc_complete_arg *arg)
+{
+	struct wmi_10_2_peer_assoc_complete_cmd *cmd = buf;
+	int max_mcs, max_nss;
+	u32 info0;
+
+	/* TODO: Is using max values okay with firmware? */
+	max_mcs = 0xf;
+	max_nss = 0xf;
+
+	info0 = SM(max_mcs, WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX) |
+		SM(max_nss, WMI_PEER_ASSOC_INFO0_MAX_NSS);
+
+	ath10k_wmi_peer_assoc_fill(ar, buf, arg);
+	cmd->info0 = __cpu_to_le32(info0);
+}
+
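+/* Basic validation of peer assoc arguments before building the command. */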
+static int
+ath10k_wmi_peer_assoc_check_arg(const struct wmi_peer_assoc_complete_arg *arg)
+{
+	if (arg->peer_mpdu_density > 16)
+		return -EINVAL;
+	if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
+		return -EINVAL;
+	if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
+		return -EINVAL;
+
+	return 0;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_peer_assoc(struct ath10k *ar,
+			     const struct wmi_peer_assoc_complete_arg *arg)
+{
+	size_t len = sizeof(struct wmi_main_peer_assoc_complete_cmd);
+	struct sk_buff *skb;
+	int ret;
+
+	ret = ath10k_wmi_peer_assoc_check_arg(arg);
+	if (ret)
+		return ERR_PTR(ret);
+
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ath10k_wmi_peer_assoc_fill_main(ar, skb->data, arg);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi peer assoc vdev %d addr %pM (%s)\n",
+		   arg->vdev_id, arg->addr,
+		   arg->peer_reassoc ? "reassociate" : "new");
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_10_1_op_gen_peer_assoc(struct ath10k *ar,
+				  const struct wmi_peer_assoc_complete_arg *arg)
+{
+	size_t len = sizeof(struct wmi_10_1_peer_assoc_complete_cmd);
+	struct sk_buff *skb;
+	int ret;
+
+	ret = ath10k_wmi_peer_assoc_check_arg(arg);
+	if (ret)
+		return ERR_PTR(ret);
+
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi peer assoc vdev %d addr %pM (%s)\n",
+		   arg->vdev_id, arg->addr,
+		   arg->peer_reassoc ? "reassociate" : "new");
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_10_2_op_gen_peer_assoc(struct ath10k *ar,
+				  const struct wmi_peer_assoc_complete_arg *arg)
+{
+	size_t len = sizeof(struct wmi_10_2_peer_assoc_complete_cmd);
+	struct sk_buff *skb;
+	int ret;
+
+	ret = ath10k_wmi_peer_assoc_check_arg(arg);
+	if (ret)
+		return ERR_PTR(ret);
+
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ath10k_wmi_peer_assoc_fill_10_2(ar, skb->data, arg);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi peer assoc vdev %d addr %pM (%s)\n",
+		   arg->vdev_id, arg->addr,
+		   arg->peer_reassoc ? "reassociate" : "new");
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_10_2_op_gen_pdev_get_temperature(struct ath10k *ar)
+{
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, 0);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev get temperature\n");
+	return skb;
+}
+
+/* This function assumes the beacon is already DMA mapped */
+static struct sk_buff *
+ath10k_wmi_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id, const void *bcn,
+			     size_t bcn_len, u32 bcn_paddr, bool dtim_zero,
+			     bool deliver_cab)
+{
+	struct wmi_bcn_tx_ref_cmd *cmd;
+	struct sk_buff *skb;
+	struct ieee80211_hdr *hdr;
+	u16 fc;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	hdr = (struct ieee80211_hdr *)bcn;
+	fc = le16_to_cpu(hdr->frame_control);
+
+	cmd = (struct wmi_bcn_tx_ref_cmd *)skb->data;
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	cmd->data_len = __cpu_to_le32(bcn_len);
+	cmd->data_ptr = __cpu_to_le32(bcn_paddr);
+	cmd->msdu_id = 0;
+	cmd->frame_control = __cpu_to_le32(fc);
+	cmd->flags = 0;
+	cmd->antenna_mask = __cpu_to_le32(WMI_BCN_TX_REF_DEF_ANTENNA);
+
+	if (dtim_zero)
+		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);
+
+	if (deliver_cab)
+		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);
+
+	return skb;
+}
+
+void ath10k_wmi_set_wmm_param(struct wmi_wmm_params *params,
+			      const struct wmi_wmm_params_arg *arg)
+{
+	params->cwmin  = __cpu_to_le32(arg->cwmin);
+	params->cwmax  = __cpu_to_le32(arg->cwmax);
+	params->aifs   = __cpu_to_le32(arg->aifs);
+	params->txop   = __cpu_to_le32(arg->txop);
+	params->acm    = __cpu_to_le32(arg->acm);
+	params->no_ack = __cpu_to_le32(arg->no_ack);
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_pdev_set_wmm(struct ath10k *ar,
+			       const struct wmi_wmm_params_all_arg *arg)
+{
+	struct wmi_pdev_set_wmm_params *cmd;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_pdev_set_wmm_params *)skb->data;
+	ath10k_wmi_set_wmm_param(&cmd->ac_be, &arg->ac_be);
+	ath10k_wmi_set_wmm_param(&cmd->ac_bk, &arg->ac_bk);
+	ath10k_wmi_set_wmm_param(&cmd->ac_vi, &arg->ac_vi);
+	ath10k_wmi_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
+{
+	struct wmi_request_stats_cmd *cmd;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_request_stats_cmd *)skb->data;
+	cmd->stats_id = __cpu_to_le32(stats_mask);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi request stats 0x%08x\n",
+		   stats_mask);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_force_fw_hang(struct ath10k *ar,
+				enum wmi_force_fw_hang_type type, u32 delay_ms)
+{
+	struct wmi_force_fw_hang_cmd *cmd;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
+	cmd->type = __cpu_to_le32(type);
+	cmd->delay_ms = __cpu_to_le32(delay_ms);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
+		   type, delay_ms);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable,
+			     u32 log_level)
+{
+	struct wmi_dbglog_cfg_cmd *cmd;
+	struct sk_buff *skb;
+	u32 cfg;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_dbglog_cfg_cmd *)skb->data;
+
+	if (module_enable) {
+		cfg = SM(log_level,
+			 ATH10K_DBGLOG_CFG_LOG_LVL);
+	} else {
+		/* set back to defaults, all modules with WARN level */
+		cfg = SM(ATH10K_DBGLOG_LEVEL_WARN,
+			 ATH10K_DBGLOG_CFG_LOG_LVL);
+		module_enable = ~0;
+	}
+
+	cmd->module_enable = __cpu_to_le32(module_enable);
+	cmd->module_valid = __cpu_to_le32(~0);
+	cmd->config_enable = __cpu_to_le32(cfg);
+	cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi dbglog cfg modules %08x %08x config %08x %08x\n",
+		   __le32_to_cpu(cmd->module_enable),
+		   __le32_to_cpu(cmd->module_valid),
+		   __le32_to_cpu(cmd->config_enable),
+		   __le32_to_cpu(cmd->config_valid));
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_pktlog_enable(struct ath10k *ar, u32 ev_bitmap)
+{
+	struct wmi_pdev_pktlog_enable_cmd *cmd;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ev_bitmap &= ATH10K_PKTLOG_ANY;
+
+	cmd = (struct wmi_pdev_pktlog_enable_cmd *)skb->data;
+	cmd->ev_bitmap = __cpu_to_le32(ev_bitmap);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi enable pktlog filter 0x%08x\n",
+		   ev_bitmap);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_pktlog_disable(struct ath10k *ar)
+{
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, 0);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi disable pktlog\n");
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_pdev_set_quiet_mode(struct ath10k *ar, u32 period,
+				      u32 duration, u32 next_offset,
+				      u32 enabled)
+{
+	struct wmi_pdev_set_quiet_cmd *cmd;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_pdev_set_quiet_cmd *)skb->data;
+	cmd->period = __cpu_to_le32(period);
+	cmd->duration = __cpu_to_le32(duration);
+	cmd->next_start = __cpu_to_le32(next_offset);
+	cmd->enabled = __cpu_to_le32(enabled);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi quiet param: period %u duration %u enabled %d\n",
+		   period, duration, enabled);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_addba_clear_resp(struct ath10k *ar, u32 vdev_id,
+				   const u8 *mac)
+{
+	struct wmi_addba_clear_resp_cmd *cmd;
+	struct sk_buff *skb;
+
+	if (!mac)
+		return ERR_PTR(-EINVAL);
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	ether_addr_copy(cmd->peer_macaddr.addr, mac);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
+		   vdev_id, mac);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+			     u32 tid, u32 buf_size)
+{
+	struct wmi_addba_send_cmd *cmd;
+	struct sk_buff *skb;
+
+	if (!mac)
+		return ERR_PTR(-EINVAL);
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_addba_send_cmd *)skb->data;
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	ether_addr_copy(cmd->peer_macaddr.addr, mac);
+	cmd->tid = __cpu_to_le32(tid);
+	cmd->buffersize = __cpu_to_le32(buf_size);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
+		   vdev_id, mac, tid, buf_size);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+				 u32 tid, u32 status)
+{
+	struct wmi_addba_setresponse_cmd *cmd;
+	struct sk_buff *skb;
+
+	if (!mac)
+		return ERR_PTR(-EINVAL);
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	ether_addr_copy(cmd->peer_macaddr.addr, mac);
+	cmd->tid = __cpu_to_le32(tid);
+	cmd->statuscode = __cpu_to_le32(status);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
+		   vdev_id, mac, tid, status);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+			     u32 tid, u32 initiator, u32 reason)
+{
+	struct wmi_delba_send_cmd *cmd;
+	struct sk_buff *skb;
+
+	if (!mac)
+		return ERR_PTR(-EINVAL);
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_delba_send_cmd *)skb->data;
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	ether_addr_copy(cmd->peer_macaddr.addr, mac);
+	cmd->tid = __cpu_to_le32(tid);
+	cmd->initiator = __cpu_to_le32(initiator);
+	cmd->reasoncode = __cpu_to_le32(reason);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
+		   vdev_id, mac, tid, initiator, reason);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config(struct ath10k *ar, u32 param)
+{
+	struct wmi_pdev_get_tpc_config_cmd *cmd;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_pdev_get_tpc_config_cmd *)skb->data;
+	cmd->param = __cpu_to_le32(param);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi pdev get tpc config param:%d\n", param);
+	return skb;
+}
+
+size_t ath10k_wmi_fw_stats_num_peers(struct list_head *head)
+{
+	struct ath10k_fw_stats_peer *i;
+	size_t num = 0;
+
+	list_for_each_entry(i, head, list)
+		++num;
+
+	return num;
+}
+
+size_t ath10k_wmi_fw_stats_num_vdevs(struct list_head *head)
+{
+	struct ath10k_fw_stats_vdev *i;
+	size_t num = 0;
+
+	list_for_each_entry(i, head, list)
+		++num;
+
+	return num;
+}
+
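+/* Helpers that format firmware statistics into a human readable buffer. */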
+static void
+ath10k_wmi_fw_pdev_base_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
+				   char *buf, u32 *length)
+{
+	u32 len = *length;
+	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+	len += scnprintf(buf + len, buf_len - len, "%30s\n",
+			"ath10k PDEV stats");
+	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+			"=================");
+
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			"Channel noise floor", pdev->ch_noise_floor);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			"Channel TX power", pdev->chan_tx_power);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			"TX frame count", pdev->tx_frame_count);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			"RX frame count", pdev->rx_frame_count);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			"RX clear count", pdev->rx_clear_count);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			"Cycle count", pdev->cycle_count);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			"PHY error count", pdev->phy_err_count);
+
+	*length = len;
+}
+
+static void
+ath10k_wmi_fw_pdev_extra_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
+				    char *buf, u32 *length)
+{
+	u32 len = *length;
+	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			"RTS bad count", pdev->rts_bad);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			"RTS good count", pdev->rts_good);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			"FCS bad count", pdev->fcs_bad);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			"No beacon count", pdev->no_beacons);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			"MIB int count", pdev->mib_int_count);
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+	*length = len;
+}
+
+static void
+ath10k_wmi_fw_pdev_tx_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
+				 char *buf, u32 *length)
+{
+	u32 len = *length;
+	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
+			 "ath10k PDEV TX stats");
+	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+				 "=================");
+
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "HTT cookies queued", pdev->comp_queued);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "HTT cookies disp.", pdev->comp_delivered);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MSDU queued", pdev->msdu_enqued);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MPDU queued", pdev->mpdu_enqued);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MSDUs dropped", pdev->wmm_drop);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Local enqued", pdev->local_enqued);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Local freed", pdev->local_freed);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "HW queued", pdev->hw_queued);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "PPDUs reaped", pdev->hw_reaped);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Num underruns", pdev->underrun);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "PPDUs cleaned", pdev->tx_abort);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MPDUs requeued", pdev->mpdus_requed);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Excessive retries", pdev->tx_ko);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "HW rate", pdev->data_rc);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Sched self triggers", pdev->self_triggers);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Dropped due to SW retries",
+			 pdev->sw_retry_failure);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Illegal rate phy errors",
+			 pdev->illgl_rate_phy_err);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Pdev continuous xretry", pdev->pdev_cont_xretry);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "TX timeout", pdev->pdev_tx_timeout);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "PDEV resets", pdev->pdev_resets);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "PHY underrun", pdev->phy_underrun);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MPDU is more than txop limit", pdev->txop_ovf);
+	*length = len;
+}
+
+static void
+ath10k_wmi_fw_pdev_rx_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
+				 char *buf, u32 *length)
+{
+	u32 len = *length;
+	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
+			 "ath10k PDEV RX stats");
+	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+				 "=================");
+
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Mid PPDU route change",
+			 pdev->mid_ppdu_route_change);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Tot. number of statuses", pdev->status_rcvd);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Extra frags on ring 0", pdev->r0_frags);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Extra frags on ring 1", pdev->r1_frags);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Extra frags on ring 2", pdev->r2_frags);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Extra frags on ring 3", pdev->r3_frags);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MSDUs delivered to HTT", pdev->htt_msdus);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MPDUs delivered to HTT", pdev->htt_mpdus);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MSDUs delivered to stack", pdev->loc_msdus);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MPDUs delivered to stack", pdev->loc_mpdus);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Oversized AMSDUs", pdev->oversize_amsdu);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "PHY errors", pdev->phy_errs);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "PHY errors drops", pdev->phy_err_drop);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
+	*length = len;
+}
+
+static void
+ath10k_wmi_fw_vdev_stats_fill(const struct ath10k_fw_stats_vdev *vdev,
+			      char *buf, u32 *length)
+{
+	u32 len = *length;
+	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+	int i;
+
+	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+			"vdev id", vdev->vdev_id);
+	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+			"beacon snr", vdev->beacon_snr);
+	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+			"data snr", vdev->data_snr);
+	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+			"num rx frames", vdev->num_rx_frames);
+	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+			"num rts fail", vdev->num_rts_fail);
+	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+			"num rts success", vdev->num_rts_success);
+	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+			"num rx err", vdev->num_rx_err);
+	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+			"num rx discard", vdev->num_rx_discard);
+	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+			"num tx not acked", vdev->num_tx_not_acked);
+
+	for (i = 0; i < ARRAY_SIZE(vdev->num_tx_frames); i++)
+		len += scnprintf(buf + len, buf_len - len,
+				"%25s [%02d] %u\n",
+				"num tx frames", i,
+				vdev->num_tx_frames[i]);
+
+	for (i = 0; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++)
+		len += scnprintf(buf + len, buf_len - len,
+				"%25s [%02d] %u\n",
+				"num tx frames retries", i,
+				vdev->num_tx_frames_retries[i]);
+
+	for (i = 0; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++)
+		len += scnprintf(buf + len, buf_len - len,
+				"%25s [%02d] %u\n",
+				"num tx frames failures", i,
+				vdev->num_tx_frames_failures[i]);
+
+	for (i = 0; i < ARRAY_SIZE(vdev->tx_rate_history); i++)
+		len += scnprintf(buf + len, buf_len - len,
+				"%25s [%02d] 0x%08x\n",
+				"tx rate history", i,
+				vdev->tx_rate_history[i]);
+
+	for (i = 0; i < ARRAY_SIZE(vdev->beacon_rssi_history); i++)
+		len += scnprintf(buf + len, buf_len - len,
+				"%25s [%02d] %u\n",
+				"beacon rssi history", i,
+				vdev->beacon_rssi_history[i]);
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+	*length = len;
+}
+
+static void
+ath10k_wmi_fw_peer_stats_fill(const struct ath10k_fw_stats_peer *peer,
+			      char *buf, u32 *length)
+{
+	u32 len = *length;
+	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
+			"Peer MAC address", peer->peer_macaddr);
+	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+			"Peer RSSI", peer->peer_rssi);
+	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+			"Peer TX rate", peer->peer_tx_rate);
+	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+			"Peer RX rate", peer->peer_rx_rate);
+	len += scnprintf(buf + len, buf_len - len, "\n");
+	*length = len;
+}
+
+void ath10k_wmi_main_op_fw_stats_fill(struct ath10k *ar,
+				      struct ath10k_fw_stats *fw_stats,
+				      char *buf)
+{
+	u32 len = 0;
+	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+	const struct ath10k_fw_stats_pdev *pdev;
+	const struct ath10k_fw_stats_vdev *vdev;
+	const struct ath10k_fw_stats_peer *peer;
+	size_t num_peers;
+	size_t num_vdevs;
+
+	spin_lock_bh(&ar->data_lock);
+
+	pdev = list_first_entry_or_null(&fw_stats->pdevs,
+					struct ath10k_fw_stats_pdev, list);
+	if (!pdev) {
+		ath10k_warn(ar, "failed to get pdev stats\n");
+		goto unlock;
+	}
+
+	num_peers = ath10k_wmi_fw_stats_num_peers(&fw_stats->peers);
+	num_vdevs = ath10k_wmi_fw_stats_num_vdevs(&fw_stats->vdevs);
+
+	ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
+	ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
+	ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+			 "ath10k VDEV stats", num_vdevs);
+	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+				 "=================");
+
+	list_for_each_entry(vdev, &fw_stats->vdevs, list) {
+		ath10k_wmi_fw_vdev_stats_fill(vdev, buf, &len);
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+			 "ath10k PEER stats", num_peers);
+	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+				 "=================");
+
+	list_for_each_entry(peer, &fw_stats->peers, list) {
+		ath10k_wmi_fw_peer_stats_fill(peer, buf, &len);
+	}
+
+unlock:
+	spin_unlock_bh(&ar->data_lock);
+
+	if (len >= buf_len)
+		buf[len - 1] = 0;
+	else
+		buf[len] = 0;
+}
+
+void ath10k_wmi_10x_op_fw_stats_fill(struct ath10k *ar,
+				     struct ath10k_fw_stats *fw_stats,
+				     char *buf)
+{
+	unsigned int len = 0;
+	unsigned int buf_len = ATH10K_FW_STATS_BUF_SIZE;
+	const struct ath10k_fw_stats_pdev *pdev;
+	const struct ath10k_fw_stats_vdev *vdev;
+	const struct ath10k_fw_stats_peer *peer;
+	size_t num_peers;
+	size_t num_vdevs;
+
+	spin_lock_bh(&ar->data_lock);
+
+	pdev = list_first_entry_or_null(&fw_stats->pdevs,
+					struct ath10k_fw_stats_pdev, list);
+	if (!pdev) {
+		ath10k_warn(ar, "failed to get pdev stats\n");
+		goto unlock;
+	}
+
+	num_peers = ath10k_wmi_fw_stats_num_peers(&fw_stats->peers);
+	num_vdevs = ath10k_wmi_fw_stats_num_vdevs(&fw_stats->vdevs);
+
+	ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
+	ath10k_wmi_fw_pdev_extra_stats_fill(pdev, buf, &len);
+	ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
+	ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+			 "ath10k VDEV stats", num_vdevs);
+	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+				 "=================");
+
+	list_for_each_entry(vdev, &fw_stats->vdevs, list) {
+		ath10k_wmi_fw_vdev_stats_fill(vdev, buf, &len);
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+			 "ath10k PEER stats", num_peers);
+	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+				 "=================");
+
+	list_for_each_entry(peer, &fw_stats->peers, list) {
+		ath10k_wmi_fw_peer_stats_fill(peer, buf, &len);
+	}
+
+unlock:
+	spin_unlock_bh(&ar->data_lock);
+
+	if (len >= buf_len)
+		buf[len - 1] = 0;
+	else
+		buf[len] = 0;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
+					   u32 detect_level, u32 detect_margin)
+{
+	struct wmi_pdev_set_adaptive_cca_params *cmd;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_pdev_set_adaptive_cca_params *)skb->data;
+	cmd->enable = __cpu_to_le32(enable);
+	cmd->cca_detect_level = __cpu_to_le32(detect_level);
+	cmd->cca_detect_margin = __cpu_to_le32(detect_margin);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi pdev set adaptive cca params enable:%d detection level:%d detection margin:%d\n",
+		   enable, detect_level, detect_margin);
+	return skb;
+}
+
+void ath10k_wmi_10_4_op_fw_stats_fill(struct ath10k *ar,
+				      struct ath10k_fw_stats *fw_stats,
+				      char *buf)
+{
+	u32 len = 0;
+	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+	const struct ath10k_fw_stats_pdev *pdev;
+	const struct ath10k_fw_stats_vdev *vdev;
+	const struct ath10k_fw_stats_peer *peer;
+	size_t num_peers;
+	size_t num_vdevs;
+
+	spin_lock_bh(&ar->data_lock);
+
+	pdev = list_first_entry_or_null(&fw_stats->pdevs,
+					struct ath10k_fw_stats_pdev, list);
+	if (!pdev) {
+		ath10k_warn(ar, "failed to get pdev stats\n");
+		goto unlock;
+	}
+
+	num_peers = ath10k_wmi_fw_stats_num_peers(&fw_stats->peers);
+	num_vdevs = ath10k_wmi_fw_stats_num_vdevs(&fw_stats->vdevs);
+
+	ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
+	ath10k_wmi_fw_pdev_extra_stats_fill(pdev, buf, &len);
+	ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
+
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			"HW paused", pdev->hw_paused);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			"Seqs posted", pdev->seq_posted);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			"Seqs failed queueing", pdev->seq_failed_queueing);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			"Seqs completed", pdev->seq_completed);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			"Seqs restarted", pdev->seq_restarted);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			"MU Seqs posted", pdev->mu_seq_posted);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			"MPDUs SW flushed", pdev->mpdus_sw_flush);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			"MPDUs HW filtered", pdev->mpdus_hw_filter);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			"MPDUs truncated", pdev->mpdus_truncated);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			"MPDUs receive no ACK", pdev->mpdus_ack_failed);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			"MPDUs expired", pdev->mpdus_expired);
+
+	ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			"Num Rx Overflow errors", pdev->rx_ovfl_errs);
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+			"ath10k VDEV stats", num_vdevs);
+	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+				"=================");
+
+	list_for_each_entry(vdev, &fw_stats->vdevs, list) {
+		ath10k_wmi_fw_vdev_stats_fill(vdev, buf, &len);
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+			"ath10k PEER stats", num_peers);
+	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+				"=================");
+
+	list_for_each_entry(peer, &fw_stats->peers, list) {
+		ath10k_wmi_fw_peer_stats_fill(peer, buf, &len);
+	}
+
+unlock:
+	spin_unlock_bh(&ar->data_lock);
+
+	if (len >= buf_len)
+		buf[len - 1] = 0;
+	else
+		buf[len] = 0;
+}
+
+static const struct wmi_ops wmi_ops = {
+	.rx = ath10k_wmi_op_rx,
+	.map_svc = wmi_main_svc_map,
+
+	.pull_scan = ath10k_wmi_op_pull_scan_ev,
+	.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
+	.pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
+	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
+	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
+	.pull_swba = ath10k_wmi_op_pull_swba_ev,
+	.pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
+	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
+	.pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
+	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+	.pull_fw_stats = ath10k_wmi_main_op_pull_fw_stats,
+	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
+
+	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
+	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
+	.gen_pdev_set_rd = ath10k_wmi_op_gen_pdev_set_rd,
+	.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
+	.gen_init = ath10k_wmi_op_gen_init,
+	.gen_start_scan = ath10k_wmi_op_gen_start_scan,
+	.gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
+	.gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
+	.gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
+	.gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
+	.gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
+	.gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
+	.gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
+	.gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
+	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
+	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
+	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
+	/* .gen_vdev_wmm_conf not implemented */
+	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
+	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
+	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
+	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
+	.gen_peer_assoc = ath10k_wmi_op_gen_peer_assoc,
+	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
+	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
+	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
+	.gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
+	.gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
+	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
+	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
+	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
+	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
+	.gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
+	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
+	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
+	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
+	/* .gen_pdev_get_temperature not implemented */
+	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
+	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
+	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
+	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
+	.fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
+	/* .gen_bcn_tmpl not implemented */
+	/* .gen_prb_tmpl not implemented */
+	/* .gen_p2p_go_bcn_ie not implemented */
+	/* .gen_adaptive_qcs not implemented */
+	/* .gen_pdev_enable_adaptive_cca not implemented */
+};
+
+static const struct wmi_ops wmi_10_1_ops = {
+	.rx = ath10k_wmi_10_1_op_rx,
+	.map_svc = wmi_10x_svc_map,
+	.pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
+	.pull_fw_stats = ath10k_wmi_10x_op_pull_fw_stats,
+	.gen_init = ath10k_wmi_10_1_op_gen_init,
+	.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
+	.gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
+	.gen_peer_assoc = ath10k_wmi_10_1_op_gen_peer_assoc,
+	/* .gen_pdev_get_temperature not implemented */
+
+	/* shared with main branch */
+	.pull_scan = ath10k_wmi_op_pull_scan_ev,
+	.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
+	.pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
+	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
+	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
+	.pull_swba = ath10k_wmi_op_pull_swba_ev,
+	.pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
+	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
+	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
+
+	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
+	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
+	.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
+	.gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
+	.gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
+	.gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
+	.gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
+	.gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
+	.gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
+	.gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
+	.gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
+	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
+	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
+	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
+	/* .gen_vdev_wmm_conf not implemented */
+	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
+	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
+	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
+	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
+	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
+	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
+	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
+	.gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
+	.gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
+	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
+	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
+	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
+	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
+	.gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
+	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
+	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
+	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
+	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
+	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
+	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
+	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
+	.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
+	/* .gen_bcn_tmpl not implemented */
+	/* .gen_prb_tmpl not implemented */
+	/* .gen_p2p_go_bcn_ie not implemented */
+	/* .gen_adaptive_qcs not implemented */
+	/* .gen_pdev_enable_adaptive_cca not implemented */
+};
+
+static const struct wmi_ops wmi_10_2_ops = {
+	.rx = ath10k_wmi_10_2_op_rx,
+	.pull_fw_stats = ath10k_wmi_10_2_op_pull_fw_stats,
+	.gen_init = ath10k_wmi_10_2_op_gen_init,
+	.gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
+	/* .gen_pdev_get_temperature not implemented */
+
+	/* shared with 10.1 */
+	.map_svc = wmi_10x_svc_map,
+	.pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
+	.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
+	.gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
+
+	.pull_scan = ath10k_wmi_op_pull_scan_ev,
+	.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
+	.pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
+	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
+	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
+	.pull_swba = ath10k_wmi_op_pull_swba_ev,
+	.pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
+	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
+	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
+
+	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
+	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
+	.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
+	.gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
+	.gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
+	.gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
+	.gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
+	.gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
+	.gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
+	.gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
+	.gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
+	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
+	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
+	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
+	/* .gen_vdev_wmm_conf not implemented */
+	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
+	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
+	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
+	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
+	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
+	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
+	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
+	.gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
+	.gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
+	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
+	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
+	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
+	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
+	.gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
+	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
+	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
+	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
+	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
+	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
+	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
+	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
+	.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
+	/* .gen_pdev_enable_adaptive_cca not implemented */
+};
+
+static const struct wmi_ops wmi_10_2_4_ops = {
+	.rx = ath10k_wmi_10_2_op_rx,
+	.pull_fw_stats = ath10k_wmi_10_2_4_op_pull_fw_stats,
+	.gen_init = ath10k_wmi_10_2_op_gen_init,
+	.gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
+	.gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
+
+	/* shared with 10.1 */
+	.map_svc = wmi_10x_svc_map,
+	.pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
+	.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
+	.gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
+
+	.pull_scan = ath10k_wmi_op_pull_scan_ev,
+	.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
+	.pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
+	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
+	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
+	.pull_swba = ath10k_wmi_op_pull_swba_ev,
+	.pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
+	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
+	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
+
+	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
+	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
+	.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
+	.gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
+	.gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
+	.gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
+	.gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
+	.gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
+	.gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
+	.gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
+	.gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
+	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
+	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
+	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
+	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
+	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
+	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
+	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
+	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
+	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
+	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
+	.gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
+	.gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
+	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
+	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
+	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
+	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
+	.gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
+	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
+	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
+	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
+	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
+	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
+	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
+	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
+	.gen_pdev_get_tpc_config = ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config,
+	.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
+	.gen_pdev_enable_adaptive_cca =
+		ath10k_wmi_op_gen_pdev_enable_adaptive_cca,
+	/* .gen_bcn_tmpl not implemented */
+	/* .gen_prb_tmpl not implemented */
+	/* .gen_p2p_go_bcn_ie not implemented */
+	/* .gen_adaptive_qcs not implemented */
+};
+
+static const struct wmi_ops wmi_10_4_ops = {
+	.rx = ath10k_wmi_10_4_op_rx,
+	.map_svc = wmi_10_4_svc_map,
+
+	.pull_fw_stats = ath10k_wmi_10_4_op_pull_fw_stats,
+	.pull_scan = ath10k_wmi_op_pull_scan_ev,
+	.pull_mgmt_rx = ath10k_wmi_10_4_op_pull_mgmt_rx_ev,
+	.pull_ch_info = ath10k_wmi_10_4_op_pull_ch_info_ev,
+	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
+	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
+	.pull_swba = ath10k_wmi_10_4_op_pull_swba_ev,
+	.pull_phyerr_hdr = ath10k_wmi_10_4_op_pull_phyerr_ev_hdr,
+	.pull_phyerr = ath10k_wmi_10_4_op_pull_phyerr_ev,
+	.pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
+	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+	.get_txbf_conf_scheme = ath10k_wmi_10_4_txbf_conf_scheme,
+
+	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
+	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
+	.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
+	.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
+	.gen_init = ath10k_wmi_10_4_op_gen_init,
+	.gen_start_scan = ath10k_wmi_op_gen_start_scan,
+	.gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
+	.gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
+	.gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
+	.gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
+	.gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
+	.gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
+	.gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
+	.gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
+	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
+	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
+	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
+	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
+	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
+	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
+	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
+	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
+	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
+	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
+	.gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
+	.gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
+	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
+	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
+	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
+	.gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
+	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
+	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
+	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
+	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
+	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
+	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
+	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
+	.fw_stats_fill = ath10k_wmi_10_4_op_fw_stats_fill,
+
+	/* shared with 10.2 */
+	.gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
+	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
+};
+
+int ath10k_wmi_attach(struct ath10k *ar)
+{
+	switch (ar->wmi.op_version) {
+	case ATH10K_FW_WMI_OP_VERSION_10_4:
+		ar->wmi.ops = &wmi_10_4_ops;
+		ar->wmi.cmd = &wmi_10_4_cmd_map;
+		ar->wmi.vdev_param = &wmi_10_4_vdev_param_map;
+		ar->wmi.pdev_param = &wmi_10_4_pdev_param_map;
+		break;
+	case ATH10K_FW_WMI_OP_VERSION_10_2_4:
+		ar->wmi.cmd = &wmi_10_2_4_cmd_map;
+		ar->wmi.ops = &wmi_10_2_4_ops;
+		ar->wmi.vdev_param = &wmi_10_2_4_vdev_param_map;
+		ar->wmi.pdev_param = &wmi_10_2_4_pdev_param_map;
+		break;
+	case ATH10K_FW_WMI_OP_VERSION_10_2:
+		ar->wmi.cmd = &wmi_10_2_cmd_map;
+		ar->wmi.ops = &wmi_10_2_ops;
+		ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
+		ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
+		break;
+	case ATH10K_FW_WMI_OP_VERSION_10_1:
+		ar->wmi.cmd = &wmi_10x_cmd_map;
+		ar->wmi.ops = &wmi_10_1_ops;
+		ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
+		ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
+		break;
+	case ATH10K_FW_WMI_OP_VERSION_MAIN:
+		ar->wmi.cmd = &wmi_cmd_map;
+		ar->wmi.ops = &wmi_ops;
+		ar->wmi.vdev_param = &wmi_vdev_param_map;
+		ar->wmi.pdev_param = &wmi_pdev_param_map;
+		break;
+	case ATH10K_FW_WMI_OP_VERSION_TLV:
+		ath10k_wmi_tlv_attach(ar);
+		break;
+	case ATH10K_FW_WMI_OP_VERSION_UNSET:
+	case ATH10K_FW_WMI_OP_VERSION_MAX:
+		ath10k_err(ar, "unsupported WMI op version: %d\n",
+			   ar->wmi.op_version);
+		return -EINVAL;
+	}
+
+	init_completion(&ar->wmi.service_ready);
+	init_completion(&ar->wmi.unified_ready);
+
+	INIT_WORK(&ar->svc_rdy_work, ath10k_wmi_event_service_ready_work);
+
+	return 0;
+}
+
+void ath10k_wmi_free_host_mem(struct ath10k *ar)
+{
+	int i;
+
+	/* free the host memory chunks requested by firmware */
+	for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
+		dma_free_coherent(ar->dev,
+				  ar->wmi.mem_chunks[i].len,
+				  ar->wmi.mem_chunks[i].vaddr,
+				  ar->wmi.mem_chunks[i].paddr);
+	}
+
+	ar->wmi.num_mem_chunks = 0;
+}
+
+void ath10k_wmi_detach(struct ath10k *ar)
+{
+	cancel_work_sync(&ar->svc_rdy_work);
+
+	if (ar->svc_rdy_skb)
+		dev_kfree_skb(ar->svc_rdy_skb);
+}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
new file mode 100644
index 0000000..72a4ef7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -0,0 +1,6340 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _WMI_H_
+#define _WMI_H_
+
+#include <linux/types.h>
+#include <net/mac80211.h>
+
+/*
+ * This file specifies the WMI interface for the Unified Software
+ * Architecture.
+ *
+ * It includes definitions of all the commands and events. Commands are
+ * messages from the host to the target. Events and Replies are messages
+ * from the target to the host.
+ *
+ * Ownership of correctness with regard to WMI commands belongs to the host
+ * driver and the target is not required to validate parameters for value,
+ * proper range, or any other checking.
+ *
+ * Guidelines for extending this interface are below.
+ *
+ * 1. Add new WMI commands ONLY within the specified range - 0x9000 - 0x9fff
+ *
+ * 2. Use ONLY u32 type for defining member variables within WMI
+ *    command/event structures. Do not use u8, u16, bool or
+ *    enum types within these structures.
+ *
+ * 3. DO NOT define bit fields within structures. Implement bit fields
+ *    using masks if necessary. Do not use the programming language's bit
+ *    field definition.
+ *
+ * 4. Define macros for encode/decode of u8, u16 fields within
+ *    the u32 variables. Use these macros for set/get of these fields.
+ *    Try to use this to optimize the structure without bloating it with
+ *    u32 variables for every lower sized field.
+ *
+ * 5. Do not use PACK/UNPACK attributes for the structures as each member
+ *    variable is already 4-byte aligned by virtue of being a u32
+ *    type.
+ *
+ * 6. Comment each parameter part of the WMI command/event structure by
+ *    using two stars at the beginning of the C comment instead of one star to
+ *    enable HTML document generation using Doxygen.
+ *
+ */
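+
+/*
+ * Illustrative sketch only (not part of the WMI definitions): guideline 4
+ * above asks for mask/shift get/set macros instead of bit fields when a
+ * u8/u16 value is packed into a u32 member. A hypothetical 8-bit "rate"
+ * field occupying bits 0-7 of a u32 could be handled as:
+ *
+ *	#define WMI_EXAMPLE_RATE_MASK	0x000000ff
+ *	#define WMI_EXAMPLE_RATE_LSB	0
+ *	#define WMI_EXAMPLE_RATE_GET(v) \
+ *		(((v) & WMI_EXAMPLE_RATE_MASK) >> WMI_EXAMPLE_RATE_LSB)
+ *	#define WMI_EXAMPLE_RATE_SET(v, x) \
+ *		(((v) & ~WMI_EXAMPLE_RATE_MASK) | \
+ *		 (((x) << WMI_EXAMPLE_RATE_LSB) & WMI_EXAMPLE_RATE_MASK))
+ *
+ * The WMI_CMD_HDR_*_MASK/_LSB definitions below follow this pattern.
+ */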
+
+/* Control Path */
+struct wmi_cmd_hdr {
+	__le32 cmd_id;
+} __packed;
+
+#define WMI_CMD_HDR_CMD_ID_MASK   0x00FFFFFF
+#define WMI_CMD_HDR_CMD_ID_LSB    0
+#define WMI_CMD_HDR_PLT_PRIV_MASK 0xFF000000
+#define WMI_CMD_HDR_PLT_PRIV_LSB  24
+
+#define HTC_PROTOCOL_VERSION    0x0002
+#define WMI_PROTOCOL_VERSION    0x0002
+
+/*
+ * There is no signed version of __le32, so for a temporary solution come
+ * up with our own version. The idea is from fs/ntfs/types.h.
+ *
+ * Use a_ prefix so that it doesn't conflict if we get proper support to
+ * linux/types.h.
+ */
+typedef __s32 __bitwise a_sle32;
+
+static inline a_sle32 a_cpu_to_sle32(s32 val)
+{
+	return (__force a_sle32)cpu_to_le32(val);
+}
+
+static inline s32 a_sle32_to_cpu(a_sle32 val)
+{
+	return le32_to_cpu((__force __le32)val);
+}
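+
+/*
+ * Usage sketch (illustrative only, the field names are hypothetical): a
+ * signed 32-bit firmware value, e.g. an RSSI reading, would be declared as
+ * a_sle32 in the wire structure and converted at the boundary:
+ *
+ *	cmd->rssi = a_cpu_to_sle32(rssi);	/- host -> target -/
+ *	rssi = a_sle32_to_cpu(ev->rssi);	/- target -> host -/
+ */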
+
+enum wmi_service {
+	WMI_SERVICE_BEACON_OFFLOAD = 0,
+	WMI_SERVICE_SCAN_OFFLOAD,
+	WMI_SERVICE_ROAM_OFFLOAD,
+	WMI_SERVICE_BCN_MISS_OFFLOAD,
+	WMI_SERVICE_STA_PWRSAVE,
+	WMI_SERVICE_STA_ADVANCED_PWRSAVE,
+	WMI_SERVICE_AP_UAPSD,
+	WMI_SERVICE_AP_DFS,
+	WMI_SERVICE_11AC,
+	WMI_SERVICE_BLOCKACK,
+	WMI_SERVICE_PHYERR,
+	WMI_SERVICE_BCN_FILTER,
+	WMI_SERVICE_RTT,
+	WMI_SERVICE_RATECTRL,
+	WMI_SERVICE_WOW,
+	WMI_SERVICE_RATECTRL_CACHE,
+	WMI_SERVICE_IRAM_TIDS,
+	WMI_SERVICE_ARPNS_OFFLOAD,
+	WMI_SERVICE_NLO,
+	WMI_SERVICE_GTK_OFFLOAD,
+	WMI_SERVICE_SCAN_SCH,
+	WMI_SERVICE_CSA_OFFLOAD,
+	WMI_SERVICE_CHATTER,
+	WMI_SERVICE_COEX_FREQAVOID,
+	WMI_SERVICE_PACKET_POWER_SAVE,
+	WMI_SERVICE_FORCE_FW_HANG,
+	WMI_SERVICE_GPIO,
+	WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM,
+	WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+	WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+	WMI_SERVICE_STA_KEEP_ALIVE,
+	WMI_SERVICE_TX_ENCAP,
+	WMI_SERVICE_BURST,
+	WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT,
+	WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+	WMI_SERVICE_ROAM_SCAN_OFFLOAD,
+	WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
+	WMI_SERVICE_EARLY_RX,
+	WMI_SERVICE_STA_SMPS,
+	WMI_SERVICE_FWTEST,
+	WMI_SERVICE_STA_WMMAC,
+	WMI_SERVICE_TDLS,
+	WMI_SERVICE_MCC_BCN_INTERVAL_CHANGE,
+	WMI_SERVICE_ADAPTIVE_OCS,
+	WMI_SERVICE_BA_SSN_SUPPORT,
+	WMI_SERVICE_FILTER_IPSEC_NATKEEPALIVE,
+	WMI_SERVICE_WLAN_HB,
+	WMI_SERVICE_LTE_ANT_SHARE_SUPPORT,
+	WMI_SERVICE_BATCH_SCAN,
+	WMI_SERVICE_QPOWER,
+	WMI_SERVICE_PLMREQ,
+	WMI_SERVICE_THERMAL_MGMT,
+	WMI_SERVICE_RMC,
+	WMI_SERVICE_MHF_OFFLOAD,
+	WMI_SERVICE_COEX_SAR,
+	WMI_SERVICE_BCN_TXRATE_OVERRIDE,
+	WMI_SERVICE_NAN,
+	WMI_SERVICE_L1SS_STAT,
+	WMI_SERVICE_ESTIMATE_LINKSPEED,
+	WMI_SERVICE_OBSS_SCAN,
+	WMI_SERVICE_TDLS_OFFCHAN,
+	WMI_SERVICE_TDLS_UAPSD_BUFFER_STA,
+	WMI_SERVICE_TDLS_UAPSD_SLEEP_STA,
+	WMI_SERVICE_IBSS_PWRSAVE,
+	WMI_SERVICE_LPASS,
+	WMI_SERVICE_EXTSCAN,
+	WMI_SERVICE_D0WOW,
+	WMI_SERVICE_HSOFFLOAD,
+	WMI_SERVICE_ROAM_HO_OFFLOAD,
+	WMI_SERVICE_RX_FULL_REORDER,
+	WMI_SERVICE_DHCP_OFFLOAD,
+	WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT,
+	WMI_SERVICE_MDNS_OFFLOAD,
+	WMI_SERVICE_SAP_AUTH_OFFLOAD,
+	WMI_SERVICE_ATF,
+	WMI_SERVICE_COEX_GPIO,
+	WMI_SERVICE_ENHANCED_PROXY_STA,
+	WMI_SERVICE_TT,
+	WMI_SERVICE_PEER_CACHING,
+	WMI_SERVICE_AUX_SPECTRAL_INTF,
+	WMI_SERVICE_AUX_CHAN_LOAD_INTF,
+	WMI_SERVICE_BSS_CHANNEL_INFO_64,
+
+	/* keep last */
+	WMI_SERVICE_MAX,
+};
+
+enum wmi_10x_service {
+	WMI_10X_SERVICE_BEACON_OFFLOAD = 0,
+	WMI_10X_SERVICE_SCAN_OFFLOAD,
+	WMI_10X_SERVICE_ROAM_OFFLOAD,
+	WMI_10X_SERVICE_BCN_MISS_OFFLOAD,
+	WMI_10X_SERVICE_STA_PWRSAVE,
+	WMI_10X_SERVICE_STA_ADVANCED_PWRSAVE,
+	WMI_10X_SERVICE_AP_UAPSD,
+	WMI_10X_SERVICE_AP_DFS,
+	WMI_10X_SERVICE_11AC,
+	WMI_10X_SERVICE_BLOCKACK,
+	WMI_10X_SERVICE_PHYERR,
+	WMI_10X_SERVICE_BCN_FILTER,
+	WMI_10X_SERVICE_RTT,
+	WMI_10X_SERVICE_RATECTRL,
+	WMI_10X_SERVICE_WOW,
+	WMI_10X_SERVICE_RATECTRL_CACHE,
+	WMI_10X_SERVICE_IRAM_TIDS,
+	WMI_10X_SERVICE_BURST,
+
+	/* introduced in 10.2 */
+	WMI_10X_SERVICE_SMART_ANTENNA_SW_SUPPORT,
+	WMI_10X_SERVICE_FORCE_FW_HANG,
+	WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+	WMI_10X_SERVICE_ATF,
+	WMI_10X_SERVICE_COEX_GPIO,
+};
+
+enum wmi_main_service {
+	WMI_MAIN_SERVICE_BEACON_OFFLOAD = 0,
+	WMI_MAIN_SERVICE_SCAN_OFFLOAD,
+	WMI_MAIN_SERVICE_ROAM_OFFLOAD,
+	WMI_MAIN_SERVICE_BCN_MISS_OFFLOAD,
+	WMI_MAIN_SERVICE_STA_PWRSAVE,
+	WMI_MAIN_SERVICE_STA_ADVANCED_PWRSAVE,
+	WMI_MAIN_SERVICE_AP_UAPSD,
+	WMI_MAIN_SERVICE_AP_DFS,
+	WMI_MAIN_SERVICE_11AC,
+	WMI_MAIN_SERVICE_BLOCKACK,
+	WMI_MAIN_SERVICE_PHYERR,
+	WMI_MAIN_SERVICE_BCN_FILTER,
+	WMI_MAIN_SERVICE_RTT,
+	WMI_MAIN_SERVICE_RATECTRL,
+	WMI_MAIN_SERVICE_WOW,
+	WMI_MAIN_SERVICE_RATECTRL_CACHE,
+	WMI_MAIN_SERVICE_IRAM_TIDS,
+	WMI_MAIN_SERVICE_ARPNS_OFFLOAD,
+	WMI_MAIN_SERVICE_NLO,
+	WMI_MAIN_SERVICE_GTK_OFFLOAD,
+	WMI_MAIN_SERVICE_SCAN_SCH,
+	WMI_MAIN_SERVICE_CSA_OFFLOAD,
+	WMI_MAIN_SERVICE_CHATTER,
+	WMI_MAIN_SERVICE_COEX_FREQAVOID,
+	WMI_MAIN_SERVICE_PACKET_POWER_SAVE,
+	WMI_MAIN_SERVICE_FORCE_FW_HANG,
+	WMI_MAIN_SERVICE_GPIO,
+	WMI_MAIN_SERVICE_STA_DTIM_PS_MODULATED_DTIM,
+	WMI_MAIN_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+	WMI_MAIN_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+	WMI_MAIN_SERVICE_STA_KEEP_ALIVE,
+	WMI_MAIN_SERVICE_TX_ENCAP,
+};
+
+enum wmi_10_4_service {
+	WMI_10_4_SERVICE_BEACON_OFFLOAD = 0,
+	WMI_10_4_SERVICE_SCAN_OFFLOAD,
+	WMI_10_4_SERVICE_ROAM_OFFLOAD,
+	WMI_10_4_SERVICE_BCN_MISS_OFFLOAD,
+	WMI_10_4_SERVICE_STA_PWRSAVE,
+	WMI_10_4_SERVICE_STA_ADVANCED_PWRSAVE,
+	WMI_10_4_SERVICE_AP_UAPSD,
+	WMI_10_4_SERVICE_AP_DFS,
+	WMI_10_4_SERVICE_11AC,
+	WMI_10_4_SERVICE_BLOCKACK,
+	WMI_10_4_SERVICE_PHYERR,
+	WMI_10_4_SERVICE_BCN_FILTER,
+	WMI_10_4_SERVICE_RTT,
+	WMI_10_4_SERVICE_RATECTRL,
+	WMI_10_4_SERVICE_WOW,
+	WMI_10_4_SERVICE_RATECTRL_CACHE,
+	WMI_10_4_SERVICE_IRAM_TIDS,
+	WMI_10_4_SERVICE_BURST,
+	WMI_10_4_SERVICE_SMART_ANTENNA_SW_SUPPORT,
+	WMI_10_4_SERVICE_GTK_OFFLOAD,
+	WMI_10_4_SERVICE_SCAN_SCH,
+	WMI_10_4_SERVICE_CSA_OFFLOAD,
+	WMI_10_4_SERVICE_CHATTER,
+	WMI_10_4_SERVICE_COEX_FREQAVOID,
+	WMI_10_4_SERVICE_PACKET_POWER_SAVE,
+	WMI_10_4_SERVICE_FORCE_FW_HANG,
+	WMI_10_4_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+	WMI_10_4_SERVICE_GPIO,
+	WMI_10_4_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+	WMI_10_4_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+	WMI_10_4_SERVICE_STA_KEEP_ALIVE,
+	WMI_10_4_SERVICE_TX_ENCAP,
+	WMI_10_4_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
+	WMI_10_4_SERVICE_EARLY_RX,
+	WMI_10_4_SERVICE_ENHANCED_PROXY_STA,
+	WMI_10_4_SERVICE_TT,
+	WMI_10_4_SERVICE_ATF,
+	WMI_10_4_SERVICE_PEER_CACHING,
+	WMI_10_4_SERVICE_COEX_GPIO,
+	WMI_10_4_SERVICE_AUX_SPECTRAL_INTF,
+	WMI_10_4_SERVICE_AUX_CHAN_LOAD_INTF,
+	WMI_10_4_SERVICE_BSS_CHANNEL_INFO_64,
+};
+
+static inline char *wmi_service_name(int service_id)
+{
+#define SVCSTR(x) case x: return #x
+
+	switch (service_id) {
+	SVCSTR(WMI_SERVICE_BEACON_OFFLOAD);
+	SVCSTR(WMI_SERVICE_SCAN_OFFLOAD);
+	SVCSTR(WMI_SERVICE_ROAM_OFFLOAD);
+	SVCSTR(WMI_SERVICE_BCN_MISS_OFFLOAD);
+	SVCSTR(WMI_SERVICE_STA_PWRSAVE);
+	SVCSTR(WMI_SERVICE_STA_ADVANCED_PWRSAVE);
+	SVCSTR(WMI_SERVICE_AP_UAPSD);
+	SVCSTR(WMI_SERVICE_AP_DFS);
+	SVCSTR(WMI_SERVICE_11AC);
+	SVCSTR(WMI_SERVICE_BLOCKACK);
+	SVCSTR(WMI_SERVICE_PHYERR);
+	SVCSTR(WMI_SERVICE_BCN_FILTER);
+	SVCSTR(WMI_SERVICE_RTT);
+	SVCSTR(WMI_SERVICE_RATECTRL);
+	SVCSTR(WMI_SERVICE_WOW);
+	SVCSTR(WMI_SERVICE_RATECTRL_CACHE);
+	SVCSTR(WMI_SERVICE_IRAM_TIDS);
+	SVCSTR(WMI_SERVICE_ARPNS_OFFLOAD);
+	SVCSTR(WMI_SERVICE_NLO);
+	SVCSTR(WMI_SERVICE_GTK_OFFLOAD);
+	SVCSTR(WMI_SERVICE_SCAN_SCH);
+	SVCSTR(WMI_SERVICE_CSA_OFFLOAD);
+	SVCSTR(WMI_SERVICE_CHATTER);
+	SVCSTR(WMI_SERVICE_COEX_FREQAVOID);
+	SVCSTR(WMI_SERVICE_PACKET_POWER_SAVE);
+	SVCSTR(WMI_SERVICE_FORCE_FW_HANG);
+	SVCSTR(WMI_SERVICE_GPIO);
+	SVCSTR(WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM);
+	SVCSTR(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG);
+	SVCSTR(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG);
+	SVCSTR(WMI_SERVICE_STA_KEEP_ALIVE);
+	SVCSTR(WMI_SERVICE_TX_ENCAP);
+	SVCSTR(WMI_SERVICE_BURST);
+	SVCSTR(WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT);
+	SVCSTR(WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT);
+	SVCSTR(WMI_SERVICE_ROAM_SCAN_OFFLOAD);
+	SVCSTR(WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC);
+	SVCSTR(WMI_SERVICE_EARLY_RX);
+	SVCSTR(WMI_SERVICE_STA_SMPS);
+	SVCSTR(WMI_SERVICE_FWTEST);
+	SVCSTR(WMI_SERVICE_STA_WMMAC);
+	SVCSTR(WMI_SERVICE_TDLS);
+	SVCSTR(WMI_SERVICE_MCC_BCN_INTERVAL_CHANGE);
+	SVCSTR(WMI_SERVICE_ADAPTIVE_OCS);
+	SVCSTR(WMI_SERVICE_BA_SSN_SUPPORT);
+	SVCSTR(WMI_SERVICE_FILTER_IPSEC_NATKEEPALIVE);
+	SVCSTR(WMI_SERVICE_WLAN_HB);
+	SVCSTR(WMI_SERVICE_LTE_ANT_SHARE_SUPPORT);
+	SVCSTR(WMI_SERVICE_BATCH_SCAN);
+	SVCSTR(WMI_SERVICE_QPOWER);
+	SVCSTR(WMI_SERVICE_PLMREQ);
+	SVCSTR(WMI_SERVICE_THERMAL_MGMT);
+	SVCSTR(WMI_SERVICE_RMC);
+	SVCSTR(WMI_SERVICE_MHF_OFFLOAD);
+	SVCSTR(WMI_SERVICE_COEX_SAR);
+	SVCSTR(WMI_SERVICE_BCN_TXRATE_OVERRIDE);
+	SVCSTR(WMI_SERVICE_NAN);
+	SVCSTR(WMI_SERVICE_L1SS_STAT);
+	SVCSTR(WMI_SERVICE_ESTIMATE_LINKSPEED);
+	SVCSTR(WMI_SERVICE_OBSS_SCAN);
+	SVCSTR(WMI_SERVICE_TDLS_OFFCHAN);
+	SVCSTR(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA);
+	SVCSTR(WMI_SERVICE_TDLS_UAPSD_SLEEP_STA);
+	SVCSTR(WMI_SERVICE_IBSS_PWRSAVE);
+	SVCSTR(WMI_SERVICE_LPASS);
+	SVCSTR(WMI_SERVICE_EXTSCAN);
+	SVCSTR(WMI_SERVICE_D0WOW);
+	SVCSTR(WMI_SERVICE_HSOFFLOAD);
+	SVCSTR(WMI_SERVICE_ROAM_HO_OFFLOAD);
+	SVCSTR(WMI_SERVICE_RX_FULL_REORDER);
+	SVCSTR(WMI_SERVICE_DHCP_OFFLOAD);
+	SVCSTR(WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT);
+	SVCSTR(WMI_SERVICE_MDNS_OFFLOAD);
+	SVCSTR(WMI_SERVICE_SAP_AUTH_OFFLOAD);
+	SVCSTR(WMI_SERVICE_ATF);
+	SVCSTR(WMI_SERVICE_COEX_GPIO);
+	SVCSTR(WMI_SERVICE_ENHANCED_PROXY_STA);
+	SVCSTR(WMI_SERVICE_TT);
+	SVCSTR(WMI_SERVICE_PEER_CACHING);
+	SVCSTR(WMI_SERVICE_AUX_SPECTRAL_INTF);
+	SVCSTR(WMI_SERVICE_AUX_CHAN_LOAD_INTF);
+	SVCSTR(WMI_SERVICE_BSS_CHANNEL_INFO_64);
+	default:
+		return NULL;
+	}
+
+#undef SVCSTR
+}
+
+#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
+	((svc_id) < (len) && \
+	 __le32_to_cpu((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \
+	 BIT((svc_id)%(sizeof(u32))))
+
+#define SVCMAP(x, y, len) \
+	do { \
+		if (WMI_SERVICE_IS_ENABLED((in), (x), (len))) \
+			__set_bit(y, out); \
+	} while (0)
+
+static inline void wmi_10x_svc_map(const __le32 *in, unsigned long *out,
+				   size_t len)
+{
+	SVCMAP(WMI_10X_SERVICE_BEACON_OFFLOAD,
+	       WMI_SERVICE_BEACON_OFFLOAD, len);
+	SVCMAP(WMI_10X_SERVICE_SCAN_OFFLOAD,
+	       WMI_SERVICE_SCAN_OFFLOAD, len);
+	SVCMAP(WMI_10X_SERVICE_ROAM_OFFLOAD,
+	       WMI_SERVICE_ROAM_OFFLOAD, len);
+	SVCMAP(WMI_10X_SERVICE_BCN_MISS_OFFLOAD,
+	       WMI_SERVICE_BCN_MISS_OFFLOAD, len);
+	SVCMAP(WMI_10X_SERVICE_STA_PWRSAVE,
+	       WMI_SERVICE_STA_PWRSAVE, len);
+	SVCMAP(WMI_10X_SERVICE_STA_ADVANCED_PWRSAVE,
+	       WMI_SERVICE_STA_ADVANCED_PWRSAVE, len);
+	SVCMAP(WMI_10X_SERVICE_AP_UAPSD,
+	       WMI_SERVICE_AP_UAPSD, len);
+	SVCMAP(WMI_10X_SERVICE_AP_DFS,
+	       WMI_SERVICE_AP_DFS, len);
+	SVCMAP(WMI_10X_SERVICE_11AC,
+	       WMI_SERVICE_11AC, len);
+	SVCMAP(WMI_10X_SERVICE_BLOCKACK,
+	       WMI_SERVICE_BLOCKACK, len);
+	SVCMAP(WMI_10X_SERVICE_PHYERR,
+	       WMI_SERVICE_PHYERR, len);
+	SVCMAP(WMI_10X_SERVICE_BCN_FILTER,
+	       WMI_SERVICE_BCN_FILTER, len);
+	SVCMAP(WMI_10X_SERVICE_RTT,
+	       WMI_SERVICE_RTT, len);
+	SVCMAP(WMI_10X_SERVICE_RATECTRL,
+	       WMI_SERVICE_RATECTRL, len);
+	SVCMAP(WMI_10X_SERVICE_WOW,
+	       WMI_SERVICE_WOW, len);
+	SVCMAP(WMI_10X_SERVICE_RATECTRL_CACHE,
+	       WMI_SERVICE_RATECTRL_CACHE, len);
+	SVCMAP(WMI_10X_SERVICE_IRAM_TIDS,
+	       WMI_SERVICE_IRAM_TIDS, len);
+	SVCMAP(WMI_10X_SERVICE_BURST,
+	       WMI_SERVICE_BURST, len);
+	SVCMAP(WMI_10X_SERVICE_SMART_ANTENNA_SW_SUPPORT,
+	       WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT, len);
+	SVCMAP(WMI_10X_SERVICE_FORCE_FW_HANG,
+	       WMI_SERVICE_FORCE_FW_HANG, len);
+	SVCMAP(WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+	       WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT, len);
+	SVCMAP(WMI_10X_SERVICE_ATF,
+	       WMI_SERVICE_ATF, len);
+	SVCMAP(WMI_10X_SERVICE_COEX_GPIO,
+	       WMI_SERVICE_COEX_GPIO, len);
+}
+
+static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out,
+				    size_t len)
+{
+	SVCMAP(WMI_MAIN_SERVICE_BEACON_OFFLOAD,
+	       WMI_SERVICE_BEACON_OFFLOAD, len);
+	SVCMAP(WMI_MAIN_SERVICE_SCAN_OFFLOAD,
+	       WMI_SERVICE_SCAN_OFFLOAD, len);
+	SVCMAP(WMI_MAIN_SERVICE_ROAM_OFFLOAD,
+	       WMI_SERVICE_ROAM_OFFLOAD, len);
+	SVCMAP(WMI_MAIN_SERVICE_BCN_MISS_OFFLOAD,
+	       WMI_SERVICE_BCN_MISS_OFFLOAD, len);
+	SVCMAP(WMI_MAIN_SERVICE_STA_PWRSAVE,
+	       WMI_SERVICE_STA_PWRSAVE, len);
+	SVCMAP(WMI_MAIN_SERVICE_STA_ADVANCED_PWRSAVE,
+	       WMI_SERVICE_STA_ADVANCED_PWRSAVE, len);
+	SVCMAP(WMI_MAIN_SERVICE_AP_UAPSD,
+	       WMI_SERVICE_AP_UAPSD, len);
+	SVCMAP(WMI_MAIN_SERVICE_AP_DFS,
+	       WMI_SERVICE_AP_DFS, len);
+	SVCMAP(WMI_MAIN_SERVICE_11AC,
+	       WMI_SERVICE_11AC, len);
+	SVCMAP(WMI_MAIN_SERVICE_BLOCKACK,
+	       WMI_SERVICE_BLOCKACK, len);
+	SVCMAP(WMI_MAIN_SERVICE_PHYERR,
+	       WMI_SERVICE_PHYERR, len);
+	SVCMAP(WMI_MAIN_SERVICE_BCN_FILTER,
+	       WMI_SERVICE_BCN_FILTER, len);
+	SVCMAP(WMI_MAIN_SERVICE_RTT,
+	       WMI_SERVICE_RTT, len);
+	SVCMAP(WMI_MAIN_SERVICE_RATECTRL,
+	       WMI_SERVICE_RATECTRL, len);
+	SVCMAP(WMI_MAIN_SERVICE_WOW,
+	       WMI_SERVICE_WOW, len);
+	SVCMAP(WMI_MAIN_SERVICE_RATECTRL_CACHE,
+	       WMI_SERVICE_RATECTRL_CACHE, len);
+	SVCMAP(WMI_MAIN_SERVICE_IRAM_TIDS,
+	       WMI_SERVICE_IRAM_TIDS, len);
+	SVCMAP(WMI_MAIN_SERVICE_ARPNS_OFFLOAD,
+	       WMI_SERVICE_ARPNS_OFFLOAD, len);
+	SVCMAP(WMI_MAIN_SERVICE_NLO,
+	       WMI_SERVICE_NLO, len);
+	SVCMAP(WMI_MAIN_SERVICE_GTK_OFFLOAD,
+	       WMI_SERVICE_GTK_OFFLOAD, len);
+	SVCMAP(WMI_MAIN_SERVICE_SCAN_SCH,
+	       WMI_SERVICE_SCAN_SCH, len);
+	SVCMAP(WMI_MAIN_SERVICE_CSA_OFFLOAD,
+	       WMI_SERVICE_CSA_OFFLOAD, len);
+	SVCMAP(WMI_MAIN_SERVICE_CHATTER,
+	       WMI_SERVICE_CHATTER, len);
+	SVCMAP(WMI_MAIN_SERVICE_COEX_FREQAVOID,
+	       WMI_SERVICE_COEX_FREQAVOID, len);
+	SVCMAP(WMI_MAIN_SERVICE_PACKET_POWER_SAVE,
+	       WMI_SERVICE_PACKET_POWER_SAVE, len);
+	SVCMAP(WMI_MAIN_SERVICE_FORCE_FW_HANG,
+	       WMI_SERVICE_FORCE_FW_HANG, len);
+	SVCMAP(WMI_MAIN_SERVICE_GPIO,
+	       WMI_SERVICE_GPIO, len);
+	SVCMAP(WMI_MAIN_SERVICE_STA_DTIM_PS_MODULATED_DTIM,
+	       WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM, len);
+	SVCMAP(WMI_MAIN_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+	       WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, len);
+	SVCMAP(WMI_MAIN_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+	       WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, len);
+	SVCMAP(WMI_MAIN_SERVICE_STA_KEEP_ALIVE,
+	       WMI_SERVICE_STA_KEEP_ALIVE, len);
+	SVCMAP(WMI_MAIN_SERVICE_TX_ENCAP,
+	       WMI_SERVICE_TX_ENCAP, len);
+}
+
+static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out,
+				    size_t len)
+{
+	SVCMAP(WMI_10_4_SERVICE_BEACON_OFFLOAD,
+	       WMI_SERVICE_BEACON_OFFLOAD, len);
+	SVCMAP(WMI_10_4_SERVICE_SCAN_OFFLOAD,
+	       WMI_SERVICE_SCAN_OFFLOAD, len);
+	SVCMAP(WMI_10_4_SERVICE_ROAM_OFFLOAD,
+	       WMI_SERVICE_ROAM_OFFLOAD, len);
+	SVCMAP(WMI_10_4_SERVICE_BCN_MISS_OFFLOAD,
+	       WMI_SERVICE_BCN_MISS_OFFLOAD, len);
+	SVCMAP(WMI_10_4_SERVICE_STA_PWRSAVE,
+	       WMI_SERVICE_STA_PWRSAVE, len);
+	SVCMAP(WMI_10_4_SERVICE_STA_ADVANCED_PWRSAVE,
+	       WMI_SERVICE_STA_ADVANCED_PWRSAVE, len);
+	SVCMAP(WMI_10_4_SERVICE_AP_UAPSD,
+	       WMI_SERVICE_AP_UAPSD, len);
+	SVCMAP(WMI_10_4_SERVICE_AP_DFS,
+	       WMI_SERVICE_AP_DFS, len);
+	SVCMAP(WMI_10_4_SERVICE_11AC,
+	       WMI_SERVICE_11AC, len);
+	SVCMAP(WMI_10_4_SERVICE_BLOCKACK,
+	       WMI_SERVICE_BLOCKACK, len);
+	SVCMAP(WMI_10_4_SERVICE_PHYERR,
+	       WMI_SERVICE_PHYERR, len);
+	SVCMAP(WMI_10_4_SERVICE_BCN_FILTER,
+	       WMI_SERVICE_BCN_FILTER, len);
+	SVCMAP(WMI_10_4_SERVICE_RTT,
+	       WMI_SERVICE_RTT, len);
+	SVCMAP(WMI_10_4_SERVICE_RATECTRL,
+	       WMI_SERVICE_RATECTRL, len);
+	SVCMAP(WMI_10_4_SERVICE_WOW,
+	       WMI_SERVICE_WOW, len);
+	SVCMAP(WMI_10_4_SERVICE_RATECTRL_CACHE,
+	       WMI_SERVICE_RATECTRL_CACHE, len);
+	SVCMAP(WMI_10_4_SERVICE_IRAM_TIDS,
+	       WMI_SERVICE_IRAM_TIDS, len);
+	SVCMAP(WMI_10_4_SERVICE_BURST,
+	       WMI_SERVICE_BURST, len);
+	SVCMAP(WMI_10_4_SERVICE_SMART_ANTENNA_SW_SUPPORT,
+	       WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT, len);
+	SVCMAP(WMI_10_4_SERVICE_GTK_OFFLOAD,
+	       WMI_SERVICE_GTK_OFFLOAD, len);
+	SVCMAP(WMI_10_4_SERVICE_SCAN_SCH,
+	       WMI_SERVICE_SCAN_SCH, len);
+	SVCMAP(WMI_10_4_SERVICE_CSA_OFFLOAD,
+	       WMI_SERVICE_CSA_OFFLOAD, len);
+	SVCMAP(WMI_10_4_SERVICE_CHATTER,
+	       WMI_SERVICE_CHATTER, len);
+	SVCMAP(WMI_10_4_SERVICE_COEX_FREQAVOID,
+	       WMI_SERVICE_COEX_FREQAVOID, len);
+	SVCMAP(WMI_10_4_SERVICE_PACKET_POWER_SAVE,
+	       WMI_SERVICE_PACKET_POWER_SAVE, len);
+	SVCMAP(WMI_10_4_SERVICE_FORCE_FW_HANG,
+	       WMI_SERVICE_FORCE_FW_HANG, len);
+	SVCMAP(WMI_10_4_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+	       WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT, len);
+	SVCMAP(WMI_10_4_SERVICE_GPIO,
+	       WMI_SERVICE_GPIO, len);
+	SVCMAP(WMI_10_4_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+	       WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, len);
+	SVCMAP(WMI_10_4_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+	       WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, len);
+	SVCMAP(WMI_10_4_SERVICE_STA_KEEP_ALIVE,
+	       WMI_SERVICE_STA_KEEP_ALIVE, len);
+	SVCMAP(WMI_10_4_SERVICE_TX_ENCAP,
+	       WMI_SERVICE_TX_ENCAP, len);
+	SVCMAP(WMI_10_4_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
+	       WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC, len);
+	SVCMAP(WMI_10_4_SERVICE_EARLY_RX,
+	       WMI_SERVICE_EARLY_RX, len);
+	SVCMAP(WMI_10_4_SERVICE_ENHANCED_PROXY_STA,
+	       WMI_SERVICE_ENHANCED_PROXY_STA, len);
+	SVCMAP(WMI_10_4_SERVICE_TT,
+	       WMI_SERVICE_TT, len);
+	SVCMAP(WMI_10_4_SERVICE_ATF,
+	       WMI_SERVICE_ATF, len);
+	SVCMAP(WMI_10_4_SERVICE_PEER_CACHING,
+	       WMI_SERVICE_PEER_CACHING, len);
+	SVCMAP(WMI_10_4_SERVICE_COEX_GPIO,
+	       WMI_SERVICE_COEX_GPIO, len);
+	SVCMAP(WMI_10_4_SERVICE_AUX_SPECTRAL_INTF,
+	       WMI_SERVICE_AUX_SPECTRAL_INTF, len);
+	SVCMAP(WMI_10_4_SERVICE_AUX_CHAN_LOAD_INTF,
+	       WMI_SERVICE_AUX_CHAN_LOAD_INTF, len);
+	SVCMAP(WMI_10_4_SERVICE_BSS_CHANNEL_INFO_64,
+	       WMI_SERVICE_BSS_CHANNEL_INFO_64, len);
+}
+
+#undef SVCMAP
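+
+/*
+ * Illustrative usage (not part of this header): once the service ready
+ * event has been parsed, the per-version map function above translates the
+ * firmware's service bitmap into the common WMI_SERVICE_* bit positions,
+ * so capability checks can be written uniformly, e.g.:
+ *
+ *	if (test_bit(WMI_SERVICE_BURST, ar->wmi.svc_map))
+ *		enable_burst_mode();
+ */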
+
+/* 2 word representation of MAC addr */
+struct wmi_mac_addr {
+	union {
+		u8 addr[6];
+		struct {
+			u32 word0;
+			u32 word1;
+		} __packed;
+	} __packed;
+} __packed;
+
+struct wmi_cmd_map {
+	u32 init_cmdid;
+	u32 start_scan_cmdid;
+	u32 stop_scan_cmdid;
+	u32 scan_chan_list_cmdid;
+	u32 scan_sch_prio_tbl_cmdid;
+	u32 pdev_set_regdomain_cmdid;
+	u32 pdev_set_channel_cmdid;
+	u32 pdev_set_param_cmdid;
+	u32 pdev_pktlog_enable_cmdid;
+	u32 pdev_pktlog_disable_cmdid;
+	u32 pdev_set_wmm_params_cmdid;
+	u32 pdev_set_ht_cap_ie_cmdid;
+	u32 pdev_set_vht_cap_ie_cmdid;
+	u32 pdev_set_dscp_tid_map_cmdid;
+	u32 pdev_set_quiet_mode_cmdid;
+	u32 pdev_green_ap_ps_enable_cmdid;
+	u32 pdev_get_tpc_config_cmdid;
+	u32 pdev_set_base_macaddr_cmdid;
+	u32 vdev_create_cmdid;
+	u32 vdev_delete_cmdid;
+	u32 vdev_start_request_cmdid;
+	u32 vdev_restart_request_cmdid;
+	u32 vdev_up_cmdid;
+	u32 vdev_stop_cmdid;
+	u32 vdev_down_cmdid;
+	u32 vdev_set_param_cmdid;
+	u32 vdev_install_key_cmdid;
+	u32 peer_create_cmdid;
+	u32 peer_delete_cmdid;
+	u32 peer_flush_tids_cmdid;
+	u32 peer_set_param_cmdid;
+	u32 peer_assoc_cmdid;
+	u32 peer_add_wds_entry_cmdid;
+	u32 peer_remove_wds_entry_cmdid;
+	u32 peer_mcast_group_cmdid;
+	u32 bcn_tx_cmdid;
+	u32 pdev_send_bcn_cmdid;
+	u32 bcn_tmpl_cmdid;
+	u32 bcn_filter_rx_cmdid;
+	u32 prb_req_filter_rx_cmdid;
+	u32 mgmt_tx_cmdid;
+	u32 prb_tmpl_cmdid;
+	u32 addba_clear_resp_cmdid;
+	u32 addba_send_cmdid;
+	u32 addba_status_cmdid;
+	u32 delba_send_cmdid;
+	u32 addba_set_resp_cmdid;
+	u32 send_singleamsdu_cmdid;
+	u32 sta_powersave_mode_cmdid;
+	u32 sta_powersave_param_cmdid;
+	u32 sta_mimo_ps_mode_cmdid;
+	u32 pdev_dfs_enable_cmdid;
+	u32 pdev_dfs_disable_cmdid;
+	u32 roam_scan_mode;
+	u32 roam_scan_rssi_threshold;
+	u32 roam_scan_period;
+	u32 roam_scan_rssi_change_threshold;
+	u32 roam_ap_profile;
+	u32 ofl_scan_add_ap_profile;
+	u32 ofl_scan_remove_ap_profile;
+	u32 ofl_scan_period;
+	u32 p2p_dev_set_device_info;
+	u32 p2p_dev_set_discoverability;
+	u32 p2p_go_set_beacon_ie;
+	u32 p2p_go_set_probe_resp_ie;
+	u32 p2p_set_vendor_ie_data_cmdid;
+	u32 ap_ps_peer_param_cmdid;
+	u32 ap_ps_peer_uapsd_coex_cmdid;
+	u32 peer_rate_retry_sched_cmdid;
+	u32 wlan_profile_trigger_cmdid;
+	u32 wlan_profile_set_hist_intvl_cmdid;
+	u32 wlan_profile_get_profile_data_cmdid;
+	u32 wlan_profile_enable_profile_id_cmdid;
+	u32 wlan_profile_list_profile_id_cmdid;
+	u32 pdev_suspend_cmdid;
+	u32 pdev_resume_cmdid;
+	u32 add_bcn_filter_cmdid;
+	u32 rmv_bcn_filter_cmdid;
+	u32 wow_add_wake_pattern_cmdid;
+	u32 wow_del_wake_pattern_cmdid;
+	u32 wow_enable_disable_wake_event_cmdid;
+	u32 wow_enable_cmdid;
+	u32 wow_hostwakeup_from_sleep_cmdid;
+	u32 rtt_measreq_cmdid;
+	u32 rtt_tsf_cmdid;
+	u32 vdev_spectral_scan_configure_cmdid;
+	u32 vdev_spectral_scan_enable_cmdid;
+	u32 request_stats_cmdid;
+	u32 set_arp_ns_offload_cmdid;
+	u32 network_list_offload_config_cmdid;
+	u32 gtk_offload_cmdid;
+	u32 csa_offload_enable_cmdid;
+	u32 csa_offload_chanswitch_cmdid;
+	u32 chatter_set_mode_cmdid;
+	u32 peer_tid_addba_cmdid;
+	u32 peer_tid_delba_cmdid;
+	u32 sta_dtim_ps_method_cmdid;
+	u32 sta_uapsd_auto_trig_cmdid;
+	u32 sta_keepalive_cmd;
+	u32 echo_cmdid;
+	u32 pdev_utf_cmdid;
+	u32 dbglog_cfg_cmdid;
+	u32 pdev_qvit_cmdid;
+	u32 pdev_ftm_intg_cmdid;
+	u32 vdev_set_keepalive_cmdid;
+	u32 vdev_get_keepalive_cmdid;
+	u32 force_fw_hang_cmdid;
+	u32 gpio_config_cmdid;
+	u32 gpio_output_cmdid;
+	u32 pdev_get_temperature_cmdid;
+	u32 vdev_set_wmm_params_cmdid;
+	u32 tdls_set_state_cmdid;
+	u32 tdls_peer_update_cmdid;
+	u32 adaptive_qcs_cmdid;
+	u32 scan_update_request_cmdid;
+	u32 vdev_standby_response_cmdid;
+	u32 vdev_resume_response_cmdid;
+	u32 wlan_peer_caching_add_peer_cmdid;
+	u32 wlan_peer_caching_evict_peer_cmdid;
+	u32 wlan_peer_caching_restore_peer_cmdid;
+	u32 wlan_peer_caching_print_all_peers_info_cmdid;
+	u32 peer_update_wds_entry_cmdid;
+	u32 peer_add_proxy_sta_entry_cmdid;
+	u32 rtt_keepalive_cmdid;
+	u32 oem_req_cmdid;
+	u32 nan_cmdid;
+	u32 vdev_ratemask_cmdid;
+	u32 qboost_cfg_cmdid;
+	u32 pdev_smart_ant_enable_cmdid;
+	u32 pdev_smart_ant_set_rx_antenna_cmdid;
+	u32 peer_smart_ant_set_tx_antenna_cmdid;
+	u32 peer_smart_ant_set_train_info_cmdid;
+	u32 peer_smart_ant_set_node_config_ops_cmdid;
+	u32 pdev_set_antenna_switch_table_cmdid;
+	u32 pdev_set_ctl_table_cmdid;
+	u32 pdev_set_mimogain_table_cmdid;
+	u32 pdev_ratepwr_table_cmdid;
+	u32 pdev_ratepwr_chainmsk_table_cmdid;
+	u32 pdev_fips_cmdid;
+	u32 tt_set_conf_cmdid;
+	u32 fwtest_cmdid;
+	u32 vdev_atf_request_cmdid;
+	u32 peer_atf_request_cmdid;
+	u32 pdev_get_ani_cck_config_cmdid;
+	u32 pdev_get_ani_ofdm_config_cmdid;
+	u32 pdev_reserve_ast_entry_cmdid;
+	u32 pdev_get_nfcal_power_cmdid;
+	u32 pdev_get_tpc_cmdid;
+	u32 pdev_get_ast_info_cmdid;
+	u32 vdev_set_dscp_tid_map_cmdid;
+	u32 pdev_get_info_cmdid;
+	u32 vdev_get_info_cmdid;
+	u32 vdev_filter_neighbor_rx_packets_cmdid;
+	u32 mu_cal_start_cmdid;
+	u32 set_cca_params_cmdid;
+	u32 pdev_bss_chan_info_request_cmdid;
+	u32 pdev_enable_adaptive_cca_cmdid;
+};
+
+/*
+ * wmi command groups.
+ */
+enum wmi_cmd_group {
+	/* 0 to 2 are reserved */
+	WMI_GRP_START = 0x3,
+	WMI_GRP_SCAN = WMI_GRP_START,
+	WMI_GRP_PDEV,
+	WMI_GRP_VDEV,
+	WMI_GRP_PEER,
+	WMI_GRP_MGMT,
+	WMI_GRP_BA_NEG,
+	WMI_GRP_STA_PS,
+	WMI_GRP_DFS,
+	WMI_GRP_ROAM,
+	WMI_GRP_OFL_SCAN,
+	WMI_GRP_P2P,
+	WMI_GRP_AP_PS,
+	WMI_GRP_RATE_CTRL,
+	WMI_GRP_PROFILE,
+	WMI_GRP_SUSPEND,
+	WMI_GRP_BCN_FILTER,
+	WMI_GRP_WOW,
+	WMI_GRP_RTT,
+	WMI_GRP_SPECTRAL,
+	WMI_GRP_STATS,
+	WMI_GRP_ARP_NS_OFL,
+	WMI_GRP_NLO_OFL,
+	WMI_GRP_GTK_OFL,
+	WMI_GRP_CSA_OFL,
+	WMI_GRP_CHATTER,
+	WMI_GRP_TID_ADDBA,
+	WMI_GRP_MISC,
+	WMI_GRP_GPIO,
+};
+
+#define WMI_CMD_GRP(grp_id) (((grp_id) << 12) | 0x1)
+#define WMI_EVT_GRP_START_ID(grp_id) (((grp_id) << 12) | 0x1)
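+
+/*
+ * Example derived from the macros above: WMI_GRP_SCAN is 0x3, so
+ * WMI_CMD_GRP(WMI_GRP_SCAN) evaluates to (0x3 << 12) | 0x1 = 0x3001.
+ * WMI_START_SCAN_CMDID below therefore starts the scan command group at
+ * 0x3001, and the commands that follow it in the enum are numbered
+ * consecutively from there.
+ */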
+
+#define WMI_CMD_UNSUPPORTED 0
+
+/* Command IDs and command events for MAIN FW. */
+enum wmi_cmd_id {
+	WMI_INIT_CMDID = 0x1,
+
+	/* Scan specific commands */
+	WMI_START_SCAN_CMDID = WMI_CMD_GRP(WMI_GRP_SCAN),
+	WMI_STOP_SCAN_CMDID,
+	WMI_SCAN_CHAN_LIST_CMDID,
+	WMI_SCAN_SCH_PRIO_TBL_CMDID,
+
+	/* PDEV (physical device) specific commands */
+	WMI_PDEV_SET_REGDOMAIN_CMDID = WMI_CMD_GRP(WMI_GRP_PDEV),
+	WMI_PDEV_SET_CHANNEL_CMDID,
+	WMI_PDEV_SET_PARAM_CMDID,
+	WMI_PDEV_PKTLOG_ENABLE_CMDID,
+	WMI_PDEV_PKTLOG_DISABLE_CMDID,
+	WMI_PDEV_SET_WMM_PARAMS_CMDID,
+	WMI_PDEV_SET_HT_CAP_IE_CMDID,
+	WMI_PDEV_SET_VHT_CAP_IE_CMDID,
+	WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
+	WMI_PDEV_SET_QUIET_MODE_CMDID,
+	WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+	WMI_PDEV_GET_TPC_CONFIG_CMDID,
+	WMI_PDEV_SET_BASE_MACADDR_CMDID,
+
+	/* VDEV (virtual device) specific commands */
+	WMI_VDEV_CREATE_CMDID = WMI_CMD_GRP(WMI_GRP_VDEV),
+	WMI_VDEV_DELETE_CMDID,
+	WMI_VDEV_START_REQUEST_CMDID,
+	WMI_VDEV_RESTART_REQUEST_CMDID,
+	WMI_VDEV_UP_CMDID,
+	WMI_VDEV_STOP_CMDID,
+	WMI_VDEV_DOWN_CMDID,
+	WMI_VDEV_SET_PARAM_CMDID,
+	WMI_VDEV_INSTALL_KEY_CMDID,
+
+	/* peer specific commands */
+	WMI_PEER_CREATE_CMDID = WMI_CMD_GRP(WMI_GRP_PEER),
+	WMI_PEER_DELETE_CMDID,
+	WMI_PEER_FLUSH_TIDS_CMDID,
+	WMI_PEER_SET_PARAM_CMDID,
+	WMI_PEER_ASSOC_CMDID,
+	WMI_PEER_ADD_WDS_ENTRY_CMDID,
+	WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
+	WMI_PEER_MCAST_GROUP_CMDID,
+
+	/* beacon/management specific commands */
+	WMI_BCN_TX_CMDID = WMI_CMD_GRP(WMI_GRP_MGMT),
+	WMI_PDEV_SEND_BCN_CMDID,
+	WMI_BCN_TMPL_CMDID,
+	WMI_BCN_FILTER_RX_CMDID,
+	WMI_PRB_REQ_FILTER_RX_CMDID,
+	WMI_MGMT_TX_CMDID,
+	WMI_PRB_TMPL_CMDID,
+
+	/* commands to control BA negotiation directly from the host */
+	WMI_ADDBA_CLEAR_RESP_CMDID = WMI_CMD_GRP(WMI_GRP_BA_NEG),
+	WMI_ADDBA_SEND_CMDID,
+	WMI_ADDBA_STATUS_CMDID,
+	WMI_DELBA_SEND_CMDID,
+	WMI_ADDBA_SET_RESP_CMDID,
+	WMI_SEND_SINGLEAMSDU_CMDID,
+
+	/* Station power save specific config */
+	WMI_STA_POWERSAVE_MODE_CMDID = WMI_CMD_GRP(WMI_GRP_STA_PS),
+	WMI_STA_POWERSAVE_PARAM_CMDID,
+	WMI_STA_MIMO_PS_MODE_CMDID,
+
+	/* DFS-specific commands */
+	WMI_PDEV_DFS_ENABLE_CMDID = WMI_CMD_GRP(WMI_GRP_DFS),
+	WMI_PDEV_DFS_DISABLE_CMDID,
+
+	/* Roaming specific commands */
+	WMI_ROAM_SCAN_MODE = WMI_CMD_GRP(WMI_GRP_ROAM),
+	WMI_ROAM_SCAN_RSSI_THRESHOLD,
+	WMI_ROAM_SCAN_PERIOD,
+	WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+	WMI_ROAM_AP_PROFILE,
+
+	/* offload scan specific commands */
+	WMI_OFL_SCAN_ADD_AP_PROFILE = WMI_CMD_GRP(WMI_GRP_OFL_SCAN),
+	WMI_OFL_SCAN_REMOVE_AP_PROFILE,
+	WMI_OFL_SCAN_PERIOD,
+
+	/* P2P specific commands */
+	WMI_P2P_DEV_SET_DEVICE_INFO = WMI_CMD_GRP(WMI_GRP_P2P),
+	WMI_P2P_DEV_SET_DISCOVERABILITY,
+	WMI_P2P_GO_SET_BEACON_IE,
+	WMI_P2P_GO_SET_PROBE_RESP_IE,
+	WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
+
+	/* AP power save specific config */
+	WMI_AP_PS_PEER_PARAM_CMDID = WMI_CMD_GRP(WMI_GRP_AP_PS),
+	WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
+
+	/* Rate-control specific commands */
+	WMI_PEER_RATE_RETRY_SCHED_CMDID =
+	WMI_CMD_GRP(WMI_GRP_RATE_CTRL),
+
+	/* WLAN Profiling commands. */
+	WMI_WLAN_PROFILE_TRIGGER_CMDID = WMI_CMD_GRP(WMI_GRP_PROFILE),
+	WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+	WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+	WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+	WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+
+	/* Suspend resume command Ids */
+	WMI_PDEV_SUSPEND_CMDID = WMI_CMD_GRP(WMI_GRP_SUSPEND),
+	WMI_PDEV_RESUME_CMDID,
+
+	/* Beacon filter commands */
+	WMI_ADD_BCN_FILTER_CMDID = WMI_CMD_GRP(WMI_GRP_BCN_FILTER),
+	WMI_RMV_BCN_FILTER_CMDID,
+
+	/* WOW Specific WMI commands */
+	WMI_WOW_ADD_WAKE_PATTERN_CMDID = WMI_CMD_GRP(WMI_GRP_WOW),
+	WMI_WOW_DEL_WAKE_PATTERN_CMDID,
+	WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+	WMI_WOW_ENABLE_CMDID,
+	WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+
+	/* RTT measurement related cmd */
+	WMI_RTT_MEASREQ_CMDID = WMI_CMD_GRP(WMI_GRP_RTT),
+	WMI_RTT_TSF_CMDID,
+
+	/* spectral scan commands */
+	WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID = WMI_CMD_GRP(WMI_GRP_SPECTRAL),
+	WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+
+	/* F/W stats */
+	WMI_REQUEST_STATS_CMDID = WMI_CMD_GRP(WMI_GRP_STATS),
+
+	/* ARP OFFLOAD REQUEST */
+	WMI_SET_ARP_NS_OFFLOAD_CMDID = WMI_CMD_GRP(WMI_GRP_ARP_NS_OFL),
+
+	/* NS offload config */
+	WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID = WMI_CMD_GRP(WMI_GRP_NLO_OFL),
+
+	/* GTK offload Specific WMI commands */
+	WMI_GTK_OFFLOAD_CMDID = WMI_CMD_GRP(WMI_GRP_GTK_OFL),
+
+	/* CSA offload Specific WMI commands */
+	WMI_CSA_OFFLOAD_ENABLE_CMDID = WMI_CMD_GRP(WMI_GRP_CSA_OFL),
+	WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
+
+	/* Chatter commands */
+	WMI_CHATTER_SET_MODE_CMDID = WMI_CMD_GRP(WMI_GRP_CHATTER),
+
+	/* addba specific commands */
+	WMI_PEER_TID_ADDBA_CMDID = WMI_CMD_GRP(WMI_GRP_TID_ADDBA),
+	WMI_PEER_TID_DELBA_CMDID,
+
+	/* set station mimo powersave method */
+	WMI_STA_DTIM_PS_METHOD_CMDID,
+	/* Configure the Station UAPSD AC Auto Trigger Parameters */
+	WMI_STA_UAPSD_AUTO_TRIG_CMDID,
+
+	/* STA Keep alive parameter configuration,
+	 * Requires WMI_SERVICE_STA_KEEP_ALIVE
+	 */
+	WMI_STA_KEEPALIVE_CMD,
+
+	/* misc command group */
+	WMI_ECHO_CMDID = WMI_CMD_GRP(WMI_GRP_MISC),
+	WMI_PDEV_UTF_CMDID,
+	WMI_DBGLOG_CFG_CMDID,
+	WMI_PDEV_QVIT_CMDID,
+	WMI_PDEV_FTM_INTG_CMDID,
+	WMI_VDEV_SET_KEEPALIVE_CMDID,
+	WMI_VDEV_GET_KEEPALIVE_CMDID,
+	WMI_FORCE_FW_HANG_CMDID,
+
+	/* GPIO Configuration */
+	WMI_GPIO_CONFIG_CMDID = WMI_CMD_GRP(WMI_GRP_GPIO),
+	WMI_GPIO_OUTPUT_CMDID,
+};
+
+enum wmi_event_id {
+	WMI_SERVICE_READY_EVENTID = 0x1,
+	WMI_READY_EVENTID,
+
+	/* Scan specific events */
+	WMI_SCAN_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_SCAN),
+
+	/* PDEV specific events */
+	WMI_PDEV_TPC_CONFIG_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_PDEV),
+	WMI_CHAN_INFO_EVENTID,
+	WMI_PHYERR_EVENTID,
+
+	/* VDEV specific events */
+	WMI_VDEV_START_RESP_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_VDEV),
+	WMI_VDEV_STOPPED_EVENTID,
+	WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID,
+
+	/* peer specific events */
+	WMI_PEER_STA_KICKOUT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_PEER),
+
+	/* beacon/mgmt specific events */
+	WMI_MGMT_RX_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_MGMT),
+	WMI_HOST_SWBA_EVENTID,
+	WMI_TBTTOFFSET_UPDATE_EVENTID,
+
+	/* ADDBA Related WMI Events*/
+	WMI_TX_DELBA_COMPLETE_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_BA_NEG),
+	WMI_TX_ADDBA_COMPLETE_EVENTID,
+
+	/* Roam event to trigger roaming on host */
+	WMI_ROAM_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_ROAM),
+	WMI_PROFILE_MATCH,
+
+	/* WoW */
+	WMI_WOW_WAKEUP_HOST_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_WOW),
+
+	/* RTT */
+	WMI_RTT_MEASUREMENT_REPORT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_RTT),
+	WMI_TSF_MEASUREMENT_REPORT_EVENTID,
+	WMI_RTT_ERROR_REPORT_EVENTID,
+
+	/* GTK offload */
+	WMI_GTK_OFFLOAD_STATUS_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_GTK_OFL),
+	WMI_GTK_REKEY_FAIL_EVENTID,
+
+	/* CSA IE received event */
+	WMI_CSA_HANDLING_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_CSA_OFL),
+
+	/* Misc events */
+	WMI_ECHO_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_MISC),
+	WMI_PDEV_UTF_EVENTID,
+	WMI_DEBUG_MESG_EVENTID,
+	WMI_UPDATE_STATS_EVENTID,
+	WMI_DEBUG_PRINT_EVENTID,
+	WMI_DCS_INTERFERENCE_EVENTID,
+	WMI_PDEV_QVIT_EVENTID,
+	WMI_WLAN_PROFILE_DATA_EVENTID,
+	WMI_PDEV_FTM_INTG_EVENTID,
+	WMI_WLAN_FREQ_AVOID_EVENTID,
+	WMI_VDEV_GET_KEEPALIVE_EVENTID,
+
+	/* GPIO Event */
+	WMI_GPIO_INPUT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_GPIO),
+};
+
+/* Command IDs and command events for 10.X firmware */
+enum wmi_10x_cmd_id {
+	WMI_10X_START_CMDID = 0x9000,
+	WMI_10X_END_CMDID = 0x9FFF,
+
+	/* initialize the wlan sub system */
+	WMI_10X_INIT_CMDID,
+
+	/* Scan specific commands */
+
+	WMI_10X_START_SCAN_CMDID = WMI_10X_START_CMDID,
+	WMI_10X_STOP_SCAN_CMDID,
+	WMI_10X_SCAN_CHAN_LIST_CMDID,
+	WMI_10X_ECHO_CMDID,
+
+	/* PDEV(physical device) specific commands */
+	WMI_10X_PDEV_SET_REGDOMAIN_CMDID,
+	WMI_10X_PDEV_SET_CHANNEL_CMDID,
+	WMI_10X_PDEV_SET_PARAM_CMDID,
+	WMI_10X_PDEV_PKTLOG_ENABLE_CMDID,
+	WMI_10X_PDEV_PKTLOG_DISABLE_CMDID,
+	WMI_10X_PDEV_SET_WMM_PARAMS_CMDID,
+	WMI_10X_PDEV_SET_HT_CAP_IE_CMDID,
+	WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID,
+	WMI_10X_PDEV_SET_BASE_MACADDR_CMDID,
+	WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID,
+	WMI_10X_PDEV_SET_QUIET_MODE_CMDID,
+	WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+	WMI_10X_PDEV_GET_TPC_CONFIG_CMDID,
+
+	/* VDEV(virtual device) specific commands */
+	WMI_10X_VDEV_CREATE_CMDID,
+	WMI_10X_VDEV_DELETE_CMDID,
+	WMI_10X_VDEV_START_REQUEST_CMDID,
+	WMI_10X_VDEV_RESTART_REQUEST_CMDID,
+	WMI_10X_VDEV_UP_CMDID,
+	WMI_10X_VDEV_STOP_CMDID,
+	WMI_10X_VDEV_DOWN_CMDID,
+	WMI_10X_VDEV_STANDBY_RESPONSE_CMDID,
+	WMI_10X_VDEV_RESUME_RESPONSE_CMDID,
+	WMI_10X_VDEV_SET_PARAM_CMDID,
+	WMI_10X_VDEV_INSTALL_KEY_CMDID,
+
+	/* peer specific commands */
+	WMI_10X_PEER_CREATE_CMDID,
+	WMI_10X_PEER_DELETE_CMDID,
+	WMI_10X_PEER_FLUSH_TIDS_CMDID,
+	WMI_10X_PEER_SET_PARAM_CMDID,
+	WMI_10X_PEER_ASSOC_CMDID,
+	WMI_10X_PEER_ADD_WDS_ENTRY_CMDID,
+	WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID,
+	WMI_10X_PEER_MCAST_GROUP_CMDID,
+
+	/* beacon/management specific commands */
+
+	WMI_10X_BCN_TX_CMDID,
+	WMI_10X_BCN_PRB_TMPL_CMDID,
+	WMI_10X_BCN_FILTER_RX_CMDID,
+	WMI_10X_PRB_REQ_FILTER_RX_CMDID,
+	WMI_10X_MGMT_TX_CMDID,
+
+	/* commands to control BA negotiation directly from the host */
+	WMI_10X_ADDBA_CLEAR_RESP_CMDID,
+	WMI_10X_ADDBA_SEND_CMDID,
+	WMI_10X_ADDBA_STATUS_CMDID,
+	WMI_10X_DELBA_SEND_CMDID,
+	WMI_10X_ADDBA_SET_RESP_CMDID,
+	WMI_10X_SEND_SINGLEAMSDU_CMDID,
+
+	/* Station power save specific config */
+	WMI_10X_STA_POWERSAVE_MODE_CMDID,
+	WMI_10X_STA_POWERSAVE_PARAM_CMDID,
+	WMI_10X_STA_MIMO_PS_MODE_CMDID,
+
+	/* set debug log config */
+	WMI_10X_DBGLOG_CFG_CMDID,
+
+	/* DFS-specific commands */
+	WMI_10X_PDEV_DFS_ENABLE_CMDID,
+	WMI_10X_PDEV_DFS_DISABLE_CMDID,
+
+	/* QVIT specific command id */
+	WMI_10X_PDEV_QVIT_CMDID,
+
+	/* Offload Scan and Roaming related commands */
+	WMI_10X_ROAM_SCAN_MODE,
+	WMI_10X_ROAM_SCAN_RSSI_THRESHOLD,
+	WMI_10X_ROAM_SCAN_PERIOD,
+	WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+	WMI_10X_ROAM_AP_PROFILE,
+	WMI_10X_OFL_SCAN_ADD_AP_PROFILE,
+	WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE,
+	WMI_10X_OFL_SCAN_PERIOD,
+
+	/* P2P specific commands */
+	WMI_10X_P2P_DEV_SET_DEVICE_INFO,
+	WMI_10X_P2P_DEV_SET_DISCOVERABILITY,
+	WMI_10X_P2P_GO_SET_BEACON_IE,
+	WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
+
+	/* AP power save specific config */
+	WMI_10X_AP_PS_PEER_PARAM_CMDID,
+	WMI_10X_AP_PS_PEER_UAPSD_COEX_CMDID,
+
+	/* Rate-control specific commands */
+	WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
+
+	/* WLAN Profiling commands. */
+	WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
+	WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+	WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+	WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+	WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+
+	/* Suspend resume command Ids */
+	WMI_10X_PDEV_SUSPEND_CMDID,
+	WMI_10X_PDEV_RESUME_CMDID,
+
+	/* Beacon filter commands */
+	WMI_10X_ADD_BCN_FILTER_CMDID,
+	WMI_10X_RMV_BCN_FILTER_CMDID,
+
+	/* WOW Specific WMI commands*/
+	WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID,
+	WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID,
+	WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+	WMI_10X_WOW_ENABLE_CMDID,
+	WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+
+	/* RTT measurement related cmd */
+	WMI_10X_RTT_MEASREQ_CMDID,
+	WMI_10X_RTT_TSF_CMDID,
+
+	/* transmit beacon by value */
+	WMI_10X_PDEV_SEND_BCN_CMDID,
+
+	/* F/W stats */
+	WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+	WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+	WMI_10X_REQUEST_STATS_CMDID,
+
+	/* GPIO Configuration */
+	WMI_10X_GPIO_CONFIG_CMDID,
+	WMI_10X_GPIO_OUTPUT_CMDID,
+
+	WMI_10X_PDEV_UTF_CMDID = WMI_10X_END_CMDID - 1,
+};
+
+enum wmi_10x_event_id {
+	WMI_10X_SERVICE_READY_EVENTID = 0x8000,
+	WMI_10X_READY_EVENTID,
+	WMI_10X_START_EVENTID = 0x9000,
+	WMI_10X_END_EVENTID = 0x9FFF,
+
+	/* Scan specific events */
+	WMI_10X_SCAN_EVENTID = WMI_10X_START_EVENTID,
+	WMI_10X_ECHO_EVENTID,
+	WMI_10X_DEBUG_MESG_EVENTID,
+	WMI_10X_UPDATE_STATS_EVENTID,
+
+	/* Instantaneous RSSI event */
+	WMI_10X_INST_RSSI_STATS_EVENTID,
+
+	/* VDEV specific events */
+	WMI_10X_VDEV_START_RESP_EVENTID,
+	WMI_10X_VDEV_STANDBY_REQ_EVENTID,
+	WMI_10X_VDEV_RESUME_REQ_EVENTID,
+	WMI_10X_VDEV_STOPPED_EVENTID,
+
+	/* peer  specific events */
+	WMI_10X_PEER_STA_KICKOUT_EVENTID,
+
+	/* beacon/mgmt specific events */
+	WMI_10X_HOST_SWBA_EVENTID,
+	WMI_10X_TBTTOFFSET_UPDATE_EVENTID,
+	WMI_10X_MGMT_RX_EVENTID,
+
+	/* Channel stats event */
+	WMI_10X_CHAN_INFO_EVENTID,
+
+	/* PHY Error specific WMI event */
+	WMI_10X_PHYERR_EVENTID,
+
+	/* Roam event to trigger roaming on host */
+	WMI_10X_ROAM_EVENTID,
+
+	/* matching AP found from list of profiles */
+	WMI_10X_PROFILE_MATCH,
+
+	/* debug print message used for tracing FW code while debugging */
+	WMI_10X_DEBUG_PRINT_EVENTID,
+	/* QVIT specific event */
+	WMI_10X_PDEV_QVIT_EVENTID,
+	/* FW code profile data in response to profile request */
+	WMI_10X_WLAN_PROFILE_DATA_EVENTID,
+
+	/*RTT related event ID*/
+	WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID,
+	WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID,
+	WMI_10X_RTT_ERROR_REPORT_EVENTID,
+
+	WMI_10X_WOW_WAKEUP_HOST_EVENTID,
+	WMI_10X_DCS_INTERFERENCE_EVENTID,
+
+	/* TPC config for the current operating channel */
+	WMI_10X_PDEV_TPC_CONFIG_EVENTID,
+
+	WMI_10X_GPIO_INPUT_EVENTID,
+	WMI_10X_PDEV_UTF_EVENTID = WMI_10X_END_EVENTID - 1,
+};
+
+enum wmi_10_2_cmd_id {
+	WMI_10_2_START_CMDID = 0x9000,
+	WMI_10_2_END_CMDID = 0x9FFF,
+	WMI_10_2_INIT_CMDID,
+	WMI_10_2_START_SCAN_CMDID = WMI_10_2_START_CMDID,
+	WMI_10_2_STOP_SCAN_CMDID,
+	WMI_10_2_SCAN_CHAN_LIST_CMDID,
+	WMI_10_2_ECHO_CMDID,
+	WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
+	WMI_10_2_PDEV_SET_CHANNEL_CMDID,
+	WMI_10_2_PDEV_SET_PARAM_CMDID,
+	WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
+	WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
+	WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
+	WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
+	WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
+	WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
+	WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
+	WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+	WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
+	WMI_10_2_VDEV_CREATE_CMDID,
+	WMI_10_2_VDEV_DELETE_CMDID,
+	WMI_10_2_VDEV_START_REQUEST_CMDID,
+	WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
+	WMI_10_2_VDEV_UP_CMDID,
+	WMI_10_2_VDEV_STOP_CMDID,
+	WMI_10_2_VDEV_DOWN_CMDID,
+	WMI_10_2_VDEV_STANDBY_RESPONSE_CMDID,
+	WMI_10_2_VDEV_RESUME_RESPONSE_CMDID,
+	WMI_10_2_VDEV_SET_PARAM_CMDID,
+	WMI_10_2_VDEV_INSTALL_KEY_CMDID,
+	WMI_10_2_VDEV_SET_DSCP_TID_MAP_CMDID,
+	WMI_10_2_PEER_CREATE_CMDID,
+	WMI_10_2_PEER_DELETE_CMDID,
+	WMI_10_2_PEER_FLUSH_TIDS_CMDID,
+	WMI_10_2_PEER_SET_PARAM_CMDID,
+	WMI_10_2_PEER_ASSOC_CMDID,
+	WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
+	WMI_10_2_PEER_UPDATE_WDS_ENTRY_CMDID,
+	WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
+	WMI_10_2_PEER_MCAST_GROUP_CMDID,
+	WMI_10_2_BCN_TX_CMDID,
+	WMI_10_2_BCN_PRB_TMPL_CMDID,
+	WMI_10_2_BCN_FILTER_RX_CMDID,
+	WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
+	WMI_10_2_MGMT_TX_CMDID,
+	WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
+	WMI_10_2_ADDBA_SEND_CMDID,
+	WMI_10_2_ADDBA_STATUS_CMDID,
+	WMI_10_2_DELBA_SEND_CMDID,
+	WMI_10_2_ADDBA_SET_RESP_CMDID,
+	WMI_10_2_SEND_SINGLEAMSDU_CMDID,
+	WMI_10_2_STA_POWERSAVE_MODE_CMDID,
+	WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
+	WMI_10_2_STA_MIMO_PS_MODE_CMDID,
+	WMI_10_2_DBGLOG_CFG_CMDID,
+	WMI_10_2_PDEV_DFS_ENABLE_CMDID,
+	WMI_10_2_PDEV_DFS_DISABLE_CMDID,
+	WMI_10_2_PDEV_QVIT_CMDID,
+	WMI_10_2_ROAM_SCAN_MODE,
+	WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
+	WMI_10_2_ROAM_SCAN_PERIOD,
+	WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+	WMI_10_2_ROAM_AP_PROFILE,
+	WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
+	WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
+	WMI_10_2_OFL_SCAN_PERIOD,
+	WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
+	WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
+	WMI_10_2_P2P_GO_SET_BEACON_IE,
+	WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
+	WMI_10_2_AP_PS_PEER_PARAM_CMDID,
+	WMI_10_2_AP_PS_PEER_UAPSD_COEX_CMDID,
+	WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
+	WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
+	WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+	WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+	WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+	WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+	WMI_10_2_PDEV_SUSPEND_CMDID,
+	WMI_10_2_PDEV_RESUME_CMDID,
+	WMI_10_2_ADD_BCN_FILTER_CMDID,
+	WMI_10_2_RMV_BCN_FILTER_CMDID,
+	WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
+	WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
+	WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+	WMI_10_2_WOW_ENABLE_CMDID,
+	WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+	WMI_10_2_RTT_MEASREQ_CMDID,
+	WMI_10_2_RTT_TSF_CMDID,
+	WMI_10_2_RTT_KEEPALIVE_CMDID,
+	WMI_10_2_PDEV_SEND_BCN_CMDID,
+	WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+	WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+	WMI_10_2_REQUEST_STATS_CMDID,
+	WMI_10_2_GPIO_CONFIG_CMDID,
+	WMI_10_2_GPIO_OUTPUT_CMDID,
+	WMI_10_2_VDEV_RATEMASK_CMDID,
+	WMI_10_2_PDEV_SMART_ANT_ENABLE_CMDID,
+	WMI_10_2_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
+	WMI_10_2_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
+	WMI_10_2_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
+	WMI_10_2_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
+	WMI_10_2_FORCE_FW_HANG_CMDID,
+	WMI_10_2_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
+	WMI_10_2_PDEV_SET_CTL_TABLE_CMDID,
+	WMI_10_2_PDEV_SET_MIMOGAIN_TABLE_CMDID,
+	WMI_10_2_PDEV_RATEPWR_TABLE_CMDID,
+	WMI_10_2_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID,
+	WMI_10_2_PDEV_GET_INFO,
+	WMI_10_2_VDEV_GET_INFO,
+	WMI_10_2_VDEV_ATF_REQUEST_CMDID,
+	WMI_10_2_PEER_ATF_REQUEST_CMDID,
+	WMI_10_2_PDEV_GET_TEMPERATURE_CMDID,
+	WMI_10_2_MU_CAL_START_CMDID,
+	WMI_10_2_SET_LTEU_CONFIG_CMDID,
+	WMI_10_2_SET_CCA_PARAMS,
+	WMI_10_2_PDEV_UTF_CMDID = WMI_10_2_END_CMDID - 1,
+};
+
+enum wmi_10_2_event_id {
+	WMI_10_2_SERVICE_READY_EVENTID = 0x8000,
+	WMI_10_2_READY_EVENTID,
+	WMI_10_2_DEBUG_MESG_EVENTID,
+	WMI_10_2_START_EVENTID = 0x9000,
+	WMI_10_2_END_EVENTID = 0x9FFF,
+	WMI_10_2_SCAN_EVENTID = WMI_10_2_START_EVENTID,
+	WMI_10_2_ECHO_EVENTID,
+	WMI_10_2_UPDATE_STATS_EVENTID,
+	WMI_10_2_INST_RSSI_STATS_EVENTID,
+	WMI_10_2_VDEV_START_RESP_EVENTID,
+	WMI_10_2_VDEV_STANDBY_REQ_EVENTID,
+	WMI_10_2_VDEV_RESUME_REQ_EVENTID,
+	WMI_10_2_VDEV_STOPPED_EVENTID,
+	WMI_10_2_PEER_STA_KICKOUT_EVENTID,
+	WMI_10_2_HOST_SWBA_EVENTID,
+	WMI_10_2_TBTTOFFSET_UPDATE_EVENTID,
+	WMI_10_2_MGMT_RX_EVENTID,
+	WMI_10_2_CHAN_INFO_EVENTID,
+	WMI_10_2_PHYERR_EVENTID,
+	WMI_10_2_ROAM_EVENTID,
+	WMI_10_2_PROFILE_MATCH,
+	WMI_10_2_DEBUG_PRINT_EVENTID,
+	WMI_10_2_PDEV_QVIT_EVENTID,
+	WMI_10_2_WLAN_PROFILE_DATA_EVENTID,
+	WMI_10_2_RTT_MEASUREMENT_REPORT_EVENTID,
+	WMI_10_2_TSF_MEASUREMENT_REPORT_EVENTID,
+	WMI_10_2_RTT_ERROR_REPORT_EVENTID,
+	WMI_10_2_RTT_KEEPALIVE_EVENTID,
+	WMI_10_2_WOW_WAKEUP_HOST_EVENTID,
+	WMI_10_2_DCS_INTERFERENCE_EVENTID,
+	WMI_10_2_PDEV_TPC_CONFIG_EVENTID,
+	WMI_10_2_GPIO_INPUT_EVENTID,
+	WMI_10_2_PEER_RATECODE_LIST_EVENTID,
+	WMI_10_2_GENERIC_BUFFER_EVENTID,
+	WMI_10_2_MCAST_BUF_RELEASE_EVENTID,
+	WMI_10_2_MCAST_LIST_AGEOUT_EVENTID,
+	WMI_10_2_WDS_PEER_EVENTID,
+	WMI_10_2_PEER_STA_PS_STATECHG_EVENTID,
+	WMI_10_2_PDEV_TEMPERATURE_EVENTID,
+	WMI_10_2_PDEV_UTF_EVENTID = WMI_10_2_END_EVENTID - 1,
+};
+
+enum wmi_10_4_cmd_id {
+	WMI_10_4_START_CMDID = 0x9000,
+	WMI_10_4_END_CMDID = 0x9FFF,
+	WMI_10_4_INIT_CMDID,
+	WMI_10_4_START_SCAN_CMDID = WMI_10_4_START_CMDID,
+	WMI_10_4_STOP_SCAN_CMDID,
+	WMI_10_4_SCAN_CHAN_LIST_CMDID,
+	WMI_10_4_SCAN_SCH_PRIO_TBL_CMDID,
+	WMI_10_4_SCAN_UPDATE_REQUEST_CMDID,
+	WMI_10_4_ECHO_CMDID,
+	WMI_10_4_PDEV_SET_REGDOMAIN_CMDID,
+	WMI_10_4_PDEV_SET_CHANNEL_CMDID,
+	WMI_10_4_PDEV_SET_PARAM_CMDID,
+	WMI_10_4_PDEV_PKTLOG_ENABLE_CMDID,
+	WMI_10_4_PDEV_PKTLOG_DISABLE_CMDID,
+	WMI_10_4_PDEV_SET_WMM_PARAMS_CMDID,
+	WMI_10_4_PDEV_SET_HT_CAP_IE_CMDID,
+	WMI_10_4_PDEV_SET_VHT_CAP_IE_CMDID,
+	WMI_10_4_PDEV_SET_BASE_MACADDR_CMDID,
+	WMI_10_4_PDEV_SET_DSCP_TID_MAP_CMDID,
+	WMI_10_4_PDEV_SET_QUIET_MODE_CMDID,
+	WMI_10_4_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+	WMI_10_4_PDEV_GET_TPC_CONFIG_CMDID,
+	WMI_10_4_VDEV_CREATE_CMDID,
+	WMI_10_4_VDEV_DELETE_CMDID,
+	WMI_10_4_VDEV_START_REQUEST_CMDID,
+	WMI_10_4_VDEV_RESTART_REQUEST_CMDID,
+	WMI_10_4_VDEV_UP_CMDID,
+	WMI_10_4_VDEV_STOP_CMDID,
+	WMI_10_4_VDEV_DOWN_CMDID,
+	WMI_10_4_VDEV_STANDBY_RESPONSE_CMDID,
+	WMI_10_4_VDEV_RESUME_RESPONSE_CMDID,
+	WMI_10_4_VDEV_SET_PARAM_CMDID,
+	WMI_10_4_VDEV_INSTALL_KEY_CMDID,
+	WMI_10_4_WLAN_PEER_CACHING_ADD_PEER_CMDID,
+	WMI_10_4_WLAN_PEER_CACHING_EVICT_PEER_CMDID,
+	WMI_10_4_WLAN_PEER_CACHING_RESTORE_PEER_CMDID,
+	WMI_10_4_WLAN_PEER_CACHING_PRINT_ALL_PEERS_INFO_CMDID,
+	WMI_10_4_PEER_CREATE_CMDID,
+	WMI_10_4_PEER_DELETE_CMDID,
+	WMI_10_4_PEER_FLUSH_TIDS_CMDID,
+	WMI_10_4_PEER_SET_PARAM_CMDID,
+	WMI_10_4_PEER_ASSOC_CMDID,
+	WMI_10_4_PEER_ADD_WDS_ENTRY_CMDID,
+	WMI_10_4_PEER_UPDATE_WDS_ENTRY_CMDID,
+	WMI_10_4_PEER_REMOVE_WDS_ENTRY_CMDID,
+	WMI_10_4_PEER_ADD_PROXY_STA_ENTRY_CMDID,
+	WMI_10_4_PEER_MCAST_GROUP_CMDID,
+	WMI_10_4_BCN_TX_CMDID,
+	WMI_10_4_PDEV_SEND_BCN_CMDID,
+	WMI_10_4_BCN_PRB_TMPL_CMDID,
+	WMI_10_4_BCN_FILTER_RX_CMDID,
+	WMI_10_4_PRB_REQ_FILTER_RX_CMDID,
+	WMI_10_4_MGMT_TX_CMDID,
+	WMI_10_4_PRB_TMPL_CMDID,
+	WMI_10_4_ADDBA_CLEAR_RESP_CMDID,
+	WMI_10_4_ADDBA_SEND_CMDID,
+	WMI_10_4_ADDBA_STATUS_CMDID,
+	WMI_10_4_DELBA_SEND_CMDID,
+	WMI_10_4_ADDBA_SET_RESP_CMDID,
+	WMI_10_4_SEND_SINGLEAMSDU_CMDID,
+	WMI_10_4_STA_POWERSAVE_MODE_CMDID,
+	WMI_10_4_STA_POWERSAVE_PARAM_CMDID,
+	WMI_10_4_STA_MIMO_PS_MODE_CMDID,
+	WMI_10_4_DBGLOG_CFG_CMDID,
+	WMI_10_4_PDEV_DFS_ENABLE_CMDID,
+	WMI_10_4_PDEV_DFS_DISABLE_CMDID,
+	WMI_10_4_PDEV_QVIT_CMDID,
+	WMI_10_4_ROAM_SCAN_MODE,
+	WMI_10_4_ROAM_SCAN_RSSI_THRESHOLD,
+	WMI_10_4_ROAM_SCAN_PERIOD,
+	WMI_10_4_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+	WMI_10_4_ROAM_AP_PROFILE,
+	WMI_10_4_OFL_SCAN_ADD_AP_PROFILE,
+	WMI_10_4_OFL_SCAN_REMOVE_AP_PROFILE,
+	WMI_10_4_OFL_SCAN_PERIOD,
+	WMI_10_4_P2P_DEV_SET_DEVICE_INFO,
+	WMI_10_4_P2P_DEV_SET_DISCOVERABILITY,
+	WMI_10_4_P2P_GO_SET_BEACON_IE,
+	WMI_10_4_P2P_GO_SET_PROBE_RESP_IE,
+	WMI_10_4_P2P_SET_VENDOR_IE_DATA_CMDID,
+	WMI_10_4_AP_PS_PEER_PARAM_CMDID,
+	WMI_10_4_AP_PS_PEER_UAPSD_COEX_CMDID,
+	WMI_10_4_PEER_RATE_RETRY_SCHED_CMDID,
+	WMI_10_4_WLAN_PROFILE_TRIGGER_CMDID,
+	WMI_10_4_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+	WMI_10_4_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+	WMI_10_4_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+	WMI_10_4_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+	WMI_10_4_PDEV_SUSPEND_CMDID,
+	WMI_10_4_PDEV_RESUME_CMDID,
+	WMI_10_4_ADD_BCN_FILTER_CMDID,
+	WMI_10_4_RMV_BCN_FILTER_CMDID,
+	WMI_10_4_WOW_ADD_WAKE_PATTERN_CMDID,
+	WMI_10_4_WOW_DEL_WAKE_PATTERN_CMDID,
+	WMI_10_4_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+	WMI_10_4_WOW_ENABLE_CMDID,
+	WMI_10_4_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+	WMI_10_4_RTT_MEASREQ_CMDID,
+	WMI_10_4_RTT_TSF_CMDID,
+	WMI_10_4_RTT_KEEPALIVE_CMDID,
+	WMI_10_4_OEM_REQ_CMDID,
+	WMI_10_4_NAN_CMDID,
+	WMI_10_4_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+	WMI_10_4_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+	WMI_10_4_REQUEST_STATS_CMDID,
+	WMI_10_4_GPIO_CONFIG_CMDID,
+	WMI_10_4_GPIO_OUTPUT_CMDID,
+	WMI_10_4_VDEV_RATEMASK_CMDID,
+	WMI_10_4_CSA_OFFLOAD_ENABLE_CMDID,
+	WMI_10_4_GTK_OFFLOAD_CMDID,
+	WMI_10_4_QBOOST_CFG_CMDID,
+	WMI_10_4_CSA_OFFLOAD_CHANSWITCH_CMDID,
+	WMI_10_4_PDEV_SMART_ANT_ENABLE_CMDID,
+	WMI_10_4_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
+	WMI_10_4_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
+	WMI_10_4_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
+	WMI_10_4_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
+	WMI_10_4_VDEV_SET_KEEPALIVE_CMDID,
+	WMI_10_4_VDEV_GET_KEEPALIVE_CMDID,
+	WMI_10_4_FORCE_FW_HANG_CMDID,
+	WMI_10_4_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
+	WMI_10_4_PDEV_SET_CTL_TABLE_CMDID,
+	WMI_10_4_PDEV_SET_MIMOGAIN_TABLE_CMDID,
+	WMI_10_4_PDEV_RATEPWR_TABLE_CMDID,
+	WMI_10_4_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID,
+	WMI_10_4_PDEV_FIPS_CMDID,
+	WMI_10_4_TT_SET_CONF_CMDID,
+	WMI_10_4_FWTEST_CMDID,
+	WMI_10_4_VDEV_ATF_REQUEST_CMDID,
+	WMI_10_4_PEER_ATF_REQUEST_CMDID,
+	WMI_10_4_PDEV_GET_ANI_CCK_CONFIG_CMDID,
+	WMI_10_4_PDEV_GET_ANI_OFDM_CONFIG_CMDID,
+	WMI_10_4_PDEV_RESERVE_AST_ENTRY_CMDID,
+	WMI_10_4_PDEV_GET_NFCAL_POWER_CMDID,
+	WMI_10_4_PDEV_GET_TPC_CMDID,
+	WMI_10_4_PDEV_GET_AST_INFO_CMDID,
+	WMI_10_4_VDEV_SET_DSCP_TID_MAP_CMDID,
+	WMI_10_4_PDEV_GET_TEMPERATURE_CMDID,
+	WMI_10_4_PDEV_GET_INFO_CMDID,
+	WMI_10_4_VDEV_GET_INFO_CMDID,
+	WMI_10_4_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID,
+	WMI_10_4_MU_CAL_START_CMDID,
+	WMI_10_4_SET_CCA_PARAMS_CMDID,
+	WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
+	WMI_10_4_PDEV_UTF_CMDID = WMI_10_4_END_CMDID - 1,
+};
+
+enum wmi_10_4_event_id {
+	WMI_10_4_SERVICE_READY_EVENTID = 0x8000,
+	WMI_10_4_READY_EVENTID,
+	WMI_10_4_DEBUG_MESG_EVENTID,
+	WMI_10_4_START_EVENTID = 0x9000,
+	WMI_10_4_END_EVENTID = 0x9FFF,
+	WMI_10_4_SCAN_EVENTID = WMI_10_4_START_EVENTID,
+	WMI_10_4_ECHO_EVENTID,
+	WMI_10_4_UPDATE_STATS_EVENTID,
+	WMI_10_4_INST_RSSI_STATS_EVENTID,
+	WMI_10_4_VDEV_START_RESP_EVENTID,
+	WMI_10_4_VDEV_STANDBY_REQ_EVENTID,
+	WMI_10_4_VDEV_RESUME_REQ_EVENTID,
+	WMI_10_4_VDEV_STOPPED_EVENTID,
+	WMI_10_4_PEER_STA_KICKOUT_EVENTID,
+	WMI_10_4_HOST_SWBA_EVENTID,
+	WMI_10_4_TBTTOFFSET_UPDATE_EVENTID,
+	WMI_10_4_MGMT_RX_EVENTID,
+	WMI_10_4_CHAN_INFO_EVENTID,
+	WMI_10_4_PHYERR_EVENTID,
+	WMI_10_4_ROAM_EVENTID,
+	WMI_10_4_PROFILE_MATCH,
+	WMI_10_4_DEBUG_PRINT_EVENTID,
+	WMI_10_4_PDEV_QVIT_EVENTID,
+	WMI_10_4_WLAN_PROFILE_DATA_EVENTID,
+	WMI_10_4_RTT_MEASUREMENT_REPORT_EVENTID,
+	WMI_10_4_TSF_MEASUREMENT_REPORT_EVENTID,
+	WMI_10_4_RTT_ERROR_REPORT_EVENTID,
+	WMI_10_4_RTT_KEEPALIVE_EVENTID,
+	WMI_10_4_OEM_CAPABILITY_EVENTID,
+	WMI_10_4_OEM_MEASUREMENT_REPORT_EVENTID,
+	WMI_10_4_OEM_ERROR_REPORT_EVENTID,
+	WMI_10_4_NAN_EVENTID,
+	WMI_10_4_WOW_WAKEUP_HOST_EVENTID,
+	WMI_10_4_GTK_OFFLOAD_STATUS_EVENTID,
+	WMI_10_4_GTK_REKEY_FAIL_EVENTID,
+	WMI_10_4_DCS_INTERFERENCE_EVENTID,
+	WMI_10_4_PDEV_TPC_CONFIG_EVENTID,
+	WMI_10_4_CSA_HANDLING_EVENTID,
+	WMI_10_4_GPIO_INPUT_EVENTID,
+	WMI_10_4_PEER_RATECODE_LIST_EVENTID,
+	WMI_10_4_GENERIC_BUFFER_EVENTID,
+	WMI_10_4_MCAST_BUF_RELEASE_EVENTID,
+	WMI_10_4_MCAST_LIST_AGEOUT_EVENTID,
+	WMI_10_4_VDEV_GET_KEEPALIVE_EVENTID,
+	WMI_10_4_WDS_PEER_EVENTID,
+	WMI_10_4_PEER_STA_PS_STATECHG_EVENTID,
+	WMI_10_4_PDEV_FIPS_EVENTID,
+	WMI_10_4_TT_STATS_EVENTID,
+	WMI_10_4_PDEV_CHANNEL_HOPPING_EVENTID,
+	WMI_10_4_PDEV_ANI_CCK_LEVEL_EVENTID,
+	WMI_10_4_PDEV_ANI_OFDM_LEVEL_EVENTID,
+	WMI_10_4_PDEV_RESERVE_AST_ENTRY_EVENTID,
+	WMI_10_4_PDEV_NFCAL_POWER_EVENTID,
+	WMI_10_4_PDEV_TPC_EVENTID,
+	WMI_10_4_PDEV_GET_AST_INFO_EVENTID,
+	WMI_10_4_PDEV_TEMPERATURE_EVENTID,
+	WMI_10_4_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENTID,
+	WMI_10_4_PDEV_BSS_CHAN_INFO_EVENTID,
+	WMI_10_4_PDEV_UTF_EVENTID = WMI_10_4_END_EVENTID - 1,
+};
+
+enum wmi_phy_mode {
+	MODE_11A        = 0,   /* 11a Mode */
+	MODE_11G        = 1,   /* 11b/g Mode */
+	MODE_11B        = 2,   /* 11b Mode */
+	MODE_11GONLY    = 3,   /* 11g only Mode */
+	MODE_11NA_HT20   = 4,  /* 11a HT20 mode */
+	MODE_11NG_HT20   = 5,  /* 11g HT20 mode */
+	MODE_11NA_HT40   = 6,  /* 11a HT40 mode */
+	MODE_11NG_HT40   = 7,  /* 11g HT40 mode */
+	MODE_11AC_VHT20 = 8,
+	MODE_11AC_VHT40 = 9,
+	MODE_11AC_VHT80 = 10,
+	/*    MODE_11AC_VHT160 = 11, */
+	MODE_11AC_VHT20_2G = 11,
+	MODE_11AC_VHT40_2G = 12,
+	MODE_11AC_VHT80_2G = 13,
+	MODE_UNKNOWN    = 14,
+	MODE_MAX        = 14
+};
+
+static inline const char *ath10k_wmi_phymode_str(enum wmi_phy_mode mode)
+{
+	switch (mode) {
+	case MODE_11A:
+		return "11a";
+	case MODE_11G:
+		return "11g";
+	case MODE_11B:
+		return "11b";
+	case MODE_11GONLY:
+		return "11gonly";
+	case MODE_11NA_HT20:
+		return "11na-ht20";
+	case MODE_11NG_HT20:
+		return "11ng-ht20";
+	case MODE_11NA_HT40:
+		return "11na-ht40";
+	case MODE_11NG_HT40:
+		return "11ng-ht40";
+	case MODE_11AC_VHT20:
+		return "11ac-vht20";
+	case MODE_11AC_VHT40:
+		return "11ac-vht40";
+	case MODE_11AC_VHT80:
+		return "11ac-vht80";
+	case MODE_11AC_VHT20_2G:
+		return "11ac-vht20-2g";
+	case MODE_11AC_VHT40_2G:
+		return "11ac-vht40-2g";
+	case MODE_11AC_VHT80_2G:
+		return "11ac-vht80-2g";
+	case MODE_UNKNOWN:
+		/* skip */
+		break;
+
+		/* no default handler to allow compiler to check that the
+		 * enum is fully handled */
+	}
+
+	return "<unknown>";
+}
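+/* Editor's usage note (illustration only, not driver code): the helper above
+ * is convenient for debug logging, e.g. something along the lines of
+ *	ath10k_dbg(ar, ATH10K_DBG_WMI, "phy mode %s\n",
+ *		   ath10k_wmi_phymode_str(MODE_11AC_VHT80));
+ */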
+
+#define WMI_CHAN_LIST_TAG	0x1
+#define WMI_SSID_LIST_TAG	0x2
+#define WMI_BSSID_LIST_TAG	0x3
+#define WMI_IE_TAG		0x4
+
+struct wmi_channel {
+	__le32 mhz;
+	__le32 band_center_freq1;
+	__le32 band_center_freq2; /* valid for 11ac, 80plus80 */
+	union {
+		__le32 flags; /* WMI_CHAN_FLAG_ */
+		struct {
+			u8 mode; /* only 6 LSBs */
+		} __packed;
+	} __packed;
+	union {
+		__le32 reginfo0;
+		struct {
+			/* note: power unit is 0.5 dBm */
+			u8 min_power;
+			u8 max_power;
+			u8 reg_power;
+			u8 reg_classid;
+		} __packed;
+	} __packed;
+	union {
+		__le32 reginfo1;
+		struct {
+			u8 antenna_max;
+		} __packed;
+	} __packed;
+} __packed;
+
+struct wmi_channel_arg {
+	u32 freq;
+	u32 band_center_freq1;
+	bool passive;
+	bool allow_ibss;
+	bool allow_ht;
+	bool allow_vht;
+	bool ht40plus;
+	bool chan_radar;
+	/* note: power unit is 0.5 dBm */
+	u32 min_power;
+	u32 max_power;
+	u32 max_reg_power;
+	u32 max_antenna_gain;
+	u32 reg_class_id;
+	enum wmi_phy_mode mode;
+};
+
+enum wmi_channel_change_cause {
+	WMI_CHANNEL_CHANGE_CAUSE_NONE = 0,
+	WMI_CHANNEL_CHANGE_CAUSE_CSA,
+};
+
+#define WMI_CHAN_FLAG_HT40_PLUS      (1 << 6)
+#define WMI_CHAN_FLAG_PASSIVE        (1 << 7)
+#define WMI_CHAN_FLAG_ADHOC_ALLOWED  (1 << 8)
+#define WMI_CHAN_FLAG_AP_DISABLED    (1 << 9)
+#define WMI_CHAN_FLAG_DFS            (1 << 10)
+#define WMI_CHAN_FLAG_ALLOW_HT       (1 << 11)
+#define WMI_CHAN_FLAG_ALLOW_VHT      (1 << 12)
+
+/* Indicate reason for channel switch */
+#define WMI_CHANNEL_CHANGE_CAUSE_CSA (1 << 13)
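+/* Editor's illustrative sketch (hypothetical helper, not driver code): in
+ * struct wmi_channel above, the 8-bit mode field overlays the low bits of the
+ * same little-endian word that carries the WMI_CHAN_FLAG_* bits (mode uses
+ * only the 6 LSBs, the flags start at bit 6), so composing that word could
+ * look like this.
+ */
+static inline __le32 wmi_channel_flags_example(enum wmi_phy_mode mode,
+						u32 flags)
+{
+	return __cpu_to_le32((mode & 0x3f) | (flags & ~0x3fU));
+}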
+
+#define WMI_MAX_SPATIAL_STREAM        3 /* default max ss */
+#define WMI_10_4_MAX_SPATIAL_STREAM   4
+
+/* HT Capabilities*/
+#define WMI_HT_CAP_ENABLED                0x0001   /* HT Enabled/disabled */
+#define WMI_HT_CAP_HT20_SGI       0x0002   /* Short Guard Interval with HT20 */
+#define WMI_HT_CAP_DYNAMIC_SMPS           0x0004   /* Dynamic MIMO powersave */
+#define WMI_HT_CAP_TX_STBC                0x0008   /* B3 TX STBC */
+#define WMI_HT_CAP_TX_STBC_MASK_SHIFT     3
+#define WMI_HT_CAP_RX_STBC                0x0030   /* B4-B5 RX STBC */
+#define WMI_HT_CAP_RX_STBC_MASK_SHIFT     4
+#define WMI_HT_CAP_LDPC                   0x0040   /* LDPC supported */
+#define WMI_HT_CAP_L_SIG_TXOP_PROT        0x0080   /* L-SIG TXOP Protection */
+#define WMI_HT_CAP_MPDU_DENSITY           0x0700   /* MPDU Density */
+#define WMI_HT_CAP_MPDU_DENSITY_MASK_SHIFT 8
+#define WMI_HT_CAP_HT40_SGI               0x0800
+
+#define WMI_HT_CAP_DEFAULT_ALL (WMI_HT_CAP_ENABLED       | \
+				WMI_HT_CAP_HT20_SGI      | \
+				WMI_HT_CAP_HT40_SGI      | \
+				WMI_HT_CAP_TX_STBC       | \
+				WMI_HT_CAP_RX_STBC       | \
+				WMI_HT_CAP_LDPC)
+
+/*
+ * WMI_VHT_CAP_* map to the IEEE 802.11ac VHT capability information
+ * field. The fields not defined here are not supported, or reserved.
+ * Do not change these masks; if you have to add a new one, follow the
+ * bit layout specified by the 802.11ac draft.
+ */
+
+#define WMI_VHT_CAP_MAX_MPDU_LEN_MASK            0x00000003
+#define WMI_VHT_CAP_RX_LDPC                      0x00000010
+#define WMI_VHT_CAP_SGI_80MHZ                    0x00000020
+#define WMI_VHT_CAP_TX_STBC                      0x00000080
+#define WMI_VHT_CAP_RX_STBC_MASK                 0x00000300
+#define WMI_VHT_CAP_RX_STBC_MASK_SHIFT           8
+#define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP            0x03800000
+#define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP_SHIFT      23
+#define WMI_VHT_CAP_RX_FIXED_ANT                 0x10000000
+#define WMI_VHT_CAP_TX_FIXED_ANT                 0x20000000
+
+/* The following also apply to the max HT A-MSDU */
+#define WMI_VHT_CAP_MAX_MPDU_LEN_3839            0x00000000
+#define WMI_VHT_CAP_MAX_MPDU_LEN_7935            0x00000001
+#define WMI_VHT_CAP_MAX_MPDU_LEN_11454           0x00000002
+
+#define WMI_VHT_CAP_DEFAULT_ALL (WMI_VHT_CAP_MAX_MPDU_LEN_11454  | \
+				 WMI_VHT_CAP_RX_LDPC             | \
+				 WMI_VHT_CAP_SGI_80MHZ           | \
+				 WMI_VHT_CAP_TX_STBC             | \
+				 WMI_VHT_CAP_RX_STBC_MASK        | \
+				 WMI_VHT_CAP_MAX_AMPDU_LEN_EXP   | \
+				 WMI_VHT_CAP_RX_FIXED_ANT        | \
+				 WMI_VHT_CAP_TX_FIXED_ANT)
+
+/*
+ * Interested readers should refer to the Rx/Tx MCS Map definition in
+ * 802.11ac.
+ */
+#define WMI_VHT_MAX_MCS_4_SS_MASK(r, ss)      ((3 & (r)) << (((ss) - 1) << 1))
+#define WMI_VHT_MAX_SUPP_RATE_MASK           0x1fff0000
+#define WMI_VHT_MAX_SUPP_RATE_MASK_SHIFT     16
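+/* Editor's worked example: WMI_VHT_MAX_MCS_4_SS_MASK() places a 2-bit max-MCS
+ * value for (1-based) spatial stream 'ss' into the 16-bit VHT MCS map, e.g.
+ * WMI_VHT_MAX_MCS_4_SS_MASK(2, 1) == 0x0002 and
+ * WMI_VHT_MAX_MCS_4_SS_MASK(2, 3) == 0x0020, i.e. "MCS 0-9" for spatial
+ * streams 1 and 3 respectively, per the 802.11ac MCS map encoding.
+ */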
+
+enum {
+	REGDMN_MODE_11A              = 0x00001, /* 11a channels */
+	REGDMN_MODE_TURBO            = 0x00002, /* 11a turbo-only channels */
+	REGDMN_MODE_11B              = 0x00004, /* 11b channels */
+	REGDMN_MODE_PUREG            = 0x00008, /* 11g channels (OFDM only) */
+	REGDMN_MODE_11G              = 0x00008, /* XXX historical */
+	REGDMN_MODE_108G             = 0x00020, /* 11g+Turbo channels */
+	REGDMN_MODE_108A             = 0x00040, /* 11a+Turbo channels */
+	REGDMN_MODE_XR               = 0x00100, /* XR channels */
+	REGDMN_MODE_11A_HALF_RATE    = 0x00200, /* 11A half rate channels */
+	REGDMN_MODE_11A_QUARTER_RATE = 0x00400, /* 11A quarter rate channels */
+	REGDMN_MODE_11NG_HT20        = 0x00800, /* 11N-G HT20 channels */
+	REGDMN_MODE_11NA_HT20        = 0x01000, /* 11N-A HT20 channels */
+	REGDMN_MODE_11NG_HT40PLUS    = 0x02000, /* 11N-G HT40 + channels */
+	REGDMN_MODE_11NG_HT40MINUS   = 0x04000, /* 11N-G HT40 - channels */
+	REGDMN_MODE_11NA_HT40PLUS    = 0x08000, /* 11N-A HT40 + channels */
+	REGDMN_MODE_11NA_HT40MINUS   = 0x10000, /* 11N-A HT40 - channels */
+	REGDMN_MODE_11AC_VHT20       = 0x20000, /* 5GHz, VHT20 */
+	REGDMN_MODE_11AC_VHT40PLUS   = 0x40000, /* 5GHz, VHT40 + channels */
+	REGDMN_MODE_11AC_VHT40MINUS  = 0x80000, /* 5GHz, VHT40 - channels */
+	REGDMN_MODE_11AC_VHT80       = 0x100000, /* 5GHz, VHT80 channels */
+	REGDMN_MODE_ALL              = 0xffffffff
+};
+
+#define REGDMN_CAP1_CHAN_HALF_RATE        0x00000001
+#define REGDMN_CAP1_CHAN_QUARTER_RATE     0x00000002
+#define REGDMN_CAP1_CHAN_HAL49GHZ         0x00000004
+
+/* regulatory capabilities */
+#define REGDMN_EEPROM_EEREGCAP_EN_FCC_MIDBAND   0x0040
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_U1_EVEN    0x0080
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_U2         0x0100
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_MIDBAND    0x0200
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_U1_ODD     0x0400
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_NEW_11A    0x0800
+
+struct hal_reg_capabilities {
+	/* regdomain value specified in EEPROM */
+	__le32 eeprom_rd;
+	/* regdomain */
+	__le32 eeprom_rd_ext;
+	/* CAP1 capabilities bit map. */
+	__le32 regcap1;
+	/* REGDMN EEPROM CAP. */
+	__le32 regcap2;
+	/* REGDMN MODE */
+	__le32 wireless_modes;
+	__le32 low_2ghz_chan;
+	__le32 high_2ghz_chan;
+	__le32 low_5ghz_chan;
+	__le32 high_5ghz_chan;
+} __packed;
+
+enum wlan_mode_capability {
+	WHAL_WLAN_11A_CAPABILITY   = 0x1,
+	WHAL_WLAN_11G_CAPABILITY   = 0x2,
+	WHAL_WLAN_11AG_CAPABILITY  = 0x3,
+};
+
+/* structure used by FW for requesting host memory */
+struct wlan_host_mem_req {
+	/* ID of the request */
+	__le32 req_id;
+	/* size of each unit */
+	__le32 unit_size;
+	/* flags to indicate that
+	 * the number of units is dependent
+	 * on the number of resources (num vdevs, num peers, etc.)
+	 */
+	__le32 num_unit_info;
+	/*
+	 * actual number of units to allocate. If flags in num_unit_info
+	 * indicate that the number of units is tied to the number of a
+	 * particular resource, then the num_units field is set to 0 and the
+	 * host will derive the number of units from the number of that
+	 * resource it is requesting.
+	 */
+	__le32 num_units;
+} __packed;
+
+/*
+ * The following struct holds the optional payload for
+ * wmi_service_ready_event, e.g. 802.11ac passes some of the
+ * device capabilities to the host.
+ */
+struct wmi_service_ready_event {
+	__le32 sw_version;
+	__le32 sw_version_1;
+	__le32 abi_version;
+	/* WMI_PHY_CAPABILITY */
+	__le32 phy_capability;
+	/* Maximum number of frag table entries that SW will populate less 1 */
+	__le32 max_frag_entry;
+	__le32 wmi_service_bitmap[16];
+	__le32 num_rf_chains;
+	/*
+	 * The following field is only valid for service type
+	 * WMI_SERVICE_11AC
+	 */
+	__le32 ht_cap_info; /* WMI HT Capability */
+	__le32 vht_cap_info; /* VHT capability info field of 802.11ac */
+	__le32 vht_supp_mcs; /* VHT Supported MCS Set field Rx/Tx same */
+	__le32 hw_min_tx_power;
+	__le32 hw_max_tx_power;
+	struct hal_reg_capabilities hal_reg_capabilities;
+	__le32 sys_cap_info;
+	__le32 min_pkt_size_enable; /* Enterprise mode short pkt enable */
+	/*
+	 * Max beacon and Probe Response IE offload size
+	 * (includes optional P2P IEs)
+	 */
+	__le32 max_bcn_ie_size;
+	/*
+	 * request to host to allocate a chunk of memory and pass it down to
+	 * FW via WMI_INIT. FW uses this as FW extension memory for saving its
+	 * data structures. Only valid for low latency interfaces like PCIe
+	 * where FW can access this memory directly (or) by DMA.
+	 */
+	__le32 num_mem_reqs;
+	struct wlan_host_mem_req mem_reqs[0];
+} __packed;
+
+/* This is the definition from 10.X firmware branch */
+struct wmi_10x_service_ready_event {
+	__le32 sw_version;
+	__le32 abi_version;
+
+	/* WMI_PHY_CAPABILITY */
+	__le32 phy_capability;
+
+	/* Maximum number of frag table entries that SW will populate less 1 */
+	__le32 max_frag_entry;
+	__le32 wmi_service_bitmap[16];
+	__le32 num_rf_chains;
+
+	/*
+	 * The following field is only valid for service type
+	 * WMI_SERVICE_11AC
+	 */
+	__le32 ht_cap_info; /* WMI HT Capability */
+	__le32 vht_cap_info; /* VHT capability info field of 802.11ac */
+	__le32 vht_supp_mcs; /* VHT Supported MCS Set field Rx/Tx same */
+	__le32 hw_min_tx_power;
+	__le32 hw_max_tx_power;
+
+	struct hal_reg_capabilities hal_reg_capabilities;
+
+	__le32 sys_cap_info;
+	__le32 min_pkt_size_enable; /* Enterprise mode short pkt enable */
+
+	/*
+	 * request to host to allocate a chunk of memory and pass it down to
+	 * FW via WMI_INIT. FW uses this as FW extension memory for saving its
+	 * data structures. Only valid for low latency interfaces like PCIe
+	 * where FW can access this memory directly (or) by DMA.
+	 */
+	__le32 num_mem_reqs;
+
+	struct wlan_host_mem_req mem_reqs[0];
+} __packed;
+
+#define WMI_SERVICE_READY_TIMEOUT_HZ (5*HZ)
+#define WMI_UNIFIED_READY_TIMEOUT_HZ (5*HZ)
+
+struct wmi_ready_event {
+	__le32 sw_version;
+	__le32 abi_version;
+	struct wmi_mac_addr mac_addr;
+	__le32 status;
+} __packed;
+
+struct wmi_resource_config {
+	/* number of virtual devices (VAPs) to support */
+	__le32 num_vdevs;
+
+	/* number of peer nodes to support */
+	__le32 num_peers;
+
+	/*
+	 * In offload mode target supports features like WOW, chatter and
+	 * other protocol offloads. In order to support them some
+	 * functionalities like reorder buffering, PN checking need to be
+	 * done in target. This determines maximum number of peers supported
+	 * by target in offload mode.
+	 */
+	__le32 num_offload_peers;
+
+	/* For target-based RX reordering */
+	__le32 num_offload_reorder_bufs;
+
+	/* number of keys per peer */
+	__le32 num_peer_keys;
+
+	/* total number of TX/RX data TIDs */
+	__le32 num_tids;
+
+	/*
+	 * max skid for resolving hash collisions
+	 *
+	 *   The address search table is sparse, so that if two MAC addresses
+	 *   result in the same hash value, the second of these conflicting
+	 *   entries can slide to the next index in the address search table,
+	 *   and use it, if it is unoccupied.  This ast_skid_limit parameter
+	 *   specifies the upper bound on how many subsequent indices to search
+	 *   over to find an unoccupied space.
+	 */
+	__le32 ast_skid_limit;
+
+	/*
+	 * the nominal chain mask for transmit
+	 *
+	 *   The chain mask may be modified dynamically, e.g. to operate AP
+	 *   tx with a reduced number of chains if no clients are associated.
+	 *   This configuration parameter specifies the nominal chain-mask that
+	 *   should be used when not operating with a reduced set of tx chains.
+	 */
+	__le32 tx_chain_mask;
+
+	/*
+	 * the nominal chain mask for receive
+	 *
+	 *   The chain mask may be modified dynamically, e.g. for a client
+	 *   to use a reduced number of chains for receive if the traffic to
+	 *   the client is low enough that it doesn't require downlink MIMO
+	 *   or antenna diversity.
+	 *   This configuration parameter specifies the nominal chain-mask that
+	 *   should be used when not operating with a reduced set of rx chains.
+	 */
+	__le32 rx_chain_mask;
+
+	/*
+	 * what rx reorder timeout (ms) to use for the AC
+	 *
+	 *   Each WMM access class (voice, video, best-effort, background) will
+	 *   have its own timeout value to dictate how long to wait for missing
+	 *   rx MPDUs to arrive before flushing subsequent MPDUs that have
+	 *   already been received.
+	 *   This parameter specifies the timeout in milliseconds for each
+	 *   class.
+	 */
+	__le32 rx_timeout_pri_vi;
+	__le32 rx_timeout_pri_vo;
+	__le32 rx_timeout_pri_be;
+	__le32 rx_timeout_pri_bk;
+
+	/*
+	 * what mode the rx should decap packets to
+	 *
+	 *   MAC can decap to RAW (no decap), native wifi or Ethernet types.
+	 *   This setting also determines the default TX behavior, however TX
+	 *   behavior can be modified on a per VAP basis during VAP init.
+	 */
+	__le32 rx_decap_mode;
+
+	/* what is the maximum number of scan requests that can be queued */
+	__le32 scan_max_pending_reqs;
+
+	/* maximum VDEV that could use BMISS offload */
+	__le32 bmiss_offload_max_vdev;
+
+	/* maximum VDEV that could use offload roaming */
+	__le32 roam_offload_max_vdev;
+
+	/* maximum AP profiles that would push to offload roaming */
+	__le32 roam_offload_max_ap_profiles;
+
+	/*
+	 * how many groups to use for mcast->ucast conversion
+	 *
+	 *   The target's WAL maintains a table to hold information regarding
+	 *   which peers belong to a given multicast group, so that if
+	 *   multicast->unicast conversion is enabled, the target can convert
+	 *   multicast tx frames to a series of unicast tx frames, to each
+	 *   peer within the multicast group.
+	 *   This num_mcast_groups configuration parameter tells the target how
+	 *   many multicast groups to provide storage for within its multicast
+	 *   group membership table.
+	 */
+	__le32 num_mcast_groups;
+
+	/*
+	 * size to alloc for the mcast membership table
+	 *
+	 *   This num_mcast_table_elems configuration parameter tells the
+	 *   target how many peer elements it needs to provide storage for in
+	 *   its multicast group membership table.
+	 *   These multicast group membership table elements are shared by the
+	 *   multicast groups stored within the table.
+	 */
+	__le32 num_mcast_table_elems;
+
+	/*
+	 * whether/how to do multicast->unicast conversion
+	 *
+	 *   This configuration parameter specifies whether the target should
+	 *   perform multicast --> unicast conversion on transmit, and if so,
+	 *   what to do if it finds no entries in its multicast group
+	 *   membership table for the multicast IP address in the tx frame.
+	 *   Configuration value:
+	 *   0 -> Do not perform multicast to unicast conversion.
+	 *   1 -> Convert multicast frames to unicast, if the IP multicast
+	 *        address from the tx frame is found in the multicast group
+	 *        membership table.  If the IP multicast address is not found,
+	 *        drop the frame.
+	 *   2 -> Convert multicast frames to unicast, if the IP multicast
+	 *        address from the tx frame is found in the multicast group
+	 *        membership table.  If the IP multicast address is not found,
+	 *        transmit the frame as multicast.
+	 */
+	__le32 mcast2ucast_mode;
+
+	/*
+	 * how much memory to allocate for a tx PPDU dbg log
+	 *
+	 *   This parameter controls how much memory the target will allocate
+	 *   to store a log of tx PPDU meta-information (how large the PPDU
+	 *   was, when it was sent, whether it was successful, etc.)
+	 */
+	__le32 tx_dbg_log_size;
+
+	/* how many AST entries to be allocated for WDS */
+	__le32 num_wds_entries;
+
+	/*
+	 * MAC DMA burst size, e.g. for target PCI the limit can be
+	 * 0 - default, 1 - 256B
+	 */
+	__le32 dma_burst_size;
+
+	/*
+	 * Fixed delimiters to be inserted after every MPDU to
+	 * account for interface latency to avoid underrun.
+	 */
+	__le32 mac_aggr_delim;
+
+	/*
+	 *   determine whether target is responsible for detecting duplicate
+	 *   non-aggregate MPDU and timing out stale fragments.
+	 *
+	 *   A-MPDU reordering is always performed on the target.
+	 *
+	 *   0: target responsible for frag timeout and dup checking
+	 *   1: host responsible for frag timeout and dup checking
+	 */
+	__le32 rx_skip_defrag_timeout_dup_detection_check;
+
+	/*
+	 * Configuration for VoW:
+	 * number of video nodes to be supported
+	 * and max number of descriptors for each video link (node).
+	 */
+	__le32 vow_config;
+
+	/* maximum VDEV that could use GTK offload */
+	__le32 gtk_offload_max_vdev;
+
+	/* Number of msdu descriptors target should use */
+	__le32 num_msdu_desc;
+
+	/*
+	 * Max. number of Tx fragments per MSDU
+	 *  This parameter controls the max number of Tx fragments per MSDU.
+	 *  This is sent by the target as part of the WMI_SERVICE_READY event
+	 *  and is overridden by the OS shim as required.
+	 */
+	__le32 max_frag_entries;
+} __packed;
+
+struct wmi_resource_config_10x {
+	/* number of virtual devices (VAPs) to support */
+	__le32 num_vdevs;
+
+	/* number of peer nodes to support */
+	__le32 num_peers;
+
+	/* number of keys per peer */
+	__le32 num_peer_keys;
+
+	/* total number of TX/RX data TIDs */
+	__le32 num_tids;
+
+	/*
+	 * max skid for resolving hash collisions
+	 *
+	 *   The address search table is sparse, so that if two MAC addresses
+	 *   result in the same hash value, the second of these conflicting
+	 *   entries can slide to the next index in the address search table,
+	 *   and use it, if it is unoccupied.  This ast_skid_limit parameter
+	 *   specifies the upper bound on how many subsequent indices to search
+	 *   over to find an unoccupied space.
+	 */
+	__le32 ast_skid_limit;
+
+	/*
+	 * the nominal chain mask for transmit
+	 *
+	 *   The chain mask may be modified dynamically, e.g. to operate AP
+	 *   tx with a reduced number of chains if no clients are associated.
+	 *   This configuration parameter specifies the nominal chain-mask that
+	 *   should be used when not operating with a reduced set of tx chains.
+	 */
+	__le32 tx_chain_mask;
+
+	/*
+	 * the nominal chain mask for receive
+	 *
+	 *   The chain mask may be modified dynamically, e.g. for a client
+	 *   to use a reduced number of chains for receive if the traffic to
+	 *   the client is low enough that it doesn't require downlink MIMO
+	 *   or antenna diversity.
+	 *   This configuration parameter specifies the nominal chain-mask that
+	 *   should be used when not operating with a reduced set of rx chains.
+	 */
+	__le32 rx_chain_mask;
+
+	/*
+	 * what rx reorder timeout (ms) to use for the AC
+	 *
+	 *   Each WMM access class (voice, video, best-effort, background) will
+	 *   have its own timeout value to dictate how long to wait for missing
+	 *   rx MPDUs to arrive before flushing subsequent MPDUs that have
+	 *   already been received.
+	 *   This parameter specifies the timeout in milliseconds for each
+	 *   class.
+	 */
+	__le32 rx_timeout_pri_vi;
+	__le32 rx_timeout_pri_vo;
+	__le32 rx_timeout_pri_be;
+	__le32 rx_timeout_pri_bk;
+
+	/*
+	 * what mode the rx should decap packets to
+	 *
+	 *   MAC can decap to RAW (no decap), native wifi or Ethernet types.
+	 *   This setting also determines the default TX behavior, however TX
+	 *   behavior can be modified on a per VAP basis during VAP init.
+	 */
+	__le32 rx_decap_mode;
+
+	/* what is the maximum number of scan requests that can be queued */
+	__le32 scan_max_pending_reqs;
+
+	/* maximum VDEV that could use BMISS offload */
+	__le32 bmiss_offload_max_vdev;
+
+	/* maximum VDEV that could use offload roaming */
+	__le32 roam_offload_max_vdev;
+
+	/* maximum AP profiles that would push to offload roaming */
+	__le32 roam_offload_max_ap_profiles;
+
+	/*
+	 * how many groups to use for mcast->ucast conversion
+	 *
+	 *   The target's WAL maintains a table to hold information regarding
+	 *   which peers belong to a given multicast group, so that if
+	 *   multicast->unicast conversion is enabled, the target can convert
+	 *   multicast tx frames to a series of unicast tx frames, to each
+	 *   peer within the multicast group.
+	 *   This num_mcast_groups configuration parameter tells the target how
+	 *   many multicast groups to provide storage for within its multicast
+	 *   group membership table.
+	 */
+	__le32 num_mcast_groups;
+
+	/*
+	 * size to alloc for the mcast membership table
+	 *
+	 *   This num_mcast_table_elems configuration parameter tells the
+	 *   target how many peer elements it needs to provide storage for in
+	 *   its multicast group membership table.
+	 *   These multicast group membership table elements are shared by the
+	 *   multicast groups stored within the table.
+	 */
+	__le32 num_mcast_table_elems;
+
+	/*
+	 * whether/how to do multicast->unicast conversion
+	 *
+	 *   This configuration parameter specifies whether the target should
+	 *   perform multicast --> unicast conversion on transmit, and if so,
+	 *   what to do if it finds no entries in its multicast group
+	 *   membership table for the multicast IP address in the tx frame.
+	 *   Configuration value:
+	 *   0 -> Do not perform multicast to unicast conversion.
+	 *   1 -> Convert multicast frames to unicast, if the IP multicast
+	 *        address from the tx frame is found in the multicast group
+	 *        membership table.  If the IP multicast address is not found,
+	 *        drop the frame.
+	 *   2 -> Convert multicast frames to unicast, if the IP multicast
+	 *        address from the tx frame is found in the multicast group
+	 *        membership table.  If the IP multicast address is not found,
+	 *        transmit the frame as multicast.
+	 */
+	__le32 mcast2ucast_mode;
+
+	/*
+	 * how much memory to allocate for a tx PPDU dbg log
+	 *
+	 *   This parameter controls how much memory the target will allocate
+	 *   to store a log of tx PPDU meta-information (how large the PPDU
+	 *   was, when it was sent, whether it was successful, etc.)
+	 */
+	__le32 tx_dbg_log_size;
+
+	/* how many AST entries to be allocated for WDS */
+	__le32 num_wds_entries;
+
+	/*
+	 * MAC DMA burst size, e.g. for target PCI the limit can be
+	 * 0 - default, 1 - 256B
+	 */
+	__le32 dma_burst_size;
+
+	/*
+	 * Fixed delimiters to be inserted after every MPDU to
+	 * account for interface latency to avoid underrun.
+	 */
+	__le32 mac_aggr_delim;
+
+	/*
+	 *   determine whether target is responsible for detecting duplicate
+	 *   non-aggregate MPDU and timing out stale fragments.
+	 *
+	 *   A-MPDU reordering is always performed on the target.
+	 *
+	 *   0: target responsible for frag timeout and dup checking
+	 *   1: host responsible for frag timeout and dup checking
+	 */
+	__le32 rx_skip_defrag_timeout_dup_detection_check;
+
+	/*
+	 * Configuration for VoW:
+	 * number of video nodes to be supported
+	 * and max number of descriptors for each video link (node).
+	 */
+	__le32 vow_config;
+
+	/* Number of msdu descriptors target should use */
+	__le32 num_msdu_desc;
+
+	/*
+	 * Max. number of Tx fragments per MSDU
+	 *  This parameter controls the max number of Tx fragments per MSDU.
+	 *  This is sent by the target as part of the WMI_SERVICE_READY event
+	 *  and is overriden by the OS shim as required.
+	 */
+	__le32 max_frag_entries;
+} __packed;
+
+enum wmi_10_2_feature_mask {
+	WMI_10_2_RX_BATCH_MODE = BIT(0),
+	WMI_10_2_ATF_CONFIG    = BIT(1),
+	WMI_10_2_COEX_GPIO     = BIT(3),
+};
+
+struct wmi_resource_config_10_2 {
+	struct wmi_resource_config_10x common;
+	__le32 max_peer_ext_stats;
+	__le32 smart_ant_cap; /* 0-disable, 1-enable */
+	__le32 bk_min_free;
+	__le32 be_min_free;
+	__le32 vi_min_free;
+	__le32 vo_min_free;
+	__le32 feature_mask;
+} __packed;
+
+#define NUM_UNITS_IS_NUM_VDEVS         BIT(0)
+#define NUM_UNITS_IS_NUM_PEERS         BIT(1)
+#define NUM_UNITS_IS_NUM_ACTIVE_PEERS  BIT(2)
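+/* Editor's illustrative sketch (hypothetical helper, not driver code): when a
+ * wlan_host_mem_req carries num_units == 0, the NUM_UNITS_IS_* flags above
+ * indicate which negotiated resource count the host should substitute. The
+ * num_vdevs/num_peers/num_active_peers values are assumed to come from the
+ * host's chosen resource config.
+ */
+static inline u32 wmi_mem_req_num_units_example(const struct wlan_host_mem_req *req,
+						 u32 num_vdevs, u32 num_peers,
+						 u32 num_active_peers)
+{
+	u32 info = __le32_to_cpu(req->num_unit_info);
+	u32 num_units = __le32_to_cpu(req->num_units);
+
+	if (num_units)
+		return num_units;	/* firmware asked for a fixed count */
+	if (info & NUM_UNITS_IS_NUM_ACTIVE_PEERS)
+		return num_active_peers;
+	if (info & NUM_UNITS_IS_NUM_PEERS)
+		return num_peers;
+	if (info & NUM_UNITS_IS_NUM_VDEVS)
+		return num_vdevs;
+	return 0;
+}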
+
+struct wmi_resource_config_10_4 {
+	/* Number of virtual devices (VAPs) to support */
+	__le32 num_vdevs;
+
+	/* Number of peer nodes to support */
+	__le32 num_peers;
+
+	/* Number of active peer nodes to support */
+	__le32 num_active_peers;
+
+	/* In offload mode, target supports features like WOW, chatter and other
+	 * protocol offloads. In order to support them some functionalities like
+	 * reorder buffering, PN checking need to be done in target.
+	 * This determines maximum number of peers supported by target in
+	 * offload mode.
+	 */
+	__le32 num_offload_peers;
+
+	/* Number of reorder buffers available for doing target based reorder
+	 * Rx reorder buffering
+	 */
+	__le32 num_offload_reorder_buffs;
+
+	/* Number of keys per peer */
+	__le32 num_peer_keys;
+
+	/* Total number of TX/RX data TIDs */
+	__le32 num_tids;
+
+	/* Max skid for resolving hash collisions.
+	 * The address search table is sparse, so that if two MAC addresses
+	 * result in the same hash value, the second of these conflicting
+	 * entries can slide to the next index in the address search table,
+	 * and use it, if it is unoccupied.  This ast_skid_limit parameter
+	 * specifies the upper bound on how many subsequent indices to search
+	 * over to find an unoccupied space.
+	 */
+	__le32 ast_skid_limit;
+
+	/* The nominal chain mask for transmit.
+	 * The chain mask may be modified dynamically, e.g. to operate AP tx
+	 * with a reduced number of chains if no clients are associated.
+	 * This configuration parameter specifies the nominal chain-mask that
+	 * should be used when not operating with a reduced set of tx chains.
+	 */
+	__le32 tx_chain_mask;
+
+	/* The nominal chain mask for receive.
+	 * The chain mask may be modified dynamically, e.g. for a client to use
+	 * a reduced number of chains for receive if the traffic to the client
+	 * is low enough that it doesn't require downlink MIMO or antenna
+	 * diversity. This configuration parameter specifies the nominal
+	 * chain-mask that should be used when not operating with a reduced
+	 * set of rx chains.
+	 */
+	__le32 rx_chain_mask;
+
+	/* What rx reorder timeout (ms) to use for the AC.
+	 * Each WMM access class (voice, video, best-effort, background) will
+	 * have its own timeout value to dictate how long to wait for missing
+	 * rx MPDUs to arrive before flushing subsequent MPDUs that have already
+	 * been received. This parameter specifies the timeout in milliseconds
+	 * for each class.
+	 */
+	__le32 rx_timeout_pri[4];
+
+	/* What mode the rx should decap packets to.
+	 * MAC can decap to RAW (no decap), native wifi or Ethernet types.
+	 * This setting also determines the default TX behavior, however TX
+	 * behavior can be modified on a per VAP basis during VAP init
+	 */
+	__le32 rx_decap_mode;
+
+	__le32 scan_max_pending_req;
+
+	__le32 bmiss_offload_max_vdev;
+
+	__le32 roam_offload_max_vdev;
+
+	__le32 roam_offload_max_ap_profiles;
+
+	/* How many groups to use for mcast->ucast conversion.
+	 * The target's WAL maintains a table to hold information regarding
+	 * which peers belong to a given multicast group, so that if
+	 * multicast->unicast conversion is enabled, the target can convert
+	 * multicast tx frames to a series of unicast tx frames, to each peer
+	 * within the multicast group. This num_mcast_groups configuration
+	 * parameter tells the target how many multicast groups to provide
+	 * storage for within its multicast group membership table.
+	 */
+	__le32 num_mcast_groups;
+
+	/* Size to alloc for the mcast membership table.
+	 * This num_mcast_table_elems configuration parameter tells the target
+	 * how many peer elements it needs to provide storage for in its
+	 * multicast group membership table. These multicast group membership
+	 * table elements are shared by the multicast groups stored within
+	 * the table.
+	 */
+	__le32 num_mcast_table_elems;
+
+	/* Whether/how to do multicast->unicast conversion.
+	 * This configuration parameter specifies whether the target should
+	 * perform multicast --> unicast conversion on transmit, and if so,
+	 * what to do if it finds no entries in its multicast group membership
+	 * table for the multicast IP address in the tx frame.
+	 * Configuration value:
+	 * 0 -> Do not perform multicast to unicast conversion.
+	 * 1 -> Convert multicast frames to unicast, if the IP multicast address
+	 *      from the tx frame is found in the multicast group membership
+	 *      table.  If the IP multicast address is not found, drop the frame
+	 * 2 -> Convert multicast frames to unicast, if the IP multicast address
+	 *      from the tx frame is found in the multicast group membership
+	 *      table.  If the IP multicast address is not found, transmit the
+	 *      frame as multicast.
+	 */
+	__le32 mcast2ucast_mode;
+
+	/* How much memory to allocate for a tx PPDU dbg log.
+	 * This parameter controls how much memory the target will allocate to
+	 * store a log of tx PPDU meta-information (how large the PPDU was,
+	 * when it was sent, whether it was successful, etc.)
+	 */
+	__le32 tx_dbg_log_size;
+
+	/* How many AST entries to be allocated for WDS */
+	__le32 num_wds_entries;
+
+	/* MAC DMA burst size. 0 - default, 1 - 256B */
+	__le32 dma_burst_size;
+
+	/* Fixed delimiters to be inserted after every MPDU to account for
+	 * interface latency to avoid underrun.
+	 */
+	__le32 mac_aggr_delim;
+
+	/* Determine whether target is responsible for detecting duplicate
+	 * non-aggregate MPDU and timing out stale fragments. A-MPDU reordering
+	 * is always performed on the target.
+	 *
+	 * 0: target responsible for frag timeout and dup checking
+	 * 1: host responsible for frag timeout and dup checking
+	 */
+	__le32 rx_skip_defrag_timeout_dup_detection_check;
+
+	/* Configuration for VoW: number of video nodes to be supported and max
+	 * number of descriptors for each video link (node).
+	 */
+	__le32 vow_config;
+
+	/* Maximum vdev that could use gtk offload */
+	__le32 gtk_offload_max_vdev;
+
+	/* Number of msdu descriptors target should use */
+	__le32 num_msdu_desc;
+
+	/* Max number of tx fragments per MSDU.
+	 * This parameter controls the max number of tx fragments per MSDU.
+	 * This will be passed by the target as part of the WMI_SERVICE_READY
+	 * event
+	 * and is overridden by the OS shim as required.
+	 */
+	__le32 max_frag_entries;
+
+	/* Max number of extended peer stats.
+	 * This parameter controls the max number of peers for which extended
+	 * statistics are supported by target
+	 */
+	__le32 max_peer_ext_stats;
+
+	/* Smart antenna capabilities information.
+	 * 1 - Smart antenna is enabled
+	 * 0 - Smart antenna is disabled
+	 * In future this can contain smart antenna specific capabilities.
+	 */
+	__le32 smart_ant_cap;
+
+	/* User can configure the buffers allocated for each AC (BE, BK, VI, VO)
+	 * during init.
+	 */
+	__le32 bk_minfree;
+	__le32 be_minfree;
+	__le32 vi_minfree;
+	__le32 vo_minfree;
+
+	/* Rx batch mode capability.
+	 * 1 - Rx batch mode enabled
+	 * 0 - Rx batch mode disabled
+	 */
+	__le32 rx_batchmode;
+
+	/* Thermal throttling capability.
+	 * 1 - Capable of thermal throttling
+	 * 0 - Not capable of thermal throttling
+	 */
+	__le32 tt_support;
+
+	/* ATF configuration.
+	 * 1  - Enable ATF
+	 * 0  - Disable ATF
+	 */
+	__le32 atf_config;
+
+	/* Configure padding to manage IP header un-alignment
+	 * 1  - Enable padding
+	 * 0  - Disable padding
+	 */
+	__le32 iphdr_pad_config;
+
+	/* qwrap configuration
+	 * 1  - This is qwrap configuration
+	 * 0  - This is not qwrap
+	 */
+	__le32 qwrap_config;
+} __packed;
+
+/* structure describing host memory chunk. */
+struct host_memory_chunk {
+	/* id of the request that is passed up in service ready */
+	__le32 req_id;
+	/* the physical address of the memory chunk */
+	__le32 ptr;
+	/* size of the chunk */
+	__le32 size;
+} __packed;
+
+struct wmi_host_mem_chunks {
+	__le32 count;
+	/* some fw revisions require at least 1 chunk regardless of count */
+	struct host_memory_chunk items[1];
+} __packed;
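+/* Editor's illustrative sketch (hypothetical helper, not driver code): since
+ * items[] above is declared with one element (some firmware revisions require
+ * at least one chunk even when count is 0), the wire length of
+ * wmi_host_mem_chunks for 'count' chunks could be computed as below.
+ */
+static inline size_t wmi_host_mem_chunks_len_example(u32 count)
+{
+	if (count <= 1)
+		return sizeof(struct wmi_host_mem_chunks);
+
+	return sizeof(struct wmi_host_mem_chunks) +
+	       (count - 1) * sizeof(struct host_memory_chunk);
+}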
+
+struct wmi_init_cmd {
+	struct wmi_resource_config resource_config;
+	struct wmi_host_mem_chunks mem_chunks;
+} __packed;
+
+/* _10x structure is from 10.X FW API */
+struct wmi_init_cmd_10x {
+	struct wmi_resource_config_10x resource_config;
+	struct wmi_host_mem_chunks mem_chunks;
+} __packed;
+
+struct wmi_init_cmd_10_2 {
+	struct wmi_resource_config_10_2 resource_config;
+	struct wmi_host_mem_chunks mem_chunks;
+} __packed;
+
+struct wmi_init_cmd_10_4 {
+	struct wmi_resource_config_10_4 resource_config;
+	struct wmi_host_mem_chunks mem_chunks;
+} __packed;
+
+struct wmi_chan_list_entry {
+	__le16 freq;
+	u8 phy_mode; /* valid for 10.2 only */
+	u8 reserved;
+} __packed;
+
+/* TLV for channel list */
+struct wmi_chan_list {
+	__le32 tag; /* WMI_CHAN_LIST_TAG */
+	__le32 num_chan;
+	struct wmi_chan_list_entry channel_list[0];
+} __packed;
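+/* Editor's illustrative sketch (hypothetical helper, not driver code): a
+ * channel-list TLV carrying num_chan entries occupies the fixed header plus
+ * num_chan entries in the start-scan TLV area.
+ */
+static inline size_t wmi_chan_list_len_example(u32 num_chan)
+{
+	return sizeof(struct wmi_chan_list) +
+	       num_chan * sizeof(struct wmi_chan_list_entry);
+}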
+
+struct wmi_bssid_list {
+	__le32 tag; /* WMI_BSSID_LIST_TAG */
+	__le32 num_bssid;
+	struct wmi_mac_addr bssid_list[0];
+} __packed;
+
+struct wmi_ie_data {
+	__le32 tag; /* WMI_IE_TAG */
+	__le32 ie_len;
+	u8 ie_data[0];
+} __packed;
+
+struct wmi_ssid {
+	__le32 ssid_len;
+	u8 ssid[32];
+} __packed;
+
+struct wmi_ssid_list {
+	__le32 tag; /* WMI_SSID_LIST_TAG */
+	__le32 num_ssids;
+	struct wmi_ssid ssids[0];
+} __packed;
+
+/* prefix used by scan requestor ids on the host */
+#define WMI_HOST_SCAN_REQUESTOR_ID_PREFIX 0xA000
+
+/* prefix used by scan request ids generated on the host */
+/* host cycles through the lower 12 bits to generate ids */
+#define WMI_HOST_SCAN_REQ_ID_PREFIX 0xA000
+
+#define WLAN_SCAN_PARAMS_MAX_SSID    16
+#define WLAN_SCAN_PARAMS_MAX_BSSID   4
+#define WLAN_SCAN_PARAMS_MAX_IE_LEN  256
+
+/* Values lower than this may be refused by some firmware revisions with a scan
+ * completion event carrying a timed-out reason.
+ */
+#define WMI_SCAN_CHAN_MIN_TIME_MSEC 40
+
+/* Scan priority numbers must be sequential, starting with 0 */
+enum wmi_scan_priority {
+	WMI_SCAN_PRIORITY_VERY_LOW = 0,
+	WMI_SCAN_PRIORITY_LOW,
+	WMI_SCAN_PRIORITY_MEDIUM,
+	WMI_SCAN_PRIORITY_HIGH,
+	WMI_SCAN_PRIORITY_VERY_HIGH,
+	WMI_SCAN_PRIORITY_COUNT   /* number of priorities supported */
+};
+
+struct wmi_start_scan_common {
+	/* Scan ID */
+	__le32 scan_id;
+	/* Scan requestor ID */
+	__le32 scan_req_id;
+	/* VDEV id (interface) that is requesting the scan */
+	__le32 vdev_id;
+	/* Scan Priority, input to scan scheduler */
+	__le32 scan_priority;
+	/* Scan events subscription */
+	__le32 notify_scan_events;
+	/* dwell time in msec on active channels */
+	__le32 dwell_time_active;
+	/* dwell time in msec on passive channels */
+	__le32 dwell_time_passive;
+	/*
+	 * min time in msec on the BSS channel, only valid if at least one
+	 * VDEV is active
+	 */
+	__le32 min_rest_time;
+	/*
+	 * max rest time in msec on the BSS channel, only valid if at least
+	 * one VDEV is active
+	 */
+	/*
+	 * the scanner will rest on the bss channel at least min_rest_time.
+	 * After min_rest_time the scanner will start checking for tx/rx
+	 * activity on all VDEVs. If there is no activity the scanner will
+	 * switch to off channel. If there is activity the scanner will leave
+	 * the radio on the bss channel until max_rest_time expires. At
+	 * max_rest_time the scanner will switch to off channel irrespective
+	 * of activity. Activity is determined by the idle_time parameter.
+	 */
+	__le32 max_rest_time;
+	/*
+	 * time before sending next set of probe requests.
+	 * The scanner keeps repeating probe request transmission with the
+	 * period specified by repeat_probe_time.
+	 * The number of probe requests specified depends on the ssid_list
+	 * and bssid_list
+	 */
+	__le32 repeat_probe_time;
+	/* time in msec between 2 consecutive probe requests within a set. */
+	__le32 probe_spacing_time;
+	/*
+	 * data inactivity time in msec on bss channel that will be used by
+	 * scanner for measuring the inactivity.
+	 */
+	__le32 idle_time;
+	/* maximum time in msec allowed for scan  */
+	__le32 max_scan_time;
+	/*
+	 * delay in msec before sending first probe request after switching
+	 * to a channel
+	 */
+	__le32 probe_delay;
+	/* Scan control flags */
+	__le32 scan_ctrl_flags;
+} __packed;
+
+struct wmi_start_scan_tlvs {
+	/* TLV parameters. These include channel list, ssid list, bssid list,
+	 * extra ies.
+	 */
+	u8 tlvs[0];
+} __packed;
+
+struct wmi_start_scan_cmd {
+	struct wmi_start_scan_common common;
+	__le32 burst_duration_ms;
+	struct wmi_start_scan_tlvs tlvs;
+} __packed;
+
+/* This is the definition from 10.X firmware branch */
+struct wmi_10x_start_scan_cmd {
+	struct wmi_start_scan_common common;
+	struct wmi_start_scan_tlvs tlvs;
+} __packed;
+
+struct wmi_ssid_arg {
+	int len;
+	const u8 *ssid;
+};
+
+struct wmi_bssid_arg {
+	const u8 *bssid;
+};
+
+struct wmi_start_scan_arg {
+	u32 scan_id;
+	u32 scan_req_id;
+	u32 vdev_id;
+	u32 scan_priority;
+	u32 notify_scan_events;
+	u32 dwell_time_active;
+	u32 dwell_time_passive;
+	u32 min_rest_time;
+	u32 max_rest_time;
+	u32 repeat_probe_time;
+	u32 probe_spacing_time;
+	u32 idle_time;
+	u32 max_scan_time;
+	u32 probe_delay;
+	u32 scan_ctrl_flags;
+	u32 burst_duration_ms;
+
+	u32 ie_len;
+	u32 n_channels;
+	u32 n_ssids;
+	u32 n_bssids;
+
+	u8 ie[WLAN_SCAN_PARAMS_MAX_IE_LEN];
+	u16 channels[64];
+	struct wmi_ssid_arg ssids[WLAN_SCAN_PARAMS_MAX_SSID];
+	struct wmi_bssid_arg bssids[WLAN_SCAN_PARAMS_MAX_BSSID];
+};
+
+/* scan control flags */
+
+/* passively scan all channels including active channels */
+#define WMI_SCAN_FLAG_PASSIVE        0x1
+/* add wildcard ssid probe request even though ssid_list is specified. */
+#define WMI_SCAN_ADD_BCAST_PROBE_REQ 0x2
+/* add cck rates to rates/xrate ie for the generated probe request */
+#define WMI_SCAN_ADD_CCK_RATES 0x4
+/* add ofdm rates to rates/xrate ie for the generated probe request */
+#define WMI_SCAN_ADD_OFDM_RATES 0x8
+/* To enable indication of Chan load and Noise floor to host */
+#define WMI_SCAN_CHAN_STAT_EVENT 0x10
+/* Filter Probe request frames  */
+#define WMI_SCAN_FILTER_PROBE_REQ 0x20
+/* When set, DFS channels will not be scanned */
+#define WMI_SCAN_BYPASS_DFS_CHN 0x40
+/* Different FW scan engines may choose to bail out on errors.
+ * Allow the driver to have influence over that. */
+#define WMI_SCAN_CONTINUE_ON_ERROR 0x80
+
+/* WMI_SCAN_CLASS_MASK must be the same value as IEEE80211_SCAN_CLASS_MASK */
+#define WMI_SCAN_CLASS_MASK 0xFF000000
+
+enum wmi_stop_scan_type {
+	WMI_SCAN_STOP_ONE	= 0x00000000, /* stop by scan_id */
+	WMI_SCAN_STOP_VDEV_ALL	= 0x01000000, /* stop by vdev_id */
+	WMI_SCAN_STOP_ALL	= 0x04000000, /* stop all scans */
+};
+
+struct wmi_stop_scan_cmd {
+	__le32 scan_req_id;
+	__le32 scan_id;
+	__le32 req_type;
+	__le32 vdev_id;
+} __packed;
+
+struct wmi_stop_scan_arg {
+	u32 req_id;
+	enum wmi_stop_scan_type req_type;
+	union {
+		u32 scan_id;
+		u32 vdev_id;
+	} u;
+};
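+
+/*
+ * Illustrative sketch only (helper name is hypothetical): the union member
+ * is selected by req_type, e.g. stopping every scan on one vdev would be
+ * filled in roughly as below.
+ */
+static inline void ath10k_example_stop_vdev_scans(struct wmi_stop_scan_arg *arg,
+						  u32 vdev_id)
+{
+	arg->req_id = 1; /* any host-chosen requestor id */
+	arg->req_type = WMI_SCAN_STOP_VDEV_ALL;
+	arg->u.vdev_id = vdev_id; /* u.scan_id is unused for this req_type */
+}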
+
+struct wmi_scan_chan_list_cmd {
+	__le32 num_scan_chans;
+	struct wmi_channel chan_info[0];
+} __packed;
+
+struct wmi_scan_chan_list_arg {
+	u32 n_channels;
+	struct wmi_channel_arg *channels;
+};
+
+enum wmi_bss_filter {
+	WMI_BSS_FILTER_NONE = 0,        /* no beacons forwarded */
+	WMI_BSS_FILTER_ALL,             /* all beacons forwarded */
+	WMI_BSS_FILTER_PROFILE,         /* only beacons matching profile */
+	WMI_BSS_FILTER_ALL_BUT_PROFILE, /* all but beacons matching profile */
+	WMI_BSS_FILTER_CURRENT_BSS,     /* only beacons matching current BSS */
+	WMI_BSS_FILTER_ALL_BUT_BSS,     /* all but beacons matching BSS */
+	WMI_BSS_FILTER_PROBED_SSID,     /* beacons matching probed ssid */
+	WMI_BSS_FILTER_LAST_BSS,        /* marker only */
+};
+
+enum wmi_scan_event_type {
+	WMI_SCAN_EVENT_STARTED              = BIT(0),
+	WMI_SCAN_EVENT_COMPLETED            = BIT(1),
+	WMI_SCAN_EVENT_BSS_CHANNEL          = BIT(2),
+	WMI_SCAN_EVENT_FOREIGN_CHANNEL      = BIT(3),
+	WMI_SCAN_EVENT_DEQUEUED             = BIT(4),
+	/* possibly by high-prio scan */
+	WMI_SCAN_EVENT_PREEMPTED            = BIT(5),
+	WMI_SCAN_EVENT_START_FAILED         = BIT(6),
+	WMI_SCAN_EVENT_RESTARTED            = BIT(7),
+	WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT = BIT(8),
+	WMI_SCAN_EVENT_MAX                  = BIT(15),
+};
+
+enum wmi_scan_completion_reason {
+	WMI_SCAN_REASON_COMPLETED,
+	WMI_SCAN_REASON_CANCELLED,
+	WMI_SCAN_REASON_PREEMPTED,
+	WMI_SCAN_REASON_TIMEDOUT,
+	WMI_SCAN_REASON_INTERNAL_FAILURE,
+	WMI_SCAN_REASON_MAX,
+};
+
+struct wmi_scan_event {
+	__le32 event_type; /* %WMI_SCAN_EVENT_ */
+	__le32 reason; /* %WMI_SCAN_REASON_ */
+	__le32 channel_freq; /* only valid for WMI_SCAN_EVENT_FOREIGN_CHANNEL */
+	__le32 scan_req_id;
+	__le32 scan_id;
+	__le32 vdev_id;
+} __packed;
+
+/*
+ * This defines how much headroom is kept in the
+ * receive frame between the descriptor and the
+ * payload, in order for the WMI PHY error and
+ * management handler to insert header contents.
+ *
+ * This is in bytes.
+ */
+#define WMI_MGMT_RX_HDR_HEADROOM    52
+
+/*
+ * This event will be used for sending scan results
+ * as well as rx mgmt frames to the host. The rx buffer
+ * will be sent as part of this WMI event. It would be a
+ * good idea to pass all the fields in the RX status
+ * descriptor up to the host.
+ */
+struct wmi_mgmt_rx_hdr_v1 {
+	__le32 channel;
+	__le32 snr;
+	__le32 rate;
+	__le32 phy_mode;
+	__le32 buf_len;
+	__le32 status; /* %WMI_RX_STATUS_ */
+} __packed;
+
+struct wmi_mgmt_rx_hdr_v2 {
+	struct wmi_mgmt_rx_hdr_v1 v1;
+	__le32 rssi_ctl[4];
+} __packed;
+
+struct wmi_mgmt_rx_event_v1 {
+	struct wmi_mgmt_rx_hdr_v1 hdr;
+	u8 buf[0];
+} __packed;
+
+struct wmi_mgmt_rx_event_v2 {
+	struct wmi_mgmt_rx_hdr_v2 hdr;
+	u8 buf[0];
+} __packed;
+
+struct wmi_10_4_mgmt_rx_hdr {
+	__le32 channel;
+	__le32 snr;
+	u8 rssi_ctl[4];
+	__le32 rate;
+	__le32 phy_mode;
+	__le32 buf_len;
+	__le32 status;
+} __packed;
+
+struct wmi_10_4_mgmt_rx_event {
+	struct wmi_10_4_mgmt_rx_hdr hdr;
+	u8 buf[0];
+} __packed;
+
+#define WMI_RX_STATUS_OK			0x00
+#define WMI_RX_STATUS_ERR_CRC			0x01
+#define WMI_RX_STATUS_ERR_DECRYPT		0x08
+#define WMI_RX_STATUS_ERR_MIC			0x10
+#define WMI_RX_STATUS_ERR_KEY_CACHE_MISS	0x20
+
+#define PHY_ERROR_GEN_SPECTRAL_SCAN		0x26
+#define PHY_ERROR_GEN_FALSE_RADAR_EXT		0x24
+#define PHY_ERROR_GEN_RADAR			0x05
+
+#define PHY_ERROR_10_4_RADAR_MASK               0x4
+#define PHY_ERROR_10_4_SPECTRAL_SCAN_MASK       0x4000000
+
+enum phy_err_type {
+	PHY_ERROR_UNKNOWN,
+	PHY_ERROR_SPECTRAL_SCAN,
+	PHY_ERROR_FALSE_RADAR_EXT,
+	PHY_ERROR_RADAR
+};
+
+struct wmi_phyerr {
+	__le32 tsf_timestamp;
+	__le16 freq1;
+	__le16 freq2;
+	u8 rssi_combined;
+	u8 chan_width_mhz;
+	u8 phy_err_code;
+	u8 rsvd0;
+	__le32 rssi_chains[4];
+	__le16 nf_chains[4];
+	__le32 buf_len;
+	u8 buf[0];
+} __packed;
+
+struct wmi_phyerr_event {
+	__le32 num_phyerrs;
+	__le32 tsf_l32;
+	__le32 tsf_u32;
+	struct wmi_phyerr phyerrs[0];
+} __packed;
+
+struct wmi_10_4_phyerr_event {
+	__le32 tsf_l32;
+	__le32 tsf_u32;
+	__le16 freq1;
+	__le16 freq2;
+	u8 rssi_combined;
+	u8 chan_width_mhz;
+	u8 phy_err_code;
+	u8 rsvd0;
+	__le32 rssi_chains[4];
+	__le16 nf_chains[4];
+	__le32 phy_err_mask[2];
+	__le32 tsf_timestamp;
+	__le32 buf_len;
+	u8 buf[0];
+} __packed;
+
+#define PHYERR_TLV_SIG				0xBB
+#define PHYERR_TLV_TAG_SEARCH_FFT_REPORT	0xFB
+#define PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY	0xF8
+#define PHYERR_TLV_TAG_SPECTRAL_SUMMARY_REPORT	0xF9
+
+struct phyerr_radar_report {
+	__le32 reg0; /* RADAR_REPORT_REG0_* */
+	__le32 reg1; /* RADAR_REPORT_REG1_* */
+} __packed;
+
+#define RADAR_REPORT_REG0_PULSE_IS_CHIRP_MASK		0x80000000
+#define RADAR_REPORT_REG0_PULSE_IS_CHIRP_LSB		31
+
+#define RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH_MASK	0x40000000
+#define RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH_LSB	30
+
+#define RADAR_REPORT_REG0_AGC_TOTAL_GAIN_MASK		0x3FF00000
+#define RADAR_REPORT_REG0_AGC_TOTAL_GAIN_LSB		20
+
+#define RADAR_REPORT_REG0_PULSE_DELTA_DIFF_MASK		0x000F0000
+#define RADAR_REPORT_REG0_PULSE_DELTA_DIFF_LSB		16
+
+#define RADAR_REPORT_REG0_PULSE_DELTA_PEAK_MASK		0x0000FC00
+#define RADAR_REPORT_REG0_PULSE_DELTA_PEAK_LSB		10
+
+#define RADAR_REPORT_REG0_PULSE_SIDX_MASK		0x000003FF
+#define RADAR_REPORT_REG0_PULSE_SIDX_LSB		0
+
+#define RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID_MASK	0x80000000
+#define RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID_LSB	31
+
+#define RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN_MASK	0x7F000000
+#define RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN_LSB		24
+
+#define RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK_MASK	0x00FF0000
+#define RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK_LSB	16
+
+#define RADAR_REPORT_REG1_PULSE_TSF_OFFSET_MASK		0x0000FF00
+#define RADAR_REPORT_REG1_PULSE_TSF_OFFSET_LSB		8
+
+#define RADAR_REPORT_REG1_PULSE_DUR_MASK		0x000000FF
+#define RADAR_REPORT_REG1_PULSE_DUR_LSB			0
+
+struct phyerr_fft_report {
+	__le32 reg0; /* SEARCH_FFT_REPORT_REG0_ * */
+	__le32 reg1; /* SEARCH_FFT_REPORT_REG1_ * */
+} __packed;
+
+#define SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB_MASK	0xFF800000
+#define SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB_LSB	23
+
+#define SEARCH_FFT_REPORT_REG0_BASE_PWR_DB_MASK		0x007FC000
+#define SEARCH_FFT_REPORT_REG0_BASE_PWR_DB_LSB		14
+
+#define SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX_MASK		0x00003000
+#define SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX_LSB		12
+
+#define SEARCH_FFT_REPORT_REG0_PEAK_SIDX_MASK		0x00000FFF
+#define SEARCH_FFT_REPORT_REG0_PEAK_SIDX_LSB		0
+
+#define SEARCH_FFT_REPORT_REG1_RELPWR_DB_MASK		0xFC000000
+#define SEARCH_FFT_REPORT_REG1_RELPWR_DB_LSB		26
+
+#define SEARCH_FFT_REPORT_REG1_AVGPWR_DB_MASK		0x03FC0000
+#define SEARCH_FFT_REPORT_REG1_AVGPWR_DB_LSB		18
+
+#define SEARCH_FFT_REPORT_REG1_PEAK_MAG_MASK		0x0003FF00
+#define SEARCH_FFT_REPORT_REG1_PEAK_MAG_LSB		8
+
+#define SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB_MASK	0x000000FF
+#define SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB_LSB	0
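+
+/*
+ * Illustrative sketch only: each REG0/REG1 field above is encoded as
+ * (reg & MASK) >> LSB. The hypothetical helper below extracts the peak
+ * magnitude from a search FFT report; the radar report fields are decoded
+ * the same way.
+ */
+static inline u32
+ath10k_example_fft_peak_mag(const struct phyerr_fft_report *fft)
+{
+	u32 reg1 = __le32_to_cpu(fft->reg1);
+
+	return (reg1 & SEARCH_FFT_REPORT_REG1_PEAK_MAG_MASK) >>
+	       SEARCH_FFT_REPORT_REG1_PEAK_MAG_LSB;
+}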
+
+struct phyerr_tlv {
+	__le16 len;
+	u8 tag;
+	u8 sig;
+} __packed;
+
+#define DFS_RSSI_POSSIBLY_FALSE			50
+#define DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE	40
+
+struct wmi_mgmt_tx_hdr {
+	__le32 vdev_id;
+	struct wmi_mac_addr peer_macaddr;
+	__le32 tx_rate;
+	__le32 tx_power;
+	__le32 buf_len;
+} __packed;
+
+struct wmi_mgmt_tx_cmd {
+	struct wmi_mgmt_tx_hdr hdr;
+	u8 buf[0];
+} __packed;
+
+struct wmi_echo_event {
+	__le32 value;
+} __packed;
+
+struct wmi_echo_cmd {
+	__le32 value;
+} __packed;
+
+struct wmi_pdev_set_regdomain_cmd {
+	__le32 reg_domain;
+	__le32 reg_domain_2G;
+	__le32 reg_domain_5G;
+	__le32 conformance_test_limit_2G;
+	__le32 conformance_test_limit_5G;
+} __packed;
+
+enum wmi_dfs_region {
+	/* Uninitialized dfs domain */
+	WMI_UNINIT_DFS_DOMAIN = 0,
+
+	/* FCC3 dfs domain */
+	WMI_FCC_DFS_DOMAIN = 1,
+
+	/* ETSI dfs domain */
+	WMI_ETSI_DFS_DOMAIN = 2,
+
+	/*Japan dfs domain */
+	WMI_MKK4_DFS_DOMAIN = 3,
+};
+
+struct wmi_pdev_set_regdomain_cmd_10x {
+	__le32 reg_domain;
+	__le32 reg_domain_2G;
+	__le32 reg_domain_5G;
+	__le32 conformance_test_limit_2G;
+	__le32 conformance_test_limit_5G;
+
+	/* dfs domain from wmi_dfs_region */
+	__le32 dfs_domain;
+} __packed;
+
+/* Command to set/unset chip in quiet mode */
+struct wmi_pdev_set_quiet_cmd {
+	/* period in TUs */
+	__le32 period;
+
+	/* duration in TUs */
+	__le32 duration;
+
+	/* offset in TUs */
+	__le32 next_start;
+
+	/* enable/disable */
+	__le32 enabled;
+} __packed;
+
+/*
+ * 802.11g protection mode.
+ */
+enum ath10k_protmode {
+	ATH10K_PROT_NONE     = 0,    /* no protection */
+	ATH10K_PROT_CTSONLY  = 1,    /* CTS to self */
+	ATH10K_PROT_RTSCTS   = 2,    /* RTS-CTS */
+};
+
+enum wmi_rtscts_profile {
+	WMI_RTSCTS_FOR_NO_RATESERIES = 0,
+	WMI_RTSCTS_FOR_SECOND_RATESERIES,
+	WMI_RTSCTS_ACROSS_SW_RETRIES
+};
+
+#define WMI_RTSCTS_ENABLED		1
+#define WMI_RTSCTS_SET_MASK		0x0f
+#define WMI_RTSCTS_SET_LSB		0
+
+#define WMI_RTSCTS_PROFILE_MASK		0xf0
+#define WMI_RTSCTS_PROFILE_LSB		4
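+
+/*
+ * Illustrative sketch only: the RTS/CTS vdev parameter packs an enable
+ * nibble and a wmi_rtscts_profile value into one word using the MASK/LSB
+ * pairs above. The combination below is an example, not a recommendation.
+ */
+static inline u32 ath10k_example_rtscts_param(void)
+{
+	return (WMI_RTSCTS_ENABLED << WMI_RTSCTS_SET_LSB) |
+	       (WMI_RTSCTS_FOR_SECOND_RATESERIES << WMI_RTSCTS_PROFILE_LSB);
+}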
+
+enum wmi_beacon_gen_mode {
+	WMI_BEACON_STAGGERED_MODE = 0,
+	WMI_BEACON_BURST_MODE = 1
+};
+
+enum wmi_csa_event_ies_present_flag {
+	WMI_CSA_IE_PRESENT = 0x00000001,
+	WMI_XCSA_IE_PRESENT = 0x00000002,
+	WMI_WBW_IE_PRESENT = 0x00000004,
+	WMI_CSWARP_IE_PRESENT = 0x00000008,
+};
+
+/* wmi CSA receive event from beacon frame */
+struct wmi_csa_event {
+	__le32 i_fc_dur;
+	/* Bit 0-15: FC */
+	/* Bit 16-31: DUR */
+	struct wmi_mac_addr i_addr1;
+	struct wmi_mac_addr i_addr2;
+	__le32 csa_ie[2];
+	__le32 xcsa_ie[2];
+	__le32 wb_ie[2];
+	__le32 cswarp_ie;
+	__le32 ies_present_flag; /* wmi_csa_event_ies_present_flag */
+} __packed;
+
+/* the definition of different PDEV parameters */
+#define PDEV_DEFAULT_STATS_UPDATE_PERIOD    500
+#define VDEV_DEFAULT_STATS_UPDATE_PERIOD    500
+#define PEER_DEFAULT_STATS_UPDATE_PERIOD    500
+
+struct wmi_pdev_param_map {
+	u32 tx_chain_mask;
+	u32 rx_chain_mask;
+	u32 txpower_limit2g;
+	u32 txpower_limit5g;
+	u32 txpower_scale;
+	u32 beacon_gen_mode;
+	u32 beacon_tx_mode;
+	u32 resmgr_offchan_mode;
+	u32 protection_mode;
+	u32 dynamic_bw;
+	u32 non_agg_sw_retry_th;
+	u32 agg_sw_retry_th;
+	u32 sta_kickout_th;
+	u32 ac_aggrsize_scaling;
+	u32 ltr_enable;
+	u32 ltr_ac_latency_be;
+	u32 ltr_ac_latency_bk;
+	u32 ltr_ac_latency_vi;
+	u32 ltr_ac_latency_vo;
+	u32 ltr_ac_latency_timeout;
+	u32 ltr_sleep_override;
+	u32 ltr_rx_override;
+	u32 ltr_tx_activity_timeout;
+	u32 l1ss_enable;
+	u32 dsleep_enable;
+	u32 pcielp_txbuf_flush;
+	u32 pcielp_txbuf_watermark;
+	u32 pcielp_txbuf_tmo_en;
+	u32 pcielp_txbuf_tmo_value;
+	u32 pdev_stats_update_period;
+	u32 vdev_stats_update_period;
+	u32 peer_stats_update_period;
+	u32 bcnflt_stats_update_period;
+	u32 pmf_qos;
+	u32 arp_ac_override;
+	u32 dcs;
+	u32 ani_enable;
+	u32 ani_poll_period;
+	u32 ani_listen_period;
+	u32 ani_ofdm_level;
+	u32 ani_cck_level;
+	u32 dyntxchain;
+	u32 proxy_sta;
+	u32 idle_ps_config;
+	u32 power_gating_sleep;
+	u32 fast_channel_reset;
+	u32 burst_dur;
+	u32 burst_enable;
+	u32 cal_period;
+	u32 aggr_burst;
+	u32 rx_decap_mode;
+	u32 smart_antenna_default_antenna;
+	u32 igmpmld_override;
+	u32 igmpmld_tid;
+	u32 antenna_gain;
+	u32 rx_filter;
+	u32 set_mcast_to_ucast_tid;
+	u32 proxy_sta_mode;
+	u32 set_mcast2ucast_mode;
+	u32 set_mcast2ucast_buffer;
+	u32 remove_mcast2ucast_buffer;
+	u32 peer_sta_ps_statechg_enable;
+	u32 igmpmld_ac_override;
+	u32 block_interbss;
+	u32 set_disable_reset_cmdid;
+	u32 set_msdu_ttl_cmdid;
+	u32 set_ppdu_duration_cmdid;
+	u32 txbf_sound_period_cmdid;
+	u32 set_promisc_mode_cmdid;
+	u32 set_burst_mode_cmdid;
+	u32 en_stats;
+	u32 mu_group_policy;
+	u32 noise_detection;
+	u32 noise_threshold;
+	u32 dpd_enable;
+	u32 set_mcast_bcast_echo;
+	u32 atf_strict_sch;
+	u32 atf_sched_duration;
+	u32 ant_plzn;
+	u32 mgmt_retry_limit;
+	u32 sensitivity_level;
+	u32 signed_txpower_2g;
+	u32 signed_txpower_5g;
+	u32 enable_per_tid_amsdu;
+	u32 enable_per_tid_ampdu;
+	u32 cca_threshold;
+	u32 rts_fixed_rate;
+	u32 pdev_reset;
+	u32 wapi_mbssid_offset;
+	u32 arp_srcaddr;
+	u32 arp_dstaddr;
+};
+
+#define WMI_PDEV_PARAM_UNSUPPORTED 0
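+
+/*
+ * Illustrative sketch only: struct wmi_pdev_param_map translates the
+ * abstract parameter names used by the driver into the firmware-revision
+ * specific IDs (e.g. enum wmi_pdev_param vs. enum wmi_10x_pdev_param
+ * below). A send path is expected to treat WMI_PDEV_PARAM_UNSUPPORTED as
+ * "do not send"; the helper name here is hypothetical.
+ */
+static inline bool ath10k_example_pdev_param_supported(u32 mapped_param_id)
+{
+	return mapped_param_id != WMI_PDEV_PARAM_UNSUPPORTED;
+}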
+
+enum wmi_pdev_param {
+	/* TX chain mask */
+	WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+	/* RX chain mask */
+	WMI_PDEV_PARAM_RX_CHAIN_MASK,
+	/* TX power limit for 2G Radio */
+	WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
+	/* TX power limit for 5G Radio */
+	WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
+	/* TX power scale */
+	WMI_PDEV_PARAM_TXPOWER_SCALE,
+	/* Beacon generation mode. 0: host, 1: target */
+	WMI_PDEV_PARAM_BEACON_GEN_MODE,
+	/* Beacon transmission mode. 0: staggered, 1: bursted */
+	WMI_PDEV_PARAM_BEACON_TX_MODE,
+	/*
+	 * Resource manager off-channel mode.
+	 * 0: turn off off-channel mode. 1: turn on off-channel mode
+	 */
+	WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+	/*
+	 * Protection mode:
+	 * 0: no protection 1:use CTS-to-self 2: use RTS/CTS
+	 */
+	WMI_PDEV_PARAM_PROTECTION_MODE,
+	/*
+	 * Dynamic bandwidth - 0: disable, 1: enable
+	 *
+	 * When enabled HW rate control tries different bandwidths when
+	 * retransmitting frames.
+	 */
+	WMI_PDEV_PARAM_DYNAMIC_BW,
+	/* Non-aggregate / 11g sw retry threshold. 0 - disable */
+	WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+	/* Aggregate sw retry threshold. 0 - disable */
+	WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
+	/* Station kickout threshold (number of consecutive failures). 0 - disable */
+	WMI_PDEV_PARAM_STA_KICKOUT_TH,
+	/* Aggregate size scaling configuration per AC */
+	WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+	/* LTR enable */
+	WMI_PDEV_PARAM_LTR_ENABLE,
+	/* LTR latency for BE, in us */
+	WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
+	/* LTR latency for BK, in us */
+	WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
+	/* LTR latency for VI, in us */
+	WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
+	/* LTR latency for VO, in us  */
+	WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
+	/* LTR AC latency timeout, in ms */
+	WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+	/* LTR platform latency override, in us */
+	WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+	/* LTR-RX override, in us */
+	WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
+	/* Tx activity timeout for LTR, in us */
+	WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+	/* L1SS state machine enable */
+	WMI_PDEV_PARAM_L1SS_ENABLE,
+	/* Deep sleep state machine enable */
+	WMI_PDEV_PARAM_DSLEEP_ENABLE,
+	/* RX buffering flush enable */
+	WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+	/* RX buffering watermark */
+	WMI_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+	/* RX buffering timeout enable */
+	WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+	/* RX buffering timeout value */
+	WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+	/* pdev level stats update period in ms */
+	WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+	/* vdev level stats update period in ms */
+	WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+	/* peer level stats update period in ms */
+	WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+	/* beacon filter status update period */
+	WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+	/* QOS Mgmt frame protection MFP/PMF 0: disable, 1: enable */
+	WMI_PDEV_PARAM_PMF_QOS,
+	/* Access category on which ARP frames are sent */
+	WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
+	/* DCS configuration */
+	WMI_PDEV_PARAM_DCS,
+	/* Enable/Disable ANI on target */
+	WMI_PDEV_PARAM_ANI_ENABLE,
+	/* configure the ANI polling period */
+	WMI_PDEV_PARAM_ANI_POLL_PERIOD,
+	/* configure the ANI listening period */
+	WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
+	/* configure OFDM immunity level */
+	WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
+	/* configure CCK immunity level */
+	WMI_PDEV_PARAM_ANI_CCK_LEVEL,
+	/* Enable/Disable CDD for 1x1 STAs in rate control module */
+	WMI_PDEV_PARAM_DYNTXCHAIN,
+	/* Enable/Disable proxy STA */
+	WMI_PDEV_PARAM_PROXY_STA,
+	/* Enable/Disable low power state when all VDEVs are inactive/idle. */
+	WMI_PDEV_PARAM_IDLE_PS_CONFIG,
+	/* Enable/Disable power gating sleep */
+	WMI_PDEV_PARAM_POWER_GATING_SLEEP,
+};
+
+enum wmi_10x_pdev_param {
+	/* TX chain mask */
+	WMI_10X_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+	/* RX chain mask */
+	WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
+	/* TX power limit for 2G Radio */
+	WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
+	/* TX power limit for 5G Radio */
+	WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
+	/* TX power scale */
+	WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
+	/* Beacon generation mode. 0: host, 1: target */
+	WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
+	/* Beacon transmission mode. 0: staggered, 1: bursted */
+	WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
+	/*
+	 * Resource manager off-channel mode.
+	 * 0: turn off off-channel mode. 1: turn on off-channel mode
+	 */
+	WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+	/*
+	 * Protection mode:
+	 * 0: no protection 1:use CTS-to-self 2: use RTS/CTS
+	 */
+	WMI_10X_PDEV_PARAM_PROTECTION_MODE,
+	/* Dynamic bandwidth 0: disable 1: enable */
+	WMI_10X_PDEV_PARAM_DYNAMIC_BW,
+	/* Non-aggregate / 11g sw retry threshold. 0 - disable */
+	WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+	/* Aggregate sw retry threshold. 0 - disable */
+	WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
+	/* Station kickout threshold (number of consecutive failures). 0 - disable */
+	WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
+	/* Aggregate size scaling configuration per AC */
+	WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+	/* LTR enable */
+	WMI_10X_PDEV_PARAM_LTR_ENABLE,
+	/* LTR latency for BE, in us */
+	WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
+	/* LTR latency for BK, in us */
+	WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
+	/* LTR latency for VI, in us */
+	WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
+	/* LTR latency for VO, in us  */
+	WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
+	/* LTR AC latency timeout, in ms */
+	WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+	/* LTR platform latency override, in us */
+	WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+	/* LTR-RX override, in us */
+	WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
+	/* Tx activity timeout for LTR, in us */
+	WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+	/* L1SS state machine enable */
+	WMI_10X_PDEV_PARAM_L1SS_ENABLE,
+	/* Deep sleep state machine enable */
+	WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
+	/* pdev level stats update period in ms */
+	WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+	/* vdev level stats update period in ms */
+	WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+	/* peer level stats update period in ms */
+	WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+	/* beacon filter status update period */
+	WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+	/* QOS Mgmt frame protection MFP/PMF 0: disable, 1: enable */
+	WMI_10X_PDEV_PARAM_PMF_QOS,
+	/* Access category on which ARP and DHCP frames are sent */
+	WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
+	/* DCS configuration */
+	WMI_10X_PDEV_PARAM_DCS,
+	/* Enable/Disable ANI on target */
+	WMI_10X_PDEV_PARAM_ANI_ENABLE,
+	/* configure the ANI polling period */
+	WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
+	/* configure the ANI listening period */
+	WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
+	/* configure OFDM immunity level */
+	WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
+	/* configure CCK immunity level */
+	WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
+	/* Enable/Disable CDD for 1x1 STAs in rate control module */
+	WMI_10X_PDEV_PARAM_DYNTXCHAIN,
+	/* Enable/Disable fast channel reset */
+	WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
+	/* Set Bursting DUR */
+	WMI_10X_PDEV_PARAM_BURST_DUR,
+	/* Set Bursting Enable */
+	WMI_10X_PDEV_PARAM_BURST_ENABLE,
+
+	/* following are available as of firmware 10.2 */
+	WMI_10X_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
+	WMI_10X_PDEV_PARAM_IGMPMLD_OVERRIDE,
+	WMI_10X_PDEV_PARAM_IGMPMLD_TID,
+	WMI_10X_PDEV_PARAM_ANTENNA_GAIN,
+	WMI_10X_PDEV_PARAM_RX_DECAP_MODE,
+	WMI_10X_PDEV_PARAM_RX_FILTER,
+	WMI_10X_PDEV_PARAM_SET_MCAST_TO_UCAST_TID,
+	WMI_10X_PDEV_PARAM_PROXY_STA_MODE,
+	WMI_10X_PDEV_PARAM_SET_MCAST2UCAST_MODE,
+	WMI_10X_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
+	WMI_10X_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
+	WMI_10X_PDEV_PARAM_PEER_STA_PS_STATECHG_ENABLE,
+	WMI_10X_PDEV_PARAM_RTS_FIXED_RATE,
+	WMI_10X_PDEV_PARAM_CAL_PERIOD
+};
+
+enum wmi_10_4_pdev_param {
+	WMI_10_4_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+	WMI_10_4_PDEV_PARAM_RX_CHAIN_MASK,
+	WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT2G,
+	WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT5G,
+	WMI_10_4_PDEV_PARAM_TXPOWER_SCALE,
+	WMI_10_4_PDEV_PARAM_BEACON_GEN_MODE,
+	WMI_10_4_PDEV_PARAM_BEACON_TX_MODE,
+	WMI_10_4_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+	WMI_10_4_PDEV_PARAM_PROTECTION_MODE,
+	WMI_10_4_PDEV_PARAM_DYNAMIC_BW,
+	WMI_10_4_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+	WMI_10_4_PDEV_PARAM_AGG_SW_RETRY_TH,
+	WMI_10_4_PDEV_PARAM_STA_KICKOUT_TH,
+	WMI_10_4_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+	WMI_10_4_PDEV_PARAM_LTR_ENABLE,
+	WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BE,
+	WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BK,
+	WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VI,
+	WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VO,
+	WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+	WMI_10_4_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+	WMI_10_4_PDEV_PARAM_LTR_RX_OVERRIDE,
+	WMI_10_4_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+	WMI_10_4_PDEV_PARAM_L1SS_ENABLE,
+	WMI_10_4_PDEV_PARAM_DSLEEP_ENABLE,
+	WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+	WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+	WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+	WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+	WMI_10_4_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+	WMI_10_4_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+	WMI_10_4_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+	WMI_10_4_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+	WMI_10_4_PDEV_PARAM_PMF_QOS,
+	WMI_10_4_PDEV_PARAM_ARP_AC_OVERRIDE,
+	WMI_10_4_PDEV_PARAM_DCS,
+	WMI_10_4_PDEV_PARAM_ANI_ENABLE,
+	WMI_10_4_PDEV_PARAM_ANI_POLL_PERIOD,
+	WMI_10_4_PDEV_PARAM_ANI_LISTEN_PERIOD,
+	WMI_10_4_PDEV_PARAM_ANI_OFDM_LEVEL,
+	WMI_10_4_PDEV_PARAM_ANI_CCK_LEVEL,
+	WMI_10_4_PDEV_PARAM_DYNTXCHAIN,
+	WMI_10_4_PDEV_PARAM_PROXY_STA,
+	WMI_10_4_PDEV_PARAM_IDLE_PS_CONFIG,
+	WMI_10_4_PDEV_PARAM_POWER_GATING_SLEEP,
+	WMI_10_4_PDEV_PARAM_AGGR_BURST,
+	WMI_10_4_PDEV_PARAM_RX_DECAP_MODE,
+	WMI_10_4_PDEV_PARAM_FAST_CHANNEL_RESET,
+	WMI_10_4_PDEV_PARAM_BURST_DUR,
+	WMI_10_4_PDEV_PARAM_BURST_ENABLE,
+	WMI_10_4_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
+	WMI_10_4_PDEV_PARAM_IGMPMLD_OVERRIDE,
+	WMI_10_4_PDEV_PARAM_IGMPMLD_TID,
+	WMI_10_4_PDEV_PARAM_ANTENNA_GAIN,
+	WMI_10_4_PDEV_PARAM_RX_FILTER,
+	WMI_10_4_PDEV_SET_MCAST_TO_UCAST_TID,
+	WMI_10_4_PDEV_PARAM_PROXY_STA_MODE,
+	WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_MODE,
+	WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
+	WMI_10_4_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
+	WMI_10_4_PDEV_PEER_STA_PS_STATECHG_ENABLE,
+	WMI_10_4_PDEV_PARAM_IGMPMLD_AC_OVERRIDE,
+	WMI_10_4_PDEV_PARAM_BLOCK_INTERBSS,
+	WMI_10_4_PDEV_PARAM_SET_DISABLE_RESET_CMDID,
+	WMI_10_4_PDEV_PARAM_SET_MSDU_TTL_CMDID,
+	WMI_10_4_PDEV_PARAM_SET_PPDU_DURATION_CMDID,
+	WMI_10_4_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID,
+	WMI_10_4_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
+	WMI_10_4_PDEV_PARAM_SET_BURST_MODE_CMDID,
+	WMI_10_4_PDEV_PARAM_EN_STATS,
+	WMI_10_4_PDEV_PARAM_MU_GROUP_POLICY,
+	WMI_10_4_PDEV_PARAM_NOISE_DETECTION,
+	WMI_10_4_PDEV_PARAM_NOISE_THRESHOLD,
+	WMI_10_4_PDEV_PARAM_DPD_ENABLE,
+	WMI_10_4_PDEV_PARAM_SET_MCAST_BCAST_ECHO,
+	WMI_10_4_PDEV_PARAM_ATF_STRICT_SCH,
+	WMI_10_4_PDEV_PARAM_ATF_SCHED_DURATION,
+	WMI_10_4_PDEV_PARAM_ANT_PLZN,
+	WMI_10_4_PDEV_PARAM_MGMT_RETRY_LIMIT,
+	WMI_10_4_PDEV_PARAM_SENSITIVITY_LEVEL,
+	WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_2G,
+	WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_5G,
+	WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMSDU,
+	WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMPDU,
+	WMI_10_4_PDEV_PARAM_CCA_THRESHOLD,
+	WMI_10_4_PDEV_PARAM_RTS_FIXED_RATE,
+	WMI_10_4_PDEV_PARAM_CAL_PERIOD,
+	WMI_10_4_PDEV_PARAM_PDEV_RESET,
+	WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET,
+	WMI_10_4_PDEV_PARAM_ARP_SRCADDR,
+	WMI_10_4_PDEV_PARAM_ARP_DSTADDR,
+};
+
+struct wmi_pdev_set_param_cmd {
+	__le32 param_id;
+	__le32 param_value;
+} __packed;
+
+/* valid period is 1 ~ 60000ms, unit in millisecond */
+#define WMI_PDEV_PARAM_CAL_PERIOD_MAX 60000
+
+struct wmi_pdev_get_tpc_config_cmd {
+	/* parameter   */
+	__le32 param;
+} __packed;
+
+#define WMI_TPC_CONFIG_PARAM		1
+#define WMI_TPC_RATE_MAX		160
+#define WMI_TPC_TX_N_CHAIN		4
+#define WMI_TPC_PREAM_TABLE_MAX		10
+#define WMI_TPC_FLAG			3
+#define WMI_TPC_BUF_SIZE		10
+
+enum wmi_tpc_table_type {
+	WMI_TPC_TABLE_TYPE_CDD = 0,
+	WMI_TPC_TABLE_TYPE_STBC = 1,
+	WMI_TPC_TABLE_TYPE_TXBF = 2,
+};
+
+enum wmi_tpc_config_event_flag {
+	WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD	= 0x1,
+	WMI_TPC_CONFIG_EVENT_FLAG_TABLE_STBC	= 0x2,
+	WMI_TPC_CONFIG_EVENT_FLAG_TABLE_TXBF	= 0x4,
+};
+
+struct wmi_pdev_tpc_config_event {
+	__le32 reg_domain;
+	__le32 chan_freq;
+	__le32 phy_mode;
+	__le32 twice_antenna_reduction;
+	__le32 twice_max_rd_power;
+	a_sle32 twice_antenna_gain;
+	__le32 power_limit;
+	__le32 rate_max;
+	__le32 num_tx_chain;
+	__le32 ctl;
+	__le32 flags;
+	s8 max_reg_allow_pow[WMI_TPC_TX_N_CHAIN];
+	s8 max_reg_allow_pow_agcdd[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN];
+	s8 max_reg_allow_pow_agstbc[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN];
+	s8 max_reg_allow_pow_agtxbf[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN];
+	u8 rates_array[WMI_TPC_RATE_MAX];
+} __packed;
+
+/* Transmit power scale factor. */
+enum wmi_tp_scale {
+	WMI_TP_SCALE_MAX    = 0,	/* no scaling (default) */
+	WMI_TP_SCALE_50     = 1,	/* 50% of max (-3 dBm) */
+	WMI_TP_SCALE_25     = 2,	/* 25% of max (-6 dBm) */
+	WMI_TP_SCALE_12     = 3,	/* 12% of max (-9 dBm) */
+	WMI_TP_SCALE_MIN    = 4,	/* min, but still on   */
+	WMI_TP_SCALE_SIZE   = 5,	/* max num of enum     */
+};
+
+struct wmi_pdev_chanlist_update_event {
+	/* number of channels */
+	__le32 num_chan;
+	/* array of channels */
+	struct wmi_channel channel_list[1];
+} __packed;
+
+#define WMI_MAX_DEBUG_MESG (sizeof(u32) * 32)
+
+struct wmi_debug_mesg_event {
+	/* message buffer, NULL terminated */
+	char bufp[WMI_MAX_DEBUG_MESG];
+} __packed;
+
+enum {
+	/* P2P device */
+	VDEV_SUBTYPE_P2PDEV = 0,
+	/* P2P client */
+	VDEV_SUBTYPE_P2PCLI,
+	/* P2P GO */
+	VDEV_SUBTYPE_P2PGO,
+	/* BT3.0 HS */
+	VDEV_SUBTYPE_BT,
+};
+
+struct wmi_pdev_set_channel_cmd {
+	/* ignore power, only use flags, mode and freq */
+	struct wmi_channel chan;
+} __packed;
+
+struct wmi_pdev_pktlog_enable_cmd {
+	__le32 ev_bitmap;
+} __packed;
+
+/* Customize the DSCP (0-63) to TID (0-7) mapping for QOS */
+#define WMI_DSCP_MAP_MAX    (64)
+struct wmi_pdev_set_dscp_tid_map_cmd {
+	/* map indicating DSCP to TID conversion */
+	__le32 dscp_to_tid_map[WMI_DSCP_MAP_MAX];
+} __packed;
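+
+/*
+ * Illustrative sketch only (helper name and policy are examples): a common
+ * default is to derive the TID from the three most significant DSCP bits,
+ * i.e. dscp_to_tid_map[dscp] = dscp >> 3 for all 64 DSCP values.
+ */
+static inline void
+ath10k_example_default_dscp_tid_map(struct wmi_pdev_set_dscp_tid_map_cmd *cmd)
+{
+	int dscp;
+
+	for (dscp = 0; dscp < WMI_DSCP_MAP_MAX; dscp++)
+		cmd->dscp_to_tid_map[dscp] = __cpu_to_le32(dscp >> 3);
+}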
+
+enum mcast_bcast_rate_id {
+	WMI_SET_MCAST_RATE,
+	WMI_SET_BCAST_RATE
+};
+
+struct mcast_bcast_rate {
+	enum mcast_bcast_rate_id rate_id;
+	__le32 rate;
+} __packed;
+
+struct wmi_wmm_params {
+	__le32 cwmin;
+	__le32 cwmax;
+	__le32 aifs;
+	__le32 txop;
+	__le32 acm;
+	__le32 no_ack;
+} __packed;
+
+struct wmi_pdev_set_wmm_params {
+	struct wmi_wmm_params ac_be;
+	struct wmi_wmm_params ac_bk;
+	struct wmi_wmm_params ac_vi;
+	struct wmi_wmm_params ac_vo;
+} __packed;
+
+struct wmi_wmm_params_arg {
+	u32 cwmin;
+	u32 cwmax;
+	u32 aifs;
+	u32 txop;
+	u32 acm;
+	u32 no_ack;
+};
+
+struct wmi_wmm_params_all_arg {
+	struct wmi_wmm_params_arg ac_be;
+	struct wmi_wmm_params_arg ac_bk;
+	struct wmi_wmm_params_arg ac_vi;
+	struct wmi_wmm_params_arg ac_vo;
+};
+
+struct wmi_pdev_stats_tx {
+	/* Num HTT cookies queued to dispatch list */
+	__le32 comp_queued;
+
+	/* Num HTT cookies dispatched */
+	__le32 comp_delivered;
+
+	/* Num MSDU queued to WAL */
+	__le32 msdu_enqued;
+
+	/* Num MPDUs queued to WAL */
+	__le32 mpdu_enqued;
+
+	/* Num MSDUs dropped by WMM limit */
+	__le32 wmm_drop;
+
+	/* Num Local frames queued */
+	__le32 local_enqued;
+
+	/* Num Local frames done */
+	__le32 local_freed;
+
+	/* Num queued to HW */
+	__le32 hw_queued;
+
+	/* Num PPDU reaped from HW */
+	__le32 hw_reaped;
+
+	/* Num underruns */
+	__le32 underrun;
+
+	/* Num PPDUs cleaned up in TX abort */
+	__le32 tx_abort;
+
+	/* Num MPDUs requeued by SW */
+	__le32 mpdus_requed;
+
+	/* excessive retries */
+	__le32 tx_ko;
+
+	/* data hw rate code */
+	__le32 data_rc;
+
+	/* Scheduler self triggers */
+	__le32 self_triggers;
+
+	/* frames dropped due to excessive sw retries */
+	__le32 sw_retry_failure;
+
+	/* illegal rate phy errors  */
+	__le32 illgl_rate_phy_err;
+
+	/* wal pdev continuous xretry */
+	__le32 pdev_cont_xretry;
+
+	/* wal pdev tx timeouts */
+	__le32 pdev_tx_timeout;
+
+	/* wal pdev resets  */
+	__le32 pdev_resets;
+
+	/* frames dropped due to non-availability of stateless TIDs */
+	__le32 stateless_tid_alloc_failure;
+
+	__le32 phy_underrun;
+
+	/* MPDU is more than txop limit */
+	__le32 txop_ovf;
+} __packed;
+
+struct wmi_10_4_pdev_stats_tx {
+	/* Num HTT cookies queued to dispatch list */
+	__le32 comp_queued;
+
+	/* Num HTT cookies dispatched */
+	__le32 comp_delivered;
+
+	/* Num MSDU queued to WAL */
+	__le32 msdu_enqued;
+
+	/* Num MPDUs queued to WAL */
+	__le32 mpdu_enqued;
+
+	/* Num MSDUs dropped by WMM limit */
+	__le32 wmm_drop;
+
+	/* Num Local frames queued */
+	__le32 local_enqued;
+
+	/* Num Local frames done */
+	__le32 local_freed;
+
+	/* Num queued to HW */
+	__le32 hw_queued;
+
+	/* Num PPDU reaped from HW */
+	__le32 hw_reaped;
+
+	/* Num underruns */
+	__le32 underrun;
+
+	/* HW Paused. */
+	__le32  hw_paused;
+
+	/* Num PPDUs cleaned up in TX abort */
+	__le32 tx_abort;
+
+	/* Num MPDUs requeued by SW */
+	__le32 mpdus_requed;
+
+	/* excessive retries */
+	__le32 tx_ko;
+
+	/* data hw rate code */
+	__le32 data_rc;
+
+	/* Scheduler self triggers */
+	__le32 self_triggers;
+
+	/* frames dropped due to excessive sw retries */
+	__le32 sw_retry_failure;
+
+	/* illegal rate phy errors  */
+	__le32 illgl_rate_phy_err;
+
+	/* wal pdev continuous xretry */
+	__le32 pdev_cont_xretry;
+
+	/* wal pdev tx timeouts */
+	__le32 pdev_tx_timeout;
+
+	/* wal pdev resets  */
+	__le32 pdev_resets;
+
+	/* frames dropped due to non-availability of stateless TIDs */
+	__le32 stateless_tid_alloc_failure;
+
+	__le32 phy_underrun;
+
+	/* MPDU is more than txop limit */
+	__le32 txop_ovf;
+
+	/* Number of Sequences posted */
+	__le32 seq_posted;
+
+	/* Number of Sequences failed queueing */
+	__le32 seq_failed_queueing;
+
+	/* Number of Sequences completed */
+	__le32 seq_completed;
+
+	/* Number of Sequences restarted */
+	__le32 seq_restarted;
+
+	/* Number of MU Sequences posted */
+	__le32 mu_seq_posted;
+
+	/* Num MPDUs flushed by SW, HWPAUSED, SW TXABORT (reset, channel change) */
+	__le32 mpdus_sw_flush;
+
+	/* Num MPDUs filtered by HW, all filter condition (TTL expired) */
+	__le32 mpdus_hw_filter;
+
+	/* Num MPDUs truncated by PDG
+	 * (TXOP, TBTT, PPDU_duration based on rate, dyn_bw)
+	 */
+	__le32 mpdus_truncated;
+
+	/* Num MPDUs that were tried but didn't receive ACK or BA */
+	__le32 mpdus_ack_failed;
+
+	/* Num MPDUs that were dropped due to expiry. */
+	__le32 mpdus_expired;
+} __packed;
+
+struct wmi_pdev_stats_rx {
+	/* Counts any change in ring routing mid-ppdu */
+	__le32 mid_ppdu_route_change;
+
+	/* Total number of statuses processed */
+	__le32 status_rcvd;
+
+	/* Extra frags on rings 0-3 */
+	__le32 r0_frags;
+	__le32 r1_frags;
+	__le32 r2_frags;
+	__le32 r3_frags;
+
+	/* MSDUs / MPDUs delivered to HTT */
+	__le32 htt_msdus;
+	__le32 htt_mpdus;
+
+	/* MSDUs / MPDUs delivered to local stack */
+	__le32 loc_msdus;
+	__le32 loc_mpdus;
+
+	/* AMSDUs that have more MSDUs than the status ring size */
+	__le32 oversize_amsdu;
+
+	/* Number of PHY errors */
+	__le32 phy_errs;
+
+	/* Number of PHY errors drops */
+	__le32 phy_err_drop;
+
+	/* Number of mpdu errors - FCS, MIC, ENC etc. */
+	__le32 mpdu_errs;
+} __packed;
+
+struct wmi_pdev_stats_peer {
+	/* REMOVE THIS ONCE REAL PEER STAT COUNTERS ARE ADDED */
+	__le32 dummy;
+} __packed;
+
+enum wmi_stats_id {
+	WMI_STAT_PEER = BIT(0),
+	WMI_STAT_AP = BIT(1),
+	WMI_STAT_PDEV = BIT(2),
+	WMI_STAT_VDEV = BIT(3),
+	WMI_STAT_BCNFLT = BIT(4),
+	WMI_STAT_VDEV_RATE = BIT(5),
+};
+
+struct wlan_inst_rssi_args {
+	__le16 cfg_retry_count;
+	__le16 retry_count;
+};
+
+struct wmi_request_stats_cmd {
+	__le32 stats_id;
+
+	__le32 vdev_id;
+
+	/* peer MAC address */
+	struct wmi_mac_addr peer_macaddr;
+
+	/* Instantaneous RSSI arguments */
+	struct wlan_inst_rssi_args inst_rssi_args;
+} __packed;
+
+/* Suspend option */
+enum {
+	/* suspend */
+	WMI_PDEV_SUSPEND,
+
+	/* suspend and disable all interrupts */
+	WMI_PDEV_SUSPEND_AND_DISABLE_INTR,
+};
+
+struct wmi_pdev_suspend_cmd {
+	/* suspend option sent to target */
+	__le32 suspend_opt;
+} __packed;
+
+struct wmi_stats_event {
+	__le32 stats_id; /* WMI_STAT_ */
+	/*
+	 * number of pdev stats event structures
+	 * (wmi_pdev_stats) 0 or 1
+	 */
+	__le32 num_pdev_stats;
+	/*
+	 * number of vdev stats event structures
+	 * (wmi_vdev_stats) 0 or max vdevs
+	 */
+	__le32 num_vdev_stats;
+	/*
+	 * number of peer stats event structures
+	 * (wmi_peer_stats) 0 or max peers
+	 */
+	__le32 num_peer_stats;
+	__le32 num_bcnflt_stats;
+	/*
+	 * followed by
+	 *   num_pdev_stats * size of(struct wmi_pdev_stats)
+	 *   num_vdev_stats * size of(struct wmi_vdev_stats)
+	 *   num_peer_stats * size of(struct wmi_peer_stats)
+	 *
+	 *  By having a zero sized array, the pointer to data area
+	 *  becomes available without increasing the struct size
+	 */
+	u8 data[0];
+} __packed;
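+
+/*
+ * Illustrative sketch only: the records in data[] follow each other in the
+ * order listed in the comment above, so the start of the peer stats area
+ * can be computed from the counts and the per-record sizes (which differ
+ * between firmware revisions, hence the size arguments). Helper name is
+ * hypothetical and no bounds checking is shown.
+ */
+static inline const u8 *
+ath10k_example_peer_stats_start(const struct wmi_stats_event *ev,
+				size_t pdev_stats_sz, size_t vdev_stats_sz)
+{
+	return ev->data +
+	       __le32_to_cpu(ev->num_pdev_stats) * pdev_stats_sz +
+	       __le32_to_cpu(ev->num_vdev_stats) * vdev_stats_sz;
+}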
+
+struct wmi_10_2_stats_event {
+	__le32 stats_id; /* %WMI_REQUEST_ */
+	__le32 num_pdev_stats;
+	__le32 num_pdev_ext_stats;
+	__le32 num_vdev_stats;
+	__le32 num_peer_stats;
+	__le32 num_bcnflt_stats;
+	u8 data[0];
+} __packed;
+
+/*
+ * PDEV statistics
+ * TODO: add all PDEV stats here
+ */
+struct wmi_pdev_stats_base {
+	__le32 chan_nf;
+	__le32 tx_frame_count;
+	__le32 rx_frame_count;
+	__le32 rx_clear_count;
+	__le32 cycle_count;
+	__le32 phy_err_count;
+	__le32 chan_tx_pwr;
+} __packed;
+
+struct wmi_pdev_stats {
+	struct wmi_pdev_stats_base base;
+	struct wmi_pdev_stats_tx tx;
+	struct wmi_pdev_stats_rx rx;
+	struct wmi_pdev_stats_peer peer;
+} __packed;
+
+struct wmi_pdev_stats_extra {
+	__le32 ack_rx_bad;
+	__le32 rts_bad;
+	__le32 rts_good;
+	__le32 fcs_bad;
+	__le32 no_beacons;
+	__le32 mib_int_count;
+} __packed;
+
+struct wmi_10x_pdev_stats {
+	struct wmi_pdev_stats_base base;
+	struct wmi_pdev_stats_tx tx;
+	struct wmi_pdev_stats_rx rx;
+	struct wmi_pdev_stats_peer peer;
+	struct wmi_pdev_stats_extra extra;
+} __packed;
+
+struct wmi_pdev_stats_mem {
+	__le32 dram_free;
+	__le32 iram_free;
+} __packed;
+
+struct wmi_10_2_pdev_stats {
+	struct wmi_pdev_stats_base base;
+	struct wmi_pdev_stats_tx tx;
+	__le32 mc_drop;
+	struct wmi_pdev_stats_rx rx;
+	__le32 pdev_rx_timeout;
+	struct wmi_pdev_stats_mem mem;
+	struct wmi_pdev_stats_peer peer;
+	struct wmi_pdev_stats_extra extra;
+} __packed;
+
+struct wmi_10_4_pdev_stats {
+	struct wmi_pdev_stats_base base;
+	struct wmi_10_4_pdev_stats_tx tx;
+	struct wmi_pdev_stats_rx rx;
+	__le32 rx_ovfl_errs;
+	struct wmi_pdev_stats_mem mem;
+	__le32 sram_free_size;
+	struct wmi_pdev_stats_extra extra;
+} __packed;
+
+/*
+ * VDEV statistics
+ * TODO: add all VDEV stats here
+ */
+struct wmi_vdev_stats {
+	__le32 vdev_id;
+} __packed;
+
+/*
+ * peer statistics.
+ * TODO: add more stats
+ */
+struct wmi_peer_stats {
+	struct wmi_mac_addr peer_macaddr;
+	__le32 peer_rssi;
+	__le32 peer_tx_rate;
+} __packed;
+
+struct wmi_10x_peer_stats {
+	struct wmi_peer_stats old;
+	__le32 peer_rx_rate;
+} __packed;
+
+struct wmi_10_2_peer_stats {
+	struct wmi_peer_stats old;
+	__le32 peer_rx_rate;
+	__le32 current_per;
+	__le32 retries;
+	__le32 tx_rate_count;
+	__le32 max_4ms_frame_len;
+	__le32 total_sub_frames;
+	__le32 tx_bytes;
+	__le32 num_pkt_loss_overflow[4];
+	__le32 num_pkt_loss_excess_retry[4];
+} __packed;
+
+struct wmi_10_2_4_peer_stats {
+	struct wmi_10_2_peer_stats common;
+	__le32 unknown_value; /* FIXME: what is this word? */
+} __packed;
+
+struct wmi_10_4_peer_stats {
+	struct wmi_mac_addr peer_macaddr;
+	__le32 peer_rssi;
+	__le32 peer_rssi_seq_num;
+	__le32 peer_tx_rate;
+	__le32 peer_rx_rate;
+	__le32 current_per;
+	__le32 retries;
+	__le32 tx_rate_count;
+	__le32 max_4ms_frame_len;
+	__le32 total_sub_frames;
+	__le32 tx_bytes;
+	__le32 num_pkt_loss_overflow[4];
+	__le32 num_pkt_loss_excess_retry[4];
+	__le32 peer_rssi_changed;
+} __packed;
+
+struct wmi_10_2_pdev_ext_stats {
+	__le32 rx_rssi_comb;
+	__le32 rx_rssi[4];
+	__le32 rx_mcs[10];
+	__le32 tx_mcs[10];
+	__le32 ack_rssi;
+} __packed;
+
+struct wmi_vdev_create_cmd {
+	__le32 vdev_id;
+	__le32 vdev_type;
+	__le32 vdev_subtype;
+	struct wmi_mac_addr vdev_macaddr;
+} __packed;
+
+enum wmi_vdev_type {
+	WMI_VDEV_TYPE_AP      = 1,
+	WMI_VDEV_TYPE_STA     = 2,
+	WMI_VDEV_TYPE_IBSS    = 3,
+	WMI_VDEV_TYPE_MONITOR = 4,
+};
+
+enum wmi_vdev_subtype {
+	WMI_VDEV_SUBTYPE_NONE       = 0,
+	WMI_VDEV_SUBTYPE_P2P_DEVICE = 1,
+	WMI_VDEV_SUBTYPE_P2P_CLIENT = 2,
+	WMI_VDEV_SUBTYPE_P2P_GO     = 3,
+};
+
+/* values for vdev_subtype */
+
+/* values for vdev_start_request flags */
+/*
+ * Indicates that the AP VDEV uses a hidden ssid. Only valid for
+ * AP/GO.
+ */
+#define WMI_VDEV_START_HIDDEN_SSID  (1<<0)
+/*
+ * Indicates if robust management frame/management frame
+ *  protection is enabled. For GO/AP vdevs, it indicates that
+ *  it may support station/client associations with RMF enabled.
+ *  For STA/client vdevs, it indicates that sta will
+ *  associate with AP with RMF enabled. */
+#define WMI_VDEV_START_PMF_ENABLED  (1<<1)
+
+struct wmi_p2p_noa_descriptor {
+	__le32 type_count; /* 255: continuous schedule, 0: reserved */
+	__le32 duration;  /* Absent period duration in micro seconds */
+	__le32 interval;   /* Absent period interval in micro seconds */
+	__le32 start_time; /* 32 bit tsf time when it starts */
+} __packed;
+
+struct wmi_vdev_start_request_cmd {
+	/* WMI channel */
+	struct wmi_channel chan;
+	/* unique id identifying the VDEV, generated by the caller */
+	__le32 vdev_id;
+	/* requestor id identifying the caller module */
+	__le32 requestor_id;
+	/* beacon interval from received beacon */
+	__le32 beacon_interval;
+	/* DTIM Period from the received beacon */
+	__le32 dtim_period;
+	/* Flags */
+	__le32 flags;
+	/* ssid field. Only valid for AP/GO/IBSS/BTAmp VDEV type. */
+	struct wmi_ssid ssid;
+	/* beacon/probe response xmit rate. Applicable for SoftAP. */
+	__le32 bcn_tx_rate;
+	/* beacon/probe response xmit power. Applicable for SoftAP. */
+	__le32 bcn_tx_power;
+	/* number of p2p NOA descriptor(s) from scan entry */
+	__le32 num_noa_descriptors;
+	/*
+	 * Disable H/W ack. This is used by WMI_VDEV_RESTART_REQUEST_CMDID.
+	 * During CAC our HW shouldn't ack directed frames.
+	 */
+	__le32 disable_hw_ack;
+	/* actual p2p NOA descriptor from scan entry */
+	struct wmi_p2p_noa_descriptor noa_descriptors[2];
+} __packed;
+
+struct wmi_vdev_restart_request_cmd {
+	struct wmi_vdev_start_request_cmd vdev_start_request_cmd;
+} __packed;
+
+struct wmi_vdev_start_request_arg {
+	u32 vdev_id;
+	struct wmi_channel_arg channel;
+	u32 bcn_intval;
+	u32 dtim_period;
+	u8 *ssid;
+	u32 ssid_len;
+	u32 bcn_tx_rate;
+	u32 bcn_tx_power;
+	bool disable_hw_ack;
+	bool hidden_ssid;
+	bool pmf_enabled;
+};
+
+struct wmi_vdev_delete_cmd {
+	/* unique id identifying the VDEV, generated by the caller */
+	__le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_up_cmd {
+	__le32 vdev_id;
+	__le32 vdev_assoc_id;
+	struct wmi_mac_addr vdev_bssid;
+} __packed;
+
+struct wmi_vdev_stop_cmd {
+	__le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_down_cmd {
+	__le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_standby_response_cmd {
+	/* unique id identifying the VDEV, generated by the caller */
+	__le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_resume_response_cmd {
+	/* unique id identifying the VDEV, generated by the caller */
+	__le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_set_param_cmd {
+	__le32 vdev_id;
+	__le32 param_id;
+	__le32 param_value;
+} __packed;
+
+#define WMI_MAX_KEY_INDEX   3
+#define WMI_MAX_KEY_LEN     32
+
+#define WMI_KEY_PAIRWISE 0x00
+#define WMI_KEY_GROUP    0x01
+#define WMI_KEY_TX_USAGE 0x02 /* default tx key - static wep */
+
+struct wmi_key_seq_counter {
+	__le32 key_seq_counter_l;
+	__le32 key_seq_counter_h;
+} __packed;
+
+#define WMI_CIPHER_NONE     0x0 /* clear key */
+#define WMI_CIPHER_WEP      0x1
+#define WMI_CIPHER_TKIP     0x2
+#define WMI_CIPHER_AES_OCB  0x3
+#define WMI_CIPHER_AES_CCM  0x4
+#define WMI_CIPHER_WAPI     0x5
+#define WMI_CIPHER_CKIP     0x6
+#define WMI_CIPHER_AES_CMAC 0x7
+
+struct wmi_vdev_install_key_cmd {
+	__le32 vdev_id;
+	struct wmi_mac_addr peer_macaddr;
+	__le32 key_idx;
+	__le32 key_flags;
+	__le32 key_cipher; /* %WMI_CIPHER_ */
+	struct wmi_key_seq_counter key_rsc_counter;
+	struct wmi_key_seq_counter key_global_rsc_counter;
+	struct wmi_key_seq_counter key_tsc_counter;
+	u8 wpi_key_rsc_counter[16];
+	u8 wpi_key_tsc_counter[16];
+	__le32 key_len;
+	__le32 key_txmic_len;
+	__le32 key_rxmic_len;
+
+	/* contains key followed by tx mic followed by rx mic */
+	u8 key_data[0];
+} __packed;
+
+struct wmi_vdev_install_key_arg {
+	u32 vdev_id;
+	const u8 *macaddr;
+	u32 key_idx;
+	u32 key_flags;
+	u32 key_cipher;
+	u32 key_len;
+	u32 key_txmic_len;
+	u32 key_rxmic_len;
+	const void *key_data;
+};
+
+/*
+ * vdev fixed rate format:
+ * - preamble - b7:b6 - see WMI_RATE_PREAMBLE_
+ * - nss      - b5:b4 - ss number (0 means 1ss)
+ * - rate_mcs - b3:b0 - as below
+ *    CCK:  0 - 11Mbps, 1 - 5.5Mbps, 2 - 2Mbps, 3 - 1Mbps,
+ *          4 - 11Mbps (s), 5 - 5.5Mbps (s), 6 - 2Mbps (s)
+ *    OFDM: 0 - 48Mbps, 1 - 24Mbps, 2 - 12Mbps, 3 - 6Mbps,
+ *          4 - 54Mbps, 5 - 36Mbps, 6 - 18Mbps, 7 - 9Mbps
+ *    HT/VHT: MCS index
+ */
+
+/* Preamble types to be used with VDEV fixed rate configuration */
+enum wmi_rate_preamble {
+	WMI_RATE_PREAMBLE_OFDM,
+	WMI_RATE_PREAMBLE_CCK,
+	WMI_RATE_PREAMBLE_HT,
+	WMI_RATE_PREAMBLE_VHT,
+};
+
+#define ATH10K_HW_NSS(rate)		(1 + (((rate) >> 4) & 0x3))
+#define ATH10K_HW_PREAMBLE(rate)	(((rate) >> 6) & 0x3)
+#define ATH10K_HW_RATECODE(rate, nss, preamble)	\
+	(((preamble) << 6) | ((nss) << 4) | (rate))
+
+/* Value to disable fixed rate setting */
+#define WMI_FIXED_RATE_NONE    (0xff)
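+
+/*
+ * Worked example (illustrative, helper name is hypothetical): per the
+ * layout above, an HT MCS7 rate on two spatial streams is
+ *   ATH10K_HW_RATECODE(7, 1, WMI_RATE_PREAMBLE_HT)
+ *     = (2 << 6) | (1 << 4) | 7 = 0x97
+ * since the nss field is zero based (1 means 2ss); ATH10K_HW_NSS(0x97)
+ * then yields 2 again.
+ */
+static inline u8 ath10k_example_ht_mcs7_2ss_ratecode(void)
+{
+	return ATH10K_HW_RATECODE(7, 1, WMI_RATE_PREAMBLE_HT); /* 0x97 */
+}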
+
+struct wmi_vdev_param_map {
+	u32 rts_threshold;
+	u32 fragmentation_threshold;
+	u32 beacon_interval;
+	u32 listen_interval;
+	u32 multicast_rate;
+	u32 mgmt_tx_rate;
+	u32 slot_time;
+	u32 preamble;
+	u32 swba_time;
+	u32 wmi_vdev_stats_update_period;
+	u32 wmi_vdev_pwrsave_ageout_time;
+	u32 wmi_vdev_host_swba_interval;
+	u32 dtim_period;
+	u32 wmi_vdev_oc_scheduler_air_time_limit;
+	u32 wds;
+	u32 atim_window;
+	u32 bmiss_count_max;
+	u32 bmiss_first_bcnt;
+	u32 bmiss_final_bcnt;
+	u32 feature_wmm;
+	u32 chwidth;
+	u32 chextoffset;
+	u32 disable_htprotection;
+	u32 sta_quickkickout;
+	u32 mgmt_rate;
+	u32 protection_mode;
+	u32 fixed_rate;
+	u32 sgi;
+	u32 ldpc;
+	u32 tx_stbc;
+	u32 rx_stbc;
+	u32 intra_bss_fwd;
+	u32 def_keyid;
+	u32 nss;
+	u32 bcast_data_rate;
+	u32 mcast_data_rate;
+	u32 mcast_indicate;
+	u32 dhcp_indicate;
+	u32 unknown_dest_indicate;
+	u32 ap_keepalive_min_idle_inactive_time_secs;
+	u32 ap_keepalive_max_idle_inactive_time_secs;
+	u32 ap_keepalive_max_unresponsive_time_secs;
+	u32 ap_enable_nawds;
+	u32 mcast2ucast_set;
+	u32 enable_rtscts;
+	u32 txbf;
+	u32 packet_powersave;
+	u32 drop_unencry;
+	u32 tx_encap_type;
+	u32 ap_detect_out_of_sync_sleeping_sta_time_secs;
+	u32 rc_num_retries;
+	u32 cabq_maxdur;
+	u32 mfptest_set;
+	u32 rts_fixed_rate;
+	u32 vht_sgimask;
+	u32 vht80_ratemask;
+	u32 early_rx_adjust_enable;
+	u32 early_rx_tgt_bmiss_num;
+	u32 early_rx_bmiss_sample_cycle;
+	u32 early_rx_slop_step;
+	u32 early_rx_init_slop;
+	u32 early_rx_adjust_pause;
+	u32 proxy_sta;
+	u32 meru_vc;
+	u32 rx_decap_type;
+	u32 bw_nss_ratemask;
+};
+
+#define WMI_VDEV_PARAM_UNSUPPORTED 0
+
+/* the definition of different VDEV parameters */
+enum wmi_vdev_param {
+	/* RTS Threshold */
+	WMI_VDEV_PARAM_RTS_THRESHOLD = 0x1,
+	/* Fragmentation threshold */
+	WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+	/* beacon interval in TUs */
+	WMI_VDEV_PARAM_BEACON_INTERVAL,
+	/* Listen interval in TUs */
+	WMI_VDEV_PARAM_LISTEN_INTERVAL,
+	/* multicast rate in Mbps */
+	WMI_VDEV_PARAM_MULTICAST_RATE,
+	/* management frame rate in Mbps */
+	WMI_VDEV_PARAM_MGMT_TX_RATE,
+	/* slot time (long vs short) */
+	WMI_VDEV_PARAM_SLOT_TIME,
+	/* preamble (long vs short) */
+	WMI_VDEV_PARAM_PREAMBLE,
+	/* SWBA time (time before tbtt in msec) */
+	WMI_VDEV_PARAM_SWBA_TIME,
+	/* time period for updating VDEV stats */
+	WMI_VDEV_STATS_UPDATE_PERIOD,
+	/* age out time in msec for frames queued for station in power save */
+	WMI_VDEV_PWRSAVE_AGEOUT_TIME,
+	/*
+	 * Host SWBA interval (time in msec before tbtt for SWBA event
+	 * generation).
+	 */
+	WMI_VDEV_HOST_SWBA_INTERVAL,
+	/* DTIM period (specified in units of num beacon intervals) */
+	WMI_VDEV_PARAM_DTIM_PERIOD,
+	/*
+	 * scheduler air time limit for this VDEV. used by off chan
+	 * scheduler.
+	 */
+	WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+	/* enable/disable WDS for this VDEV */
+	WMI_VDEV_PARAM_WDS,
+	/* ATIM Window */
+	WMI_VDEV_PARAM_ATIM_WINDOW,
+	/* BMISS max */
+	WMI_VDEV_PARAM_BMISS_COUNT_MAX,
+	/* BMISS first time */
+	WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
+	/* BMISS final time */
+	WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
+	/* WMM enable/disable */
+	WMI_VDEV_PARAM_FEATURE_WMM,
+	/* Channel width */
+	WMI_VDEV_PARAM_CHWIDTH,
+	/* Channel Offset */
+	WMI_VDEV_PARAM_CHEXTOFFSET,
+	/* Disable HT Protection */
+	WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
+	/* Quick STA Kickout */
+	WMI_VDEV_PARAM_STA_QUICKKICKOUT,
+	/* Rate to be used with Management frames */
+	WMI_VDEV_PARAM_MGMT_RATE,
+	/* Protection Mode */
+	WMI_VDEV_PARAM_PROTECTION_MODE,
+	/* Fixed rate setting */
+	WMI_VDEV_PARAM_FIXED_RATE,
+	/* Short GI Enable/Disable */
+	WMI_VDEV_PARAM_SGI,
+	/* Enable LDPC */
+	WMI_VDEV_PARAM_LDPC,
+	/* Enable Tx STBC */
+	WMI_VDEV_PARAM_TX_STBC,
+	/* Enable Rx STBC */
+	WMI_VDEV_PARAM_RX_STBC,
+	/* Intra BSS forwarding  */
+	WMI_VDEV_PARAM_INTRA_BSS_FWD,
+	/* Setting Default xmit key for Vdev */
+	WMI_VDEV_PARAM_DEF_KEYID,
+	/* NSS width */
+	WMI_VDEV_PARAM_NSS,
+	/* Set the custom rate for the broadcast data frames */
+	WMI_VDEV_PARAM_BCAST_DATA_RATE,
+	/* Set the custom rate (rate-code) for multicast data frames */
+	WMI_VDEV_PARAM_MCAST_DATA_RATE,
+	/* Tx multicast packet indicate Enable/Disable */
+	WMI_VDEV_PARAM_MCAST_INDICATE,
+	/* Tx DHCP packet indicate Enable/Disable */
+	WMI_VDEV_PARAM_DHCP_INDICATE,
+	/* Enable host inspection of Tx unicast packet to unknown destination */
+	WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+
+	/* Minimum idle time before the AP considers a STA inactive */
+	WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+
+	/*
+	 * An associated STA is considered inactive when there is no recent
+	 * TX/RX activity and no downlink frames are buffered for it. Once a
+	 * STA exceeds the maximum idle inactive time, the AP will send an
+	 * 802.11 data-null as a keep alive to verify the STA is still
+	 * associated. If the STA does ACK the data-null, or if the data-null
+	 * is buffered and the STA does not retrieve it, the STA will be
+	 * considered unresponsive
+	 * (see WMI_VDEV_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS).
+	 */
+	WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+
+	/*
+	 * An associated STA is considered unresponsive if there is no recent
+	 * TX/RX activity and downlink frames are buffered for it. Once a STA
+	 * exceeds the maximum unresponsive time, the AP will send a
+	 * WMI_STA_KICKOUT event to the host so the STA can be deleted. */
+	WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+
+	/* Enable NAWDS : MCAST INSPECT Enable, NAWDS Flag set */
+	WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
+	/* Enable/Disable RTS-CTS */
+	WMI_VDEV_PARAM_ENABLE_RTSCTS,
+	/* Enable TX beamformee/beamformer */
+	WMI_VDEV_PARAM_TXBF,
+
+	/* Set packet power save */
+	WMI_VDEV_PARAM_PACKET_POWERSAVE,
+
+	/*
+	 * Drops unencrypted packets if received in an encrypted connection,
+	 * otherwise forwards them to the host.
+	 */
+	WMI_VDEV_PARAM_DROP_UNENCRY,
+
+	/*
+	 * Set the encapsulation type for frames.
+	 */
+	WMI_VDEV_PARAM_TX_ENCAP_TYPE,
+};
+
+/* the definition of different VDEV parameters */
+enum wmi_10x_vdev_param {
+	/* RTS Threshold */
+	WMI_10X_VDEV_PARAM_RTS_THRESHOLD = 0x1,
+	/* Fragmentation threshold */
+	WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+	/* beacon interval in TUs */
+	WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
+	/* Listen interval in TUs */
+	WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
+	/* multicast rate in Mbps */
+	WMI_10X_VDEV_PARAM_MULTICAST_RATE,
+	/* management frame rate in Mbps */
+	WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
+	/* slot time (long vs short) */
+	WMI_10X_VDEV_PARAM_SLOT_TIME,
+	/* preamble (long vs short) */
+	WMI_10X_VDEV_PARAM_PREAMBLE,
+	/* SWBA time (time before tbtt in msec) */
+	WMI_10X_VDEV_PARAM_SWBA_TIME,
+	/* time period for updating VDEV stats */
+	WMI_10X_VDEV_STATS_UPDATE_PERIOD,
+	/* age out time in msec for frames queued for station in power save */
+	WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
+	/*
+	 * Host SWBA interval (time in msec before tbtt for SWBA event
+	 * generation).
+	 */
+	WMI_10X_VDEV_HOST_SWBA_INTERVAL,
+	/* DTIM period (specified in units of num beacon intervals) */
+	WMI_10X_VDEV_PARAM_DTIM_PERIOD,
+	/*
+	 * scheduler air time limit for this VDEV. used by off chan
+	 * scheduler.
+	 */
+	WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+	/* enable/disable WDS for this VDEV */
+	WMI_10X_VDEV_PARAM_WDS,
+	/* ATIM Window */
+	WMI_10X_VDEV_PARAM_ATIM_WINDOW,
+	/* BMISS max */
+	WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
+	/* WMM enable/disable */
+	WMI_10X_VDEV_PARAM_FEATURE_WMM,
+	/* Channel width */
+	WMI_10X_VDEV_PARAM_CHWIDTH,
+	/* Channel Offset */
+	WMI_10X_VDEV_PARAM_CHEXTOFFSET,
+	/* Disable HT Protection */
+	WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
+	/* Quick STA Kickout */
+	WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
+	/* Rate to be used with Management frames */
+	WMI_10X_VDEV_PARAM_MGMT_RATE,
+	/* Protection Mode */
+	WMI_10X_VDEV_PARAM_PROTECTION_MODE,
+	/* Fixed rate setting */
+	WMI_10X_VDEV_PARAM_FIXED_RATE,
+	/* Short GI Enable/Disable */
+	WMI_10X_VDEV_PARAM_SGI,
+	/* Enable LDPC */
+	WMI_10X_VDEV_PARAM_LDPC,
+	/* Enable Tx STBC */
+	WMI_10X_VDEV_PARAM_TX_STBC,
+	/* Enable Rx STBC */
+	WMI_10X_VDEV_PARAM_RX_STBC,
+	/* Intra BSS forwarding  */
+	WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
+	/* Setting Default xmit key for Vdev */
+	WMI_10X_VDEV_PARAM_DEF_KEYID,
+	/* NSS width */
+	WMI_10X_VDEV_PARAM_NSS,
+	/* Set the custom rate for the broadcast data frames */
+	WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
+	/* Set the custom rate (rate-code) for multicast data frames */
+	WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
+	/* Tx multicast packet indicate Enable/Disable */
+	WMI_10X_VDEV_PARAM_MCAST_INDICATE,
+	/* Tx DHCP packet indicate Enable/Disable */
+	WMI_10X_VDEV_PARAM_DHCP_INDICATE,
+	/* Enable host inspection of Tx unicast packet to unknown destination */
+	WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+
+	/* Minimum idle time before the AP considers a STA inactive */
+	WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+
+	/*
+	 * An associated STA is considered inactive when there is no recent
+	 * TX/RX activity and no downlink frames are buffered for it. Once a
+	 * STA exceeds the maximum idle inactive time, the AP will send an
+	 * 802.11 data-null as a keep alive to verify the STA is still
+	 * associated. If the STA does ACK the data-null, or if the data-null
+	 * is buffered and the STA does not retrieve it, the STA will be
+	 * considered unresponsive
+	 * (see WMI_10X_VDEV_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS).
+	 */
+	WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+
+	/*
+	 * An associated STA is considered unresponsive if there is no recent
+	 * TX/RX activity and downlink frames are buffered for it. Once a STA
+	 * exceeds the maximum unresponsive time, the AP will send a
+	 * WMI_10X_STA_KICKOUT event to the host so the STA can be deleted. */
+	WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+
+	/* Enable NAWDS : MCAST INSPECT Enable, NAWDS Flag set */
+	WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
+
+	WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
+	/* Enable/Disable RTS-CTS */
+	WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
+
+	WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+
+	/* following are available as of firmware 10.2 */
+	WMI_10X_VDEV_PARAM_TX_ENCAP_TYPE,
+	WMI_10X_VDEV_PARAM_CABQ_MAXDUR,
+	WMI_10X_VDEV_PARAM_MFPTEST_SET,
+	WMI_10X_VDEV_PARAM_RTS_FIXED_RATE,
+	WMI_10X_VDEV_PARAM_VHT_SGIMASK,
+	WMI_10X_VDEV_PARAM_VHT80_RATEMASK,
+};
+
+enum wmi_10_4_vdev_param {
+	WMI_10_4_VDEV_PARAM_RTS_THRESHOLD = 0x1,
+	WMI_10_4_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+	WMI_10_4_VDEV_PARAM_BEACON_INTERVAL,
+	WMI_10_4_VDEV_PARAM_LISTEN_INTERVAL,
+	WMI_10_4_VDEV_PARAM_MULTICAST_RATE,
+	WMI_10_4_VDEV_PARAM_MGMT_TX_RATE,
+	WMI_10_4_VDEV_PARAM_SLOT_TIME,
+	WMI_10_4_VDEV_PARAM_PREAMBLE,
+	WMI_10_4_VDEV_PARAM_SWBA_TIME,
+	WMI_10_4_VDEV_STATS_UPDATE_PERIOD,
+	WMI_10_4_VDEV_PWRSAVE_AGEOUT_TIME,
+	WMI_10_4_VDEV_HOST_SWBA_INTERVAL,
+	WMI_10_4_VDEV_PARAM_DTIM_PERIOD,
+	WMI_10_4_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+	WMI_10_4_VDEV_PARAM_WDS,
+	WMI_10_4_VDEV_PARAM_ATIM_WINDOW,
+	WMI_10_4_VDEV_PARAM_BMISS_COUNT_MAX,
+	WMI_10_4_VDEV_PARAM_BMISS_FIRST_BCNT,
+	WMI_10_4_VDEV_PARAM_BMISS_FINAL_BCNT,
+	WMI_10_4_VDEV_PARAM_FEATURE_WMM,
+	WMI_10_4_VDEV_PARAM_CHWIDTH,
+	WMI_10_4_VDEV_PARAM_CHEXTOFFSET,
+	WMI_10_4_VDEV_PARAM_DISABLE_HTPROTECTION,
+	WMI_10_4_VDEV_PARAM_STA_QUICKKICKOUT,
+	WMI_10_4_VDEV_PARAM_MGMT_RATE,
+	WMI_10_4_VDEV_PARAM_PROTECTION_MODE,
+	WMI_10_4_VDEV_PARAM_FIXED_RATE,
+	WMI_10_4_VDEV_PARAM_SGI,
+	WMI_10_4_VDEV_PARAM_LDPC,
+	WMI_10_4_VDEV_PARAM_TX_STBC,
+	WMI_10_4_VDEV_PARAM_RX_STBC,
+	WMI_10_4_VDEV_PARAM_INTRA_BSS_FWD,
+	WMI_10_4_VDEV_PARAM_DEF_KEYID,
+	WMI_10_4_VDEV_PARAM_NSS,
+	WMI_10_4_VDEV_PARAM_BCAST_DATA_RATE,
+	WMI_10_4_VDEV_PARAM_MCAST_DATA_RATE,
+	WMI_10_4_VDEV_PARAM_MCAST_INDICATE,
+	WMI_10_4_VDEV_PARAM_DHCP_INDICATE,
+	WMI_10_4_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+	WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+	WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+	WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+	WMI_10_4_VDEV_PARAM_AP_ENABLE_NAWDS,
+	WMI_10_4_VDEV_PARAM_MCAST2UCAST_SET,
+	WMI_10_4_VDEV_PARAM_ENABLE_RTSCTS,
+	WMI_10_4_VDEV_PARAM_RC_NUM_RETRIES,
+	WMI_10_4_VDEV_PARAM_TXBF,
+	WMI_10_4_VDEV_PARAM_PACKET_POWERSAVE,
+	WMI_10_4_VDEV_PARAM_DROP_UNENCRY,
+	WMI_10_4_VDEV_PARAM_TX_ENCAP_TYPE,
+	WMI_10_4_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+	WMI_10_4_VDEV_PARAM_CABQ_MAXDUR,
+	WMI_10_4_VDEV_PARAM_MFPTEST_SET,
+	WMI_10_4_VDEV_PARAM_RTS_FIXED_RATE,
+	WMI_10_4_VDEV_PARAM_VHT_SGIMASK,
+	WMI_10_4_VDEV_PARAM_VHT80_RATEMASK,
+	WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
+	WMI_10_4_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
+	WMI_10_4_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
+	WMI_10_4_VDEV_PARAM_EARLY_RX_SLOP_STEP,
+	WMI_10_4_VDEV_PARAM_EARLY_RX_INIT_SLOP,
+	WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
+	WMI_10_4_VDEV_PARAM_PROXY_STA,
+	WMI_10_4_VDEV_PARAM_MERU_VC,
+	WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
+	WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
+};
+
+#define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0)
+#define WMI_VDEV_PARAM_TXBF_MU_TX_BFEE BIT(1)
+#define WMI_VDEV_PARAM_TXBF_SU_TX_BFER BIT(2)
+#define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3)
+
+#define WMI_TXBF_STS_CAP_OFFSET_LSB	4
+#define WMI_TXBF_STS_CAP_OFFSET_MASK	0xf0
+#define WMI_BF_SOUND_DIM_OFFSET_LSB	8
+#define WMI_BF_SOUND_DIM_OFFSET_MASK	0xf00
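+
+/*
+ * Illustrative sketch only: the TXBF vdev parameter is a bitfield; the
+ * capability bits above are OR'ed together and the STS capability and
+ * sounding dimension counts are shifted in at the *_OFFSET_LSB positions.
+ * Helper name and the chosen capability bits are examples.
+ */
+static inline u32 ath10k_example_txbf_param(u32 sts_cap, u32 sound_dim)
+{
+	return WMI_VDEV_PARAM_TXBF_SU_TX_BFEE |
+	       WMI_VDEV_PARAM_TXBF_SU_TX_BFER |
+	       ((sts_cap << WMI_TXBF_STS_CAP_OFFSET_LSB) &
+		WMI_TXBF_STS_CAP_OFFSET_MASK) |
+	       ((sound_dim << WMI_BF_SOUND_DIM_OFFSET_LSB) &
+		WMI_BF_SOUND_DIM_OFFSET_MASK);
+}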
+
+/* slot time long */
+#define WMI_VDEV_SLOT_TIME_LONG		0x1
+/* slot time short */
+#define WMI_VDEV_SLOT_TIME_SHORT	0x2
+/* preamble long */
+#define WMI_VDEV_PREAMBLE_LONG		0x1
+/* preamble short */
+#define WMI_VDEV_PREAMBLE_SHORT		0x2
+
+enum wmi_start_event_param {
+	WMI_VDEV_RESP_START_EVENT = 0,
+	WMI_VDEV_RESP_RESTART_EVENT,
+};
+
+struct wmi_vdev_start_response_event {
+	__le32 vdev_id;
+	__le32 req_id;
+	__le32 resp_type; /* %WMI_VDEV_RESP_ */
+	__le32 status;
+} __packed;
+
+struct wmi_vdev_standby_req_event {
+	/* unique id identifying the VDEV, generated by the caller */
+	__le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_resume_req_event {
+	/* unique id identifying the VDEV, generated by the caller */
+	__le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_stopped_event {
+	/* unique id identifying the VDEV, generated by the caller */
+	__le32 vdev_id;
+} __packed;
+
+/*
+ * common structure used for simple events
+ * (stopped, resume_req, standby response)
+ */
+struct wmi_vdev_simple_event {
+	/* unique id identifying the VDEV, generated by the caller */
+	__le32 vdev_id;
+} __packed;
+
+/* VDEV start response status codes */
+/* VDEV successfully started */
+#define WMI_INIFIED_VDEV_START_RESPONSE_STATUS_SUCCESS	0x0
+
+/* requested VDEV not found */
+#define WMI_INIFIED_VDEV_START_RESPONSE_INVALID_VDEVID	0x1
+
+/* unsupported VDEV combination */
+#define WMI_INIFIED_VDEV_START_RESPONSE_NOT_SUPPORTED	0x2
+
+/* TODO: please add more comments if you have in-depth information */
+struct wmi_vdev_spectral_conf_cmd {
+	__le32 vdev_id;
+
+	/* number of fft samples to send (0 for infinite) */
+	__le32 scan_count;
+	__le32 scan_period;
+	__le32 scan_priority;
+
+	/* number of bins in the FFT: 2^(fft_size - bin_scale) */
+	__le32 scan_fft_size;
+	__le32 scan_gc_ena;
+	__le32 scan_restart_ena;
+	__le32 scan_noise_floor_ref;
+	__le32 scan_init_delay;
+	__le32 scan_nb_tone_thr;
+	__le32 scan_str_bin_thr;
+	__le32 scan_wb_rpt_mode;
+	__le32 scan_rssi_rpt_mode;
+	__le32 scan_rssi_thr;
+	__le32 scan_pwr_format;
+
+	/* rpt_mode: Format of FFT report to software for spectral scan
+	 * triggered FFTs:
+	 *	0: No FFT report (only spectral scan summary report)
+	 *	1: 2-dword summary of metrics for each completed FFT + spectral
+	 *	   scan summary report
+	 *	2: 2-dword summary of metrics for each completed FFT +
+	 *	   1x-oversampled bins (in-band) per FFT + spectral scan summary
+	 *	   report
+	 *	3: 2-dword summary of metrics for each completed FFT +
+	 *	   2x-oversampled bins (all) per FFT + spectral scan summary
+	 *	   report
+	 */
+	__le32 scan_rpt_mode;
+	__le32 scan_bin_scale;
+	__le32 scan_dbm_adj;
+	__le32 scan_chn_mask;
+} __packed;
+
+struct wmi_vdev_spectral_conf_arg {
+	u32 vdev_id;
+	u32 scan_count;
+	u32 scan_period;
+	u32 scan_priority;
+	u32 scan_fft_size;
+	u32 scan_gc_ena;
+	u32 scan_restart_ena;
+	u32 scan_noise_floor_ref;
+	u32 scan_init_delay;
+	u32 scan_nb_tone_thr;
+	u32 scan_str_bin_thr;
+	u32 scan_wb_rpt_mode;
+	u32 scan_rssi_rpt_mode;
+	u32 scan_rssi_thr;
+	u32 scan_pwr_format;
+	u32 scan_rpt_mode;
+	u32 scan_bin_scale;
+	u32 scan_dbm_adj;
+	u32 scan_chn_mask;
+};
+
+#define WMI_SPECTRAL_ENABLE_DEFAULT              0
+#define WMI_SPECTRAL_COUNT_DEFAULT               0
+#define WMI_SPECTRAL_PERIOD_DEFAULT             35
+#define WMI_SPECTRAL_PRIORITY_DEFAULT            1
+#define WMI_SPECTRAL_FFT_SIZE_DEFAULT            7
+#define WMI_SPECTRAL_GC_ENA_DEFAULT              1
+#define WMI_SPECTRAL_RESTART_ENA_DEFAULT         0
+#define WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT   -96
+#define WMI_SPECTRAL_INIT_DELAY_DEFAULT         80
+#define WMI_SPECTRAL_NB_TONE_THR_DEFAULT        12
+#define WMI_SPECTRAL_STR_BIN_THR_DEFAULT         8
+#define WMI_SPECTRAL_WB_RPT_MODE_DEFAULT         0
+#define WMI_SPECTRAL_RSSI_RPT_MODE_DEFAULT       0
+#define WMI_SPECTRAL_RSSI_THR_DEFAULT         0xf0
+#define WMI_SPECTRAL_PWR_FORMAT_DEFAULT          0
+#define WMI_SPECTRAL_RPT_MODE_DEFAULT            2
+#define WMI_SPECTRAL_BIN_SCALE_DEFAULT           1
+#define WMI_SPECTRAL_DBM_ADJ_DEFAULT             1
+#define WMI_SPECTRAL_CHN_MASK_DEFAULT            1
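+/* Worked example with the defaults above: the FFT produces
+ * 2^(WMI_SPECTRAL_FFT_SIZE_DEFAULT - WMI_SPECTRAL_BIN_SCALE_DEFAULT) =
+ * 2^(7 - 1) = 64 bins per sample (see the scan_fft_size comment in
+ * struct wmi_vdev_spectral_conf_cmd).
+ */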
+
+struct wmi_vdev_spectral_enable_cmd {
+	__le32 vdev_id;
+	__le32 trigger_cmd;
+	__le32 enable_cmd;
+} __packed;
+
+#define WMI_SPECTRAL_TRIGGER_CMD_TRIGGER  1
+#define WMI_SPECTRAL_TRIGGER_CMD_CLEAR    2
+#define WMI_SPECTRAL_ENABLE_CMD_ENABLE    1
+#define WMI_SPECTRAL_ENABLE_CMD_DISABLE   2
+
+/* Beacon processing related command and event structures */
+struct wmi_bcn_tx_hdr {
+	__le32 vdev_id;
+	__le32 tx_rate;
+	__le32 tx_power;
+	__le32 bcn_len;
+} __packed;
+
+struct wmi_bcn_tx_cmd {
+	struct wmi_bcn_tx_hdr hdr;
+	u8 *bcn[0];
+} __packed;
+
+struct wmi_bcn_tx_arg {
+	u32 vdev_id;
+	u32 tx_rate;
+	u32 tx_power;
+	u32 bcn_len;
+	const void *bcn;
+};
+
+enum wmi_bcn_tx_ref_flags {
+	WMI_BCN_TX_REF_FLAG_DTIM_ZERO = 0x1,
+	WMI_BCN_TX_REF_FLAG_DELIVER_CAB = 0x2,
+};
+
+/* TODO: It is unclear why "no antenna" works while any other seemingly valid
+ * chainmask yields no beacons on the air at all.
+ */
+#define WMI_BCN_TX_REF_DEF_ANTENNA 0
+
+struct wmi_bcn_tx_ref_cmd {
+	__le32 vdev_id;
+	__le32 data_len;
+	/* physical address of the frame - dma pointer */
+	__le32 data_ptr;
+	/* id for host to track */
+	__le32 msdu_id;
+	/* frame ctrl to setup PPDU desc */
+	__le32 frame_control;
+	/* to control CABQ traffic: WMI_BCN_TX_REF_FLAG_ */
+	__le32 flags;
+	/* introduced in 10.2 */
+	__le32 antenna_mask;
+} __packed;
+
+/* Beacon filter */
+#define WMI_BCN_FILTER_ALL   0 /* Filter all beacons */
+#define WMI_BCN_FILTER_NONE  1 /* Pass all beacons */
+#define WMI_BCN_FILTER_RSSI  2 /* Pass Beacons RSSI >= RSSI threshold */
+#define WMI_BCN_FILTER_BSSID 3 /* Pass Beacons with matching BSSID */
+#define WMI_BCN_FILTER_SSID  4 /* Pass Beacons with matching SSID */
+
+struct wmi_bcn_filter_rx_cmd {
+	/* Filter ID */
+	__le32 bcn_filter_id;
+	/* Filter type - wmi_bcn_filter */
+	__le32 bcn_filter;
+	/* Buffer len */
+	__le32 bcn_filter_len;
+	/* Filter info (threshold, BSSID, RSSI) */
+	u8 *bcn_filter_buf;
+} __packed;
+
+/* Capabilities and IEs to be passed to firmware */
+struct wmi_bcn_prb_info {
+	/* Capabilities */
+	__le32 caps;
+	/* ERP info */
+	__le32 erp;
+	/* Advanced capabilities */
+	/* HT capabilities */
+	/* HT Info */
+	/* ibss_dfs */
+	/* wpa Info */
+	/* rsn Info */
+	/* rrm info */
+	/* ath_ext */
+	/* app IE */
+} __packed;
+
+struct wmi_bcn_tmpl_cmd {
+	/* unique id identifying the VDEV, generated by the caller */
+	__le32 vdev_id;
+	/* TIM IE offset from the beginning of the template. */
+	__le32 tim_ie_offset;
+	/* beacon probe capabilities and IEs */
+	struct wmi_bcn_prb_info bcn_prb_info;
+	/* beacon buffer length */
+	__le32 buf_len;
+	/* variable length data */
+	u8 data[1];
+} __packed;
+
+struct wmi_prb_tmpl_cmd {
+	/* unique id identifying the VDEV, generated by the caller */
+	__le32 vdev_id;
+	/* beacon probe capabilities and IEs */
+	struct wmi_bcn_prb_info bcn_prb_info;
+	/* beacon buffer length */
+	__le32 buf_len;
+	/* Variable length data */
+	u8 data[1];
+} __packed;
+
+enum wmi_sta_ps_mode {
+	/* disable power save for the given STA VDEV */
+	WMI_STA_PS_MODE_DISABLED = 0,
+	/* enable power save for a given STA VDEV */
+	WMI_STA_PS_MODE_ENABLED = 1,
+};
+
+struct wmi_sta_powersave_mode_cmd {
+	/* unique id identifying the VDEV, generated by the caller */
+	__le32 vdev_id;
+
+	/*
+	 * Power save mode
+	 * (see enum wmi_sta_ps_mode)
+	 */
+	__le32 sta_ps_mode;
+} __packed;
+
+enum wmi_csa_offload_en {
+	WMI_CSA_OFFLOAD_DISABLE = 0,
+	WMI_CSA_OFFLOAD_ENABLE = 1,
+};
+
+struct wmi_csa_offload_enable_cmd {
+	__le32 vdev_id;
+	__le32 csa_offload_enable;
+} __packed;
+
+struct wmi_csa_offload_chanswitch_cmd {
+	__le32 vdev_id;
+	struct wmi_channel chan;
+} __packed;
+
+/*
+ * This parameter controls the policy for retrieving frames from AP while the
+ * STA is in sleep state.
+ *
+ * Only takes effect if the sta_ps_mode is enabled.
+ */
+enum wmi_sta_ps_param_rx_wake_policy {
+	/*
+	 * Wake up whenever there is RX activity on the VDEV. In this mode
+	 * the power save SM (state machine) will come out of sleep by either
+	 * sending a null frame or a data frame (with PS==0) in response to the
+	 * TIM bit set in the received beacon frame from the AP.
+	 */
+	WMI_STA_PS_RX_WAKE_POLICY_WAKE = 0,
+
+	/*
+	 * Here the power save state machine will not wake up in response to
+	 * the TIM bit; instead it will send a PS-Poll or a UAPSD trigger based
+	 * on the UAPSD configuration set up by the WMISET_PS_SET_UAPSD WMI
+	 * command. When all access categories are delivery-enabled, the
+	 * station will send a UAPSD trigger frame, otherwise it will send a
+	 * PS-Poll.
+	 */
+	WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD = 1,
+};
+
+/*
+ * Number of tx frames/beacon that cause the power save SM to wake up.
+ *
+ * Value 1 causes the SM to wake up for every TX. Value 0 has a special
+ * meaning: it causes the SM to never wake up, which is useful if you want
+ * to keep the system asleep all the time for some kind of test mode. The
+ * host can change this parameter at any time; it takes effect at the next
+ * tx frame.
+ */
+enum wmi_sta_ps_param_tx_wake_threshold {
+	WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER = 0,
+	WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS = 1,
+
+	/*
+	 * Values greater than one indicate how many TX attempts per beacon
+	 * interval are made before the STA will wake up.
+	 */
+};
+
+/*
+ * The maximum number of PS-Poll frames the FW will send in response to
+ * traffic advertised in TIM before waking up (by sending a null frame with PS
+ * = 0). Value 0 has a special meaning: there is no maximum count and the FW
+ * will send as many PS-Poll as are necessary to retrieve buffered BU. This
+ * parameter is used when the RX wake policy is
+ * WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD and ignored when the RX wake
+ * policy is WMI_STA_PS_RX_WAKE_POLICY_WAKE.
+ */
+enum wmi_sta_ps_param_pspoll_count {
+	WMI_STA_PS_PSPOLL_COUNT_NO_MAX = 0,
+	/*
+	 * Values greater than 0 indicate the maximum number of PS-Poll frames
+	 * FW will send before waking up.
+	 */
+
+	/* When u-APSD is enabled the firmware will be very reluctant to exit
+	 * STA PS. This could result in very poor Rx performance with STA doing
+	 * PS-Poll for each and every buffered frame. This value is a bit
+	 * arbitrary.
+	 */
+	WMI_STA_PS_PSPOLL_COUNT_UAPSD = 3,
+};
+
+/*
+ * This will include the delivery and trigger enabled state for every AC.
+ * This is the negotiated state with the AP. The host MLME needs to set this
+ * based on the AP capability and the state set in the association request by
+ * the station MLME. Lower 8 bits of the value specify the UAPSD configuration.
+ */
+#define WMI_UAPSD_AC_TYPE_DELI 0
+#define WMI_UAPSD_AC_TYPE_TRIG 1
+
+#define WMI_UAPSD_AC_BIT_MASK(ac, type) \
+	((type ==  WMI_UAPSD_AC_TYPE_DELI) ? (1<<(ac<<1)) : (1<<((ac<<1)+1)))
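+
+/* For illustration: WMI_UAPSD_AC_BIT_MASK(1, WMI_UAPSD_AC_TYPE_DELI)
+ * evaluates to (1 << 2), i.e. WMI_STA_PS_UAPSD_AC1_DELIVERY_EN below, and
+ * WMI_UAPSD_AC_BIT_MASK(2, WMI_UAPSD_AC_TYPE_TRIG) to (1 << 5), i.e.
+ * WMI_STA_PS_UAPSD_AC2_TRIGGER_EN.
+ */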
+
+enum wmi_sta_ps_param_uapsd {
+	WMI_STA_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
+	WMI_STA_PS_UAPSD_AC0_TRIGGER_EN  = (1 << 1),
+	WMI_STA_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2),
+	WMI_STA_PS_UAPSD_AC1_TRIGGER_EN  = (1 << 3),
+	WMI_STA_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4),
+	WMI_STA_PS_UAPSD_AC2_TRIGGER_EN  = (1 << 5),
+	WMI_STA_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6),
+	WMI_STA_PS_UAPSD_AC3_TRIGGER_EN  = (1 << 7),
+};
+
+#define WMI_STA_UAPSD_MAX_INTERVAL_MSEC UINT_MAX
+
+struct wmi_sta_uapsd_auto_trig_param {
+	__le32 wmm_ac;
+	__le32 user_priority;
+	__le32 service_interval;
+	__le32 suspend_interval;
+	__le32 delay_interval;
+};
+
+struct wmi_sta_uapsd_auto_trig_cmd_fixed_param {
+	__le32 vdev_id;
+	struct wmi_mac_addr peer_macaddr;
+	__le32 num_ac;
+};
+
+struct wmi_sta_uapsd_auto_trig_arg {
+	u32 wmm_ac;
+	u32 user_priority;
+	u32 service_interval;
+	u32 suspend_interval;
+	u32 delay_interval;
+};
+
+enum wmi_sta_powersave_param {
+	/*
+	 * Controls how frames are retrieved from AP while STA is sleeping
+	 *
+	 * (see enum wmi_sta_ps_param_rx_wake_policy)
+	 */
+	WMI_STA_PS_PARAM_RX_WAKE_POLICY = 0,
+
+	/*
+	 * The STA will go active after this many TX frames
+	 *
+	 * (see enum wmi_sta_ps_param_tx_wake_threshold)
+	 */
+	WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD = 1,
+
+	/*
+	 * Number of PS-Poll to send before STA wakes up
+	 *
+	 * (see enum wmi_sta_ps_param_pspoll_count)
+	 *
+	 */
+	WMI_STA_PS_PARAM_PSPOLL_COUNT = 2,
+
+	/*
+	 * TX/RX inactivity time in msec before going to sleep.
+	 *
+	 * The power save SM will monitor tx/rx activity on the VDEV; if there
+	 * is no activity for the number of msec given by this parameter, the
+	 * power save SM will go to sleep.
+	 */
+	WMI_STA_PS_PARAM_INACTIVITY_TIME = 3,
+
+	/*
+	 * Set uapsd configuration.
+	 *
+	 * (see enum wmi_sta_ps_param_uapsd)
+	 */
+	WMI_STA_PS_PARAM_UAPSD = 4,
+};
+
+struct wmi_sta_powersave_param_cmd {
+	__le32 vdev_id;
+	__le32 param_id; /* %WMI_STA_PS_PARAM_ */
+	__le32 param_value;
+} __packed;
+
+/* No MIMO power save */
+#define WMI_STA_MIMO_PS_MODE_DISABLE
+/* mimo powersave mode static*/
+#define WMI_STA_MIMO_PS_MODE_STATIC
+/* mimo powersave mode dynamic */
+#define WMI_STA_MIMO_PS_MODE_DYNAMIC
+
+struct wmi_sta_mimo_ps_mode_cmd {
+	/* unique id identifying the VDEV, generated by the caller */
+	__le32 vdev_id;
+	/* mimo powersave mode as defined above */
+	__le32 mimo_pwrsave_mode;
+} __packed;
+
+/* U-APSD configuration of peer station from (re)assoc request and TSPECs */
+enum wmi_ap_ps_param_uapsd {
+	WMI_AP_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
+	WMI_AP_PS_UAPSD_AC0_TRIGGER_EN  = (1 << 1),
+	WMI_AP_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2),
+	WMI_AP_PS_UAPSD_AC1_TRIGGER_EN  = (1 << 3),
+	WMI_AP_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4),
+	WMI_AP_PS_UAPSD_AC2_TRIGGER_EN  = (1 << 5),
+	WMI_AP_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6),
+	WMI_AP_PS_UAPSD_AC3_TRIGGER_EN  = (1 << 7),
+};
+
+/* U-APSD maximum service period of peer station */
+enum wmi_ap_ps_peer_param_max_sp {
+	WMI_AP_PS_PEER_PARAM_MAX_SP_UNLIMITED = 0,
+	WMI_AP_PS_PEER_PARAM_MAX_SP_2 = 1,
+	WMI_AP_PS_PEER_PARAM_MAX_SP_4 = 2,
+	WMI_AP_PS_PEER_PARAM_MAX_SP_6 = 3,
+	MAX_WMI_AP_PS_PEER_PARAM_MAX_SP,
+};
+
+/*
+ * AP power save parameter
+ * Set a power save specific parameter for a peer station
+ */
+enum wmi_ap_ps_peer_param {
+	/* Set uapsd configuration for a given peer.
+	 *
+	 * Include the delivery and trigger enabled state for every AC.
+	 * The host MLME needs to set this based on the AP capability and the
+	 * station's request as set in the association request received from
+	 * the station.
+	 *
+	 * Lower 8 bits of the value specify the UAPSD configuration.
+	 *
+	 * (see enum wmi_ap_ps_param_uapsd)
+	 * The default value is 0.
+	 */
+	WMI_AP_PS_PEER_PARAM_UAPSD = 0,
+
+	/*
+	 * Set the service period for a UAPSD capable station
+	 *
+	 * The service period from the WME IE in the (re)assoc request frame.
+	 *
+	 * (see enum wmi_ap_ps_peer_param_max_sp)
+	 */
+	WMI_AP_PS_PEER_PARAM_MAX_SP = 1,
+
+	/* Time in seconds for aging out buffered frames for STA in PS */
+	WMI_AP_PS_PEER_PARAM_AGEOUT_TIME = 2,
+};
+
+struct wmi_ap_ps_peer_cmd {
+	/* unique id identifying the VDEV, generated by the caller */
+	__le32 vdev_id;
+
+	/* peer MAC address */
+	struct wmi_mac_addr peer_macaddr;
+
+	/* AP powersave param (see enum wmi_ap_ps_peer_param) */
+	__le32 param_id;
+
+	/* AP powersave param value */
+	__le32 param_value;
+} __packed;
+
+/* 128 clients = 4 words */
+#define WMI_TIM_BITMAP_ARRAY_SIZE 4
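+/* i.e. 4 words x 32 bits = 128 TIM bits, one per supported client */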
+
+struct wmi_tim_info {
+	__le32 tim_len;
+	__le32 tim_mcast;
+	__le32 tim_bitmap[WMI_TIM_BITMAP_ARRAY_SIZE];
+	__le32 tim_changed;
+	__le32 tim_num_ps_pending;
+} __packed;
+
+struct wmi_tim_info_arg {
+	__le32 tim_len;
+	__le32 tim_mcast;
+	const __le32 *tim_bitmap;
+	__le32 tim_changed;
+	__le32 tim_num_ps_pending;
+} __packed;
+
+/* Maximum number of NOA Descriptors supported */
+#define WMI_P2P_MAX_NOA_DESCRIPTORS 4
+#define WMI_P2P_OPPPS_ENABLE_BIT	BIT(0)
+#define WMI_P2P_OPPPS_CTWINDOW_OFFSET	1
+#define WMI_P2P_NOA_CHANGED_BIT	BIT(0)
+
+struct wmi_p2p_noa_info {
+	/* Bit 0 - Flag to indicate an update in NOA schedule
+	   Bits 7-1 - Reserved */
+	u8 changed;
+	/* NOA index */
+	u8 index;
+	/* Bit 0 - Opp PS state of the AP
+	   Bits 1-7 - Ctwindow in TUs */
+	u8 ctwindow_oppps;
+	/* Number of NOA descriptors */
+	u8 num_descriptors;
+
+	struct wmi_p2p_noa_descriptor descriptors[WMI_P2P_MAX_NOA_DESCRIPTORS];
+} __packed;
+
+struct wmi_bcn_info {
+	struct wmi_tim_info tim_info;
+	struct wmi_p2p_noa_info p2p_noa_info;
+} __packed;
+
+struct wmi_host_swba_event {
+	__le32 vdev_map;
+	struct wmi_bcn_info bcn_info[0];
+} __packed;
+
+/* 16 words = 512 client + 1 word = for guard */
+#define WMI_10_4_TIM_BITMAP_ARRAY_SIZE 17
+
+struct wmi_10_4_tim_info {
+	__le32 tim_len;
+	__le32 tim_mcast;
+	__le32 tim_bitmap[WMI_10_4_TIM_BITMAP_ARRAY_SIZE];
+	__le32 tim_changed;
+	__le32 tim_num_ps_pending;
+} __packed;
+
+#define WMI_10_4_P2P_MAX_NOA_DESCRIPTORS 1
+
+struct wmi_10_4_p2p_noa_info {
+	/* Bit 0 - Flag to indicate an update in NOA schedule
+	 * Bits 7-1 - Reserved
+	 */
+	u8 changed;
+	/* NOA index */
+	u8 index;
+	/* Bit 0 - Opp PS state of the AP
+	 * Bits 1-7 - Ctwindow in TUs
+	 */
+	u8 ctwindow_oppps;
+	/* Number of NOA descriptors */
+	u8 num_descriptors;
+
+	struct wmi_p2p_noa_descriptor
+		noa_descriptors[WMI_10_4_P2P_MAX_NOA_DESCRIPTORS];
+} __packed;
+
+struct wmi_10_4_bcn_info {
+	struct wmi_10_4_tim_info tim_info;
+	struct wmi_10_4_p2p_noa_info p2p_noa_info;
+} __packed;
+
+struct wmi_10_4_host_swba_event {
+	__le32 vdev_map;
+	struct wmi_10_4_bcn_info bcn_info[0];
+} __packed;
+
+#define WMI_MAX_AP_VDEV 16
+
+struct wmi_tbtt_offset_event {
+	__le32 vdev_map;
+	__le32 tbttoffset_list[WMI_MAX_AP_VDEV];
+} __packed;
+
+struct wmi_peer_create_cmd {
+	__le32 vdev_id;
+	struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+enum wmi_peer_type {
+	WMI_PEER_TYPE_DEFAULT = 0,
+	WMI_PEER_TYPE_BSS = 1,
+	WMI_PEER_TYPE_TDLS = 2,
+};
+
+struct wmi_peer_delete_cmd {
+	__le32 vdev_id;
+	struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_peer_flush_tids_cmd {
+	__le32 vdev_id;
+	struct wmi_mac_addr peer_macaddr;
+	__le32 peer_tid_bitmap;
+} __packed;
+
+struct wmi_fixed_rate {
+	/*
+	 * rate mode: 0: disable fixed rate (auto rate)
+	 *   1: legacy (non 11n) rate specified as ieee rate 2*Mbps
+	 *   2: ht20 11n rate specified as mcs index
+	 *   3: ht40 11n rate specified as mcs index
+	 */
+	__le32  rate_mode;
+	/*
+	 * 4 rate values for 4 rate series. series 0 is stored in byte 0 (LSB)
+	 * and series 3 is stored at byte 3 (MSB)
+	 */
+	__le32  rate_series;
+	/*
+	 * 4 retry counts for 4 rate series. retry count for rate 0 is stored
+	 * in byte 0 (LSB) and retry count for rate 3 is stored at byte 3
+	 * (MSB)
+	 */
+	__le32  rate_retries;
+} __packed;
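+/* Layout illustration (derived from the comments above): for rate series i
+ * (0..3), the rate lives in byte i of rate_series and its retry count in
+ * byte i of rate_retries, e.g. series 2 occupies bits 23:16 of each word.
+ */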
+
+struct wmi_peer_fixed_rate_cmd {
+	/* unique id identifying the VDEV, generated by the caller */
+	__le32 vdev_id;
+	/* peer MAC address */
+	struct wmi_mac_addr peer_macaddr;
+	/* fixed rate */
+	struct wmi_fixed_rate peer_fixed_rate;
+} __packed;
+
+#define WMI_MGMT_TID    17
+
+struct wmi_addba_clear_resp_cmd {
+	/* unique id identifying the VDEV, generated by the caller */
+	__le32 vdev_id;
+	/* peer MAC address */
+	struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_addba_send_cmd {
+	/* unique id identifying the VDEV, generated by the caller */
+	__le32 vdev_id;
+	/* peer MAC address */
+	struct wmi_mac_addr peer_macaddr;
+	/* Tid number */
+	__le32 tid;
+	/* Buffer/Window size*/
+	__le32 buffersize;
+} __packed;
+
+struct wmi_delba_send_cmd {
+	/* unique id identifying the VDEV, generated by the caller */
+	__le32 vdev_id;
+	/* peer MAC address */
+	struct wmi_mac_addr peer_macaddr;
+	/* Tid number */
+	__le32 tid;
+	/* Is Initiator */
+	__le32 initiator;
+	/* Reason code */
+	__le32 reasoncode;
+} __packed;
+
+struct wmi_addba_setresponse_cmd {
+	/* unique id identifying the vdev, generated by the caller */
+	__le32 vdev_id;
+	/* peer mac address */
+	struct wmi_mac_addr peer_macaddr;
+	/* Tid number */
+	__le32 tid;
+	/* status code */
+	__le32 statuscode;
+} __packed;
+
+struct wmi_send_singleamsdu_cmd {
+	/* unique id identifying the vdev, generated by the caller */
+	__le32 vdev_id;
+	/* peer mac address */
+	struct wmi_mac_addr peer_macaddr;
+	/* Tid number */
+	__le32 tid;
+} __packed;
+
+enum wmi_peer_smps_state {
+	WMI_PEER_SMPS_PS_NONE = 0x0,
+	WMI_PEER_SMPS_STATIC  = 0x1,
+	WMI_PEER_SMPS_DYNAMIC = 0x2
+};
+
+enum wmi_peer_chwidth {
+	WMI_PEER_CHWIDTH_20MHZ = 0,
+	WMI_PEER_CHWIDTH_40MHZ = 1,
+	WMI_PEER_CHWIDTH_80MHZ = 2,
+};
+
+enum wmi_peer_param {
+	WMI_PEER_SMPS_STATE = 0x1, /* see %wmi_peer_smps_state */
+	WMI_PEER_AMPDU      = 0x2,
+	WMI_PEER_AUTHORIZE  = 0x3,
+	WMI_PEER_CHAN_WIDTH = 0x4,
+	WMI_PEER_NSS        = 0x5,
+	WMI_PEER_USE_4ADDR  = 0x6,
+	WMI_PEER_DUMMY_VAR  = 0xff, /* dummy parameter for STA PS workaround */
+};
+
+struct wmi_peer_set_param_cmd {
+	__le32 vdev_id;
+	struct wmi_mac_addr peer_macaddr;
+	__le32 param_id;
+	__le32 param_value;
+} __packed;
+
+#define MAX_SUPPORTED_RATES 128
+
+struct wmi_rate_set {
+	/* total number of rates */
+	__le32 num_rates;
+	/*
+	 * rates (each 8bit value) packed into a 32 bit word.
+	 * the rates are filled from least significant byte to most
+	 * significant byte.
+	 */
+	__le32 rates[(MAX_SUPPORTED_RATES/4)+1];
+} __packed;
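+/* Packing illustration (derived from the comment above): rate i occupies
+ * byte (i % 4) of word rates[i / 4]; e.g. rate 5 is the second byte of
+ * rates[1]. MAX_SUPPORTED_RATES/4 + 1 = 33 words are reserved.
+ */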
+
+struct wmi_rate_set_arg {
+	unsigned int num_rates;
+	u8 rates[MAX_SUPPORTED_RATES];
+};
+
+/*
+ * NOTE: It would be a good idea to represent the Tx MCS
+ * info in one word and Rx in another word. This is split
+ * into multiple words for convenience.
+ */
+struct wmi_vht_rate_set {
+	__le32 rx_max_rate; /* Max Rx data rate */
+	__le32 rx_mcs_set;  /* Negotiated RX VHT rates */
+	__le32 tx_max_rate; /* Max Tx data rate */
+	__le32 tx_mcs_set;  /* Negotiated TX VHT rates */
+} __packed;
+
+struct wmi_vht_rate_set_arg {
+	u32 rx_max_rate;
+	u32 rx_mcs_set;
+	u32 tx_max_rate;
+	u32 tx_mcs_set;
+};
+
+struct wmi_peer_set_rates_cmd {
+	/* peer MAC address */
+	struct wmi_mac_addr peer_macaddr;
+	/* legacy rate set */
+	struct wmi_rate_set peer_legacy_rates;
+	/* ht rate set */
+	struct wmi_rate_set peer_ht_rates;
+} __packed;
+
+struct wmi_peer_set_q_empty_callback_cmd {
+	/* unique id identifying the VDEV, generated by the caller */
+	__le32 vdev_id;
+	/* peer MAC address */
+	struct wmi_mac_addr peer_macaddr;
+	__le32 callback_enable;
+} __packed;
+
+#define WMI_PEER_AUTH           0x00000001
+#define WMI_PEER_QOS            0x00000002
+#define WMI_PEER_NEED_PTK_4_WAY 0x00000004
+#define WMI_PEER_NEED_GTK_2_WAY 0x00000010
+#define WMI_PEER_APSD           0x00000800
+#define WMI_PEER_HT             0x00001000
+#define WMI_PEER_40MHZ          0x00002000
+#define WMI_PEER_STBC           0x00008000
+#define WMI_PEER_LDPC           0x00010000
+#define WMI_PEER_DYN_MIMOPS     0x00020000
+#define WMI_PEER_STATIC_MIMOPS  0x00040000
+#define WMI_PEER_SPATIAL_MUX    0x00200000
+#define WMI_PEER_VHT            0x02000000
+#define WMI_PEER_80MHZ          0x04000000
+#define WMI_PEER_VHT_2G         0x08000000
+
+/*
+ * Peer rate capabilities.
+ *
+ * This is of interest to the ratecontrol
+ * module which resides in the firmware. The bit definitions are
+ * consistent with those defined in if_athrate.c.
+ */
+#define WMI_RC_DS_FLAG          0x01
+#define WMI_RC_CW40_FLAG        0x02
+#define WMI_RC_SGI_FLAG         0x04
+#define WMI_RC_HT_FLAG          0x08
+#define WMI_RC_RTSCTS_FLAG      0x10
+#define WMI_RC_TX_STBC_FLAG     0x20
+#define WMI_RC_RX_STBC_FLAG     0xC0
+#define WMI_RC_RX_STBC_FLAG_S   6
+#define WMI_RC_WEP_TKIP_FLAG    0x100
+#define WMI_RC_TS_FLAG          0x200
+#define WMI_RC_UAPSD_FLAG       0x400
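+/* e.g. the two-bit RX STBC field can be read as
+ * (peer_rate_caps & WMI_RC_RX_STBC_FLAG) >> WMI_RC_RX_STBC_FLAG_S
+ * (illustrative expression, not a helper defined in this header).
+ */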
+
+/* Maximum listen interval supported by hw in units of beacon interval */
+#define ATH10K_MAX_HW_LISTEN_INTERVAL 5
+
+struct wmi_common_peer_assoc_complete_cmd {
+	struct wmi_mac_addr peer_macaddr;
+	__le32 vdev_id;
+	__le32 peer_new_assoc; /* 1=assoc, 0=reassoc */
+	__le32 peer_associd; /* 16 LSBs */
+	__le32 peer_flags;
+	__le32 peer_caps; /* 16 LSBs */
+	__le32 peer_listen_intval;
+	__le32 peer_ht_caps;
+	__le32 peer_max_mpdu;
+	__le32 peer_mpdu_density; /* 0..16 */
+	__le32 peer_rate_caps;
+	struct wmi_rate_set peer_legacy_rates;
+	struct wmi_rate_set peer_ht_rates;
+	__le32 peer_nss; /* num of spatial streams */
+	__le32 peer_vht_caps;
+	__le32 peer_phymode;
+	struct wmi_vht_rate_set peer_vht_rates;
+};
+
+struct wmi_main_peer_assoc_complete_cmd {
+	struct wmi_common_peer_assoc_complete_cmd cmd;
+
+	/* HT Operation Element of the peer. Five bytes packed into a
+	 * 2-element INT32 array and filled from lsb to msb. */
+	__le32 peer_ht_info[2];
+} __packed;
+
+struct wmi_10_1_peer_assoc_complete_cmd {
+	struct wmi_common_peer_assoc_complete_cmd cmd;
+} __packed;
+
+#define WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX_LSB 0
+#define WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX_MASK 0x0f
+#define WMI_PEER_ASSOC_INFO0_MAX_NSS_LSB 4
+#define WMI_PEER_ASSOC_INFO0_MAX_NSS_MASK 0xf0
+
+struct wmi_10_2_peer_assoc_complete_cmd {
+	struct wmi_common_peer_assoc_complete_cmd cmd;
+	__le32 info0; /* WMI_PEER_ASSOC_INFO0_ */
+} __packed;
+
+struct wmi_peer_assoc_complete_arg {
+	u8 addr[ETH_ALEN];
+	u32 vdev_id;
+	bool peer_reassoc;
+	u16 peer_aid;
+	u32 peer_flags; /* see %WMI_PEER_ */
+	u16 peer_caps;
+	u32 peer_listen_intval;
+	u32 peer_ht_caps;
+	u32 peer_max_mpdu;
+	u32 peer_mpdu_density; /* 0..16 */
+	u32 peer_rate_caps; /* see %WMI_RC_ */
+	struct wmi_rate_set_arg peer_legacy_rates;
+	struct wmi_rate_set_arg peer_ht_rates;
+	u32 peer_num_spatial_streams;
+	u32 peer_vht_caps;
+	enum wmi_phy_mode peer_phymode;
+	struct wmi_vht_rate_set_arg peer_vht_rates;
+};
+
+struct wmi_peer_add_wds_entry_cmd {
+	/* peer MAC address */
+	struct wmi_mac_addr peer_macaddr;
+	/* wds MAC addr */
+	struct wmi_mac_addr wds_macaddr;
+} __packed;
+
+struct wmi_peer_remove_wds_entry_cmd {
+	/* wds MAC addr */
+	struct wmi_mac_addr wds_macaddr;
+} __packed;
+
+struct wmi_peer_q_empty_callback_event {
+	/* peer MAC address */
+	struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+/*
+ * Channel info WMI event
+ */
+struct wmi_chan_info_event {
+	__le32 err_code;
+	__le32 freq;
+	__le32 cmd_flags;
+	__le32 noise_floor;
+	__le32 rx_clear_count;
+	__le32 cycle_count;
+} __packed;
+
+struct wmi_10_4_chan_info_event {
+	__le32 err_code;
+	__le32 freq;
+	__le32 cmd_flags;
+	__le32 noise_floor;
+	__le32 rx_clear_count;
+	__le32 cycle_count;
+	__le32 chan_tx_pwr_range;
+	__le32 chan_tx_pwr_tp;
+	__le32 rx_frame_count;
+} __packed;
+
+struct wmi_peer_sta_kickout_event {
+	struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+#define WMI_CHAN_INFO_FLAG_COMPLETE BIT(0)
+#define WMI_CHAN_INFO_FLAG_PRE_COMPLETE BIT(1)
+
+/* Beacon filter wmi command info */
+#define BCN_FLT_MAX_SUPPORTED_IES	256
+#define BCN_FLT_MAX_ELEMS_IE_LIST	(BCN_FLT_MAX_SUPPORTED_IES / 32)
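+/* i.e. 256 supported IEs / 32 bits per word = 8 words in ie_map[] below */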
+
+struct bss_bcn_stats {
+	__le32 vdev_id;
+	__le32 bss_bcnsdropped;
+	__le32 bss_bcnsdelivered;
+} __packed;
+
+struct bcn_filter_stats {
+	__le32 bcns_dropped;
+	__le32 bcns_delivered;
+	__le32 activefilters;
+	struct bss_bcn_stats bss_stats;
+} __packed;
+
+struct wmi_add_bcn_filter_cmd {
+	u32 vdev_id;
+	u32 ie_map[BCN_FLT_MAX_ELEMS_IE_LIST];
+} __packed;
+
+enum wmi_sta_keepalive_method {
+	WMI_STA_KEEPALIVE_METHOD_NULL_FRAME = 1,
+	WMI_STA_KEEPALIVE_METHOD_UNSOLICITATED_ARP_RESPONSE = 2,
+};
+
+#define WMI_STA_KEEPALIVE_INTERVAL_DISABLE 0
+
+/* Firmware crashes if keepalive interval exceeds this limit */
+#define WMI_STA_KEEPALIVE_INTERVAL_MAX_SECONDS 0xffff
+
+/* note: ip4 addresses are in network byte order, i.e. big endian */
+struct wmi_sta_keepalive_arp_resp {
+	__be32 src_ip4_addr;
+	__be32 dest_ip4_addr;
+	struct wmi_mac_addr dest_mac_addr;
+} __packed;
+
+struct wmi_sta_keepalive_cmd {
+	__le32 vdev_id;
+	__le32 enabled;
+	__le32 method; /* WMI_STA_KEEPALIVE_METHOD_ */
+	__le32 interval; /* in seconds */
+	struct wmi_sta_keepalive_arp_resp arp_resp;
+} __packed;
+
+struct wmi_sta_keepalive_arg {
+	u32 vdev_id;
+	u32 enabled;
+	u32 method;
+	u32 interval;
+	__be32 src_ip4_addr;
+	__be32 dest_ip4_addr;
+	const u8 dest_mac_addr[ETH_ALEN];
+};
+
+enum wmi_force_fw_hang_type {
+	WMI_FORCE_FW_HANG_ASSERT = 1,
+	WMI_FORCE_FW_HANG_NO_DETECT,
+	WMI_FORCE_FW_HANG_CTRL_EP_FULL,
+	WMI_FORCE_FW_HANG_EMPTY_POINT,
+	WMI_FORCE_FW_HANG_STACK_OVERFLOW,
+	WMI_FORCE_FW_HANG_INFINITE_LOOP,
+};
+
+#define WMI_FORCE_FW_HANG_RANDOM_TIME 0xFFFFFFFF
+
+struct wmi_force_fw_hang_cmd {
+	__le32 type;
+	__le32 delay_ms;
+} __packed;
+
+enum ath10k_dbglog_level {
+	ATH10K_DBGLOG_LEVEL_VERBOSE = 0,
+	ATH10K_DBGLOG_LEVEL_INFO = 1,
+	ATH10K_DBGLOG_LEVEL_WARN = 2,
+	ATH10K_DBGLOG_LEVEL_ERR = 3,
+};
+
+/* VAP ids to enable dbglog */
+#define ATH10K_DBGLOG_CFG_VAP_LOG_LSB		0
+#define ATH10K_DBGLOG_CFG_VAP_LOG_MASK		0x0000ffff
+
+/* to enable dbglog in the firmware */
+#define ATH10K_DBGLOG_CFG_REPORTING_ENABLE_LSB	16
+#define ATH10K_DBGLOG_CFG_REPORTING_ENABLE_MASK	0x00010000
+
+/* timestamp resolution */
+#define ATH10K_DBGLOG_CFG_RESOLUTION_LSB	17
+#define ATH10K_DBGLOG_CFG_RESOLUTION_MASK	0x000E0000
+
+/* number of queued messages before sending them to the host */
+#define ATH10K_DBGLOG_CFG_REPORT_SIZE_LSB	20
+#define ATH10K_DBGLOG_CFG_REPORT_SIZE_MASK	0x0ff00000
+
+/*
+ * Log levels to enable. This defines the minimum level to enable, this is
+ * not a bitmask. See enum ath10k_dbglog_level for the values.
+ */
+#define ATH10K_DBGLOG_CFG_LOG_LVL_LSB		28
+#define ATH10K_DBGLOG_CFG_LOG_LVL_MASK		0x70000000
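+/* Example (illustrative): selecting ATH10K_DBGLOG_LEVEL_WARN corresponds to
+ * (2 << ATH10K_DBGLOG_CFG_LOG_LVL_LSB) & ATH10K_DBGLOG_CFG_LOG_LVL_MASK =
+ * 0x20000000 in config_enable.
+ */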
+
+/*
+ * Note: this is a cleaned up version of a struct firmware uses. For
+ * example, config_valid was hidden inside an array.
+ */
+struct wmi_dbglog_cfg_cmd {
+	/* bitmask to hold mod id config*/
+	__le32 module_enable;
+
+	/* see ATH10K_DBGLOG_CFG_ */
+	__le32 config_enable;
+
+	/* mask of module id bits to be changed */
+	__le32 module_valid;
+
+	/* mask of config bits to be changed, see ATH10K_DBGLOG_CFG_ */
+	__le32 config_valid;
+} __packed;
+
+enum wmi_roam_reason {
+	WMI_ROAM_REASON_BETTER_AP = 1,
+	WMI_ROAM_REASON_BEACON_MISS = 2,
+	WMI_ROAM_REASON_LOW_RSSI = 3,
+	WMI_ROAM_REASON_SUITABLE_AP_FOUND = 4,
+	WMI_ROAM_REASON_HO_FAILED = 5,
+
+	/* keep last */
+	WMI_ROAM_REASON_MAX,
+};
+
+struct wmi_roam_ev {
+	__le32 vdev_id;
+	__le32 reason;
+} __packed;
+
+#define ATH10K_FRAGMT_THRESHOLD_MIN	540
+#define ATH10K_FRAGMT_THRESHOLD_MAX	2346
+
+#define WMI_MAX_EVENT 0x1000
+/* Maximum number of pending TXed WMI packets */
+#define WMI_SKB_HEADROOM sizeof(struct wmi_cmd_hdr)
+
+/* By default disable power save for IBSS */
+#define ATH10K_DEFAULT_ATIM 0
+
+#define WMI_MAX_MEM_REQS 16
+
+struct wmi_scan_ev_arg {
+	__le32 event_type; /* %WMI_SCAN_EVENT_ */
+	__le32 reason; /* %WMI_SCAN_REASON_ */
+	__le32 channel_freq; /* only valid for WMI_SCAN_EVENT_FOREIGN_CHANNEL */
+	__le32 scan_req_id;
+	__le32 scan_id;
+	__le32 vdev_id;
+};
+
+struct wmi_mgmt_rx_ev_arg {
+	__le32 channel;
+	__le32 snr;
+	__le32 rate;
+	__le32 phy_mode;
+	__le32 buf_len;
+	__le32 status; /* %WMI_RX_STATUS_ */
+};
+
+struct wmi_ch_info_ev_arg {
+	__le32 err_code;
+	__le32 freq;
+	__le32 cmd_flags;
+	__le32 noise_floor;
+	__le32 rx_clear_count;
+	__le32 cycle_count;
+	__le32 chan_tx_pwr_range;
+	__le32 chan_tx_pwr_tp;
+	__le32 rx_frame_count;
+};
+
+struct wmi_vdev_start_ev_arg {
+	__le32 vdev_id;
+	__le32 req_id;
+	__le32 resp_type; /* %WMI_VDEV_RESP_ */
+	__le32 status;
+};
+
+struct wmi_peer_kick_ev_arg {
+	const u8 *mac_addr;
+};
+
+struct wmi_swba_ev_arg {
+	__le32 vdev_map;
+	struct wmi_tim_info_arg tim_info[WMI_MAX_AP_VDEV];
+	const struct wmi_p2p_noa_info *noa_info[WMI_MAX_AP_VDEV];
+};
+
+struct wmi_phyerr_ev_arg {
+	u32 tsf_timestamp;
+	u16 freq1;
+	u16 freq2;
+	u8 rssi_combined;
+	u8 chan_width_mhz;
+	u8 phy_err_code;
+	u16 nf_chains[4];
+	u32 buf_len;
+	const u8 *buf;
+	u8 hdr_len;
+};
+
+struct wmi_phyerr_hdr_arg {
+	u32 num_phyerrs;
+	u32 tsf_l32;
+	u32 tsf_u32;
+	u32 buf_len;
+	const void *phyerrs;
+};
+
+struct wmi_svc_rdy_ev_arg {
+	__le32 min_tx_power;
+	__le32 max_tx_power;
+	__le32 ht_cap;
+	__le32 vht_cap;
+	__le32 sw_ver0;
+	__le32 sw_ver1;
+	__le32 fw_build;
+	__le32 phy_capab;
+	__le32 num_rf_chains;
+	__le32 eeprom_rd;
+	__le32 num_mem_reqs;
+	const __le32 *service_map;
+	size_t service_map_len;
+	const struct wlan_host_mem_req *mem_reqs[WMI_MAX_MEM_REQS];
+};
+
+struct wmi_rdy_ev_arg {
+	__le32 sw_version;
+	__le32 abi_version;
+	__le32 status;
+	const u8 *mac_addr;
+};
+
+struct wmi_roam_ev_arg {
+	__le32 vdev_id;
+	__le32 reason;
+	__le32 rssi;
+};
+
+struct wmi_pdev_temperature_event {
+	/* temperature value in degrees Celsius */
+	__le32 temperature;
+} __packed;
+
+/* WOW structures */
+enum wmi_wow_wakeup_event {
+	WOW_BMISS_EVENT = 0,
+	WOW_BETTER_AP_EVENT,
+	WOW_DEAUTH_RECVD_EVENT,
+	WOW_MAGIC_PKT_RECVD_EVENT,
+	WOW_GTK_ERR_EVENT,
+	WOW_FOURWAY_HSHAKE_EVENT,
+	WOW_EAPOL_RECVD_EVENT,
+	WOW_NLO_DETECTED_EVENT,
+	WOW_DISASSOC_RECVD_EVENT,
+	WOW_PATTERN_MATCH_EVENT,
+	WOW_CSA_IE_EVENT,
+	WOW_PROBE_REQ_WPS_IE_EVENT,
+	WOW_AUTH_REQ_EVENT,
+	WOW_ASSOC_REQ_EVENT,
+	WOW_HTT_EVENT,
+	WOW_RA_MATCH_EVENT,
+	WOW_HOST_AUTO_SHUTDOWN_EVENT,
+	WOW_IOAC_MAGIC_EVENT,
+	WOW_IOAC_SHORT_EVENT,
+	WOW_IOAC_EXTEND_EVENT,
+	WOW_IOAC_TIMER_EVENT,
+	WOW_DFS_PHYERR_RADAR_EVENT,
+	WOW_BEACON_EVENT,
+	WOW_CLIENT_KICKOUT_EVENT,
+	WOW_EVENT_MAX,
+};
+
+#define C2S(x) case x: return #x
+
+static inline const char *wow_wakeup_event(enum wmi_wow_wakeup_event ev)
+{
+	switch (ev) {
+	C2S(WOW_BMISS_EVENT);
+	C2S(WOW_BETTER_AP_EVENT);
+	C2S(WOW_DEAUTH_RECVD_EVENT);
+	C2S(WOW_MAGIC_PKT_RECVD_EVENT);
+	C2S(WOW_GTK_ERR_EVENT);
+	C2S(WOW_FOURWAY_HSHAKE_EVENT);
+	C2S(WOW_EAPOL_RECVD_EVENT);
+	C2S(WOW_NLO_DETECTED_EVENT);
+	C2S(WOW_DISASSOC_RECVD_EVENT);
+	C2S(WOW_PATTERN_MATCH_EVENT);
+	C2S(WOW_CSA_IE_EVENT);
+	C2S(WOW_PROBE_REQ_WPS_IE_EVENT);
+	C2S(WOW_AUTH_REQ_EVENT);
+	C2S(WOW_ASSOC_REQ_EVENT);
+	C2S(WOW_HTT_EVENT);
+	C2S(WOW_RA_MATCH_EVENT);
+	C2S(WOW_HOST_AUTO_SHUTDOWN_EVENT);
+	C2S(WOW_IOAC_MAGIC_EVENT);
+	C2S(WOW_IOAC_SHORT_EVENT);
+	C2S(WOW_IOAC_EXTEND_EVENT);
+	C2S(WOW_IOAC_TIMER_EVENT);
+	C2S(WOW_DFS_PHYERR_RADAR_EVENT);
+	C2S(WOW_BEACON_EVENT);
+	C2S(WOW_CLIENT_KICKOUT_EVENT);
+	C2S(WOW_EVENT_MAX);
+	default:
+		return NULL;
+	}
+}
+
+enum wmi_wow_wake_reason {
+	WOW_REASON_UNSPECIFIED = -1,
+	WOW_REASON_NLOD = 0,
+	WOW_REASON_AP_ASSOC_LOST,
+	WOW_REASON_LOW_RSSI,
+	WOW_REASON_DEAUTH_RECVD,
+	WOW_REASON_DISASSOC_RECVD,
+	WOW_REASON_GTK_HS_ERR,
+	WOW_REASON_EAP_REQ,
+	WOW_REASON_FOURWAY_HS_RECV,
+	WOW_REASON_TIMER_INTR_RECV,
+	WOW_REASON_PATTERN_MATCH_FOUND,
+	WOW_REASON_RECV_MAGIC_PATTERN,
+	WOW_REASON_P2P_DISC,
+	WOW_REASON_WLAN_HB,
+	WOW_REASON_CSA_EVENT,
+	WOW_REASON_PROBE_REQ_WPS_IE_RECV,
+	WOW_REASON_AUTH_REQ_RECV,
+	WOW_REASON_ASSOC_REQ_RECV,
+	WOW_REASON_HTT_EVENT,
+	WOW_REASON_RA_MATCH,
+	WOW_REASON_HOST_AUTO_SHUTDOWN,
+	WOW_REASON_IOAC_MAGIC_EVENT,
+	WOW_REASON_IOAC_SHORT_EVENT,
+	WOW_REASON_IOAC_EXTEND_EVENT,
+	WOW_REASON_IOAC_TIMER_EVENT,
+	WOW_REASON_ROAM_HO,
+	WOW_REASON_DFS_PHYERR_RADADR_EVENT,
+	WOW_REASON_BEACON_RECV,
+	WOW_REASON_CLIENT_KICKOUT_EVENT,
+	WOW_REASON_DEBUG_TEST = 0xFF,
+};
+
+static inline const char *wow_reason(enum wmi_wow_wake_reason reason)
+{
+	switch (reason) {
+	C2S(WOW_REASON_UNSPECIFIED);
+	C2S(WOW_REASON_NLOD);
+	C2S(WOW_REASON_AP_ASSOC_LOST);
+	C2S(WOW_REASON_LOW_RSSI);
+	C2S(WOW_REASON_DEAUTH_RECVD);
+	C2S(WOW_REASON_DISASSOC_RECVD);
+	C2S(WOW_REASON_GTK_HS_ERR);
+	C2S(WOW_REASON_EAP_REQ);
+	C2S(WOW_REASON_FOURWAY_HS_RECV);
+	C2S(WOW_REASON_TIMER_INTR_RECV);
+	C2S(WOW_REASON_PATTERN_MATCH_FOUND);
+	C2S(WOW_REASON_RECV_MAGIC_PATTERN);
+	C2S(WOW_REASON_P2P_DISC);
+	C2S(WOW_REASON_WLAN_HB);
+	C2S(WOW_REASON_CSA_EVENT);
+	C2S(WOW_REASON_PROBE_REQ_WPS_IE_RECV);
+	C2S(WOW_REASON_AUTH_REQ_RECV);
+	C2S(WOW_REASON_ASSOC_REQ_RECV);
+	C2S(WOW_REASON_HTT_EVENT);
+	C2S(WOW_REASON_RA_MATCH);
+	C2S(WOW_REASON_HOST_AUTO_SHUTDOWN);
+	C2S(WOW_REASON_IOAC_MAGIC_EVENT);
+	C2S(WOW_REASON_IOAC_SHORT_EVENT);
+	C2S(WOW_REASON_IOAC_EXTEND_EVENT);
+	C2S(WOW_REASON_IOAC_TIMER_EVENT);
+	C2S(WOW_REASON_ROAM_HO);
+	C2S(WOW_REASON_DFS_PHYERR_RADADR_EVENT);
+	C2S(WOW_REASON_BEACON_RECV);
+	C2S(WOW_REASON_CLIENT_KICKOUT_EVENT);
+	C2S(WOW_REASON_DEBUG_TEST);
+	default:
+		return NULL;
+	}
+}
+
+#undef C2S
+
+struct wmi_wow_ev_arg {
+	u32 vdev_id;
+	u32 flag;
+	enum wmi_wow_wake_reason wake_reason;
+	u32 data_len;
+};
+
+#define WOW_MIN_PATTERN_SIZE	1
+#define WOW_MAX_PATTERN_SIZE	148
+#define WOW_MAX_PKT_OFFSET	128
+
+enum wmi_tdls_state {
+	WMI_TDLS_DISABLE,
+	WMI_TDLS_ENABLE_PASSIVE,
+	WMI_TDLS_ENABLE_ACTIVE,
+};
+
+enum wmi_tdls_peer_state {
+	WMI_TDLS_PEER_STATE_PEERING,
+	WMI_TDLS_PEER_STATE_CONNECTED,
+	WMI_TDLS_PEER_STATE_TEARDOWN,
+};
+
+struct wmi_tdls_peer_update_cmd_arg {
+	u32 vdev_id;
+	enum wmi_tdls_peer_state peer_state;
+	u8 addr[ETH_ALEN];
+};
+
+#define WMI_TDLS_MAX_SUPP_OPER_CLASSES 32
+
+struct wmi_tdls_peer_capab_arg {
+	u8 peer_uapsd_queues;
+	u8 peer_max_sp;
+	u32 buff_sta_support;
+	u32 off_chan_support;
+	u32 peer_curr_operclass;
+	u32 self_curr_operclass;
+	u32 peer_chan_len;
+	u32 peer_operclass_len;
+	u8 peer_operclass[WMI_TDLS_MAX_SUPP_OPER_CLASSES];
+	u32 is_peer_responder;
+	u32 pref_offchan_num;
+	u32 pref_offchan_bw;
+};
+
+enum wmi_txbf_conf {
+	WMI_TXBF_CONF_UNSUPPORTED,
+	WMI_TXBF_CONF_BEFORE_ASSOC,
+	WMI_TXBF_CONF_AFTER_ASSOC,
+};
+
+#define	WMI_CCA_DETECT_LEVEL_AUTO	0
+#define	WMI_CCA_DETECT_MARGIN_AUTO	0
+
+struct wmi_pdev_set_adaptive_cca_params {
+	__le32 enable;
+	__le32 cca_detect_level;
+	__le32 cca_detect_margin;
+} __packed;
+
+struct ath10k;
+struct ath10k_vif;
+struct ath10k_fw_stats_pdev;
+struct ath10k_fw_stats_peer;
+struct ath10k_fw_stats;
+
+int ath10k_wmi_attach(struct ath10k *ar);
+void ath10k_wmi_detach(struct ath10k *ar);
+void ath10k_wmi_free_host_mem(struct ath10k *ar);
+int ath10k_wmi_wait_for_service_ready(struct ath10k *ar);
+int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar);
+
+struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len);
+int ath10k_wmi_connect(struct ath10k *ar);
+
+int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
+int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
+			       u32 cmd_id);
+void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *);
+
+void ath10k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
+				     struct ath10k_fw_stats_pdev *dst);
+void ath10k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
+				   struct ath10k_fw_stats_pdev *dst);
+void ath10k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
+				   struct ath10k_fw_stats_pdev *dst);
+void ath10k_wmi_pull_pdev_stats_extra(const struct wmi_pdev_stats_extra *src,
+				      struct ath10k_fw_stats_pdev *dst);
+void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src,
+				struct ath10k_fw_stats_peer *dst);
+void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar,
+				    struct wmi_host_mem_chunks *chunks);
+void ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
+				      const struct wmi_start_scan_arg *arg);
+void ath10k_wmi_set_wmm_param(struct wmi_wmm_params *params,
+			      const struct wmi_wmm_params_arg *arg);
+void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
+				const struct wmi_channel_arg *arg);
+int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg);
+
+int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_dfs(struct ath10k *ar,
+			  struct wmi_phyerr_ev_arg *phyerr, u64 tsf);
+void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
+				    struct wmi_phyerr_ev_arg *phyerr,
+				    u64 tsf);
+void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_profile_match(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_debug_print(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
+					     struct sk_buff *skb);
+void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
+					     struct sk_buff *skb);
+void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar,
+					 struct sk_buff *skb);
+void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_delba_complete(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_addba_complete(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
+						struct sk_buff *skb);
+void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar, const void *phyerr_buf,
+				 int left_len, struct wmi_phyerr_ev_arg *arg);
+void ath10k_wmi_main_op_fw_stats_fill(struct ath10k *ar,
+				      struct ath10k_fw_stats *fw_stats,
+				      char *buf);
+void ath10k_wmi_10x_op_fw_stats_fill(struct ath10k *ar,
+				     struct ath10k_fw_stats *fw_stats,
+				     char *buf);
+size_t ath10k_wmi_fw_stats_num_peers(struct list_head *head);
+size_t ath10k_wmi_fw_stats_num_vdevs(struct list_head *head);
+void ath10k_wmi_10_4_op_fw_stats_fill(struct ath10k *ar,
+				      struct ath10k_fw_stats *fw_stats,
+				      char *buf);
+
+#endif /* _WMI_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c
new file mode 100644
index 0000000..8e02b38
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wow.c
@@ -0,0 +1,339 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mac.h"
+
+#include <net/mac80211.h>
+#include "hif.h"
+#include "core.h"
+#include "debug.h"
+#include "wmi.h"
+#include "wmi-ops.h"
+
+static const struct wiphy_wowlan_support ath10k_wowlan_support = {
+	.flags = WIPHY_WOWLAN_DISCONNECT |
+		 WIPHY_WOWLAN_MAGIC_PKT,
+	.pattern_min_len = WOW_MIN_PATTERN_SIZE,
+	.pattern_max_len = WOW_MAX_PATTERN_SIZE,
+	.max_pkt_offset = WOW_MAX_PKT_OFFSET,
+};
+
+static int ath10k_wow_vif_cleanup(struct ath10k_vif *arvif)
+{
+	struct ath10k *ar = arvif->ar;
+	int i, ret;
+
+	for (i = 0; i < WOW_EVENT_MAX; i++) {
+		ret = ath10k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0);
+		if (ret) {
+			ath10k_warn(ar, "failed to issue wow wakeup for event %s on vdev %i: %d\n",
+				    wow_wakeup_event(i), arvif->vdev_id, ret);
+			return ret;
+		}
+	}
+
+	for (i = 0; i < ar->wow.max_num_patterns; i++) {
+		ret = ath10k_wmi_wow_del_pattern(ar, arvif->vdev_id, i);
+		if (ret) {
+			ath10k_warn(ar, "failed to delete wow pattern %d for vdev %i: %d\n",
+				    i, arvif->vdev_id, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int ath10k_wow_cleanup(struct ath10k *ar)
+{
+	struct ath10k_vif *arvif;
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	list_for_each_entry(arvif, &ar->arvifs, list) {
+		ret = ath10k_wow_vif_cleanup(arvif);
+		if (ret) {
+			ath10k_warn(ar, "failed to clean wow wakeups on vdev %i: %d\n",
+				    arvif->vdev_id, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif,
+				      struct cfg80211_wowlan *wowlan)
+{
+	int ret, i;
+	unsigned long wow_mask = 0;
+	struct ath10k *ar = arvif->ar;
+	const struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
+	int pattern_id = 0;
+
+	/* Setup requested WOW features */
+	switch (arvif->vdev_type) {
+	case WMI_VDEV_TYPE_IBSS:
+		__set_bit(WOW_BEACON_EVENT, &wow_mask);
+		 /* fall through */
+	case WMI_VDEV_TYPE_AP:
+		__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
+		__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
+		__set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask);
+		__set_bit(WOW_AUTH_REQ_EVENT, &wow_mask);
+		__set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask);
+		__set_bit(WOW_HTT_EVENT, &wow_mask);
+		__set_bit(WOW_RA_MATCH_EVENT, &wow_mask);
+		break;
+	case WMI_VDEV_TYPE_STA:
+		if (wowlan->disconnect) {
+			__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
+			__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
+			__set_bit(WOW_BMISS_EVENT, &wow_mask);
+			__set_bit(WOW_CSA_IE_EVENT, &wow_mask);
+		}
+
+		if (wowlan->magic_pkt)
+			__set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);
+		break;
+	default:
+		break;
+	}
+
+	for (i = 0; i < wowlan->n_patterns; i++) {
+		u8 bitmask[WOW_MAX_PATTERN_SIZE] = {};
+		int j;
+
+		if (patterns[i].pattern_len > WOW_MAX_PATTERN_SIZE)
+			continue;
+
+		/* convert bytemask to bitmask */
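+		/* e.g. a cfg80211 mask byte of 0x05 marks pattern bytes 0 and
+		 * 2 as significant, so bitmask[0] and bitmask[2] become 0xff
+		 */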
+		for (j = 0; j < patterns[i].pattern_len; j++)
+			if (patterns[i].mask[j / 8] & BIT(j % 8))
+				bitmask[j] = 0xff;
+
+		ret = ath10k_wmi_wow_add_pattern(ar, arvif->vdev_id,
+						 pattern_id,
+						 patterns[i].pattern,
+						 bitmask,
+						 patterns[i].pattern_len,
+						 patterns[i].pkt_offset);
+		if (ret) {
+			ath10k_warn(ar, "failed to add pattern %i to vdev %i: %d\n",
+				    pattern_id,
+				    arvif->vdev_id, ret);
+			return ret;
+		}
+
+		pattern_id++;
+		__set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask);
+	}
+
+	for (i = 0; i < WOW_EVENT_MAX; i++) {
+		if (!test_bit(i, &wow_mask))
+			continue;
+		ret = ath10k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1);
+		if (ret) {
+			ath10k_warn(ar, "failed to enable wakeup event %s on vdev %i: %d\n",
+				    wow_wakeup_event(i), arvif->vdev_id, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int ath10k_wow_set_wakeups(struct ath10k *ar,
+				  struct cfg80211_wowlan *wowlan)
+{
+	struct ath10k_vif *arvif;
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	list_for_each_entry(arvif, &ar->arvifs, list) {
+		ret = ath10k_vif_wow_set_wakeups(arvif, wowlan);
+		if (ret) {
+			ath10k_warn(ar, "failed to set wow wakeups on vdev %i: %d\n",
+				    arvif->vdev_id, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int ath10k_wow_enable(struct ath10k *ar)
+{
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	reinit_completion(&ar->target_suspend);
+
+	ret = ath10k_wmi_wow_enable(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to issue wow enable: %d\n", ret);
+		return ret;
+	}
+
+	ret = wait_for_completion_timeout(&ar->target_suspend, 3 * HZ);
+	if (ret == 0) {
+		ath10k_warn(ar, "timed out while waiting for suspend completion\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static int ath10k_wow_wakeup(struct ath10k *ar)
+{
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	reinit_completion(&ar->wow.wakeup_completed);
+
+	ret = ath10k_wmi_wow_host_wakeup_ind(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to send wow wakeup indication: %d\n",
+			    ret);
+		return ret;
+	}
+
+	ret = wait_for_completion_timeout(&ar->wow.wakeup_completed, 3 * HZ);
+	if (ret == 0) {
+		ath10k_warn(ar, "timed out while waiting for wow wakeup completion\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
+			  struct cfg80211_wowlan *wowlan)
+{
+	struct ath10k *ar = hw->priv;
+	int ret;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
+			      ar->fw_features))) {
+		ret = 1;
+		goto exit;
+	}
+
+	ret = ath10k_wow_cleanup(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to clear wow wakeup events: %d\n",
+			    ret);
+		goto exit;
+	}
+
+	ret = ath10k_wow_set_wakeups(ar, wowlan);
+	if (ret) {
+		ath10k_warn(ar, "failed to set wow wakeup events: %d\n",
+			    ret);
+		goto cleanup;
+	}
+
+	ret = ath10k_wow_enable(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to start wow: %d\n", ret);
+		goto cleanup;
+	}
+
+	ret = ath10k_hif_suspend(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to suspend hif: %d\n", ret);
+		goto wakeup;
+	}
+
+	goto exit;
+
+wakeup:
+	ath10k_wow_wakeup(ar);
+
+cleanup:
+	ath10k_wow_cleanup(ar);
+
+exit:
+	mutex_unlock(&ar->conf_mutex);
+	return ret ? 1 : 0;
+}
+
+int ath10k_wow_op_resume(struct ieee80211_hw *hw)
+{
+	struct ath10k *ar = hw->priv;
+	int ret;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
+			      ar->fw_features))) {
+		ret = 1;
+		goto exit;
+	}
+
+	ret = ath10k_hif_resume(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to resume hif: %d\n", ret);
+		goto exit;
+	}
+
+	ret = ath10k_wow_wakeup(ar);
+	if (ret)
+		ath10k_warn(ar, "failed to wakeup from wow: %d\n", ret);
+
+exit:
+	if (ret) {
+		switch (ar->state) {
+		case ATH10K_STATE_ON:
+			ar->state = ATH10K_STATE_RESTARTING;
+			ret = 1;
+			break;
+		case ATH10K_STATE_OFF:
+		case ATH10K_STATE_RESTARTING:
+		case ATH10K_STATE_RESTARTED:
+		case ATH10K_STATE_UTF:
+		case ATH10K_STATE_WEDGED:
+			ath10k_warn(ar, "encountered unexpected device state %d on resume, cannot recover\n",
+				    ar->state);
+			ret = -EIO;
+			break;
+		}
+	}
+
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+int ath10k_wow_init(struct ath10k *ar)
+{
+	if (!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, ar->fw_features))
+		return 0;
+
+	if (WARN_ON(!test_bit(WMI_SERVICE_WOW, ar->wmi.svc_map)))
+		return -EINVAL;
+
+	ar->wow.wowlan_support = ath10k_wowlan_support;
+	ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
+	ar->hw->wiphy->wowlan = &ar->wow.wowlan_support;
+
+	return 0;
+}
diff --git a/drivers/net/wireless/ath/ath10k/wow.h b/drivers/net/wireless/ath/ath10k/wow.h
new file mode 100644
index 0000000..abbb04b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wow.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef _WOW_H_
+#define _WOW_H_
+
+struct ath10k_wow {
+	u32 max_num_patterns;
+	struct completion wakeup_completed;
+	struct wiphy_wowlan_support wowlan_support;
+};
+
+#ifdef CONFIG_PM
+
+int ath10k_wow_init(struct ath10k *ar);
+int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
+			  struct cfg80211_wowlan *wowlan);
+int ath10k_wow_op_resume(struct ieee80211_hw *hw);
+
+#else
+
+static inline int ath10k_wow_init(struct ath10k *ar)
+{
+	return 0;
+}
+
+#endif /* CONFIG_PM */
+#endif /* _WOW_H_ */