File-copy from v4.4.100

This is the result of 'cp' from a linux-stable tree with the 'v4.4.100'
tag checked out (commit 26d6298789e695c9f627ce49a7bbd2286405798a) on
git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git

Please refer to that tree for all history prior to this point.

Change-Id: I8a9ee2aea93cd29c52c847d0ce33091a73ae6afe
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
new file mode 100644
index 0000000..dc4c750
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
@@ -0,0 +1,57 @@
+#
+# Makefile fragment for Broadcom 802.11n Networking Device Driver
+#
+# Copyright (c) 2010 Broadcom Corporation
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+# OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ccflags-y += \
+	-Idrivers/net/wireless/brcm80211/brcmfmac	\
+	-Idrivers/net/wireless/brcm80211/include
+
+ccflags-y += -D__CHECK_ENDIAN__
+
+obj-$(CONFIG_BRCMFMAC) += brcmfmac.o
+brcmfmac-objs += \
+		cfg80211.o \
+		chip.o \
+		fwil.o \
+		fweh.o \
+		fwsignal.o \
+		p2p.o \
+		proto.o \
+		common.o \
+		core.o \
+		firmware.o \
+		feature.o \
+		btcoex.o \
+		vendor.o
+brcmfmac-$(CONFIG_BRCMFMAC_PROTO_BCDC) += \
+		bcdc.o
+brcmfmac-$(CONFIG_BRCMFMAC_PROTO_MSGBUF) += \
+		commonring.o \
+		flowring.o \
+		msgbuf.o
+brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
+		sdio.o \
+		bcmsdh.o
+brcmfmac-$(CONFIG_BRCMFMAC_USB) += \
+		usb.o
+brcmfmac-$(CONFIG_BRCMFMAC_PCIE) += \
+		pcie.o
+brcmfmac-$(CONFIG_BRCMDBG) += \
+		debug.o
+brcmfmac-$(CONFIG_BRCM_TRACING) += \
+		tracepoint.o
+brcmfmac-$(CONFIG_OF) += \
+		of.o
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcdc.c
new file mode 100644
index 0000000..288c84e
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcdc.c
@@ -0,0 +1,389 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*******************************************************************************
+ * Communicates with the dongle by using dcmd codes.
+ * For certain dcmd codes, the dongle interprets string data from the host.
+ ******************************************************************************/
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+
+#include <brcmu_utils.h>
+#include <brcmu_wifi.h>
+
+#include "core.h"
+#include "bus.h"
+#include "fwsignal.h"
+#include "debug.h"
+#include "tracepoint.h"
+#include "proto.h"
+#include "bcdc.h"
+
+struct brcmf_proto_bcdc_dcmd {
+	__le32 cmd;	/* dongle command value */
+	__le32 len;	/* lower 16: output buflen;
+			 * upper 16: input buflen (excludes header) */
+	__le32 flags;	/* flag definitions given below */
+	__le32 status;	/* status code returned from the device */
+};
+
+/* BCDC flag definitions */
+#define BCDC_DCMD_ERROR		0x01		/* 1=cmd failed */
+#define BCDC_DCMD_SET		0x02		/* 0=get, 1=set cmd */
+#define BCDC_DCMD_IF_MASK	0xF000		/* I/F index */
+#define BCDC_DCMD_IF_SHIFT	12
+#define BCDC_DCMD_ID_MASK	0xFFFF0000	/* id and cmd pairing */
+#define BCDC_DCMD_ID_SHIFT	16		/* ID Mask shift bits */
+#define BCDC_DCMD_ID(flags)	\
+	(((flags) & BCDC_DCMD_ID_MASK) >> BCDC_DCMD_ID_SHIFT)
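+
+/*
+ * Illustrative encoding (values chosen for this example only): a "set"
+ * command with request id 5 on interface index 1 is packed by
+ * brcmf_proto_bcdc_msg() as
+ *   flags = (5 << BCDC_DCMD_ID_SHIFT) | (1 << BCDC_DCMD_IF_SHIFT) | BCDC_DCMD_SET
+ *         = 0x00051002
+ * and BCDC_DCMD_ID(0x00051002) recovers request id 5, which is how a
+ * response is matched back to its request in brcmf_proto_bcdc_cmplt().
+ */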
+
+/*
+ * BCDC header - Broadcom specific extension of CDC.
+ * Used on data packets to convey priority across USB.
+ */
+#define	BCDC_HEADER_LEN		4
+#define BCDC_PROTO_VER		2	/* Protocol version */
+#define BCDC_FLAG_VER_MASK	0xf0	/* Protocol version mask */
+#define BCDC_FLAG_VER_SHIFT	4	/* Protocol version shift */
+#define BCDC_FLAG_SUM_GOOD	0x04	/* Good RX checksums */
+#define BCDC_FLAG_SUM_NEEDED	0x08	/* Dongle needs to do TX checksums */
+#define BCDC_PRIORITY_MASK	0x7
+#define BCDC_FLAG2_IF_MASK	0x0f	/* packet rx interface in APSTA */
+#define BCDC_FLAG2_IF_SHIFT	0
+
+#define BCDC_GET_IF_IDX(hdr) \
+	((int)((((hdr)->flags2) & BCDC_FLAG2_IF_MASK) >> BCDC_FLAG2_IF_SHIFT))
+#define BCDC_SET_IF_IDX(hdr, idx) \
+	((hdr)->flags2 = (((hdr)->flags2 & ~BCDC_FLAG2_IF_MASK) | \
+	((idx) << BCDC_FLAG2_IF_SHIFT)))
+
+/**
+ * struct brcmf_proto_bcdc_header - BCDC header format
+ *
+ * @flags: flags contain protocol and checksum info.
+ * @priority: 802.1d priority and USB flow control info (bit 4:7).
+ * @flags2: additional flags containing dongle interface index.
+ * @data_offset: start of packet data. The header is followed by firmware signals.
+ */
+struct brcmf_proto_bcdc_header {
+	u8 flags;
+	u8 priority;
+	u8 flags2;
+	u8 data_offset;
+};
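+
+/*
+ * Example header interpretation (illustrative values, not taken from a real
+ * capture): flags = 0x20 carries BCDC protocol version 2 in the upper
+ * nibble; data_offset = 3 means the header is followed by 3 * 4 = 12 bytes
+ * of firmware signal data before the packet payload, because
+ * brcmf_proto_bcdc_hdrpull() converts data_offset to bytes with a shift
+ * left by 2.
+ */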
+
+/*
+ * maximum length of firmware signal data between
+ * the BCDC header and packet data in the tx path.
+ */
+#define BRCMF_PROT_FW_SIGNAL_MAX_TXBYTES	12
+
+#define RETRIES 2 /* # of retries to retrieve matching dcmd response */
+#define BUS_HEADER_LEN	(16+64)		/* Must be at least SDPCM_RESERVE
+					 * (amount of header that might be added)
+					 * plus any space that might be needed
+					 * for bus alignment padding.
+					 */
+struct brcmf_bcdc {
+	u16 reqid;
+	u8 bus_header[BUS_HEADER_LEN];
+	struct brcmf_proto_bcdc_dcmd msg;
+	unsigned char buf[BRCMF_DCMD_MAXLEN];
+};
+
+
+static int
+brcmf_proto_bcdc_msg(struct brcmf_pub *drvr, int ifidx, uint cmd, void *buf,
+		     uint len, bool set)
+{
+	struct brcmf_bcdc *bcdc = (struct brcmf_bcdc *)drvr->proto->pd;
+	struct brcmf_proto_bcdc_dcmd *msg = &bcdc->msg;
+	u32 flags;
+
+	brcmf_dbg(BCDC, "Enter\n");
+
+	memset(msg, 0, sizeof(struct brcmf_proto_bcdc_dcmd));
+
+	msg->cmd = cpu_to_le32(cmd);
+	msg->len = cpu_to_le32(len);
+	flags = (++bcdc->reqid << BCDC_DCMD_ID_SHIFT);
+	if (set)
+		flags |= BCDC_DCMD_SET;
+	flags = (flags & ~BCDC_DCMD_IF_MASK) |
+		(ifidx << BCDC_DCMD_IF_SHIFT);
+	msg->flags = cpu_to_le32(flags);
+
+	if (buf)
+		memcpy(bcdc->buf, buf, len);
+
+	len += sizeof(*msg);
+	if (len > BRCMF_TX_IOCTL_MAX_MSG_SIZE)
+		len = BRCMF_TX_IOCTL_MAX_MSG_SIZE;
+
+	/* Send request */
+	return brcmf_bus_txctl(drvr->bus_if, (unsigned char *)&bcdc->msg, len);
+}
+
+static int brcmf_proto_bcdc_cmplt(struct brcmf_pub *drvr, u32 id, u32 len)
+{
+	int ret;
+	struct brcmf_bcdc *bcdc = (struct brcmf_bcdc *)drvr->proto->pd;
+
+	brcmf_dbg(BCDC, "Enter\n");
+	len += sizeof(struct brcmf_proto_bcdc_dcmd);
+	do {
+		ret = brcmf_bus_rxctl(drvr->bus_if, (unsigned char *)&bcdc->msg,
+				      len);
+		if (ret < 0)
+			break;
+	} while (BCDC_DCMD_ID(le32_to_cpu(bcdc->msg.flags)) != id);
+
+	return ret;
+}
+
+static int
+brcmf_proto_bcdc_query_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
+			    void *buf, uint len)
+{
+	struct brcmf_bcdc *bcdc = (struct brcmf_bcdc *)drvr->proto->pd;
+	struct brcmf_proto_bcdc_dcmd *msg = &bcdc->msg;
+	void *info;
+	int ret = 0, retries = 0;
+	u32 id, flags;
+
+	brcmf_dbg(BCDC, "Enter, cmd %d len %d\n", cmd, len);
+
+	ret = brcmf_proto_bcdc_msg(drvr, ifidx, cmd, buf, len, false);
+	if (ret < 0) {
+		brcmf_err("brcmf_proto_bcdc_msg failed w/status %d\n",
+			  ret);
+		goto done;
+	}
+
+retry:
+	/* wait for interrupt and get first fragment */
+	ret = brcmf_proto_bcdc_cmplt(drvr, bcdc->reqid, len);
+	if (ret < 0)
+		goto done;
+
+	flags = le32_to_cpu(msg->flags);
+	id = (flags & BCDC_DCMD_ID_MASK) >> BCDC_DCMD_ID_SHIFT;
+
+	if ((id < bcdc->reqid) && (++retries < RETRIES))
+		goto retry;
+	if (id != bcdc->reqid) {
+		brcmf_err("%s: unexpected request id %d (expected %d)\n",
+			  brcmf_ifname(drvr, ifidx), id, bcdc->reqid);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	/* Check info buffer */
+	info = (void *)&msg[1];
+
+	/* Copy info buffer */
+	if (buf) {
+		if (ret < (int)len)
+			len = ret;
+		memcpy(buf, info, len);
+	}
+
+	/* Check the ERROR flag */
+	if (flags & BCDC_DCMD_ERROR)
+		ret = le32_to_cpu(msg->status);
+
+done:
+	return ret;
+}
+
+static int
+brcmf_proto_bcdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
+			  void *buf, uint len)
+{
+	struct brcmf_bcdc *bcdc = (struct brcmf_bcdc *)drvr->proto->pd;
+	struct brcmf_proto_bcdc_dcmd *msg = &bcdc->msg;
+	int ret = 0;
+	u32 flags, id;
+
+	brcmf_dbg(BCDC, "Enter, cmd %d len %d\n", cmd, len);
+
+	ret = brcmf_proto_bcdc_msg(drvr, ifidx, cmd, buf, len, true);
+	if (ret < 0)
+		goto done;
+
+	ret = brcmf_proto_bcdc_cmplt(drvr, bcdc->reqid, len);
+	if (ret < 0)
+		goto done;
+
+	flags = le32_to_cpu(msg->flags);
+	id = (flags & BCDC_DCMD_ID_MASK) >> BCDC_DCMD_ID_SHIFT;
+
+	if (id != bcdc->reqid) {
+		brcmf_err("%s: unexpected request id %d (expected %d)\n",
+			  brcmf_ifname(drvr, ifidx), id, bcdc->reqid);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	/* Check the ERROR flag */
+	if (flags & BCDC_DCMD_ERROR)
+		ret = le32_to_cpu(msg->status);
+
+done:
+	return ret;
+}
+
+static void
+brcmf_proto_bcdc_hdrpush(struct brcmf_pub *drvr, int ifidx, u8 offset,
+			 struct sk_buff *pktbuf)
+{
+	struct brcmf_proto_bcdc_header *h;
+
+	brcmf_dbg(BCDC, "Enter\n");
+
+	/* Push the BCDC header; it conveys packet priority across the bus */
+	skb_push(pktbuf, BCDC_HEADER_LEN);
+
+	h = (struct brcmf_proto_bcdc_header *)(pktbuf->data);
+
+	h->flags = (BCDC_PROTO_VER << BCDC_FLAG_VER_SHIFT);
+	if (pktbuf->ip_summed == CHECKSUM_PARTIAL)
+		h->flags |= BCDC_FLAG_SUM_NEEDED;
+
+	h->priority = (pktbuf->priority & BCDC_PRIORITY_MASK);
+	h->flags2 = 0;
+	h->data_offset = offset;
+	BCDC_SET_IF_IDX(h, ifidx);
+	trace_brcmf_bcdchdr(pktbuf->data);
+}
+
+static int
+brcmf_proto_bcdc_hdrpull(struct brcmf_pub *drvr, bool do_fws,
+			 struct sk_buff *pktbuf, struct brcmf_if **ifp)
+{
+	struct brcmf_proto_bcdc_header *h;
+	struct brcmf_if *tmp_if;
+
+	brcmf_dbg(BCDC, "Enter\n");
+
+	/* Pop the BCDC header that conveys packet priority across the bus */
+	if (pktbuf->len <= BCDC_HEADER_LEN) {
+		brcmf_dbg(INFO, "rx data too short (%d <= %d)\n",
+			  pktbuf->len, BCDC_HEADER_LEN);
+		return -EBADE;
+	}
+
+	trace_brcmf_bcdchdr(pktbuf->data);
+	h = (struct brcmf_proto_bcdc_header *)(pktbuf->data);
+
+	tmp_if = brcmf_get_ifp(drvr, BCDC_GET_IF_IDX(h));
+	if (!tmp_if) {
+		brcmf_dbg(INFO, "no matching ifp found\n");
+		return -EBADE;
+	}
+	if (((h->flags & BCDC_FLAG_VER_MASK) >> BCDC_FLAG_VER_SHIFT) !=
+	    BCDC_PROTO_VER) {
+		brcmf_err("%s: non-BCDC packet received, flags 0x%x\n",
+			  brcmf_ifname(drvr, tmp_if->ifidx), h->flags);
+		return -EBADE;
+	}
+
+	if (h->flags & BCDC_FLAG_SUM_GOOD) {
+		brcmf_dbg(BCDC, "%s: BDC rcv, good checksum, flags 0x%x\n",
+			  brcmf_ifname(drvr, tmp_if->ifidx), h->flags);
+		pktbuf->ip_summed = CHECKSUM_UNNECESSARY;
+	}
+
+	pktbuf->priority = h->priority & BCDC_PRIORITY_MASK;
+
+	skb_pull(pktbuf, BCDC_HEADER_LEN);
+	if (do_fws)
+		brcmf_fws_hdrpull(tmp_if, h->data_offset << 2, pktbuf);
+	else
+		skb_pull(pktbuf, h->data_offset << 2);
+
+	if (pktbuf->len == 0)
+		return -ENODATA;
+
+	*ifp = tmp_if;
+	return 0;
+}
+
+static int
+brcmf_proto_bcdc_txdata(struct brcmf_pub *drvr, int ifidx, u8 offset,
+			struct sk_buff *pktbuf)
+{
+	brcmf_proto_bcdc_hdrpush(drvr, ifidx, offset, pktbuf);
+	return brcmf_bus_txdata(drvr->bus_if, pktbuf);
+}
+
+static void
+brcmf_proto_bcdc_configure_addr_mode(struct brcmf_pub *drvr, int ifidx,
+				     enum proto_addr_mode addr_mode)
+{
+}
+
+static void
+brcmf_proto_bcdc_delete_peer(struct brcmf_pub *drvr, int ifidx,
+			     u8 peer[ETH_ALEN])
+{
+}
+
+static void
+brcmf_proto_bcdc_add_tdls_peer(struct brcmf_pub *drvr, int ifidx,
+			       u8 peer[ETH_ALEN])
+{
+}
+
+int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr)
+{
+	struct brcmf_bcdc *bcdc;
+
+	bcdc = kzalloc(sizeof(*bcdc), GFP_ATOMIC);
+	if (!bcdc)
+		goto fail;
+
+	/* ensure that the msg buf directly follows the cdc msg struct */
+	if ((unsigned long)(&bcdc->msg + 1) != (unsigned long)bcdc->buf) {
+		brcmf_err("struct brcmf_proto_bcdc is not correctly defined\n");
+		goto fail;
+	}
+
+	drvr->proto->hdrpull = brcmf_proto_bcdc_hdrpull;
+	drvr->proto->query_dcmd = brcmf_proto_bcdc_query_dcmd;
+	drvr->proto->set_dcmd = brcmf_proto_bcdc_set_dcmd;
+	drvr->proto->txdata = brcmf_proto_bcdc_txdata;
+	drvr->proto->configure_addr_mode = brcmf_proto_bcdc_configure_addr_mode;
+	drvr->proto->delete_peer = brcmf_proto_bcdc_delete_peer;
+	drvr->proto->add_tdls_peer = brcmf_proto_bcdc_add_tdls_peer;
+	drvr->proto->pd = bcdc;
+
+	drvr->hdrlen += BCDC_HEADER_LEN + BRCMF_PROT_FW_SIGNAL_MAX_TXBYTES;
+	drvr->bus_if->maxctl = BRCMF_DCMD_MAXLEN +
+			sizeof(struct brcmf_proto_bcdc_dcmd);
+	return 0;
+
+fail:
+	kfree(bcdc);
+	return -ENOMEM;
+}
+
+void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr)
+{
+	kfree(drvr->proto->pd);
+	drvr->proto->pd = NULL;
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcdc.h b/drivers/net/wireless/brcm80211/brcmfmac/bcdc.h
new file mode 100644
index 0000000..6003179
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcdc.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2013 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef BRCMFMAC_BCDC_H
+#define BRCMFMAC_BCDC_H
+
+#ifdef CONFIG_BRCMFMAC_PROTO_BCDC
+int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr);
+void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr);
+#else
+static inline int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr) { return 0; }
+static inline void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr) {}
+#endif
+
+#endif /* BRCMFMAC_BCDC_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
new file mode 100644
index 0000000..59cef6c
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -0,0 +1,1382 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/* ****************** SDIO CARD Interface Functions **************************/
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include <linux/scatterlist.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/core.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/brcmfmac-sdio.h>
+#include <linux/pm_runtime.h>
+#include <linux/suspend.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <net/cfg80211.h>
+
+#include <defs.h>
+#include <brcm_hw_ids.h>
+#include <brcmu_utils.h>
+#include <brcmu_wifi.h>
+#include <chipcommon.h>
+#include <soc.h>
+#include "chip.h"
+#include "bus.h"
+#include "debug.h"
+#include "sdio.h"
+#include "of.h"
+
+#define SDIOH_API_ACCESS_RETRY_LIMIT	2
+
+#define DMA_ALIGN_MASK	0x03
+
+#define SDIO_FUNC1_BLOCKSIZE		64
+#define SDIO_FUNC2_BLOCKSIZE		512
+/* Maximum milliseconds to wait for F2 to come up */
+#define SDIO_WAIT_F2RDY	3000
+
+#define BRCMF_DEFAULT_TXGLOM_SIZE	32  /* max tx frames in glom chain */
+#define BRCMF_DEFAULT_RXGLOM_SIZE	32  /* max rx frames in glom chain */
+
+struct brcmf_sdiod_freezer {
+	atomic_t freezing;
+	atomic_t thread_count;
+	u32 frozen_count;
+	wait_queue_head_t thread_freeze;
+	struct completion resumed;
+};
+
+static int brcmf_sdiod_txglomsz = BRCMF_DEFAULT_TXGLOM_SIZE;
+module_param_named(txglomsz, brcmf_sdiod_txglomsz, int, 0);
+MODULE_PARM_DESC(txglomsz, "maximum tx packet chain size [SDIO]");
+
+static irqreturn_t brcmf_sdiod_oob_irqhandler(int irq, void *dev_id)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev_id);
+	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+
+	brcmf_dbg(INTR, "OOB intr triggered\n");
+
+	/* the out-of-band interrupt is level-triggered and won't
+	 * be cleared until the DPC runs
+	 */
+	if (sdiodev->irq_en) {
+		disable_irq_nosync(irq);
+		sdiodev->irq_en = false;
+	}
+
+	brcmf_sdio_isr(sdiodev->bus);
+
+	return IRQ_HANDLED;
+}
+
+static void brcmf_sdiod_ib_irqhandler(struct sdio_func *func)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(&func->dev);
+	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+
+	brcmf_dbg(INTR, "IB intr triggered\n");
+
+	brcmf_sdio_isr(sdiodev->bus);
+}
+
+/* dummy handler for SDIO function 2 interrupt */
+static void brcmf_sdiod_dummy_irqhandler(struct sdio_func *func)
+{
+}
+
+int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
+{
+	int ret = 0;
+	u8 data;
+	u32 addr, gpiocontrol;
+	unsigned long flags;
+
+	if ((sdiodev->pdata) && (sdiodev->pdata->oob_irq_supported)) {
+		brcmf_dbg(SDIO, "Enter, register OOB IRQ %d\n",
+			  sdiodev->pdata->oob_irq_nr);
+		ret = request_irq(sdiodev->pdata->oob_irq_nr,
+				  brcmf_sdiod_oob_irqhandler,
+				  sdiodev->pdata->oob_irq_flags,
+				  "brcmf_oob_intr",
+				  &sdiodev->func[1]->dev);
+		if (ret != 0) {
+			brcmf_err("request_irq failed %d\n", ret);
+			return ret;
+		}
+		sdiodev->oob_irq_requested = true;
+		spin_lock_init(&sdiodev->irq_en_lock);
+		spin_lock_irqsave(&sdiodev->irq_en_lock, flags);
+		sdiodev->irq_en = true;
+		spin_unlock_irqrestore(&sdiodev->irq_en_lock, flags);
+
+		ret = enable_irq_wake(sdiodev->pdata->oob_irq_nr);
+		if (ret != 0) {
+			brcmf_err("enable_irq_wake failed %d\n", ret);
+			return ret;
+		}
+		sdiodev->irq_wake = true;
+
+		sdio_claim_host(sdiodev->func[1]);
+
+		if (sdiodev->bus_if->chip == BRCM_CC_43362_CHIP_ID) {
+			/* assign GPIO to SDIO core */
+			addr = CORE_CC_REG(SI_ENUM_BASE, gpiocontrol);
+			gpiocontrol = brcmf_sdiod_regrl(sdiodev, addr, &ret);
+			gpiocontrol |= 0x2;
+			brcmf_sdiod_regwl(sdiodev, addr, gpiocontrol, &ret);
+
+			brcmf_sdiod_regwb(sdiodev, SBSDIO_GPIO_SELECT, 0xf,
+					  &ret);
+			brcmf_sdiod_regwb(sdiodev, SBSDIO_GPIO_OUT, 0, &ret);
+			brcmf_sdiod_regwb(sdiodev, SBSDIO_GPIO_EN, 0x2, &ret);
+		}
+
+		/* must configure SDIO_CCCR_IENx to enable irq */
+		data = brcmf_sdiod_regrb(sdiodev, SDIO_CCCR_IENx, &ret);
+		data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
+		brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret);
+
+		/* redirect, configure and enable io for interrupt signal */
+		data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
+		if (sdiodev->pdata->oob_irq_flags & IRQF_TRIGGER_HIGH)
+			data |= SDIO_SEPINT_ACT_HI;
+		brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);
+
+		sdio_release_host(sdiodev->func[1]);
+	} else {
+		brcmf_dbg(SDIO, "Entering\n");
+		sdio_claim_host(sdiodev->func[1]);
+		sdio_claim_irq(sdiodev->func[1], brcmf_sdiod_ib_irqhandler);
+		sdio_claim_irq(sdiodev->func[2], brcmf_sdiod_dummy_irqhandler);
+		sdio_release_host(sdiodev->func[1]);
+	}
+
+	return 0;
+}
+
+int brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev)
+{
+	brcmf_dbg(SDIO, "Entering\n");
+
+	if ((sdiodev->pdata) && (sdiodev->pdata->oob_irq_supported)) {
+		sdio_claim_host(sdiodev->func[1]);
+		brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
+		brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
+		sdio_release_host(sdiodev->func[1]);
+
+		if (sdiodev->oob_irq_requested) {
+			sdiodev->oob_irq_requested = false;
+			if (sdiodev->irq_wake) {
+				disable_irq_wake(sdiodev->pdata->oob_irq_nr);
+				sdiodev->irq_wake = false;
+			}
+			free_irq(sdiodev->pdata->oob_irq_nr,
+				 &sdiodev->func[1]->dev);
+			sdiodev->irq_en = false;
+		}
+	} else {
+		sdio_claim_host(sdiodev->func[1]);
+		sdio_release_irq(sdiodev->func[2]);
+		sdio_release_irq(sdiodev->func[1]);
+		sdio_release_host(sdiodev->func[1]);
+	}
+
+	return 0;
+}
+
+void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
+			      enum brcmf_sdiod_state state)
+{
+	if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM ||
+	    state == sdiodev->state)
+		return;
+
+	brcmf_dbg(TRACE, "%d -> %d\n", sdiodev->state, state);
+	switch (sdiodev->state) {
+	case BRCMF_SDIOD_DATA:
+		/* any other state means bus interface is down */
+		brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_DOWN);
+		break;
+	case BRCMF_SDIOD_DOWN:
+		/* transition from DOWN to DATA means bus interface is up */
+		if (state == BRCMF_SDIOD_DATA)
+			brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_UP);
+		break;
+	default:
+		break;
+	}
+	sdiodev->state = state;
+}
+
+static inline int brcmf_sdiod_f0_writeb(struct sdio_func *func,
+					uint regaddr, u8 byte)
+{
+	int err_ret;
+
+	/*
+	 * Can only directly write to some F0 registers.
+	 * Handle CCCR_IENx and CCCR_ABORT command
+	 * as a special case.
+	 */
+	if ((regaddr == SDIO_CCCR_ABORT) ||
+	    (regaddr == SDIO_CCCR_IENx))
+		sdio_writeb(func, byte, regaddr, &err_ret);
+	else
+		sdio_f0_writeb(func, byte, regaddr, &err_ret);
+
+	return err_ret;
+}
+
+static int brcmf_sdiod_request_data(struct brcmf_sdio_dev *sdiodev, u8 fn,
+				    u32 addr, u8 regsz, void *data, bool write)
+{
+	struct sdio_func *func;
+	int ret;
+
+	brcmf_dbg(SDIO, "rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
+		  write, fn, addr, regsz);
+
+	/* only allow byte access on F0 */
+	if (WARN_ON(regsz > 1 && !fn))
+		return -EINVAL;
+	func = sdiodev->func[fn];
+
+	switch (regsz) {
+	case sizeof(u8):
+		if (write) {
+			if (fn)
+				sdio_writeb(func, *(u8 *)data, addr, &ret);
+			else
+				ret = brcmf_sdiod_f0_writeb(func, addr,
+							    *(u8 *)data);
+		} else {
+			if (fn)
+				*(u8 *)data = sdio_readb(func, addr, &ret);
+			else
+				*(u8 *)data = sdio_f0_readb(func, addr, &ret);
+		}
+		break;
+	case sizeof(u16):
+		if (write)
+			sdio_writew(func, *(u16 *)data, addr, &ret);
+		else
+			*(u16 *)data = sdio_readw(func, addr, &ret);
+		break;
+	case sizeof(u32):
+		if (write)
+			sdio_writel(func, *(u32 *)data, addr, &ret);
+		else
+			*(u32 *)data = sdio_readl(func, addr, &ret);
+		break;
+	default:
+		brcmf_err("invalid size: %d\n", regsz);
+		break;
+	}
+
+	if (ret)
+		brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
+			  write ? "write" : "read", fn, addr, ret);
+
+	return ret;
+}
+
+static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
+				   u8 regsz, void *data, bool write)
+{
+	u8 func;
+	s32 retry = 0;
+	int ret;
+
+	if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM)
+		return -ENOMEDIUM;
+
+	/*
+	 * figure out how to access the register based on address range
+	 * 0x00 ~ 0x7FF: function 0 CCCR and FBR
+	 * 0x10000 ~ 0x1FFFF: function 1 miscellaneous registers
+	 * The rest: function 1 silicon backplane core registers
+	 */
+	if ((addr & ~REG_F0_REG_MASK) == 0)
+		func = SDIO_FUNC_0;
+	else
+		func = SDIO_FUNC_1;
+
+	do {
+		if (!write)
+			memset(data, 0, regsz);
+		/* on retry, wait 1 ms for the bus to settle down */
+		if (retry)
+			usleep_range(1000, 2000);
+		ret = brcmf_sdiod_request_data(sdiodev, func, addr, regsz,
+					       data, write);
+	} while (ret != 0 && ret != -ENOMEDIUM &&
+		 retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
+
+	if (ret == -ENOMEDIUM)
+		brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
+	else if (ret != 0) {
+		/*
+		 * SleepCSR register access can fail when
+		 * waking up the device so reduce this noise
+		 * in the logs.
+		 */
+		if (addr != SBSDIO_FUNC1_SLEEPCSR)
+			brcmf_err("failed to %s data F%d@0x%05x, err: %d\n",
+				  write ? "write" : "read", func, addr, ret);
+		else
+			brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
+				  write ? "write" : "read", func, addr, ret);
+	}
+	return ret;
+}
+
+static int
+brcmf_sdiod_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
+{
+	int err = 0, i;
+	u8 addr[3];
+
+	if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM)
+		return -ENOMEDIUM;
+
+	addr[0] = (address >> 8) & SBSDIO_SBADDRLOW_MASK;
+	addr[1] = (address >> 16) & SBSDIO_SBADDRMID_MASK;
+	addr[2] = (address >> 24) & SBSDIO_SBADDRHIGH_MASK;
+
+	for (i = 0; i < 3; i++) {
+		err = brcmf_sdiod_regrw_helper(sdiodev,
+					       SBSDIO_FUNC1_SBADDRLOW + i,
+					       sizeof(u8), &addr[i], true);
+		if (err) {
+			brcmf_err("failed at addr: 0x%0x\n",
+				  SBSDIO_FUNC1_SBADDRLOW + i);
+			break;
+		}
+	}
+
+	return err;
+}
+
+static int
+brcmf_sdiod_addrprep(struct brcmf_sdio_dev *sdiodev, uint width, u32 *addr)
+{
+	uint bar0 = *addr & ~SBSDIO_SB_OFT_ADDR_MASK;
+	int err = 0;
+
+	if (bar0 != sdiodev->sbwad) {
+		err = brcmf_sdiod_set_sbaddr_window(sdiodev, bar0);
+		if (err)
+			return err;
+
+		sdiodev->sbwad = bar0;
+	}
+
+	*addr &= SBSDIO_SB_OFT_ADDR_MASK;
+
+	if (width == 4)
+		*addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+	return 0;
+}
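+
+/*
+ * Worked example for brcmf_sdiod_addrprep(), assuming the usual 32 KiB
+ * backplane window (SBSDIO_SB_OFT_ADDR_MASK == 0x7fff): for backplane
+ * address 0x18000120, bar0 is 0x18000000 and is written to the SBADDR
+ * window registers only when it differs from the cached sdiodev->sbwad;
+ * *addr is reduced to the window offset 0x120, and for 4-byte accesses
+ * SBSDIO_SB_ACCESS_2_4B_FLAG is OR'ed in to select the 32-bit access range.
+ */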
+
+u8 brcmf_sdiod_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
+{
+	u8 data;
+	int retval;
+
+	brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
+	retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
+					  false);
+	brcmf_dbg(SDIO, "data:0x%02x\n", data);
+
+	if (ret)
+		*ret = retval;
+
+	return data;
+}
+
+u32 brcmf_sdiod_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
+{
+	u32 data;
+	int retval;
+
+	brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
+	retval = brcmf_sdiod_addrprep(sdiodev, sizeof(data), &addr);
+	if (retval)
+		goto done;
+	retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
+					  false);
+	brcmf_dbg(SDIO, "data:0x%08x\n", data);
+
+done:
+	if (ret)
+		*ret = retval;
+
+	return data;
+}
+
+void brcmf_sdiod_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
+		      u8 data, int *ret)
+{
+	int retval;
+
+	brcmf_dbg(SDIO, "addr:0x%08x, data:0x%02x\n", addr, data);
+	retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
+					  true);
+	if (ret)
+		*ret = retval;
+}
+
+void brcmf_sdiod_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
+		      u32 data, int *ret)
+{
+	int retval;
+
+	brcmf_dbg(SDIO, "addr:0x%08x, data:0x%08x\n", addr, data);
+	retval = brcmf_sdiod_addrprep(sdiodev, sizeof(data), &addr);
+	if (retval)
+		goto done;
+	retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
+					  true);
+
+done:
+	if (ret)
+		*ret = retval;
+}
+
+static int brcmf_sdiod_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
+			     bool write, u32 addr, struct sk_buff *pkt)
+{
+	unsigned int req_sz;
+	int err;
+
+	/* For a single skb, use the standard mmc interface */
+	req_sz = pkt->len + 3;
+	req_sz &= (uint)~3;
+
+	if (write)
+		err = sdio_memcpy_toio(sdiodev->func[fn], addr,
+				       ((u8 *)(pkt->data)), req_sz);
+	else if (fn == 1)
+		err = sdio_memcpy_fromio(sdiodev->func[fn], ((u8 *)(pkt->data)),
+					 addr, req_sz);
+	else
+		/* function 2 read is a FIFO operation */
+		err = sdio_readsb(sdiodev->func[fn], ((u8 *)(pkt->data)), addr,
+				  req_sz);
+	if (err == -ENOMEDIUM)
+		brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
+	return err;
+}
+
+/**
+ * brcmf_sdiod_sglist_rw - SDIO interface function for block data access
+ * @sdiodev: brcmfmac sdio device
+ * @fn: SDIO function number
+ * @write: direction flag
+ * @addr: dongle memory address as source/destination
+ * @pktlist: list of packets to transfer
+ *
+ * This function serves as the interface to the MMC stack for block data
+ * access. It assumes that the skbs passed down by the caller have already
+ * been padded and aligned.
+ */
+static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
+				 bool write, u32 addr,
+				 struct sk_buff_head *pktlist)
+{
+	unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
+	unsigned int max_req_sz, orig_offset, dst_offset;
+	unsigned short max_seg_cnt, seg_sz;
+	unsigned char *pkt_data, *orig_data, *dst_data;
+	struct sk_buff *pkt_next = NULL, *local_pkt_next;
+	struct sk_buff_head local_list, *target_list;
+	struct mmc_request mmc_req;
+	struct mmc_command mmc_cmd;
+	struct mmc_data mmc_dat;
+	struct scatterlist *sgl;
+	int ret = 0;
+
+	if (!pktlist->qlen)
+		return -EINVAL;
+
+	target_list = pktlist;
+	/* for hosts with broken sg support, prepare a page-aligned list */
+	__skb_queue_head_init(&local_list);
+	if (sdiodev->pdata && sdiodev->pdata->broken_sg_support && !write) {
+		req_sz = 0;
+		skb_queue_walk(pktlist, pkt_next)
+			req_sz += pkt_next->len;
+		req_sz = ALIGN(req_sz, sdiodev->func[fn]->cur_blksize);
+		while (req_sz > PAGE_SIZE) {
+			pkt_next = brcmu_pkt_buf_get_skb(PAGE_SIZE);
+			if (pkt_next == NULL) {
+				ret = -ENOMEM;
+				goto exit;
+			}
+			__skb_queue_tail(&local_list, pkt_next);
+			req_sz -= PAGE_SIZE;
+		}
+		pkt_next = brcmu_pkt_buf_get_skb(req_sz);
+		if (pkt_next == NULL) {
+			ret = -ENOMEM;
+			goto exit;
+		}
+		__skb_queue_tail(&local_list, pkt_next);
+		target_list = &local_list;
+	}
+
+	func_blk_sz = sdiodev->func[fn]->cur_blksize;
+	max_req_sz = sdiodev->max_request_size;
+	max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count,
+			    target_list->qlen);
+	seg_sz = target_list->qlen;
+	pkt_offset = 0;
+	pkt_next = target_list->next;
+
+	memset(&mmc_req, 0, sizeof(struct mmc_request));
+	memset(&mmc_cmd, 0, sizeof(struct mmc_command));
+	memset(&mmc_dat, 0, sizeof(struct mmc_data));
+
+	mmc_dat.sg = sdiodev->sgtable.sgl;
+	mmc_dat.blksz = func_blk_sz;
+	mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
+	mmc_cmd.opcode = SD_IO_RW_EXTENDED;
+	mmc_cmd.arg = write ? 1<<31 : 0;	/* write flag  */
+	mmc_cmd.arg |= (fn & 0x7) << 28;	/* SDIO func num */
+	mmc_cmd.arg |= 1<<27;			/* block mode */
+	/* for function 1 the addr will be incremented */
+	mmc_cmd.arg |= (fn == 1) ? 1<<26 : 0;
+	mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
+	mmc_req.cmd = &mmc_cmd;
+	mmc_req.data = &mmc_dat;
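+	/*
+	 * Resulting CMD53 argument layout (per the SDIO spec; the example
+	 * numbers are illustrative only): bit 31 = write, bits 30:28 =
+	 * function, bit 27 = block mode, bit 26 = incrementing address,
+	 * bits 25:9 = register address (OR'ed in below per transfer),
+	 * bits 8:0 = block count. A 4-block write on function 2 at address
+	 * 0x8000 therefore yields arg = 0xa9000004.
+	 */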
+
+	while (seg_sz) {
+		req_sz = 0;
+		sg_cnt = 0;
+		sgl = sdiodev->sgtable.sgl;
+		/* prep sg table */
+		while (pkt_next != (struct sk_buff *)target_list) {
+			pkt_data = pkt_next->data + pkt_offset;
+			sg_data_sz = pkt_next->len - pkt_offset;
+			if (sg_data_sz > sdiodev->max_segment_size)
+				sg_data_sz = sdiodev->max_segment_size;
+			if (sg_data_sz > max_req_sz - req_sz)
+				sg_data_sz = max_req_sz - req_sz;
+
+			sg_set_buf(sgl, pkt_data, sg_data_sz);
+
+			sg_cnt++;
+			sgl = sg_next(sgl);
+			req_sz += sg_data_sz;
+			pkt_offset += sg_data_sz;
+			if (pkt_offset == pkt_next->len) {
+				pkt_offset = 0;
+				pkt_next = pkt_next->next;
+			}
+
+			if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt)
+				break;
+		}
+		seg_sz -= sg_cnt;
+
+		if (req_sz % func_blk_sz != 0) {
+			brcmf_err("sg request length %u is not %u aligned\n",
+				  req_sz, func_blk_sz);
+			ret = -ENOTBLK;
+			goto exit;
+		}
+
+		mmc_dat.sg_len = sg_cnt;
+		mmc_dat.blocks = req_sz / func_blk_sz;
+		mmc_cmd.arg |= (addr & 0x1FFFF) << 9;	/* address */
+		mmc_cmd.arg |= mmc_dat.blocks & 0x1FF;	/* block count */
+		/* incrementing addr for function 1 */
+		if (fn == 1)
+			addr += req_sz;
+
+		mmc_set_data_timeout(&mmc_dat, sdiodev->func[fn]->card);
+		mmc_wait_for_req(sdiodev->func[fn]->card->host, &mmc_req);
+
+		ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
+		if (ret == -ENOMEDIUM) {
+			brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
+			break;
+		} else if (ret != 0) {
+			brcmf_err("CMD53 sg block %s failed %d\n",
+				  write ? "write" : "read", ret);
+			ret = -EIO;
+			break;
+		}
+	}
+
+	if (sdiodev->pdata && sdiodev->pdata->broken_sg_support && !write) {
+		local_pkt_next = local_list.next;
+		orig_offset = 0;
+		skb_queue_walk(pktlist, pkt_next) {
+			dst_offset = 0;
+			do {
+				req_sz = local_pkt_next->len - orig_offset;
+				req_sz = min_t(uint, pkt_next->len - dst_offset,
+					       req_sz);
+				orig_data = local_pkt_next->data + orig_offset;
+				dst_data = pkt_next->data + dst_offset;
+				memcpy(dst_data, orig_data, req_sz);
+				orig_offset += req_sz;
+				dst_offset += req_sz;
+				if (orig_offset == local_pkt_next->len) {
+					orig_offset = 0;
+					local_pkt_next = local_pkt_next->next;
+				}
+				if (dst_offset == pkt_next->len)
+					break;
+			} while (!skb_queue_empty(&local_list));
+		}
+	}
+
+exit:
+	sg_init_table(sdiodev->sgtable.sgl, sdiodev->sgtable.orig_nents);
+	while ((pkt_next = __skb_dequeue(&local_list)) != NULL)
+		brcmu_pkt_buf_free_skb(pkt_next);
+
+	return ret;
+}
+
+int brcmf_sdiod_recv_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
+{
+	struct sk_buff *mypkt;
+	int err;
+
+	mypkt = brcmu_pkt_buf_get_skb(nbytes);
+	if (!mypkt) {
+		brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
+			  nbytes);
+		return -EIO;
+	}
+
+	err = brcmf_sdiod_recv_pkt(sdiodev, mypkt);
+	if (!err)
+		memcpy(buf, mypkt->data, nbytes);
+
+	brcmu_pkt_buf_free_skb(mypkt);
+	return err;
+}
+
+int brcmf_sdiod_recv_pkt(struct brcmf_sdio_dev *sdiodev, struct sk_buff *pkt)
+{
+	u32 addr = sdiodev->sbwad;
+	int err = 0;
+
+	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pkt->len);
+
+	err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
+	if (err)
+		goto done;
+
+	err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr, pkt);
+
+done:
+	return err;
+}
+
+int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
+			   struct sk_buff_head *pktq, uint totlen)
+{
+	struct sk_buff *glom_skb;
+	struct sk_buff *skb;
+	u32 addr = sdiodev->sbwad;
+	int err = 0;
+
+	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n",
+		  addr, pktq->qlen);
+
+	err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
+	if (err)
+		goto done;
+
+	if (pktq->qlen == 1)
+		err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
+					 pktq->next);
+	else if (!sdiodev->sg_support) {
+		glom_skb = brcmu_pkt_buf_get_skb(totlen);
+		if (!glom_skb)
+			return -ENOMEM;
+		err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
+					 glom_skb);
+		if (err) {
+			brcmu_pkt_buf_free_skb(glom_skb);
+			goto done;
+		}
+
+		skb_queue_walk(pktq, skb) {
+			memcpy(skb->data, glom_skb->data, skb->len);
+			skb_pull(glom_skb, skb->len);
+		}
+	} else
+		err = brcmf_sdiod_sglist_rw(sdiodev, SDIO_FUNC_2, false, addr,
+					    pktq);
+
+done:
+	return err;
+}
+
+int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
+{
+	struct sk_buff *mypkt;
+	u32 addr = sdiodev->sbwad;
+	int err;
+
+	mypkt = brcmu_pkt_buf_get_skb(nbytes);
+	if (!mypkt) {
+		brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
+			  nbytes);
+		return -EIO;
+	}
+
+	memcpy(mypkt->data, buf, nbytes);
+
+	err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
+
+	if (!err)
+		err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, true, addr,
+					 mypkt);
+
+	brcmu_pkt_buf_free_skb(mypkt);
+	return err;
+
+}
+
+int brcmf_sdiod_send_pkt(struct brcmf_sdio_dev *sdiodev,
+			 struct sk_buff_head *pktq)
+{
+	struct sk_buff *skb;
+	u32 addr = sdiodev->sbwad;
+	int err;
+
+	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pktq->qlen);
+
+	err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
+	if (err)
+		return err;
+
+	if (pktq->qlen == 1 || !sdiodev->sg_support)
+		skb_queue_walk(pktq, skb) {
+			err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, true,
+						 addr, skb);
+			if (err)
+				break;
+		}
+	else
+		err = brcmf_sdiod_sglist_rw(sdiodev, SDIO_FUNC_2, true, addr,
+					    pktq);
+
+	return err;
+}
+
+int
+brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
+		  u8 *data, uint size)
+{
+	int bcmerror = 0;
+	struct sk_buff *pkt;
+	u32 sdaddr;
+	uint dsize;
+
+	dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
+	pkt = dev_alloc_skb(dsize);
+	if (!pkt) {
+		brcmf_err("dev_alloc_skb failed: len %d\n", dsize);
+		return -EIO;
+	}
+	pkt->priority = 0;
+
+	/* Determine initial transfer parameters */
+	sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
+	if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK)
+		dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr);
+	else
+		dsize = size;
+
+	sdio_claim_host(sdiodev->func[1]);
+
+	/* Do the transfer(s) */
+	while (size) {
+		/* Set the backplane window to include the start address */
+		bcmerror = brcmf_sdiod_set_sbaddr_window(sdiodev, address);
+		if (bcmerror)
+			break;
+
+		brcmf_dbg(SDIO, "%s %d bytes at offset 0x%08x in window 0x%08x\n",
+			  write ? "write" : "read", dsize,
+			  sdaddr, address & SBSDIO_SBWINDOW_MASK);
+
+		sdaddr &= SBSDIO_SB_OFT_ADDR_MASK;
+		sdaddr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+		skb_put(pkt, dsize);
+		if (write)
+			memcpy(pkt->data, data, dsize);
+		bcmerror = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_1, write,
+					      sdaddr, pkt);
+		if (bcmerror) {
+			brcmf_err("membytes transfer failed\n");
+			break;
+		}
+		if (!write)
+			memcpy(data, pkt->data, dsize);
+		skb_trim(pkt, 0);
+
+		/* Adjust for next transfer (if any) */
+		size -= dsize;
+		if (size) {
+			data += dsize;
+			address += dsize;
+			sdaddr = 0;
+			dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
+		}
+	}
+
+	dev_kfree_skb(pkt);
+
+	/* Return the window to backplane enumeration space for core access */
+	if (brcmf_sdiod_set_sbaddr_window(sdiodev, sdiodev->sbwad))
+		brcmf_err("FAILED to set window back to 0x%x\n",
+			  sdiodev->sbwad);
+
+	sdio_release_host(sdiodev->func[1]);
+
+	return bcmerror;
+}
+
+int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
+{
+	char t_func = (char)fn;
+	brcmf_dbg(SDIO, "Enter\n");
+
+	/* issue abort cmd52 command through F0 */
+	brcmf_sdiod_request_data(sdiodev, SDIO_FUNC_0, SDIO_CCCR_ABORT,
+				 sizeof(t_func), &t_func, true);
+
+	brcmf_dbg(SDIO, "Exit\n");
+	return 0;
+}
+
+static void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
+{
+	uint nents;
+	int err;
+
+	if (!sdiodev->sg_support)
+		return;
+
+	nents = max_t(uint, BRCMF_DEFAULT_RXGLOM_SIZE, brcmf_sdiod_txglomsz);
+	nents += (nents >> 4) + 1;
+
+	WARN_ON(nents > sdiodev->max_segment_count);
+
+	brcmf_dbg(TRACE, "nents=%d\n", nents);
+	err = sg_alloc_table(&sdiodev->sgtable, nents, GFP_KERNEL);
+	if (err < 0) {
+		brcmf_err("allocation failed: disable scatter-gather");
+		sdiodev->sg_support = false;
+	}
+
+	sdiodev->txglomsz = brcmf_sdiod_txglomsz;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev)
+{
+	sdiodev->freezer = kzalloc(sizeof(*sdiodev->freezer), GFP_KERNEL);
+	if (!sdiodev->freezer)
+		return -ENOMEM;
+	atomic_set(&sdiodev->freezer->thread_count, 0);
+	atomic_set(&sdiodev->freezer->freezing, 0);
+	init_waitqueue_head(&sdiodev->freezer->thread_freeze);
+	init_completion(&sdiodev->freezer->resumed);
+	return 0;
+}
+
+static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
+{
+	if (sdiodev->freezer) {
+		WARN_ON(atomic_read(&sdiodev->freezer->freezing));
+		kfree(sdiodev->freezer);
+	}
+}
+
+static int brcmf_sdiod_freezer_on(struct brcmf_sdio_dev *sdiodev)
+{
+	atomic_t *expect = &sdiodev->freezer->thread_count;
+	int res = 0;
+
+	sdiodev->freezer->frozen_count = 0;
+	reinit_completion(&sdiodev->freezer->resumed);
+	atomic_set(&sdiodev->freezer->freezing, 1);
+	brcmf_sdio_trigger_dpc(sdiodev->bus);
+	wait_event(sdiodev->freezer->thread_freeze,
+		   atomic_read(expect) == sdiodev->freezer->frozen_count);
+	sdio_claim_host(sdiodev->func[1]);
+	res = brcmf_sdio_sleep(sdiodev->bus, true);
+	sdio_release_host(sdiodev->func[1]);
+	return res;
+}
+
+static void brcmf_sdiod_freezer_off(struct brcmf_sdio_dev *sdiodev)
+{
+	sdio_claim_host(sdiodev->func[1]);
+	brcmf_sdio_sleep(sdiodev->bus, false);
+	sdio_release_host(sdiodev->func[1]);
+	atomic_set(&sdiodev->freezer->freezing, 0);
+	complete_all(&sdiodev->freezer->resumed);
+}
+
+bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev)
+{
+	return atomic_read(&sdiodev->freezer->freezing);
+}
+
+void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev)
+{
+	if (!brcmf_sdiod_freezing(sdiodev))
+		return;
+	sdiodev->freezer->frozen_count++;
+	wake_up(&sdiodev->freezer->thread_freeze);
+	wait_for_completion(&sdiodev->freezer->resumed);
+}
+
+void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev)
+{
+	atomic_inc(&sdiodev->freezer->thread_count);
+}
+
+void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev)
+{
+	atomic_dec(&sdiodev->freezer->thread_count);
+}
+#else
+static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev)
+{
+	return 0;
+}
+
+static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
+{
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
+{
+	sdiodev->state = BRCMF_SDIOD_DOWN;
+	if (sdiodev->bus) {
+		brcmf_sdio_remove(sdiodev->bus);
+		sdiodev->bus = NULL;
+	}
+
+	brcmf_sdiod_freezer_detach(sdiodev);
+
+	/* Disable Function 2 */
+	sdio_claim_host(sdiodev->func[2]);
+	sdio_disable_func(sdiodev->func[2]);
+	sdio_release_host(sdiodev->func[2]);
+
+	/* Disable Function 1 */
+	sdio_claim_host(sdiodev->func[1]);
+	sdio_disable_func(sdiodev->func[1]);
+	sdio_release_host(sdiodev->func[1]);
+
+	sg_free_table(&sdiodev->sgtable);
+	sdiodev->sbwad = 0;
+
+	pm_runtime_allow(sdiodev->func[1]->card->host->parent);
+	return 0;
+}
+
+static void brcmf_sdiod_host_fixup(struct mmc_host *host)
+{
+	/* runtime-pm powers off the device */
+	pm_runtime_forbid(host->parent);
+	/* avoid removal detection upon resume */
+	host->caps |= MMC_CAP_NONREMOVABLE;
+}
+
+static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
+{
+	struct sdio_func *func;
+	struct mmc_host *host;
+	uint max_blocks;
+	int ret = 0;
+
+	sdiodev->num_funcs = 2;
+
+	sdio_claim_host(sdiodev->func[1]);
+
+	ret = sdio_set_block_size(sdiodev->func[1], SDIO_FUNC1_BLOCKSIZE);
+	if (ret) {
+		brcmf_err("Failed to set F1 blocksize\n");
+		sdio_release_host(sdiodev->func[1]);
+		goto out;
+	}
+	ret = sdio_set_block_size(sdiodev->func[2], SDIO_FUNC2_BLOCKSIZE);
+	if (ret) {
+		brcmf_err("Failed to set F2 blocksize\n");
+		sdio_release_host(sdiodev->func[1]);
+		goto out;
+	}
+
+	/* increase F2 timeout */
+	sdiodev->func[2]->enable_timeout = SDIO_WAIT_F2RDY;
+
+	/* Enable Function 1 */
+	ret = sdio_enable_func(sdiodev->func[1]);
+	sdio_release_host(sdiodev->func[1]);
+	if (ret) {
+		brcmf_err("Failed to enable F1: err=%d\n", ret);
+		goto out;
+	}
+
+	/*
+	 * determine host related variables after brcmf_sdiod_probe()
+	 * as func->cur_blksize is properly set and F2 init has been
+	 * completed successfully.
+	 */
+	func = sdiodev->func[2];
+	host = func->card->host;
+	sdiodev->sg_support = host->max_segs > 1;
+	max_blocks = min_t(uint, host->max_blk_count, 511u);
+	sdiodev->max_request_size = min_t(uint, host->max_req_size,
+					  max_blocks * func->cur_blksize);
+	sdiodev->max_segment_count = min_t(uint, host->max_segs,
+					   SG_MAX_SINGLE_ALLOC);
+	sdiodev->max_segment_size = host->max_seg_size;
+
+	/* allocate scatter-gather table. sg support
+	 * will be disabled upon allocation failure.
+	 */
+	brcmf_sdiod_sgtable_alloc(sdiodev);
+
+	ret = brcmf_sdiod_freezer_attach(sdiodev);
+	if (ret)
+		goto out;
+
+	/* try to attach to the target device */
+	sdiodev->bus = brcmf_sdio_probe(sdiodev);
+	if (!sdiodev->bus) {
+		ret = -ENODEV;
+		goto out;
+	}
+	brcmf_sdiod_host_fixup(host);
+out:
+	if (ret)
+		brcmf_sdiod_remove(sdiodev);
+
+	return ret;
+}
+
+#define BRCMF_SDIO_DEVICE(dev_id)	\
+	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, dev_id)}
+
+/* devices we support, null terminated */
+static const struct sdio_device_id brcmf_sdmmc_ids[] = {
+	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43143),
+	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43241),
+	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4329),
+	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4330),
+	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4334),
+	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43340),
+	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43341),
+	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43362),
+	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339),
+	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430),
+	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4345),
+	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354),
+	{ /* end: all zeroes */ }
+};
+MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
+
+static struct brcmfmac_sdio_platform_data *brcmfmac_sdio_pdata;
+
+static void brcmf_sdiod_acpi_set_power_manageable(struct device *dev,
+						  int val)
+{
+#if IS_ENABLED(CONFIG_ACPI)
+	struct acpi_device *adev;
+
+	adev = ACPI_COMPANION(dev);
+	if (adev)
+		adev->flags.power_manageable = 0;
+#endif
+}
+
+static int brcmf_ops_sdio_probe(struct sdio_func *func,
+				const struct sdio_device_id *id)
+{
+	int err;
+	struct brcmf_sdio_dev *sdiodev;
+	struct brcmf_bus *bus_if;
+	struct device *dev;
+
+	brcmf_dbg(SDIO, "Enter\n");
+	brcmf_dbg(SDIO, "Class=%x\n", func->class);
+	brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
+	brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
+	brcmf_dbg(SDIO, "Function#: %d\n", func->num);
+
+	dev = &func->dev;
+	/* prohibit ACPI power management for this device */
+	brcmf_sdiod_acpi_set_power_manageable(dev, 0);
+
+	/* Consume func num 1 but don't do anything with it. */
+	if (func->num == 1)
+		return 0;
+
+	/* Ignore anything but func 2 */
+	if (func->num != 2)
+		return -ENODEV;
+
+	bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL);
+	if (!bus_if)
+		return -ENOMEM;
+	sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
+	if (!sdiodev) {
+		kfree(bus_if);
+		return -ENOMEM;
+	}
+
+	/* store refs to functions used. mmc_card does
+	 * not hold the F0 function pointer.
+	 */
+	sdiodev->func[0] = kmemdup(func, sizeof(*func), GFP_KERNEL);
+	sdiodev->func[0]->num = 0;
+	sdiodev->func[1] = func->card->sdio_func[0];
+	sdiodev->func[2] = func;
+
+	sdiodev->bus_if = bus_if;
+	bus_if->bus_priv.sdio = sdiodev;
+	bus_if->proto_type = BRCMF_PROTO_BCDC;
+	dev_set_drvdata(&func->dev, bus_if);
+	dev_set_drvdata(&sdiodev->func[1]->dev, bus_if);
+	sdiodev->dev = &sdiodev->func[1]->dev;
+	sdiodev->pdata = brcmfmac_sdio_pdata;
+
+	if (!sdiodev->pdata)
+		brcmf_of_probe(sdiodev);
+
+#ifdef CONFIG_PM_SLEEP
+	/* WOWL can be supported when KEEP_POWER is set and either
+	 * WAKE_SDIO_IRQ is set or the platform data provides an OOB IRQ.
+	 */
+	if ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_KEEP_POWER) &&
+	    ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_WAKE_SDIO_IRQ) ||
+	     (sdiodev->pdata && sdiodev->pdata->oob_irq_supported)))
+		bus_if->wowl_supported = true;
+#endif
+
+	brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_DOWN);
+
+	brcmf_dbg(SDIO, "F2 found, calling brcmf_sdiod_probe...\n");
+	err = brcmf_sdiod_probe(sdiodev);
+	if (err) {
+		brcmf_err("F2 error, probe failed %d...\n", err);
+		goto fail;
+	}
+
+	brcmf_dbg(SDIO, "F2 init completed...\n");
+	return 0;
+
+fail:
+	dev_set_drvdata(&func->dev, NULL);
+	dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
+	kfree(sdiodev->func[0]);
+	kfree(sdiodev);
+	kfree(bus_if);
+	return err;
+}
+
+static void brcmf_ops_sdio_remove(struct sdio_func *func)
+{
+	struct brcmf_bus *bus_if;
+	struct brcmf_sdio_dev *sdiodev;
+
+	brcmf_dbg(SDIO, "Enter\n");
+	brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
+	brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
+	brcmf_dbg(SDIO, "Function: %d\n", func->num);
+
+	if (func->num != 1)
+		return;
+
+	bus_if = dev_get_drvdata(&func->dev);
+	if (bus_if) {
+		sdiodev = bus_if->bus_priv.sdio;
+		brcmf_sdiod_remove(sdiodev);
+
+		dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
+		dev_set_drvdata(&sdiodev->func[2]->dev, NULL);
+
+		kfree(bus_if);
+		kfree(sdiodev->func[0]);
+		kfree(sdiodev);
+	}
+
+	brcmf_dbg(SDIO, "Exit\n");
+}
+
+void brcmf_sdio_wowl_config(struct device *dev, bool enabled)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+
+	brcmf_dbg(SDIO, "Configuring WOWL, enabled=%d\n", enabled);
+	sdiodev->wowl_enabled = enabled;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int brcmf_ops_sdio_suspend(struct device *dev)
+{
+	struct sdio_func *func;
+	struct brcmf_bus *bus_if;
+	struct brcmf_sdio_dev *sdiodev;
+	mmc_pm_flag_t sdio_flags;
+
+	func = container_of(dev, struct sdio_func, dev);
+	brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
+	if (func->num != SDIO_FUNC_1)
+		return 0;
+
+	bus_if = dev_get_drvdata(dev);
+	sdiodev = bus_if->bus_priv.sdio;
+
+	brcmf_sdiod_freezer_on(sdiodev);
+	brcmf_sdio_wd_timer(sdiodev->bus, 0);
+
+	sdio_flags = MMC_PM_KEEP_POWER;
+	if (sdiodev->wowl_enabled) {
+		if (sdiodev->pdata->oob_irq_supported)
+			enable_irq_wake(sdiodev->pdata->oob_irq_nr);
+		else
+			sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
+	}
+	if (sdio_set_host_pm_flags(sdiodev->func[1], sdio_flags))
+		brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
+	return 0;
+}
+
+static int brcmf_ops_sdio_resume(struct device *dev)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+	struct sdio_func *func = container_of(dev, struct sdio_func, dev);
+
+	brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
+	if (func->num != SDIO_FUNC_2)
+		return 0;
+
+	brcmf_sdiod_freezer_off(sdiodev);
+	return 0;
+}
+
+static const struct dev_pm_ops brcmf_sdio_pm_ops = {
+	.suspend	= brcmf_ops_sdio_suspend,
+	.resume		= brcmf_ops_sdio_resume,
+};
+#endif	/* CONFIG_PM_SLEEP */
+
+static struct sdio_driver brcmf_sdmmc_driver = {
+	.probe = brcmf_ops_sdio_probe,
+	.remove = brcmf_ops_sdio_remove,
+	.name = BRCMFMAC_SDIO_PDATA_NAME,
+	.id_table = brcmf_sdmmc_ids,
+	.drv = {
+		.owner = THIS_MODULE,
+#ifdef CONFIG_PM_SLEEP
+		.pm = &brcmf_sdio_pm_ops,
+#endif	/* CONFIG_PM_SLEEP */
+	},
+};
+
+static int __init brcmf_sdio_pd_probe(struct platform_device *pdev)
+{
+	brcmf_dbg(SDIO, "Enter\n");
+
+	brcmfmac_sdio_pdata = dev_get_platdata(&pdev->dev);
+
+	if (brcmfmac_sdio_pdata->power_on)
+		brcmfmac_sdio_pdata->power_on();
+
+	return 0;
+}
+
+static int brcmf_sdio_pd_remove(struct platform_device *pdev)
+{
+	brcmf_dbg(SDIO, "Enter\n");
+
+	if (brcmfmac_sdio_pdata->power_off)
+		brcmfmac_sdio_pdata->power_off();
+
+	sdio_unregister_driver(&brcmf_sdmmc_driver);
+
+	return 0;
+}
+
+static struct platform_driver brcmf_sdio_pd = {
+	.remove		= brcmf_sdio_pd_remove,
+	.driver		= {
+		.name	= BRCMFMAC_SDIO_PDATA_NAME,
+	}
+};
+
+void brcmf_sdio_register(void)
+{
+	int ret;
+
+	ret = sdio_register_driver(&brcmf_sdmmc_driver);
+	if (ret)
+		brcmf_err("sdio_register_driver failed: %d\n", ret);
+}
+
+void brcmf_sdio_exit(void)
+{
+	brcmf_dbg(SDIO, "Enter\n");
+
+	if (brcmfmac_sdio_pdata)
+		platform_driver_unregister(&brcmf_sdio_pd);
+	else
+		sdio_unregister_driver(&brcmf_sdmmc_driver);
+}
+
+void __init brcmf_sdio_init(void)
+{
+	int ret;
+
+	brcmf_dbg(SDIO, "Enter\n");
+
+	ret = platform_driver_probe(&brcmf_sdio_pd, brcmf_sdio_pd_probe);
+	if (ret == -ENODEV)
+		brcmf_dbg(SDIO, "No platform data available.\n");
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/btcoex.c b/drivers/net/wireless/brcm80211/brcmfmac/btcoex.c
new file mode 100644
index 0000000..4e33f96
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/btcoex.c
@@ -0,0 +1,497 @@
+/*
+ * Copyright (c) 2013 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <net/cfg80211.h>
+
+#include <brcmu_wifi.h>
+#include <brcmu_utils.h>
+#include <defs.h>
+#include "core.h"
+#include "debug.h"
+#include "fwil.h"
+#include "fwil_types.h"
+#include "btcoex.h"
+#include "p2p.h"
+#include "cfg80211.h"
+
+/* T1 start SCO/eSCO priority suppression */
+#define BRCMF_BTCOEX_OPPR_WIN_TIME   2000
+
+/* BT registers values during DHCP */
+#define BRCMF_BT_DHCP_REG50 0x8022
+#define BRCMF_BT_DHCP_REG51 0
+#define BRCMF_BT_DHCP_REG64 0
+#define BRCMF_BT_DHCP_REG65 0
+#define BRCMF_BT_DHCP_REG71 0
+#define BRCMF_BT_DHCP_REG66 0x2710
+#define BRCMF_BT_DHCP_REG41 0x33
+#define BRCMF_BT_DHCP_REG68 0x190
+
+/* number of samples for SCO detection */
+#define BRCMF_BT_SCO_SAMPLES 12
+
+/**
+* enum brcmf_btcoex_state - BT coex DHCP state machine states
+* @BRCMF_BT_DHCP_IDLE: DHCP is idle
+* @BRCMF_BT_DHCP_START: DHCP started, wait before
+*	boosting wifi priority
+* @BRCMF_BT_DHCP_OPPR_WIN: graceful DHCP opportunity ended,
+*	boost wifi priority
+* @BRCMF_BT_DHCP_FLAG_FORCE_TIMEOUT: wifi priority boost ended,
+*	restore defaults
+*/
+enum brcmf_btcoex_state {
+	BRCMF_BT_DHCP_IDLE,
+	BRCMF_BT_DHCP_START,
+	BRCMF_BT_DHCP_OPPR_WIN,
+	BRCMF_BT_DHCP_FLAG_FORCE_TIMEOUT
+};
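+
+/*
+ * Rough state flow implied by the states above (the work function driving
+ * the transitions appears later in this file): IDLE -> START when DHCP
+ * begins; START -> OPPR_WIN once the initial opportunity window expires and
+ * wifi priority is boosted; OPPR_WIN -> FLAG_FORCE_TIMEOUT when the boost
+ * period ends and the saved btc_params are restored; then back to IDLE.
+ */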
+
+/**
+ * struct brcmf_btcoex_info - BT coex related information
+ * @vif: interface for which request was done.
+ * @timer: timer for DHCP state machine
+ * @timeout: configured timeout.
+ * @timer_on:  DHCP timer active
+ * @dhcp_done: DHCP finished before T1/T2 timer expiration
+ * @bt_state: DHCP state machine state
+ * @work: DHCP state machine work
+ * @cfg: driver private data for cfg80211 interface
+ * @reg66: saved value of btc_params 66
+ * @reg41: saved value of btc_params 41
+ * @reg68: saved value of btc_params 68
+ * @saved_regs_part1: flag indicating regs 66,41,68
+ *	have been saved
+ * @reg50: saved value of btc_params 50
+ * @reg51: saved value of btc_params 51
+ * @reg64: saved value of btc_params 64
+ * @reg65: saved value of btc_params 65
+ * @reg71: saved value of btc_params 71
+ * @saved_regs_part2: flag indicating regs 50,51,64,65,71
+ *	have been saved
+ */
+struct brcmf_btcoex_info {
+	struct brcmf_cfg80211_vif *vif;
+	struct timer_list timer;
+	u16 timeout;
+	bool timer_on;
+	bool dhcp_done;
+	enum brcmf_btcoex_state bt_state;
+	struct work_struct work;
+	struct brcmf_cfg80211_info *cfg;
+	u32 reg66;
+	u32 reg41;
+	u32 reg68;
+	bool saved_regs_part1;
+	u32 reg50;
+	u32 reg51;
+	u32 reg64;
+	u32 reg65;
+	u32 reg71;
+	bool saved_regs_part2;
+};
+
+/**
+ * brcmf_btcoex_params_write() - write btc_params firmware variable
+ * @ifp: interface
+ * @addr: btc_params register number
+ * @data: data to write
+ */
+static s32 brcmf_btcoex_params_write(struct brcmf_if *ifp, u32 addr, u32 data)
+{
+	struct {
+		__le32 addr;
+		__le32 data;
+	} reg_write;
+
+	reg_write.addr = cpu_to_le32(addr);
+	reg_write.data = cpu_to_le32(data);
+	return brcmf_fil_iovar_data_set(ifp, "btc_params",
+					&reg_write, sizeof(reg_write));
+}
+
+/**
+ * brcmf_btcoex_params_read() - read btc_params firmware variable
+ * @ifp: interface
+ * @addr: btc_params register number
+ * @data: read data
+ */
+static s32 brcmf_btcoex_params_read(struct brcmf_if *ifp, u32 addr, u32 *data)
+{
+	*data = addr;
+
+	return brcmf_fil_iovar_int_get(ifp, "btc_params", data);
+}
+
+/**
+ * brcmf_btcoex_boost_wifi() - control BT SCO/eSCO parameters
+ * @btci: BT coex info
+ * @trump_sco:
+ *	true - set SCO/eSCO parameters for compatibility
+ *		during DHCP window
+ *	false - restore saved parameter values
+ *
+ * Enhanced BT COEX settings for eSCO compatibility during DHCP window
+ */
+static void brcmf_btcoex_boost_wifi(struct brcmf_btcoex_info *btci,
+				    bool trump_sco)
+{
+	struct brcmf_if *ifp = brcmf_get_ifp(btci->cfg->pub, 0);
+
+	if (trump_sco && !btci->saved_regs_part2) {
+		/* this should reduce eSCO aggressive
+		 * retransmission w/o breaking it
+		 */
+
+		/* save current */
+		brcmf_dbg(INFO, "new SCO/eSCO coex algo {save & override}\n");
+		brcmf_btcoex_params_read(ifp, 50, &btci->reg50);
+		brcmf_btcoex_params_read(ifp, 51, &btci->reg51);
+		brcmf_btcoex_params_read(ifp, 64, &btci->reg64);
+		brcmf_btcoex_params_read(ifp, 65, &btci->reg65);
+		brcmf_btcoex_params_read(ifp, 71, &btci->reg71);
+
+		btci->saved_regs_part2 = true;
+		brcmf_dbg(INFO,
+			  "saved bt_params[50,51,64,65,71]: 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+			  btci->reg50, btci->reg51, btci->reg64,
+			  btci->reg65, btci->reg71);
+
+		/* pacify the eSCO */
+		brcmf_btcoex_params_write(ifp, 50, BRCMF_BT_DHCP_REG50);
+		brcmf_btcoex_params_write(ifp, 51, BRCMF_BT_DHCP_REG51);
+		brcmf_btcoex_params_write(ifp, 64, BRCMF_BT_DHCP_REG64);
+		brcmf_btcoex_params_write(ifp, 65, BRCMF_BT_DHCP_REG65);
+		brcmf_btcoex_params_write(ifp, 71, BRCMF_BT_DHCP_REG71);
+
+	} else if (btci->saved_regs_part2) {
+		/* restore previously saved bt params */
+		brcmf_dbg(INFO, "Do new SCO/eSCO coex algo {restore}\n");
+		brcmf_btcoex_params_write(ifp, 50, btci->reg50);
+		brcmf_btcoex_params_write(ifp, 51, btci->reg51);
+		brcmf_btcoex_params_write(ifp, 64, btci->reg64);
+		brcmf_btcoex_params_write(ifp, 65, btci->reg65);
+		brcmf_btcoex_params_write(ifp, 71, btci->reg71);
+
+		brcmf_dbg(INFO,
+			  "restored bt_params[50,51,64,65,71]: 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+			  btci->reg50, btci->reg51, btci->reg64,
+			  btci->reg65, btci->reg71);
+
+		btci->saved_regs_part2 = false;
+	} else {
+		brcmf_dbg(INFO, "attempted to restore BTCOEX params that were never saved\n");
+	}
+}
+
+/**
+ * brcmf_btcoex_is_sco_active() - check if SCO/eSCO is active
+ * @ifp: interface
+ *
+ * return: true if SCO/eSCO session is active
+ */
+static bool brcmf_btcoex_is_sco_active(struct brcmf_if *ifp)
+{
+	int ioc_res = 0;
+	bool res = false;
+	int sco_id_cnt = 0;
+	u32 param27;
+	int i;
+
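+	/* sample btc_params 27 a number of times; more than two SCO/eSCO
+	 * hits means an active session
+	 */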
+	for (i = 0; i < BRCMF_BT_SCO_SAMPLES; i++) {
+		ioc_res = brcmf_btcoex_params_read(ifp, 27, &param27);
+
+		if (ioc_res < 0) {
+			brcmf_err("ioc read btc params error\n");
+			break;
+		}
+
+		brcmf_dbg(INFO, "sample[%d], btc_params 27:%x\n", i, param27);
+
+		if ((param27 & 0x6) == 2) { /* count both sco & esco  */
+			sco_id_cnt++;
+		}
+
+		if (sco_id_cnt > 2) {
+			brcmf_dbg(INFO,
+				  "sco/esco detected, pkt id_cnt:%d samples:%d\n",
+				  sco_id_cnt, i);
+			res = true;
+			break;
+		}
+	}
+	brcmf_dbg(TRACE, "exit: result=%d\n", res);
+	return res;
+}
+
+/**
+ * btcmf_btcoex_save_part1() - save first step parameters.
+ * @btci: BT coex info
+ */
+static void btcmf_btcoex_save_part1(struct brcmf_btcoex_info *btci)
+{
+	struct brcmf_if *ifp = btci->vif->ifp;
+
+	if (!btci->saved_regs_part1) {
+		/* Retrieve and save original reg value */
+		brcmf_btcoex_params_read(ifp, 66, &btci->reg66);
+		brcmf_btcoex_params_read(ifp, 41, &btci->reg41);
+		brcmf_btcoex_params_read(ifp, 68, &btci->reg68);
+		btci->saved_regs_part1 = true;
+		brcmf_dbg(INFO,
+			  "saved btc_params regs (66,41,68) 0x%x 0x%x 0x%x\n",
+			  btci->reg66, btci->reg41,
+			  btci->reg68);
+	}
+}
+
+/**
+ * brcmf_btcoex_restore_part1() - restore first step parameters.
+ * @btci: BT coex info
+ */
+static void brcmf_btcoex_restore_part1(struct brcmf_btcoex_info *btci)
+{
+	struct brcmf_if *ifp;
+
+	if (btci->saved_regs_part1) {
+		btci->saved_regs_part1 = false;
+		ifp = btci->vif->ifp;
+		brcmf_btcoex_params_write(ifp, 66, btci->reg66);
+		brcmf_btcoex_params_write(ifp, 41, btci->reg41);
+		brcmf_btcoex_params_write(ifp, 68, btci->reg68);
+		brcmf_dbg(INFO,
+			  "restored btc_params regs {66,41,68} 0x%x 0x%x 0x%x\n",
+			  btci->reg66, btci->reg41,
+			  btci->reg68);
+	}
+}
+
+/**
+ * brcmf_btcoex_timerfunc() - BT coex timer callback
+ * @data: BT coex info (cast to unsigned long)
+ */
+static void brcmf_btcoex_timerfunc(ulong data)
+{
+	struct brcmf_btcoex_info *bt_local = (struct brcmf_btcoex_info *)data;
+	brcmf_dbg(TRACE, "enter\n");
+
+	bt_local->timer_on = false;
+	schedule_work(&bt_local->work);
+}
+
+/**
+ * brcmf_btcoex_handler() - BT coex state machine work handler
+ * @work: work
+ */
+static void brcmf_btcoex_handler(struct work_struct *work)
+{
+	struct brcmf_btcoex_info *btci;
+	btci = container_of(work, struct brcmf_btcoex_info, work);
+	if (btci->timer_on) {
+		btci->timer_on = false;
+		del_timer_sync(&btci->timer);
+	}
+
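+	/* state machine: START -> OPPR_WIN -> FLAG_FORCE_TIMEOUT -> IDLE */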
+	switch (btci->bt_state) {
+	case BRCMF_BT_DHCP_START:
+		/* DHCP started provide OPPORTUNITY window
+		   to get DHCP address
+		*/
+		brcmf_dbg(INFO, "DHCP started\n");
+		btci->bt_state = BRCMF_BT_DHCP_OPPR_WIN;
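+		/* T1: give DHCP an opportunity window first; whatever is
+		 * left of the requested timeout is used as T2 for the
+		 * wifi priority boost
+		 */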
+		if (btci->timeout < BRCMF_BTCOEX_OPPR_WIN_TIME) {
+			mod_timer(&btci->timer, btci->timer.expires);
+		} else {
+			btci->timeout -= BRCMF_BTCOEX_OPPR_WIN_TIME;
+			mod_timer(&btci->timer,
+				  jiffies +
+				  msecs_to_jiffies(BRCMF_BTCOEX_OPPR_WIN_TIME));
+		}
+		btci->timer_on = true;
+		break;
+
+	case BRCMF_BT_DHCP_OPPR_WIN:
+		if (btci->dhcp_done) {
+			brcmf_dbg(INFO, "DHCP done before T1 expiration\n");
+			goto idle;
+		}
+
+		/* DHCP is not over yet, start lowering BT priority */
+		brcmf_dbg(INFO, "DHCP T1:%d expired\n",
+			  BRCMF_BTCOEX_OPPR_WIN_TIME);
+		brcmf_btcoex_boost_wifi(btci, true);
+
+		btci->bt_state = BRCMF_BT_DHCP_FLAG_FORCE_TIMEOUT;
+		mod_timer(&btci->timer,
+			  jiffies + msecs_to_jiffies(btci->timeout));
+		btci->timer_on = true;
+		break;
+
+	case BRCMF_BT_DHCP_FLAG_FORCE_TIMEOUT:
+		if (btci->dhcp_done)
+			brcmf_dbg(INFO, "DHCP done before T2 expiration\n");
+		else
+			brcmf_dbg(INFO, "DHCP T2:%d expired\n",
+				  btci->timeout);
+
+		goto idle;
+
+	default:
+		brcmf_err("invalid state=%d !!!\n", btci->bt_state);
+		goto idle;
+	}
+
+	return;
+
+idle:
+	btci->bt_state = BRCMF_BT_DHCP_IDLE;
+	btci->timer_on = false;
+	brcmf_btcoex_boost_wifi(btci, false);
+	cfg80211_crit_proto_stopped(&btci->vif->wdev, GFP_KERNEL);
+	brcmf_btcoex_restore_part1(btci);
+	btci->vif = NULL;
+}
+
+/**
+ * brcmf_btcoex_attach() - initialize BT coex data
+ * @cfg: driver private cfg80211 data
+ *
+ * return: 0 on success
+ */
+int brcmf_btcoex_attach(struct brcmf_cfg80211_info *cfg)
+{
+	struct brcmf_btcoex_info *btci = NULL;
+	brcmf_dbg(TRACE, "enter\n");
+
+	btci = kmalloc(sizeof(struct brcmf_btcoex_info), GFP_KERNEL);
+	if (!btci)
+		return -ENOMEM;
+
+	btci->bt_state = BRCMF_BT_DHCP_IDLE;
+
+	/* Set up timer for BT  */
+	btci->timer_on = false;
+	btci->timeout = BRCMF_BTCOEX_OPPR_WIN_TIME;
+	init_timer(&btci->timer);
+	btci->timer.data = (ulong)btci;
+	btci->timer.function = brcmf_btcoex_timerfunc;
+	btci->cfg = cfg;
+	btci->saved_regs_part1 = false;
+	btci->saved_regs_part2 = false;
+
+	INIT_WORK(&btci->work, brcmf_btcoex_handler);
+
+	cfg->btcoex = btci;
+	return 0;
+}
+
+/**
+ * brcmf_btcoex_detach() - clean BT coex data
+ * @cfg: driver private cfg80211 data
+ */
+void brcmf_btcoex_detach(struct brcmf_cfg80211_info *cfg)
+{
+	brcmf_dbg(TRACE, "enter\n");
+
+	if (!cfg->btcoex)
+		return;
+
+	if (cfg->btcoex->timer_on) {
+		cfg->btcoex->timer_on = false;
+		del_timer_sync(&cfg->btcoex->timer);
+	}
+
+	cancel_work_sync(&cfg->btcoex->work);
+
+	brcmf_btcoex_boost_wifi(cfg->btcoex, false);
+	brcmf_btcoex_restore_part1(cfg->btcoex);
+
+	kfree(cfg->btcoex);
+	cfg->btcoex = NULL;
+}
+
+static void brcmf_btcoex_dhcp_start(struct brcmf_btcoex_info *btci)
+{
+	struct brcmf_if *ifp = btci->vif->ifp;
+
+	btcmf_btcoex_save_part1(btci);
+	/* set new regs values */
+	brcmf_btcoex_params_write(ifp, 66, BRCMF_BT_DHCP_REG66);
+	brcmf_btcoex_params_write(ifp, 41, BRCMF_BT_DHCP_REG41);
+	brcmf_btcoex_params_write(ifp, 68, BRCMF_BT_DHCP_REG68);
+	btci->dhcp_done = false;
+	btci->bt_state = BRCMF_BT_DHCP_START;
+	schedule_work(&btci->work);
+	brcmf_dbg(TRACE, "enable BT DHCP Timer\n");
+}
+
+static void brcmf_btcoex_dhcp_end(struct brcmf_btcoex_info *btci)
+{
+	/* Stop any bt timer because DHCP session is done */
+	btci->dhcp_done = true;
+	if (btci->timer_on) {
+		brcmf_dbg(INFO, "disable BT DHCP Timer\n");
+		btci->timer_on = false;
+		del_timer_sync(&btci->timer);
+
+		/* schedule worker if transition to IDLE is needed */
+		if (btci->bt_state != BRCMF_BT_DHCP_IDLE) {
+			brcmf_dbg(INFO, "bt_state:%d\n",
+				  btci->bt_state);
+			schedule_work(&btci->work);
+		}
+	} else {
+		/* Restore original values */
+		brcmf_btcoex_restore_part1(btci);
+	}
+}
+
+/**
+ * brcmf_btcoex_set_mode() - set BT coex mode
+ * @vif: interface for which the mode change is requested
+ * @mode: Wifi-Bluetooth coexistence mode
+ * @duration: DHCP window duration in milliseconds
+ *
+ * return: 0 on success
+ */
+int brcmf_btcoex_set_mode(struct brcmf_cfg80211_vif *vif,
+			  enum brcmf_btcoex_mode mode, u16 duration)
+{
+	struct brcmf_cfg80211_info *cfg = wiphy_priv(vif->wdev.wiphy);
+	struct brcmf_btcoex_info *btci = cfg->btcoex;
+	struct brcmf_if *ifp = brcmf_get_ifp(cfg->pub, 0);
+
+	switch (mode) {
+	case BRCMF_BTCOEX_DISABLED:
+		brcmf_dbg(INFO, "DHCP session starts\n");
+		if (btci->bt_state != BRCMF_BT_DHCP_IDLE)
+			return -EBUSY;
+		/* Start BT timer only for SCO connection */
+		if (brcmf_btcoex_is_sco_active(ifp)) {
+			btci->timeout = duration;
+			btci->vif = vif;
+			brcmf_btcoex_dhcp_start(btci);
+		}
+		break;
+
+	case BRCMF_BTCOEX_ENABLED:
+		brcmf_dbg(INFO, "DHCP session ends\n");
+		if (btci->bt_state != BRCMF_BT_DHCP_IDLE &&
+		    vif == btci->vif) {
+			brcmf_btcoex_dhcp_end(btci);
+		}
+		break;
+	default:
+		brcmf_dbg(INFO, "Unknown mode, ignored\n");
+	}
+	return 0;
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/btcoex.h b/drivers/net/wireless/brcm80211/brcmfmac/btcoex.h
new file mode 100644
index 0000000..19647c6
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/btcoex.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2013 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef WL_BTCOEX_H_
+#define WL_BTCOEX_H_
+
+enum brcmf_btcoex_mode {
+	BRCMF_BTCOEX_DISABLED,
+	BRCMF_BTCOEX_ENABLED
+};
+
+int brcmf_btcoex_attach(struct brcmf_cfg80211_info *cfg);
+void brcmf_btcoex_detach(struct brcmf_cfg80211_info *cfg);
+int brcmf_btcoex_set_mode(struct brcmf_cfg80211_vif *vif,
+			  enum brcmf_btcoex_mode mode, u16 duration);
+
+#endif /* WL_BTCOEX_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/brcm80211/brcmfmac/bus.h
new file mode 100644
index 0000000..230cad7
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bus.h
@@ -0,0 +1,248 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef BRCMFMAC_BUS_H
+#define BRCMFMAC_BUS_H
+
+#include "debug.h"
+
+/* IDs of the 5 default common rings of msgbuf protocol */
+#define BRCMF_H2D_MSGRING_CONTROL_SUBMIT	0
+#define BRCMF_H2D_MSGRING_RXPOST_SUBMIT		1
+#define BRCMF_D2H_MSGRING_CONTROL_COMPLETE	2
+#define BRCMF_D2H_MSGRING_TX_COMPLETE		3
+#define BRCMF_D2H_MSGRING_RX_COMPLETE		4
+
+#define BRCMF_NROF_H2D_COMMON_MSGRINGS		2
+#define BRCMF_NROF_D2H_COMMON_MSGRINGS		3
+#define BRCMF_NROF_COMMON_MSGRINGS	(BRCMF_NROF_H2D_COMMON_MSGRINGS + \
+					 BRCMF_NROF_D2H_COMMON_MSGRINGS)
+
+/* The level of bus communication with the dongle */
+enum brcmf_bus_state {
+	BRCMF_BUS_DOWN,		/* Not ready for frame transfers */
+	BRCMF_BUS_UP		/* Ready for frame transfers */
+};
+
+/* The type of bus protocol used to communicate with the dongle */
+enum brcmf_bus_protocol_type {
+	BRCMF_PROTO_BCDC,
+	BRCMF_PROTO_MSGBUF
+};
+
+struct brcmf_bus_dcmd {
+	char *name;
+	char *param;
+	int param_len;
+	struct list_head list;
+};
+
+/**
+ * struct brcmf_bus_ops - bus callback operations.
+ *
+ * @preinit: execute bus/device specific dongle init commands (optional).
+ * @init: prepare for communication with dongle.
+ * @stop: clear pending frames, disable data flow.
+ * @txdata: send a data frame to the dongle. When the data
+ *	has been transferred, the common driver must be
+ *	notified using brcmf_txcomplete(). The common
+ *	driver calls this function with interrupts
+ *	disabled.
+ * @txctl: transmit a control request message to dongle.
+ * @rxctl: receive a control response message from dongle.
+ * @gettxq: obtain a reference of bus transmit queue (optional).
+ * @wowl_config: specify if dongle is configured for wowl when going to suspend
+ * @get_ramsize: obtain size of device memory.
+ * @get_memdump: obtain device memory dump in provided buffer.
+ *
+ * This structure provides an abstract interface towards the
+ * bus specific driver. For control messages the common driver
+ * will assure there is only one active transaction. Unless
+ * indicated otherwise these callbacks are mandatory.
+ */
+struct brcmf_bus_ops {
+	int (*preinit)(struct device *dev);
+	void (*stop)(struct device *dev);
+	int (*txdata)(struct device *dev, struct sk_buff *skb);
+	int (*txctl)(struct device *dev, unsigned char *msg, uint len);
+	int (*rxctl)(struct device *dev, unsigned char *msg, uint len);
+	struct pktq * (*gettxq)(struct device *dev);
+	void (*wowl_config)(struct device *dev, bool enabled);
+	size_t (*get_ramsize)(struct device *dev);
+	int (*get_memdump)(struct device *dev, void *data, size_t len);
+};
+
+
+/**
+ * struct brcmf_bus_msgbuf - bus ring buffer info used by the msgbuf protocol.
+ *
+ * @commonrings: commonrings which are always there.
+ * @flowrings: flowrings which are dynamically created and destroyed for data.
+ * @rx_dataoffset: if set then all rx data has this offset.
+ * @max_rxbufpost: maximum number of buffers to post for rx.
+ * @nrof_flowrings: number of flowrings.
+ */
+struct brcmf_bus_msgbuf {
+	struct brcmf_commonring *commonrings[BRCMF_NROF_COMMON_MSGRINGS];
+	struct brcmf_commonring **flowrings;
+	u32 rx_dataoffset;
+	u32 max_rxbufpost;
+	u32 nrof_flowrings;
+};
+
+
+/**
+ * struct brcmf_bus - interface structure between common and bus layer
+ *
+ * @bus_priv: pointer to private bus device.
+ * @proto_type: protocol type, bcdc or msgbuf.
+ * @dev: device pointer of bus device.
+ * @drvr: public driver information.
+ * @state: operational state of the bus interface.
+ * @maxctl: maximum size for rxctl request message.
+ * @tx_realloc: number of tx packets realloced for headroom.
+ * @chip: device identifier of the dongle chip.
+ * @chiprev: revision of the dongle chip.
+ * @always_use_fws_queue: bus always uses the fwsignal queue for tx packets.
+ * @wowl_supported: is wowl supported by bus driver.
+ * @ops: callbacks provided by the bus specific driver.
+ * @msgbuf: msgbuf protocol ring information provided by the bus layer.
+ */
+struct brcmf_bus {
+	union {
+		struct brcmf_sdio_dev *sdio;
+		struct brcmf_usbdev *usb;
+		struct brcmf_pciedev *pcie;
+	} bus_priv;
+	enum brcmf_bus_protocol_type proto_type;
+	struct device *dev;
+	struct brcmf_pub *drvr;
+	enum brcmf_bus_state state;
+	uint maxctl;
+	unsigned long tx_realloc;
+	u32 chip;
+	u32 chiprev;
+	bool always_use_fws_queue;
+	bool wowl_supported;
+
+	struct brcmf_bus_ops *ops;
+	struct brcmf_bus_msgbuf *msgbuf;
+};
+
+/*
+ * callback wrappers
+ */
+static inline int brcmf_bus_preinit(struct brcmf_bus *bus)
+{
+	if (!bus->ops->preinit)
+		return 0;
+	return bus->ops->preinit(bus->dev);
+}
+
+static inline void brcmf_bus_stop(struct brcmf_bus *bus)
+{
+	bus->ops->stop(bus->dev);
+}
+
+static inline int brcmf_bus_txdata(struct brcmf_bus *bus, struct sk_buff *skb)
+{
+	return bus->ops->txdata(bus->dev, skb);
+}
+
+static inline
+int brcmf_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint len)
+{
+	return bus->ops->txctl(bus->dev, msg, len);
+}
+
+static inline
+int brcmf_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint len)
+{
+	return bus->ops->rxctl(bus->dev, msg, len);
+}
+
+static inline
+struct pktq *brcmf_bus_gettxq(struct brcmf_bus *bus)
+{
+	if (!bus->ops->gettxq)
+		return ERR_PTR(-ENOENT);
+
+	return bus->ops->gettxq(bus->dev);
+}
+
+static inline
+void brcmf_bus_wowl_config(struct brcmf_bus *bus, bool enabled)
+{
+	if (bus->ops->wowl_config)
+		bus->ops->wowl_config(bus->dev, enabled);
+}
+
+static inline size_t brcmf_bus_get_ramsize(struct brcmf_bus *bus)
+{
+	if (!bus->ops->get_ramsize)
+		return 0;
+
+	return bus->ops->get_ramsize(bus->dev);
+}
+
+static inline
+int brcmf_bus_get_memdump(struct brcmf_bus *bus, void *data, size_t len)
+{
+	if (!bus->ops->get_memdump)
+		return -EOPNOTSUPP;
+
+	return bus->ops->get_memdump(bus->dev, data, len);
+}
+
+/*
+ * interface functions from common layer
+ */
+
+bool brcmf_c_prec_enq(struct device *dev, struct pktq *q, struct sk_buff *pkt,
+		      int prec);
+
+/* Receive frame for delivery to OS.  Callee disposes of rxp. */
+void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp);
+
+/* Indication from bus module regarding presence/insertion of dongle. */
+int brcmf_attach(struct device *dev);
+/* Indication from bus module regarding removal/absence of dongle */
+void brcmf_detach(struct device *dev);
+/* Indication from bus module that dongle should be reset */
+void brcmf_dev_reset(struct device *dev);
+/* Indication from bus module to change flow-control state */
+void brcmf_txflowblock(struct device *dev, bool state);
+
+/* Notify the bus has transferred the tx packet to firmware */
+void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success);
+
+/* Configure the "global" bus state used by upper layers */
+void brcmf_bus_change_state(struct brcmf_bus *bus, enum brcmf_bus_state state);
+
+int brcmf_bus_start(struct device *dev);
+s32 brcmf_iovar_data_set(struct device *dev, char *name, void *data, u32 len);
+void brcmf_bus_add_txhdrlen(struct device *dev, uint len);
+
+#ifdef CONFIG_BRCMFMAC_SDIO
+void brcmf_sdio_exit(void);
+void brcmf_sdio_init(void);
+void brcmf_sdio_register(void);
+#endif
+#ifdef CONFIG_BRCMFMAC_USB
+void brcmf_usb_exit(void);
+void brcmf_usb_register(void);
+#endif
+
+#endif /* BRCMFMAC_BUS_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
new file mode 100644
index 0000000..83e5aa6
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
@@ -0,0 +1,6363 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* Toplevel file. Relies on dhd_linux.c to send commands to the dongle. */
+
+#include <linux/kernel.h>
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <net/cfg80211.h>
+#include <net/netlink.h>
+
+#include <brcmu_utils.h>
+#include <defs.h>
+#include <brcmu_wifi.h>
+#include "core.h"
+#include "debug.h"
+#include "tracepoint.h"
+#include "fwil_types.h"
+#include "p2p.h"
+#include "btcoex.h"
+#include "cfg80211.h"
+#include "feature.h"
+#include "fwil.h"
+#include "proto.h"
+#include "vendor.h"
+#include "bus.h"
+#include "common.h"
+
+#define BRCMF_SCAN_IE_LEN_MAX		2048
+#define BRCMF_PNO_VERSION		2
+#define BRCMF_PNO_TIME			30
+#define BRCMF_PNO_REPEAT		4
+#define BRCMF_PNO_FREQ_EXPO_MAX		3
+#define BRCMF_PNO_MAX_PFN_COUNT		16
+#define BRCMF_PNO_ENABLE_ADAPTSCAN_BIT	6
+#define BRCMF_PNO_HIDDEN_BIT		2
+#define BRCMF_PNO_WPA_AUTH_ANY		0xFFFFFFFF
+#define BRCMF_PNO_SCAN_COMPLETE		1
+#define BRCMF_PNO_SCAN_INCOMPLETE	0
+
+#define WPA_OUI				"\x00\x50\xF2"	/* WPA OUI */
+#define WPA_OUI_TYPE			1
+#define RSN_OUI				"\x00\x0F\xAC"	/* RSN OUI */
+#define	WME_OUI_TYPE			2
+#define WPS_OUI_TYPE			4
+
+#define VS_IE_FIXED_HDR_LEN		6
+#define WPA_IE_VERSION_LEN		2
+#define WPA_IE_MIN_OUI_LEN		4
+#define WPA_IE_SUITE_COUNT_LEN		2
+
+#define WPA_CIPHER_NONE			0	/* None */
+#define WPA_CIPHER_WEP_40		1	/* WEP (40-bit) */
+#define WPA_CIPHER_TKIP			2	/* TKIP: default for WPA */
+#define WPA_CIPHER_AES_CCM		4	/* AES (CCM) */
+#define WPA_CIPHER_WEP_104		5	/* WEP (104-bit) */
+
+#define RSN_AKM_NONE			0	/* None (IBSS) */
+#define RSN_AKM_UNSPECIFIED		1	/* Over 802.1x */
+#define RSN_AKM_PSK			2	/* Pre-shared Key */
+#define RSN_CAP_LEN			2	/* Length of RSN capabilities */
+#define RSN_CAP_PTK_REPLAY_CNTR_MASK	0x000C
+
+#define VNDR_IE_CMD_LEN			4	/* length of the set command
+						 * string: "add", "del" (+ NUL)
+						 */
+#define VNDR_IE_COUNT_OFFSET		4
+#define VNDR_IE_PKTFLAG_OFFSET		8
+#define VNDR_IE_VSIE_OFFSET		12
+#define VNDR_IE_HDR_SIZE		12
+#define VNDR_IE_PARSE_LIMIT		5
+
+#define	DOT11_MGMT_HDR_LEN		24	/* d11 management header len */
+#define	DOT11_BCN_PRB_FIXED_LEN		12	/* beacon/probe fixed length */
+
+#define BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS	320
+#define BRCMF_SCAN_JOIN_PASSIVE_DWELL_TIME_MS	400
+#define BRCMF_SCAN_JOIN_PROBE_INTERVAL_MS	20
+
+#define BRCMF_ASSOC_PARAMS_FIXED_SIZE \
+	(sizeof(struct brcmf_assoc_params_le) - sizeof(u16))
+
+static bool check_vif_up(struct brcmf_cfg80211_vif *vif)
+{
+	if (!test_bit(BRCMF_VIF_STATUS_READY, &vif->sme_state)) {
+		brcmf_dbg(INFO, "device is not ready : status (%lu)\n",
+			  vif->sme_state);
+		return false;
+	}
+	return true;
+}
+
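+/* firmware rate ids are in units of 500 kbps; cfg80211 bitrates are in
+ * units of 100 kbps, hence the conversion below
+ */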
+#define RATE_TO_BASE100KBPS(rate)   (((rate) * 10) / 2)
+#define RATETAB_ENT(_rateid, _flags) \
+	{                                                               \
+		.bitrate        = RATE_TO_BASE100KBPS(_rateid),     \
+		.hw_value       = (_rateid),                            \
+		.flags          = (_flags),                             \
+	}
+
+static struct ieee80211_rate __wl_rates[] = {
+	RATETAB_ENT(BRCM_RATE_1M, 0),
+	RATETAB_ENT(BRCM_RATE_2M, IEEE80211_RATE_SHORT_PREAMBLE),
+	RATETAB_ENT(BRCM_RATE_5M5, IEEE80211_RATE_SHORT_PREAMBLE),
+	RATETAB_ENT(BRCM_RATE_11M, IEEE80211_RATE_SHORT_PREAMBLE),
+	RATETAB_ENT(BRCM_RATE_6M, 0),
+	RATETAB_ENT(BRCM_RATE_9M, 0),
+	RATETAB_ENT(BRCM_RATE_12M, 0),
+	RATETAB_ENT(BRCM_RATE_18M, 0),
+	RATETAB_ENT(BRCM_RATE_24M, 0),
+	RATETAB_ENT(BRCM_RATE_36M, 0),
+	RATETAB_ENT(BRCM_RATE_48M, 0),
+	RATETAB_ENT(BRCM_RATE_54M, 0),
+};
+
+#define wl_g_rates		(__wl_rates + 0)
+#define wl_g_rates_size		ARRAY_SIZE(__wl_rates)
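+/* the 5GHz (a) band reuses the table above, skipping the 4 DSSS/CCK rates */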
+#define wl_a_rates		(__wl_rates + 4)
+#define wl_a_rates_size		(wl_g_rates_size - 4)
+
+#define CHAN2G(_channel, _freq) {				\
+	.band			= IEEE80211_BAND_2GHZ,		\
+	.center_freq		= (_freq),			\
+	.hw_value		= (_channel),			\
+	.flags			= IEEE80211_CHAN_DISABLED,	\
+	.max_antenna_gain	= 0,				\
+	.max_power		= 30,				\
+}
+
+#define CHAN5G(_channel) {					\
+	.band			= IEEE80211_BAND_5GHZ,		\
+	.center_freq		= 5000 + (5 * (_channel)),	\
+	.hw_value		= (_channel),			\
+	.flags			= IEEE80211_CHAN_DISABLED,	\
+	.max_antenna_gain	= 0,				\
+	.max_power		= 30,				\
+}
+
+static struct ieee80211_channel __wl_2ghz_channels[] = {
+	CHAN2G(1, 2412), CHAN2G(2, 2417), CHAN2G(3, 2422), CHAN2G(4, 2427),
+	CHAN2G(5, 2432), CHAN2G(6, 2437), CHAN2G(7, 2442), CHAN2G(8, 2447),
+	CHAN2G(9, 2452), CHAN2G(10, 2457), CHAN2G(11, 2462), CHAN2G(12, 2467),
+	CHAN2G(13, 2472), CHAN2G(14, 2484)
+};
+
+static struct ieee80211_channel __wl_5ghz_channels[] = {
+	CHAN5G(34), CHAN5G(36), CHAN5G(38), CHAN5G(40), CHAN5G(42),
+	CHAN5G(44), CHAN5G(46), CHAN5G(48), CHAN5G(52), CHAN5G(56),
+	CHAN5G(60), CHAN5G(64), CHAN5G(100), CHAN5G(104), CHAN5G(108),
+	CHAN5G(112), CHAN5G(116), CHAN5G(120), CHAN5G(124), CHAN5G(128),
+	CHAN5G(132), CHAN5G(136), CHAN5G(140), CHAN5G(144), CHAN5G(149),
+	CHAN5G(153), CHAN5G(157), CHAN5G(161), CHAN5G(165)
+};
+
+/* Band templates duplicated per wiphy. The channel info
+ * above is added to the band during setup.
+ */
+static const struct ieee80211_supported_band __wl_band_2ghz = {
+	.band = IEEE80211_BAND_2GHZ,
+	.bitrates = wl_g_rates,
+	.n_bitrates = wl_g_rates_size,
+};
+
+static const struct ieee80211_supported_band __wl_band_5ghz = {
+	.band = IEEE80211_BAND_5GHZ,
+	.bitrates = wl_a_rates,
+	.n_bitrates = wl_a_rates_size,
+};
+
+/* This is to override regulatory domains defined in cfg80211 module (reg.c).
+ * By default the world regulatory domain defined in reg.c puts the flag
+ * NL80211_RRF_NO_IR on 5GHz channels (36..48 and 149..165).
+ * With respect to these flags, wpa_supplicant doesn't start p2p
+ * operations on 5GHz channels. All the changes in the world regulatory
+ * domain are to be done here.
+ */
+static const struct ieee80211_regdomain brcmf_regdom = {
+	.n_reg_rules = 4,
+	.alpha2 =  "99",
+	.reg_rules = {
+		/* IEEE 802.11b/g, channels 1..11 */
+		REG_RULE(2412-10, 2472+10, 40, 6, 20, 0),
+		/* If any */
+		/* IEEE 802.11 channel 14 - Only JP enables
+		 * this and for 802.11b only
+		 */
+		REG_RULE(2484-10, 2484+10, 20, 6, 20, 0),
+		/* IEEE 802.11a, channel 36..64 */
+		REG_RULE(5150-10, 5350+10, 80, 6, 20, 0),
+		/* IEEE 802.11a, channel 100..165 */
+		REG_RULE(5470-10, 5850+10, 80, 6, 20, 0), }
+};
+
+static const u32 __wl_cipher_suites[] = {
+	WLAN_CIPHER_SUITE_WEP40,
+	WLAN_CIPHER_SUITE_WEP104,
+	WLAN_CIPHER_SUITE_TKIP,
+	WLAN_CIPHER_SUITE_CCMP,
+	WLAN_CIPHER_SUITE_AES_CMAC,
+};
+
+/* Vendor specific IE: id = 221; OUI and type define the exact IE */
+struct brcmf_vs_tlv {
+	u8 id;
+	u8 len;
+	u8 oui[3];
+	u8 oui_type;
+};
+
+struct parsed_vndr_ie_info {
+	u8 *ie_ptr;
+	u32 ie_len;	/* total length including id & length field */
+	struct brcmf_vs_tlv vndrie;
+};
+
+struct parsed_vndr_ies {
+	u32 count;
+	struct parsed_vndr_ie_info ie_info[VNDR_IE_PARSE_LIMIT];
+};
+
+static int brcmf_roamoff;
+module_param_named(roamoff, brcmf_roamoff, int, S_IRUSR);
+MODULE_PARM_DESC(roamoff, "do not use internal roaming engine");
+
+
+static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
+			       struct cfg80211_chan_def *ch)
+{
+	struct brcmu_chan ch_inf;
+	s32 primary_offset;
+
+	brcmf_dbg(TRACE, "chandef: control %d center %d width %d\n",
+		  ch->chan->center_freq, ch->center_freq1, ch->width);
+	ch_inf.chnum = ieee80211_frequency_to_channel(ch->center_freq1);
+	primary_offset = ch->center_freq1 - ch->chan->center_freq;
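+	/* the control channel's offset from the center frequency selects
+	 * the sideband encoded in the chanspec
+	 */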
+	switch (ch->width) {
+	case NL80211_CHAN_WIDTH_20:
+	case NL80211_CHAN_WIDTH_20_NOHT:
+		ch_inf.bw = BRCMU_CHAN_BW_20;
+		WARN_ON(primary_offset != 0);
+		break;
+	case NL80211_CHAN_WIDTH_40:
+		ch_inf.bw = BRCMU_CHAN_BW_40;
+		if (primary_offset < 0)
+			ch_inf.sb = BRCMU_CHAN_SB_U;
+		else
+			ch_inf.sb = BRCMU_CHAN_SB_L;
+		break;
+	case NL80211_CHAN_WIDTH_80:
+		ch_inf.bw = BRCMU_CHAN_BW_80;
+		if (primary_offset < 0) {
+			if (primary_offset < -CH_10MHZ_APART)
+				ch_inf.sb = BRCMU_CHAN_SB_UU;
+			else
+				ch_inf.sb = BRCMU_CHAN_SB_UL;
+		} else {
+			if (primary_offset > CH_10MHZ_APART)
+				ch_inf.sb = BRCMU_CHAN_SB_LL;
+			else
+				ch_inf.sb = BRCMU_CHAN_SB_LU;
+		}
+		break;
+	case NL80211_CHAN_WIDTH_80P80:
+	case NL80211_CHAN_WIDTH_160:
+	case NL80211_CHAN_WIDTH_5:
+	case NL80211_CHAN_WIDTH_10:
+	default:
+		WARN_ON_ONCE(1);
+	}
+	switch (ch->chan->band) {
+	case IEEE80211_BAND_2GHZ:
+		ch_inf.band = BRCMU_CHAN_BAND_2G;
+		break;
+	case IEEE80211_BAND_5GHZ:
+		ch_inf.band = BRCMU_CHAN_BAND_5G;
+		break;
+	case IEEE80211_BAND_60GHZ:
+	default:
+		WARN_ON_ONCE(1);
+	}
+	d11inf->encchspec(&ch_inf);
+
+	return ch_inf.chspec;
+}
+
+u16 channel_to_chanspec(struct brcmu_d11inf *d11inf,
+			struct ieee80211_channel *ch)
+{
+	struct brcmu_chan ch_inf;
+
+	ch_inf.chnum = ieee80211_frequency_to_channel(ch->center_freq);
+	ch_inf.bw = BRCMU_CHAN_BW_20;
+	d11inf->encchspec(&ch_inf);
+
+	return ch_inf.chspec;
+}
+
+/* Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag
+ */
+const struct brcmf_tlv *
+brcmf_parse_tlvs(const void *buf, int buflen, uint key)
+{
+	const struct brcmf_tlv *elt = buf;
+	int totlen = buflen;
+
+	/* find tagged parameter */
+	while (totlen >= TLV_HDR_LEN) {
+		int len = elt->len;
+
+		/* validate remaining totlen */
+		if ((elt->id == key) && (totlen >= (len + TLV_HDR_LEN)))
+			return elt;
+
+		elt = (struct brcmf_tlv *)((u8 *)elt + (len + TLV_HDR_LEN));
+		totlen -= (len + TLV_HDR_LEN);
+	}
+
+	return NULL;
+}
+
+/* Is this TLV the expected entry? If not,
+ * update the tlvs buffer pointer/length.
+ */
+static bool
+brcmf_tlv_has_ie(const u8 *ie, const u8 **tlvs, u32 *tlvs_len,
+		 const u8 *oui, u32 oui_len, u8 type)
+{
+	/* If the contents match the OUI and the type */
+	if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
+	    !memcmp(&ie[TLV_BODY_OFF], oui, oui_len) &&
+	    type == ie[TLV_BODY_OFF + oui_len]) {
+		return true;
+	}
+
+	if (tlvs == NULL)
+		return false;
+	/* point to the next ie */
+	ie += ie[TLV_LEN_OFF] + TLV_HDR_LEN;
+	/* calculate the length of the rest of the buffer */
+	*tlvs_len -= (int)(ie - *tlvs);
+	/* update the pointer to the start of the buffer */
+	*tlvs = ie;
+
+	return false;
+}
+
+static struct brcmf_vs_tlv *
+brcmf_find_wpaie(const u8 *parse, u32 len)
+{
+	const struct brcmf_tlv *ie;
+
+	while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) {
+		if (brcmf_tlv_has_ie((const u8 *)ie, &parse, &len,
+				     WPA_OUI, TLV_OUI_LEN, WPA_OUI_TYPE))
+			return (struct brcmf_vs_tlv *)ie;
+	}
+	return NULL;
+}
+
+static struct brcmf_vs_tlv *
+brcmf_find_wpsie(const u8 *parse, u32 len)
+{
+	const struct brcmf_tlv *ie;
+
+	while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) {
+		if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len,
+				     WPA_OUI, TLV_OUI_LEN, WPS_OUI_TYPE))
+			return (struct brcmf_vs_tlv *)ie;
+	}
+	return NULL;
+}
+
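+/* Count the interface types in use (substituting @new_type for @vif) and ask
+ * cfg80211 whether the resulting combination is allowed.
+ */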
+static int brcmf_vif_change_validate(struct brcmf_cfg80211_info *cfg,
+				     struct brcmf_cfg80211_vif *vif,
+				     enum nl80211_iftype new_type)
+{
+	int iftype_num[NUM_NL80211_IFTYPES];
+	struct brcmf_cfg80211_vif *pos;
+
+	memset(&iftype_num[0], 0, sizeof(iftype_num));
+	list_for_each_entry(pos, &cfg->vif_list, list)
+		if (pos == vif)
+			iftype_num[new_type]++;
+		else
+			iftype_num[pos->wdev.iftype]++;
+
+	return cfg80211_check_combinations(cfg->wiphy, 1, 0, iftype_num);
+}
+
+static int brcmf_vif_add_validate(struct brcmf_cfg80211_info *cfg,
+				  enum nl80211_iftype new_type)
+{
+	int iftype_num[NUM_NL80211_IFTYPES];
+	struct brcmf_cfg80211_vif *pos;
+
+	memset(&iftype_num[0], 0, sizeof(iftype_num));
+	list_for_each_entry(pos, &cfg->vif_list, list)
+		iftype_num[pos->wdev.iftype]++;
+
+	iftype_num[new_type]++;
+	return cfg80211_check_combinations(cfg->wiphy, 1, 0, iftype_num);
+}
+
+static void convert_key_from_CPU(struct brcmf_wsec_key *key,
+				 struct brcmf_wsec_key_le *key_le)
+{
+	key_le->index = cpu_to_le32(key->index);
+	key_le->len = cpu_to_le32(key->len);
+	key_le->algo = cpu_to_le32(key->algo);
+	key_le->flags = cpu_to_le32(key->flags);
+	key_le->rxiv.hi = cpu_to_le32(key->rxiv.hi);
+	key_le->rxiv.lo = cpu_to_le16(key->rxiv.lo);
+	key_le->iv_initialized = cpu_to_le32(key->iv_initialized);
+	memcpy(key_le->data, key->data, sizeof(key->data));
+	memcpy(key_le->ea, key->ea, sizeof(key->ea));
+}
+
+static int
+send_key_to_dongle(struct brcmf_if *ifp, struct brcmf_wsec_key *key)
+{
+	int err;
+	struct brcmf_wsec_key_le key_le;
+
+	convert_key_from_CPU(key, &key_le);
+
+	brcmf_netdev_wait_pend8021x(ifp);
+
+	err = brcmf_fil_bsscfg_data_set(ifp, "wsec_key", &key_le,
+					sizeof(key_le));
+
+	if (err)
+		brcmf_err("wsec_key error (%d)\n", err);
+	return err;
+}
+
+static s32
+brcmf_configure_arp_offload(struct brcmf_if *ifp, bool enable)
+{
+	s32 err;
+	u32 mode;
+
+	if (enable)
+		mode = BRCMF_ARP_OL_AGENT | BRCMF_ARP_OL_PEER_AUTO_REPLY;
+	else
+		mode = 0;
+
+	/* Try to set and enable the ARP offload feature; if this fails it
+	 * is simply not supported and err 0 will be returned.
+	 */
+	err = brcmf_fil_iovar_int_set(ifp, "arp_ol", mode);
+	if (err) {
+		brcmf_dbg(TRACE, "failed to set ARP offload mode to 0x%x, err = %d\n",
+			  mode, err);
+		err = 0;
+	} else {
+		err = brcmf_fil_iovar_int_set(ifp, "arpoe", enable);
+		if (err) {
+			brcmf_dbg(TRACE, "failed to configure (%d) ARP offload err = %d\n",
+				  enable, err);
+			err = 0;
+		} else
+			brcmf_dbg(TRACE, "successfully configured (%d) ARP offload to 0x%x\n",
+				  enable, mode);
+	}
+
+	return err;
+}
+
+static void
+brcmf_cfg80211_update_proto_addr_mode(struct wireless_dev *wdev)
+{
+	struct brcmf_cfg80211_vif *vif;
+	struct brcmf_if *ifp;
+
+	vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
+	ifp = vif->ifp;
+
+	if ((wdev->iftype == NL80211_IFTYPE_ADHOC) ||
+	    (wdev->iftype == NL80211_IFTYPE_AP) ||
+	    (wdev->iftype == NL80211_IFTYPE_P2P_GO))
+		brcmf_proto_configure_addr_mode(ifp->drvr, ifp->ifidx,
+						ADDR_DIRECT);
+	else
+		brcmf_proto_configure_addr_mode(ifp->drvr, ifp->ifidx,
+						ADDR_INDIRECT);
+}
+
+static int brcmf_cfg80211_request_ap_if(struct brcmf_if *ifp)
+{
+	struct brcmf_mbss_ssid_le mbss_ssid_le;
+	int bsscfgidx;
+	int err;
+
+	memset(&mbss_ssid_le, 0, sizeof(mbss_ssid_le));
+	bsscfgidx = brcmf_get_next_free_bsscfgidx(ifp->drvr);
+	if (bsscfgidx < 0)
+		return bsscfgidx;
+
+	mbss_ssid_le.bsscfgidx = cpu_to_le32(bsscfgidx);
+	mbss_ssid_le.SSID_len = cpu_to_le32(5);
+	sprintf(mbss_ssid_le.SSID, "ssid%d" , bsscfgidx);
+
+	err = brcmf_fil_bsscfg_data_set(ifp, "bsscfg:ssid", &mbss_ssid_le,
+					sizeof(mbss_ssid_le));
+	if (err < 0)
+		brcmf_err("setting ssid failed %d\n", err);
+
+	return err;
+}
+
+/**
+ * brcmf_ap_add_vif() - create a new AP virtual interface for multiple BSS
+ *
+ * @wiphy: wiphy device of new interface.
+ * @name: name of the new interface.
+ * @flags: not used.
+ * @params: contains mac address for AP device.
+ */
+static
+struct wireless_dev *brcmf_ap_add_vif(struct wiphy *wiphy, const char *name,
+				      u32 *flags, struct vif_params *params)
+{
+	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+	struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
+	struct brcmf_cfg80211_vif *vif;
+	int err;
+
+	if (brcmf_cfg80211_vif_event_armed(cfg))
+		return ERR_PTR(-EBUSY);
+
+	brcmf_dbg(INFO, "Adding vif \"%s\"\n", name);
+
+	vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_AP, false);
+	if (IS_ERR(vif))
+		return (struct wireless_dev *)vif;
+
+	brcmf_cfg80211_arm_vif_event(cfg, vif);
+
+	err = brcmf_cfg80211_request_ap_if(ifp);
+	if (err) {
+		brcmf_cfg80211_arm_vif_event(cfg, NULL);
+		goto fail;
+	}
+
+	/* wait for firmware event */
+	err = brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_ADD,
+						    msecs_to_jiffies(1500));
+	brcmf_cfg80211_arm_vif_event(cfg, NULL);
+	if (!err) {
+		brcmf_err("timeout occurred\n");
+		err = -EIO;
+		goto fail;
+	}
+
+	/* interface created in firmware */
+	ifp = vif->ifp;
+	if (!ifp) {
+		brcmf_err("no if pointer provided\n");
+		err = -ENOENT;
+		goto fail;
+	}
+
+	strncpy(ifp->ndev->name, name, sizeof(ifp->ndev->name) - 1);
+	err = brcmf_net_attach(ifp, true);
+	if (err) {
+		brcmf_err("Registering netdevice failed\n");
+		goto fail;
+	}
+
+	return &ifp->vif->wdev;
+
+fail:
+	brcmf_free_vif(vif);
+	return ERR_PTR(err);
+}
+
+static bool brcmf_is_apmode(struct brcmf_cfg80211_vif *vif)
+{
+	enum nl80211_iftype iftype;
+
+	iftype = vif->wdev.iftype;
+	return iftype == NL80211_IFTYPE_AP || iftype == NL80211_IFTYPE_P2P_GO;
+}
+
+static bool brcmf_is_ibssmode(struct brcmf_cfg80211_vif *vif)
+{
+	return vif->wdev.iftype == NL80211_IFTYPE_ADHOC;
+}
+
+static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
+						     const char *name,
+						     unsigned char name_assign_type,
+						     enum nl80211_iftype type,
+						     u32 *flags,
+						     struct vif_params *params)
+{
+	struct wireless_dev *wdev;
+	int err;
+
+	brcmf_dbg(TRACE, "enter: %s type %d\n", name, type);
+	err = brcmf_vif_add_validate(wiphy_to_cfg(wiphy), type);
+	if (err) {
+		brcmf_err("iface validation failed: err=%d\n", err);
+		return ERR_PTR(err);
+	}
+	switch (type) {
+	case NL80211_IFTYPE_ADHOC:
+	case NL80211_IFTYPE_STATION:
+	case NL80211_IFTYPE_AP_VLAN:
+	case NL80211_IFTYPE_WDS:
+	case NL80211_IFTYPE_MONITOR:
+	case NL80211_IFTYPE_MESH_POINT:
+		return ERR_PTR(-EOPNOTSUPP);
+	case NL80211_IFTYPE_AP:
+		wdev = brcmf_ap_add_vif(wiphy, name, flags, params);
+		if (!IS_ERR(wdev))
+			brcmf_cfg80211_update_proto_addr_mode(wdev);
+		return wdev;
+	case NL80211_IFTYPE_P2P_CLIENT:
+	case NL80211_IFTYPE_P2P_GO:
+	case NL80211_IFTYPE_P2P_DEVICE:
+		wdev = brcmf_p2p_add_vif(wiphy, name, name_assign_type, type, flags, params);
+		if (!IS_ERR(wdev))
+			brcmf_cfg80211_update_proto_addr_mode(wdev);
+		return wdev;
+	case NL80211_IFTYPE_UNSPECIFIED:
+	default:
+		return ERR_PTR(-EINVAL);
+	}
+}
+
+static void brcmf_scan_config_mpc(struct brcmf_if *ifp, int mpc)
+{
+	if (brcmf_feat_is_quirk_enabled(ifp, BRCMF_FEAT_QUIRK_NEED_MPC))
+		brcmf_set_mpc(ifp, mpc);
+}
+
+void brcmf_set_mpc(struct brcmf_if *ifp, int mpc)
+{
+	s32 err = 0;
+
+	if (check_vif_up(ifp->vif)) {
+		err = brcmf_fil_iovar_int_set(ifp, "mpc", mpc);
+		if (err) {
+			brcmf_err("fail to set mpc\n");
+			return;
+		}
+		brcmf_dbg(INFO, "MPC : %d\n", mpc);
+	}
+}
+
+s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
+				struct brcmf_if *ifp, bool aborted,
+				bool fw_abort)
+{
+	struct brcmf_scan_params_le params_le;
+	struct cfg80211_scan_request *scan_request;
+	s32 err = 0;
+
+	brcmf_dbg(SCAN, "Enter\n");
+
+	/* clear scan request, because the FW abort can cause a second call */
+	/* to this function and might cause a double cfg80211_scan_done     */
+	scan_request = cfg->scan_request;
+	cfg->scan_request = NULL;
+
+	if (timer_pending(&cfg->escan_timeout))
+		del_timer_sync(&cfg->escan_timeout);
+
+	if (fw_abort) {
+		/* Do a scan abort to stop the driver's scan engine */
+		brcmf_dbg(SCAN, "ABORT scan in firmware\n");
+		memset(&params_le, 0, sizeof(params_le));
+		eth_broadcast_addr(params_le.bssid);
+		params_le.bss_type = DOT11_BSSTYPE_ANY;
+		params_le.scan_type = 0;
+		params_le.channel_num = cpu_to_le32(1);
+		params_le.nprobes = cpu_to_le32(1);
+		params_le.active_time = cpu_to_le32(-1);
+		params_le.passive_time = cpu_to_le32(-1);
+		params_le.home_time = cpu_to_le32(-1);
+		/* Scan is aborted by setting channel_list[0] to -1 */
+		params_le.channel_list[0] = cpu_to_le16(-1);
+		/* E-Scan (or any other type) can be aborted by SCAN */
+		err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCAN,
+					     &params_le, sizeof(params_le));
+		if (err)
+			brcmf_err("Scan abort  failed\n");
+	}
+
+	brcmf_scan_config_mpc(ifp, 1);
+
+	/*
+	 * e-scan can be initiated by scheduled scan
+	 * which takes precedence.
+	 */
+	if (cfg->sched_escan) {
+		brcmf_dbg(SCAN, "scheduled scan completed\n");
+		cfg->sched_escan = false;
+		if (!aborted)
+			cfg80211_sched_scan_results(cfg_to_wiphy(cfg));
+	} else if (scan_request) {
+		brcmf_dbg(SCAN, "ESCAN Completed scan: %s\n",
+			  aborted ? "Aborted" : "Done");
+		cfg80211_scan_done(scan_request, aborted);
+	}
+	if (!test_and_clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status))
+		brcmf_dbg(SCAN, "Scan complete, probably P2P scan\n");
+
+	return err;
+}
+
+static
+int brcmf_cfg80211_del_iface(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+	struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
+	struct net_device *ndev = wdev->netdev;
+
+	/* vif event pending in firmware */
+	if (brcmf_cfg80211_vif_event_armed(cfg))
+		return -EBUSY;
+
+	if (ndev) {
+		if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status) &&
+		    cfg->escan_info.ifp == netdev_priv(ndev))
+			brcmf_notify_escan_complete(cfg, netdev_priv(ndev),
+						    true, true);
+
+		brcmf_fil_iovar_int_set(netdev_priv(ndev), "mpc", 1);
+	}
+
+	switch (wdev->iftype) {
+	case NL80211_IFTYPE_ADHOC:
+	case NL80211_IFTYPE_STATION:
+	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_AP_VLAN:
+	case NL80211_IFTYPE_WDS:
+	case NL80211_IFTYPE_MONITOR:
+	case NL80211_IFTYPE_MESH_POINT:
+		return -EOPNOTSUPP;
+	case NL80211_IFTYPE_P2P_CLIENT:
+	case NL80211_IFTYPE_P2P_GO:
+	case NL80211_IFTYPE_P2P_DEVICE:
+		return brcmf_p2p_del_vif(wiphy, wdev);
+	case NL80211_IFTYPE_UNSPECIFIED:
+	default:
+		return -EINVAL;
+	}
+	return -EOPNOTSUPP;
+}
+
+static s32
+brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
+			 enum nl80211_iftype type, u32 *flags,
+			 struct vif_params *params)
+{
+	struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct brcmf_cfg80211_vif *vif = ifp->vif;
+	s32 infra = 0;
+	s32 ap = 0;
+	s32 err = 0;
+
+	brcmf_dbg(TRACE, "Enter, idx=%d, type=%d\n", ifp->bssidx, type);
+
+	/* WAR: There are a number of p2p interface related problems which
+	 * need to be handled initially (before doing the validate).
+	 * wpa_supplicant tends to do iface changes on p2p device/client/go
+	 * which are not always possible/allowed. However we need to return
+	 * OK otherwise wpa_supplicant won't start. The situation differs
+	 * on configuration and setup (p2pon=1 module param). The first check
+	 * is to see if the request is a change to station for p2p iface.
+	 */
+	if ((type == NL80211_IFTYPE_STATION) &&
+	    ((vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT) ||
+	     (vif->wdev.iftype == NL80211_IFTYPE_P2P_GO) ||
+	     (vif->wdev.iftype == NL80211_IFTYPE_P2P_DEVICE))) {
+		brcmf_dbg(TRACE, "Ignoring cmd for p2p if\n");
+		/* Now depending on whether module param p2pon=1 was used the
+		 * response needs to be either 0 or EOPNOTSUPP. The reason is
+		 * that if p2pon=1 is used, but a newer supplicant is used then
+		 * we should return an error, as this combination won't work.
+		 * In other situations 0 is returned and the supplicant will
+		 * start normally. It will give a trace in cfg80211, but it is
+		 * the only way to get it working. Unfortunately this will
+		 * result in a situation where we won't support a new
+		 * supplicant in combination with module param p2pon=1, but
+		 * that is the way it is. If the user tries this then unloading
+		 * of the driver might fail/lock.
+		 */
+		if (cfg->p2p.p2pdev_dynamically)
+			return -EOPNOTSUPP;
+		else
+			return 0;
+	}
+	err = brcmf_vif_change_validate(wiphy_to_cfg(wiphy), vif, type);
+	if (err) {
+		brcmf_err("iface validation failed: err=%d\n", err);
+		return err;
+	}
+	switch (type) {
+	case NL80211_IFTYPE_MONITOR:
+	case NL80211_IFTYPE_WDS:
+		brcmf_err("type (%d) : currently we do not support this type\n",
+			  type);
+		return -EOPNOTSUPP;
+	case NL80211_IFTYPE_ADHOC:
+		infra = 0;
+		break;
+	case NL80211_IFTYPE_STATION:
+		infra = 1;
+		break;
+	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_P2P_GO:
+		ap = 1;
+		break;
+	default:
+		err = -EINVAL;
+		goto done;
+	}
+
+	if (ap) {
+		if (type == NL80211_IFTYPE_P2P_GO) {
+			brcmf_dbg(INFO, "IF Type = P2P GO\n");
+			err = brcmf_p2p_ifchange(cfg, BRCMF_FIL_P2P_IF_GO);
+		}
+		if (!err) {
+			brcmf_dbg(INFO, "IF Type = AP\n");
+		}
+	} else {
+		err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, infra);
+		if (err) {
+			brcmf_err("WLC_SET_INFRA error (%d)\n", err);
+			err = -EAGAIN;
+			goto done;
+		}
+		brcmf_dbg(INFO, "IF Type = %s\n", brcmf_is_ibssmode(vif) ?
+			  "Adhoc" : "Infra");
+	}
+	ndev->ieee80211_ptr->iftype = type;
+
+	brcmf_cfg80211_update_proto_addr_mode(&vif->wdev);
+
+done:
+	brcmf_dbg(TRACE, "Exit\n");
+
+	return err;
+}
+
+static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
+			     struct brcmf_scan_params_le *params_le,
+			     struct cfg80211_scan_request *request)
+{
+	u32 n_ssids;
+	u32 n_channels;
+	s32 i;
+	s32 offset;
+	u16 chanspec;
+	char *ptr;
+	struct brcmf_ssid_le ssid_le;
+
+	eth_broadcast_addr(params_le->bssid);
+	params_le->bss_type = DOT11_BSSTYPE_ANY;
+	params_le->scan_type = BRCMF_SCANTYPE_ACTIVE;
+	params_le->channel_num = 0;
+	params_le->nprobes = cpu_to_le32(-1);
+	params_le->active_time = cpu_to_le32(-1);
+	params_le->passive_time = cpu_to_le32(-1);
+	params_le->home_time = cpu_to_le32(-1);
+	memset(&params_le->ssid_le, 0, sizeof(params_le->ssid_le));
+
+	n_ssids = request->n_ssids;
+	n_channels = request->n_channels;
+
+	/* Copy channel array if applicable */
+	brcmf_dbg(SCAN, "### List of channelspecs to scan ### %d\n",
+		  n_channels);
+	if (n_channels > 0) {
+		for (i = 0; i < n_channels; i++) {
+			chanspec = channel_to_chanspec(&cfg->d11inf,
+						       request->channels[i]);
+			brcmf_dbg(SCAN, "Chan : %d, Channel spec: %x\n",
+				  request->channels[i]->hw_value, chanspec);
+			params_le->channel_list[i] = cpu_to_le16(chanspec);
+		}
+	} else {
+		brcmf_dbg(SCAN, "Scanning all channels\n");
+	}
+	/* Copy ssid array if applicable */
+	brcmf_dbg(SCAN, "### List of SSIDs to scan ### %d\n", n_ssids);
+	if (n_ssids > 0) {
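+		/* SSID records follow the channel list, 32-bit aligned */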
+		offset = offsetof(struct brcmf_scan_params_le, channel_list) +
+				n_channels * sizeof(u16);
+		offset = roundup(offset, sizeof(u32));
+		ptr = (char *)params_le + offset;
+		for (i = 0; i < n_ssids; i++) {
+			memset(&ssid_le, 0, sizeof(ssid_le));
+			ssid_le.SSID_len =
+					cpu_to_le32(request->ssids[i].ssid_len);
+			memcpy(ssid_le.SSID, request->ssids[i].ssid,
+			       request->ssids[i].ssid_len);
+			if (!ssid_le.SSID_len)
+				brcmf_dbg(SCAN, "%d: Broadcast scan\n", i);
+			else
+				brcmf_dbg(SCAN, "%d: scan for  %s size =%d\n",
+					  i, ssid_le.SSID, ssid_le.SSID_len);
+			memcpy(ptr, &ssid_le, sizeof(ssid_le));
+			ptr += sizeof(ssid_le);
+		}
+	} else {
+		brcmf_dbg(SCAN, "Performing passive scan\n");
+		params_le->scan_type = BRCMF_SCANTYPE_PASSIVE;
+	}
+	/* Pack the SSID count and channel count into channel_num */
+	params_le->channel_num =
+		cpu_to_le32((n_ssids << BRCMF_SCAN_PARAMS_NSSID_SHIFT) |
+			(n_channels & BRCMF_SCAN_PARAMS_COUNT_MASK));
+}
+
+static s32
+brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp,
+		struct cfg80211_scan_request *request, u16 action)
+{
+	s32 params_size = BRCMF_SCAN_PARAMS_FIXED_SIZE +
+			  offsetof(struct brcmf_escan_params_le, params_le);
+	struct brcmf_escan_params_le *params;
+	s32 err = 0;
+
+	brcmf_dbg(SCAN, "E-SCAN START\n");
+
+	if (request != NULL) {
+		/* Allocate space for populating channels in struct */
+		params_size += sizeof(u32) * ((request->n_channels + 1) / 2);
+
+		/* Allocate space for populating ssids in struct */
+		params_size += sizeof(struct brcmf_ssid) * request->n_ssids;
+	}
+
+	params = kzalloc(params_size, GFP_KERNEL);
+	if (!params) {
+		err = -ENOMEM;
+		goto exit;
+	}
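+	/* the escan request plus the "escan" iovar name must fit in a
+	 * medium sized dcmd buffer
+	 */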
+	BUG_ON(params_size + sizeof("escan") >= BRCMF_DCMD_MEDLEN);
+	brcmf_escan_prep(cfg, &params->params_le, request);
+	params->version = cpu_to_le32(BRCMF_ESCAN_REQ_VERSION);
+	params->action = cpu_to_le16(action);
+	params->sync_id = cpu_to_le16(0x1234);
+
+	err = brcmf_fil_iovar_data_set(ifp, "escan", params, params_size);
+	if (err) {
+		if (err == -EBUSY)
+			brcmf_dbg(INFO, "system busy : escan canceled\n");
+		else
+			brcmf_err("error (%d)\n", err);
+	}
+
+	kfree(params);
+exit:
+	return err;
+}
+
+static s32
+brcmf_do_escan(struct brcmf_cfg80211_info *cfg, struct wiphy *wiphy,
+	       struct brcmf_if *ifp, struct cfg80211_scan_request *request)
+{
+	s32 err;
+	u32 passive_scan;
+	struct brcmf_scan_results *results;
+	struct escan_info *escan = &cfg->escan_info;
+
+	brcmf_dbg(SCAN, "Enter\n");
+	escan->ifp = ifp;
+	escan->wiphy = wiphy;
+	escan->escan_state = WL_ESCAN_STATE_SCANNING;
+	passive_scan = cfg->active_scan ? 0 : 1;
+	err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PASSIVE_SCAN,
+				    passive_scan);
+	if (err) {
+		brcmf_err("error (%d)\n", err);
+		return err;
+	}
+	brcmf_scan_config_mpc(ifp, 0);
+	results = (struct brcmf_scan_results *)cfg->escan_info.escan_buf;
+	results->version = 0;
+	results->count = 0;
+	results->buflen = WL_ESCAN_RESULTS_FIXED_SIZE;
+
+	err = escan->run(cfg, ifp, request, WL_ESCAN_ACTION_START);
+	if (err)
+		brcmf_scan_config_mpc(ifp, 1);
+	return err;
+}
+
+static s32
+brcmf_cfg80211_escan(struct wiphy *wiphy, struct brcmf_cfg80211_vif *vif,
+		     struct cfg80211_scan_request *request,
+		     struct cfg80211_ssid *this_ssid)
+{
+	struct brcmf_if *ifp = vif->ifp;
+	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+	struct cfg80211_ssid *ssids;
+	struct brcmf_cfg80211_scan_req *sr = &cfg->scan_req_int;
+	u32 passive_scan;
+	bool escan_req;
+	bool spec_scan;
+	s32 err;
+	u32 SSID_len;
+
+	brcmf_dbg(SCAN, "START ESCAN\n");
+
+	if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
+		brcmf_err("Scanning already: status (%lu)\n", cfg->scan_status);
+		return -EAGAIN;
+	}
+	if (test_bit(BRCMF_SCAN_STATUS_ABORT, &cfg->scan_status)) {
+		brcmf_err("Scanning being aborted: status (%lu)\n",
+			  cfg->scan_status);
+		return -EAGAIN;
+	}
+	if (test_bit(BRCMF_SCAN_STATUS_SUPPRESS, &cfg->scan_status)) {
+		brcmf_err("Scanning suppressed: status (%lu)\n",
+			  cfg->scan_status);
+		return -EAGAIN;
+	}
+	if (test_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state)) {
+		brcmf_err("Connecting: status (%lu)\n", ifp->vif->sme_state);
+		return -EAGAIN;
+	}
+
+	/* If scan req comes for p2p0, send it over primary I/F */
+	if (vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif)
+		vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif;
+
+	escan_req = false;
+	if (request) {
+		/* scan bss */
+		ssids = request->ssids;
+		escan_req = true;
+	} else {
+		/* scan in ibss */
+		/* we don't do escan in ibss */
+		ssids = this_ssid;
+	}
+
+	cfg->scan_request = request;
+	set_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
+	if (escan_req) {
+		cfg->escan_info.run = brcmf_run_escan;
+		err = brcmf_p2p_scan_prep(wiphy, request, vif);
+		if (err)
+			goto scan_out;
+
+		err = brcmf_do_escan(cfg, wiphy, vif->ifp, request);
+		if (err)
+			goto scan_out;
+	} else {
+		brcmf_dbg(SCAN, "ssid \"%s\", ssid_len (%d)\n",
+			  ssids->ssid, ssids->ssid_len);
+		memset(&sr->ssid_le, 0, sizeof(sr->ssid_le));
+		SSID_len = min_t(u8, sizeof(sr->ssid_le.SSID), ssids->ssid_len);
+		sr->ssid_le.SSID_len = cpu_to_le32(0);
+		spec_scan = false;
+		if (SSID_len) {
+			memcpy(sr->ssid_le.SSID, ssids->ssid, SSID_len);
+			sr->ssid_le.SSID_len = cpu_to_le32(SSID_len);
+			spec_scan = true;
+		} else
+			brcmf_dbg(SCAN, "Broadcast scan\n");
+
+		passive_scan = cfg->active_scan ? 0 : 1;
+		err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PASSIVE_SCAN,
+					    passive_scan);
+		if (err) {
+			brcmf_err("WLC_SET_PASSIVE_SCAN error (%d)\n", err);
+			goto scan_out;
+		}
+		brcmf_scan_config_mpc(ifp, 0);
+		err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCAN,
+					     &sr->ssid_le, sizeof(sr->ssid_le));
+		if (err) {
+			if (err == -EBUSY)
+				brcmf_dbg(INFO, "BUSY: scan for \"%s\" canceled\n",
+					  sr->ssid_le.SSID);
+			else
+				brcmf_err("WLC_SCAN error (%d)\n", err);
+
+			brcmf_scan_config_mpc(ifp, 1);
+			goto scan_out;
+		}
+	}
+
+	/* Arm scan timeout timer */
+	mod_timer(&cfg->escan_timeout, jiffies +
+			WL_ESCAN_TIMER_INTERVAL_MS * HZ / 1000);
+
+	return 0;
+
+scan_out:
+	clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
+	cfg->scan_request = NULL;
+	return err;
+}
+
+static s32
+brcmf_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
+{
+	struct brcmf_cfg80211_vif *vif;
+	s32 err = 0;
+
+	brcmf_dbg(TRACE, "Enter\n");
+	vif = container_of(request->wdev, struct brcmf_cfg80211_vif, wdev);
+	if (!check_vif_up(vif))
+		return -EIO;
+
+	err = brcmf_cfg80211_escan(wiphy, vif, request, NULL);
+
+	if (err)
+		brcmf_err("scan error (%d)\n", err);
+
+	brcmf_dbg(TRACE, "Exit\n");
+	return err;
+}
+
+static s32 brcmf_set_rts(struct net_device *ndev, u32 rts_threshold)
+{
+	s32 err = 0;
+
+	err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "rtsthresh",
+				      rts_threshold);
+	if (err)
+		brcmf_err("Error (%d)\n", err);
+
+	return err;
+}
+
+static s32 brcmf_set_frag(struct net_device *ndev, u32 frag_threshold)
+{
+	s32 err = 0;
+
+	err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "fragthresh",
+				      frag_threshold);
+	if (err)
+		brcmf_err("Error (%d)\n", err);
+
+	return err;
+}
+
+static s32 brcmf_set_retry(struct net_device *ndev, u32 retry, bool l)
+{
+	s32 err = 0;
+	u32 cmd = (l ? BRCMF_C_SET_LRL : BRCMF_C_SET_SRL);
+
+	err = brcmf_fil_cmd_int_set(netdev_priv(ndev), cmd, retry);
+	if (err) {
+		brcmf_err("cmd (%d) , error (%d)\n", cmd, err);
+		return err;
+	}
+	return err;
+}
+
+static s32 brcmf_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+{
+	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+	struct net_device *ndev = cfg_to_ndev(cfg);
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	s32 err = 0;
+
+	brcmf_dbg(TRACE, "Enter\n");
+	if (!check_vif_up(ifp->vif))
+		return -EIO;
+
+	if (changed & WIPHY_PARAM_RTS_THRESHOLD &&
+	    (cfg->conf->rts_threshold != wiphy->rts_threshold)) {
+		cfg->conf->rts_threshold = wiphy->rts_threshold;
+		err = brcmf_set_rts(ndev, cfg->conf->rts_threshold);
+		if (!err)
+			goto done;
+	}
+	if (changed & WIPHY_PARAM_FRAG_THRESHOLD &&
+	    (cfg->conf->frag_threshold != wiphy->frag_threshold)) {
+		cfg->conf->frag_threshold = wiphy->frag_threshold;
+		err = brcmf_set_frag(ndev, cfg->conf->frag_threshold);
+		if (!err)
+			goto done;
+	}
+	if (changed & WIPHY_PARAM_RETRY_LONG
+	    && (cfg->conf->retry_long != wiphy->retry_long)) {
+		cfg->conf->retry_long = wiphy->retry_long;
+		err = brcmf_set_retry(ndev, cfg->conf->retry_long, true);
+		if (!err)
+			goto done;
+	}
+	if (changed & WIPHY_PARAM_RETRY_SHORT
+	    && (cfg->conf->retry_short != wiphy->retry_short)) {
+		cfg->conf->retry_short = wiphy->retry_short;
+		err = brcmf_set_retry(ndev, cfg->conf->retry_short, false);
+		if (!err)
+			goto done;
+	}
+
+done:
+	brcmf_dbg(TRACE, "Exit\n");
+	return err;
+}
+
+static void brcmf_init_prof(struct brcmf_cfg80211_profile *prof)
+{
+	memset(prof, 0, sizeof(*prof));
+}
+
+static u16 brcmf_map_fw_linkdown_reason(const struct brcmf_event_msg *e)
+{
+	u16 reason;
+
+	switch (e->event_code) {
+	case BRCMF_E_DEAUTH:
+	case BRCMF_E_DEAUTH_IND:
+	case BRCMF_E_DISASSOC_IND:
+		reason = e->reason;
+		break;
+	case BRCMF_E_LINK:
+	default:
+		reason = 0;
+		break;
+	}
+	return reason;
+}
+
+static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason)
+{
+	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(vif->wdev.wiphy);
+	s32 err = 0;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	if (test_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state)) {
+		brcmf_dbg(INFO, "Call WLC_DISASSOC to stop excess roaming\n ");
+		err = brcmf_fil_cmd_data_set(vif->ifp,
+					     BRCMF_C_DISASSOC, NULL, 0);
+		if (err) {
+			brcmf_err("WLC_DISASSOC failed (%d)\n", err);
+		}
+		clear_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state);
+		cfg80211_disconnected(vif->wdev.netdev, reason, NULL, 0,
+				      true, GFP_KERNEL);
+
+	}
+	clear_bit(BRCMF_VIF_STATUS_CONNECTING, &vif->sme_state);
+	clear_bit(BRCMF_SCAN_STATUS_SUPPRESS, &cfg->scan_status);
+	brcmf_btcoex_set_mode(vif, BRCMF_BTCOEX_ENABLED, 0);
+	brcmf_dbg(TRACE, "Exit\n");
+}
+
+static s32
+brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
+		      struct cfg80211_ibss_params *params)
+{
+	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
+	struct brcmf_join_params join_params;
+	size_t join_params_size = 0;
+	s32 err = 0;
+	s32 wsec = 0;
+	s32 bcnprd;
+	u16 chanspec;
+
+	brcmf_dbg(TRACE, "Enter\n");
+	if (!check_vif_up(ifp->vif))
+		return -EIO;
+
+	if (params->ssid)
+		brcmf_dbg(CONN, "SSID: %s\n", params->ssid);
+	else {
+		brcmf_dbg(CONN, "SSID: NULL, Not supported\n");
+		return -EOPNOTSUPP;
+	}
+
+	set_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state);
+
+	if (params->bssid)
+		brcmf_dbg(CONN, "BSSID: %pM\n", params->bssid);
+	else
+		brcmf_dbg(CONN, "No BSSID specified\n");
+
+	if (params->chandef.chan)
+		brcmf_dbg(CONN, "channel: %d\n",
+			  params->chandef.chan->center_freq);
+	else
+		brcmf_dbg(CONN, "no channel specified\n");
+
+	if (params->channel_fixed)
+		brcmf_dbg(CONN, "fixed channel required\n");
+	else
+		brcmf_dbg(CONN, "no fixed channel required\n");
+
+	if (params->ie && params->ie_len)
+		brcmf_dbg(CONN, "ie len: %d\n", params->ie_len);
+	else
+		brcmf_dbg(CONN, "no ie specified\n");
+
+	if (params->beacon_interval)
+		brcmf_dbg(CONN, "beacon interval: %d\n",
+			  params->beacon_interval);
+	else
+		brcmf_dbg(CONN, "no beacon interval specified\n");
+
+	if (params->basic_rates)
+		brcmf_dbg(CONN, "basic rates: %08X\n", params->basic_rates);
+	else
+		brcmf_dbg(CONN, "no basic rates specified\n");
+
+	if (params->privacy)
+		brcmf_dbg(CONN, "privacy required\n");
+	else
+		brcmf_dbg(CONN, "no privacy required\n");
+
+	/* Configure Privacy for starter */
+	if (params->privacy)
+		wsec |= WEP_ENABLED;
+
+	err = brcmf_fil_iovar_int_set(ifp, "wsec", wsec);
+	if (err) {
+		brcmf_err("wsec failed (%d)\n", err);
+		goto done;
+	}
+
+	/* Configure Beacon Interval for starter */
+	if (params->beacon_interval)
+		bcnprd = params->beacon_interval;
+	else
+		bcnprd = 100;
+
+	err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_BCNPRD, bcnprd);
+	if (err) {
+		brcmf_err("WLC_SET_BCNPRD failed (%d)\n", err);
+		goto done;
+	}
+
+	/* Configure required join parameter */
+	memset(&join_params, 0, sizeof(struct brcmf_join_params));
+
+	/* SSID */
+	profile->ssid.SSID_len = min_t(u32, params->ssid_len, 32);
+	memcpy(profile->ssid.SSID, params->ssid, profile->ssid.SSID_len);
+	memcpy(join_params.ssid_le.SSID, params->ssid, profile->ssid.SSID_len);
+	join_params.ssid_le.SSID_len = cpu_to_le32(profile->ssid.SSID_len);
+	join_params_size = sizeof(join_params.ssid_le);
+
+	/* BSSID */
+	if (params->bssid) {
+		memcpy(join_params.params_le.bssid, params->bssid, ETH_ALEN);
+		join_params_size = sizeof(join_params.ssid_le) +
+				   BRCMF_ASSOC_PARAMS_FIXED_SIZE;
+		memcpy(profile->bssid, params->bssid, ETH_ALEN);
+	} else {
+		eth_broadcast_addr(join_params.params_le.bssid);
+		eth_zero_addr(profile->bssid);
+	}
+
+	/* Channel */
+	if (params->chandef.chan) {
+		u32 target_channel;
+
+		cfg->channel =
+			ieee80211_frequency_to_channel(
+				params->chandef.chan->center_freq);
+		if (params->channel_fixed) {
+			/* adding chanspec */
+			chanspec = chandef_to_chanspec(&cfg->d11inf,
+						       &params->chandef);
+			join_params.params_le.chanspec_list[0] =
+				cpu_to_le16(chanspec);
+			join_params.params_le.chanspec_num = cpu_to_le32(1);
+			join_params_size += sizeof(join_params.params_le);
+		}
+
+		/* set channel for starter */
+		target_channel = cfg->channel;
+		err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_CHANNEL,
+					    target_channel);
+		if (err) {
+			brcmf_err("WLC_SET_CHANNEL failed (%d)\n", err);
+			goto done;
+		}
+	} else
+		cfg->channel = 0;
+
+	cfg->ibss_starter = false;
+
+	err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
+				     &join_params, join_params_size);
+	if (err) {
+		brcmf_err("WLC_SET_SSID failed (%d)\n", err);
+		goto done;
+	}
+
+done:
+	if (err)
+		clear_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state);
+	brcmf_dbg(TRACE, "Exit\n");
+	return err;
+}
+
+static s32
+brcmf_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *ndev)
+{
+	struct brcmf_if *ifp = netdev_priv(ndev);
+
+	brcmf_dbg(TRACE, "Enter\n");
+	if (!check_vif_up(ifp->vif))
+		return -EIO;
+
+	brcmf_link_down(ifp->vif, WLAN_REASON_DEAUTH_LEAVING);
+
+	brcmf_dbg(TRACE, "Exit\n");
+
+	return 0;
+}
+
+static s32 brcmf_set_wpa_version(struct net_device *ndev,
+				 struct cfg80211_connect_params *sme)
+{
+	struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
+	struct brcmf_cfg80211_security *sec;
+	s32 val = 0;
+	s32 err = 0;
+
+	if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1)
+		val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED;
+	else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2)
+		val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED;
+	else
+		val = WPA_AUTH_DISABLED;
+	brcmf_dbg(CONN, "setting wpa_auth to 0x%0x\n", val);
+	err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "wpa_auth", val);
+	if (err) {
+		brcmf_err("set wpa_auth failed (%d)\n", err);
+		return err;
+	}
+	sec = &profile->sec;
+	sec->wpa_versions = sme->crypto.wpa_versions;
+	return err;
+}
+
+static s32 brcmf_set_auth_type(struct net_device *ndev,
+			       struct cfg80211_connect_params *sme)
+{
+	struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
+	struct brcmf_cfg80211_security *sec;
+	s32 val = 0;
+	s32 err = 0;
+
+	switch (sme->auth_type) {
+	case NL80211_AUTHTYPE_OPEN_SYSTEM:
+		val = 0;
+		brcmf_dbg(CONN, "open system\n");
+		break;
+	case NL80211_AUTHTYPE_SHARED_KEY:
+		val = 1;
+		brcmf_dbg(CONN, "shared key\n");
+		break;
+	case NL80211_AUTHTYPE_AUTOMATIC:
+		val = 2;
+		brcmf_dbg(CONN, "automatic\n");
+		break;
+	case NL80211_AUTHTYPE_NETWORK_EAP:
+		brcmf_dbg(CONN, "network eap\n");
+	default:
+		val = 2;
+		brcmf_err("invalid auth type (%d)\n", sme->auth_type);
+		break;
+	}
+
+	err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "auth", val);
+	if (err) {
+		brcmf_err("set auth failed (%d)\n", err);
+		return err;
+	}
+	sec = &profile->sec;
+	sec->auth_type = sme->auth_type;
+	return err;
+}
+
+static s32
+brcmf_set_wsec_mode(struct net_device *ndev,
+		     struct cfg80211_connect_params *sme, bool mfp)
+{
+	struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
+	struct brcmf_cfg80211_security *sec;
+	s32 pval = 0;
+	s32 gval = 0;
+	s32 wsec;
+	s32 err = 0;
+
+	if (sme->crypto.n_ciphers_pairwise) {
+		switch (sme->crypto.ciphers_pairwise[0]) {
+		case WLAN_CIPHER_SUITE_WEP40:
+		case WLAN_CIPHER_SUITE_WEP104:
+			pval = WEP_ENABLED;
+			break;
+		case WLAN_CIPHER_SUITE_TKIP:
+			pval = TKIP_ENABLED;
+			break;
+		case WLAN_CIPHER_SUITE_CCMP:
+			pval = AES_ENABLED;
+			break;
+		case WLAN_CIPHER_SUITE_AES_CMAC:
+			pval = AES_ENABLED;
+			break;
+		default:
+			brcmf_err("invalid cipher pairwise (%d)\n",
+				  sme->crypto.ciphers_pairwise[0]);
+			return -EINVAL;
+		}
+	}
+	if (sme->crypto.cipher_group) {
+		switch (sme->crypto.cipher_group) {
+		case WLAN_CIPHER_SUITE_WEP40:
+		case WLAN_CIPHER_SUITE_WEP104:
+			gval = WEP_ENABLED;
+			break;
+		case WLAN_CIPHER_SUITE_TKIP:
+			gval = TKIP_ENABLED;
+			break;
+		case WLAN_CIPHER_SUITE_CCMP:
+			gval = AES_ENABLED;
+			break;
+		case WLAN_CIPHER_SUITE_AES_CMAC:
+			gval = AES_ENABLED;
+			break;
+		default:
+			brcmf_err("invalid cipher group (%d)\n",
+				  sme->crypto.cipher_group);
+			return -EINVAL;
+		}
+	}
+
+	brcmf_dbg(CONN, "pval (%d) gval (%d)\n", pval, gval);
+	/* If privacy is requested but no pairwise/group cipher is set and
+	 * a WPS IE is present, simulate AES: WPS 2.0 allows connecting
+	 * without security.
+	 */
+	if (brcmf_find_wpsie(sme->ie, sme->ie_len) && !pval && !gval &&
+	    sme->privacy)
+		pval = AES_ENABLED;
+
+	if (mfp)
+		wsec = pval | gval | MFP_CAPABLE;
+	else
+		wsec = pval | gval;
+	err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "wsec", wsec);
+	if (err) {
+		brcmf_err("error (%d)\n", err);
+		return err;
+	}
+
+	sec = &profile->sec;
+	sec->cipher_pairwise = sme->crypto.ciphers_pairwise[0];
+	sec->cipher_group = sme->crypto.cipher_group;
+
+	return err;
+}
+
+static s32
+brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
+{
+	struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
+	struct brcmf_cfg80211_security *sec;
+	s32 val = 0;
+	s32 err = 0;
+
+	if (sme->crypto.n_akm_suites) {
+		err = brcmf_fil_bsscfg_int_get(netdev_priv(ndev),
+					       "wpa_auth", &val);
+		if (err) {
+			brcmf_err("could not get wpa_auth (%d)\n", err);
+			return err;
+		}
+		if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) {
+			switch (sme->crypto.akm_suites[0]) {
+			case WLAN_AKM_SUITE_8021X:
+				val = WPA_AUTH_UNSPECIFIED;
+				break;
+			case WLAN_AKM_SUITE_PSK:
+				val = WPA_AUTH_PSK;
+				break;
+			default:
+				brcmf_err("invalid cipher group (%d)\n",
+					  sme->crypto.cipher_group);
+				return -EINVAL;
+			}
+		} else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) {
+			switch (sme->crypto.akm_suites[0]) {
+			case WLAN_AKM_SUITE_8021X:
+				val = WPA2_AUTH_UNSPECIFIED;
+				break;
+			case WLAN_AKM_SUITE_PSK:
+				val = WPA2_AUTH_PSK;
+				break;
+			default:
+				brcmf_err("invalid cipher group (%d)\n",
+					  sme->crypto.cipher_group);
+				return -EINVAL;
+			}
+		}
+
+		brcmf_dbg(CONN, "setting wpa_auth to %d\n", val);
+		err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev),
+					       "wpa_auth", val);
+		if (err) {
+			brcmf_err("could not set wpa_auth (%d)\n", err);
+			return err;
+		}
+	}
+	sec = &profile->sec;
+	sec->wpa_auth = sme->crypto.akm_suites[0];
+
+	return err;
+}
+
+static s32
+brcmf_set_sharedkey(struct net_device *ndev,
+		    struct cfg80211_connect_params *sme)
+{
+	struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
+	struct brcmf_cfg80211_security *sec;
+	struct brcmf_wsec_key key;
+	s32 val;
+	s32 err = 0;
+
+	brcmf_dbg(CONN, "key len (%d)\n", sme->key_len);
+
+	if (sme->key_len == 0)
+		return 0;
+
+	sec = &profile->sec;
+	brcmf_dbg(CONN, "wpa_versions 0x%x cipher_pairwise 0x%x\n",
+		  sec->wpa_versions, sec->cipher_pairwise);
+
+	if (sec->wpa_versions & (NL80211_WPA_VERSION_1 | NL80211_WPA_VERSION_2))
+		return 0;
+
+	if (!(sec->cipher_pairwise &
+	    (WLAN_CIPHER_SUITE_WEP40 | WLAN_CIPHER_SUITE_WEP104)))
+		return 0;
+
+	memset(&key, 0, sizeof(key));
+	key.len = (u32) sme->key_len;
+	key.index = (u32) sme->key_idx;
+	if (key.len > sizeof(key.data)) {
+		brcmf_err("Too long key length (%u)\n", key.len);
+		return -EINVAL;
+	}
+	memcpy(key.data, sme->key, key.len);
+	key.flags = BRCMF_PRIMARY_KEY;
+	switch (sec->cipher_pairwise) {
+	case WLAN_CIPHER_SUITE_WEP40:
+		key.algo = CRYPTO_ALGO_WEP1;
+		break;
+	case WLAN_CIPHER_SUITE_WEP104:
+		key.algo = CRYPTO_ALGO_WEP128;
+		break;
+	default:
+		brcmf_err("Invalid algorithm (%d)\n",
+			  sme->crypto.ciphers_pairwise[0]);
+		return -EINVAL;
+	}
+	/* Set the new key/index */
+	brcmf_dbg(CONN, "key length (%d) key index (%d) algo (%d)\n",
+		  key.len, key.index, key.algo);
+	brcmf_dbg(CONN, "key \"%s\"\n", key.data);
+	err = send_key_to_dongle(netdev_priv(ndev), &key);
+	if (err)
+		return err;
+
+	if (sec->auth_type == NL80211_AUTHTYPE_SHARED_KEY) {
+		brcmf_dbg(CONN, "set auth_type to shared key\n");
+		val = WL_AUTH_SHARED_KEY;	/* shared key */
+		err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "auth", val);
+		if (err)
+			brcmf_err("set auth failed (%d)\n", err);
+	}
+	return err;
+}
+
+static
+enum nl80211_auth_type brcmf_war_auth_type(struct brcmf_if *ifp,
+					   enum nl80211_auth_type type)
+{
+	if (type == NL80211_AUTHTYPE_AUTOMATIC &&
+	    brcmf_feat_is_quirk_enabled(ifp, BRCMF_FEAT_QUIRK_AUTO_AUTH)) {
+		brcmf_dbg(CONN, "WAR: use OPEN instead of AUTO\n");
+		type = NL80211_AUTHTYPE_OPEN_SYSTEM;
+	}
+	return type;
+}
+
+static s32
+brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
+		       struct cfg80211_connect_params *sme)
+{
+	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
+	struct ieee80211_channel *chan = sme->channel;
+	struct brcmf_join_params join_params;
+	size_t join_params_size;
+	const struct brcmf_tlv *rsn_ie;
+	const struct brcmf_vs_tlv *wpa_ie;
+	const void *ie;
+	u32 ie_len;
+	struct brcmf_ext_join_params_le *ext_join_params;
+	u16 chanspec;
+	s32 err = 0;
+
+	brcmf_dbg(TRACE, "Enter\n");
+	if (!check_vif_up(ifp->vif))
+		return -EIO;
+
+	if (!sme->ssid) {
+		brcmf_err("Invalid ssid\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (ifp->vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif) {
+		/* A normal (non P2P) connection request setup. */
+		ie = NULL;
+		ie_len = 0;
+		/* find the WPA_IE */
+		wpa_ie = brcmf_find_wpaie((u8 *)sme->ie, sme->ie_len);
+		if (wpa_ie) {
+			ie = wpa_ie;
+			ie_len = wpa_ie->len + TLV_HDR_LEN;
+		} else {
+			/* find the RSN_IE */
+			rsn_ie = brcmf_parse_tlvs((const u8 *)sme->ie,
+						  sme->ie_len,
+						  WLAN_EID_RSN);
+			if (rsn_ie) {
+				ie = rsn_ie;
+				ie_len = rsn_ie->len + TLV_HDR_LEN;
+			}
+		}
+		brcmf_fil_iovar_data_set(ifp, "wpaie", ie, ie_len);
+	}
+
+	err = brcmf_vif_set_mgmt_ie(ifp->vif, BRCMF_VNDR_IE_ASSOCREQ_FLAG,
+				    sme->ie, sme->ie_len);
+	if (err)
+		brcmf_err("Set Assoc REQ IE Failed\n");
+	else
+		brcmf_dbg(TRACE, "Applied Vndr IEs for Assoc request\n");
+
+	set_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state);
+
+	if (chan) {
+		cfg->channel =
+			ieee80211_frequency_to_channel(chan->center_freq);
+		chanspec = channel_to_chanspec(&cfg->d11inf, chan);
+		brcmf_dbg(CONN, "channel=%d, center_req=%d, chanspec=0x%04x\n",
+			  cfg->channel, chan->center_freq, chanspec);
+	} else {
+		cfg->channel = 0;
+		chanspec = 0;
+	}
+
+	brcmf_dbg(INFO, "ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len);
+
+	err = brcmf_set_wpa_version(ndev, sme);
+	if (err) {
+		brcmf_err("wl_set_wpa_version failed (%d)\n", err);
+		goto done;
+	}
+
+	sme->auth_type = brcmf_war_auth_type(ifp, sme->auth_type);
+	err = brcmf_set_auth_type(ndev, sme);
+	if (err) {
+		brcmf_err("wl_set_auth_type failed (%d)\n", err);
+		goto done;
+	}
+
+	err = brcmf_set_wsec_mode(ndev, sme, sme->mfp == NL80211_MFP_REQUIRED);
+	if (err) {
+		brcmf_err("wl_set_set_cipher failed (%d)\n", err);
+		goto done;
+	}
+
+	err = brcmf_set_key_mgmt(ndev, sme);
+	if (err) {
+		brcmf_err("wl_set_key_mgmt failed (%d)\n", err);
+		goto done;
+	}
+
+	err = brcmf_set_sharedkey(ndev, sme);
+	if (err) {
+		brcmf_err("brcmf_set_sharedkey failed (%d)\n", err);
+		goto done;
+	}
+
+	profile->ssid.SSID_len = min_t(u32, (u32)sizeof(profile->ssid.SSID),
+				       (u32)sme->ssid_len);
+	memcpy(&profile->ssid.SSID, sme->ssid, profile->ssid.SSID_len);
+	if (profile->ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
+		profile->ssid.SSID[profile->ssid.SSID_len] = 0;
+		brcmf_dbg(CONN, "SSID \"%s\", len (%d)\n", profile->ssid.SSID,
+			  profile->ssid.SSID_len);
+	}
+
+	/* Join with a specific BSSID and the cached SSID.
+	 * If the SSID is zero, join based on the BSSID only.
+	 */
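+	/* Size the request for everything up to the chanspec list, plus
+	 * one 16-bit chanspec when a channel is known (added below).
+	 */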
+	join_params_size = offsetof(struct brcmf_ext_join_params_le, assoc_le) +
+		offsetof(struct brcmf_assoc_params_le, chanspec_list);
+	if (cfg->channel)
+		join_params_size += sizeof(u16);
+	ext_join_params = kzalloc(join_params_size, GFP_KERNEL);
+	if (ext_join_params == NULL) {
+		err = -ENOMEM;
+		goto done;
+	}
+	ext_join_params->ssid_le.SSID_len = cpu_to_le32(profile->ssid.SSID_len);
+	memcpy(&ext_join_params->ssid_le.SSID, sme->ssid,
+	       profile->ssid.SSID_len);
+
+	/* Set up join scan parameters */
+	ext_join_params->scan_le.scan_type = -1;
+	ext_join_params->scan_le.home_time = cpu_to_le32(-1);
+
+	if (sme->bssid)
+		memcpy(&ext_join_params->assoc_le.bssid, sme->bssid, ETH_ALEN);
+	else
+		eth_broadcast_addr(ext_join_params->assoc_le.bssid);
+
+	if (cfg->channel) {
+		ext_join_params->assoc_le.chanspec_num = cpu_to_le32(1);
+
+		ext_join_params->assoc_le.chanspec_list[0] =
+			cpu_to_le16(chanspec);
+		/* Increase the dwell time to receive a probe response or
+		 * detect a beacon from the target AP in noisy air, but
+		 * only during the connect command.
+		 */
+		ext_join_params->scan_le.active_time =
+			cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS);
+		ext_join_params->scan_le.passive_time =
+			cpu_to_le32(BRCMF_SCAN_JOIN_PASSIVE_DWELL_TIME_MS);
+		/* Send probe requests more frequently to sync with the
+		 * presence period of a VSDB GO. Probing stops once a probe
+		 * response is received from the target AP/GO.
+		 */
+		ext_join_params->scan_le.nprobes =
+			cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS /
+				    BRCMF_SCAN_JOIN_PROBE_INTERVAL_MS);
+	} else {
+		ext_join_params->scan_le.active_time = cpu_to_le32(-1);
+		ext_join_params->scan_le.passive_time = cpu_to_le32(-1);
+		ext_join_params->scan_le.nprobes = cpu_to_le32(-1);
+	}
+
+	err = brcmf_fil_bsscfg_data_set(ifp, "join", ext_join_params,
+					 join_params_size);
+	kfree(ext_join_params);
+	if (!err)
+		/* This is it. join command worked, we are done */
+		goto done;
+
+	/* join command failed, fallback to set ssid */
+	memset(&join_params, 0, sizeof(join_params));
+	join_params_size = sizeof(join_params.ssid_le);
+
+	memcpy(&join_params.ssid_le.SSID, sme->ssid, profile->ssid.SSID_len);
+	join_params.ssid_le.SSID_len = cpu_to_le32(profile->ssid.SSID_len);
+
+	if (sme->bssid)
+		memcpy(join_params.params_le.bssid, sme->bssid, ETH_ALEN);
+	else
+		eth_broadcast_addr(join_params.params_le.bssid);
+
+	if (cfg->channel) {
+		join_params.params_le.chanspec_list[0] = cpu_to_le16(chanspec);
+		join_params.params_le.chanspec_num = cpu_to_le32(1);
+		join_params_size += sizeof(join_params.params_le);
+	}
+	err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
+				     &join_params, join_params_size);
+	if (err)
+		brcmf_err("BRCMF_C_SET_SSID failed (%d)\n", err);
+
+done:
+	if (err)
+		clear_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state);
+	brcmf_dbg(TRACE, "Exit\n");
+	return err;
+}
+
+static s32
+brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev,
+		       u16 reason_code)
+{
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
+	struct brcmf_scb_val_le scbval;
+	s32 err = 0;
+
+	brcmf_dbg(TRACE, "Enter. Reason code = %d\n", reason_code);
+	if (!check_vif_up(ifp->vif))
+		return -EIO;
+
+	clear_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state);
+	clear_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state);
+	cfg80211_disconnected(ndev, reason_code, NULL, 0, true, GFP_KERNEL);
+
+	memcpy(&scbval.ea, &profile->bssid, ETH_ALEN);
+	scbval.val = cpu_to_le32(reason_code);
+	err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_DISASSOC,
+				     &scbval, sizeof(scbval));
+	if (err)
+		brcmf_err("error (%d)\n", err);
+
+	brcmf_dbg(TRACE, "Exit\n");
+	return err;
+}
+
+static s32
+brcmf_cfg80211_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
+			    enum nl80211_tx_power_setting type, s32 mbm)
+{
+	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+	struct net_device *ndev = cfg_to_ndev(cfg);
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	s32 err;
+	s32 disable;
+	u32 qdbm = 127;
+
+	brcmf_dbg(TRACE, "Enter %d %d\n", type, mbm);
+	if (!check_vif_up(ifp->vif))
+		return -EIO;
+
+	switch (type) {
+	case NL80211_TX_POWER_AUTOMATIC:
+		break;
+	case NL80211_TX_POWER_LIMITED:
+	case NL80211_TX_POWER_FIXED:
+		if (mbm < 0) {
+			brcmf_err("TX_POWER_FIXED - dbm is negative\n");
+			err = -EINVAL;
+			goto done;
+		}
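+		/* The "qtxpower" iovar takes quarter-dBm units: mbm is
+		 * dBm * 100, so MBM_TO_DBM(4 * mbm) yields dBm * 4.
+		 */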
+		qdbm = MBM_TO_DBM(4 * mbm);
+		if (qdbm > 127)
+			qdbm = 127;
+		qdbm |= WL_TXPWR_OVERRIDE;
+		break;
+	default:
+		brcmf_err("Unsupported type %d\n", type);
+		err = -EINVAL;
+		goto done;
+	}
+	/* Make sure radio is off or on as far as software is concerned */
+	disable = WL_RADIO_SW_DISABLE << 16;
+	err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_RADIO, disable);
+	if (err)
+		brcmf_err("WLC_SET_RADIO error (%d)\n", err);
+
+	err = brcmf_fil_iovar_int_set(ifp, "qtxpower", qdbm);
+	if (err)
+		brcmf_err("qtxpower error (%d)\n", err);
+
+done:
+	brcmf_dbg(TRACE, "Exit %d (qdbm)\n", qdbm & ~WL_TXPWR_OVERRIDE);
+	return err;
+}
+
+static s32
+brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
+			    s32 *dbm)
+{
+	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+	struct net_device *ndev = cfg_to_ndev(cfg);
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	s32 qdbm = 0;
+	s32 err;
+
+	brcmf_dbg(TRACE, "Enter\n");
+	if (!check_vif_up(ifp->vif))
+		return -EIO;
+
+	err = brcmf_fil_iovar_int_get(ifp, "qtxpower", &qdbm);
+	if (err) {
+		brcmf_err("error (%d)\n", err);
+		goto done;
+	}
+	*dbm = (qdbm & ~WL_TXPWR_OVERRIDE) / 4;
+
+done:
+	brcmf_dbg(TRACE, "Exit (0x%x %d)\n", qdbm, *dbm);
+	return err;
+}
+
+static s32
+brcmf_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *ndev,
+				  u8 key_idx, bool unicast, bool multicast)
+{
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	u32 index;
+	u32 wsec;
+	s32 err = 0;
+
+	brcmf_dbg(TRACE, "Enter\n");
+	brcmf_dbg(CONN, "key index (%d)\n", key_idx);
+	if (!check_vif_up(ifp->vif))
+		return -EIO;
+
+	err = brcmf_fil_bsscfg_int_get(ifp, "wsec", &wsec);
+	if (err) {
+		brcmf_err("WLC_GET_WSEC error (%d)\n", err);
+		goto done;
+	}
+
+	if (wsec & WEP_ENABLED) {
+		/* Just select a new current key */
+		index = key_idx;
+		err = brcmf_fil_cmd_int_set(ifp,
+					    BRCMF_C_SET_KEY_PRIMARY, index);
+		if (err)
+			brcmf_err("error (%d)\n", err);
+	}
+done:
+	brcmf_dbg(TRACE, "Exit\n");
+	return err;
+}
+
+static s32
+brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
+	      u8 key_idx, const u8 *mac_addr, struct key_params *params)
+{
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct brcmf_wsec_key key;
+	s32 err = 0;
+	u8 keybuf[8];
+
+	memset(&key, 0, sizeof(key));
+	key.index = (u32) key_idx;
+	/* For default WEP keys the driver needs the ea address left
+	 * zeroed instead of set to the broadcast address.
+	 */
+	if (!is_multicast_ether_addr(mac_addr))
+		memcpy((char *)&key.ea, (void *)mac_addr, ETH_ALEN);
+	key.len = (u32) params->key_len;
+	/* check for key index change */
+	if (key.len == 0) {
+		/* key delete */
+		err = send_key_to_dongle(ifp, &key);
+		if (err)
+			brcmf_err("key delete error (%d)\n", err);
+	} else {
+		if (key.len > sizeof(key.data)) {
+			brcmf_err("Invalid key length (%d)\n", key.len);
+			return -EINVAL;
+		}
+
+		brcmf_dbg(CONN, "Setting the key index %d\n", key.index);
+		memcpy(key.data, params->key, key.len);
+
+		if (!brcmf_is_apmode(ifp->vif) &&
+		    (params->cipher == WLAN_CIPHER_SUITE_TKIP)) {
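+			/* The TKIP key material carries two 8-byte Michael
+			 * MIC keys after the 16-byte TK; in non-AP mode the
+			 * firmware expects the MIC halves in the opposite
+			 * order, so swap bytes 16-23 and 24-31.
+			 */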
+			brcmf_dbg(CONN, "Swapping RX/TX MIC key\n");
+			memcpy(keybuf, &key.data[24], sizeof(keybuf));
+			memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
+			memcpy(&key.data[16], keybuf, sizeof(keybuf));
+		}
+
+		/* if IW_ENCODE_EXT_RX_SEQ_VALID set */
+		if (params->seq && params->seq_len == 6) {
+			/* rx iv */
+			u8 *ivptr;
+			ivptr = (u8 *) params->seq;
+			key.rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) |
+			    (ivptr[3] << 8) | ivptr[2];
+			key.rxiv.lo = (ivptr[1] << 8) | ivptr[0];
+			key.iv_initialized = true;
+		}
+
+		switch (params->cipher) {
+		case WLAN_CIPHER_SUITE_WEP40:
+			key.algo = CRYPTO_ALGO_WEP1;
+			brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP40\n");
+			break;
+		case WLAN_CIPHER_SUITE_WEP104:
+			key.algo = CRYPTO_ALGO_WEP128;
+			brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n");
+			break;
+		case WLAN_CIPHER_SUITE_TKIP:
+			key.algo = CRYPTO_ALGO_TKIP;
+			brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_TKIP\n");
+			break;
+		case WLAN_CIPHER_SUITE_AES_CMAC:
+			key.algo = CRYPTO_ALGO_AES_CCM;
+			brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_AES_CMAC\n");
+			break;
+		case WLAN_CIPHER_SUITE_CCMP:
+			key.algo = CRYPTO_ALGO_AES_CCM;
+			brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_CCMP\n");
+			break;
+		default:
+			brcmf_err("Invalid cipher (0x%x)\n", params->cipher);
+			return -EINVAL;
+		}
+		err = send_key_to_dongle(ifp, &key);
+		if (err)
+			brcmf_err("wsec_key error (%d)\n", err);
+	}
+	return err;
+}
+
+static s32
+brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
+		    u8 key_idx, bool pairwise, const u8 *mac_addr,
+		    struct key_params *params)
+{
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct brcmf_wsec_key *key;
+	s32 val;
+	s32 wsec;
+	s32 err = 0;
+	u8 keybuf[8];
+
+	brcmf_dbg(TRACE, "Enter\n");
+	brcmf_dbg(CONN, "key index (%d)\n", key_idx);
+	if (!check_vif_up(ifp->vif))
+		return -EIO;
+
+	if (key_idx >= BRCMF_MAX_DEFAULT_KEYS) {
+		/* we ignore this key index in this case */
+		brcmf_err("invalid key index (%d)\n", key_idx);
+		return -EINVAL;
+	}
+
+	if (mac_addr &&
+		(params->cipher != WLAN_CIPHER_SUITE_WEP40) &&
+		(params->cipher != WLAN_CIPHER_SUITE_WEP104)) {
+		brcmf_dbg(TRACE, "Exit");
+		return brcmf_add_keyext(wiphy, ndev, key_idx, mac_addr, params);
+	}
+
+	key = &ifp->vif->profile.key[key_idx];
+	memset(key, 0, sizeof(*key));
+
+	if (params->key_len > sizeof(key->data)) {
+		brcmf_err("Too long key length (%u)\n", params->key_len);
+		err = -EINVAL;
+		goto done;
+	}
+	key->len = params->key_len;
+	key->index = key_idx;
+
+	memcpy(key->data, params->key, key->len);
+
+	key->flags = BRCMF_PRIMARY_KEY;
+	switch (params->cipher) {
+	case WLAN_CIPHER_SUITE_WEP40:
+		key->algo = CRYPTO_ALGO_WEP1;
+		val = WEP_ENABLED;
+		brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP40\n");
+		break;
+	case WLAN_CIPHER_SUITE_WEP104:
+		key->algo = CRYPTO_ALGO_WEP128;
+		val = WEP_ENABLED;
+		brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n");
+		break;
+	case WLAN_CIPHER_SUITE_TKIP:
+		if (!brcmf_is_apmode(ifp->vif)) {
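+			/* Same Michael MIC key swap as in brcmf_add_keyext():
+			 * the firmware expects the two 8-byte MIC halves in
+			 * the opposite order in non-AP mode.
+			 */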
+			brcmf_dbg(CONN, "Swapping RX/TX MIC key\n");
+			memcpy(keybuf, &key->data[24], sizeof(keybuf));
+			memcpy(&key->data[24], &key->data[16], sizeof(keybuf));
+			memcpy(&key->data[16], keybuf, sizeof(keybuf));
+		}
+		key->algo = CRYPTO_ALGO_TKIP;
+		val = TKIP_ENABLED;
+		brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_TKIP\n");
+		break;
+	case WLAN_CIPHER_SUITE_AES_CMAC:
+		key->algo = CRYPTO_ALGO_AES_CCM;
+		val = AES_ENABLED;
+		brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_AES_CMAC\n");
+		break;
+	case WLAN_CIPHER_SUITE_CCMP:
+		key->algo = CRYPTO_ALGO_AES_CCM;
+		val = AES_ENABLED;
+		brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_CCMP\n");
+		break;
+	default:
+		brcmf_err("Invalid cipher (0x%x)\n", params->cipher);
+		err = -EINVAL;
+		goto done;
+	}
+
+	err = send_key_to_dongle(ifp, key);
+	if (err)
+		goto done;
+
+	err = brcmf_fil_bsscfg_int_get(ifp, "wsec", &wsec);
+	if (err) {
+		brcmf_err("get wsec error (%d)\n", err);
+		goto done;
+	}
+	wsec |= val;
+	err = brcmf_fil_bsscfg_int_set(ifp, "wsec", wsec);
+	if (err) {
+		brcmf_err("set wsec error (%d)\n", err);
+		goto done;
+	}
+
+done:
+	brcmf_dbg(TRACE, "Exit\n");
+	return err;
+}
+
+static s32
+brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
+		    u8 key_idx, bool pairwise, const u8 *mac_addr)
+{
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct brcmf_wsec_key key;
+	s32 err = 0;
+
+	brcmf_dbg(TRACE, "Enter\n");
+	if (!check_vif_up(ifp->vif))
+		return -EIO;
+
+	if (key_idx >= BRCMF_MAX_DEFAULT_KEYS) {
+		/* we ignore this key index in this case */
+		return -EINVAL;
+	}
+
+	memset(&key, 0, sizeof(key));
+
+	key.index = (u32) key_idx;
+	key.flags = BRCMF_PRIMARY_KEY;
+	key.algo = CRYPTO_ALGO_OFF;
+
+	brcmf_dbg(CONN, "key index (%d)\n", key_idx);
+
+	/* Set the new key/index */
+	err = send_key_to_dongle(ifp, &key);
+
+	brcmf_dbg(TRACE, "Exit\n");
+	return err;
+}
+
+static s32
+brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
+		    u8 key_idx, bool pairwise, const u8 *mac_addr, void *cookie,
+		    void (*callback) (void *cookie, struct key_params * params))
+{
+	struct key_params params;
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
+	struct brcmf_cfg80211_security *sec;
+	s32 wsec;
+	s32 err = 0;
+
+	brcmf_dbg(TRACE, "Enter\n");
+	brcmf_dbg(CONN, "key index (%d)\n", key_idx);
+	if (!check_vif_up(ifp->vif))
+		return -EIO;
+
+	memset(&params, 0, sizeof(params));
+
+	err = brcmf_fil_bsscfg_int_get(ifp, "wsec", &wsec);
+	if (err) {
+		brcmf_err("WLC_GET_WSEC error (%d)\n", err);
+		/* Ignore this error, may happen during DISASSOC */
+		err = -EAGAIN;
+		goto done;
+	}
+	if (wsec & WEP_ENABLED) {
+		sec = &profile->sec;
+		if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) {
+			params.cipher = WLAN_CIPHER_SUITE_WEP40;
+			brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP40\n");
+		} else if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP104) {
+			params.cipher = WLAN_CIPHER_SUITE_WEP104;
+			brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n");
+		}
+	} else if (wsec & TKIP_ENABLED) {
+		params.cipher = WLAN_CIPHER_SUITE_TKIP;
+		brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_TKIP\n");
+	} else if (wsec & AES_ENABLED) {
+		params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;
+		brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_AES_CMAC\n");
+	} else  {
+		brcmf_err("Invalid algo (0x%x)\n", wsec);
+		err = -EINVAL;
+		goto done;
+	}
+	callback(cookie, &params);
+
+done:
+	brcmf_dbg(TRACE, "Exit\n");
+	return err;
+}
+
+static s32
+brcmf_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
+				    struct net_device *ndev, u8 key_idx)
+{
+	brcmf_dbg(INFO, "Not supported\n");
+
+	return -EOPNOTSUPP;
+}
+
+static void
+brcmf_cfg80211_reconfigure_wep(struct brcmf_if *ifp)
+{
+	s32 err;
+	u8 key_idx;
+	struct brcmf_wsec_key *key;
+	s32 wsec;
+
+	for (key_idx = 0; key_idx < BRCMF_MAX_DEFAULT_KEYS; key_idx++) {
+		key = &ifp->vif->profile.key[key_idx];
+		if ((key->algo == CRYPTO_ALGO_WEP1) ||
+		    (key->algo == CRYPTO_ALGO_WEP128))
+			break;
+	}
+	if (key_idx == BRCMF_MAX_DEFAULT_KEYS)
+		return;
+
+	err = send_key_to_dongle(ifp, key);
+	if (err) {
+		brcmf_err("Setting WEP key failed (%d)\n", err);
+		return;
+	}
+	err = brcmf_fil_bsscfg_int_get(ifp, "wsec", &wsec);
+	if (err) {
+		brcmf_err("get wsec error (%d)\n", err);
+		return;
+	}
+	wsec |= WEP_ENABLED;
+	err = brcmf_fil_bsscfg_int_set(ifp, "wsec", wsec);
+	if (err)
+		brcmf_err("set wsec error (%d)\n", err);
+}
+
+static void brcmf_convert_sta_flags(u32 fw_sta_flags, struct station_info *si)
+{
+	struct nl80211_sta_flag_update *sfu;
+
+	brcmf_dbg(TRACE, "flags %08x\n", fw_sta_flags);
+	si->filled |= BIT(NL80211_STA_INFO_STA_FLAGS);
+	sfu = &si->sta_flags;
+	sfu->mask = BIT(NL80211_STA_FLAG_WME) |
+		    BIT(NL80211_STA_FLAG_AUTHENTICATED) |
+		    BIT(NL80211_STA_FLAG_ASSOCIATED) |
+		    BIT(NL80211_STA_FLAG_AUTHORIZED);
+	if (fw_sta_flags & BRCMF_STA_WME)
+		sfu->set |= BIT(NL80211_STA_FLAG_WME);
+	if (fw_sta_flags & BRCMF_STA_AUTHE)
+		sfu->set |= BIT(NL80211_STA_FLAG_AUTHENTICATED);
+	if (fw_sta_flags & BRCMF_STA_ASSOC)
+		sfu->set |= BIT(NL80211_STA_FLAG_ASSOCIATED);
+	if (fw_sta_flags & BRCMF_STA_AUTHO)
+		sfu->set |= BIT(NL80211_STA_FLAG_AUTHORIZED);
+}
+
+static void brcmf_fill_bss_param(struct brcmf_if *ifp, struct station_info *si)
+{
+	struct {
+		__le32 len;
+		struct brcmf_bss_info_le bss_le;
+	} *buf;
+	u16 capability;
+	int err;
+
+	buf = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
+	if (!buf)
+		return;
+
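+	/* BRCMF_C_GET_BSS_INFO expects the available buffer size in the
+	 * first 32-bit word of the buffer it is given.
+	 */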
+	buf->len = cpu_to_le32(WL_BSS_INFO_MAX);
+	err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BSS_INFO, buf,
+				     WL_BSS_INFO_MAX);
+	if (err) {
+		brcmf_err("Failed to get bss info (%d)\n", err);
+		goto out_kfree;
+	}
+	si->filled |= BIT(NL80211_STA_INFO_BSS_PARAM);
+	si->bss_param.beacon_interval = le16_to_cpu(buf->bss_le.beacon_period);
+	si->bss_param.dtim_period = buf->bss_le.dtim_period;
+	capability = le16_to_cpu(buf->bss_le.capability);
+	if (capability & IEEE80211_HT_STBC_PARAM_DUAL_CTS_PROT)
+		si->bss_param.flags |= BSS_PARAM_FLAGS_CTS_PROT;
+	if (capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
+		si->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE;
+	if (capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
+		si->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME;
+
+out_kfree:
+	kfree(buf);
+}
+
+static s32
+brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
+			   const u8 *mac, struct station_info *sinfo)
+{
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	s32 err = 0;
+	struct brcmf_sta_info_le sta_info_le;
+	u32 sta_flags;
+	u32 is_tdls_peer;
+	s32 total_rssi;
+	s32 count_rssi;
+	u32 i;
+
+	brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac);
+	if (!check_vif_up(ifp->vif))
+		return -EIO;
+
+	memset(&sta_info_le, 0, sizeof(sta_info_le));
+	memcpy(&sta_info_le, mac, ETH_ALEN);
+	err = brcmf_fil_iovar_data_get(ifp, "tdls_sta_info",
+				       &sta_info_le,
+				       sizeof(sta_info_le));
+	is_tdls_peer = !err;
+	if (err) {
+		err = brcmf_fil_iovar_data_get(ifp, "sta_info",
+					       &sta_info_le,
+					       sizeof(sta_info_le));
+		if (err < 0) {
+			brcmf_err("GET STA INFO failed, %d\n", err);
+			goto done;
+		}
+	}
+	brcmf_dbg(TRACE, "version %d\n", le16_to_cpu(sta_info_le.ver));
+	sinfo->filled = BIT(NL80211_STA_INFO_INACTIVE_TIME);
+	sinfo->inactive_time = le32_to_cpu(sta_info_le.idle) * 1000;
+	sta_flags = le32_to_cpu(sta_info_le.flags);
+	brcmf_convert_sta_flags(sta_flags, sinfo);
+	sinfo->sta_flags.mask |= BIT(NL80211_STA_FLAG_TDLS_PEER);
+	if (is_tdls_peer)
+		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_TDLS_PEER);
+	else
+		sinfo->sta_flags.set &= ~BIT(NL80211_STA_FLAG_TDLS_PEER);
+	if (sta_flags & BRCMF_STA_ASSOC) {
+		sinfo->filled |= BIT(NL80211_STA_INFO_CONNECTED_TIME);
+		sinfo->connected_time = le32_to_cpu(sta_info_le.in);
+		brcmf_fill_bss_param(ifp, sinfo);
+	}
+	if (sta_flags & BRCMF_STA_SCBSTATS) {
+		sinfo->filled |= BIT(NL80211_STA_INFO_TX_FAILED);
+		sinfo->tx_failed = le32_to_cpu(sta_info_le.tx_failures);
+		sinfo->filled |= BIT(NL80211_STA_INFO_TX_PACKETS);
+		sinfo->tx_packets = le32_to_cpu(sta_info_le.tx_pkts);
+		sinfo->tx_packets += le32_to_cpu(sta_info_le.tx_mcast_pkts);
+		sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS);
+		sinfo->rx_packets = le32_to_cpu(sta_info_le.rx_ucast_pkts);
+		sinfo->rx_packets += le32_to_cpu(sta_info_le.rx_mcast_pkts);
+		if (sinfo->tx_packets) {
+			sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
+			sinfo->txrate.legacy =
+				le32_to_cpu(sta_info_le.tx_rate) / 100;
+		}
+		if (sinfo->rx_packets) {
+			sinfo->filled |= BIT(NL80211_STA_INFO_RX_BITRATE);
+			sinfo->rxrate.legacy =
+				le32_to_cpu(sta_info_le.rx_rate) / 100;
+		}
+		if (le16_to_cpu(sta_info_le.ver) >= 4) {
+			sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES);
+			sinfo->tx_bytes = le64_to_cpu(sta_info_le.tx_tot_bytes);
+			sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES);
+			sinfo->rx_bytes = le64_to_cpu(sta_info_le.rx_tot_bytes);
+		}
+		total_rssi = 0;
+		count_rssi = 0;
+		for (i = 0; i < BRCMF_ANT_MAX; i++) {
+			if (sta_info_le.rssi[i]) {
+				sinfo->chain_signal_avg[count_rssi] =
+					sta_info_le.rssi[i];
+				sinfo->chain_signal[count_rssi] =
+					sta_info_le.rssi[i];
+				total_rssi += sta_info_le.rssi[i];
+				count_rssi++;
+			}
+		}
+		if (count_rssi) {
+			sinfo->filled |= BIT(NL80211_STA_INFO_CHAIN_SIGNAL);
+			sinfo->chains = count_rssi;
+
+			sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+			total_rssi /= count_rssi;
+			sinfo->signal = total_rssi;
+		}
+	}
+done:
+	brcmf_dbg(TRACE, "Exit\n");
+	return err;
+}
+
+static int
+brcmf_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *ndev,
+			    int idx, u8 *mac, struct station_info *sinfo)
+{
+	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	s32 err;
+
+	brcmf_dbg(TRACE, "Enter, idx %d\n", idx);
+
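+	/* idx 0 refreshes the cached association list from firmware;
+	 * subsequent calls only index into the cached copy.
+	 */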
+	if (idx == 0) {
+		cfg->assoclist.count = cpu_to_le32(BRCMF_MAX_ASSOCLIST);
+		err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_ASSOCLIST,
+					     &cfg->assoclist,
+					     sizeof(cfg->assoclist));
+		if (err) {
+			brcmf_err("BRCMF_C_GET_ASSOCLIST unsupported, err=%d\n",
+				  err);
+			cfg->assoclist.count = 0;
+			return -EOPNOTSUPP;
+		}
+	}
+	if (idx < le32_to_cpu(cfg->assoclist.count)) {
+		memcpy(mac, cfg->assoclist.mac[idx], ETH_ALEN);
+		return brcmf_cfg80211_get_station(wiphy, ndev, mac, sinfo);
+	}
+	return -ENOENT;
+}
+
+static s32
+brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev,
+			   bool enabled, s32 timeout)
+{
+	s32 pm;
+	s32 err = 0;
+	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+	struct brcmf_if *ifp = netdev_priv(ndev);
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	/*
+	 * A powersave enable/disable request can arrive from cfg80211
+	 * even before the interface is up. In that case store the
+	 * preference in the cfg struct and apply it to the firmware
+	 * later, while initializing the dongle.
+	 */
+	cfg->pwr_save = enabled;
+	if (!check_vif_up(ifp->vif)) {
+		brcmf_dbg(INFO, "Device is not ready, storing the value in cfg_info struct\n");
+		goto done;
+	}
+
+	pm = enabled ? PM_FAST : PM_OFF;
+	/* Do not enable the power save after assoc if it is a p2p interface */
+	if (ifp->vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT) {
+		brcmf_dbg(INFO, "Do not enable power save for P2P clients\n");
+		pm = PM_OFF;
+	}
+	brcmf_dbg(INFO, "power save %s\n", (pm ? "enabled" : "disabled"));
+
+	err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PM, pm);
+	if (err) {
+		if (err == -ENODEV)
+			brcmf_err("net_device is not ready yet\n");
+		else
+			brcmf_err("error (%d)\n", err);
+	}
+done:
+	brcmf_dbg(TRACE, "Exit\n");
+	return err;
+}
+
+static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg,
+				   struct brcmf_bss_info_le *bi)
+{
+	struct wiphy *wiphy = cfg_to_wiphy(cfg);
+	struct ieee80211_channel *notify_channel;
+	struct cfg80211_bss *bss;
+	struct ieee80211_supported_band *band;
+	struct brcmu_chan ch;
+	u16 channel;
+	u32 freq;
+	u16 notify_capability;
+	u16 notify_interval;
+	u8 *notify_ie;
+	size_t notify_ielen;
+	s32 notify_signal;
+
+	if (le32_to_cpu(bi->length) > WL_BSS_INFO_MAX) {
+		brcmf_err("Bss info is larger than buffer. Discarding\n");
+		return 0;
+	}
+
+	if (!bi->ctl_ch) {
+		ch.chspec = le16_to_cpu(bi->chanspec);
+		cfg->d11inf.decchspec(&ch);
+		bi->ctl_ch = ch.chnum;
+	}
+	channel = bi->ctl_ch;
+
+	if (channel <= CH_MAX_2G_CHANNEL)
+		band = wiphy->bands[IEEE80211_BAND_2GHZ];
+	else
+		band = wiphy->bands[IEEE80211_BAND_5GHZ];
+
+	freq = ieee80211_channel_to_frequency(channel, band->band);
+	notify_channel = ieee80211_get_channel(wiphy, freq);
+
+	notify_capability = le16_to_cpu(bi->capability);
+	notify_interval = le16_to_cpu(bi->beacon_period);
+	notify_ie = (u8 *)bi + le16_to_cpu(bi->ie_offset);
+	notify_ielen = le32_to_cpu(bi->ie_length);
+	notify_signal = (s16)le16_to_cpu(bi->RSSI) * 100;
+
+	brcmf_dbg(CONN, "bssid: %pM\n", bi->BSSID);
+	brcmf_dbg(CONN, "Channel: %d(%d)\n", channel, freq);
+	brcmf_dbg(CONN, "Capability: %X\n", notify_capability);
+	brcmf_dbg(CONN, "Beacon interval: %d\n", notify_interval);
+	brcmf_dbg(CONN, "Signal: %d\n", notify_signal);
+
+	bss = cfg80211_inform_bss(wiphy, notify_channel,
+				  CFG80211_BSS_FTYPE_UNKNOWN,
+				  (const u8 *)bi->BSSID,
+				  0, notify_capability,
+				  notify_interval, notify_ie,
+				  notify_ielen, notify_signal,
+				  GFP_KERNEL);
+
+	if (!bss)
+		return -ENOMEM;
+
+	cfg80211_put_bss(wiphy, bss);
+
+	return 0;
+}
+
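+/* Scan results hold variable-length bss_info records packed back to back;
+ * each record's length field gives the offset to the next one.
+ */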
+static struct brcmf_bss_info_le *
+next_bss_le(struct brcmf_scan_results *list, struct brcmf_bss_info_le *bss)
+{
+	if (bss == NULL)
+		return list->bss_info_le;
+	return (struct brcmf_bss_info_le *)((unsigned long)bss +
+					    le32_to_cpu(bss->length));
+}
+
+static s32 brcmf_inform_bss(struct brcmf_cfg80211_info *cfg)
+{
+	struct brcmf_scan_results *bss_list;
+	struct brcmf_bss_info_le *bi = NULL;	/* must be initialized */
+	s32 err = 0;
+	int i;
+
+	bss_list = (struct brcmf_scan_results *)cfg->escan_info.escan_buf;
+	if (bss_list->count != 0 &&
+	    bss_list->version != BRCMF_BSS_INFO_VERSION) {
+		brcmf_err("Version %d != WL_BSS_INFO_VERSION\n",
+			  bss_list->version);
+		return -EOPNOTSUPP;
+	}
+	brcmf_dbg(SCAN, "scanned AP count (%d)\n", bss_list->count);
+	for (i = 0; i < bss_list->count; i++) {
+		bi = next_bss_le(bss_list, bi);
+		err = brcmf_inform_single_bss(cfg, bi);
+		if (err)
+			break;
+	}
+	return err;
+}
+
+static s32 wl_inform_ibss(struct brcmf_cfg80211_info *cfg,
+			  struct net_device *ndev, const u8 *bssid)
+{
+	struct wiphy *wiphy = cfg_to_wiphy(cfg);
+	struct ieee80211_channel *notify_channel;
+	struct brcmf_bss_info_le *bi = NULL;
+	struct ieee80211_supported_band *band;
+	struct cfg80211_bss *bss;
+	struct brcmu_chan ch;
+	u8 *buf = NULL;
+	s32 err = 0;
+	u32 freq;
+	u16 notify_capability;
+	u16 notify_interval;
+	u8 *notify_ie;
+	size_t notify_ielen;
+	s32 notify_signal;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	buf = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
+	if (buf == NULL) {
+		err = -ENOMEM;
+		goto CleanUp;
+	}
+
+	*(__le32 *)buf = cpu_to_le32(WL_BSS_INFO_MAX);
+
+	err = brcmf_fil_cmd_data_get(netdev_priv(ndev), BRCMF_C_GET_BSS_INFO,
+				     buf, WL_BSS_INFO_MAX);
+	if (err) {
+		brcmf_err("WLC_GET_BSS_INFO failed: %d\n", err);
+		goto CleanUp;
+	}
+
+	bi = (struct brcmf_bss_info_le *)(buf + 4);
+
+	ch.chspec = le16_to_cpu(bi->chanspec);
+	cfg->d11inf.decchspec(&ch);
+
+	if (ch.band == BRCMU_CHAN_BAND_2G)
+		band = wiphy->bands[IEEE80211_BAND_2GHZ];
+	else
+		band = wiphy->bands[IEEE80211_BAND_5GHZ];
+
+	freq = ieee80211_channel_to_frequency(ch.chnum, band->band);
+	notify_channel = ieee80211_get_channel(wiphy, freq);
+
+	notify_capability = le16_to_cpu(bi->capability);
+	notify_interval = le16_to_cpu(bi->beacon_period);
+	notify_ie = (u8 *)bi + le16_to_cpu(bi->ie_offset);
+	notify_ielen = le32_to_cpu(bi->ie_length);
+	notify_signal = (s16)le16_to_cpu(bi->RSSI) * 100;
+
+	brcmf_dbg(CONN, "channel: %d(%d)\n", ch.chnum, freq);
+	brcmf_dbg(CONN, "capability: %X\n", notify_capability);
+	brcmf_dbg(CONN, "beacon interval: %d\n", notify_interval);
+	brcmf_dbg(CONN, "signal: %d\n", notify_signal);
+
+	bss = cfg80211_inform_bss(wiphy, notify_channel,
+				  CFG80211_BSS_FTYPE_UNKNOWN, bssid, 0,
+				  notify_capability, notify_interval,
+				  notify_ie, notify_ielen, notify_signal,
+				  GFP_KERNEL);
+
+	if (!bss) {
+		err = -ENOMEM;
+		goto CleanUp;
+	}
+
+	cfg80211_put_bss(wiphy, bss);
+
+CleanUp:
+
+	kfree(buf);
+
+	brcmf_dbg(TRACE, "Exit\n");
+
+	return err;
+}
+
+static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg,
+				 struct brcmf_if *ifp)
+{
+	struct brcmf_cfg80211_profile *profile = ndev_to_prof(ifp->ndev);
+	struct brcmf_bss_info_le *bi;
+	struct brcmf_ssid *ssid;
+	const struct brcmf_tlv *tim;
+	u16 beacon_interval;
+	u8 dtim_period;
+	size_t ie_len;
+	u8 *ie;
+	s32 err = 0;
+
+	brcmf_dbg(TRACE, "Enter\n");
+	if (brcmf_is_ibssmode(ifp->vif))
+		return err;
+
+	ssid = &profile->ssid;
+
+	*(__le32 *)cfg->extra_buf = cpu_to_le32(WL_EXTRA_BUF_MAX);
+	err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BSS_INFO,
+				     cfg->extra_buf, WL_EXTRA_BUF_MAX);
+	if (err) {
+		brcmf_err("Could not get bss info %d\n", err);
+		goto update_bss_info_out;
+	}
+
+	bi = (struct brcmf_bss_info_le *)(cfg->extra_buf + 4);
+	err = brcmf_inform_single_bss(cfg, bi);
+	if (err)
+		goto update_bss_info_out;
+
+	ie = ((u8 *)bi) + le16_to_cpu(bi->ie_offset);
+	ie_len = le32_to_cpu(bi->ie_length);
+	beacon_interval = le16_to_cpu(bi->beacon_period);
+
+	tim = brcmf_parse_tlvs(ie, ie_len, WLAN_EID_TIM);
+	if (tim)
+		dtim_period = tim->data[1];
+	else {
+		/*
+		 * An active scan was done, so the DTIM information could
+		 * not be taken from a probe response. Query the dongle
+		 * for it explicitly instead.
+		 */
+		u32 var;
+		err = brcmf_fil_iovar_int_get(ifp, "dtim_assoc", &var);
+		if (err) {
+			brcmf_err("wl dtim_assoc failed (%d)\n", err);
+			goto update_bss_info_out;
+		}
+		dtim_period = (u8)var;
+	}
+
+update_bss_info_out:
+	brcmf_dbg(TRACE, "Exit");
+	return err;
+}
+
+void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg)
+{
+	struct escan_info *escan = &cfg->escan_info;
+
+	set_bit(BRCMF_SCAN_STATUS_ABORT, &cfg->scan_status);
+	if (cfg->scan_request) {
+		escan->escan_state = WL_ESCAN_STATE_IDLE;
+		brcmf_notify_escan_complete(cfg, escan->ifp, true, true);
+	}
+	clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
+	clear_bit(BRCMF_SCAN_STATUS_ABORT, &cfg->scan_status);
+}
+
+static void brcmf_cfg80211_escan_timeout_worker(struct work_struct *work)
+{
+	struct brcmf_cfg80211_info *cfg =
+			container_of(work, struct brcmf_cfg80211_info,
+				     escan_timeout_work);
+
+	brcmf_inform_bss(cfg);
+	brcmf_notify_escan_complete(cfg, cfg->escan_info.ifp, true, true);
+}
+
+static void brcmf_escan_timeout(unsigned long data)
+{
+	struct brcmf_cfg80211_info *cfg =
+			(struct brcmf_cfg80211_info *)data;
+
+	if (cfg->scan_request) {
+		brcmf_err("timer expired\n");
+		schedule_work(&cfg->escan_timeout_work);
+	}
+}
+
+static s32
+brcmf_compare_update_same_bss(struct brcmf_cfg80211_info *cfg,
+			      struct brcmf_bss_info_le *bss,
+			      struct brcmf_bss_info_le *bss_info_le)
+{
+	struct brcmu_chan ch_bss, ch_bss_info_le;
+
+	ch_bss.chspec = le16_to_cpu(bss->chanspec);
+	cfg->d11inf.decchspec(&ch_bss);
+	ch_bss_info_le.chspec = le16_to_cpu(bss_info_le->chanspec);
+	cfg->d11inf.decchspec(&ch_bss_info_le);
+
+	if (!memcmp(&bss_info_le->BSSID, &bss->BSSID, ETH_ALEN) &&
+		ch_bss.band == ch_bss_info_le.band &&
+		bss_info_le->SSID_len == bss->SSID_len &&
+		!memcmp(bss_info_le->SSID, bss->SSID, bss_info_le->SSID_len)) {
+		if ((bss->flags & BRCMF_BSS_RSSI_ON_CHANNEL) ==
+			(bss_info_le->flags & BRCMF_BSS_RSSI_ON_CHANNEL)) {
+			s16 bss_rssi = le16_to_cpu(bss->RSSI);
+			s16 bss_info_rssi = le16_to_cpu(bss_info_le->RSSI);
+
+			/* preserve max RSSI if the measurements are
+			 * both on-channel or both off-channel
+			 */
+			if (bss_info_rssi > bss_rssi)
+				bss->RSSI = bss_info_le->RSSI;
+		} else if ((bss->flags & BRCMF_BSS_RSSI_ON_CHANNEL) &&
+			(bss_info_le->flags & BRCMF_BSS_RSSI_ON_CHANNEL) == 0) {
+			/* preserve the on-channel rssi measurement
+			 * if the new measurement is off channel
+			 */
+			bss->RSSI = bss_info_le->RSSI;
+			bss->flags |= BRCMF_BSS_RSSI_ON_CHANNEL;
+		}
+		return 1;
+	}
+	return 0;
+}
+
+static s32
+brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
+			     const struct brcmf_event_msg *e, void *data)
+{
+	struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+	s32 status;
+	struct brcmf_escan_result_le *escan_result_le;
+	u32 escan_buflen;
+	struct brcmf_bss_info_le *bss_info_le;
+	struct brcmf_bss_info_le *bss = NULL;
+	u32 bi_length;
+	struct brcmf_scan_results *list;
+	u32 i;
+	bool aborted;
+
+	status = e->status;
+
+	if (!test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
+		brcmf_err("scan not ready, bssidx=%d\n", ifp->bssidx);
+		return -EPERM;
+	}
+
+	if (status == BRCMF_E_STATUS_PARTIAL) {
+		brcmf_dbg(SCAN, "ESCAN Partial result\n");
+		if (e->datalen < sizeof(*escan_result_le)) {
+			brcmf_err("invalid event data length\n");
+			goto exit;
+		}
+		escan_result_le = (struct brcmf_escan_result_le *) data;
+		if (!escan_result_le) {
+			brcmf_err("Invalid escan result (NULL pointer)\n");
+			goto exit;
+		}
+		escan_buflen = le32_to_cpu(escan_result_le->buflen);
+		if (escan_buflen > WL_ESCAN_BUF_SIZE ||
+		    escan_buflen > e->datalen ||
+		    escan_buflen < sizeof(*escan_result_le)) {
+			brcmf_err("Invalid escan buffer length: %d\n",
+				  escan_buflen);
+			goto exit;
+		}
+		if (le16_to_cpu(escan_result_le->bss_count) != 1) {
+			brcmf_err("Invalid bss_count %d: ignoring\n",
+				  escan_result_le->bss_count);
+			goto exit;
+		}
+		bss_info_le = &escan_result_le->bss_info_le;
+
+		if (brcmf_p2p_scan_finding_common_channel(cfg, bss_info_le))
+			goto exit;
+
+		if (!cfg->scan_request) {
+			brcmf_dbg(SCAN, "result without cfg80211 request\n");
+			goto exit;
+		}
+
+		bi_length = le32_to_cpu(bss_info_le->length);
+		if (bi_length != escan_buflen -	WL_ESCAN_RESULTS_FIXED_SIZE) {
+			brcmf_err("Ignoring invalid bss_info length: %d\n",
+				  bi_length);
+			goto exit;
+		}
+
+		if (!(cfg_to_wiphy(cfg)->interface_modes &
+					BIT(NL80211_IFTYPE_ADHOC))) {
+			if (le16_to_cpu(bss_info_le->capability) &
+						WLAN_CAPABILITY_IBSS) {
+				brcmf_err("Ignoring IBSS result\n");
+				goto exit;
+			}
+		}
+
+		list = (struct brcmf_scan_results *)
+				cfg->escan_info.escan_buf;
+		if (bi_length > WL_ESCAN_BUF_SIZE - list->buflen) {
+			brcmf_err("Buffer is too small: ignoring\n");
+			goto exit;
+		}
+
+		for (i = 0; i < list->count; i++) {
+			bss = bss ? (struct brcmf_bss_info_le *)
+				((unsigned char *)bss +
+				le32_to_cpu(bss->length)) : list->bss_info_le;
+			if (brcmf_compare_update_same_bss(cfg, bss,
+							  bss_info_le))
+				goto exit;
+		}
+		memcpy(&(cfg->escan_info.escan_buf[list->buflen]),
+			bss_info_le, bi_length);
+		list->version = le32_to_cpu(bss_info_le->version);
+		list->buflen += bi_length;
+		list->count++;
+	} else {
+		cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+		if (brcmf_p2p_scan_finding_common_channel(cfg, NULL))
+			goto exit;
+		if (cfg->scan_request) {
+			brcmf_inform_bss(cfg);
+			aborted = status != BRCMF_E_STATUS_SUCCESS;
+			brcmf_notify_escan_complete(cfg, ifp, aborted, false);
+		} else
+			brcmf_dbg(SCAN, "Ignored scan complete result 0x%x\n",
+				  status);
+	}
+exit:
+	return 0;
+}
+
+static void brcmf_init_escan(struct brcmf_cfg80211_info *cfg)
+{
+	brcmf_fweh_register(cfg->pub, BRCMF_E_ESCAN_RESULT,
+			    brcmf_cfg80211_escan_handler);
+	cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+	/* Init escan_timeout timer */
+	init_timer(&cfg->escan_timeout);
+	cfg->escan_timeout.data = (unsigned long) cfg;
+	cfg->escan_timeout.function = brcmf_escan_timeout;
+	INIT_WORK(&cfg->escan_timeout_work,
+		  brcmf_cfg80211_escan_timeout_worker);
+}
+
+static __always_inline void brcmf_delay(u32 ms)
+{
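+	/* For delays shorter than one jiffy (1000 / HZ ms) msleep() would
+	 * round up and oversleep, so yield and busy-wait instead.
+	 */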
+	if (ms < 1000 / HZ) {
+		cond_resched();
+		mdelay(ms);
+	} else {
+		msleep(ms);
+	}
+}
+
+static s32 brcmf_config_wowl_pattern(struct brcmf_if *ifp, u8 cmd[4],
+				     u8 *pattern, u32 patternsize, u8 *mask,
+				     u32 packet_offset)
+{
+	struct brcmf_fil_wowl_pattern_le *filter;
+	u32 masksize;
+	u32 patternoffset;
+	u8 *buf;
+	u32 bufsize;
+	s32 ret;
+
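+	/* The mask carries one bit per pattern byte, rounded up to whole
+	 * bytes. The buffer layout is: filter header, mask, then pattern;
+	 * offsets are counted from just after the cmd field.
+	 */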
+	masksize = (patternsize + 7) / 8;
+	patternoffset = sizeof(*filter) - sizeof(filter->cmd) + masksize;
+
+	bufsize = sizeof(*filter) + patternsize + masksize;
+	buf = kzalloc(bufsize, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+	filter = (struct brcmf_fil_wowl_pattern_le *)buf;
+
+	memcpy(filter->cmd, cmd, 4);
+	filter->masksize = cpu_to_le32(masksize);
+	filter->offset = cpu_to_le32(packet_offset);
+	filter->patternoffset = cpu_to_le32(patternoffset);
+	filter->patternsize = cpu_to_le32(patternsize);
+	filter->type = cpu_to_le32(BRCMF_WOWL_PATTERN_TYPE_BITMAP);
+
+	if ((mask) && (masksize))
+		memcpy(buf + sizeof(*filter), mask, masksize);
+	if ((pattern) && (patternsize))
+		memcpy(buf + sizeof(*filter) + masksize, pattern, patternsize);
+
+	ret = brcmf_fil_iovar_data_set(ifp, "wowl_pattern", buf, bufsize);
+
+	kfree(buf);
+	return ret;
+}
+
+static s32 brcmf_cfg80211_resume(struct wiphy *wiphy)
+{
+	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+	struct net_device *ndev = cfg_to_ndev(cfg);
+	struct brcmf_if *ifp = netdev_priv(ndev);
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	if (cfg->wowl_enabled) {
+		brcmf_configure_arp_offload(ifp, true);
+		brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PM,
+				      cfg->pre_wowl_pmmode);
+		brcmf_fil_iovar_int_set(ifp, "wowl_clear", 0);
+		brcmf_config_wowl_pattern(ifp, "clr", NULL, 0, NULL, 0);
+		cfg->wowl_enabled = false;
+	}
+	return 0;
+}
+
+static void brcmf_configure_wowl(struct brcmf_cfg80211_info *cfg,
+				 struct brcmf_if *ifp,
+				 struct cfg80211_wowlan *wowl)
+{
+	u32 wowl_config;
+	u32 i;
+
+	brcmf_dbg(TRACE, "Suspend, wowl config.\n");
+
+	brcmf_configure_arp_offload(ifp, false);
+	brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_PM, &cfg->pre_wowl_pmmode);
+	brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PM, PM_MAX);
+
+	wowl_config = 0;
+	if (wowl->disconnect)
+		wowl_config = BRCMF_WOWL_DIS | BRCMF_WOWL_BCN | BRCMF_WOWL_RETR;
+	if (wowl->magic_pkt)
+		wowl_config |= BRCMF_WOWL_MAGIC;
+	if ((wowl->patterns) && (wowl->n_patterns)) {
+		wowl_config |= BRCMF_WOWL_NET;
+		for (i = 0; i < wowl->n_patterns; i++) {
+			brcmf_config_wowl_pattern(ifp, "add",
+				(u8 *)wowl->patterns[i].pattern,
+				wowl->patterns[i].pattern_len,
+				(u8 *)wowl->patterns[i].mask,
+				wowl->patterns[i].pkt_offset);
+		}
+	}
+	brcmf_fil_iovar_int_set(ifp, "wowl", wowl_config);
+	brcmf_fil_iovar_int_set(ifp, "wowl_activate", 1);
+	brcmf_bus_wowl_config(cfg->pub->bus_if, true);
+	cfg->wowl_enabled = true;
+}
+
+static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy,
+				  struct cfg80211_wowlan *wowl)
+{
+	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+	struct net_device *ndev = cfg_to_ndev(cfg);
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct brcmf_cfg80211_vif *vif;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	/* if the primary net_device is not READY there is nothing
+	 * we can do but pray resume goes smoothly.
+	 */
+	if (!check_vif_up(ifp->vif))
+		goto exit;
+
+	/* end any scanning */
+	if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status))
+		brcmf_abort_scanning(cfg);
+
+	if (wowl == NULL) {
+		brcmf_bus_wowl_config(cfg->pub->bus_if, false);
+		list_for_each_entry(vif, &cfg->vif_list, list) {
+			if (!test_bit(BRCMF_VIF_STATUS_READY, &vif->sme_state))
+				continue;
+			/* When going to suspend, disassociate from the AP
+			 * (if associated) to save power while the system is
+			 * suspended.
+			 */
+			brcmf_link_down(vif, WLAN_REASON_UNSPECIFIED);
+			/* Make sure wpa_supplicant receives all the events
+			 * generated by the DISASSOC call to the firmware, so
+			 * firmware and wpa_supplicant state stay consistent.
+			 */
+			brcmf_delay(500);
+		}
+		/* Configure MPC */
+		brcmf_set_mpc(ifp, 1);
+
+	} else {
+		/* Configure WOWL parameters */
+		brcmf_configure_wowl(cfg, ifp, wowl);
+	}
+
+exit:
+	brcmf_dbg(TRACE, "Exit\n");
+	/* clear any scanning activity */
+	cfg->scan_status = 0;
+	return 0;
+}
+
+static __used s32
+brcmf_update_pmklist(struct net_device *ndev,
+		     struct brcmf_cfg80211_pmk_list *pmk_list, s32 err)
+{
+	int i, j;
+	u32 pmkid_len;
+
+	pmkid_len = le32_to_cpu(pmk_list->pmkids.npmkid);
+
+	brcmf_dbg(CONN, "No of elements %d\n", pmkid_len);
+	for (i = 0; i < pmkid_len; i++) {
+		brcmf_dbg(CONN, "PMKID[%d]: %pM =\n", i,
+			  &pmk_list->pmkids.pmkid[i].BSSID);
+		for (j = 0; j < WLAN_PMKID_LEN; j++)
+			brcmf_dbg(CONN, "%02x\n",
+				  pmk_list->pmkids.pmkid[i].PMKID[j]);
+	}
+
+	if (!err)
+		brcmf_fil_iovar_data_set(netdev_priv(ndev), "pmkid_info",
+					 (char *)pmk_list, sizeof(*pmk_list));
+
+	return err;
+}
+
+static s32
+brcmf_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *ndev,
+			 struct cfg80211_pmksa *pmksa)
+{
+	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct pmkid_list *pmkids = &cfg->pmk_list->pmkids;
+	s32 err = 0;
+	u32 pmkid_len, i;
+
+	brcmf_dbg(TRACE, "Enter\n");
+	if (!check_vif_up(ifp->vif))
+		return -EIO;
+
+	pmkid_len = le32_to_cpu(pmkids->npmkid);
+	for (i = 0; i < pmkid_len; i++)
+		if (!memcmp(pmksa->bssid, pmkids->pmkid[i].BSSID, ETH_ALEN))
+			break;
+	if (i < WL_NUM_PMKIDS_MAX) {
+		memcpy(pmkids->pmkid[i].BSSID, pmksa->bssid, ETH_ALEN);
+		memcpy(pmkids->pmkid[i].PMKID, pmksa->pmkid, WLAN_PMKID_LEN);
+		if (i == pmkid_len) {
+			pmkid_len++;
+			pmkids->npmkid = cpu_to_le32(pmkid_len);
+		}
+	} else
+		err = -EINVAL;
+
+	brcmf_dbg(CONN, "set_pmksa,IW_PMKSA_ADD - PMKID: %pM =\n",
+		  pmkids->pmkid[pmkid_len].BSSID);
+	for (i = 0; i < WLAN_PMKID_LEN; i++)
+		brcmf_dbg(CONN, "%02x\n", pmkids->pmkid[pmkid_len].PMKID[i]);
+
+	err = brcmf_update_pmklist(ndev, cfg->pmk_list, err);
+
+	brcmf_dbg(TRACE, "Exit\n");
+	return err;
+}
+
+static s32
+brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev,
+		      struct cfg80211_pmksa *pmksa)
+{
+	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct pmkid_list pmkid;
+	s32 err = 0;
+	u32 pmkid_len, i;
+
+	brcmf_dbg(TRACE, "Enter\n");
+	if (!check_vif_up(ifp->vif))
+		return -EIO;
+
+	memcpy(&pmkid.pmkid[0].BSSID, pmksa->bssid, ETH_ALEN);
+	memcpy(&pmkid.pmkid[0].PMKID, pmksa->pmkid, WLAN_PMKID_LEN);
+
+	brcmf_dbg(CONN, "del_pmksa,IW_PMKSA_REMOVE - PMKID: %pM =\n",
+		  &pmkid.pmkid[0].BSSID);
+	for (i = 0; i < WLAN_PMKID_LEN; i++)
+		brcmf_dbg(CONN, "%02x\n", pmkid.pmkid[0].PMKID[i]);
+
+	pmkid_len = le32_to_cpu(cfg->pmk_list->pmkids.npmkid);
+	for (i = 0; i < pmkid_len; i++)
+		if (!memcmp
+		    (pmksa->bssid, &cfg->pmk_list->pmkids.pmkid[i].BSSID,
+		     ETH_ALEN))
+			break;
+
+	if ((pmkid_len > 0) && (i < pmkid_len)) {
+		memset(&cfg->pmk_list->pmkids.pmkid[i], 0,
+		       sizeof(struct pmkid));
+		for (; i < (pmkid_len - 1); i++) {
+			memcpy(&cfg->pmk_list->pmkids.pmkid[i].BSSID,
+			       &cfg->pmk_list->pmkids.pmkid[i + 1].BSSID,
+			       ETH_ALEN);
+			memcpy(&cfg->pmk_list->pmkids.pmkid[i].PMKID,
+			       &cfg->pmk_list->pmkids.pmkid[i + 1].PMKID,
+			       WLAN_PMKID_LEN);
+		}
+		cfg->pmk_list->pmkids.npmkid = cpu_to_le32(pmkid_len - 1);
+	} else
+		err = -EINVAL;
+
+	err = brcmf_update_pmklist(ndev, cfg->pmk_list, err);
+
+	brcmf_dbg(TRACE, "Exit\n");
+	return err;
+
+}
+
+static s32
+brcmf_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *ndev)
+{
+	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	s32 err = 0;
+
+	brcmf_dbg(TRACE, "Enter\n");
+	if (!check_vif_up(ifp->vif))
+		return -EIO;
+
+	memset(cfg->pmk_list, 0, sizeof(*cfg->pmk_list));
+	err = brcmf_update_pmklist(ndev, cfg->pmk_list, err);
+
+	brcmf_dbg(TRACE, "Exit\n");
+	return err;
+}
+
+/*
+ * A PFN result does not carry all the information required by the
+ * supplicant (e.g. the IEs), so run a targeted escan to get the sched
+ * scan results reported via wl_inform_single_bss in the required format.
+ * Escan needs the scan request in the form of a cfg80211_scan_request,
+ * so for the time being construct one from the received PNO event.
+ */
+static s32
+brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
+				const struct brcmf_event_msg *e, void *data)
+{
+	struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+	struct brcmf_pno_net_info_le *netinfo, *netinfo_start;
+	struct cfg80211_scan_request *request = NULL;
+	struct cfg80211_ssid *ssid = NULL;
+	struct ieee80211_channel *channel = NULL;
+	struct wiphy *wiphy = cfg_to_wiphy(cfg);
+	int err = 0;
+	int channel_req = 0;
+	int band = 0;
+	struct brcmf_pno_scanresults_le *pfn_result;
+	u32 result_count;
+	u32 status;
+
+	brcmf_dbg(SCAN, "Enter\n");
+
+	if (e->event_code == BRCMF_E_PFN_NET_LOST) {
+		brcmf_dbg(SCAN, "PFN NET LOST event. Do Nothing\n");
+		return 0;
+	}
+
+	pfn_result = (struct brcmf_pno_scanresults_le *)data;
+	result_count = le32_to_cpu(pfn_result->count);
+	status = le32_to_cpu(pfn_result->status);
+
+	/*
+	 * A PFN event is limited to 512 bytes, so we may get multiple
+	 * NET_FOUND events. For now just place a warning here.
+	 */
+	WARN_ON(status != BRCMF_PNO_SCAN_COMPLETE);
+	brcmf_dbg(SCAN, "PFN NET FOUND event. count: %d\n", result_count);
+	if (result_count > 0) {
+		int i;
+
+		request = kzalloc(sizeof(*request), GFP_KERNEL);
+		ssid = kcalloc(result_count, sizeof(*ssid), GFP_KERNEL);
+		channel = kcalloc(result_count, sizeof(*channel), GFP_KERNEL);
+		if (!request || !ssid || !channel) {
+			err = -ENOMEM;
+			goto out_err;
+		}
+
+		request->wiphy = wiphy;
+		data += sizeof(struct brcmf_pno_scanresults_le);
+		netinfo_start = (struct brcmf_pno_net_info_le *)data;
+
+		for (i = 0; i < result_count; i++) {
+			netinfo = &netinfo_start[i];
+			if (!netinfo) {
+				brcmf_err("Invalid netinfo ptr. index: %d\n",
+					  i);
+				err = -EINVAL;
+				goto out_err;
+			}
+
+			brcmf_dbg(SCAN, "SSID:%s Channel:%d\n",
+				  netinfo->SSID, netinfo->channel);
+			if (netinfo->SSID_len > IEEE80211_MAX_SSID_LEN)
+				netinfo->SSID_len = IEEE80211_MAX_SSID_LEN;
+			memcpy(ssid[i].ssid, netinfo->SSID, netinfo->SSID_len);
+			ssid[i].ssid_len = netinfo->SSID_len;
+			request->n_ssids++;
+
+			channel_req = netinfo->channel;
+			if (channel_req <= CH_MAX_2G_CHANNEL)
+				band = NL80211_BAND_2GHZ;
+			else
+				band = NL80211_BAND_5GHZ;
+			channel[i].center_freq =
+				ieee80211_channel_to_frequency(channel_req,
+							       band);
+			channel[i].band = band;
+			channel[i].flags |= IEEE80211_CHAN_NO_HT40;
+			request->channels[i] = &channel[i];
+			request->n_channels++;
+		}
+
+		/* assign parsed ssid array */
+		if (request->n_ssids)
+			request->ssids = &ssid[0];
+
+		if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
+			/* Abort any on-going scan */
+			brcmf_abort_scanning(cfg);
+		}
+
+		set_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
+		cfg->escan_info.run = brcmf_run_escan;
+		err = brcmf_do_escan(cfg, wiphy, ifp, request);
+		if (err) {
+			clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
+			goto out_err;
+		}
+		cfg->sched_escan = true;
+		cfg->scan_request = request;
+	} else {
+		brcmf_err("FALSE PNO Event. (pfn_count == 0)\n");
+		goto out_err;
+	}
+
+	kfree(ssid);
+	kfree(channel);
+	kfree(request);
+	return 0;
+
+out_err:
+	kfree(ssid);
+	kfree(channel);
+	kfree(request);
+	cfg80211_sched_scan_stopped(wiphy);
+	return err;
+}
+
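+/* brcmf_dev_pno_clean() - disable PFN (preferred network offload) in firmware
+ * and clear any previously configured PFN entries.
+ */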
+static int brcmf_dev_pno_clean(struct net_device *ndev)
+{
+	int ret;
+
+	/* Disable pfn */
+	ret = brcmf_fil_iovar_int_set(netdev_priv(ndev), "pfn", 0);
+	if (ret == 0) {
+		/* clear pfn */
+		ret = brcmf_fil_iovar_data_set(netdev_priv(ndev), "pfnclear",
+					       NULL, 0);
+	}
+	if (ret < 0)
+		brcmf_err("failed code %d\n", ret);
+
+	return ret;
+}
+
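+/* brcmf_dev_pno_config() - program the base PFN parameters (version, adaptive
+ * scan flag, repeat count, backoff exponent and scan interval) via "pfn_set".
+ */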
+static int brcmf_dev_pno_config(struct net_device *ndev)
+{
+	struct brcmf_pno_param_le pfn_param;
+
+	memset(&pfn_param, 0, sizeof(pfn_param));
+	pfn_param.version = cpu_to_le32(BRCMF_PNO_VERSION);
+
+	/* set extra pno params */
+	pfn_param.flags = cpu_to_le16(1 << BRCMF_PNO_ENABLE_ADAPTSCAN_BIT);
+	pfn_param.repeat = BRCMF_PNO_REPEAT;
+	pfn_param.exp = BRCMF_PNO_FREQ_EXPO_MAX;
+
+	/* set up the pno scan interval */
+	pfn_param.scan_freq = cpu_to_le32(BRCMF_PNO_TIME);
+
+	return brcmf_fil_iovar_data_set(netdev_priv(ndev), "pfn_set",
+					&pfn_param, sizeof(pfn_param));
+}
+
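+/* brcmf_cfg80211_sched_scan_start() - translate a cfg80211 scheduled scan
+ * request into firmware PFN configuration: clean any existing PFN state,
+ * program the base parameters, add one "pfn_add" entry per match set and
+ * finally enable PFN.
+ */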
+static int
+brcmf_cfg80211_sched_scan_start(struct wiphy *wiphy,
+				struct net_device *ndev,
+				struct cfg80211_sched_scan_request *request)
+{
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
+	struct brcmf_pno_net_param_le pfn;
+	int i;
+	int ret = 0;
+
+	brcmf_dbg(SCAN, "Enter n_match_sets:%d n_ssids:%d\n",
+		  request->n_match_sets, request->n_ssids);
+	if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
+		brcmf_err("Scanning already: status (%lu)\n", cfg->scan_status);
+		return -EAGAIN;
+	}
+	if (test_bit(BRCMF_SCAN_STATUS_SUPPRESS, &cfg->scan_status)) {
+		brcmf_err("Scanning suppressed: status (%lu)\n",
+			  cfg->scan_status);
+		return -EAGAIN;
+	}
+
+	if (!request->n_ssids || !request->n_match_sets) {
+		brcmf_dbg(SCAN, "Invalid sched scan req!! n_ssids:%d\n",
+			  request->n_ssids);
+		return -EINVAL;
+	}
+
+	if (request->n_ssids > 0) {
+		for (i = 0; i < request->n_ssids; i++) {
+			/* Active scan req for ssids */
+			brcmf_dbg(SCAN, ">>> Active scan req for ssid (%s)\n",
+				  request->ssids[i].ssid);
+
+			/*
+			 * The match_set ssids are a superset of the n_ssids
+			 * list, so there is no need to add them separately.
+			 */
+		}
+	}
+
+	if (request->n_match_sets > 0) {
+		/* clean up everything */
+		ret = brcmf_dev_pno_clean(ndev);
+		if  (ret < 0) {
+			brcmf_err("failed error=%d\n", ret);
+			return ret;
+		}
+
+		/* configure pno */
+		ret = brcmf_dev_pno_config(ndev);
+		if (ret < 0) {
+			brcmf_err("PNO setup failed!! ret=%d\n", ret);
+			return -EINVAL;
+		}
+
+		/* configure each match set */
+		for (i = 0; i < request->n_match_sets; i++) {
+			struct cfg80211_ssid *ssid;
+			u32 ssid_len;
+
+			ssid = &request->match_sets[i].ssid;
+			ssid_len = ssid->ssid_len;
+
+			if (!ssid_len) {
+				brcmf_err("skip broadcast ssid\n");
+				continue;
+			}
+			pfn.auth = cpu_to_le32(WLAN_AUTH_OPEN);
+			pfn.wpa_auth = cpu_to_le32(BRCMF_PNO_WPA_AUTH_ANY);
+			pfn.wsec = cpu_to_le32(0);
+			pfn.infra = cpu_to_le32(1);
+			pfn.flags = cpu_to_le32(1 << BRCMF_PNO_HIDDEN_BIT);
+			pfn.ssid.SSID_len = cpu_to_le32(ssid_len);
+			memcpy(pfn.ssid.SSID, ssid->ssid, ssid_len);
+			ret = brcmf_fil_iovar_data_set(ifp, "pfn_add", &pfn,
+						       sizeof(pfn));
+			brcmf_dbg(SCAN, ">>> PNO filter %s for ssid (%s)\n",
+				  ret == 0 ? "set" : "failed", ssid->ssid);
+		}
+		/* Enable the PNO */
+		ret = brcmf_fil_iovar_int_set(ifp, "pfn", 1);
+		if (ret < 0) {
+			brcmf_err("PNO enable failed!! ret=%d\n", ret);
+			return -EINVAL;
+		}
+	} else {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int brcmf_cfg80211_sched_scan_stop(struct wiphy *wiphy,
+					  struct net_device *ndev)
+{
+	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+
+	brcmf_dbg(SCAN, "enter\n");
+	brcmf_dev_pno_clean(ndev);
+	if (cfg->sched_escan)
+		brcmf_notify_escan_complete(cfg, netdev_priv(ndev), true, true);
+	return 0;
+}
+
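+/* brcmf_configure_opensecurity() - configure the BSS for open security by
+ * clearing the d11 auth type, wsec and wpa_auth settings in firmware.
+ */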
+static s32 brcmf_configure_opensecurity(struct brcmf_if *ifp)
+{
+	s32 err;
+
+	/* set auth */
+	err = brcmf_fil_bsscfg_int_set(ifp, "auth", 0);
+	if (err < 0) {
+		brcmf_err("auth error %d\n", err);
+		return err;
+	}
+	/* set wsec */
+	err = brcmf_fil_bsscfg_int_set(ifp, "wsec", 0);
+	if (err < 0) {
+		brcmf_err("wsec error %d\n", err);
+		return err;
+	}
+	/* set upper-layer auth */
+	err = brcmf_fil_bsscfg_int_set(ifp, "wpa_auth", WPA_AUTH_NONE);
+	if (err < 0) {
+		brcmf_err("wpa_auth error %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+static bool brcmf_valid_wpa_oui(u8 *oui, bool is_rsn_ie)
+{
+	if (is_rsn_ie)
+		return (memcmp(oui, RSN_OUI, TLV_OUI_LEN) == 0);
+
+	return (memcmp(oui, WPA_OUI, TLV_OUI_LEN) == 0);
+}
+
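+/* brcmf_configure_wpaie() - parse the WPA or RSN IE from the AP beacon data,
+ * pick up the multicast/unicast ciphers and AKM suites it recognizes, and
+ * program the matching auth, wsec and wpa_auth settings into firmware.
+ */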
+static s32
+brcmf_configure_wpaie(struct brcmf_if *ifp,
+		      const struct brcmf_vs_tlv *wpa_ie,
+		      bool is_rsn_ie)
+{
+	u32 auth = 0; /* d11 open authentication */
+	u16 count;
+	s32 err = 0;
+	s32 len = 0;
+	u32 i;
+	u32 wsec;
+	u32 pval = 0;
+	u32 gval = 0;
+	u32 wpa_auth = 0;
+	u32 offset;
+	u8 *data;
+	u16 rsn_cap;
+	u32 wme_bss_disable;
+
+	brcmf_dbg(TRACE, "Enter\n");
+	if (wpa_ie == NULL)
+		goto exit;
+
+	len = wpa_ie->len + TLV_HDR_LEN;
+	data = (u8 *)wpa_ie;
+	offset = TLV_HDR_LEN;
+	if (!is_rsn_ie)
+		offset += VS_IE_FIXED_HDR_LEN;
+	else
+		offset += WPA_IE_VERSION_LEN;
+
+	/* check for multicast cipher suite */
+	if (offset + WPA_IE_MIN_OUI_LEN > len) {
+		err = -EINVAL;
+		brcmf_err("no multicast cipher suite\n");
+		goto exit;
+	}
+
+	if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) {
+		err = -EINVAL;
+		brcmf_err("invalid OUI\n");
+		goto exit;
+	}
+	offset += TLV_OUI_LEN;
+
+	/* pick up multicast cipher */
+	switch (data[offset]) {
+	case WPA_CIPHER_NONE:
+		gval = 0;
+		break;
+	case WPA_CIPHER_WEP_40:
+	case WPA_CIPHER_WEP_104:
+		gval = WEP_ENABLED;
+		break;
+	case WPA_CIPHER_TKIP:
+		gval = TKIP_ENABLED;
+		break;
+	case WPA_CIPHER_AES_CCM:
+		gval = AES_ENABLED;
+		break;
+	default:
+		err = -EINVAL;
+		brcmf_err("Invalid multicast cipher info\n");
+		goto exit;
+	}
+
+	offset++;
+	/* walk thru unicast cipher list and pick up what we recognize */
+	count = data[offset] + (data[offset + 1] << 8);
+	offset += WPA_IE_SUITE_COUNT_LEN;
+	/* Check for unicast suite(s) */
+	if (offset + (WPA_IE_MIN_OUI_LEN * count) > len) {
+		err = -EINVAL;
+		brcmf_err("no unicast cipher suite\n");
+		goto exit;
+	}
+	for (i = 0; i < count; i++) {
+		if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) {
+			err = -EINVAL;
+			brcmf_err("invalid OUI\n");
+			goto exit;
+		}
+		offset += TLV_OUI_LEN;
+		switch (data[offset]) {
+		case WPA_CIPHER_NONE:
+			break;
+		case WPA_CIPHER_WEP_40:
+		case WPA_CIPHER_WEP_104:
+			pval |= WEP_ENABLED;
+			break;
+		case WPA_CIPHER_TKIP:
+			pval |= TKIP_ENABLED;
+			break;
+		case WPA_CIPHER_AES_CCM:
+			pval |= AES_ENABLED;
+			break;
+		default:
+			brcmf_err("Invalid unicast security info\n");
+		}
+		offset++;
+	}
+	/* walk thru auth management suite list and pick up what we recognize */
+	count = data[offset] + (data[offset + 1] << 8);
+	offset += WPA_IE_SUITE_COUNT_LEN;
+	/* Check for auth key management suite(s) */
+	if (offset + (WPA_IE_MIN_OUI_LEN * count) > len) {
+		err = -EINVAL;
+		brcmf_err("no auth key mgmt suite\n");
+		goto exit;
+	}
+	for (i = 0; i < count; i++) {
+		if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) {
+			err = -EINVAL;
+			brcmf_err("invalid OUI\n");
+			goto exit;
+		}
+		offset += TLV_OUI_LEN;
+		switch (data[offset]) {
+		case RSN_AKM_NONE:
+			brcmf_dbg(TRACE, "RSN_AKM_NONE\n");
+			wpa_auth |= WPA_AUTH_NONE;
+			break;
+		case RSN_AKM_UNSPECIFIED:
+			brcmf_dbg(TRACE, "RSN_AKM_UNSPECIFIED\n");
+			is_rsn_ie ? (wpa_auth |= WPA2_AUTH_UNSPECIFIED) :
+				    (wpa_auth |= WPA_AUTH_UNSPECIFIED);
+			break;
+		case RSN_AKM_PSK:
+			brcmf_dbg(TRACE, "RSN_AKM_PSK\n");
+			is_rsn_ie ? (wpa_auth |= WPA2_AUTH_PSK) :
+				    (wpa_auth |= WPA_AUTH_PSK);
+			break;
+		default:
+			brcmf_err("Invalid key mgmt info\n");
+		}
+		offset++;
+	}
+
+	if (is_rsn_ie) {
+		wme_bss_disable = 1;
+		if ((offset + RSN_CAP_LEN) <= len) {
+			rsn_cap = data[offset] + (data[offset + 1] << 8);
+			if (rsn_cap & RSN_CAP_PTK_REPLAY_CNTR_MASK)
+				wme_bss_disable = 0;
+		}
+		/* set wme_bss_disable to sync RSN Capabilities */
+		err = brcmf_fil_bsscfg_int_set(ifp, "wme_bss_disable",
+					       wme_bss_disable);
+		if (err < 0) {
+			brcmf_err("wme_bss_disable error %d\n", err);
+			goto exit;
+		}
+	}
+	/* For WPS, set SES_OW_ENABLED */
+	wsec = (pval | gval | SES_OW_ENABLED);
+
+	/* set auth */
+	err = brcmf_fil_bsscfg_int_set(ifp, "auth", auth);
+	if (err < 0) {
+		brcmf_err("auth error %d\n", err);
+		goto exit;
+	}
+	/* set wsec */
+	err = brcmf_fil_bsscfg_int_set(ifp, "wsec", wsec);
+	if (err < 0) {
+		brcmf_err("wsec error %d\n", err);
+		goto exit;
+	}
+	/* set upper-layer auth */
+	err = brcmf_fil_bsscfg_int_set(ifp, "wpa_auth", wpa_auth);
+	if (err < 0) {
+		brcmf_err("wpa_auth error %d\n", err);
+		goto exit;
+	}
+
+exit:
+	return err;
+}
+
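+/* brcmf_parse_vndr_ies() - walk the IE buffer and collect vendor specific IEs
+ * (skipping the WPA and WME OUIs) into @vndr_ies, up to VNDR_IE_PARSE_LIMIT.
+ */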
+static s32
+brcmf_parse_vndr_ies(const u8 *vndr_ie_buf, u32 vndr_ie_len,
+		     struct parsed_vndr_ies *vndr_ies)
+{
+	struct brcmf_vs_tlv *vndrie;
+	struct brcmf_tlv *ie;
+	struct parsed_vndr_ie_info *parsed_info;
+	s32 remaining_len;
+
+	remaining_len = (s32)vndr_ie_len;
+	memset(vndr_ies, 0, sizeof(*vndr_ies));
+
+	ie = (struct brcmf_tlv *)vndr_ie_buf;
+	while (ie) {
+		if (ie->id != WLAN_EID_VENDOR_SPECIFIC)
+			goto next;
+		vndrie = (struct brcmf_vs_tlv *)ie;
+		/* len should be bigger than OUI length + one */
+		if (vndrie->len < (VS_IE_FIXED_HDR_LEN - TLV_HDR_LEN + 1)) {
+			brcmf_err("invalid vndr ie. length is too small %d\n",
+				  vndrie->len);
+			goto next;
+		}
+		/* if wpa or wme ie, do not add ie */
+		if (!memcmp(vndrie->oui, (u8 *)WPA_OUI, TLV_OUI_LEN) &&
+		    ((vndrie->oui_type == WPA_OUI_TYPE) ||
+		    (vndrie->oui_type == WME_OUI_TYPE))) {
+			brcmf_dbg(TRACE, "Found WPA/WME oui. Do not add it\n");
+			goto next;
+		}
+
+		parsed_info = &vndr_ies->ie_info[vndr_ies->count];
+
+		/* save vndr ie information */
+		parsed_info->ie_ptr = (char *)vndrie;
+		parsed_info->ie_len = vndrie->len + TLV_HDR_LEN;
+		memcpy(&parsed_info->vndrie, vndrie, sizeof(*vndrie));
+
+		vndr_ies->count++;
+
+		brcmf_dbg(TRACE, "** OUI %02x %02x %02x, type 0x%02x\n",
+			  parsed_info->vndrie.oui[0],
+			  parsed_info->vndrie.oui[1],
+			  parsed_info->vndrie.oui[2],
+			  parsed_info->vndrie.oui_type);
+
+		if (vndr_ies->count >= VNDR_IE_PARSE_LIMIT)
+			break;
+next:
+		remaining_len -= (ie->len + TLV_HDR_LEN);
+		if (remaining_len <= TLV_HDR_LEN)
+			ie = NULL;
+		else
+			ie = (struct brcmf_tlv *)(((u8 *)ie) + ie->len +
+				TLV_HDR_LEN);
+	}
+	return 0;
+}
+
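+/* brcmf_vndr_ie() - build a single vndr_ie "add"/"del" command record
+ * (command string, count, pktflag and the IE itself) in @iebuf and return
+ * the length of the resulting record.
+ */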
+static u32
+brcmf_vndr_ie(u8 *iebuf, s32 pktflag, u8 *ie_ptr, u32 ie_len, s8 *add_del_cmd)
+{
+
+	strncpy(iebuf, add_del_cmd, VNDR_IE_CMD_LEN - 1);
+	iebuf[VNDR_IE_CMD_LEN - 1] = '\0';
+
+	put_unaligned_le32(1, &iebuf[VNDR_IE_COUNT_OFFSET]);
+
+	put_unaligned_le32(pktflag, &iebuf[VNDR_IE_PKTFLAG_OFFSET]);
+
+	memcpy(&iebuf[VNDR_IE_VSIE_OFFSET], ie_ptr, ie_len);
+
+	return ie_len + VNDR_IE_HDR_SIZE;
+}
+
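+/* brcmf_vif_set_mgmt_ie() - replace the vendor IEs for one management frame
+ * type: compare the new IEs against those saved in vif->saved_ie, build "del"
+ * commands for the old ones and "add" commands for the new ones, and push
+ * them to firmware in a single "vndr_ie" bsscfg set.
+ */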
+s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
+			  const u8 *vndr_ie_buf, u32 vndr_ie_len)
+{
+	struct brcmf_if *ifp;
+	struct vif_saved_ie *saved_ie;
+	s32 err = 0;
+	u8  *iovar_ie_buf;
+	u8  *curr_ie_buf;
+	u8  *mgmt_ie_buf = NULL;
+	int mgmt_ie_buf_len;
+	u32 *mgmt_ie_len;
+	u32 del_add_ie_buf_len = 0;
+	u32 total_ie_buf_len = 0;
+	u32 parsed_ie_buf_len = 0;
+	struct parsed_vndr_ies old_vndr_ies;
+	struct parsed_vndr_ies new_vndr_ies;
+	struct parsed_vndr_ie_info *vndrie_info;
+	s32 i;
+	u8 *ptr;
+	int remained_buf_len;
+
+	if (!vif)
+		return -ENODEV;
+	ifp = vif->ifp;
+	saved_ie = &vif->saved_ie;
+
+	brcmf_dbg(TRACE, "bssidx %d, pktflag : 0x%02X\n", ifp->bssidx, pktflag);
+	iovar_ie_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
+	if (!iovar_ie_buf)
+		return -ENOMEM;
+	curr_ie_buf = iovar_ie_buf;
+	switch (pktflag) {
+	case BRCMF_VNDR_IE_PRBREQ_FLAG:
+		mgmt_ie_buf = saved_ie->probe_req_ie;
+		mgmt_ie_len = &saved_ie->probe_req_ie_len;
+		mgmt_ie_buf_len = sizeof(saved_ie->probe_req_ie);
+		break;
+	case BRCMF_VNDR_IE_PRBRSP_FLAG:
+		mgmt_ie_buf = saved_ie->probe_res_ie;
+		mgmt_ie_len = &saved_ie->probe_res_ie_len;
+		mgmt_ie_buf_len = sizeof(saved_ie->probe_res_ie);
+		break;
+	case BRCMF_VNDR_IE_BEACON_FLAG:
+		mgmt_ie_buf = saved_ie->beacon_ie;
+		mgmt_ie_len = &saved_ie->beacon_ie_len;
+		mgmt_ie_buf_len = sizeof(saved_ie->beacon_ie);
+		break;
+	case BRCMF_VNDR_IE_ASSOCREQ_FLAG:
+		mgmt_ie_buf = saved_ie->assoc_req_ie;
+		mgmt_ie_len = &saved_ie->assoc_req_ie_len;
+		mgmt_ie_buf_len = sizeof(saved_ie->assoc_req_ie);
+		break;
+	default:
+		err = -EPERM;
+		brcmf_err("unsupported frame type\n");
+		goto exit;
+	}
+
+	if (vndr_ie_len > mgmt_ie_buf_len) {
+		err = -ENOMEM;
+		brcmf_err("extra IE size too big\n");
+		goto exit;
+	}
+
+	/* parse and save new vndr_ie in curr_ie_buff before comparing it */
+	if (vndr_ie_buf && vndr_ie_len && curr_ie_buf) {
+		ptr = curr_ie_buf;
+		brcmf_parse_vndr_ies(vndr_ie_buf, vndr_ie_len, &new_vndr_ies);
+		for (i = 0; i < new_vndr_ies.count; i++) {
+			vndrie_info = &new_vndr_ies.ie_info[i];
+			memcpy(ptr + parsed_ie_buf_len, vndrie_info->ie_ptr,
+			       vndrie_info->ie_len);
+			parsed_ie_buf_len += vndrie_info->ie_len;
+		}
+	}
+
+	if (mgmt_ie_buf && *mgmt_ie_len) {
+		if (parsed_ie_buf_len && (parsed_ie_buf_len == *mgmt_ie_len) &&
+		    (memcmp(mgmt_ie_buf, curr_ie_buf,
+			    parsed_ie_buf_len) == 0)) {
+			brcmf_dbg(TRACE, "Previous mgmt IE equals to current IE\n");
+			goto exit;
+		}
+
+		/* parse old vndr_ie */
+		brcmf_parse_vndr_ies(mgmt_ie_buf, *mgmt_ie_len, &old_vndr_ies);
+
+		/* make a command to delete old ie */
+		for (i = 0; i < old_vndr_ies.count; i++) {
+			vndrie_info = &old_vndr_ies.ie_info[i];
+
+			brcmf_dbg(TRACE, "DEL ID : %d, Len: %d , OUI:%02x:%02x:%02x\n",
+				  vndrie_info->vndrie.id,
+				  vndrie_info->vndrie.len,
+				  vndrie_info->vndrie.oui[0],
+				  vndrie_info->vndrie.oui[1],
+				  vndrie_info->vndrie.oui[2]);
+
+			del_add_ie_buf_len = brcmf_vndr_ie(curr_ie_buf, pktflag,
+							   vndrie_info->ie_ptr,
+							   vndrie_info->ie_len,
+							   "del");
+			curr_ie_buf += del_add_ie_buf_len;
+			total_ie_buf_len += del_add_ie_buf_len;
+		}
+	}
+
+	*mgmt_ie_len = 0;
+	/* Add if there is any extra IE */
+	if (mgmt_ie_buf && parsed_ie_buf_len) {
+		ptr = mgmt_ie_buf;
+
+		remained_buf_len = mgmt_ie_buf_len;
+
+		/* make a command to add new ie */
+		for (i = 0; i < new_vndr_ies.count; i++) {
+			vndrie_info = &new_vndr_ies.ie_info[i];
+
+			/* verify remaining buffer size before copying data */
+			if (remained_buf_len < (vndrie_info->vndrie.len +
+							VNDR_IE_VSIE_OFFSET)) {
+				brcmf_err("no space in mgmt_ie_buf: len left %d",
+					  remained_buf_len);
+				break;
+			}
+			remained_buf_len -= (vndrie_info->ie_len +
+					     VNDR_IE_VSIE_OFFSET);
+
+			brcmf_dbg(TRACE, "ADDED ID : %d, Len: %d, OUI:%02x:%02x:%02x\n",
+				  vndrie_info->vndrie.id,
+				  vndrie_info->vndrie.len,
+				  vndrie_info->vndrie.oui[0],
+				  vndrie_info->vndrie.oui[1],
+				  vndrie_info->vndrie.oui[2]);
+
+			del_add_ie_buf_len = brcmf_vndr_ie(curr_ie_buf, pktflag,
+							   vndrie_info->ie_ptr,
+							   vndrie_info->ie_len,
+							   "add");
+
+			/* save the parsed IE in wl struct */
+			memcpy(ptr + (*mgmt_ie_len), vndrie_info->ie_ptr,
+			       vndrie_info->ie_len);
+			*mgmt_ie_len += vndrie_info->ie_len;
+
+			curr_ie_buf += del_add_ie_buf_len;
+			total_ie_buf_len += del_add_ie_buf_len;
+		}
+	}
+	if (total_ie_buf_len) {
+		err  = brcmf_fil_bsscfg_data_set(ifp, "vndr_ie", iovar_ie_buf,
+						 total_ie_buf_len);
+		if (err)
+			brcmf_err("vndr ie set error : %d\n", err);
+	}
+
+exit:
+	kfree(iovar_ie_buf);
+	return err;
+}
+
+s32 brcmf_vif_clear_mgmt_ies(struct brcmf_cfg80211_vif *vif)
+{
+	s32 pktflags[] = {
+		BRCMF_VNDR_IE_PRBREQ_FLAG,
+		BRCMF_VNDR_IE_PRBRSP_FLAG,
+		BRCMF_VNDR_IE_BEACON_FLAG
+	};
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(pktflags); i++)
+		brcmf_vif_set_mgmt_ie(vif, pktflags[i], NULL, 0);
+
+	memset(&vif->saved_ie, 0, sizeof(vif->saved_ie));
+	return 0;
+}
+
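+/* brcmf_config_ap_mgmt_ie() - push the beacon and probe response vendor IEs
+ * from the cfg80211 beacon data to firmware.
+ */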
+static s32
+brcmf_config_ap_mgmt_ie(struct brcmf_cfg80211_vif *vif,
+			struct cfg80211_beacon_data *beacon)
+{
+	s32 err;
+
+	/* Set Beacon IEs to FW */
+	err = brcmf_vif_set_mgmt_ie(vif, BRCMF_VNDR_IE_BEACON_FLAG,
+				    beacon->tail, beacon->tail_len);
+	if (err) {
+		brcmf_err("Set Beacon IE Failed\n");
+		return err;
+	}
+	brcmf_dbg(TRACE, "Applied Vndr IEs for Beacon\n");
+
+	/* Set Probe Response IEs to FW */
+	err = brcmf_vif_set_mgmt_ie(vif, BRCMF_VNDR_IE_PRBRSP_FLAG,
+				    beacon->proberesp_ies,
+				    beacon->proberesp_ies_len);
+	if (err)
+		brcmf_err("Set Probe Resp IE Failed\n");
+	else
+		brcmf_dbg(TRACE, "Applied Vndr IEs for Probe Resp\n");
+
+	return err;
+}
+
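+/* brcmf_cfg80211_start_ap() - bring up AP or P2P GO operation: resolve the
+ * SSID from the settings or the beacon head, program security based on the
+ * WPA/RSN IEs, apply channel, 11d, beacon interval and DTIM settings, and
+ * finally create the BSS.
+ */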
+static s32
+brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
+			struct cfg80211_ap_settings *settings)
+{
+	s32 ie_offset;
+	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	const struct brcmf_tlv *ssid_ie;
+	const struct brcmf_tlv *country_ie;
+	struct brcmf_ssid_le ssid_le;
+	s32 err = -EPERM;
+	const struct brcmf_tlv *rsn_ie;
+	const struct brcmf_vs_tlv *wpa_ie;
+	struct brcmf_join_params join_params;
+	enum nl80211_iftype dev_role;
+	struct brcmf_fil_bss_enable_le bss_enable;
+	u16 chanspec;
+	bool mbss;
+	int is_11d;
+
+	brcmf_dbg(TRACE, "ctrlchn=%d, center=%d, bw=%d, beacon_interval=%d, dtim_period=%d,\n",
+		  settings->chandef.chan->hw_value,
+		  settings->chandef.center_freq1, settings->chandef.width,
+		  settings->beacon_interval, settings->dtim_period);
+	brcmf_dbg(TRACE, "ssid=%s(%zu), auth_type=%d, inactivity_timeout=%d\n",
+		  settings->ssid, settings->ssid_len, settings->auth_type,
+		  settings->inactivity_timeout);
+	dev_role = ifp->vif->wdev.iftype;
+	mbss = ifp->vif->mbss;
+
+	/* store current 11d setting */
+	brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_REGULATORY, &ifp->vif->is_11d);
+	country_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail,
+				      settings->beacon.tail_len,
+				      WLAN_EID_COUNTRY);
+	is_11d = country_ie ? 1 : 0;
+
+	memset(&ssid_le, 0, sizeof(ssid_le));
+	if (settings->ssid == NULL || settings->ssid_len == 0) {
+		ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN;
+		ssid_ie = brcmf_parse_tlvs(
+				(u8 *)&settings->beacon.head[ie_offset],
+				settings->beacon.head_len - ie_offset,
+				WLAN_EID_SSID);
+		if (!ssid_ie || ssid_ie->len > IEEE80211_MAX_SSID_LEN)
+			return -EINVAL;
+
+		memcpy(ssid_le.SSID, ssid_ie->data, ssid_ie->len);
+		ssid_le.SSID_len = cpu_to_le32(ssid_ie->len);
+		brcmf_dbg(TRACE, "SSID is (%s) in Head\n", ssid_le.SSID);
+	} else {
+		memcpy(ssid_le.SSID, settings->ssid, settings->ssid_len);
+		ssid_le.SSID_len = cpu_to_le32((u32)settings->ssid_len);
+	}
+
+	if (!mbss) {
+		brcmf_set_mpc(ifp, 0);
+		brcmf_configure_arp_offload(ifp, false);
+	}
+
+	/* find the RSN_IE */
+	rsn_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail,
+				  settings->beacon.tail_len, WLAN_EID_RSN);
+
+	/* find the WPA_IE */
+	wpa_ie = brcmf_find_wpaie((u8 *)settings->beacon.tail,
+				  settings->beacon.tail_len);
+
+	if (wpa_ie != NULL || rsn_ie != NULL) {
+		brcmf_dbg(TRACE, "WPA(2) IE is found\n");
+		if (wpa_ie != NULL) {
+			/* WPA IE */
+			err = brcmf_configure_wpaie(ifp, wpa_ie, false);
+			if (err < 0)
+				goto exit;
+		} else {
+			struct brcmf_vs_tlv *tmp_ie;
+
+			tmp_ie = (struct brcmf_vs_tlv *)rsn_ie;
+
+			/* RSN IE */
+			err = brcmf_configure_wpaie(ifp, tmp_ie, true);
+			if (err < 0)
+				goto exit;
+		}
+	} else {
+		brcmf_dbg(TRACE, "No WPA(2) IEs found\n");
+		brcmf_configure_opensecurity(ifp);
+	}
+
+	brcmf_config_ap_mgmt_ie(ifp->vif, &settings->beacon);
+
+	if (!mbss) {
+		chanspec = chandef_to_chanspec(&cfg->d11inf,
+					       &settings->chandef);
+		err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec);
+		if (err < 0) {
+			brcmf_err("Set Channel failed: chspec=%d, %d\n",
+				  chanspec, err);
+			goto exit;
+		}
+
+		if (is_11d != ifp->vif->is_11d) {
+			err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_REGULATORY,
+						    is_11d);
+			if (err < 0) {
+				brcmf_err("Regulatory Set Error, %d\n", err);
+				goto exit;
+			}
+		}
+		if (settings->beacon_interval) {
+			err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_BCNPRD,
+						    settings->beacon_interval);
+			if (err < 0) {
+				brcmf_err("Beacon Interval Set Error, %d\n",
+					  err);
+				goto exit;
+			}
+		}
+		if (settings->dtim_period) {
+			err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_DTIMPRD,
+						    settings->dtim_period);
+			if (err < 0) {
+				brcmf_err("DTIM Interval Set Error, %d\n", err);
+				goto exit;
+			}
+		}
+
+		if (dev_role == NL80211_IFTYPE_AP) {
+			err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1);
+			if (err < 0) {
+				brcmf_err("BRCMF_C_DOWN error %d\n", err);
+				goto exit;
+			}
+			brcmf_fil_iovar_int_set(ifp, "apsta", 0);
+		}
+
+		err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 1);
+		if (err < 0) {
+			brcmf_err("SET INFRA error %d\n", err);
+			goto exit;
+		}
+	} else if (WARN_ON(is_11d != ifp->vif->is_11d)) {
+		/* Multiple-BSS should use same 11d configuration */
+		err = -EINVAL;
+		goto exit;
+	}
+	if (dev_role == NL80211_IFTYPE_AP) {
+		if ((brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS)) && (!mbss))
+			brcmf_fil_iovar_int_set(ifp, "mbss", 1);
+
+		err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 1);
+		if (err < 0) {
+			brcmf_err("setting AP mode failed %d\n", err);
+			goto exit;
+		}
+		err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1);
+		if (err < 0) {
+			brcmf_err("BRCMF_C_UP error (%d)\n", err);
+			goto exit;
+		}
+		/* On DOWN the firmware removes the WEP keys, reconfigure
+		 * them if they were set.
+		 */
+		brcmf_cfg80211_reconfigure_wep(ifp);
+
+		memset(&join_params, 0, sizeof(join_params));
+		/* join parameters start with the ssid */
+		memcpy(&join_params.ssid_le, &ssid_le, sizeof(ssid_le));
+		/* create softap */
+		err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
+					     &join_params, sizeof(join_params));
+		if (err < 0) {
+			brcmf_err("SET SSID error (%d)\n", err);
+			goto exit;
+		}
+		brcmf_dbg(TRACE, "AP mode configuration complete\n");
+	} else {
+		err = brcmf_fil_bsscfg_data_set(ifp, "ssid", &ssid_le,
+						sizeof(ssid_le));
+		if (err < 0) {
+			brcmf_err("setting ssid failed %d\n", err);
+			goto exit;
+		}
+		bss_enable.bsscfg_idx = cpu_to_le32(ifp->bssidx);
+		bss_enable.enable = cpu_to_le32(1);
+		err = brcmf_fil_iovar_data_set(ifp, "bss", &bss_enable,
+					       sizeof(bss_enable));
+		if (err < 0) {
+			brcmf_err("bss_enable config failed %d\n", err);
+			goto exit;
+		}
+
+		brcmf_dbg(TRACE, "GO mode configuration complete\n");
+	}
+	set_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
+	brcmf_net_setcarrier(ifp, true);
+
+exit:
+	if ((err) && (!mbss)) {
+		brcmf_set_mpc(ifp, 1);
+		brcmf_configure_arp_offload(ifp, true);
+	}
+	return err;
+}
+
+static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
+{
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	s32 err;
+	struct brcmf_fil_bss_enable_le bss_enable;
+	struct brcmf_join_params join_params;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	if (ifp->vif->wdev.iftype == NL80211_IFTYPE_AP) {
+		/* Deauth frames are most likely still outstanding, so sleep
+		 * first to make sure they get processed by the firmware.
+		 */
+		msleep(400);
+
+		if (ifp->vif->mbss) {
+			err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1);
+			return err;
+		}
+
+		memset(&join_params, 0, sizeof(join_params));
+		err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
+					     &join_params, sizeof(join_params));
+		if (err < 0)
+			brcmf_err("SET SSID error (%d)\n", err);
+		err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1);
+		if (err < 0)
+			brcmf_err("BRCMF_C_DOWN error %d\n", err);
+		err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0);
+		if (err < 0)
+			brcmf_err("setting AP mode failed %d\n", err);
+		if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS))
+			brcmf_fil_iovar_int_set(ifp, "mbss", 0);
+		err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_REGULATORY,
+					    ifp->vif->is_11d);
+		if (err < 0)
+			brcmf_err("restoring REGULATORY setting failed %d\n",
+				  err);
+		/* Bring device back up so it can be used again */
+		err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1);
+		if (err < 0)
+			brcmf_err("BRCMF_C_UP error %d\n", err);
+	} else {
+		bss_enable.bsscfg_idx = cpu_to_le32(ifp->bssidx);
+		bss_enable.enable = cpu_to_le32(0);
+		err = brcmf_fil_iovar_data_set(ifp, "bss", &bss_enable,
+					       sizeof(bss_enable));
+		if (err < 0)
+			brcmf_err("bss_enable config failed %d\n", err);
+	}
+	brcmf_set_mpc(ifp, 1);
+	brcmf_configure_arp_offload(ifp, true);
+	clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
+	brcmf_net_setcarrier(ifp, false);
+
+	return err;
+}
+
+static s32
+brcmf_cfg80211_change_beacon(struct wiphy *wiphy, struct net_device *ndev,
+			     struct cfg80211_beacon_data *info)
+{
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	s32 err;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	err = brcmf_config_ap_mgmt_ie(ifp->vif, info);
+
+	return err;
+}
+
+static int
+brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev,
+			   struct station_del_parameters *params)
+{
+	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+	struct brcmf_scb_val_le scbval;
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	s32 err;
+
+	if (!params->mac)
+		return -EFAULT;
+
+	brcmf_dbg(TRACE, "Enter %pM\n", params->mac);
+
+	if (ifp->vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif)
+		ifp = cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
+	if (!check_vif_up(ifp->vif))
+		return -EIO;
+
+	memcpy(&scbval.ea, params->mac, ETH_ALEN);
+	scbval.val = cpu_to_le32(params->reason_code);
+	err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON,
+				     &scbval, sizeof(scbval));
+	if (err)
+		brcmf_err("SCB_DEAUTHENTICATE_FOR_REASON failed %d\n", err);
+
+	brcmf_dbg(TRACE, "Exit\n");
+	return err;
+}
+
+static int
+brcmf_cfg80211_change_station(struct wiphy *wiphy, struct net_device *ndev,
+			      const u8 *mac, struct station_parameters *params)
+{
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	s32 err;
+
+	brcmf_dbg(TRACE, "Enter, MAC %pM, mask 0x%04x set 0x%04x\n", mac,
+		  params->sta_flags_mask, params->sta_flags_set);
+
+	/* Ignore all 00 MAC */
+	if (is_zero_ether_addr(mac))
+		return 0;
+
+	if (!(params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED)))
+		return 0;
+
+	if (params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED))
+		err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SCB_AUTHORIZE,
+					     (void *)mac, ETH_ALEN);
+	else
+		err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SCB_DEAUTHORIZE,
+					     (void *)mac, ETH_ALEN);
+	if (err < 0)
+		brcmf_err("Setting SCB (de-)authorize failed, %d\n", err);
+
+	return err;
+}
+
+static void
+brcmf_cfg80211_mgmt_frame_register(struct wiphy *wiphy,
+				   struct wireless_dev *wdev,
+				   u16 frame_type, bool reg)
+{
+	struct brcmf_cfg80211_vif *vif;
+	u16 mgmt_type;
+
+	brcmf_dbg(TRACE, "Enter, frame_type %04x, reg=%d\n", frame_type, reg);
+
+	mgmt_type = (frame_type & IEEE80211_FCTL_STYPE) >> 4;
+	vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
+	if (reg)
+		vif->mgmt_rx_reg |= BIT(mgmt_type);
+	else
+		vif->mgmt_rx_reg &= ~BIT(mgmt_type);
+}
+
+
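+/* brcmf_cfg80211_mgmt_tx() - transmit a management frame: probe responses are
+ * turned into a vendor IE update on the (p2p) vif, action frames are handed
+ * to the firmware as an action frame parameter block.
+ */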
+static int
+brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
+		       struct cfg80211_mgmt_tx_params *params, u64 *cookie)
+{
+	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+	struct ieee80211_channel *chan = params->chan;
+	const u8 *buf = params->buf;
+	size_t len = params->len;
+	const struct ieee80211_mgmt *mgmt;
+	struct brcmf_cfg80211_vif *vif;
+	s32 err = 0;
+	s32 ie_offset;
+	s32 ie_len;
+	struct brcmf_fil_action_frame_le *action_frame;
+	struct brcmf_fil_af_params_le *af_params;
+	bool ack;
+	s32 chan_nr;
+	u32 freq;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	*cookie = 0;
+
+	mgmt = (const struct ieee80211_mgmt *)buf;
+
+	if (!ieee80211_is_mgmt(mgmt->frame_control)) {
+		brcmf_err("Driver only allows MGMT packet type\n");
+		return -EPERM;
+	}
+
+	vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
+
+	if (ieee80211_is_probe_resp(mgmt->frame_control)) {
+		/* Right now the only reason to get a probe response
+		 * is for a p2p listen response or for a p2p GO from
+		 * wpa_supplicant. Unfortunately the probe response is
+		 * sent on the primary ndev, while the dongle wants it
+		 * on the p2p vif. Since this is the only reason for a
+		 * probe response to be sent, the vif is taken from cfg.
+		 * If it is ever desired to send a probe response for a
+		 * non-p2p request, the data should be checked for
+		 * "DIRECT-". Note that in the future the supplicant
+		 * will use a dedicated p2p wdev for this and then this
+		 * 'hack' is no longer needed.
+		 */
+		ie_offset =  DOT11_MGMT_HDR_LEN +
+			     DOT11_BCN_PRB_FIXED_LEN;
+		ie_len = len - ie_offset;
+		if (vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif)
+			vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+		err = brcmf_vif_set_mgmt_ie(vif,
+					    BRCMF_VNDR_IE_PRBRSP_FLAG,
+					    &buf[ie_offset],
+					    ie_len);
+		cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true,
+					GFP_KERNEL);
+	} else if (ieee80211_is_action(mgmt->frame_control)) {
+		if (len > BRCMF_FIL_ACTION_FRAME_SIZE + DOT11_MGMT_HDR_LEN) {
+			brcmf_err("invalid action frame length\n");
+			err = -EINVAL;
+			goto exit;
+		}
+		af_params = kzalloc(sizeof(*af_params), GFP_KERNEL);
+		if (af_params == NULL) {
+			brcmf_err("unable to allocate frame\n");
+			err = -ENOMEM;
+			goto exit;
+		}
+		action_frame = &af_params->action_frame;
+		/* Add the packet Id */
+		action_frame->packet_id = cpu_to_le32(*cookie);
+		/* Add BSSID */
+		memcpy(&action_frame->da[0], &mgmt->da[0], ETH_ALEN);
+		memcpy(&af_params->bssid[0], &mgmt->bssid[0], ETH_ALEN);
+		/* Add the length, excluding the 802.11 header */
+		action_frame->len = cpu_to_le16(len - DOT11_MGMT_HDR_LEN);
+		/* Add the channel. Use the one specified as a parameter if
+		 * any, otherwise the current one obtained from the firmware.
+		 */
+		if (chan)
+			freq = chan->center_freq;
+		else
+			brcmf_fil_cmd_int_get(vif->ifp, BRCMF_C_GET_CHANNEL,
+					      &freq);
+		chan_nr = ieee80211_frequency_to_channel(freq);
+		af_params->channel = cpu_to_le32(chan_nr);
+
+		memcpy(action_frame->data, &buf[DOT11_MGMT_HDR_LEN],
+		       le16_to_cpu(action_frame->len));
+
+		brcmf_dbg(TRACE, "Action frame, cookie=%lld, len=%d, freq=%d\n",
+			  *cookie, le16_to_cpu(action_frame->len), freq);
+
+		ack = brcmf_p2p_send_action_frame(cfg, cfg_to_ndev(cfg),
+						  af_params);
+
+		cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, ack,
+					GFP_KERNEL);
+		kfree(af_params);
+	} else {
+		brcmf_dbg(TRACE, "Unhandled, fc=%04x!!\n", mgmt->frame_control);
+		brcmf_dbg_hex_dump(true, buf, len, "payload, len=%zu\n", len);
+	}
+
+exit:
+	return err;
+}
+
+
+static int
+brcmf_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy,
+					struct wireless_dev *wdev,
+					u64 cookie)
+{
+	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+	struct brcmf_cfg80211_vif *vif;
+	int err = 0;
+
+	brcmf_dbg(TRACE, "Enter p2p listen cancel\n");
+
+	vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+	if (vif == NULL) {
+		brcmf_err("No p2p device available for probe response\n");
+		err = -ENODEV;
+		goto exit;
+	}
+	brcmf_p2p_cancel_remain_on_channel(vif->ifp);
+exit:
+	return err;
+}
+
+static int brcmf_cfg80211_crit_proto_start(struct wiphy *wiphy,
+					   struct wireless_dev *wdev,
+					   enum nl80211_crit_proto_id proto,
+					   u16 duration)
+{
+	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+	struct brcmf_cfg80211_vif *vif;
+
+	vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
+
+	/* only DHCP support for now */
+	if (proto != NL80211_CRIT_PROTO_DHCP)
+		return -EINVAL;
+
+	/* suppress and abort scanning */
+	set_bit(BRCMF_SCAN_STATUS_SUPPRESS, &cfg->scan_status);
+	brcmf_abort_scanning(cfg);
+
+	return brcmf_btcoex_set_mode(vif, BRCMF_BTCOEX_DISABLED, duration);
+}
+
+static void brcmf_cfg80211_crit_proto_stop(struct wiphy *wiphy,
+					   struct wireless_dev *wdev)
+{
+	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+	struct brcmf_cfg80211_vif *vif;
+
+	vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
+
+	brcmf_btcoex_set_mode(vif, BRCMF_BTCOEX_ENABLED, 0);
+	clear_bit(BRCMF_SCAN_STATUS_SUPPRESS, &cfg->scan_status);
+}
+
+static s32
+brcmf_notify_tdls_peer_event(struct brcmf_if *ifp,
+			     const struct brcmf_event_msg *e, void *data)
+{
+	switch (e->reason) {
+	case BRCMF_E_REASON_TDLS_PEER_DISCOVERED:
+		brcmf_dbg(TRACE, "TDLS Peer Discovered\n");
+		break;
+	case BRCMF_E_REASON_TDLS_PEER_CONNECTED:
+		brcmf_dbg(TRACE, "TDLS Peer Connected\n");
+		brcmf_proto_add_tdls_peer(ifp->drvr, ifp->ifidx, (u8 *)e->addr);
+		break;
+	case BRCMF_E_REASON_TDLS_PEER_DISCONNECTED:
+		brcmf_dbg(TRACE, "TDLS Peer Disconnected\n");
+		brcmf_proto_delete_peer(ifp->drvr, ifp->ifidx, (u8 *)e->addr);
+		break;
+	}
+
+	return 0;
+}
+
+static int brcmf_convert_nl80211_tdls_oper(enum nl80211_tdls_operation oper)
+{
+	int ret;
+
+	switch (oper) {
+	case NL80211_TDLS_DISCOVERY_REQ:
+		ret = BRCMF_TDLS_MANUAL_EP_DISCOVERY;
+		break;
+	case NL80211_TDLS_SETUP:
+		ret = BRCMF_TDLS_MANUAL_EP_CREATE;
+		break;
+	case NL80211_TDLS_TEARDOWN:
+		ret = BRCMF_TDLS_MANUAL_EP_DELETE;
+		break;
+	default:
+		brcmf_err("unsupported operation: %d\n", oper);
+		ret = -EOPNOTSUPP;
+	}
+	return ret;
+}
+
+static int brcmf_cfg80211_tdls_oper(struct wiphy *wiphy,
+				    struct net_device *ndev, const u8 *peer,
+				    enum nl80211_tdls_operation oper)
+{
+	struct brcmf_if *ifp;
+	struct brcmf_tdls_iovar_le info;
+	int ret = 0;
+
+	ret = brcmf_convert_nl80211_tdls_oper(oper);
+	if (ret < 0)
+		return ret;
+
+	ifp = netdev_priv(ndev);
+	memset(&info, 0, sizeof(info));
+	info.mode = (u8)ret;
+	if (peer)
+		memcpy(info.ea, peer, ETH_ALEN);
+
+	ret = brcmf_fil_iovar_data_set(ifp, "tdls_endpoint",
+				       &info, sizeof(info));
+	if (ret < 0)
+		brcmf_err("tdls_endpoint iovar failed: ret=%d\n", ret);
+
+	return ret;
+}
+
+static struct cfg80211_ops wl_cfg80211_ops = {
+	.add_virtual_intf = brcmf_cfg80211_add_iface,
+	.del_virtual_intf = brcmf_cfg80211_del_iface,
+	.change_virtual_intf = brcmf_cfg80211_change_iface,
+	.scan = brcmf_cfg80211_scan,
+	.set_wiphy_params = brcmf_cfg80211_set_wiphy_params,
+	.join_ibss = brcmf_cfg80211_join_ibss,
+	.leave_ibss = brcmf_cfg80211_leave_ibss,
+	.get_station = brcmf_cfg80211_get_station,
+	.dump_station = brcmf_cfg80211_dump_station,
+	.set_tx_power = brcmf_cfg80211_set_tx_power,
+	.get_tx_power = brcmf_cfg80211_get_tx_power,
+	.add_key = brcmf_cfg80211_add_key,
+	.del_key = brcmf_cfg80211_del_key,
+	.get_key = brcmf_cfg80211_get_key,
+	.set_default_key = brcmf_cfg80211_config_default_key,
+	.set_default_mgmt_key = brcmf_cfg80211_config_default_mgmt_key,
+	.set_power_mgmt = brcmf_cfg80211_set_power_mgmt,
+	.connect = brcmf_cfg80211_connect,
+	.disconnect = brcmf_cfg80211_disconnect,
+	.suspend = brcmf_cfg80211_suspend,
+	.resume = brcmf_cfg80211_resume,
+	.set_pmksa = brcmf_cfg80211_set_pmksa,
+	.del_pmksa = brcmf_cfg80211_del_pmksa,
+	.flush_pmksa = brcmf_cfg80211_flush_pmksa,
+	.start_ap = brcmf_cfg80211_start_ap,
+	.stop_ap = brcmf_cfg80211_stop_ap,
+	.change_beacon = brcmf_cfg80211_change_beacon,
+	.del_station = brcmf_cfg80211_del_station,
+	.change_station = brcmf_cfg80211_change_station,
+	.sched_scan_start = brcmf_cfg80211_sched_scan_start,
+	.sched_scan_stop = brcmf_cfg80211_sched_scan_stop,
+	.mgmt_frame_register = brcmf_cfg80211_mgmt_frame_register,
+	.mgmt_tx = brcmf_cfg80211_mgmt_tx,
+	.remain_on_channel = brcmf_p2p_remain_on_channel,
+	.cancel_remain_on_channel = brcmf_cfg80211_cancel_remain_on_channel,
+	.start_p2p_device = brcmf_p2p_start_device,
+	.stop_p2p_device = brcmf_p2p_stop_device,
+	.crit_proto_start = brcmf_cfg80211_crit_proto_start,
+	.crit_proto_stop = brcmf_cfg80211_crit_proto_stop,
+	.tdls_oper = brcmf_cfg80211_tdls_oper,
+};
+
+struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
+					   enum nl80211_iftype type,
+					   bool pm_block)
+{
+	struct brcmf_cfg80211_vif *vif_walk;
+	struct brcmf_cfg80211_vif *vif;
+	bool mbss;
+
+	brcmf_dbg(TRACE, "allocating virtual interface (size=%zu)\n",
+		  sizeof(*vif));
+	vif = kzalloc(sizeof(*vif), GFP_KERNEL);
+	if (!vif)
+		return ERR_PTR(-ENOMEM);
+
+	vif->wdev.wiphy = cfg->wiphy;
+	vif->wdev.iftype = type;
+
+	vif->pm_block = pm_block;
+	vif->roam_off = -1;
+
+	brcmf_init_prof(&vif->profile);
+
+	if (type == NL80211_IFTYPE_AP) {
+		mbss = false;
+		list_for_each_entry(vif_walk, &cfg->vif_list, list) {
+			if (vif_walk->wdev.iftype == NL80211_IFTYPE_AP) {
+				mbss = true;
+				break;
+			}
+		}
+		vif->mbss = mbss;
+	}
+
+	list_add_tail(&vif->list, &cfg->vif_list);
+	return vif;
+}
+
+void brcmf_free_vif(struct brcmf_cfg80211_vif *vif)
+{
+	list_del(&vif->list);
+	kfree(vif);
+}
+
+void brcmf_cfg80211_free_netdev(struct net_device *ndev)
+{
+	struct brcmf_cfg80211_vif *vif;
+	struct brcmf_if *ifp;
+
+	ifp = netdev_priv(ndev);
+	vif = ifp->vif;
+
+	if (vif)
+		brcmf_free_vif(vif);
+	free_netdev(ndev);
+}
+
+static bool brcmf_is_linkup(const struct brcmf_event_msg *e)
+{
+	u32 event = e->event_code;
+	u32 status = e->status;
+
+	if (event == BRCMF_E_SET_SSID && status == BRCMF_E_STATUS_SUCCESS) {
+		brcmf_dbg(CONN, "Processing set ssid\n");
+		return true;
+	}
+
+	return false;
+}
+
+static bool brcmf_is_linkdown(const struct brcmf_event_msg *e)
+{
+	u32 event = e->event_code;
+	u16 flags = e->flags;
+
+	if ((event == BRCMF_E_DEAUTH) || (event == BRCMF_E_DEAUTH_IND) ||
+	    (event == BRCMF_E_DISASSOC_IND) ||
+	    ((event == BRCMF_E_LINK) && (!(flags & BRCMF_EVENT_MSG_LINK)))) {
+		brcmf_dbg(CONN, "Processing link down\n");
+		return true;
+	}
+	return false;
+}
+
+static bool brcmf_is_nonetwork(struct brcmf_cfg80211_info *cfg,
+			       const struct brcmf_event_msg *e)
+{
+	u32 event = e->event_code;
+	u32 status = e->status;
+
+	if (event == BRCMF_E_LINK && status == BRCMF_E_STATUS_NO_NETWORKS) {
+		brcmf_dbg(CONN, "Processing Link %s & no network found\n",
+			  e->flags & BRCMF_EVENT_MSG_LINK ? "up" : "down");
+		return true;
+	}
+
+	if (event == BRCMF_E_SET_SSID && status != BRCMF_E_STATUS_SUCCESS) {
+		brcmf_dbg(CONN, "Processing connecting & no network found\n");
+		return true;
+	}
+
+	return false;
+}
+
+static void brcmf_clear_assoc_ies(struct brcmf_cfg80211_info *cfg)
+{
+	struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
+
+	kfree(conn_info->req_ie);
+	conn_info->req_ie = NULL;
+	conn_info->req_ie_len = 0;
+	kfree(conn_info->resp_ie);
+	conn_info->resp_ie = NULL;
+	conn_info->resp_ie_len = 0;
+}
+
+static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg,
+			       struct brcmf_if *ifp)
+{
+	struct brcmf_cfg80211_assoc_ielen_le *assoc_info;
+	struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
+	u32 req_len;
+	u32 resp_len;
+	s32 err = 0;
+
+	brcmf_clear_assoc_ies(cfg);
+
+	err = brcmf_fil_iovar_data_get(ifp, "assoc_info",
+				       cfg->extra_buf, WL_ASSOC_INFO_MAX);
+	if (err) {
+		brcmf_err("could not get assoc info (%d)\n", err);
+		return err;
+	}
+	assoc_info =
+		(struct brcmf_cfg80211_assoc_ielen_le *)cfg->extra_buf;
+	req_len = le32_to_cpu(assoc_info->req_len);
+	resp_len = le32_to_cpu(assoc_info->resp_len);
+	if (req_len) {
+		err = brcmf_fil_iovar_data_get(ifp, "assoc_req_ies",
+					       cfg->extra_buf,
+					       WL_ASSOC_INFO_MAX);
+		if (err) {
+			brcmf_err("could not get assoc req (%d)\n", err);
+			return err;
+		}
+		conn_info->req_ie_len = req_len;
+		conn_info->req_ie =
+		    kmemdup(cfg->extra_buf, conn_info->req_ie_len,
+			    GFP_KERNEL);
+	} else {
+		conn_info->req_ie_len = 0;
+		conn_info->req_ie = NULL;
+	}
+	if (resp_len) {
+		err = brcmf_fil_iovar_data_get(ifp, "assoc_resp_ies",
+					       cfg->extra_buf,
+					       WL_ASSOC_INFO_MAX);
+		if (err) {
+			brcmf_err("could not get assoc resp (%d)\n", err);
+			return err;
+		}
+		conn_info->resp_ie_len = resp_len;
+		conn_info->resp_ie =
+		    kmemdup(cfg->extra_buf, conn_info->resp_ie_len,
+			    GFP_KERNEL);
+	} else {
+		conn_info->resp_ie_len = 0;
+		conn_info->resp_ie = NULL;
+	}
+	brcmf_dbg(CONN, "req len (%d) resp len (%d)\n",
+		  conn_info->req_ie_len, conn_info->resp_ie_len);
+
+	return err;
+}
+
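+/* brcmf_bss_roaming_done() - called after a successful roam: refresh the
+ * association IEs and BSS info, resolve the new operating channel from the
+ * firmware BSS info and report the roam to cfg80211.
+ */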
+static s32
+brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
+		       struct net_device *ndev,
+		       const struct brcmf_event_msg *e)
+{
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
+	struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
+	struct wiphy *wiphy = cfg_to_wiphy(cfg);
+	struct ieee80211_channel *notify_channel = NULL;
+	struct ieee80211_supported_band *band;
+	struct brcmf_bss_info_le *bi;
+	struct brcmu_chan ch;
+	u32 freq;
+	s32 err = 0;
+	u8 *buf;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	brcmf_get_assoc_ies(cfg, ifp);
+	memcpy(profile->bssid, e->addr, ETH_ALEN);
+	brcmf_update_bss_info(cfg, ifp);
+
+	buf = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
+	if (buf == NULL) {
+		err = -ENOMEM;
+		goto done;
+	}
+
+	/* data sent to dongle has to be little endian */
+	*(__le32 *)buf = cpu_to_le32(WL_BSS_INFO_MAX);
+	err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BSS_INFO,
+				     buf, WL_BSS_INFO_MAX);
+
+	if (err)
+		goto done;
+
+	bi = (struct brcmf_bss_info_le *)(buf + 4);
+	ch.chspec = le16_to_cpu(bi->chanspec);
+	cfg->d11inf.decchspec(&ch);
+
+	if (ch.band == BRCMU_CHAN_BAND_2G)
+		band = wiphy->bands[IEEE80211_BAND_2GHZ];
+	else
+		band = wiphy->bands[IEEE80211_BAND_5GHZ];
+
+	freq = ieee80211_channel_to_frequency(ch.chnum, band->band);
+	notify_channel = ieee80211_get_channel(wiphy, freq);
+
+done:
+	kfree(buf);
+	cfg80211_roamed(ndev, notify_channel, (u8 *)profile->bssid,
+			conn_info->req_ie, conn_info->req_ie_len,
+			conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL);
+	brcmf_dbg(CONN, "Report roaming result\n");
+
+	set_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state);
+	brcmf_dbg(TRACE, "Exit\n");
+	return err;
+}
+
+static s32
+brcmf_bss_connect_done(struct brcmf_cfg80211_info *cfg,
+		       struct net_device *ndev, const struct brcmf_event_msg *e,
+		       bool completed)
+{
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
+	struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	if (test_and_clear_bit(BRCMF_VIF_STATUS_CONNECTING,
+			       &ifp->vif->sme_state)) {
+		if (completed) {
+			brcmf_get_assoc_ies(cfg, ifp);
+			memcpy(profile->bssid, e->addr, ETH_ALEN);
+			brcmf_update_bss_info(cfg, ifp);
+			set_bit(BRCMF_VIF_STATUS_CONNECTED,
+				&ifp->vif->sme_state);
+		}
+		cfg80211_connect_result(ndev,
+					(u8 *)profile->bssid,
+					conn_info->req_ie,
+					conn_info->req_ie_len,
+					conn_info->resp_ie,
+					conn_info->resp_ie_len,
+					completed ? WLAN_STATUS_SUCCESS :
+						    WLAN_STATUS_AUTH_TIMEOUT,
+					GFP_KERNEL);
+		brcmf_dbg(CONN, "Report connect result - connection %s\n",
+			  completed ? "succeeded" : "failed");
+	}
+	brcmf_dbg(TRACE, "Exit\n");
+	return 0;
+}
+
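+/* brcmf_notify_connect_status_ap() - handle connect status events in AP mode:
+ * report new stations on (RE)ASSOC_IND, remove them on DEAUTH/DISASSOC, and
+ * complete vif_disabled when the BSS configuration goes down.
+ */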
+static s32
+brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg,
+			       struct net_device *ndev,
+			       const struct brcmf_event_msg *e, void *data)
+{
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	static int generation;
+	u32 event = e->event_code;
+	u32 reason = e->reason;
+	struct station_info sinfo;
+
+	brcmf_dbg(CONN, "event %d, reason %d\n", event, reason);
+	if (event == BRCMF_E_LINK && reason == BRCMF_E_REASON_LINK_BSSCFG_DIS &&
+	    ndev != cfg_to_ndev(cfg)) {
+		brcmf_dbg(CONN, "AP mode link down\n");
+		complete(&cfg->vif_disabled);
+		if (ifp->vif->mbss)
+			brcmf_remove_interface(ifp);
+		return 0;
+	}
+
+	if (((event == BRCMF_E_ASSOC_IND) || (event == BRCMF_E_REASSOC_IND)) &&
+	    (reason == BRCMF_E_STATUS_SUCCESS)) {
+		memset(&sinfo, 0, sizeof(sinfo));
+		if (!data) {
+			brcmf_err("No IEs present in ASSOC/REASSOC_IND");
+			return -EINVAL;
+		}
+		sinfo.assoc_req_ies = data;
+		sinfo.assoc_req_ies_len = e->datalen;
+		generation++;
+		sinfo.generation = generation;
+		cfg80211_new_sta(ndev, e->addr, &sinfo, GFP_KERNEL);
+	} else if ((event == BRCMF_E_DISASSOC_IND) ||
+		   (event == BRCMF_E_DEAUTH_IND) ||
+		   (event == BRCMF_E_DEAUTH)) {
+		cfg80211_del_sta(ndev, e->addr, GFP_KERNEL);
+	}
+	return 0;
+}
+
+static s32
+brcmf_notify_connect_status(struct brcmf_if *ifp,
+			    const struct brcmf_event_msg *e, void *data)
+{
+	struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+	struct net_device *ndev = ifp->ndev;
+	struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
+	struct ieee80211_channel *chan;
+	s32 err = 0;
+
+	if ((e->event_code == BRCMF_E_DEAUTH) ||
+	    (e->event_code == BRCMF_E_DEAUTH_IND) ||
+	    (e->event_code == BRCMF_E_DISASSOC_IND) ||
+	    ((e->event_code == BRCMF_E_LINK) && (!e->flags))) {
+		brcmf_proto_delete_peer(ifp->drvr, ifp->ifidx, (u8 *)e->addr);
+	}
+
+	if (brcmf_is_apmode(ifp->vif)) {
+		err = brcmf_notify_connect_status_ap(cfg, ndev, e, data);
+	} else if (brcmf_is_linkup(e)) {
+		brcmf_dbg(CONN, "Linkup\n");
+		if (brcmf_is_ibssmode(ifp->vif)) {
+			chan = ieee80211_get_channel(cfg->wiphy, cfg->channel);
+			memcpy(profile->bssid, e->addr, ETH_ALEN);
+			wl_inform_ibss(cfg, ndev, e->addr);
+			cfg80211_ibss_joined(ndev, e->addr, chan, GFP_KERNEL);
+			clear_bit(BRCMF_VIF_STATUS_CONNECTING,
+				  &ifp->vif->sme_state);
+			set_bit(BRCMF_VIF_STATUS_CONNECTED,
+				&ifp->vif->sme_state);
+		} else {
+			brcmf_bss_connect_done(cfg, ndev, e, true);
+		}
+		brcmf_net_setcarrier(ifp, true);
+	} else if (brcmf_is_linkdown(e)) {
+		brcmf_dbg(CONN, "Linkdown\n");
+		if (!brcmf_is_ibssmode(ifp->vif)) {
+			brcmf_bss_connect_done(cfg, ndev, e, false);
+		}
+		brcmf_link_down(ifp->vif, brcmf_map_fw_linkdown_reason(e));
+		brcmf_init_prof(ndev_to_prof(ndev));
+		if (ndev != cfg_to_ndev(cfg))
+			complete(&cfg->vif_disabled);
+		brcmf_net_setcarrier(ifp, false);
+	} else if (brcmf_is_nonetwork(cfg, e)) {
+		if (brcmf_is_ibssmode(ifp->vif))
+			clear_bit(BRCMF_VIF_STATUS_CONNECTING,
+				  &ifp->vif->sme_state);
+		else
+			brcmf_bss_connect_done(cfg, ndev, e, false);
+	}
+
+	return err;
+}
+
+static s32
+brcmf_notify_roaming_status(struct brcmf_if *ifp,
+			    const struct brcmf_event_msg *e, void *data)
+{
+	struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+	u32 event = e->event_code;
+	u32 status = e->status;
+
+	if (event == BRCMF_E_ROAM && status == BRCMF_E_STATUS_SUCCESS) {
+		if (test_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state))
+			brcmf_bss_roaming_done(cfg, ifp->ndev, e);
+		else
+			brcmf_bss_connect_done(cfg, ifp->ndev, e, true);
+	}
+
+	return 0;
+}
+
+static s32
+brcmf_notify_mic_status(struct brcmf_if *ifp,
+			const struct brcmf_event_msg *e, void *data)
+{
+	u16 flags = e->flags;
+	enum nl80211_key_type key_type;
+
+	if (flags & BRCMF_EVENT_MSG_GROUP)
+		key_type = NL80211_KEYTYPE_GROUP;
+	else
+		key_type = NL80211_KEYTYPE_PAIRWISE;
+
+	cfg80211_michael_mic_failure(ifp->ndev, (u8 *)&e->addr, key_type, -1,
+				     NULL, GFP_KERNEL);
+
+	return 0;
+}
+
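+/* brcmf_notify_vif_event() - handle firmware interface add/delete/change
+ * events and wake up any process waiting on the vif event queue.
+ */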
+static s32 brcmf_notify_vif_event(struct brcmf_if *ifp,
+				  const struct brcmf_event_msg *e, void *data)
+{
+	struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+	struct brcmf_if_event *ifevent = (struct brcmf_if_event *)data;
+	struct brcmf_cfg80211_vif_event *event = &cfg->vif_event;
+	struct brcmf_cfg80211_vif *vif;
+
+	brcmf_dbg(TRACE, "Enter: action %u flags %u ifidx %u bsscfg %u\n",
+		  ifevent->action, ifevent->flags, ifevent->ifidx,
+		  ifevent->bssidx);
+
+	mutex_lock(&event->vif_event_lock);
+	event->action = ifevent->action;
+	vif = event->vif;
+
+	switch (ifevent->action) {
+	case BRCMF_E_IF_ADD:
+		/* waiting process may have timed out */
+		if (!cfg->vif_event.vif) {
+			mutex_unlock(&event->vif_event_lock);
+			return -EBADF;
+		}
+
+		ifp->vif = vif;
+		vif->ifp = ifp;
+		if (ifp->ndev) {
+			vif->wdev.netdev = ifp->ndev;
+			ifp->ndev->ieee80211_ptr = &vif->wdev;
+			SET_NETDEV_DEV(ifp->ndev, wiphy_dev(cfg->wiphy));
+		}
+		mutex_unlock(&event->vif_event_lock);
+		wake_up(&event->vif_wq);
+		return 0;
+
+	case BRCMF_E_IF_DEL:
+		mutex_unlock(&event->vif_event_lock);
+		/* event may not be upon user request */
+		if (brcmf_cfg80211_vif_event_armed(cfg))
+			wake_up(&event->vif_wq);
+		return 0;
+
+	case BRCMF_E_IF_CHANGE:
+		mutex_unlock(&event->vif_event_lock);
+		wake_up(&event->vif_wq);
+		return 0;
+
+	default:
+		mutex_unlock(&event->vif_event_lock);
+		break;
+	}
+	return -EINVAL;
+}
+
+static void brcmf_init_conf(struct brcmf_cfg80211_conf *conf)
+{
+	conf->frag_threshold = (u32)-1;
+	conf->rts_threshold = (u32)-1;
+	conf->retry_short = (u32)-1;
+	conf->retry_long = (u32)-1;
+	conf->tx_power = -1;
+}
+
+static void brcmf_register_event_handlers(struct brcmf_cfg80211_info *cfg)
+{
+	brcmf_fweh_register(cfg->pub, BRCMF_E_LINK,
+			    brcmf_notify_connect_status);
+	brcmf_fweh_register(cfg->pub, BRCMF_E_DEAUTH_IND,
+			    brcmf_notify_connect_status);
+	brcmf_fweh_register(cfg->pub, BRCMF_E_DEAUTH,
+			    brcmf_notify_connect_status);
+	brcmf_fweh_register(cfg->pub, BRCMF_E_DISASSOC_IND,
+			    brcmf_notify_connect_status);
+	brcmf_fweh_register(cfg->pub, BRCMF_E_ASSOC_IND,
+			    brcmf_notify_connect_status);
+	brcmf_fweh_register(cfg->pub, BRCMF_E_REASSOC_IND,
+			    brcmf_notify_connect_status);
+	brcmf_fweh_register(cfg->pub, BRCMF_E_ROAM,
+			    brcmf_notify_roaming_status);
+	brcmf_fweh_register(cfg->pub, BRCMF_E_MIC_ERROR,
+			    brcmf_notify_mic_status);
+	brcmf_fweh_register(cfg->pub, BRCMF_E_SET_SSID,
+			    brcmf_notify_connect_status);
+	brcmf_fweh_register(cfg->pub, BRCMF_E_PFN_NET_FOUND,
+			    brcmf_notify_sched_scan_results);
+	brcmf_fweh_register(cfg->pub, BRCMF_E_IF,
+			    brcmf_notify_vif_event);
+	brcmf_fweh_register(cfg->pub, BRCMF_E_P2P_PROBEREQ_MSG,
+			    brcmf_p2p_notify_rx_mgmt_p2p_probereq);
+	brcmf_fweh_register(cfg->pub, BRCMF_E_P2P_DISC_LISTEN_COMPLETE,
+			    brcmf_p2p_notify_listen_complete);
+	brcmf_fweh_register(cfg->pub, BRCMF_E_ACTION_FRAME_RX,
+			    brcmf_p2p_notify_action_frame_rx);
+	brcmf_fweh_register(cfg->pub, BRCMF_E_ACTION_FRAME_COMPLETE,
+			    brcmf_p2p_notify_action_tx_complete);
+	brcmf_fweh_register(cfg->pub, BRCMF_E_ACTION_FRAME_OFF_CHAN_COMPLETE,
+			    brcmf_p2p_notify_action_tx_complete);
+}
+
+static void brcmf_deinit_priv_mem(struct brcmf_cfg80211_info *cfg)
+{
+	kfree(cfg->conf);
+	cfg->conf = NULL;
+	kfree(cfg->escan_ioctl_buf);
+	cfg->escan_ioctl_buf = NULL;
+	kfree(cfg->extra_buf);
+	cfg->extra_buf = NULL;
+	kfree(cfg->pmk_list);
+	cfg->pmk_list = NULL;
+}
+
+static s32 brcmf_init_priv_mem(struct brcmf_cfg80211_info *cfg)
+{
+	cfg->conf = kzalloc(sizeof(*cfg->conf), GFP_KERNEL);
+	if (!cfg->conf)
+		goto init_priv_mem_out;
+	cfg->escan_ioctl_buf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL);
+	if (!cfg->escan_ioctl_buf)
+		goto init_priv_mem_out;
+	cfg->extra_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
+	if (!cfg->extra_buf)
+		goto init_priv_mem_out;
+	cfg->pmk_list = kzalloc(sizeof(*cfg->pmk_list), GFP_KERNEL);
+	if (!cfg->pmk_list)
+		goto init_priv_mem_out;
+
+	return 0;
+
+init_priv_mem_out:
+	brcmf_deinit_priv_mem(cfg);
+
+	return -ENOMEM;
+}
+
+static s32 wl_init_priv(struct brcmf_cfg80211_info *cfg)
+{
+	s32 err = 0;
+
+	cfg->scan_request = NULL;
+	cfg->pwr_save = true;
+	cfg->active_scan = true;	/* we do active scan per default */
+	cfg->dongle_up = false;		/* dongle is not up yet */
+	err = brcmf_init_priv_mem(cfg);
+	if (err)
+		return err;
+	brcmf_register_event_handlers(cfg);
+	mutex_init(&cfg->usr_sync);
+	brcmf_init_escan(cfg);
+	brcmf_init_conf(cfg->conf);
+	init_completion(&cfg->vif_disabled);
+	return err;
+}
+
+static void wl_deinit_priv(struct brcmf_cfg80211_info *cfg)
+{
+	cfg->dongle_up = false;	/* dongle down */
+	brcmf_abort_scanning(cfg);
+	brcmf_deinit_priv_mem(cfg);
+}
+
+static void init_vif_event(struct brcmf_cfg80211_vif_event *event)
+{
+	init_waitqueue_head(&event->vif_wq);
+	mutex_init(&event->vif_event_lock);
+}
+
+static s32
+brcmf_dongle_roam(struct brcmf_if *ifp, u32 bcn_timeout)
+{
+	s32 err = 0;
+	__le32 roamtrigger[2];
+	__le32 roam_delta[2];
+
+	/*
+	 * Set up a beacon-loss timeout to report link down
+	 * when roaming is turned off.
+	 */
+	if (brcmf_roamoff) {
+		err = brcmf_fil_iovar_int_set(ifp, "bcn_timeout", bcn_timeout);
+		if (err) {
+			brcmf_err("bcn_timeout error (%d)\n", err);
+			goto dongle_rom_out;
+		}
+	}
+
+	/*
+	 * Enable/Disable built-in roaming to allow supplicant
+	 * to take care of roaming
+	 */
+	brcmf_dbg(INFO, "Internal Roaming = %s\n",
+		  brcmf_roamoff ? "Off" : "On");
+	err = brcmf_fil_iovar_int_set(ifp, "roam_off", !!(brcmf_roamoff));
+	if (err) {
+		brcmf_err("roam_off error (%d)\n", err);
+		goto dongle_rom_out;
+	}
+
+	roamtrigger[0] = cpu_to_le32(WL_ROAM_TRIGGER_LEVEL);
+	roamtrigger[1] = cpu_to_le32(BRCM_BAND_ALL);
+	err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_ROAM_TRIGGER,
+				     (void *)roamtrigger, sizeof(roamtrigger));
+	if (err) {
+		brcmf_err("WLC_SET_ROAM_TRIGGER error (%d)\n", err);
+		goto dongle_rom_out;
+	}
+
+	roam_delta[0] = cpu_to_le32(WL_ROAM_DELTA);
+	roam_delta[1] = cpu_to_le32(BRCM_BAND_ALL);
+	err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_ROAM_DELTA,
+				     (void *)roam_delta, sizeof(roam_delta));
+	if (err) {
+		brcmf_err("WLC_SET_ROAM_DELTA error (%d)\n", err);
+		goto dongle_rom_out;
+	}
+
+dongle_rom_out:
+	return err;
+}
+
+static s32
+brcmf_dongle_scantime(struct brcmf_if *ifp, s32 scan_assoc_time,
+		      s32 scan_unassoc_time, s32 scan_passive_time)
+{
+	s32 err = 0;
+
+	err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_CHANNEL_TIME,
+				    scan_assoc_time);
+	if (err) {
+		if (err == -EOPNOTSUPP)
+			brcmf_dbg(INFO, "Scan assoc time is not supported\n");
+		else
+			brcmf_err("Scan assoc time error (%d)\n", err);
+		goto dongle_scantime_out;
+	}
+	err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_UNASSOC_TIME,
+				    scan_unassoc_time);
+	if (err) {
+		if (err == -EOPNOTSUPP)
+			brcmf_dbg(INFO, "Scan unassoc time is not supported\n");
+		else
+			brcmf_err("Scan unassoc time error (%d)\n", err);
+		goto dongle_scantime_out;
+	}
+
+	err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_PASSIVE_TIME,
+				    scan_passive_time);
+	if (err) {
+		if (err == -EOPNOTSUPP)
+			brcmf_dbg(INFO, "Scan passive time is not supported\n");
+		else
+			brcmf_err("Scan passive time error (%d)\n", err);
+		goto dongle_scantime_out;
+	}
+
+dongle_scantime_out:
+	return err;
+}
+
+static void brcmf_update_bw40_channel_flag(struct ieee80211_channel *channel,
+					   struct brcmu_chan *ch)
+{
+	u32 ht40_flag;
+
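+	/* a chanspec with the upper sideband permits HT40- on this channel,
+	 * one with the lower sideband permits HT40+
+	 */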
+	ht40_flag = channel->flags & IEEE80211_CHAN_NO_HT40;
+	if (ch->sb == BRCMU_CHAN_SB_U) {
+		if (ht40_flag == IEEE80211_CHAN_NO_HT40)
+			channel->flags &= ~IEEE80211_CHAN_NO_HT40;
+		channel->flags |= IEEE80211_CHAN_NO_HT40PLUS;
+	} else {
+		/* ht40_flag should be one of
+		 * IEEE80211_CHAN_NO_HT40 or
+		 * IEEE80211_CHAN_NO_HT40PLUS here
+		 */
+		channel->flags &= ~IEEE80211_CHAN_NO_HT40;
+		if (ht40_flag == IEEE80211_CHAN_NO_HT40)
+			channel->flags |= IEEE80211_CHAN_NO_HT40MINUS;
+	}
+}
+
+static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
+				    u32 bw_cap[])
+{
+	struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
+	struct ieee80211_supported_band *band;
+	struct ieee80211_channel *channel;
+	struct wiphy *wiphy;
+	struct brcmf_chanspec_list *list;
+	struct brcmu_chan ch;
+	int err;
+	u8 *pbuf;
+	u32 i, j;
+	u32 total;
+	u32 chaninfo;
+	u32 index;
+
+	pbuf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL);
+
+	if (pbuf == NULL)
+		return -ENOMEM;
+
+	list = (struct brcmf_chanspec_list *)pbuf;
+
+	err = brcmf_fil_iovar_data_get(ifp, "chanspecs", pbuf,
+				       BRCMF_DCMD_MEDLEN);
+	if (err) {
+		brcmf_err("get chanspecs error (%d)\n", err);
+		goto fail_pbuf;
+	}
+
+	wiphy = cfg_to_wiphy(cfg);
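+	/* start with every channel disabled; only channels reported in the
+	 * firmware chanspec list are re-enabled below
+	 */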
+	band = wiphy->bands[IEEE80211_BAND_2GHZ];
+	if (band)
+		for (i = 0; i < band->n_channels; i++)
+			band->channels[i].flags = IEEE80211_CHAN_DISABLED;
+	band = wiphy->bands[IEEE80211_BAND_5GHZ];
+	if (band)
+		for (i = 0; i < band->n_channels; i++)
+			band->channels[i].flags = IEEE80211_CHAN_DISABLED;
+
+	total = le32_to_cpu(list->count);
+	for (i = 0; i < total; i++) {
+		ch.chspec = (u16)le32_to_cpu(list->element[i]);
+		cfg->d11inf.decchspec(&ch);
+
+		if (ch.band == BRCMU_CHAN_BAND_2G) {
+			band = wiphy->bands[IEEE80211_BAND_2GHZ];
+		} else if (ch.band == BRCMU_CHAN_BAND_5G) {
+			band = wiphy->bands[IEEE80211_BAND_5GHZ];
+		} else {
+			brcmf_err("Invalid channel Spec. 0x%x.\n", ch.chspec);
+			continue;
+		}
+		if (!band)
+			continue;
+		if (!(bw_cap[band->band] & WLC_BW_40MHZ_BIT) &&
+		    ch.bw == BRCMU_CHAN_BW_40)
+			continue;
+		if (!(bw_cap[band->band] & WLC_BW_80MHZ_BIT) &&
+		    ch.bw == BRCMU_CHAN_BW_80)
+			continue;
+
+		channel = band->channels;
+		index = band->n_channels;
+		for (j = 0; j < band->n_channels; j++) {
+			if (channel[j].hw_value == ch.chnum) {
+				index = j;
+				break;
+			}
+		}
+		channel[index].center_freq =
+			ieee80211_channel_to_frequency(ch.chnum, band->band);
+		channel[index].hw_value = ch.chnum;
+
+		/* assuming the chanspecs order is HT20,
+		 * HT40 upper, HT40 lower, and VHT80.
+		 */
+		if (ch.bw == BRCMU_CHAN_BW_80) {
+			channel[index].flags &= ~IEEE80211_CHAN_NO_80MHZ;
+		} else if (ch.bw == BRCMU_CHAN_BW_40) {
+			brcmf_update_bw40_channel_flag(&channel[index], &ch);
+		} else {
+			/* enable the channel and disable other bandwidths
+			 * for now; the order mentioned above assures they
+			 * get enabled for subsequent chanspecs.
+			 */
+			channel[index].flags = IEEE80211_CHAN_NO_HT40 |
+					       IEEE80211_CHAN_NO_80MHZ;
+			ch.bw = BRCMU_CHAN_BW_20;
+			cfg->d11inf.encchspec(&ch);
+			chaninfo = ch.chspec;
+			err = brcmf_fil_bsscfg_int_get(ifp, "per_chan_info",
+						       &chaninfo);
+			if (!err) {
+				if (chaninfo & WL_CHAN_RADAR)
+					channel[index].flags |=
+						(IEEE80211_CHAN_RADAR |
+						 IEEE80211_CHAN_NO_IR);
+				if (chaninfo & WL_CHAN_PASSIVE)
+					channel[index].flags |=
+						IEEE80211_CHAN_NO_IR;
+			}
+		}
+	}
+
+fail_pbuf:
+	kfree(pbuf);
+	return err;
+}
+
+static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg)
+{
+	struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
+	struct ieee80211_supported_band *band;
+	struct brcmf_fil_bwcap_le band_bwcap;
+	struct brcmf_chanspec_list *list;
+	u8 *pbuf;
+	u32 val;
+	int err;
+	struct brcmu_chan ch;
+	u32 num_chan;
+	int i, j;
+
+	/* verify support for bw_cap command */
+	val = WLC_BAND_5G;
+	err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &val);
+
+	if (!err) {
+		/* only set 2G bandwidth using bw_cap command */
+		band_bwcap.band = cpu_to_le32(WLC_BAND_2G);
+		band_bwcap.bw_cap = cpu_to_le32(WLC_BW_CAP_40MHZ);
+		err = brcmf_fil_iovar_data_set(ifp, "bw_cap", &band_bwcap,
+					       sizeof(band_bwcap));
+	} else {
+		brcmf_dbg(INFO, "fallback to mimo_bw_cap\n");
+		val = WLC_N_BW_40ALL;
+		err = brcmf_fil_iovar_int_set(ifp, "mimo_bw_cap", val);
+	}
+
+	if (!err) {
+		/* update channel info in 2G band */
+		pbuf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL);
+
+		if (pbuf == NULL)
+			return -ENOMEM;
+
+		ch.band = BRCMU_CHAN_BAND_2G;
+		ch.bw = BRCMU_CHAN_BW_40;
+		ch.sb = BRCMU_CHAN_SB_NONE;
+		ch.chnum = 0;
+		cfg->d11inf.encchspec(&ch);
+
+		/* pass encoded chanspec in query */
+		*(__le16 *)pbuf = cpu_to_le16(ch.chspec);
+
+		err = brcmf_fil_iovar_data_get(ifp, "chanspecs", pbuf,
+					       BRCMF_DCMD_MEDLEN);
+		if (err) {
+			brcmf_err("get chanspecs error (%d)\n", err);
+			kfree(pbuf);
+			return err;
+		}
+
+		band = cfg_to_wiphy(cfg)->bands[IEEE80211_BAND_2GHZ];
+		list = (struct brcmf_chanspec_list *)pbuf;
+		num_chan = le32_to_cpu(list->count);
+		for (i = 0; i < num_chan; i++) {
+			ch.chspec = (u16)le32_to_cpu(list->element[i]);
+			cfg->d11inf.decchspec(&ch);
+			if (WARN_ON(ch.band != BRCMU_CHAN_BAND_2G))
+				continue;
+			if (WARN_ON(ch.bw != BRCMU_CHAN_BW_40))
+				continue;
+			for (j = 0; j < band->n_channels; j++) {
+				if (band->channels[j].hw_value == ch.chnum)
+					break;
+			}
+			if (WARN_ON(j == band->n_channels))
+				continue;
+
+			brcmf_update_bw40_channel_flag(&band->channels[j], &ch);
+		}
+		kfree(pbuf);
+	}
+	return err;
+}
+
+static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[])
+{
+	u32 band, mimo_bwcap;
+	int err;
+
+	band = WLC_BAND_2G;
+	err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &band);
+	if (!err) {
+		bw_cap[IEEE80211_BAND_2GHZ] = band;
+		band = WLC_BAND_5G;
+		err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &band);
+		if (!err) {
+			bw_cap[IEEE80211_BAND_5GHZ] = band;
+			return;
+		}
+		WARN_ON(1);
+		return;
+	}
+	brcmf_dbg(INFO, "fallback to mimo_bw_cap info\n");
+	mimo_bwcap = 0;
+	err = brcmf_fil_iovar_int_get(ifp, "mimo_bw_cap", &mimo_bwcap);
+	if (err)
+		/* assume 20MHz if firmware does not give a clue */
+		mimo_bwcap = WLC_N_BW_20ALL;
+
+	switch (mimo_bwcap) {
+	case WLC_N_BW_40ALL:
+		bw_cap[IEEE80211_BAND_2GHZ] |= WLC_BW_40MHZ_BIT;
+		/* fall-thru */
+	case WLC_N_BW_20IN2G_40IN5G:
+		bw_cap[IEEE80211_BAND_5GHZ] |= WLC_BW_40MHZ_BIT;
+		/* fall-thru */
+	case WLC_N_BW_20ALL:
+		bw_cap[IEEE80211_BAND_2GHZ] |= WLC_BW_20MHZ_BIT;
+		bw_cap[IEEE80211_BAND_5GHZ] |= WLC_BW_20MHZ_BIT;
+		break;
+	default:
+		brcmf_err("invalid mimo_bw_cap value\n");
+	}
+}
+
+static void brcmf_update_ht_cap(struct ieee80211_supported_band *band,
+				u32 bw_cap[2], u32 nchain)
+{
+	band->ht_cap.ht_supported = true;
+	if (bw_cap[band->band] & WLC_BW_40MHZ_BIT) {
+		band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+		band->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+	}
+	band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
+	band->ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
+	band->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+	band->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
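+	/* rx_mask has one byte per spatial stream; advertise MCS 0-7 on
+	 * each of the nchain available streams
+	 */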
+	memset(band->ht_cap.mcs.rx_mask, 0xff, nchain);
+	band->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+}
+
+static __le16 brcmf_get_mcs_map(u32 nchain, enum ieee80211_vht_mcs_support supp)
+{
+	u16 mcs_map;
+	int i;
+
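+	/* the VHT MCS map uses 2 bits per spatial stream; starting from the
+	 * all-ones pattern leaves streams above nchain marked 3 (not
+	 * supported), while each iteration inserts supp for one more stream
+	 */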
+	for (i = 0, mcs_map = 0xFFFF; i < nchain; i++)
+		mcs_map = (mcs_map << 2) | supp;
+
+	return cpu_to_le16(mcs_map);
+}
+
+static void brcmf_update_vht_cap(struct ieee80211_supported_band *band,
+				 u32 bw_cap[2], u32 nchain)
+{
+	__le16 mcs_map;
+
+	/* not allowed in 2.4G band */
+	if (band->band == IEEE80211_BAND_2GHZ)
+		return;
+
+	band->vht_cap.vht_supported = true;
+	/* 80MHz is mandatory */
+	band->vht_cap.cap |= IEEE80211_VHT_CAP_SHORT_GI_80;
+	if (bw_cap[band->band] & WLC_BW_160MHZ_BIT) {
+		band->vht_cap.cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
+		band->vht_cap.cap |= IEEE80211_VHT_CAP_SHORT_GI_160;
+	}
+	/* all support 256-QAM */
+	mcs_map = brcmf_get_mcs_map(nchain, IEEE80211_VHT_MCS_SUPPORT_0_9);
+	band->vht_cap.vht_mcs.rx_mcs_map = mcs_map;
+	band->vht_cap.vht_mcs.tx_mcs_map = mcs_map;
+}
+
+static int brcmf_setup_wiphybands(struct wiphy *wiphy)
+{
+	struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
+	struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
+	u32 nmode = 0;
+	u32 vhtmode = 0;
+	u32 bw_cap[2] = { WLC_BW_20MHZ_BIT, WLC_BW_20MHZ_BIT };
+	u32 rxchain;
+	u32 nchain;
+	int err;
+	s32 i;
+	struct ieee80211_supported_band *band;
+
+	(void)brcmf_fil_iovar_int_get(ifp, "vhtmode", &vhtmode);
+	err = brcmf_fil_iovar_int_get(ifp, "nmode", &nmode);
+	if (err) {
+		brcmf_err("nmode error (%d)\n", err);
+	} else {
+		brcmf_get_bwcap(ifp, bw_cap);
+	}
+	brcmf_dbg(INFO, "nmode=%d, vhtmode=%d, bw_cap=(%d, %d)\n",
+		  nmode, vhtmode, bw_cap[IEEE80211_BAND_2GHZ],
+		  bw_cap[IEEE80211_BAND_5GHZ]);
+
+	err = brcmf_fil_iovar_int_get(ifp, "rxchain", &rxchain);
+	if (err) {
+		brcmf_err("rxchain error (%d)\n", err);
+		nchain = 1;
+	} else {
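+		/* count the set bits in rxchain: each bit is an active
+		 * RX chain (clear the lowest set bit per iteration)
+		 */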
+		for (nchain = 0; rxchain; nchain++)
+			rxchain = rxchain & (rxchain - 1);
+	}
+	brcmf_dbg(INFO, "nchain=%d\n", nchain);
+
+	err = brcmf_construct_chaninfo(cfg, bw_cap);
+	if (err) {
+		brcmf_err("brcmf_construct_chaninfo failed (%d)\n", err);
+		return err;
+	}
+
+	wiphy = cfg_to_wiphy(cfg);
+	for (i = 0; i < ARRAY_SIZE(wiphy->bands); i++) {
+		band = wiphy->bands[i];
+		if (band == NULL)
+			continue;
+
+		if (nmode)
+			brcmf_update_ht_cap(band, bw_cap, nchain);
+		if (vhtmode)
+			brcmf_update_vht_cap(band, bw_cap, nchain);
+	}
+
+	return 0;
+}
+
+static const struct ieee80211_txrx_stypes
+brcmf_txrx_stypes[NUM_NL80211_IFTYPES] = {
+	[NL80211_IFTYPE_STATION] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+		      BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+	},
+	[NL80211_IFTYPE_P2P_CLIENT] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+		      BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+	},
+	[NL80211_IFTYPE_P2P_GO] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+		      BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+		      BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+		      BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+		      BIT(IEEE80211_STYPE_AUTH >> 4) |
+		      BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+		      BIT(IEEE80211_STYPE_ACTION >> 4)
+	},
+	[NL80211_IFTYPE_P2P_DEVICE] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+		      BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+	}
+};
+
+/**
+ * brcmf_setup_ifmodes() - determine interface modes and combinations.
+ *
+ * @wiphy: wiphy object.
+ * @ifp: interface object needed for feat module api.
+ *
+ * The interface modes and combinations are determined dynamically here
+ * based on firmware functionality.
+ *
+ * no p2p and no mbss:
+ *
+ *	#STA <= 1, #AP <= 1, channels = 1, 2 total
+ *
+ * no p2p and mbss:
+ *
+ *	#STA <= 1, #AP <= 1, channels = 1, 2 total
+ *	#AP <= 4, matching BI, channels = 1, 4 total
+ *
+ * p2p, no mchan, and mbss:
+ *
+ *	#STA <= 1, #P2P-DEV <= 1, #{P2P-CL, P2P-GO} <= 1, channels = 1, 3 total
+ *	#STA <= 1, #P2P-DEV <= 1, #AP <= 1, #P2P-CL <= 1, channels = 1, 4 total
+ *	#AP <= 4, matching BI, channels = 1, 4 total
+ *
+ * p2p, mchan, and mbss:
+ *
+ *	#STA <= 1, #P2P-DEV <= 1, #{P2P-CL, P2P-GO} <= 1, channels = 2, 3 total
+ *	#STA <= 1, #P2P-DEV <= 1, #AP <= 1, #P2P-CL <= 1, channels = 1, 4 total
+ *	#AP <= 4, matching BI, channels = 1, 4 total
+ */
+static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp)
+{
+	struct ieee80211_iface_combination *combo = NULL;
+	struct ieee80211_iface_limit *c0_limits = NULL;
+	struct ieee80211_iface_limit *p2p_limits = NULL;
+	struct ieee80211_iface_limit *mbss_limits = NULL;
+	bool mbss, p2p;
+	int i, c, n_combos;
+
+	mbss = brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS);
+	p2p = brcmf_feat_is_enabled(ifp, BRCMF_FEAT_P2P);
+
+	n_combos = 1 + !!p2p + !!mbss;
+	combo = kcalloc(n_combos, sizeof(*combo), GFP_KERNEL);
+	if (!combo)
+		goto err;
+
+	c0_limits = kcalloc(p2p ? 3 : 2, sizeof(*c0_limits), GFP_KERNEL);
+	if (!c0_limits)
+		goto err;
+
+	if (p2p) {
+		p2p_limits = kcalloc(4, sizeof(*p2p_limits), GFP_KERNEL);
+		if (!p2p_limits)
+			goto err;
+	}
+
+	if (mbss) {
+		mbss_limits = kcalloc(1, sizeof(*mbss_limits), GFP_KERNEL);
+		if (!mbss_limits)
+			goto err;
+	}
+
+	wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+				 BIT(NL80211_IFTYPE_ADHOC) |
+				 BIT(NL80211_IFTYPE_AP);
+
+	c = 0;
+	i = 0;
+	combo[c].num_different_channels = 1;
+	c0_limits[i].max = 1;
+	c0_limits[i++].types = BIT(NL80211_IFTYPE_STATION);
+	if (p2p) {
+		if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MCHAN))
+			combo[c].num_different_channels = 2;
+		wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_CLIENT) |
+					  BIT(NL80211_IFTYPE_P2P_GO) |
+					  BIT(NL80211_IFTYPE_P2P_DEVICE);
+		c0_limits[i].max = 1;
+		c0_limits[i++].types = BIT(NL80211_IFTYPE_P2P_DEVICE);
+		c0_limits[i].max = 1;
+		c0_limits[i++].types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
+				       BIT(NL80211_IFTYPE_P2P_GO);
+	} else {
+		c0_limits[i].max = 1;
+		c0_limits[i++].types = BIT(NL80211_IFTYPE_AP);
+	}
+	combo[c].max_interfaces = i;
+	combo[c].n_limits = i;
+	combo[c].limits = c0_limits;
+
+	if (p2p) {
+		c++;
+		i = 0;
+		combo[c].num_different_channels = 1;
+		p2p_limits[i].max = 1;
+		p2p_limits[i++].types = BIT(NL80211_IFTYPE_STATION);
+		p2p_limits[i].max = 1;
+		p2p_limits[i++].types = BIT(NL80211_IFTYPE_AP);
+		p2p_limits[i].max = 1;
+		p2p_limits[i++].types = BIT(NL80211_IFTYPE_P2P_CLIENT);
+		p2p_limits[i].max = 1;
+		p2p_limits[i++].types = BIT(NL80211_IFTYPE_P2P_DEVICE);
+		combo[c].max_interfaces = i;
+		combo[c].n_limits = i;
+		combo[c].limits = p2p_limits;
+	}
+
+	if (mbss) {
+		c++;
+		combo[c].beacon_int_infra_match = true;
+		combo[c].num_different_channels = 1;
+		mbss_limits[0].max = 4;
+		mbss_limits[0].types = BIT(NL80211_IFTYPE_AP);
+		combo[c].max_interfaces = 4;
+		combo[c].n_limits = 1;
+		combo[c].limits = mbss_limits;
+	}
+	wiphy->n_iface_combinations = n_combos;
+	wiphy->iface_combinations = combo;
+	return 0;
+
+err:
+	kfree(c0_limits);
+	kfree(p2p_limits);
+	kfree(mbss_limits);
+	kfree(combo);
+	return -ENOMEM;
+}
+
+static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
+{
+	/* scheduled scan settings */
+	wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT;
+	wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT;
+	wiphy->max_sched_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX;
+	wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+}
+
+#ifdef CONFIG_PM
+static const struct wiphy_wowlan_support brcmf_wowlan_support = {
+	.flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
+	.n_patterns = BRCMF_WOWL_MAXPATTERNS,
+	.pattern_max_len = BRCMF_WOWL_MAXPATTERNSIZE,
+	.pattern_min_len = 1,
+	.max_pkt_offset = 1500,
+};
+#endif
+
+static void brcmf_wiphy_wowl_params(struct wiphy *wiphy)
+{
+#ifdef CONFIG_PM
+	/* wowl settings */
+	wiphy->wowlan = &brcmf_wowlan_support;
+#endif
+}
+
+static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
+{
+	struct brcmf_pub *drvr = ifp->drvr;
+	const struct ieee80211_iface_combination *combo;
+	struct ieee80211_supported_band *band;
+	u16 max_interfaces = 0;
+	__le32 bandlist[3];
+	u32 n_bands;
+	int err, i;
+
+	wiphy->max_scan_ssids = WL_NUM_SCAN_MAX;
+	wiphy->max_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX;
+	wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
+
+	err = brcmf_setup_ifmodes(wiphy, ifp);
+	if (err)
+		return err;
+
+	for (i = 0, combo = wiphy->iface_combinations;
+	     i < wiphy->n_iface_combinations; i++, combo++) {
+		max_interfaces = max(max_interfaces, combo->max_interfaces);
+	}
+
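+	/* derive one address per interface from the primary MAC: secondary
+	 * addresses get the locally administered bit set and the last octet
+	 * XORed with the interface index
+	 */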
+	for (i = 0; i < max_interfaces && i < ARRAY_SIZE(drvr->addresses);
+	     i++) {
+		u8 *addr = drvr->addresses[i].addr;
+
+		memcpy(addr, drvr->mac, ETH_ALEN);
+		if (i) {
+			addr[0] |= BIT(1);
+			addr[ETH_ALEN - 1] ^= i;
+		}
+	}
+	wiphy->addresses = drvr->addresses;
+	wiphy->n_addresses = i;
+
+	wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+	wiphy->cipher_suites = __wl_cipher_suites;
+	wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
+	wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT |
+			WIPHY_FLAG_OFFCHAN_TX |
+			WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
+			WIPHY_FLAG_SUPPORTS_TDLS;
+	if (!brcmf_roamoff)
+		wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM;
+	wiphy->mgmt_stypes = brcmf_txrx_stypes;
+	wiphy->max_remain_on_channel_duration = 5000;
+	if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_PNO))
+		brcmf_wiphy_pno_params(wiphy);
+
+	/* vendor commands/events support */
+	wiphy->vendor_commands = brcmf_vendor_cmds;
+	wiphy->n_vendor_commands = BRCMF_VNDR_CMDS_LAST - 1;
+
+	if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL))
+		brcmf_wiphy_wowl_params(wiphy);
+
+	err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BANDLIST, &bandlist,
+				     sizeof(bandlist));
+	if (err) {
+		brcmf_err("could not obtain band info: err=%d\n", err);
+		return err;
+	}
+	/* first entry in bandlist is number of bands */
+	n_bands = le32_to_cpu(bandlist[0]);
+	for (i = 1; i <= n_bands && i < ARRAY_SIZE(bandlist); i++) {
+		if (bandlist[i] == cpu_to_le32(WLC_BAND_2G)) {
+			band = kmemdup(&__wl_band_2ghz, sizeof(__wl_band_2ghz),
+				       GFP_KERNEL);
+			if (!band)
+				return -ENOMEM;
+
+			band->channels = kmemdup(&__wl_2ghz_channels,
+						 sizeof(__wl_2ghz_channels),
+						 GFP_KERNEL);
+			if (!band->channels) {
+				kfree(band);
+				return -ENOMEM;
+			}
+
+			band->n_channels = ARRAY_SIZE(__wl_2ghz_channels);
+			wiphy->bands[IEEE80211_BAND_2GHZ] = band;
+		}
+		if (bandlist[i] == cpu_to_le32(WLC_BAND_5G)) {
+			band = kmemdup(&__wl_band_5ghz, sizeof(__wl_band_5ghz),
+				       GFP_KERNEL);
+			if (!band)
+				return -ENOMEM;
+
+			band->channels = kmemdup(&__wl_5ghz_channels,
+						 sizeof(__wl_5ghz_channels),
+						 GFP_KERNEL);
+			if (!band->channels) {
+				kfree(band);
+				return -ENOMEM;
+			}
+
+			band->n_channels = ARRAY_SIZE(__wl_5ghz_channels);
+			wiphy->bands[IEEE80211_BAND_5GHZ] = band;
+		}
+	}
+	err = brcmf_setup_wiphybands(wiphy);
+	return err;
+}
+
+static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
+{
+	struct net_device *ndev;
+	struct wireless_dev *wdev;
+	struct brcmf_if *ifp;
+	s32 power_mode;
+	s32 err = 0;
+
+	if (cfg->dongle_up)
+		return err;
+
+	ndev = cfg_to_ndev(cfg);
+	wdev = ndev->ieee80211_ptr;
+	ifp = netdev_priv(ndev);
+
+	/* make sure RF is ready for work */
+	brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 0);
+
+	brcmf_dongle_scantime(ifp, WL_SCAN_CHANNEL_TIME,
+			      WL_SCAN_UNASSOC_TIME, WL_SCAN_PASSIVE_TIME);
+
+	power_mode = cfg->pwr_save ? PM_FAST : PM_OFF;
+	err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PM, power_mode);
+	if (err)
+		goto default_conf_out;
+	brcmf_dbg(INFO, "power save set to %s\n",
+		  (power_mode ? "enabled" : "disabled"));
+
+	err = brcmf_dongle_roam(ifp, WL_BEACON_TIMEOUT);
+	if (err)
+		goto default_conf_out;
+	err = brcmf_cfg80211_change_iface(wdev->wiphy, ndev, wdev->iftype,
+					  NULL, NULL);
+	if (err)
+		goto default_conf_out;
+
+	brcmf_configure_arp_offload(ifp, true);
+
+	cfg->dongle_up = true;
+default_conf_out:
+	return err;
+}
+
+static s32 __brcmf_cfg80211_up(struct brcmf_if *ifp)
+{
+	set_bit(BRCMF_VIF_STATUS_READY, &ifp->vif->sme_state);
+
+	return brcmf_config_dongle(ifp->drvr->config);
+}
+
+static s32 __brcmf_cfg80211_down(struct brcmf_if *ifp)
+{
+	struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+
+	/*
+	 * While going down, if associated with AP disassociate
+	 * from AP to save power
+	 */
+	if (check_vif_up(ifp->vif)) {
+		brcmf_link_down(ifp->vif, WLAN_REASON_UNSPECIFIED);
+
+		/* Make sure WPA_Supplicant receives all the events
+		   generated by the DISASSOC call to the firmware, so
+		   the firmware and WPA_Supplicant states stay consistent
+		 */
+		brcmf_delay(500);
+	}
+
+	brcmf_abort_scanning(cfg);
+	clear_bit(BRCMF_VIF_STATUS_READY, &ifp->vif->sme_state);
+
+	return 0;
+}
+
+s32 brcmf_cfg80211_up(struct net_device *ndev)
+{
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+	s32 err = 0;
+
+	mutex_lock(&cfg->usr_sync);
+	err = __brcmf_cfg80211_up(ifp);
+	mutex_unlock(&cfg->usr_sync);
+
+	return err;
+}
+
+s32 brcmf_cfg80211_down(struct net_device *ndev)
+{
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+	s32 err = 0;
+
+	mutex_lock(&cfg->usr_sync);
+	err = __brcmf_cfg80211_down(ifp);
+	mutex_unlock(&cfg->usr_sync);
+
+	return err;
+}
+
+enum nl80211_iftype brcmf_cfg80211_get_iftype(struct brcmf_if *ifp)
+{
+	struct wireless_dev *wdev = &ifp->vif->wdev;
+
+	return wdev->iftype;
+}
+
+bool brcmf_get_vif_state_any(struct brcmf_cfg80211_info *cfg,
+			     unsigned long state)
+{
+	struct brcmf_cfg80211_vif *vif;
+
+	list_for_each_entry(vif, &cfg->vif_list, list) {
+		if (test_bit(state, &vif->sme_state))
+			return true;
+	}
+	return false;
+}
+
+static inline bool vif_event_equals(struct brcmf_cfg80211_vif_event *event,
+				    u8 action)
+{
+	u8 evt_action;
+
+	mutex_lock(&event->vif_event_lock);
+	evt_action = event->action;
+	mutex_unlock(&event->vif_event_lock);
+	return evt_action == action;
+}
+
+void brcmf_cfg80211_arm_vif_event(struct brcmf_cfg80211_info *cfg,
+				  struct brcmf_cfg80211_vif *vif)
+{
+	struct brcmf_cfg80211_vif_event *event = &cfg->vif_event;
+
+	mutex_lock(&event->vif_event_lock);
+	event->vif = vif;
+	event->action = 0;
+	mutex_unlock(&event->vif_event_lock);
+}
+
+bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg)
+{
+	struct brcmf_cfg80211_vif_event *event = &cfg->vif_event;
+	bool armed;
+
+	mutex_lock(&event->vif_event_lock);
+	armed = event->vif != NULL;
+	mutex_unlock(&event->vif_event_lock);
+
+	return armed;
+}
+
+int brcmf_cfg80211_wait_vif_event_timeout(struct brcmf_cfg80211_info *cfg,
+					  u8 action, ulong timeout)
+{
+	struct brcmf_cfg80211_vif_event *event = &cfg->vif_event;
+
+	return wait_event_timeout(event->vif_wq,
+				  vif_event_equals(event, action), timeout);
+}
+
+static void brcmf_cfg80211_reg_notifier(struct wiphy *wiphy,
+					struct regulatory_request *req)
+{
+	struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
+	struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
+	struct brcmf_fil_country_le ccreq;
+	int i;
+
+	brcmf_dbg(TRACE, "enter: initiator=%d, alpha=%c%c\n", req->initiator,
+		  req->alpha2[0], req->alpha2[1]);
+
+	/* ignore non-ISO3166 country codes */
+	for (i = 0; i < sizeof(req->alpha2); i++)
+		if (req->alpha2[i] < 'A' || req->alpha2[i] > 'Z') {
+			brcmf_err("not a ISO3166 code\n");
+			return;
+		}
+	memset(&ccreq, 0, sizeof(ccreq));
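+	/* rev -1: no specific regulatory revision is requested for the
+	 * country code, leaving the choice to firmware
+	 */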
+	ccreq.rev = cpu_to_le32(-1);
+	memcpy(ccreq.ccode, req->alpha2, sizeof(req->alpha2));
+	if (brcmf_fil_iovar_data_set(ifp, "country", &ccreq, sizeof(ccreq))) {
+		brcmf_err("firmware rejected country setting\n");
+		return;
+	}
+	brcmf_setup_wiphybands(wiphy);
+}
+
+static void brcmf_free_wiphy(struct wiphy *wiphy)
+{
+	int i;
+
+	if (!wiphy)
+		return;
+
+	if (wiphy->iface_combinations) {
+		for (i = 0; i < wiphy->n_iface_combinations; i++)
+			kfree(wiphy->iface_combinations[i].limits);
+	}
+	kfree(wiphy->iface_combinations);
+	if (wiphy->bands[IEEE80211_BAND_2GHZ]) {
+		kfree(wiphy->bands[IEEE80211_BAND_2GHZ]->channels);
+		kfree(wiphy->bands[IEEE80211_BAND_2GHZ]);
+	}
+	if (wiphy->bands[IEEE80211_BAND_5GHZ]) {
+		kfree(wiphy->bands[IEEE80211_BAND_5GHZ]->channels);
+		kfree(wiphy->bands[IEEE80211_BAND_5GHZ]);
+	}
+	wiphy_free(wiphy);
+}
+
+struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
+						  struct device *busdev,
+						  bool p2pdev_forced)
+{
+	struct net_device *ndev = brcmf_get_ifp(drvr, 0)->ndev;
+	struct brcmf_cfg80211_info *cfg;
+	struct wiphy *wiphy;
+	struct brcmf_cfg80211_vif *vif;
+	struct brcmf_if *ifp;
+	s32 err = 0;
+	s32 io_type;
+	u16 *cap = NULL;
+
+	if (!ndev) {
+		brcmf_err("ndev is invalid\n");
+		return NULL;
+	}
+
+	ifp = netdev_priv(ndev);
+	wiphy = wiphy_new(&wl_cfg80211_ops, sizeof(struct brcmf_cfg80211_info));
+	if (!wiphy) {
+		brcmf_err("Could not allocate wiphy device\n");
+		return NULL;
+	}
+	memcpy(wiphy->perm_addr, drvr->mac, ETH_ALEN);
+	set_wiphy_dev(wiphy, busdev);
+
+	cfg = wiphy_priv(wiphy);
+	cfg->wiphy = wiphy;
+	cfg->pub = drvr;
+	init_vif_event(&cfg->vif_event);
+	INIT_LIST_HEAD(&cfg->vif_list);
+
+	vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_STATION, false);
+	if (IS_ERR(vif))
+		goto wiphy_out;
+
+	vif->ifp = ifp;
+	vif->wdev.netdev = ndev;
+	ndev->ieee80211_ptr = &vif->wdev;
+	SET_NETDEV_DEV(ndev, wiphy_dev(cfg->wiphy));
+
+	err = wl_init_priv(cfg);
+	if (err) {
+		brcmf_err("Failed to init iwm_priv (%d)\n", err);
+		brcmf_free_vif(vif);
+		goto wiphy_out;
+	}
+	ifp->vif = vif;
+
+	/* determine d11 io type before wiphy setup */
+	err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_VERSION, &io_type);
+	if (err) {
+		brcmf_err("Failed to get D11 version (%d)\n", err);
+		goto priv_out;
+	}
+	cfg->d11inf.io_type = (u8)io_type;
+	brcmu_d11_attach(&cfg->d11inf);
+
+	err = brcmf_setup_wiphy(wiphy, ifp);
+	if (err < 0)
+		goto priv_out;
+
+	brcmf_dbg(INFO, "Registering custom regulatory\n");
+	wiphy->reg_notifier = brcmf_cfg80211_reg_notifier;
+	wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
+	wiphy_apply_custom_regulatory(wiphy, &brcmf_regdom);
+
+	/* firmware defaults to 40MHz disabled in the 2G band. Signal
+	 * cfg80211 here that we do support it and let it decide whether
+	 * it can be enabled. But first check if the device supports 2G
+	 * operation at all.
+	 */
+	if (wiphy->bands[IEEE80211_BAND_2GHZ]) {
+		cap = &wiphy->bands[IEEE80211_BAND_2GHZ]->ht_cap.cap;
+		*cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+	}
+	err = wiphy_register(wiphy);
+	if (err < 0) {
+		brcmf_err("Could not register wiphy device (%d)\n", err);
+		goto priv_out;
+	}
+
+	/* If cfg80211 didn't disable 40MHz HT CAP in wiphy_register(),
+	 * setup 40MHz in 2GHz band and enable OBSS scanning.
+	 */
+	if (cap && (*cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)) {
+		err = brcmf_enable_bw40_2g(cfg);
+		if (!err)
+			err = brcmf_fil_iovar_int_set(ifp, "obss_coex",
+						      BRCMF_OBSS_COEX_AUTO);
+		else
+			*cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+	}
+	/* p2p might require that "if-events" get processed by fweh. So
+	 * activate the already registered event handlers now and activate
+	 * the rest when initialization has completed. drvr->config needs to
+	 * be assigned before activating events.
+	 */
+	drvr->config = cfg;
+	err = brcmf_fweh_activate_events(ifp);
+	if (err) {
+		brcmf_err("FWEH activation failed (%d)\n", err);
+		goto wiphy_unreg_out;
+	}
+
+	err = brcmf_p2p_attach(cfg, p2pdev_forced);
+	if (err) {
+		brcmf_err("P2P initilisation failed (%d)\n", err);
+		goto wiphy_unreg_out;
+	}
+	err = brcmf_btcoex_attach(cfg);
+	if (err) {
+		brcmf_err("BT-coex initialisation failed (%d)\n", err);
+		brcmf_p2p_detach(&cfg->p2p);
+		goto wiphy_unreg_out;
+	}
+
+	err = brcmf_fil_iovar_int_set(ifp, "tdls_enable", 1);
+	if (err) {
+		brcmf_dbg(INFO, "TDLS not enabled (%d)\n", err);
+		wiphy->flags &= ~WIPHY_FLAG_SUPPORTS_TDLS;
+	} else {
+		brcmf_fweh_register(cfg->pub, BRCMF_E_TDLS_PEER_EVENT,
+				    brcmf_notify_tdls_peer_event);
+	}
+
+	/* (re-) activate FWEH event handling */
+	err = brcmf_fweh_activate_events(ifp);
+	if (err) {
+		brcmf_err("FWEH activation failed (%d)\n", err);
+		goto wiphy_unreg_out;
+	}
+
+	return cfg;
+
+wiphy_unreg_out:
+	wiphy_unregister(cfg->wiphy);
+priv_out:
+	wl_deinit_priv(cfg);
+	brcmf_free_vif(vif);
+	ifp->vif = NULL;
+wiphy_out:
+	brcmf_free_wiphy(wiphy);
+	return NULL;
+}
+
+void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg)
+{
+	if (!cfg)
+		return;
+
+	brcmf_btcoex_detach(cfg);
+	wiphy_unregister(cfg->wiphy);
+	wl_deinit_priv(cfg);
+	brcmf_free_wiphy(cfg->wiphy);
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h
new file mode 100644
index 0000000..6a878c8
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h
@@ -0,0 +1,504 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef BRCMFMAC_CFG80211_H
+#define BRCMFMAC_CFG80211_H
+
+/* for brcmu_d11inf */
+#include <brcmu_d11.h>
+
+#define WL_NUM_SCAN_MAX			10
+#define WL_NUM_PMKIDS_MAX		MAXPMKID
+#define WL_TLV_INFO_MAX			1024
+#define WL_BSS_INFO_MAX			2048
+#define WL_ASSOC_INFO_MAX		512	/* assoc related fil max buf */
+#define WL_EXTRA_BUF_MAX		2048
+#define WL_ROAM_TRIGGER_LEVEL		-75
+#define WL_ROAM_DELTA			20
+#define WL_BEACON_TIMEOUT		3
+
+#define WL_SCAN_CHANNEL_TIME		40
+#define WL_SCAN_UNASSOC_TIME		40
+#define WL_SCAN_PASSIVE_TIME		120
+
+#define WL_ESCAN_BUF_SIZE		(1024 * 64)
+#define WL_ESCAN_TIMER_INTERVAL_MS	10000 /* E-Scan timeout */
+
+#define WL_ESCAN_ACTION_START		1
+#define WL_ESCAN_ACTION_CONTINUE	2
+#define WL_ESCAN_ACTION_ABORT		3
+
+#define WL_AUTH_SHARED_KEY		1	/* d11 shared authentication */
+#define IE_MAX_LEN			512
+
+/* IE TLV processing */
+#define TLV_LEN_OFF			1	/* length offset */
+#define TLV_HDR_LEN			2	/* header length */
+#define TLV_BODY_OFF			2	/* body offset */
+#define TLV_OUI_LEN			3	/* oui id length */
+
+/* 802.11 Mgmt Packet flags */
+#define BRCMF_VNDR_IE_BEACON_FLAG	0x1
+#define BRCMF_VNDR_IE_PRBRSP_FLAG	0x2
+#define BRCMF_VNDR_IE_ASSOCRSP_FLAG	0x4
+#define BRCMF_VNDR_IE_AUTHRSP_FLAG	0x8
+#define BRCMF_VNDR_IE_PRBREQ_FLAG	0x10
+#define BRCMF_VNDR_IE_ASSOCREQ_FLAG	0x20
+/* vendor IE in IW advertisement protocol ID field */
+#define BRCMF_VNDR_IE_IWAPID_FLAG	0x40
+/* allow custom IE id */
+#define BRCMF_VNDR_IE_CUSTOM_FLAG	0x100
+
+/* P2P Action Frames flags (spec ordered) */
+#define BRCMF_VNDR_IE_GONREQ_FLAG     0x001000
+#define BRCMF_VNDR_IE_GONRSP_FLAG     0x002000
+#define BRCMF_VNDR_IE_GONCFM_FLAG     0x004000
+#define BRCMF_VNDR_IE_INVREQ_FLAG     0x008000
+#define BRCMF_VNDR_IE_INVRSP_FLAG     0x010000
+#define BRCMF_VNDR_IE_DISREQ_FLAG     0x020000
+#define BRCMF_VNDR_IE_DISRSP_FLAG     0x040000
+#define BRCMF_VNDR_IE_PRDREQ_FLAG     0x080000
+#define BRCMF_VNDR_IE_PRDRSP_FLAG     0x100000
+
+#define BRCMF_VNDR_IE_P2PAF_SHIFT	12
+
+#define BRCMF_MAX_DEFAULT_KEYS		4
+
+
+/**
+ * enum brcmf_scan_status - scan engine status
+ *
+ * @BRCMF_SCAN_STATUS_BUSY: scanning in progress on dongle.
+ * @BRCMF_SCAN_STATUS_ABORT: scan being aborted on dongle.
+ * @BRCMF_SCAN_STATUS_SUPPRESS: scanning is suppressed in driver.
+ */
+enum brcmf_scan_status {
+	BRCMF_SCAN_STATUS_BUSY,
+	BRCMF_SCAN_STATUS_ABORT,
+	BRCMF_SCAN_STATUS_SUPPRESS,
+};
+
+/* dongle configuration */
+struct brcmf_cfg80211_conf {
+	u32 frag_threshold;
+	u32 rts_threshold;
+	u32 retry_short;
+	u32 retry_long;
+	s32 tx_power;
+	struct ieee80211_channel channel;
+};
+
+/* basic structure of scan request */
+struct brcmf_cfg80211_scan_req {
+	struct brcmf_ssid_le ssid_le;
+};
+
+/* basic structure of information element */
+struct brcmf_cfg80211_ie {
+	u16 offset;
+	u8 buf[WL_TLV_INFO_MAX];
+};
+
+/* security information with currently associated ap */
+struct brcmf_cfg80211_security {
+	u32 wpa_versions;
+	u32 auth_type;
+	u32 cipher_pairwise;
+	u32 cipher_group;
+	u32 wpa_auth;
+};
+
+/**
+ * struct brcmf_cfg80211_profile - profile information.
+ *
+ * @ssid: ssid of associated/associating ap.
+ * @bssid: bssid of joined/joining ibss.
+ * @sec: security information.
+ * @key: key information
+ */
+struct brcmf_cfg80211_profile {
+	struct brcmf_ssid ssid;
+	u8 bssid[ETH_ALEN];
+	struct brcmf_cfg80211_security sec;
+	struct brcmf_wsec_key key[BRCMF_MAX_DEFAULT_KEYS];
+};
+
+/**
+ * enum brcmf_vif_status - bit indices for vif status.
+ *
+ * @BRCMF_VIF_STATUS_READY: ready for operation.
+ * @BRCMF_VIF_STATUS_CONNECTING: connect/join in progress.
+ * @BRCMF_VIF_STATUS_CONNECTED: connected/joined successfully.
+ * @BRCMF_VIF_STATUS_DISCONNECTING: disconnect/disable in progress.
+ * @BRCMF_VIF_STATUS_AP_CREATED: AP operation started.
+ */
+enum brcmf_vif_status {
+	BRCMF_VIF_STATUS_READY,
+	BRCMF_VIF_STATUS_CONNECTING,
+	BRCMF_VIF_STATUS_CONNECTED,
+	BRCMF_VIF_STATUS_DISCONNECTING,
+	BRCMF_VIF_STATUS_AP_CREATED
+};
+
+/**
+ * struct vif_saved_ie - holds saved IEs for a virtual interface.
+ *
+ * @probe_req_ie: IE info for probe request.
+ * @probe_res_ie: IE info for probe response.
+ * @beacon_ie: IE info for beacon frame.
+ * @probe_req_ie_len: IE info length for probe request.
+ * @probe_res_ie_len: IE info length for probe response.
+ * @beacon_ie_len: IE info length for beacon frame.
+ */
+struct vif_saved_ie {
+	u8  probe_req_ie[IE_MAX_LEN];
+	u8  probe_res_ie[IE_MAX_LEN];
+	u8  beacon_ie[IE_MAX_LEN];
+	u8  assoc_req_ie[IE_MAX_LEN];
+	u32 probe_req_ie_len;
+	u32 probe_res_ie_len;
+	u32 beacon_ie_len;
+	u32 assoc_req_ie_len;
+};
+
+/**
+ * struct brcmf_cfg80211_vif - virtual interface specific information.
+ *
+ * @ifp: lower layer interface pointer
+ * @wdev: wireless device.
+ * @profile: profile information.
+ * @roam_off: roaming state.
+ * @sme_state: SME state using enum brcmf_vif_status bits.
+ * @pm_block: power-management blocked.
+ * @list: linked list.
+ * @mgmt_rx_reg: registered rx mgmt frame types.
+ * @mbss: Multiple BSS type, set if not first AP (not relevant for P2P).
+ */
+struct brcmf_cfg80211_vif {
+	struct brcmf_if *ifp;
+	struct wireless_dev wdev;
+	struct brcmf_cfg80211_profile profile;
+	s32 roam_off;
+	unsigned long sme_state;
+	bool pm_block;
+	struct vif_saved_ie saved_ie;
+	struct list_head list;
+	u16 mgmt_rx_reg;
+	bool mbss;
+	int is_11d;
+};
+
+/* association inform */
+struct brcmf_cfg80211_connect_info {
+	u8 *req_ie;
+	s32 req_ie_len;
+	u8 *resp_ie;
+	s32 resp_ie_len;
+};
+
+/* assoc ie length */
+struct brcmf_cfg80211_assoc_ielen_le {
+	__le32 req_len;
+	__le32 resp_len;
+};
+
+/* wpa2 pmk list */
+struct brcmf_cfg80211_pmk_list {
+	struct pmkid_list pmkids;
+	struct pmkid foo[MAXPMKID - 1];
+};
+
+/* dongle escan state */
+enum wl_escan_state {
+	WL_ESCAN_STATE_IDLE,
+	WL_ESCAN_STATE_SCANNING
+};
+
+struct escan_info {
+	u32 escan_state;
+	u8 escan_buf[WL_ESCAN_BUF_SIZE];
+	struct wiphy *wiphy;
+	struct brcmf_if *ifp;
+	s32 (*run)(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp,
+		   struct cfg80211_scan_request *request, u16 action);
+};
+
+/**
+ * struct brcmf_pno_param_le - PNO scan configuration parameters
+ *
+ * @version: PNO parameters version.
+ * @scan_freq: scan frequency.
+ * @lost_network_timeout: #sec. to declare discovered network as lost.
+ * @flags: Bit field to control features of PFN such as sort criteria auto
+ *	enable switch and background scan.
+ * @rssi_margin: Margin to avoid jitter for choosing a PFN based on RSSI sort
+ *	criteria.
+ * @bestn: number of best networks in each scan.
+ * @mscan: number of scans recorded.
+ * @repeat: minimum number of scan intervals before scan frequency changes
+ *	in adaptive scan.
+ * @exp: exponent of 2 for maximum scan interval.
+ * @slow_freq: slow scan period.
+ */
+struct brcmf_pno_param_le {
+	__le32 version;
+	__le32 scan_freq;
+	__le32 lost_network_timeout;
+	__le16 flags;
+	__le16 rssi_margin;
+	u8 bestn;
+	u8 mscan;
+	u8 repeat;
+	u8 exp;
+	__le32 slow_freq;
+};
+
+/**
+ * struct brcmf_pno_net_param_le - scan parameters per preferred network.
+ *
+ * @ssid: ssid name and its length.
+ * @flags: bit2: hidden.
+ * @infra: BSS vs IBSS.
+ * @auth: Open vs Closed.
+ * @wpa_auth: WPA type.
+ * @wsec: wsec value.
+ */
+struct brcmf_pno_net_param_le {
+	struct brcmf_ssid_le ssid;
+	__le32 flags;
+	__le32 infra;
+	__le32 auth;
+	__le32 wpa_auth;
+	__le32 wsec;
+};
+
+/**
+ * struct brcmf_pno_net_info_le - information per found network.
+ *
+ * @bssid: BSS network identifier.
+ * @channel: channel number only.
+ * @SSID_len: length of ssid.
+ * @SSID: ssid characters.
+ * @RSSI: receive signal strength (in dBm).
+ * @timestamp: age in seconds.
+ */
+struct brcmf_pno_net_info_le {
+	u8 bssid[ETH_ALEN];
+	u8 channel;
+	u8 SSID_len;
+	u8 SSID[32];
+	__le16	RSSI;
+	__le16	timestamp;
+};
+
+/**
+ * struct brcmf_pno_scanresults_le - result returned in PNO NET FOUND event.
+ *
+ * @version: PNO version identifier.
+ * @status: indicates completion status of PNO scan.
+ * @count: amount of brcmf_pno_net_info_le entries appended.
+ */
+struct brcmf_pno_scanresults_le {
+	__le32 version;
+	__le32 status;
+	__le32 count;
+};
+
+/**
+ * struct brcmf_cfg80211_vif_event - virtual interface event information.
+ *
+ * @vif_wq: waitqueue awaiting interface event from firmware.
+ * @vif_event_lock: protects other members in this structure.
+ * @vif_complete: completion for net attach.
+ * @action: either add, change, or delete.
+ * @vif: virtual interface object related to the event.
+ */
+struct brcmf_cfg80211_vif_event {
+	wait_queue_head_t vif_wq;
+	struct mutex vif_event_lock;
+	u8 action;
+	struct brcmf_cfg80211_vif *vif;
+};
+
+/**
+ * struct brcmf_cfg80211_info - dongle private data of cfg80211 interface
+ *
+ * @wiphy: wiphy object for cfg80211 interface.
+ * @conf: dongle configuration.
+ * @p2p: peer-to-peer specific information.
+ * @btcoex: Bluetooth coexistence information.
+ * @scan_request: cfg80211 scan request object.
+ * @usr_sync: mainly for dongle up/down synchronization.
+ * @bss_list: bss_list holding scanned ap information.
+ * @scan_req_int: internal scan request object.
+ * @bss_info: bss information for cfg80211 layer.
+ * @ie: information element object for internal purpose.
+ * @conn_info: association info.
+ * @pmk_list: wpa2 pmk list.
+ * @scan_status: scan activity on the dongle.
+ * @pub: common driver information.
+ * @channel: current channel.
+ * @active_scan: current scan mode.
+ * @sched_escan: e-scan for scheduled scan support running.
+ * @ibss_starter: indicates this sta is ibss starter.
+ * @pwr_save: indicates whether the dongle supports power save mode.
+ * @dongle_up: indicates whether the dongle is up or not.
+ * @roam_on: on/off switch for dongle self-roaming.
+ * @scan_tried: indicates if first scan attempted.
+ * @dcmd_buf: dcmd buffer.
+ * @extra_buf: mainly to grab assoc information.
+ * @debugfsdir: debugfs folder for this device.
+ * @escan_info: escan information.
+ * @escan_timeout: timer to catch scan timeouts.
+ * @escan_timeout_work: scan timeout worker.
+ * @escan_ioctl_buf: dongle command buffer for escan commands.
+ * @vif_list: linked list of vif instances.
+ * @vif_cnt: number of vif instances.
+ * @vif_event: vif event signalling.
+ * @wowl_enabled: set during suspend when wowl is used.
+ * @pre_wowl_pmmode: intermediate storage of pm mode during wowl.
+ */
+struct brcmf_cfg80211_info {
+	struct wiphy *wiphy;
+	struct brcmf_cfg80211_conf *conf;
+	struct brcmf_p2p_info p2p;
+	struct brcmf_btcoex_info *btcoex;
+	struct cfg80211_scan_request *scan_request;
+	struct mutex usr_sync;
+	struct brcmf_cfg80211_scan_req scan_req_int;
+	struct wl_cfg80211_bss_info *bss_info;
+	struct brcmf_cfg80211_ie ie;
+	struct brcmf_cfg80211_connect_info conn_info;
+	struct brcmf_cfg80211_pmk_list *pmk_list;
+	unsigned long scan_status;
+	struct brcmf_pub *pub;
+	u32 channel;
+	bool active_scan;
+	bool sched_escan;
+	bool ibss_starter;
+	bool pwr_save;
+	bool dongle_up;
+	bool scan_tried;
+	u8 *dcmd_buf;
+	u8 *extra_buf;
+	struct dentry *debugfsdir;
+	struct escan_info escan_info;
+	struct timer_list escan_timeout;
+	struct work_struct escan_timeout_work;
+	u8 *escan_ioctl_buf;
+	struct list_head vif_list;
+	struct brcmf_cfg80211_vif_event vif_event;
+	struct completion vif_disabled;
+	struct brcmu_d11inf d11inf;
+	bool wowl_enabled;
+	u32 pre_wowl_pmmode;
+	struct brcmf_assoclist_le assoclist;
+};
+
+/**
+ * struct brcmf_tlv - tag_ID/length/value_buffer tuple.
+ *
+ * @id: tag identifier.
+ * @len: number of bytes in value buffer.
+ * @data: value buffer.
+ */
+struct brcmf_tlv {
+	u8 id;
+	u8 len;
+	u8 data[1];
+};
+
+static inline struct wiphy *cfg_to_wiphy(struct brcmf_cfg80211_info *cfg)
+{
+	return cfg->wiphy;
+}
+
+static inline struct brcmf_cfg80211_info *wiphy_to_cfg(struct wiphy *w)
+{
+	return (struct brcmf_cfg80211_info *)(wiphy_priv(w));
+}
+
+static inline struct brcmf_cfg80211_info *wdev_to_cfg(struct wireless_dev *wd)
+{
+	return (struct brcmf_cfg80211_info *)(wdev_priv(wd));
+}
+
+static inline
+struct net_device *cfg_to_ndev(struct brcmf_cfg80211_info *cfg)
+{
+	struct brcmf_cfg80211_vif *vif;
+	vif = list_first_entry(&cfg->vif_list, struct brcmf_cfg80211_vif, list);
+	return vif->wdev.netdev;
+}
+
+static inline struct brcmf_cfg80211_info *ndev_to_cfg(struct net_device *ndev)
+{
+	return wdev_to_cfg(ndev->ieee80211_ptr);
+}
+
+static inline struct brcmf_cfg80211_profile *ndev_to_prof(struct net_device *nd)
+{
+	struct brcmf_if *ifp = netdev_priv(nd);
+	return &ifp->vif->profile;
+}
+
+static inline struct brcmf_cfg80211_vif *ndev_to_vif(struct net_device *ndev)
+{
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	return ifp->vif;
+}
+
+static inline struct
+brcmf_cfg80211_connect_info *cfg_to_conn(struct brcmf_cfg80211_info *cfg)
+{
+	return &cfg->conn_info;
+}
+
+struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
+						  struct device *busdev,
+						  bool p2pdev_forced);
+void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg);
+s32 brcmf_cfg80211_up(struct net_device *ndev);
+s32 brcmf_cfg80211_down(struct net_device *ndev);
+enum nl80211_iftype brcmf_cfg80211_get_iftype(struct brcmf_if *ifp);
+
+struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
+					   enum nl80211_iftype type,
+					   bool pm_block);
+void brcmf_free_vif(struct brcmf_cfg80211_vif *vif);
+
+s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
+			  const u8 *vndr_ie_buf, u32 vndr_ie_len);
+s32 brcmf_vif_clear_mgmt_ies(struct brcmf_cfg80211_vif *vif);
+const struct brcmf_tlv *
+brcmf_parse_tlvs(const void *buf, int buflen, uint key);
+u16 channel_to_chanspec(struct brcmu_d11inf *d11inf,
+			struct ieee80211_channel *ch);
+bool brcmf_get_vif_state_any(struct brcmf_cfg80211_info *cfg,
+			     unsigned long state);
+void brcmf_cfg80211_arm_vif_event(struct brcmf_cfg80211_info *cfg,
+				  struct brcmf_cfg80211_vif *vif);
+bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg);
+int brcmf_cfg80211_wait_vif_event_timeout(struct brcmf_cfg80211_info *cfg,
+					  u8 action, ulong timeout);
+s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
+				struct brcmf_if *ifp, bool aborted,
+				bool fw_abort);
+void brcmf_set_mpc(struct brcmf_if *ndev, int mpc);
+void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg);
+void brcmf_cfg80211_free_netdev(struct net_device *ndev);
+
+#endif /* BRCMFMAC_CFG80211_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
new file mode 100644
index 0000000..f04833d
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
@@ -0,0 +1,1331 @@
+/*
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/ssb/ssb_regs.h>
+#include <linux/bcma/bcma.h>
+#include <linux/bcma/bcma_regs.h>
+
+#include <defs.h>
+#include <soc.h>
+#include <brcm_hw_ids.h>
+#include <brcmu_utils.h>
+#include <chipcommon.h>
+#include "debug.h"
+#include "chip.h"
+
+/* SOC Interconnect types (aka chip types) */
+#define SOCI_SB		0
+#define SOCI_AI		1
+
+/* PL-368 DMP definitions */
+#define DMP_DESC_TYPE_MSK	0x0000000F
+#define  DMP_DESC_EMPTY		0x00000000
+#define  DMP_DESC_VALID		0x00000001
+#define  DMP_DESC_COMPONENT	0x00000001
+#define  DMP_DESC_MASTER_PORT	0x00000003
+#define  DMP_DESC_ADDRESS	0x00000005
+#define  DMP_DESC_ADDRSIZE_GT32	0x00000008
+#define  DMP_DESC_EOT		0x0000000F
+
+#define DMP_COMP_DESIGNER	0xFFF00000
+#define DMP_COMP_DESIGNER_S	20
+#define DMP_COMP_PARTNUM	0x000FFF00
+#define DMP_COMP_PARTNUM_S	8
+#define DMP_COMP_CLASS		0x000000F0
+#define DMP_COMP_CLASS_S	4
+#define DMP_COMP_REVISION	0xFF000000
+#define DMP_COMP_REVISION_S	24
+#define DMP_COMP_NUM_SWRAP	0x00F80000
+#define DMP_COMP_NUM_SWRAP_S	19
+#define DMP_COMP_NUM_MWRAP	0x0007C000
+#define DMP_COMP_NUM_MWRAP_S	14
+#define DMP_COMP_NUM_SPORT	0x00003E00
+#define DMP_COMP_NUM_SPORT_S	9
+#define DMP_COMP_NUM_MPORT	0x000001F0
+#define DMP_COMP_NUM_MPORT_S	4
+
+#define DMP_MASTER_PORT_UID	0x0000FF00
+#define DMP_MASTER_PORT_UID_S	8
+#define DMP_MASTER_PORT_NUM	0x000000F0
+#define DMP_MASTER_PORT_NUM_S	4
+
+#define DMP_SLAVE_ADDR_BASE	0xFFFFF000
+#define DMP_SLAVE_ADDR_BASE_S	12
+#define DMP_SLAVE_PORT_NUM	0x00000F00
+#define DMP_SLAVE_PORT_NUM_S	8
+#define DMP_SLAVE_TYPE		0x000000C0
+#define DMP_SLAVE_TYPE_S	6
+#define  DMP_SLAVE_TYPE_SLAVE	0
+#define  DMP_SLAVE_TYPE_BRIDGE	1
+#define  DMP_SLAVE_TYPE_SWRAP	2
+#define  DMP_SLAVE_TYPE_MWRAP	3
+#define DMP_SLAVE_SIZE_TYPE	0x00000030
+#define DMP_SLAVE_SIZE_TYPE_S	4
+#define  DMP_SLAVE_SIZE_4K	0
+#define  DMP_SLAVE_SIZE_8K	1
+#define  DMP_SLAVE_SIZE_16K	2
+#define  DMP_SLAVE_SIZE_DESC	3
+
+/* EROM CompIdentB */
+#define CIB_REV_MASK		0xff000000
+#define CIB_REV_SHIFT		24
+
+/* ARM CR4 core specific control flag bits */
+#define ARMCR4_BCMA_IOCTL_CPUHALT	0x0020
+
+/* D11 core specific control flag bits */
+#define D11_BCMA_IOCTL_PHYCLOCKEN	0x0004
+#define D11_BCMA_IOCTL_PHYRESET		0x0008
+
+/* chip core base & ramsize */
+/* bcm4329 */
+/* SDIO device core, ID 0x829 */
+#define BCM4329_CORE_BUS_BASE		0x18011000
+/* internal memory core, ID 0x80e */
+#define BCM4329_CORE_SOCRAM_BASE	0x18003000
+/* ARM Cortex M3 core, ID 0x82a */
+#define BCM4329_CORE_ARM_BASE		0x18002000
+
+/* Max possibly supported memory size (limited by IO mapped memory) */
+#define BRCMF_CHIP_MAX_MEMSIZE		(4 * 1024 * 1024)
+
+#define CORE_SB(base, field) \
+		(base + SBCONFIGOFF + offsetof(struct sbconfig, field))
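+/* assemble the core revision from the high and low rev fields of sbidhigh */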
+#define	SBCOREREV(sbidh) \
+	((((sbidh) & SSB_IDHIGH_RCHI) >> SSB_IDHIGH_RCHI_SHIFT) | \
+	  ((sbidh) & SSB_IDHIGH_RCLO))
+
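+/* PAD is a padding macro (from the shared brcm80211 headers) that expands
+ * to a uniquely named filler word per use
+ */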
+struct sbconfig {
+	u32 PAD[2];
+	u32 sbipsflag;	/* initiator port ocp slave flag */
+	u32 PAD[3];
+	u32 sbtpsflag;	/* target port ocp slave flag */
+	u32 PAD[11];
+	u32 sbtmerrloga;	/* (sonics >= 2.3) */
+	u32 PAD;
+	u32 sbtmerrlog;	/* (sonics >= 2.3) */
+	u32 PAD[3];
+	u32 sbadmatch3;	/* address match3 */
+	u32 PAD;
+	u32 sbadmatch2;	/* address match2 */
+	u32 PAD;
+	u32 sbadmatch1;	/* address match1 */
+	u32 PAD[7];
+	u32 sbimstate;	/* initiator agent state */
+	u32 sbintvec;	/* interrupt mask */
+	u32 sbtmstatelow;	/* target state */
+	u32 sbtmstatehigh;	/* target state */
+	u32 sbbwa0;		/* bandwidth allocation table0 */
+	u32 PAD;
+	u32 sbimconfiglow;	/* initiator configuration */
+	u32 sbimconfighigh;	/* initiator configuration */
+	u32 sbadmatch0;	/* address match0 */
+	u32 PAD;
+	u32 sbtmconfiglow;	/* target configuration */
+	u32 sbtmconfighigh;	/* target configuration */
+	u32 sbbconfig;	/* broadcast configuration */
+	u32 PAD;
+	u32 sbbstate;	/* broadcast state */
+	u32 PAD[3];
+	u32 sbactcnfg;	/* activate configuration */
+	u32 PAD[3];
+	u32 sbflagst;	/* current sbflags */
+	u32 PAD[3];
+	u32 sbidlow;		/* identification */
+	u32 sbidhigh;	/* identification */
+};
+
+/* bankidx and bankinfo reg defines corerev >= 8 */
+#define SOCRAM_BANKINFO_RETNTRAM_MASK	0x00010000
+#define SOCRAM_BANKINFO_SZMASK		0x0000007f
+#define SOCRAM_BANKIDX_ROM_MASK		0x00000100
+
+#define SOCRAM_BANKIDX_MEMTYPE_SHIFT	8
+/* socram bankinfo memtype */
+#define SOCRAM_MEMTYPE_RAM		0
+#define SOCRAM_MEMTYPE_R0M		1
+#define SOCRAM_MEMTYPE_DEVRAM		2
+
+#define SOCRAM_BANKINFO_SZBASE		8192
+#define SRCI_LSS_MASK		0x00f00000
+#define SRCI_LSS_SHIFT		20
+#define	SRCI_SRNB_MASK		0xf0
+#define	SRCI_SRNB_SHIFT		4
+#define	SRCI_SRBSZ_MASK		0xf
+#define	SRCI_SRBSZ_SHIFT	0
+#define SR_BSZ_BASE		14
+
+struct sbsocramregs {
+	u32 coreinfo;
+	u32 bwalloc;
+	u32 extracoreinfo;
+	u32 biststat;
+	u32 bankidx;
+	u32 standbyctrl;
+
+	u32 errlogstatus;	/* rev 6 */
+	u32 errlogaddr;	/* rev 6 */
+	/* used for patching rev 3 & 5 */
+	u32 cambankidx;
+	u32 cambankstandbyctrl;
+	u32 cambankpatchctrl;
+	u32 cambankpatchtblbaseaddr;
+	u32 cambankcmdreg;
+	u32 cambankdatareg;
+	u32 cambankmaskreg;
+	u32 PAD[1];
+	u32 bankinfo;	/* corerev >= 8 */
+	u32 bankpda;
+	u32 PAD[14];
+	u32 extmemconfig;
+	u32 extmemparitycsr;
+	u32 extmemparityerrdata;
+	u32 extmemparityerrcnt;
+	u32 extmemwrctrlandsize;
+	u32 PAD[84];
+	u32 workaround;
+	u32 pwrctl;		/* corerev >= 2 */
+	u32 PAD[133];
+	u32 sr_control;     /* corerev >= 15 */
+	u32 sr_status;      /* corerev >= 15 */
+	u32 sr_address;     /* corerev >= 15 */
+	u32 sr_data;        /* corerev >= 15 */
+};
+
+#define SOCRAMREGOFFS(_f)	offsetof(struct sbsocramregs, _f)
+#define SYSMEMREGOFFS(_f)	offsetof(struct sbsocramregs, _f)
+
+#define ARMCR4_CAP		(0x04)
+#define ARMCR4_BANKIDX		(0x40)
+#define ARMCR4_BANKINFO		(0x44)
+#define ARMCR4_BANKPDA		(0x4C)
+
+#define	ARMCR4_TCBBNB_MASK	0xf0
+#define	ARMCR4_TCBBNB_SHIFT	4
+#define	ARMCR4_TCBANB_MASK	0xf
+#define	ARMCR4_TCBANB_SHIFT	0
+
+#define	ARMCR4_BSZ_MASK		0x3f
+#define	ARMCR4_BSZ_MULT		8192
+
+struct brcmf_core_priv {
+	struct brcmf_core pub;
+	u32 wrapbase;
+	struct list_head list;
+	struct brcmf_chip_priv *chip;
+};
+
+struct brcmf_chip_priv {
+	struct brcmf_chip pub;
+	const struct brcmf_buscore_ops *ops;
+	void *ctx;
+	/* assured first core is chipcommon, second core is buscore */
+	struct list_head cores;
+	u16 num_cores;
+
+	bool (*iscoreup)(struct brcmf_core_priv *core);
+	void (*coredisable)(struct brcmf_core_priv *core, u32 prereset,
+			    u32 reset);
+	void (*resetcore)(struct brcmf_core_priv *core, u32 prereset, u32 reset,
+			  u32 postreset);
+};
+
+static void brcmf_chip_sb_corerev(struct brcmf_chip_priv *ci,
+				  struct brcmf_core *core)
+{
+	u32 regdata;
+
+	regdata = ci->ops->read32(ci->ctx, CORE_SB(core->base, sbidhigh));
+	core->rev = SBCOREREV(regdata);
+}
+
+static bool brcmf_chip_sb_iscoreup(struct brcmf_core_priv *core)
+{
+	struct brcmf_chip_priv *ci;
+	u32 regdata;
+	u32 address;
+
+	ci = core->chip;
+	address = CORE_SB(core->pub.base, sbtmstatelow);
+	regdata = ci->ops->read32(ci->ctx, address);
+	regdata &= (SSB_TMSLOW_RESET | SSB_TMSLOW_REJECT |
+		    SSB_IMSTATE_REJECT | SSB_TMSLOW_CLOCK);
+	return SSB_TMSLOW_CLOCK == regdata;
+}
+
+static bool brcmf_chip_ai_iscoreup(struct brcmf_core_priv *core)
+{
+	struct brcmf_chip_priv *ci;
+	u32 regdata;
+	bool ret;
+
+	ci = core->chip;
+	regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
+	ret = (regdata & (BCMA_IOCTL_FGC | BCMA_IOCTL_CLK)) == BCMA_IOCTL_CLK;
+
+	regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL);
+	ret = ret && ((regdata & BCMA_RESET_CTL_RESET) == 0);
+
+	return ret;
+}
+
+static void brcmf_chip_sb_coredisable(struct brcmf_core_priv *core,
+				      u32 prereset, u32 reset)
+{
+	struct brcmf_chip_priv *ci;
+	u32 val, base;
+
+	ci = core->chip;
+	base = core->pub.base;
+	val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+	if (val & SSB_TMSLOW_RESET)
+		return;
+
+	val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+	if ((val & SSB_TMSLOW_CLOCK) != 0) {
+		/*
+		 * set target reject and spin until busy is clear
+		 * (preserve core-specific bits)
+		 */
+		val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+		ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
+					 val | SSB_TMSLOW_REJECT);
+
+		val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+		udelay(1);
+		SPINWAIT((ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatehigh))
+			  & SSB_TMSHIGH_BUSY), 100000);
+
+		val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatehigh));
+		if (val & SSB_TMSHIGH_BUSY)
+			brcmf_err("core state still busy\n");
+
+		val = ci->ops->read32(ci->ctx, CORE_SB(base, sbidlow));
+		if (val & SSB_IDLOW_INITIATOR) {
+			val = ci->ops->read32(ci->ctx,
+					      CORE_SB(base, sbimstate));
+			val |= SSB_IMSTATE_REJECT;
+			ci->ops->write32(ci->ctx,
+					 CORE_SB(base, sbimstate), val);
+			val = ci->ops->read32(ci->ctx,
+					      CORE_SB(base, sbimstate));
+			udelay(1);
+			SPINWAIT((ci->ops->read32(ci->ctx,
+						  CORE_SB(base, sbimstate)) &
+				  SSB_IMSTATE_BUSY), 100000);
+		}
+
+		/* set reset and reject while enabling the clocks */
+		val = SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
+		      SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET;
+		ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow), val);
+		val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+		udelay(10);
+
+		/* clear the initiator reject bit */
+		val = ci->ops->read32(ci->ctx, CORE_SB(base, sbidlow));
+		if (val & SSB_IDLOW_INITIATOR) {
+			val = ci->ops->read32(ci->ctx,
+					      CORE_SB(base, sbimstate));
+			val &= ~SSB_IMSTATE_REJECT;
+			ci->ops->write32(ci->ctx,
+					 CORE_SB(base, sbimstate), val);
+		}
+	}
+
+	/* leave reset and reject asserted */
+	ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
+			 (SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET));
+	udelay(1);
+}
+
+static void brcmf_chip_ai_coredisable(struct brcmf_core_priv *core,
+				      u32 prereset, u32 reset)
+{
+	struct brcmf_chip_priv *ci;
+	u32 regdata;
+
+	ci = core->chip;
+
+	/* if core is already in reset, skip reset */
+	regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL);
+	if ((regdata & BCMA_RESET_CTL_RESET) != 0)
+		goto in_reset_configure;
+
+	/* configure reset */
+	ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
+			 prereset | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
+	ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
+
+	/* put in reset */
+	ci->ops->write32(ci->ctx, core->wrapbase + BCMA_RESET_CTL,
+			 BCMA_RESET_CTL_RESET);
+	usleep_range(10, 20);
+
+	/* wait till reset is 1 */
+	SPINWAIT(ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) !=
+		 BCMA_RESET_CTL_RESET, 300);
+
+in_reset_configure:
+	/* in-reset configure */
+	ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
+			 reset | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
+	ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
+}
+
+static void brcmf_chip_sb_resetcore(struct brcmf_core_priv *core, u32 prereset,
+				    u32 reset, u32 postreset)
+{
+	struct brcmf_chip_priv *ci;
+	u32 regdata;
+	u32 base;
+
+	ci = core->chip;
+	base = core->pub.base;
+	/*
+	 * Must do the disable sequence first to work for
+	 * arbitrary current core state.
+	 */
+	brcmf_chip_sb_coredisable(core, 0, 0);
+
+	/*
+	 * Now do the initialization sequence.
+	 * set reset while enabling the clock and
+	 * forcing them on throughout the core
+	 */
+	ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
+			 SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
+			 SSB_TMSLOW_RESET);
+	regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+	udelay(1);
+
+	/* clear any serror */
+	regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatehigh));
+	if (regdata & SSB_TMSHIGH_SERR)
+		ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatehigh), 0);
+
+	regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbimstate));
+	if (regdata & (SSB_IMSTATE_IBE | SSB_IMSTATE_TO)) {
+		regdata &= ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO);
+		ci->ops->write32(ci->ctx, CORE_SB(base, sbimstate), regdata);
+	}
+
+	/* clear reset and allow it to propagate throughout the core */
+	ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
+			 SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK);
+	regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+	udelay(1);
+
+	/* leave clock enabled */
+	ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
+			 SSB_TMSLOW_CLOCK);
+	regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+	udelay(1);
+}
+
+static void brcmf_chip_ai_resetcore(struct brcmf_core_priv *core, u32 prereset,
+				    u32 reset, u32 postreset)
+{
+	struct brcmf_chip_priv *ci;
+	int count;
+
+	ci = core->chip;
+
+	/* must disable first to work for arbitrary current core state */
+	brcmf_chip_ai_coredisable(core, prereset, reset);
+
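+	/* clear the reset repeatedly until the backplane reports the core
+	 * out of reset; bound the loop so it cannot spin forever
+	 */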
+	count = 0;
+	while (ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) &
+	       BCMA_RESET_CTL_RESET) {
+		ci->ops->write32(ci->ctx, core->wrapbase + BCMA_RESET_CTL, 0);
+		count++;
+		if (count > 50)
+			break;
+		usleep_range(40, 60);
+	}
+
+	ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
+			 postreset | BCMA_IOCTL_CLK);
+	ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
+}
+
+static char *brcmf_chip_name(uint chipid, char *buf, uint len)
+{
+	const char *fmt;
+
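+	/* chip IDs between 0x4000 and 0xa000 follow the hexadecimal BCM43xx
+	 * naming convention (e.g. 0x4329 -> "4329"); IDs outside that range,
+	 * such as 43602, are printed in decimal
+	 */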
+	fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
+	snprintf(buf, len, fmt, chipid);
+	return buf;
+}
+
+static struct brcmf_core *brcmf_chip_add_core(struct brcmf_chip_priv *ci,
+					      u16 coreid, u32 base,
+					      u32 wrapbase)
+{
+	struct brcmf_core_priv *core;
+
+	core = kzalloc(sizeof(*core), GFP_KERNEL);
+	if (!core)
+		return ERR_PTR(-ENOMEM);
+
+	core->pub.id = coreid;
+	core->pub.base = base;
+	core->chip = ci;
+	core->wrapbase = wrapbase;
+
+	list_add_tail(&core->list, &ci->cores);
+	return &core->pub;
+}
+
+/* safety check for chipinfo */
+static int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
+{
+	struct brcmf_core_priv *core;
+	bool need_socram = false;
+	bool has_socram = false;
+	bool cpu_found = false;
+	int idx = 1;
+
+	list_for_each_entry(core, &ci->cores, list) {
+		brcmf_dbg(INFO, " [%-2d] core 0x%x:%-2d base 0x%08x wrap 0x%08x\n",
+			  idx++, core->pub.id, core->pub.rev, core->pub.base,
+			  core->wrapbase);
+
+		switch (core->pub.id) {
+		case BCMA_CORE_ARM_CM3:
+			cpu_found = true;
+			need_socram = true;
+			break;
+		case BCMA_CORE_INTERNAL_MEM:
+			has_socram = true;
+			break;
+		case BCMA_CORE_ARM_CR4:
+			cpu_found = true;
+			break;
+		case BCMA_CORE_ARM_CA7:
+			cpu_found = true;
+			break;
+		default:
+			break;
+		}
+	}
+
+	if (!cpu_found) {
+		brcmf_err("CPU core not detected\n");
+		return -ENXIO;
+	}
+	/* check RAM core presence for ARM CM3 core */
+	if (need_socram && !has_socram) {
+		brcmf_err("RAM core not provided with ARM CM3 core\n");
+		return -ENODEV;
+	}
+	return 0;
+}
+
+static u32 brcmf_chip_core_read32(struct brcmf_core_priv *core, u16 reg)
+{
+	return core->chip->ops->read32(core->chip->ctx, core->pub.base + reg);
+}
+
+static void brcmf_chip_core_write32(struct brcmf_core_priv *core,
+				    u16 reg, u32 val)
+{
+	core->chip->ops->write32(core->chip->ctx, core->pub.base + reg, val);
+}
+
+static bool brcmf_chip_socram_banksize(struct brcmf_core_priv *core, u8 idx,
+				       u32 *banksize)
+{
+	u32 bankinfo;
+	u32 bankidx = (SOCRAM_MEMTYPE_RAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
+
+	bankidx |= idx;
+	brcmf_chip_core_write32(core, SOCRAMREGOFFS(bankidx), bankidx);
+	bankinfo = brcmf_chip_core_read32(core, SOCRAMREGOFFS(bankinfo));
+	*banksize = (bankinfo & SOCRAM_BANKINFO_SZMASK) + 1;
+	*banksize *= SOCRAM_BANKINFO_SZBASE;
+	return !!(bankinfo & SOCRAM_BANKINFO_RETNTRAM_MASK);
+}
+
+static void brcmf_chip_socram_ramsize(struct brcmf_core_priv *sr, u32 *ramsize,
+				      u32 *srsize)
+{
+	u32 coreinfo;
+	uint nb, banksize, lss;
+	bool retent;
+	int i;
+
+	*ramsize = 0;
+	*srsize = 0;
+
+	if (WARN_ON(sr->pub.rev < 4))
+		return;
+
+	if (!brcmf_chip_iscoreup(&sr->pub))
+		brcmf_chip_resetcore(&sr->pub, 0, 0, 0);
+
+	/* Get info for determining size */
+	coreinfo = brcmf_chip_core_read32(sr, SOCRAMREGOFFS(coreinfo));
+	nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+
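+	/* core revisions up to 7 and revision 12 encode a uniform bank size
+	 * in the coreinfo register; newer revisions are sized bank by bank,
+	 * which also reveals how much of the RAM is retention capable
+	 */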
+	if ((sr->pub.rev <= 7) || (sr->pub.rev == 12)) {
+		banksize = (coreinfo & SRCI_SRBSZ_MASK);
+		lss = (coreinfo & SRCI_LSS_MASK) >> SRCI_LSS_SHIFT;
+		if (lss != 0)
+			nb--;
+		*ramsize = nb * (1 << (banksize + SR_BSZ_BASE));
+		if (lss != 0)
+			*ramsize += (1 << ((lss - 1) + SR_BSZ_BASE));
+	} else {
+		nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+		for (i = 0; i < nb; i++) {
+			retent = brcmf_chip_socram_banksize(sr, i, &banksize);
+			*ramsize += banksize;
+			if (retent)
+				*srsize += banksize;
+		}
+	}
+
+	/* hardcoded save&restore memory sizes */
+	switch (sr->chip->pub.chip) {
+	case BRCM_CC_4334_CHIP_ID:
+		if (sr->chip->pub.chiprev < 2)
+			*srsize = (32 * 1024);
+		break;
+	case BRCM_CC_43430_CHIP_ID:
+		/* assume sr for now as we cannot check
+		 * firmware sr capability at this point.
+		 */
+		*srsize = (64 * 1024);
+		break;
+	default:
+		break;
+	}
+}
+
+/** Return the SYS MEM size */
+static u32 brcmf_chip_sysmem_ramsize(struct brcmf_core_priv *sysmem)
+{
+	u32 memsize = 0;
+	u32 coreinfo;
+	u32 idx;
+	u32 nb;
+	u32 banksize;
+
+	if (!brcmf_chip_iscoreup(&sysmem->pub))
+		brcmf_chip_resetcore(&sysmem->pub, 0, 0, 0);
+
+	coreinfo = brcmf_chip_core_read32(sysmem, SYSMEMREGOFFS(coreinfo));
+	nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+
+	for (idx = 0; idx < nb; idx++) {
+		brcmf_chip_socram_banksize(sysmem, idx, &banksize);
+		memsize += banksize;
+	}
+
+	return memsize;
+}
+
+/** Return the TCM-RAM size of the ARMCR4 core. */
+static u32 brcmf_chip_tcm_ramsize(struct brcmf_core_priv *cr4)
+{
+	u32 corecap;
+	u32 memsize = 0;
+	u32 nab;
+	u32 nbb;
+	u32 totb;
+	u32 bxinfo;
+	u32 idx;
+
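+	/* total TCM size is the sum of all bank A and bank B banks; each
+	 * bank reports its size in ARMCR4_BANKINFO once selected through
+	 * ARMCR4_BANKIDX
+	 */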
+	corecap = brcmf_chip_core_read32(cr4, ARMCR4_CAP);
+
+	nab = (corecap & ARMCR4_TCBANB_MASK) >> ARMCR4_TCBANB_SHIFT;
+	nbb = (corecap & ARMCR4_TCBBNB_MASK) >> ARMCR4_TCBBNB_SHIFT;
+	totb = nab + nbb;
+
+	for (idx = 0; idx < totb; idx++) {
+		brcmf_chip_core_write32(cr4, ARMCR4_BANKIDX, idx);
+		bxinfo = brcmf_chip_core_read32(cr4, ARMCR4_BANKINFO);
+		memsize += ((bxinfo & ARMCR4_BSZ_MASK) + 1) * ARMCR4_BSZ_MULT;
+	}
+
+	return memsize;
+}
+
+static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
+{
+	switch (ci->pub.chip) {
+	case BRCM_CC_4345_CHIP_ID:
+		return 0x198000;
+	case BRCM_CC_4335_CHIP_ID:
+	case BRCM_CC_4339_CHIP_ID:
+	case BRCM_CC_4350_CHIP_ID:
+	case BRCM_CC_4354_CHIP_ID:
+	case BRCM_CC_4356_CHIP_ID:
+	case BRCM_CC_43567_CHIP_ID:
+	case BRCM_CC_43569_CHIP_ID:
+	case BRCM_CC_43570_CHIP_ID:
+	case BRCM_CC_4358_CHIP_ID:
+	case BRCM_CC_43602_CHIP_ID:
+	case BRCM_CC_4371_CHIP_ID:
+		return 0x180000;
+	case BRCM_CC_4365_CHIP_ID:
+	case BRCM_CC_4366_CHIP_ID:
+		return 0x200000;
+	default:
+		brcmf_err("unknown chip: %s\n", ci->pub.name);
+		break;
+	}
+	return 0;
+}
+
+static int brcmf_chip_get_raminfo(struct brcmf_chip_priv *ci)
+{
+	struct brcmf_core_priv *mem_core;
+	struct brcmf_core *mem;
+
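+	/* RAM is provided by one of three cores: ARM CR4 tightly coupled
+	 * memory, a dedicated system memory core (ARM CA7 chips) or a
+	 * classic SOCRAM core; probe them in that order
+	 */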
+	mem = brcmf_chip_get_core(&ci->pub, BCMA_CORE_ARM_CR4);
+	if (mem) {
+		mem_core = container_of(mem, struct brcmf_core_priv, pub);
+		ci->pub.ramsize = brcmf_chip_tcm_ramsize(mem_core);
+		ci->pub.rambase = brcmf_chip_tcm_rambase(ci);
+		if (!ci->pub.rambase) {
+			brcmf_err("RAM base not provided with ARM CR4 core\n");
+			return -EINVAL;
+		}
+	} else {
+		mem = brcmf_chip_get_core(&ci->pub, BCMA_CORE_SYS_MEM);
+		if (mem) {
+			mem_core = container_of(mem, struct brcmf_core_priv,
+						pub);
+			ci->pub.ramsize = brcmf_chip_sysmem_ramsize(mem_core);
+			ci->pub.rambase = brcmf_chip_tcm_rambase(ci);
+			if (!ci->pub.rambase) {
+				brcmf_err("RAM base not provided with ARM CA7 core\n");
+				return -EINVAL;
+			}
+		} else {
+			mem = brcmf_chip_get_core(&ci->pub,
+						  BCMA_CORE_INTERNAL_MEM);
+			if (!mem) {
+				brcmf_err("No memory cores found\n");
+				return -ENOMEM;
+			}
+			mem_core = container_of(mem, struct brcmf_core_priv,
+						pub);
+			brcmf_chip_socram_ramsize(mem_core, &ci->pub.ramsize,
+						  &ci->pub.srsize);
+		}
+	}
+	brcmf_dbg(INFO, "RAM: base=0x%x size=%d (0x%x) sr=%d (0x%x)\n",
+		  ci->pub.rambase, ci->pub.ramsize, ci->pub.ramsize,
+		  ci->pub.srsize, ci->pub.srsize);
+
+	if (!ci->pub.ramsize) {
+		brcmf_err("RAM size is undetermined\n");
+		return -ENOMEM;
+	}
+
+	if (ci->pub.ramsize > BRCMF_CHIP_MAX_MEMSIZE) {
+		brcmf_err("RAM size is incorrect\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static u32 brcmf_chip_dmp_get_desc(struct brcmf_chip_priv *ci, u32 *eromaddr,
+				   u8 *type)
+{
+	u32 val;
+
+	/* read next descriptor */
+	val = ci->ops->read32(ci->ctx, *eromaddr);
+	*eromaddr += 4;
+
+	if (!type)
+		return val;
+
+	/* determine descriptor type */
+	*type = (val & DMP_DESC_TYPE_MSK);
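+	/* an address descriptor may carry the ADDRSIZE_GT32 flag for 64-bit
+	 * addresses; strip it so callers only need to match DMP_DESC_ADDRESS
+	 */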
+	if ((*type & ~DMP_DESC_ADDRSIZE_GT32) == DMP_DESC_ADDRESS)
+		*type = DMP_DESC_ADDRESS;
+
+	return val;
+}
+
+static int brcmf_chip_dmp_get_regaddr(struct brcmf_chip_priv *ci, u32 *eromaddr,
+				      u32 *regbase, u32 *wrapbase)
+{
+	u8 desc;
+	u32 val;
+	u8 mpnum = 0;
+	u8 stype, sztype, wraptype;
+
+	*regbase = 0;
+	*wrapbase = 0;
+
+	val = brcmf_chip_dmp_get_desc(ci, eromaddr, &desc);
+	if (desc == DMP_DESC_MASTER_PORT) {
+		mpnum = (val & DMP_MASTER_PORT_NUM) >> DMP_MASTER_PORT_NUM_S;
+		wraptype = DMP_SLAVE_TYPE_MWRAP;
+	} else if (desc == DMP_DESC_ADDRESS) {
+		/* revert erom address */
+		*eromaddr -= 4;
+		wraptype = DMP_SLAVE_TYPE_SWRAP;
+	} else {
+		*eromaddr -= 4;
+		return -EILSEQ;
+	}
+
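+	/* walk the address descriptors that follow: the first 4K regular
+	 * slave region becomes the core register base and the first 4K
+	 * wrapper region (master or slave wrap, as determined above)
+	 * becomes the DMP wrapper base
+	 */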
+	do {
+		/* locate address descriptor */
+		do {
+			val = brcmf_chip_dmp_get_desc(ci, eromaddr, &desc);
+			/* unexpected table end */
+			if (desc == DMP_DESC_EOT) {
+				*eromaddr -= 4;
+				return -EFAULT;
+			}
+		} while (desc != DMP_DESC_ADDRESS);
+
+		/* skip upper 32-bit address descriptor */
+		if (val & DMP_DESC_ADDRSIZE_GT32)
+			brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);
+
+		sztype = (val & DMP_SLAVE_SIZE_TYPE) >> DMP_SLAVE_SIZE_TYPE_S;
+
+		/* next size descriptor can be skipped */
+		if (sztype == DMP_SLAVE_SIZE_DESC) {
+			val = brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);
+			/* skip upper size descriptor if present */
+			if (val & DMP_DESC_ADDRSIZE_GT32)
+				brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);
+		}
+
+		/* only look for 4K register regions */
+		if (sztype != DMP_SLAVE_SIZE_4K)
+			continue;
+
+		stype = (val & DMP_SLAVE_TYPE) >> DMP_SLAVE_TYPE_S;
+
+		/* only regular slave and wrapper */
+		if (*regbase == 0 && stype == DMP_SLAVE_TYPE_SLAVE)
+			*regbase = val & DMP_SLAVE_ADDR_BASE;
+		if (*wrapbase == 0 && stype == wraptype)
+			*wrapbase = val & DMP_SLAVE_ADDR_BASE;
+	} while (*regbase == 0 || *wrapbase == 0);
+
+	return 0;
+}
+
+static
+int brcmf_chip_dmp_erom_scan(struct brcmf_chip_priv *ci)
+{
+	struct brcmf_core *core;
+	u32 eromaddr;
+	u8 desc_type = 0;
+	u32 val;
+	u16 id;
+	u8 nmp, nsp, nmw, nsw, rev;
+	u32 base, wrap;
+	int err;
+
+	eromaddr = ci->ops->read32(ci->ctx, CORE_CC_REG(SI_ENUM_BASE, eromptr));
+
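+	/* the enumeration ROM (EROM) is a table of 32-bit descriptors
+	 * describing the cores on the AXI backplane; walk it until the
+	 * end-of-table descriptor is found
+	 */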
+	while (desc_type != DMP_DESC_EOT) {
+		val = brcmf_chip_dmp_get_desc(ci, &eromaddr, &desc_type);
+		if (!(val & DMP_DESC_VALID))
+			continue;
+
+		if (desc_type == DMP_DESC_EMPTY)
+			continue;
+
+		/* need a component descriptor */
+		if (desc_type != DMP_DESC_COMPONENT)
+			continue;
+
+		id = (val & DMP_COMP_PARTNUM) >> DMP_COMP_PARTNUM_S;
+
+		/* next descriptor must be component as well */
+		val = brcmf_chip_dmp_get_desc(ci, &eromaddr, &desc_type);
+		if (WARN_ON((val & DMP_DESC_TYPE_MSK) != DMP_DESC_COMPONENT))
+			return -EFAULT;
+
+		/* only look at cores with master port(s) */
+		nmp = (val & DMP_COMP_NUM_MPORT) >> DMP_COMP_NUM_MPORT_S;
+		nsp = (val & DMP_COMP_NUM_SPORT) >> DMP_COMP_NUM_SPORT_S;
+		nmw = (val & DMP_COMP_NUM_MWRAP) >> DMP_COMP_NUM_MWRAP_S;
+		nsw = (val & DMP_COMP_NUM_SWRAP) >> DMP_COMP_NUM_SWRAP_S;
+		rev = (val & DMP_COMP_REVISION) >> DMP_COMP_REVISION_S;
+
+		/* need core with ports */
+		if (nmw + nsw == 0)
+			continue;
+
+		/* try to obtain register address info */
+		err = brcmf_chip_dmp_get_regaddr(ci, &eromaddr, &base, &wrap);
+		if (err)
+			continue;
+
+		/* finally a core to be added */
+		core = brcmf_chip_add_core(ci, id, base, wrap);
+		if (IS_ERR(core))
+			return PTR_ERR(core);
+
+		core->rev = rev;
+	}
+
+	return 0;
+}
+
+static int brcmf_chip_recognition(struct brcmf_chip_priv *ci)
+{
+	struct brcmf_core *core;
+	u32 regdata;
+	u32 socitype;
+	int ret;
+
+	/* Get CC core rev
+	 * Chipid is assumed to be at offset 0 from SI_ENUM_BASE
+	 * For different chiptypes or old sdio hosts w/o chipcommon,
+	 * other ways of recognition should be added here.
+	 */
+	regdata = ci->ops->read32(ci->ctx, CORE_CC_REG(SI_ENUM_BASE, chipid));
+	ci->pub.chip = regdata & CID_ID_MASK;
+	ci->pub.chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
+	socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
+
+	brcmf_chip_name(ci->pub.chip, ci->pub.name, sizeof(ci->pub.name));
+	brcmf_dbg(INFO, "found %s chip: BCM%s, rev=%d\n",
+		  socitype == SOCI_SB ? "SB" : "AXI", ci->pub.name,
+		  ci->pub.chiprev);
+
+	if (socitype == SOCI_SB) {
+		if (ci->pub.chip != BRCM_CC_4329_CHIP_ID) {
+			brcmf_err("SB chip is not supported\n");
+			return -ENODEV;
+		}
+		ci->iscoreup = brcmf_chip_sb_iscoreup;
+		ci->coredisable = brcmf_chip_sb_coredisable;
+		ci->resetcore = brcmf_chip_sb_resetcore;
+
+		core = brcmf_chip_add_core(ci, BCMA_CORE_CHIPCOMMON,
+					   SI_ENUM_BASE, 0);
+		brcmf_chip_sb_corerev(ci, core);
+		core = brcmf_chip_add_core(ci, BCMA_CORE_SDIO_DEV,
+					   BCM4329_CORE_BUS_BASE, 0);
+		brcmf_chip_sb_corerev(ci, core);
+		core = brcmf_chip_add_core(ci, BCMA_CORE_INTERNAL_MEM,
+					   BCM4329_CORE_SOCRAM_BASE, 0);
+		brcmf_chip_sb_corerev(ci, core);
+		core = brcmf_chip_add_core(ci, BCMA_CORE_ARM_CM3,
+					   BCM4329_CORE_ARM_BASE, 0);
+		brcmf_chip_sb_corerev(ci, core);
+
+		core = brcmf_chip_add_core(ci, BCMA_CORE_80211, 0x18001000, 0);
+		brcmf_chip_sb_corerev(ci, core);
+	} else if (socitype == SOCI_AI) {
+		ci->iscoreup = brcmf_chip_ai_iscoreup;
+		ci->coredisable = brcmf_chip_ai_coredisable;
+		ci->resetcore = brcmf_chip_ai_resetcore;
+
+		brcmf_chip_dmp_erom_scan(ci);
+	} else {
+		brcmf_err("chip backplane type %u is not supported\n",
+			  socitype);
+		return -ENODEV;
+	}
+
+	ret = brcmf_chip_cores_check(ci);
+	if (ret)
+		return ret;
+
+	/* assure chip is passive for core access */
+	brcmf_chip_set_passive(&ci->pub);
+
+	/* Call bus specific reset function now. Cores have been determined
+	 * but further access may require a chip specific reset at this point.
+	 */
+	if (ci->ops->reset) {
+		ci->ops->reset(ci->ctx, &ci->pub);
+		brcmf_chip_set_passive(&ci->pub);
+	}
+
+	return brcmf_chip_get_raminfo(ci);
+}
+
+static void brcmf_chip_disable_arm(struct brcmf_chip_priv *chip, u16 id)
+{
+	struct brcmf_core *core;
+	struct brcmf_core_priv *cpu;
+	u32 val;
+
+	core = brcmf_chip_get_core(&chip->pub, id);
+	if (!core)
+		return;
+
+	switch (id) {
+	case BCMA_CORE_ARM_CM3:
+		brcmf_chip_coredisable(core, 0, 0);
+		break;
+	case BCMA_CORE_ARM_CR4:
+	case BCMA_CORE_ARM_CA7:
+		cpu = container_of(core, struct brcmf_core_priv, pub);
+
+		/* clear all IOCTL bits except HALT bit */
+		val = chip->ops->read32(chip->ctx, cpu->wrapbase + BCMA_IOCTL);
+		val &= ARMCR4_BCMA_IOCTL_CPUHALT;
+		brcmf_chip_resetcore(core, val, ARMCR4_BCMA_IOCTL_CPUHALT,
+				     ARMCR4_BCMA_IOCTL_CPUHALT);
+		break;
+	default:
+		brcmf_err("unknown id: %u\n", id);
+		break;
+	}
+}
+
+static int brcmf_chip_setup(struct brcmf_chip_priv *chip)
+{
+	struct brcmf_chip *pub;
+	struct brcmf_core_priv *cc;
+	u32 base;
+	u32 val;
+	int ret = 0;
+
+	pub = &chip->pub;
+	cc = list_first_entry(&chip->cores, struct brcmf_core_priv, list);
+	base = cc->pub.base;
+
+	/* get chipcommon capabilities */
+	pub->cc_caps = chip->ops->read32(chip->ctx,
+					 CORE_CC_REG(base, capabilities));
+
+	/* get pmu caps & rev */
+	if (pub->cc_caps & CC_CAP_PMU) {
+		val = chip->ops->read32(chip->ctx,
+					CORE_CC_REG(base, pmucapabilities));
+		pub->pmurev = val & PCAP_REV_MASK;
+		pub->pmucaps = val;
+	}
+
+	brcmf_dbg(INFO, "ccrev=%d, pmurev=%d, pmucaps=0x%x\n",
+		  cc->pub.rev, pub->pmurev, pub->pmucaps);
+
+	/* execute bus core specific setup */
+	if (chip->ops->setup)
+		ret = chip->ops->setup(chip->ctx, pub);
+
+	return ret;
+}
+
+struct brcmf_chip *brcmf_chip_attach(void *ctx,
+				     const struct brcmf_buscore_ops *ops)
+{
+	struct brcmf_chip_priv *chip;
+	int err = 0;
+
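+	/* read32, write32, prepare and activate callbacks are mandatory;
+	 * reset and setup are optional and only called when provided
+	 */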
+	if (WARN_ON(!ops->read32))
+		err = -EINVAL;
+	if (WARN_ON(!ops->write32))
+		err = -EINVAL;
+	if (WARN_ON(!ops->prepare))
+		err = -EINVAL;
+	if (WARN_ON(!ops->activate))
+		err = -EINVAL;
+	if (err < 0)
+		return ERR_PTR(-EINVAL);
+
+	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&chip->cores);
+	chip->num_cores = 0;
+	chip->ops = ops;
+	chip->ctx = ctx;
+
+	err = ops->prepare(ctx);
+	if (err < 0)
+		goto fail;
+
+	err = brcmf_chip_recognition(chip);
+	if (err < 0)
+		goto fail;
+
+	err = brcmf_chip_setup(chip);
+	if (err < 0)
+		goto fail;
+
+	return &chip->pub;
+
+fail:
+	brcmf_chip_detach(&chip->pub);
+	return ERR_PTR(err);
+}
+
+void brcmf_chip_detach(struct brcmf_chip *pub)
+{
+	struct brcmf_chip_priv *chip;
+	struct brcmf_core_priv *core;
+	struct brcmf_core_priv *tmp;
+
+	chip = container_of(pub, struct brcmf_chip_priv, pub);
+	list_for_each_entry_safe(core, tmp, &chip->cores, list) {
+		list_del(&core->list);
+		kfree(core);
+	}
+	kfree(chip);
+}
+
+struct brcmf_core *brcmf_chip_get_core(struct brcmf_chip *pub, u16 coreid)
+{
+	struct brcmf_chip_priv *chip;
+	struct brcmf_core_priv *core;
+
+	chip = container_of(pub, struct brcmf_chip_priv, pub);
+	list_for_each_entry(core, &chip->cores, list)
+		if (core->pub.id == coreid)
+			return &core->pub;
+
+	return NULL;
+}
+
+struct brcmf_core *brcmf_chip_get_chipcommon(struct brcmf_chip *pub)
+{
+	struct brcmf_chip_priv *chip;
+	struct brcmf_core_priv *cc;
+
+	chip = container_of(pub, struct brcmf_chip_priv, pub);
+	cc = list_first_entry(&chip->cores, struct brcmf_core_priv, list);
+	if (WARN_ON(!cc || cc->pub.id != BCMA_CORE_CHIPCOMMON))
+		return brcmf_chip_get_core(pub, BCMA_CORE_CHIPCOMMON);
+	return &cc->pub;
+}
+
+bool brcmf_chip_iscoreup(struct brcmf_core *pub)
+{
+	struct brcmf_core_priv *core;
+
+	core = container_of(pub, struct brcmf_core_priv, pub);
+	return core->chip->iscoreup(core);
+}
+
+void brcmf_chip_coredisable(struct brcmf_core *pub, u32 prereset, u32 reset)
+{
+	struct brcmf_core_priv *core;
+
+	core = container_of(pub, struct brcmf_core_priv, pub);
+	core->chip->coredisable(core, prereset, reset);
+}
+
+void brcmf_chip_resetcore(struct brcmf_core *pub, u32 prereset, u32 reset,
+			  u32 postreset)
+{
+	struct brcmf_core_priv *core;
+
+	core = container_of(pub, struct brcmf_core_priv, pub);
+	core->chip->resetcore(core, prereset, reset, postreset);
+}
+
+static void
+brcmf_chip_cm3_set_passive(struct brcmf_chip_priv *chip)
+{
+	struct brcmf_core *core;
+	struct brcmf_core_priv *sr;
+
+	brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CM3);
+	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211);
+	brcmf_chip_resetcore(core, D11_BCMA_IOCTL_PHYRESET |
+				   D11_BCMA_IOCTL_PHYCLOCKEN,
+			     D11_BCMA_IOCTL_PHYCLOCKEN,
+			     D11_BCMA_IOCTL_PHYCLOCKEN);
+	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_INTERNAL_MEM);
+	brcmf_chip_resetcore(core, 0, 0, 0);
+
+	/* disable bank #3 remap for this device */
+	if (chip->pub.chip == BRCM_CC_43430_CHIP_ID) {
+		sr = container_of(core, struct brcmf_core_priv, pub);
+		brcmf_chip_core_write32(sr, SOCRAMREGOFFS(bankidx), 3);
+		brcmf_chip_core_write32(sr, SOCRAMREGOFFS(bankpda), 0);
+	}
+}
+
+static bool brcmf_chip_cm3_set_active(struct brcmf_chip_priv *chip)
+{
+	struct brcmf_core *core;
+
+	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_INTERNAL_MEM);
+	if (!brcmf_chip_iscoreup(core)) {
+		brcmf_err("SOCRAM core is down after reset?\n");
+		return false;
+	}
+
+	chip->ops->activate(chip->ctx, &chip->pub, 0);
+
+	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CM3);
+	brcmf_chip_resetcore(core, 0, 0, 0);
+
+	return true;
+}
+
+static inline void
+brcmf_chip_cr4_set_passive(struct brcmf_chip_priv *chip)
+{
+	struct brcmf_core *core;
+
+	brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CR4);
+
+	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211);
+	brcmf_chip_resetcore(core, D11_BCMA_IOCTL_PHYRESET |
+				   D11_BCMA_IOCTL_PHYCLOCKEN,
+			     D11_BCMA_IOCTL_PHYCLOCKEN,
+			     D11_BCMA_IOCTL_PHYCLOCKEN);
+}
+
+static bool brcmf_chip_cr4_set_active(struct brcmf_chip_priv *chip, u32 rstvec)
+{
+	struct brcmf_core *core;
+
+	chip->ops->activate(chip->ctx, &chip->pub, rstvec);
+
+	/* restore ARM */
+	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CR4);
+	brcmf_chip_resetcore(core, ARMCR4_BCMA_IOCTL_CPUHALT, 0, 0);
+
+	return true;
+}
+
+static inline void
+brcmf_chip_ca7_set_passive(struct brcmf_chip_priv *chip)
+{
+	struct brcmf_core *core;
+
+	brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CA7);
+
+	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211);
+	brcmf_chip_resetcore(core, D11_BCMA_IOCTL_PHYRESET |
+				   D11_BCMA_IOCTL_PHYCLOCKEN,
+			     D11_BCMA_IOCTL_PHYCLOCKEN,
+			     D11_BCMA_IOCTL_PHYCLOCKEN);
+}
+
+static bool brcmf_chip_ca7_set_active(struct brcmf_chip_priv *chip, u32 rstvec)
+{
+	struct brcmf_core *core;
+
+	chip->ops->activate(chip->ctx, &chip->pub, rstvec);
+
+	/* restore ARM */
+	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CA7);
+	brcmf_chip_resetcore(core, ARMCR4_BCMA_IOCTL_CPUHALT, 0, 0);
+
+	return true;
+}
+
+void brcmf_chip_set_passive(struct brcmf_chip *pub)
+{
+	struct brcmf_chip_priv *chip;
+	struct brcmf_core *arm;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	chip = container_of(pub, struct brcmf_chip_priv, pub);
+	arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4);
+	if (arm) {
+		brcmf_chip_cr4_set_passive(chip);
+		return;
+	}
+	arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CA7);
+	if (arm) {
+		brcmf_chip_ca7_set_passive(chip);
+		return;
+	}
+	arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CM3);
+	if (arm) {
+		brcmf_chip_cm3_set_passive(chip);
+		return;
+	}
+}
+
+bool brcmf_chip_set_active(struct brcmf_chip *pub, u32 rstvec)
+{
+	struct brcmf_chip_priv *chip;
+	struct brcmf_core *arm;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	chip = container_of(pub, struct brcmf_chip_priv, pub);
+	arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4);
+	if (arm)
+		return brcmf_chip_cr4_set_active(chip, rstvec);
+	arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CA7);
+	if (arm)
+		return brcmf_chip_ca7_set_active(chip, rstvec);
+	arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CM3);
+	if (arm)
+		return brcmf_chip_cm3_set_active(chip);
+
+	return false;
+}
+
+bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
+{
+	u32 base, addr, reg, pmu_cc3_mask = ~0;
+	struct brcmf_chip_priv *chip;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	/* old chips with PMU version less than 17 don't support save restore */
+	if (pub->pmurev < 17)
+		return false;
+
+	base = brcmf_chip_get_chipcommon(pub)->base;
+	chip = container_of(pub, struct brcmf_chip_priv, pub);
+
+	switch (pub->chip) {
+	case BRCM_CC_4354_CHIP_ID:
+		/* explicitly check SR engine enable bit */
+		pmu_cc3_mask = BIT(2);
+		/* fall-through */
+	case BRCM_CC_43241_CHIP_ID:
+	case BRCM_CC_4335_CHIP_ID:
+	case BRCM_CC_4339_CHIP_ID:
+		/* read PMU chipcontrol register 3 */
+		addr = CORE_CC_REG(base, chipcontrol_addr);
+		chip->ops->write32(chip->ctx, addr, 3);
+		addr = CORE_CC_REG(base, chipcontrol_data);
+		reg = chip->ops->read32(chip->ctx, addr);
+		return (reg & pmu_cc3_mask) != 0;
+	case BRCM_CC_43430_CHIP_ID:
+		addr = CORE_CC_REG(base, sr_control1);
+		reg = chip->ops->read32(chip->ctx, addr);
+		return reg != 0;
+	default:
+		addr = CORE_CC_REG(base, pmucapabilities_ext);
+		reg = chip->ops->read32(chip->ctx, addr);
+		if ((reg & PCAPEXT_SR_SUPPORTED_MASK) == 0)
+			return false;
+
+		addr = CORE_CC_REG(base, retention_ctl);
+		reg = chip->ops->read32(chip->ctx, addr);
+		return (reg & (PMU_RCTL_MACPHY_DISABLE_MASK |
+			       PMU_RCTL_LOGIC_DISABLE_MASK)) == 0;
+	}
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.h b/drivers/net/wireless/brcm80211/brcmfmac/chip.h
new file mode 100644
index 0000000..f6b5fee
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/chip.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef BRCMF_CHIP_H
+#define BRCMF_CHIP_H
+
+#include <linux/types.h>
+
+#define CORE_CC_REG(base, field) \
+		(base + offsetof(struct chipcregs, field))
+
+/**
+ * struct brcmf_chip - chip level information.
+ *
+ * @chip: chip identifier.
+ * @chiprev: chip revision.
+ * @cc_caps: chipcommon core capabilities.
+ * @pmucaps: PMU capabilities.
+ * @pmurev: PMU revision.
+ * @rambase: RAM base address (only applicable for ARM CR4/CA7 chips).
+ * @ramsize: amount of RAM on chip including retention.
+ * @srsize: amount of retention RAM on chip.
+ * @name: string representation of the chip identifier.
+ */
+struct brcmf_chip {
+	u32 chip;
+	u32 chiprev;
+	u32 cc_caps;
+	u32 pmucaps;
+	u32 pmurev;
+	u32 rambase;
+	u32 ramsize;
+	u32 srsize;
+	char name[8];
+};
+
+/**
+ * struct brcmf_core - core related information.
+ *
+ * @id: core identifier.
+ * @rev: core revision.
+ * @base: base address of core register space.
+ */
+struct brcmf_core {
+	u16 id;
+	u16 rev;
+	u32 base;
+};
+
+/**
+ * struct brcmf_buscore_ops - buscore specific callbacks.
+ *
+ * @read32: read 32-bit value over bus.
+ * @write32: write 32-bit value over bus.
+ * @prepare: prepare bus for core configuration.
+ * @reset: (optional) bus-specific chip reset.
+ * @setup: bus-specific core setup.
+ * @activate: chip becomes active.
+ *	The callback should use the provided @rstvec when non-zero.
+ */
+struct brcmf_buscore_ops {
+	u32 (*read32)(void *ctx, u32 addr);
+	void (*write32)(void *ctx, u32 addr, u32 value);
+	int (*prepare)(void *ctx);
+	int (*reset)(void *ctx, struct brcmf_chip *chip);
+	int (*setup)(void *ctx, struct brcmf_chip *chip);
+	void (*activate)(void *ctx, struct brcmf_chip *chip, u32 rstvec);
+};
+
+struct brcmf_chip *brcmf_chip_attach(void *ctx,
+				     const struct brcmf_buscore_ops *ops);
+void brcmf_chip_detach(struct brcmf_chip *chip);
+struct brcmf_core *brcmf_chip_get_core(struct brcmf_chip *chip, u16 coreid);
+struct brcmf_core *brcmf_chip_get_chipcommon(struct brcmf_chip *chip);
+bool brcmf_chip_iscoreup(struct brcmf_core *core);
+void brcmf_chip_coredisable(struct brcmf_core *core, u32 prereset, u32 reset);
+void brcmf_chip_resetcore(struct brcmf_core *core, u32 prereset, u32 reset,
+			  u32 postreset);
+void brcmf_chip_set_passive(struct brcmf_chip *ci);
+bool brcmf_chip_set_active(struct brcmf_chip *ci, u32 rstvec);
+bool brcmf_chip_sr_capable(struct brcmf_chip *pub);
+
+#endif /* BRCMF_CHIP_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/common.c b/drivers/net/wireless/brcm80211/brcmfmac/common.c
new file mode 100644
index 0000000..fe54844
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/common.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/netdevice.h>
+#include <brcmu_wifi.h>
+#include <brcmu_utils.h>
+#include "core.h"
+#include "bus.h"
+#include "debug.h"
+#include "fwil.h"
+#include "fwil_types.h"
+#include "tracepoint.h"
+#include "common.h"
+
+const u8 ALLFFMAC[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+
+#define BRCMF_DEFAULT_BCN_TIMEOUT	3
+#define BRCMF_DEFAULT_SCAN_CHANNEL_TIME	40
+#define BRCMF_DEFAULT_SCAN_UNASSOC_TIME	40
+
+/* boost value for RSSI_DELTA in preferred join selection */
+#define BRCMF_JOIN_PREF_RSSI_BOOST	8
+
+int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
+{
+	s8 eventmask[BRCMF_EVENTING_MASK_LEN];
+	u8 buf[BRCMF_DCMD_SMLEN];
+	struct brcmf_join_pref_params join_pref_params[2];
+	struct brcmf_rev_info_le revinfo;
+	struct brcmf_rev_info *ri;
+	char *ptr;
+	s32 err;
+
+	/* retrieve mac address */
+	err = brcmf_fil_iovar_data_get(ifp, "cur_etheraddr", ifp->mac_addr,
+				       sizeof(ifp->mac_addr));
+	if (err < 0) {
+		brcmf_err("Retrieving cur_etheraddr failed, %d\n", err);
+		goto done;
+	}
+	memcpy(ifp->drvr->mac, ifp->mac_addr, sizeof(ifp->drvr->mac));
+
+	err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_REVINFO,
+				     &revinfo, sizeof(revinfo));
+	ri = &ifp->drvr->revinfo;
+	if (err < 0) {
+		brcmf_err("retrieving revision info failed, %d\n", err);
+	} else {
+		ri->vendorid = le32_to_cpu(revinfo.vendorid);
+		ri->deviceid = le32_to_cpu(revinfo.deviceid);
+		ri->radiorev = le32_to_cpu(revinfo.radiorev);
+		ri->chiprev = le32_to_cpu(revinfo.chiprev);
+		ri->corerev = le32_to_cpu(revinfo.corerev);
+		ri->boardid = le32_to_cpu(revinfo.boardid);
+		ri->boardvendor = le32_to_cpu(revinfo.boardvendor);
+		ri->boardrev = le32_to_cpu(revinfo.boardrev);
+		ri->driverrev = le32_to_cpu(revinfo.driverrev);
+		ri->ucoderev = le32_to_cpu(revinfo.ucoderev);
+		ri->bus = le32_to_cpu(revinfo.bus);
+		ri->chipnum = le32_to_cpu(revinfo.chipnum);
+		ri->phytype = le32_to_cpu(revinfo.phytype);
+		ri->phyrev = le32_to_cpu(revinfo.phyrev);
+		ri->anarev = le32_to_cpu(revinfo.anarev);
+		ri->chippkg = le32_to_cpu(revinfo.chippkg);
+		ri->nvramrev = le32_to_cpu(revinfo.nvramrev);
+	}
+	ri->result = err;
+
+	/* query for 'ver' to get version info from firmware */
+	memset(buf, 0, sizeof(buf));
+	strcpy(buf, "ver");
+	err = brcmf_fil_iovar_data_get(ifp, "ver", buf, sizeof(buf));
+	if (err < 0) {
+		brcmf_err("Retrieving version information failed, %d\n",
+			  err);
+		goto done;
+	}
+	ptr = (char *)buf;
+	strsep(&ptr, "\n");
+
+	/* Print fw version info */
+	brcmf_err("Firmware version = %s\n", buf);
+
+	/* locate firmware version number for ethtool */
+	ptr = strrchr(buf, ' ') + 1;
+	strlcpy(ifp->drvr->fwver, ptr, sizeof(ifp->drvr->fwver));
+
+	/* set mpc */
+	err = brcmf_fil_iovar_int_set(ifp, "mpc", 1);
+	if (err) {
+		brcmf_err("failed setting mpc\n");
+		goto done;
+	}
+
+	/*
+	 * Set up the beacon timeout so a link down is reported when
+	 * beacons are lost and roaming is off
+	 */
+	err = brcmf_fil_iovar_int_set(ifp, "bcn_timeout",
+				      BRCMF_DEFAULT_BCN_TIMEOUT);
+	if (err) {
+		brcmf_err("bcn_timeout error (%d)\n", err);
+		goto done;
+	}
+
+	/* Enable/disable built-in roaming to allow an external supplicant
+	 * to take care of roaming
+	 */
+	err = brcmf_fil_iovar_int_set(ifp, "roam_off", 1);
+	if (err) {
+		brcmf_err("roam_off error (%d)\n", err);
+		goto done;
+	}
+
+	/* Setup join_pref to select target by RSSI(with boost on 5GHz) */
+	join_pref_params[0].type = BRCMF_JOIN_PREF_RSSI_DELTA;
+	join_pref_params[0].len = 2;
+	join_pref_params[0].rssi_gain = BRCMF_JOIN_PREF_RSSI_BOOST;
+	join_pref_params[0].band = WLC_BAND_5G;
+	join_pref_params[1].type = BRCMF_JOIN_PREF_RSSI;
+	join_pref_params[1].len = 2;
+	join_pref_params[1].rssi_gain = 0;
+	join_pref_params[1].band = 0;
+	err = brcmf_fil_iovar_data_set(ifp, "join_pref", join_pref_params,
+				       sizeof(join_pref_params));
+	if (err)
+		brcmf_err("Set join_pref error (%d)\n", err);
+
+	/* Setup event_msgs, enable E_IF */
+	err = brcmf_fil_iovar_data_get(ifp, "event_msgs", eventmask,
+				       BRCMF_EVENTING_MASK_LEN);
+	if (err) {
+		brcmf_err("Get event_msgs error (%d)\n", err);
+		goto done;
+	}
+	setbit(eventmask, BRCMF_E_IF);
+	err = brcmf_fil_iovar_data_set(ifp, "event_msgs", eventmask,
+				       BRCMF_EVENTING_MASK_LEN);
+	if (err) {
+		brcmf_err("Set event_msgs error (%d)\n", err);
+		goto done;
+	}
+
+	/* Setup default scan channel time */
+	err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_CHANNEL_TIME,
+				    BRCMF_DEFAULT_SCAN_CHANNEL_TIME);
+	if (err) {
+		brcmf_err("BRCMF_C_SET_SCAN_CHANNEL_TIME error (%d)\n",
+			  err);
+		goto done;
+	}
+
+	/* Setup default scan unassoc time */
+	err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_UNASSOC_TIME,
+				    BRCMF_DEFAULT_SCAN_UNASSOC_TIME);
+	if (err) {
+		brcmf_err("BRCMF_C_SET_SCAN_UNASSOC_TIME error (%d)\n",
+			  err);
+		goto done;
+	}
+
+	/* do bus specific preinit here */
+	err = brcmf_bus_preinit(ifp->drvr->bus_if);
+done:
+	return err;
+}
+
+#if defined(CONFIG_BRCM_TRACING) || defined(CONFIG_BRCMDBG)
+void __brcmf_dbg(u32 level, const char *func, const char *fmt, ...)
+{
+	struct va_format vaf = {
+		.fmt = fmt,
+	};
+	va_list args;
+
+	va_start(args, fmt);
+	vaf.va = &args;
+	if (brcmf_msg_level & level)
+		pr_debug("%s %pV", func, &vaf);
+	trace_brcmf_dbg(level, func, &vaf);
+	va_end(args);
+}
+#endif
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/common.h b/drivers/net/wireless/brcm80211/brcmfmac/common.h
new file mode 100644
index 0000000..21c7488
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/common.h
@@ -0,0 +1,23 @@
+/* Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef BRCMFMAC_COMMON_H
+#define BRCMFMAC_COMMON_H
+
+extern const u8 ALLFFMAC[ETH_ALEN];
+
+/* Sets dongle media info (drv_version, mac address). */
+int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
+
+#endif /* BRCMFMAC_COMMON_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/commonring.c b/drivers/net/wireless/brcm80211/brcmfmac/commonring.c
new file mode 100644
index 0000000..7b0e521
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/commonring.c
@@ -0,0 +1,252 @@
+/* Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+
+#include <brcmu_utils.h>
+#include <brcmu_wifi.h>
+
+#include "core.h"
+#include "commonring.h"
+
+void brcmf_commonring_register_cb(struct brcmf_commonring *commonring,
+				  int (*cr_ring_bell)(void *ctx),
+				  int (*cr_update_rptr)(void *ctx),
+				  int (*cr_update_wptr)(void *ctx),
+				  int (*cr_write_rptr)(void *ctx),
+				  int (*cr_write_wptr)(void *ctx), void *ctx)
+{
+	commonring->cr_ring_bell = cr_ring_bell;
+	commonring->cr_update_rptr = cr_update_rptr;
+	commonring->cr_update_wptr = cr_update_wptr;
+	commonring->cr_write_rptr = cr_write_rptr;
+	commonring->cr_write_wptr = cr_write_wptr;
+	commonring->cr_ctx = ctx;
+}
+
+
+void brcmf_commonring_config(struct brcmf_commonring *commonring, u16 depth,
+			     u16 item_len, void *buf_addr)
+{
+	commonring->depth = depth;
+	commonring->item_len = item_len;
+	commonring->buf_addr = buf_addr;
+	if (!commonring->inited) {
+		spin_lock_init(&commonring->lock);
+		commonring->inited = true;
+	}
+	commonring->r_ptr = 0;
+	if (commonring->cr_write_rptr)
+		commonring->cr_write_rptr(commonring->cr_ctx);
+	commonring->w_ptr = 0;
+	if (commonring->cr_write_wptr)
+		commonring->cr_write_wptr(commonring->cr_ctx);
+	commonring->f_ptr = 0;
+}
+
+
+void brcmf_commonring_lock(struct brcmf_commonring *commonring)
+		__acquires(&commonring->lock)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&commonring->lock, flags);
+	commonring->flags = flags;
+}
+
+
+void brcmf_commonring_unlock(struct brcmf_commonring *commonring)
+		__releases(&commonring->lock)
+{
+	spin_unlock_irqrestore(&commonring->lock, commonring->flags);
+}
+
+
+bool brcmf_commonring_write_available(struct brcmf_commonring *commonring)
+{
+	u16 available;
+	bool retry = true;
+
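+	/* one slot is always kept unused so w_ptr == r_ptr unambiguously
+	 * means "empty"; after the ring has filled up, free space is not
+	 * reported again until more than 1/8th of the ring is free, which
+	 * avoids thrashing right at the full mark
+	 */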
+again:
+	if (commonring->r_ptr <= commonring->w_ptr)
+		available = commonring->depth - commonring->w_ptr +
+			    commonring->r_ptr;
+	else
+		available = commonring->r_ptr - commonring->w_ptr;
+
+	if (available > 1) {
+		if (!commonring->was_full)
+			return true;
+		if (available > commonring->depth / 8) {
+			commonring->was_full = false;
+			return true;
+		}
+		if (retry) {
+			if (commonring->cr_update_rptr)
+				commonring->cr_update_rptr(commonring->cr_ctx);
+			retry = false;
+			goto again;
+		}
+		return false;
+	}
+
+	if (retry) {
+		if (commonring->cr_update_rptr)
+			commonring->cr_update_rptr(commonring->cr_ctx);
+		retry = false;
+		goto again;
+	}
+
+	commonring->was_full = true;
+	return false;
+}
+
+
+void *brcmf_commonring_reserve_for_write(struct brcmf_commonring *commonring)
+{
+	void *ret_ptr;
+	u16 available;
+	bool retry = true;
+
+again:
+	if (commonring->r_ptr <= commonring->w_ptr)
+		available = commonring->depth - commonring->w_ptr +
+			    commonring->r_ptr;
+	else
+		available = commonring->r_ptr - commonring->w_ptr;
+
+	if (available > 1) {
+		ret_ptr = commonring->buf_addr +
+			  (commonring->w_ptr * commonring->item_len);
+		commonring->w_ptr++;
+		if (commonring->w_ptr == commonring->depth)
+			commonring->w_ptr = 0;
+		return ret_ptr;
+	}
+
+	if (retry) {
+		if (commonring->cr_update_rptr)
+			commonring->cr_update_rptr(commonring->cr_ctx);
+		retry = false;
+		goto again;
+	}
+
+	commonring->was_full = true;
+	return NULL;
+}
+
+
+void *
+brcmf_commonring_reserve_for_write_multiple(struct brcmf_commonring *commonring,
+					    u16 n_items, u16 *alloced)
+{
+	void *ret_ptr;
+	u16 available;
+	bool retry = true;
+
+again:
+	if (commonring->r_ptr <= commonring->w_ptr)
+		available = commonring->depth - commonring->w_ptr +
+			    commonring->r_ptr;
+	else
+		available = commonring->r_ptr - commonring->w_ptr;
+
+	if (available > 1) {
+		ret_ptr = commonring->buf_addr +
+			  (commonring->w_ptr * commonring->item_len);
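+		/* grant at most available - 1 items so one slot stays free,
+		 * and clamp the grant at the end of the ring so the caller
+		 * always gets a contiguous region
+		 */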
+		*alloced = min_t(u16, n_items, available - 1);
+		if (*alloced + commonring->w_ptr > commonring->depth)
+			*alloced = commonring->depth - commonring->w_ptr;
+		commonring->w_ptr += *alloced;
+		if (commonring->w_ptr == commonring->depth)
+			commonring->w_ptr = 0;
+		return ret_ptr;
+	}
+
+	if (retry) {
+		if (commonring->cr_update_rptr)
+			commonring->cr_update_rptr(commonring->cr_ctx);
+		retry = false;
+		goto again;
+	}
+
+	commonring->was_full = true;
+	return NULL;
+}
+
+
+int brcmf_commonring_write_complete(struct brcmf_commonring *commonring)
+{
+	void *address;
+
+	address = commonring->buf_addr;
+	address += (commonring->f_ptr * commonring->item_len);
+	if (commonring->f_ptr > commonring->w_ptr) {
+		address = commonring->buf_addr;
+		commonring->f_ptr = 0;
+	}
+
+	commonring->f_ptr = commonring->w_ptr;
+
+	if (commonring->cr_write_wptr)
+		commonring->cr_write_wptr(commonring->cr_ctx);
+	if (commonring->cr_ring_bell)
+		return commonring->cr_ring_bell(commonring->cr_ctx);
+
+	return -EIO;
+}
+
+
+void brcmf_commonring_write_cancel(struct brcmf_commonring *commonring,
+				   u16 n_items)
+{
+	if (commonring->w_ptr == 0)
+		commonring->w_ptr = commonring->depth - n_items;
+	else
+		commonring->w_ptr -= n_items;
+}
+
+
+void *brcmf_commonring_get_read_ptr(struct brcmf_commonring *commonring,
+				    u16 *n_items)
+{
+	if (commonring->cr_update_wptr)
+		commonring->cr_update_wptr(commonring->cr_ctx);
+
+	*n_items = (commonring->w_ptr >= commonring->r_ptr) ?
+				(commonring->w_ptr - commonring->r_ptr) :
+				(commonring->depth - commonring->r_ptr);
+
+	if (*n_items == 0)
+		return NULL;
+
+	return commonring->buf_addr +
+	       (commonring->r_ptr * commonring->item_len);
+}
+
+
+int brcmf_commonring_read_complete(struct brcmf_commonring *commonring,
+				   u16 n_items)
+{
+	commonring->r_ptr += n_items;
+	if (commonring->r_ptr == commonring->depth)
+		commonring->r_ptr = 0;
+
+	if (commonring->cr_write_rptr)
+		return commonring->cr_write_rptr(commonring->cr_ctx);
+
+	return -EIO;
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/commonring.h b/drivers/net/wireless/brcm80211/brcmfmac/commonring.h
new file mode 100644
index 0000000..b850336
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/commonring.h
@@ -0,0 +1,72 @@
+/* Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef BRCMFMAC_COMMONRING_H
+#define BRCMFMAC_COMMONRING_H
+
+
+struct brcmf_commonring {
+	u16 r_ptr;
+	u16 w_ptr;
+	u16 f_ptr;
+	u16 depth;
+	u16 item_len;
+
+	void *buf_addr;
+
+	int (*cr_ring_bell)(void *ctx);
+	int (*cr_update_rptr)(void *ctx);
+	int (*cr_update_wptr)(void *ctx);
+	int (*cr_write_rptr)(void *ctx);
+	int (*cr_write_wptr)(void *ctx);
+
+	void *cr_ctx;
+
+	spinlock_t lock;
+	unsigned long flags;
+	bool inited;
+	bool was_full;
+
+	atomic_t outstanding_tx;
+};
+
+
+void brcmf_commonring_register_cb(struct brcmf_commonring *commonring,
+				  int (*cr_ring_bell)(void *ctx),
+				  int (*cr_update_rptr)(void *ctx),
+				  int (*cr_update_wptr)(void *ctx),
+				  int (*cr_write_rptr)(void *ctx),
+				  int (*cr_write_wptr)(void *ctx), void *ctx);
+void brcmf_commonring_config(struct brcmf_commonring *commonring, u16 depth,
+			     u16 item_len, void *buf_addr);
+void brcmf_commonring_lock(struct brcmf_commonring *commonring);
+void brcmf_commonring_unlock(struct brcmf_commonring *commonring);
+bool brcmf_commonring_write_available(struct brcmf_commonring *commonring);
+void *brcmf_commonring_reserve_for_write(struct brcmf_commonring *commonring);
+void *
+brcmf_commonring_reserve_for_write_multiple(struct brcmf_commonring *commonring,
+					    u16 n_items, u16 *alloced);
+int brcmf_commonring_write_complete(struct brcmf_commonring *commonring);
+void brcmf_commonring_write_cancel(struct brcmf_commonring *commonring,
+				   u16 n_items);
+void *brcmf_commonring_get_read_ptr(struct brcmf_commonring *commonring,
+				    u16 *n_items);
+int brcmf_commonring_read_complete(struct brcmf_commonring *commonring,
+				   u16 n_items);
+
+#define brcmf_commonring_n_items(commonring) (commonring->depth)
+#define brcmf_commonring_len_item(commonring) (commonring->item_len)
+
+
+#endif /* BRCMFMAC_COMMONRING_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/core.c b/drivers/net/wireless/brcm80211/brcmfmac/core.c
new file mode 100644
index 0000000..82753e7
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/core.c
@@ -0,0 +1,1253 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <net/cfg80211.h>
+#include <net/rtnetlink.h>
+#include <brcmu_utils.h>
+#include <brcmu_wifi.h>
+
+#include "core.h"
+#include "bus.h"
+#include "debug.h"
+#include "fwil_types.h"
+#include "p2p.h"
+#include "cfg80211.h"
+#include "fwil.h"
+#include "fwsignal.h"
+#include "feature.h"
+#include "proto.h"
+#include "pcie.h"
+#include "common.h"
+
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver.");
+MODULE_LICENSE("Dual BSD/GPL");
+
+#define MAX_WAIT_FOR_8021X_TX		50	/* msecs */
+
+/* AMPDU rx reordering definitions */
+#define BRCMF_RXREORDER_FLOWID_OFFSET		0
+#define BRCMF_RXREORDER_MAXIDX_OFFSET		2
+#define BRCMF_RXREORDER_FLAGS_OFFSET		4
+#define BRCMF_RXREORDER_CURIDX_OFFSET		6
+#define BRCMF_RXREORDER_EXPIDX_OFFSET		8
+
+#define BRCMF_RXREORDER_DEL_FLOW		0x01
+#define BRCMF_RXREORDER_FLUSH_ALL		0x02
+#define BRCMF_RXREORDER_CURIDX_VALID		0x04
+#define BRCMF_RXREORDER_EXPIDX_VALID		0x08
+#define BRCMF_RXREORDER_NEW_HOLE		0x10
+
+#define BRCMF_BSSIDX_INVALID			-1
+
+/* Error bits */
+int brcmf_msg_level;
+module_param_named(debug, brcmf_msg_level, int, S_IRUSR | S_IWUSR);
+MODULE_PARM_DESC(debug, "level of debug output");
+
+/* P2P0 enable */
+static int brcmf_p2p_enable;
+module_param_named(p2pon, brcmf_p2p_enable, int, 0);
+MODULE_PARM_DESC(p2pon, "enable legacy p2p management functionality");
+
+char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx)
+{
+	if (ifidx < 0 || ifidx >= BRCMF_MAX_IFS) {
+		brcmf_err("ifidx %d out of range\n", ifidx);
+		return "<if_bad>";
+	}
+
+	if (drvr->iflist[ifidx] == NULL) {
+		brcmf_err("null i/f %d\n", ifidx);
+		return "<if_null>";
+	}
+
+	if (drvr->iflist[ifidx]->ndev)
+		return drvr->iflist[ifidx]->ndev->name;
+
+	return "<if_none>";
+}
+
+struct brcmf_if *brcmf_get_ifp(struct brcmf_pub *drvr, int ifidx)
+{
+	struct brcmf_if *ifp;
+	s32 bssidx;
+
+	if (ifidx < 0 || ifidx >= BRCMF_MAX_IFS) {
+		brcmf_err("ifidx %d out of range\n", ifidx);
+		return NULL;
+	}
+
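+	/* ifidx is the firmware interface index; translate it to the bss
+	 * index used to address drvr->iflist[]
+	 */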
+	ifp = NULL;
+	bssidx = drvr->if2bss[ifidx];
+	if (bssidx >= 0)
+		ifp = drvr->iflist[bssidx];
+
+	return ifp;
+}
+
+static void _brcmf_set_multicast_list(struct work_struct *work)
+{
+	struct brcmf_if *ifp;
+	struct net_device *ndev;
+	struct netdev_hw_addr *ha;
+	u32 cmd_value, cnt;
+	__le32 cnt_le;
+	char *buf, *bufp;
+	u32 buflen;
+	s32 err;
+
+	ifp = container_of(work, struct brcmf_if, multicast_work);
+
+	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
+
+	ndev = ifp->ndev;
+
+	/* Determine initial value of allmulti flag */
+	cmd_value = (ndev->flags & IFF_ALLMULTI) ? true : false;
+
+	/* Send down the multicast list first. */
+	cnt = netdev_mc_count(ndev);
+	buflen = sizeof(cnt) + (cnt * ETH_ALEN);
+	buf = kmalloc(buflen, GFP_ATOMIC);
+	if (!buf)
+		return;
+	bufp = buf;
+
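+	/* "mcast_list" iovar layout: 32-bit LE address count followed by
+	 * cnt * ETH_ALEN bytes of multicast addresses
+	 */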
+	cnt_le = cpu_to_le32(cnt);
+	memcpy(bufp, &cnt_le, sizeof(cnt_le));
+	bufp += sizeof(cnt_le);
+
+	netdev_for_each_mc_addr(ha, ndev) {
+		if (!cnt)
+			break;
+		memcpy(bufp, ha->addr, ETH_ALEN);
+		bufp += ETH_ALEN;
+		cnt--;
+	}
+
+	err = brcmf_fil_iovar_data_set(ifp, "mcast_list", buf, buflen);
+	if (err < 0) {
+		brcmf_err("Setting mcast_list failed, %d\n", err);
+		cmd_value = cnt ? true : cmd_value;
+	}
+
+	kfree(buf);
+
+	/*
+	 * Now send the allmulti setting.  This is based on the setting in the
+	 * net_device flags, but might be modified above to be turned on if we
+	 * were trying to set some addresses and dongle rejected it...
+	 */
+	err = brcmf_fil_iovar_int_set(ifp, "allmulti", cmd_value);
+	if (err < 0)
+		brcmf_err("Setting allmulti failed, %d\n", err);
+
+	/* Finally, pick up the PROMISC flag */
+	cmd_value = (ndev->flags & IFF_PROMISC) ? true : false;
+	err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PROMISC, cmd_value);
+	if (err < 0)
+		brcmf_err("Setting BRCMF_C_SET_PROMISC failed, %d\n",
+			  err);
+}
+
+static void
+_brcmf_set_mac_address(struct work_struct *work)
+{
+	struct brcmf_if *ifp;
+	s32 err;
+
+	ifp = container_of(work, struct brcmf_if, setmacaddr_work);
+
+	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
+
+	err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", ifp->mac_addr,
+				       ETH_ALEN);
+	if (err < 0) {
+		brcmf_err("Setting cur_etheraddr failed, %d\n", err);
+	} else {
+		brcmf_dbg(TRACE, "MAC address updated to %pM\n",
+			  ifp->mac_addr);
+		memcpy(ifp->ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
+	}
+}
+
+static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr)
+{
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct sockaddr *sa = (struct sockaddr *)addr;
+
+	memcpy(&ifp->mac_addr, sa->sa_data, ETH_ALEN);
+	schedule_work(&ifp->setmacaddr_work);
+	return 0;
+}
+
+static void brcmf_netdev_set_multicast_list(struct net_device *ndev)
+{
+	struct brcmf_if *ifp = netdev_priv(ndev);
+
+	schedule_work(&ifp->multicast_work);
+}
+
+static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
+					   struct net_device *ndev)
+{
+	int ret;
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct brcmf_pub *drvr = ifp->drvr;
+	struct ethhdr *eh;
+
+	brcmf_dbg(DATA, "Enter, idx=%d\n", ifp->bssidx);
+
+	/* Can the device send data? */
+	if (drvr->bus_if->state != BRCMF_BUS_UP) {
+		brcmf_err("xmit rejected state=%d\n", drvr->bus_if->state);
+		netif_stop_queue(ndev);
+		dev_kfree_skb(skb);
+		ret = -ENODEV;
+		goto done;
+	}
+
+	if (!drvr->iflist[ifp->bssidx]) {
+		brcmf_err("bad ifidx %d\n", ifp->bssidx);
+		netif_stop_queue(ndev);
+		dev_kfree_skb(skb);
+		ret = -ENODEV;
+		goto done;
+	}
+
+	/* Make sure there's enough writable headroom */
+	ret = skb_cow_head(skb, drvr->hdrlen);
+	if (ret < 0) {
+		brcmf_err("%s: skb_cow_head failed\n",
+			  brcmf_ifname(drvr, ifp->bssidx));
+		dev_kfree_skb(skb);
+		goto done;
+	}
+
+	/* validate length for ether packet */
+	if (skb->len < sizeof(*eh)) {
+		ret = -EINVAL;
+		dev_kfree_skb(skb);
+		goto done;
+	}
+
+	eh = (struct ethhdr *)(skb->data);
+
+	if (eh->h_proto == htons(ETH_P_PAE))
+		atomic_inc(&ifp->pend_8021x_cnt);
+
+	ret = brcmf_fws_process_skb(ifp, skb);
+
+done:
+	if (ret) {
+		ifp->stats.tx_dropped++;
+	} else {
+		ifp->stats.tx_packets++;
+		ifp->stats.tx_bytes += skb->len;
+	}
+
+	/* Return ok: we always eat the packet */
+	return NETDEV_TX_OK;
+}
+
+void brcmf_txflowblock_if(struct brcmf_if *ifp,
+			  enum brcmf_netif_stop_reason reason, bool state)
+{
+	unsigned long flags;
+
+	if (!ifp || !ifp->ndev)
+		return;
+
+	brcmf_dbg(TRACE, "enter: idx=%d stop=0x%X reason=%d state=%d\n",
+		  ifp->bssidx, ifp->netif_stop, reason, state);
+
+	spin_lock_irqsave(&ifp->netif_stop_lock, flags);
+	if (state) {
+		if (!ifp->netif_stop)
+			netif_stop_queue(ifp->ndev);
+		ifp->netif_stop |= reason;
+	} else {
+		ifp->netif_stop &= ~reason;
+		if (!ifp->netif_stop)
+			netif_wake_queue(ifp->ndev);
+	}
+	spin_unlock_irqrestore(&ifp->netif_stop_lock, flags);
+}
+
+void brcmf_txflowblock(struct device *dev, bool state)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_pub *drvr = bus_if->drvr;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	brcmf_fws_bus_blocked(drvr, state);
+}
+
+void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
+{
+	skb->dev = ifp->ndev;
+	skb->protocol = eth_type_trans(skb, skb->dev);
+
+	if (skb->pkt_type == PACKET_MULTICAST)
+		ifp->stats.multicast++;
+
+	/* Process special event packets */
+	brcmf_fweh_process_skb(ifp->drvr, skb);
+
+	if (!(ifp->ndev->flags & IFF_UP)) {
+		brcmu_pkt_buf_free_skb(skb);
+		return;
+	}
+
+	ifp->stats.rx_bytes += skb->len;
+	ifp->stats.rx_packets++;
+
+	brcmf_dbg(DATA, "rx proto=0x%X\n", ntohs(skb->protocol));
+	if (in_interrupt())
+		netif_rx(skb);
+	else
+		/* If the receive is not processed inside an ISR,
+		 * the softirqd must be woken explicitly to service
+		 * the NET_RX_SOFTIRQ.  This is handled by netif_rx_ni().
+		 */
+		netif_rx_ni(skb);
+}
+
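+/* Move all packets queued in the slot range [start, end) of a reorder flow
+ * onto @skb_list and drop them from the flow's bookkeeping. The walk is
+ * circular; when @start equals @end the whole ring is flushed.
+ */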
+static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi,
+					 u8 start, u8 end,
+					 struct sk_buff_head *skb_list)
+{
+	/* initialize return list */
+	__skb_queue_head_init(skb_list);
+
+	if (rfi->pend_pkts == 0) {
+		brcmf_dbg(INFO, "no packets in reorder queue\n");
+		return;
+	}
+
+	do {
+		if (rfi->pktslots[start]) {
+			__skb_queue_tail(skb_list, rfi->pktslots[start]);
+			rfi->pktslots[start] = NULL;
+		}
+		start++;
+		if (start > rfi->max_idx)
+			start = 0;
+	} while (start != end);
+	rfi->pend_pkts -= skb_queue_len(skb_list);
+}
+
+static void brcmf_rxreorder_process_info(struct brcmf_if *ifp, u8 *reorder_data,
+					 struct sk_buff *pkt)
+{
+	u8 flow_id, max_idx, cur_idx, exp_idx, end_idx;
+	struct brcmf_ampdu_rx_reorder *rfi;
+	struct sk_buff_head reorder_list;
+	struct sk_buff *pnext;
+	u8 flags;
+	u32 buf_size;
+
+	flow_id = reorder_data[BRCMF_RXREORDER_FLOWID_OFFSET];
+	flags = reorder_data[BRCMF_RXREORDER_FLAGS_OFFSET];
+
+	/* validate flags and flow id */
+	if (flags == 0xFF) {
+		brcmf_err("invalid flags...so ignore this packet\n");
+		brcmf_netif_rx(ifp, pkt);
+		return;
+	}
+
+	rfi = ifp->drvr->reorder_flows[flow_id];
+	if (flags & BRCMF_RXREORDER_DEL_FLOW) {
+		brcmf_dbg(INFO, "flow-%d: delete\n",
+			  flow_id);
+
+		if (rfi == NULL) {
+			brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
+				  flow_id);
+			brcmf_netif_rx(ifp, pkt);
+			return;
+		}
+
+		brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, rfi->exp_idx,
+					     &reorder_list);
+		/* add the last packet */
+		__skb_queue_tail(&reorder_list, pkt);
+		kfree(rfi);
+		ifp->drvr->reorder_flows[flow_id] = NULL;
+		goto netif_rx;
+	}
+	/* from here on we need a flow reorder instance */
+	if (rfi == NULL) {
+		buf_size = sizeof(*rfi);
+		max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
+
+		buf_size += (max_idx + 1) * sizeof(pkt);
+
+		/* allocate space for flow reorder info */
+		brcmf_dbg(INFO, "flow-%d: start, maxidx %d\n",
+			  flow_id, max_idx);
+		rfi = kzalloc(buf_size, GFP_ATOMIC);
+		if (rfi == NULL) {
+			brcmf_err("failed to alloc buffer\n");
+			brcmf_netif_rx(ifp, pkt);
+			return;
+		}
+
+		ifp->drvr->reorder_flows[flow_id] = rfi;
+		rfi->pktslots = (struct sk_buff **)(rfi+1);
+		rfi->max_idx = max_idx;
+	}
+	if (flags & BRCMF_RXREORDER_NEW_HOLE)  {
+		if (rfi->pend_pkts) {
+			brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx,
+						     rfi->exp_idx,
+						     &reorder_list);
+			WARN_ON(rfi->pend_pkts);
+		} else {
+			__skb_queue_head_init(&reorder_list);
+		}
+		rfi->cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
+		rfi->exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
+		rfi->max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
+		rfi->pktslots[rfi->cur_idx] = pkt;
+		rfi->pend_pkts++;
+		brcmf_dbg(DATA, "flow-%d: new hole %d (%d), pending %d\n",
+			  flow_id, rfi->cur_idx, rfi->exp_idx, rfi->pend_pkts);
+	} else if (flags & BRCMF_RXREORDER_CURIDX_VALID) {
+		cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
+		exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
+
+		if ((exp_idx == rfi->exp_idx) && (cur_idx != rfi->exp_idx)) {
+			/* still in the current hole */
+			/* enqueue the current on the buffer chain */
+			if (rfi->pktslots[cur_idx] != NULL) {
+				brcmf_dbg(INFO, "HOLE: ERROR buffer pending..free it\n");
+				brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
+				rfi->pktslots[cur_idx] = NULL;
+			}
+			rfi->pktslots[cur_idx] = pkt;
+			rfi->pend_pkts++;
+			rfi->cur_idx = cur_idx;
+			brcmf_dbg(DATA, "flow-%d: store pkt %d (%d), pending %d\n",
+				  flow_id, cur_idx, exp_idx, rfi->pend_pkts);
+
+			/* can return now as there is no reorder
+			 * list to process.
+			 */
+			return;
+		}
+		if (rfi->exp_idx == cur_idx) {
+			if (rfi->pktslots[cur_idx] != NULL) {
+				brcmf_dbg(INFO, "error buffer pending..free it\n");
+				brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
+				rfi->pktslots[cur_idx] = NULL;
+			}
+			rfi->pktslots[cur_idx] = pkt;
+			rfi->pend_pkts++;
+
+			/* got the expected one. flush from current to expected
+			 * and update expected
+			 */
+			brcmf_dbg(DATA, "flow-%d: expected %d (%d), pending %d\n",
+				  flow_id, cur_idx, exp_idx, rfi->pend_pkts);
+
+			rfi->cur_idx = cur_idx;
+			rfi->exp_idx = exp_idx;
+
+			brcmf_rxreorder_get_skb_list(rfi, cur_idx, exp_idx,
+						     &reorder_list);
+			brcmf_dbg(DATA, "flow-%d: freeing buffers %d, pending %d\n",
+				  flow_id, skb_queue_len(&reorder_list),
+				  rfi->pend_pkts);
+		} else {
+			brcmf_dbg(DATA, "flow-%d (0x%x): both moved, old %d/%d, new %d/%d\n",
+				  flow_id, flags, rfi->cur_idx, rfi->exp_idx,
+				  cur_idx, exp_idx);
+			if (flags & BRCMF_RXREORDER_FLUSH_ALL)
+				end_idx = rfi->exp_idx;
+			else
+				end_idx = exp_idx;
+
+			/* flush pkts first */
+			brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
+						     &reorder_list);
+
+			if (exp_idx == ((cur_idx + 1) % (rfi->max_idx + 1))) {
+				__skb_queue_tail(&reorder_list, pkt);
+			} else {
+				rfi->pktslots[cur_idx] = pkt;
+				rfi->pend_pkts++;
+			}
+			rfi->exp_idx = exp_idx;
+			rfi->cur_idx = cur_idx;
+		}
+	} else {
+		/* explicit window move, updating the expected index */
+		exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
+
+		brcmf_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n",
+			  flow_id, flags, rfi->exp_idx, exp_idx);
+		if (flags & BRCMF_RXREORDER_FLUSH_ALL)
+			end_idx =  rfi->exp_idx;
+		else
+			end_idx =  exp_idx;
+
+		brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
+					     &reorder_list);
+		__skb_queue_tail(&reorder_list, pkt);
+		/* set the new expected idx */
+		rfi->exp_idx = exp_idx;
+	}
+netif_rx:
+	skb_queue_walk_safe(&reorder_list, pkt, pnext) {
+		__skb_unlink(pkt, &reorder_list);
+		brcmf_netif_rx(ifp, pkt);
+	}
+}
+
+void brcmf_rx_frame(struct device *dev, struct sk_buff *skb)
+{
+	struct brcmf_if *ifp;
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_pub *drvr = bus_if->drvr;
+	struct brcmf_skb_reorder_data *rd;
+	int ret;
+
+	brcmf_dbg(DATA, "Enter: %s: rxp=%p\n", dev_name(dev), skb);
+
+	/* process and remove protocol-specific header */
+	ret = brcmf_proto_hdrpull(drvr, true, skb, &ifp);
+
+	if (ret || !ifp || !ifp->ndev) {
+		if (ret != -ENODATA && ifp)
+			ifp->stats.rx_errors++;
+		brcmu_pkt_buf_free_skb(skb);
+		return;
+	}
+
+	rd = (struct brcmf_skb_reorder_data *)skb->cb;
+	if (rd->reorder)
+		brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
+	else
+		brcmf_netif_rx(ifp, skb);
+}
+
+void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success)
+{
+	struct ethhdr *eh;
+	u16 type;
+
+	eh = (struct ethhdr *)(txp->data);
+	type = ntohs(eh->h_proto);
+
+	if (type == ETH_P_PAE) {
+		atomic_dec(&ifp->pend_8021x_cnt);
+		if (waitqueue_active(&ifp->pend_8021x_wait))
+			wake_up(&ifp->pend_8021x_wait);
+	}
+
+	if (!success)
+		ifp->stats.tx_errors++;
+
+	brcmu_pkt_buf_free_skb(txp);
+}
+
+void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_pub *drvr = bus_if->drvr;
+	struct brcmf_if *ifp;
+
+	/* await txstatus signal for firmware if active */
+	if (brcmf_fws_fc_active(drvr->fws)) {
+		if (!success)
+			brcmf_fws_bustxfail(drvr->fws, txp);
+	} else {
+		if (brcmf_proto_hdrpull(drvr, false, txp, &ifp))
+			brcmu_pkt_buf_free_skb(txp);
+		else
+			brcmf_txfinalize(ifp, txp, success);
+	}
+}
+
+static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
+{
+	struct brcmf_if *ifp = netdev_priv(ndev);
+
+	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
+
+	return &ifp->stats;
+}
+
+static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
+				    struct ethtool_drvinfo *info)
+{
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct brcmf_pub *drvr = ifp->drvr;
+	char drev[BRCMU_DOTREV_LEN] = "n/a";
+
+	if (drvr->revinfo.result == 0)
+		brcmu_dotrev_str(drvr->revinfo.driverrev, drev);
+	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+	strlcpy(info->version, drev, sizeof(info->version));
+	strlcpy(info->fw_version, drvr->fwver, sizeof(info->fw_version));
+	strlcpy(info->bus_info, dev_name(drvr->bus_if->dev),
+		sizeof(info->bus_info));
+}
+
+static const struct ethtool_ops brcmf_ethtool_ops = {
+	.get_drvinfo = brcmf_ethtool_get_drvinfo,
+};
+
+static int brcmf_netdev_stop(struct net_device *ndev)
+{
+	struct brcmf_if *ifp = netdev_priv(ndev);
+
+	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
+
+	brcmf_cfg80211_down(ndev);
+
+	brcmf_net_setcarrier(ifp, false);
+
+	return 0;
+}
+
+static int brcmf_netdev_open(struct net_device *ndev)
+{
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct brcmf_pub *drvr = ifp->drvr;
+	struct brcmf_bus *bus_if = drvr->bus_if;
+	u32 toe_ol;
+
+	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
+
+	/* If bus is not ready, can't continue */
+	if (bus_if->state != BRCMF_BUS_UP) {
+		brcmf_err("failed: bus is not ready\n");
+		return -EAGAIN;
+	}
+
+	atomic_set(&ifp->pend_8021x_cnt, 0);
+
+	/* Get current TOE mode from dongle */
+	if (brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_ol) >= 0
+	    && (toe_ol & TOE_TX_CSUM_OL) != 0)
+		ndev->features |= NETIF_F_IP_CSUM;
+	else
+		ndev->features &= ~NETIF_F_IP_CSUM;
+
+	if (brcmf_cfg80211_up(ndev)) {
+		brcmf_err("failed to bring up cfg80211\n");
+		return -EIO;
+	}
+
+	/* Clear carrier; it is set when connected or in AP mode. */
+	netif_carrier_off(ndev);
+	return 0;
+}
+
+static const struct net_device_ops brcmf_netdev_ops_pri = {
+	.ndo_open = brcmf_netdev_open,
+	.ndo_stop = brcmf_netdev_stop,
+	.ndo_get_stats = brcmf_netdev_get_stats,
+	.ndo_start_xmit = brcmf_netdev_start_xmit,
+	.ndo_set_mac_address = brcmf_netdev_set_mac_address,
+	.ndo_set_rx_mode = brcmf_netdev_set_multicast_list
+};
+
+int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
+{
+	struct brcmf_pub *drvr = ifp->drvr;
+	struct net_device *ndev;
+	s32 err;
+
+	brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
+		  ifp->mac_addr);
+	ndev = ifp->ndev;
+
+	/* set appropriate operations */
+	ndev->netdev_ops = &brcmf_netdev_ops_pri;
+
+	ndev->hard_header_len += drvr->hdrlen;
+	ndev->ethtool_ops = &brcmf_ethtool_ops;
+
+	drvr->rxsz = ndev->mtu + ndev->hard_header_len +
+			      drvr->hdrlen;
+
+	/* set the mac address */
+	memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
+
+	INIT_WORK(&ifp->setmacaddr_work, _brcmf_set_mac_address);
+	INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list);
+
+	if (rtnl_locked)
+		err = register_netdevice(ndev);
+	else
+		err = register_netdev(ndev);
+	if (err != 0) {
+		brcmf_err("couldn't register the net device\n");
+		goto fail;
+	}
+
+	brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);
+	return 0;
+
+fail:
+	drvr->iflist[ifp->bssidx] = NULL;
+	ndev->netdev_ops = NULL;
+	free_netdev(ndev);
+	return -EBADE;
+}
+
+static void brcmf_net_detach(struct net_device *ndev)
+{
+	if (ndev->reg_state == NETREG_REGISTERED)
+		unregister_netdev(ndev);
+	else
+		brcmf_cfg80211_free_netdev(ndev);
+}
+
+void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on)
+{
+	struct net_device *ndev;
+
+	brcmf_dbg(TRACE, "Enter, idx=%d carrier=%d\n", ifp->bssidx, on);
+
+	ndev = ifp->ndev;
+	brcmf_txflowblock_if(ifp, BRCMF_NETIF_STOP_REASON_DISCONNECTED, !on);
+	if (on) {
+		if (!netif_carrier_ok(ndev))
+			netif_carrier_on(ndev);
+
+	} else {
+		if (netif_carrier_ok(ndev))
+			netif_carrier_off(ndev);
+	}
+}
+
+static int brcmf_net_p2p_open(struct net_device *ndev)
+{
+	brcmf_dbg(TRACE, "Enter\n");
+
+	return brcmf_cfg80211_up(ndev);
+}
+
+static int brcmf_net_p2p_stop(struct net_device *ndev)
+{
+	brcmf_dbg(TRACE, "Enter\n");
+
+	return brcmf_cfg80211_down(ndev);
+}
+
+static netdev_tx_t brcmf_net_p2p_start_xmit(struct sk_buff *skb,
+					    struct net_device *ndev)
+{
+	if (skb)
+		dev_kfree_skb_any(skb);
+
+	return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops brcmf_netdev_ops_p2p = {
+	.ndo_open = brcmf_net_p2p_open,
+	.ndo_stop = brcmf_net_p2p_stop,
+	.ndo_start_xmit = brcmf_net_p2p_start_xmit
+};
+
+static int brcmf_net_p2p_attach(struct brcmf_if *ifp)
+{
+	struct net_device *ndev;
+
+	brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
+		  ifp->mac_addr);
+	ndev = ifp->ndev;
+
+	ndev->netdev_ops = &brcmf_netdev_ops_p2p;
+
+	/* set the mac address */
+	memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
+
+	if (register_netdev(ndev) != 0) {
+		brcmf_err("couldn't register the p2p net device\n");
+		goto fail;
+	}
+
+	brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);
+
+	return 0;
+
+fail:
+	ifp->drvr->iflist[ifp->bssidx] = NULL;
+	ndev->netdev_ops = NULL;
+	free_netdev(ndev);
+	return -EBADE;
+}
+
+struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
+			      bool is_p2pdev, char *name, u8 *mac_addr)
+{
+	struct brcmf_if *ifp;
+	struct net_device *ndev;
+
+	brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifidx);
+
+	ifp = drvr->iflist[bssidx];
+	/*
+	 * Delete the existing interface before overwriting it
+	 * in case we missed the BRCMF_E_IF_DEL event.
+	 */
+	if (ifp) {
+		brcmf_err("ERROR: netdev:%s already exists\n",
+			  ifp->ndev->name);
+		if (ifidx) {
+			netif_stop_queue(ifp->ndev);
+			brcmf_net_detach(ifp->ndev);
+			drvr->iflist[bssidx] = NULL;
+		} else {
+			brcmf_err("ignore IF event\n");
+			return ERR_PTR(-EINVAL);
+		}
+	}
+
+	if (!brcmf_p2p_enable && is_p2pdev) {
+		/* this is P2P_DEVICE interface */
+		brcmf_dbg(INFO, "allocate non-netdev interface\n");
+		ifp = kzalloc(sizeof(*ifp), GFP_KERNEL);
+		if (!ifp)
+			return ERR_PTR(-ENOMEM);
+	} else {
+		brcmf_dbg(INFO, "allocate netdev interface\n");
+		/* Allocate netdev, including space for private structure */
+		ndev = alloc_netdev(sizeof(*ifp), is_p2pdev ? "p2p%d" : name,
+				    NET_NAME_UNKNOWN, ether_setup);
+		if (!ndev)
+			return ERR_PTR(-ENOMEM);
+
+		ndev->destructor = brcmf_cfg80211_free_netdev;
+		ifp = netdev_priv(ndev);
+		ifp->ndev = ndev;
+		/* store mapping ifidx to bssidx */
+		if (drvr->if2bss[ifidx] == BRCMF_BSSIDX_INVALID)
+			drvr->if2bss[ifidx] = bssidx;
+	}
+
+	ifp->drvr = drvr;
+	drvr->iflist[bssidx] = ifp;
+	ifp->ifidx = ifidx;
+	ifp->bssidx = bssidx;
+
+	init_waitqueue_head(&ifp->pend_8021x_wait);
+	spin_lock_init(&ifp->netif_stop_lock);
+
+	if (mac_addr != NULL)
+		memcpy(ifp->mac_addr, mac_addr, ETH_ALEN);
+
+	brcmf_dbg(TRACE, " ==== pid:%x, if:%s (%pM) created ====\n",
+		  current->pid, name, ifp->mac_addr);
+
+	return ifp;
+}
+
+static void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx)
+{
+	struct brcmf_if *ifp;
+
+	ifp = drvr->iflist[bssidx];
+	drvr->iflist[bssidx] = NULL;
+	if (!ifp) {
+		brcmf_err("Null interface, idx=%d\n", bssidx);
+		return;
+	}
+	brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifp->ifidx);
+	if (drvr->if2bss[ifp->ifidx] == bssidx)
+		drvr->if2bss[ifp->ifidx] = BRCMF_BSSIDX_INVALID;
+	if (ifp->ndev) {
+		if (bssidx == 0) {
+			if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
+				rtnl_lock();
+				brcmf_netdev_stop(ifp->ndev);
+				rtnl_unlock();
+			}
+		} else {
+			netif_stop_queue(ifp->ndev);
+		}
+
+		if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
+			cancel_work_sync(&ifp->setmacaddr_work);
+			cancel_work_sync(&ifp->multicast_work);
+		}
+		brcmf_net_detach(ifp->ndev);
+	} else {
+		/* Only p2p device interfaces which get dynamically created
+		 * end up here. In this case the p2p module should be informed
+		 * about the removal of the interface within the firmware. If
+		 * not, p2p commands towards the firmware will cause some
+		 * serious side effects. The p2p module will clean
+		 * up the ifp if needed.
+		 */
+		brcmf_p2p_ifp_removed(ifp);
+		kfree(ifp);
+	}
+}
+
+void brcmf_remove_interface(struct brcmf_if *ifp)
+{
+	if (!ifp || WARN_ON(ifp->drvr->iflist[ifp->bssidx] != ifp))
+		return;
+	brcmf_dbg(TRACE, "Enter, bssidx=%d, ifidx=%d\n", ifp->bssidx,
+		  ifp->ifidx);
+	brcmf_fws_del_interface(ifp);
+	brcmf_del_if(ifp->drvr, ifp->bssidx);
+}
+
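+/* Pick a free bsscfg index for a new virtual interface. The candidate
+ * starts at 2 (index 0 is the primary interface and index 1 is used for
+ * the P2P device, see brcmf_bus_start()) and is bumped past the highest
+ * index already in use whenever it clashes with an existing interface.
+ * Returns the candidate, or -ENOMEM when no interface slot is free.
+ */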
+int brcmf_get_next_free_bsscfgidx(struct brcmf_pub *drvr)
+{
+	int ifidx;
+	int bsscfgidx;
+	bool available;
+	int highest;
+
+	available = false;
+	bsscfgidx = 2;
+	highest = 2;
+	for (ifidx = 0; ifidx < BRCMF_MAX_IFS; ifidx++) {
+		if (drvr->iflist[ifidx]) {
+			if (drvr->iflist[ifidx]->bssidx == bsscfgidx)
+				bsscfgidx = highest + 1;
+			else if (drvr->iflist[ifidx]->bssidx > highest)
+				highest = drvr->iflist[ifidx]->bssidx;
+		} else {
+			available = true;
+		}
+	}
+
+	return available ? bsscfgidx : -ENOMEM;
+}
+
+int brcmf_attach(struct device *dev)
+{
+	struct brcmf_pub *drvr = NULL;
+	int ret = 0;
+	int i;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	/* Allocate primary brcmf_info */
+	drvr = kzalloc(sizeof(struct brcmf_pub), GFP_ATOMIC);
+	if (!drvr)
+		return -ENOMEM;
+
+	for (i = 0; i < ARRAY_SIZE(drvr->if2bss); i++)
+		drvr->if2bss[i] = BRCMF_BSSIDX_INVALID;
+
+	mutex_init(&drvr->proto_block);
+
+	/* Link to bus module */
+	drvr->hdrlen = 0;
+	drvr->bus_if = dev_get_drvdata(dev);
+	drvr->bus_if->drvr = drvr;
+
+	/* attach debug facilities */
+	brcmf_debug_attach(drvr);
+
+	/* Attach and link in the protocol */
+	ret = brcmf_proto_attach(drvr);
+	if (ret != 0) {
+		brcmf_err("brcmf_prot_attach failed\n");
+		goto fail;
+	}
+
+	/* attach firmware event handler */
+	brcmf_fweh_attach(drvr);
+
+	return ret;
+
+fail:
+	brcmf_detach(dev);
+
+	return ret;
+}
+
+static int brcmf_revinfo_read(struct seq_file *s, void *data)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(s->private);
+	struct brcmf_rev_info *ri = &bus_if->drvr->revinfo;
+	char drev[BRCMU_DOTREV_LEN];
+	char brev[BRCMU_BOARDREV_LEN];
+
+	seq_printf(s, "vendorid: 0x%04x\n", ri->vendorid);
+	seq_printf(s, "deviceid: 0x%04x\n", ri->deviceid);
+	seq_printf(s, "radiorev: %s\n", brcmu_dotrev_str(ri->radiorev, drev));
+	seq_printf(s, "chipnum: %u (%x)\n", ri->chipnum, ri->chipnum);
+	seq_printf(s, "chiprev: %u\n", ri->chiprev);
+	seq_printf(s, "chippkg: %u\n", ri->chippkg);
+	seq_printf(s, "corerev: %u\n", ri->corerev);
+	seq_printf(s, "boardid: 0x%04x\n", ri->boardid);
+	seq_printf(s, "boardvendor: 0x%04x\n", ri->boardvendor);
+	seq_printf(s, "boardrev: %s\n", brcmu_boardrev_str(ri->boardrev, brev));
+	seq_printf(s, "driverrev: %s\n", brcmu_dotrev_str(ri->driverrev, drev));
+	seq_printf(s, "ucoderev: %u\n", ri->ucoderev);
+	seq_printf(s, "bus: %u\n", ri->bus);
+	seq_printf(s, "phytype: %u\n", ri->phytype);
+	seq_printf(s, "phyrev: %u\n", ri->phyrev);
+	seq_printf(s, "anarev: %u\n", ri->anarev);
+	seq_printf(s, "nvramrev: %08x\n", ri->nvramrev);
+
+	return 0;
+}
+
+int brcmf_bus_start(struct device *dev)
+{
+	int ret = -1;
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_pub *drvr = bus_if->drvr;
+	struct brcmf_if *ifp;
+	struct brcmf_if *p2p_ifp;
+
+	brcmf_dbg(TRACE, "\n");
+
+	/* add primary networking interface */
+	ifp = brcmf_add_if(drvr, 0, 0, false, "wlan%d", NULL);
+	if (IS_ERR(ifp))
+		return PTR_ERR(ifp);
+
+	p2p_ifp = NULL;
+
+	/* signal bus ready */
+	brcmf_bus_change_state(bus_if, BRCMF_BUS_UP);
+
+	/* Bus is ready, do any initialization */
+	ret = brcmf_c_preinit_dcmds(ifp);
+	if (ret < 0)
+		goto fail;
+
+	brcmf_debugfs_add_entry(drvr, "revinfo", brcmf_revinfo_read);
+
+	/* make sure we have the chip id before feature attach */
+	if (!bus_if->chip) {
+		bus_if->chip = drvr->revinfo.chipnum;
+		bus_if->chiprev = drvr->revinfo.chiprev;
+		brcmf_dbg(INFO, "firmware revinfo: chip %x (%d) rev %d\n",
+			  bus_if->chip, bus_if->chip, bus_if->chiprev);
+	}
+	brcmf_feat_attach(drvr);
+
+	ret = brcmf_fws_init(drvr);
+	if (ret < 0)
+		goto fail;
+
+	brcmf_fws_add_interface(ifp);
+
+	drvr->config = brcmf_cfg80211_attach(drvr, bus_if->dev,
+					     brcmf_p2p_enable);
+	if (drvr->config == NULL) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	ret = brcmf_net_attach(ifp, false);
+
+	if ((!ret) && (brcmf_p2p_enable)) {
+		p2p_ifp = drvr->iflist[1];
+		if (p2p_ifp)
+			ret = brcmf_net_p2p_attach(p2p_ifp);
+	}
+fail:
+	if (ret < 0) {
+		brcmf_err("failed: %d\n", ret);
+		if (drvr->config) {
+			brcmf_cfg80211_detach(drvr->config);
+			drvr->config = NULL;
+		}
+		if (drvr->fws) {
+			brcmf_fws_del_interface(ifp);
+			brcmf_fws_deinit(drvr);
+		}
+		if (ifp)
+			brcmf_net_detach(ifp->ndev);
+		if (p2p_ifp)
+			brcmf_net_detach(p2p_ifp->ndev);
+		return ret;
+	}
+	return 0;
+}
+
+void brcmf_bus_add_txhdrlen(struct device *dev, uint len)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_pub *drvr = bus_if->drvr;
+
+	if (drvr) {
+		drvr->hdrlen += len;
+	}
+}
+
+static void brcmf_bus_detach(struct brcmf_pub *drvr)
+{
+	brcmf_dbg(TRACE, "Enter\n");
+
+	if (drvr) {
+		/* Stop the bus module */
+		brcmf_bus_stop(drvr->bus_if);
+	}
+}
+
+void brcmf_dev_reset(struct device *dev)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_pub *drvr = bus_if->drvr;
+
+	if (drvr == NULL)
+		return;
+
+	if (drvr->iflist[0])
+		brcmf_fil_cmd_int_set(drvr->iflist[0], BRCMF_C_TERMINATED, 1);
+}
+
+void brcmf_detach(struct device *dev)
+{
+	s32 i;
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_pub *drvr = bus_if->drvr;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	if (drvr == NULL)
+		return;
+
+	/* stop firmware event handling */
+	brcmf_fweh_detach(drvr);
+	if (drvr->config)
+		brcmf_p2p_detach(&drvr->config->p2p);
+
+	brcmf_bus_change_state(bus_if, BRCMF_BUS_DOWN);
+
+	/* make sure primary interface removed last */
+	for (i = BRCMF_MAX_IFS-1; i > -1; i--)
+		brcmf_remove_interface(drvr->iflist[i]);
+
+	brcmf_cfg80211_detach(drvr->config);
+
+	brcmf_fws_deinit(drvr);
+
+	brcmf_bus_detach(drvr);
+
+	brcmf_proto_detach(drvr);
+
+	brcmf_debug_detach(drvr);
+	bus_if->drvr = NULL;
+	kfree(drvr);
+}
+
+s32 brcmf_iovar_data_set(struct device *dev, char *name, void *data, u32 len)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_if *ifp = bus_if->drvr->iflist[0];
+
+	return brcmf_fil_iovar_data_set(ifp, name, data, len);
+}
+
+static int brcmf_get_pend_8021x_cnt(struct brcmf_if *ifp)
+{
+	return atomic_read(&ifp->pend_8021x_cnt);
+}
+
+int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp)
+{
+	int err;
+
+	err = wait_event_timeout(ifp->pend_8021x_wait,
+				 !brcmf_get_pend_8021x_cnt(ifp),
+				 msecs_to_jiffies(MAX_WAIT_FOR_8021X_TX));
+
+	WARN_ON(!err);
+
+	return !err;
+}
+
+void brcmf_bus_change_state(struct brcmf_bus *bus, enum brcmf_bus_state state)
+{
+	struct brcmf_pub *drvr = bus->drvr;
+	struct net_device *ndev;
+	int ifidx;
+
+	brcmf_dbg(TRACE, "%d -> %d\n", bus->state, state);
+	bus->state = state;
+
+	if (state == BRCMF_BUS_UP) {
+		for (ifidx = 0; ifidx < BRCMF_MAX_IFS; ifidx++) {
+			if ((drvr->iflist[ifidx]) &&
+			    (drvr->iflist[ifidx]->ndev)) {
+				ndev = drvr->iflist[ifidx]->ndev;
+				if (netif_queue_stopped(ndev))
+					netif_wake_queue(ndev);
+			}
+		}
+	}
+}
+
+static void brcmf_driver_register(struct work_struct *work)
+{
+#ifdef CONFIG_BRCMFMAC_SDIO
+	brcmf_sdio_register();
+#endif
+#ifdef CONFIG_BRCMFMAC_USB
+	brcmf_usb_register();
+#endif
+#ifdef CONFIG_BRCMFMAC_PCIE
+	brcmf_pcie_register();
+#endif
+}
+static DECLARE_WORK(brcmf_driver_work, brcmf_driver_register);
+
+static int __init brcmfmac_module_init(void)
+{
+	brcmf_debugfs_init();
+#ifdef CONFIG_BRCMFMAC_SDIO
+	brcmf_sdio_init();
+#endif
+	if (!schedule_work(&brcmf_driver_work))
+		return -EBUSY;
+
+	return 0;
+}
+
+static void __exit brcmfmac_module_exit(void)
+{
+	cancel_work_sync(&brcmf_driver_work);
+
+#ifdef CONFIG_BRCMFMAC_SDIO
+	brcmf_sdio_exit();
+#endif
+#ifdef CONFIG_BRCMFMAC_USB
+	brcmf_usb_exit();
+#endif
+#ifdef CONFIG_BRCMFMAC_PCIE
+	brcmf_pcie_exit();
+#endif
+	brcmf_debugfs_exit();
+}
+
+module_init(brcmfmac_module_init);
+module_exit(brcmfmac_module_exit);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/core.h b/drivers/net/wireless/brcm80211/brcmfmac/core.h
new file mode 100644
index 0000000..2f9101b
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/core.h
@@ -0,0 +1,221 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/****************
+ * Common types *
+ */
+
+#ifndef BRCMFMAC_CORE_H
+#define BRCMFMAC_CORE_H
+
+#include <net/cfg80211.h>
+#include "fweh.h"
+
+#define TOE_TX_CSUM_OL		0x00000001
+#define TOE_RX_CSUM_OL		0x00000002
+
+/* For supporting multiple interfaces */
+#define BRCMF_MAX_IFS	16
+
+/* Small, medium and maximum buffer size for dcmd */
+#define BRCMF_DCMD_SMLEN	256
+#define BRCMF_DCMD_MEDLEN	1536
+#define BRCMF_DCMD_MAXLEN	8192
+
+/* IOCTLs from host to device are limited in length. A device can only handle
+ * ethernet frame sized buffers, so this limit is to be enforced by the
+ * protocol layer.
+ */
+#define BRCMF_TX_IOCTL_MAX_MSG_SIZE	(ETH_FRAME_LEN+ETH_FCS_LEN)
+
+#define BRCMF_AMPDU_RX_REORDER_MAXFLOWS		256
+
+/* Length of firmware version string stored for
+ * ethtool driver info which uses 32 bytes as well.
+ */
+#define BRCMF_DRIVER_FIRMWARE_VERSION_LEN	32
+
+/**
+ * struct brcmf_ampdu_rx_reorder - AMPDU receive reorder info
+ *
+ * @pktslots: dynamically allocated array for ordering AMPDU packets.
+ * @flow_id: AMPDU flow identifier.
+ * @cur_idx: last AMPDU index from firmware.
+ * @exp_idx: expected next AMPDU index.
+ * @max_idx: maximum number of packets per AMPDU.
+ * @pend_pkts: number of packets currently in @pktslots.
+ */
+struct brcmf_ampdu_rx_reorder {
+	struct sk_buff **pktslots;
+	u8 flow_id;
+	u8 cur_idx;
+	u8 exp_idx;
+	u8 max_idx;
+	u8 pend_pkts;
+};
+
+/* Forward decls for struct brcmf_pub (see below) */
+struct brcmf_proto;	/* device communication protocol info */
+struct brcmf_cfg80211_dev; /* cfg80211 device info */
+struct brcmf_fws_info; /* firmware signalling info */
+
+/*
+ * struct brcmf_rev_info
+ *
+ * The result field stores the error code of the
+ * revision info request from firmware. For the
+ * other fields see struct brcmf_rev_info_le in
+ * fwil_types.h
+ */
+struct brcmf_rev_info {
+	int result;
+	u32 vendorid;
+	u32 deviceid;
+	u32 radiorev;
+	u32 chiprev;
+	u32 corerev;
+	u32 boardid;
+	u32 boardvendor;
+	u32 boardrev;
+	u32 driverrev;
+	u32 ucoderev;
+	u32 bus;
+	u32 chipnum;
+	u32 phytype;
+	u32 phyrev;
+	u32 anarev;
+	u32 chippkg;
+	u32 nvramrev;
+};
+
+/* Common structure for module and instance linkage */
+struct brcmf_pub {
+	/* Linkage pointers */
+	struct brcmf_bus *bus_if;
+	struct brcmf_proto *proto;
+	struct brcmf_cfg80211_info *config;
+
+	/* Internal brcmf items */
+	uint hdrlen;		/* Total BRCMF header length (proto + bus) */
+	uint rxsz;		/* Rx buffer size bus module should use */
+
+	/* Dongle media info */
+	char fwver[BRCMF_DRIVER_FIRMWARE_VERSION_LEN];
+	u8 mac[ETH_ALEN];		/* MAC address obtained from dongle */
+
+	/* Multicast data packets sent to dongle */
+	unsigned long tx_multicast;
+
+	struct mac_address addresses[BRCMF_MAX_IFS];
+
+	struct brcmf_if *iflist[BRCMF_MAX_IFS];
+	s32 if2bss[BRCMF_MAX_IFS];
+
+	struct mutex proto_block;
+	unsigned char proto_buf[BRCMF_DCMD_MAXLEN];
+
+	struct brcmf_fweh_info fweh;
+
+	struct brcmf_fws_info *fws;
+
+	struct brcmf_ampdu_rx_reorder
+		*reorder_flows[BRCMF_AMPDU_RX_REORDER_MAXFLOWS];
+
+	u32 feat_flags;
+	u32 chip_quirks;
+
+	struct brcmf_rev_info revinfo;
+#ifdef DEBUG
+	struct dentry *dbgfs_dir;
+#endif
+};
+
+/* forward declarations */
+struct brcmf_cfg80211_vif;
+struct brcmf_fws_mac_descriptor;
+
+/**
+ * enum brcmf_netif_stop_reason - reason for stopping netif queue.
+ *
+ * @BRCMF_NETIF_STOP_REASON_FWS_FC:
+ *	netif stopped due to firmware signalling flow control.
+ * @BRCMF_NETIF_STOP_REASON_FLOW:
+ *	netif stopped due to flowring full.
+ * @BRCMF_NETIF_STOP_REASON_DISCONNECTED:
+ *	netif stopped due to not being connected (STA mode).
+ */
+enum brcmf_netif_stop_reason {
+	BRCMF_NETIF_STOP_REASON_FWS_FC = BIT(0),
+	BRCMF_NETIF_STOP_REASON_FLOW = BIT(1),
+	BRCMF_NETIF_STOP_REASON_DISCONNECTED = BIT(2)
+};
+
+/**
+ * struct brcmf_if - interface control information.
+ *
+ * @drvr: points to device related information.
+ * @vif: points to cfg80211 specific interface information.
+ * @ndev: associated network device.
+ * @stats: interface specific network statistics.
+ * @setmacaddr_work: worker object for setting mac address.
+ * @multicast_work: worker object for multicast provisioning.
+ * @fws_desc: interface specific firmware-signalling descriptor.
+ * @ifidx: interface index in device firmware.
+ * @bssidx: index of bss associated with this interface.
+ * @mac_addr: assigned mac address.
+ * @netif_stop: bitmap indicating why the netif queues are stopped.
+ * @netif_stop_lock: spinlock protecting updates of @netif_stop.
+ * @pend_8021x_cnt: tracks outstanding number of 802.1x frames.
+ * @pend_8021x_wait: used for signalling change in count.
+ */
+struct brcmf_if {
+	struct brcmf_pub *drvr;
+	struct brcmf_cfg80211_vif *vif;
+	struct net_device *ndev;
+	struct net_device_stats stats;
+	struct work_struct setmacaddr_work;
+	struct work_struct multicast_work;
+	struct brcmf_fws_mac_descriptor *fws_desc;
+	int ifidx;
+	s32 bssidx;
+	u8 mac_addr[ETH_ALEN];
+	u8 netif_stop;
+	spinlock_t netif_stop_lock;
+	atomic_t pend_8021x_cnt;
+	wait_queue_head_t pend_8021x_wait;
+};
+
+struct brcmf_skb_reorder_data {
+	u8 *reorder;
+};
+
+int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp);
+
+/* Return pointer to interface name */
+char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
+struct brcmf_if *brcmf_get_ifp(struct brcmf_pub *drvr, int ifidx);
+int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked);
+struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
+			      bool is_p2pdev, char *name, u8 *mac_addr);
+void brcmf_remove_interface(struct brcmf_if *ifp);
+int brcmf_get_next_free_bsscfgidx(struct brcmf_pub *drvr);
+void brcmf_txflowblock_if(struct brcmf_if *ifp,
+			  enum brcmf_netif_stop_reason reason, bool state);
+void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success);
+void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb);
+void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on);
+
+#endif /* BRCMFMAC_CORE_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/debug.c b/drivers/net/wireless/brcm80211/brcmfmac/debug.c
new file mode 100644
index 0000000..1299dcc
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/debug.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/debugfs.h>
+#include <linux/netdevice.h>
+#include <linux/module.h>
+#include <linux/devcoredump.h>
+
+#include <brcmu_wifi.h>
+#include <brcmu_utils.h>
+#include "core.h"
+#include "bus.h"
+#include "fweh.h"
+#include "debug.h"
+
+static struct dentry *root_folder;
+
+static int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data,
+				      size_t len)
+{
+	void *dump;
+	size_t ramsize;
+
+	ramsize = brcmf_bus_get_ramsize(bus);
+	if (ramsize) {
+		dump = vzalloc(len + ramsize);
+		if (!dump)
+			return -ENOMEM;
+		memcpy(dump, data, len);
+		brcmf_bus_get_memdump(bus, dump + len, ramsize);
+		dev_coredumpv(bus->dev, dump, len + ramsize, GFP_KERNEL);
+	}
+	return 0;
+}
+
+static int brcmf_debug_psm_watchdog_notify(struct brcmf_if *ifp,
+					   const struct brcmf_event_msg *evtmsg,
+					   void *data)
+{
+	brcmf_dbg(TRACE, "enter: idx=%d\n", ifp->bssidx);
+
+	return brcmf_debug_create_memdump(ifp->drvr->bus_if, data,
+					  evtmsg->datalen);
+}
+
+void brcmf_debugfs_init(void)
+{
+	root_folder = debugfs_create_dir(KBUILD_MODNAME, NULL);
+	if (IS_ERR(root_folder))
+		root_folder = NULL;
+}
+
+void brcmf_debugfs_exit(void)
+{
+	if (!root_folder)
+		return;
+
+	debugfs_remove_recursive(root_folder);
+	root_folder = NULL;
+}
+
+int brcmf_debug_attach(struct brcmf_pub *drvr)
+{
+	struct device *dev = drvr->bus_if->dev;
+
+	if (!root_folder)
+		return -ENODEV;
+
+	drvr->dbgfs_dir = debugfs_create_dir(dev_name(dev), root_folder);
+	if (IS_ERR(drvr->dbgfs_dir))
+		return PTR_ERR(drvr->dbgfs_dir);
+
+	return brcmf_fweh_register(drvr, BRCMF_E_PSM_WATCHDOG,
+				   brcmf_debug_psm_watchdog_notify);
+}
+
+void brcmf_debug_detach(struct brcmf_pub *drvr)
+{
+	brcmf_fweh_unregister(drvr, BRCMF_E_PSM_WATCHDOG);
+
+	if (!IS_ERR_OR_NULL(drvr->dbgfs_dir))
+		debugfs_remove_recursive(drvr->dbgfs_dir);
+}
+
+struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr)
+{
+	return drvr->dbgfs_dir;
+}
+
+int brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn,
+			    int (*read_fn)(struct seq_file *seq, void *data))
+{
+	struct dentry *e;
+
+	e = debugfs_create_devm_seqfile(drvr->bus_if->dev, fn,
+					drvr->dbgfs_dir, read_fn);
+	return PTR_ERR_OR_ZERO(e);
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/debug.h b/drivers/net/wireless/brcm80211/brcmfmac/debug.h
new file mode 100644
index 0000000..d0d9676
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/debug.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef BRCMFMAC_DEBUG_H
+#define BRCMFMAC_DEBUG_H
+
+/* message levels */
+#define BRCMF_TRACE_VAL		0x00000002
+#define BRCMF_INFO_VAL		0x00000004
+#define BRCMF_DATA_VAL		0x00000008
+#define BRCMF_CTL_VAL		0x00000010
+#define BRCMF_TIMER_VAL		0x00000020
+#define BRCMF_HDRS_VAL		0x00000040
+#define BRCMF_BYTES_VAL		0x00000080
+#define BRCMF_INTR_VAL		0x00000100
+#define BRCMF_GLOM_VAL		0x00000200
+#define BRCMF_EVENT_VAL		0x00000400
+#define BRCMF_BTA_VAL		0x00000800
+#define BRCMF_FIL_VAL		0x00001000
+#define BRCMF_USB_VAL		0x00002000
+#define BRCMF_SCAN_VAL		0x00004000
+#define BRCMF_CONN_VAL		0x00008000
+#define BRCMF_BCDC_VAL		0x00010000
+#define BRCMF_SDIO_VAL		0x00020000
+#define BRCMF_MSGBUF_VAL	0x00040000
+#define BRCMF_PCIE_VAL		0x00080000
+#define BRCMF_FWCON_VAL		0x00100000
+
+/* set default print format */
+#undef pr_fmt
+#define pr_fmt(fmt)		KBUILD_MODNAME ": " fmt
+
+/* Macro for error messages. net_ratelimit() is used when driver
+ * debugging is not selected. When debugging the driver, error
+ * messages are as important as other tracing, or even more so.
+ */
+#ifndef CONFIG_BRCM_TRACING
+#ifdef CONFIG_BRCMDBG
+#define brcmf_err(fmt, ...)	pr_err("%s: " fmt, __func__, ##__VA_ARGS__)
+#else
+#define brcmf_err(fmt, ...)						\
+	do {								\
+		if (net_ratelimit())					\
+			pr_err("%s: " fmt, __func__, ##__VA_ARGS__);	\
+	} while (0)
+#endif
+#else
+__printf(2, 3)
+void __brcmf_err(const char *func, const char *fmt, ...);
+#define brcmf_err(fmt, ...) \
+	__brcmf_err(__func__, fmt, ##__VA_ARGS__)
+#endif
+
+#if defined(DEBUG) || defined(CONFIG_BRCM_TRACING)
+__printf(3, 4)
+void __brcmf_dbg(u32 level, const char *func, const char *fmt, ...);
+#define brcmf_dbg(level, fmt, ...)				\
+do {								\
+	__brcmf_dbg(BRCMF_##level##_VAL, __func__,		\
+		    fmt, ##__VA_ARGS__);			\
+} while (0)
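+/* For example, brcmf_dbg(TRACE, "Enter\n") expands to a call of
+ * __brcmf_dbg(BRCMF_TRACE_VAL, __func__, "Enter\n") and, in this driver,
+ * only produces output when the BRCMF_TRACE_VAL bit is set in
+ * brcmf_msg_level.
+ */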
+#define BRCMF_DATA_ON()		(brcmf_msg_level & BRCMF_DATA_VAL)
+#define BRCMF_CTL_ON()		(brcmf_msg_level & BRCMF_CTL_VAL)
+#define BRCMF_HDRS_ON()		(brcmf_msg_level & BRCMF_HDRS_VAL)
+#define BRCMF_BYTES_ON()	(brcmf_msg_level & BRCMF_BYTES_VAL)
+#define BRCMF_GLOM_ON()		(brcmf_msg_level & BRCMF_GLOM_VAL)
+#define BRCMF_EVENT_ON()	(brcmf_msg_level & BRCMF_EVENT_VAL)
+#define BRCMF_FIL_ON()		(brcmf_msg_level & BRCMF_FIL_VAL)
+#define BRCMF_FWCON_ON()	(brcmf_msg_level & BRCMF_FWCON_VAL)
+
+#else /* defined(DEBUG) || defined(CONFIG_BRCM_TRACING) */
+
+#define brcmf_dbg(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
+
+#define BRCMF_DATA_ON()		0
+#define BRCMF_CTL_ON()		0
+#define BRCMF_HDRS_ON()		0
+#define BRCMF_BYTES_ON()	0
+#define BRCMF_GLOM_ON()		0
+#define BRCMF_EVENT_ON()	0
+#define BRCMF_FIL_ON()		0
+#define BRCMF_FWCON_ON()	0
+
+#endif /* defined(DEBUG) || defined(CONFIG_BRCM_TRACING) */
+
+#define brcmf_dbg_hex_dump(test, data, len, fmt, ...)			\
+do {									\
+	trace_brcmf_hexdump((void *)data, len);				\
+	if (test)							\
+		brcmu_dbg_hex_dump(data, len, fmt, ##__VA_ARGS__);	\
+} while (0)
+
+extern int brcmf_msg_level;
+
+struct brcmf_pub;
+#ifdef DEBUG
+void brcmf_debugfs_init(void);
+void brcmf_debugfs_exit(void);
+int brcmf_debug_attach(struct brcmf_pub *drvr);
+void brcmf_debug_detach(struct brcmf_pub *drvr);
+struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr);
+int brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn,
+			    int (*read_fn)(struct seq_file *seq, void *data));
+#else
+static inline void brcmf_debugfs_init(void)
+{
+}
+static inline void brcmf_debugfs_exit(void)
+{
+}
+static inline int brcmf_debug_attach(struct brcmf_pub *drvr)
+{
+	return 0;
+}
+static inline void brcmf_debug_detach(struct brcmf_pub *drvr)
+{
+}
+static inline
+int brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn,
+			    int (*read_fn)(struct seq_file *seq, void *data))
+{
+	return 0;
+}
+#endif
+
+#endif /* BRCMFMAC_DEBUG_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/brcm80211/brcmfmac/feature.c
new file mode 100644
index 0000000..44bb306
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/feature.c
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/module.h>
+
+#include <brcm_hw_ids.h>
+#include "core.h"
+#include "bus.h"
+#include "debug.h"
+#include "fwil.h"
+#include "feature.h"
+
+
+/* Module param feature_disable (global for all devices) */
+static int brcmf_feature_disable;
+module_param_named(feature_disable, brcmf_feature_disable, int, 0);
+MODULE_PARM_DESC(feature_disable, "Disable features");
+
+/*
+ * expand feature list to array of feature strings.
+ */
+#define BRCMF_FEAT_DEF(_f) \
+	#_f,
+static const char *brcmf_feat_names[] = {
+	BRCMF_FEAT_LIST
+};
+#undef BRCMF_FEAT_DEF
+
+#ifdef DEBUG
+/*
+ * expand quirk list to array of quirk strings.
+ */
+#define BRCMF_QUIRK_DEF(_q) \
+	#_q,
+static const char * const brcmf_quirk_names[] = {
+	BRCMF_QUIRK_LIST
+};
+#undef BRCMF_QUIRK_DEF
+
+/**
+ * brcmf_feat_debugfs_read() - expose feature info to debugfs.
+ *
+ * @seq: sequence for debugfs entry.
+ * @data: raw data pointer.
+ */
+static int brcmf_feat_debugfs_read(struct seq_file *seq, void *data)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(seq->private);
+	u32 feats = bus_if->drvr->feat_flags;
+	u32 quirks = bus_if->drvr->chip_quirks;
+	int id;
+
+	seq_printf(seq, "Features: %08x\n", feats);
+	for (id = 0; id < BRCMF_FEAT_LAST; id++)
+		if (feats & BIT(id))
+			seq_printf(seq, "\t%s\n", brcmf_feat_names[id]);
+	seq_printf(seq, "\nQuirks:   %08x\n", quirks);
+	for (id = 0; id < BRCMF_FEAT_QUIRK_LAST; id++)
+		if (quirks & BIT(id))
+			seq_printf(seq, "\t%s\n", brcmf_quirk_names[id]);
+	return 0;
+}
+#else
+static int brcmf_feat_debugfs_read(struct seq_file *seq, void *data)
+{
+	return 0;
+}
+#endif /* DEBUG */
+
+/**
+ * brcmf_feat_iovar_int_get() - determine feature through iovar query.
+ *
+ * @ifp: interface to query.
+ * @id: feature id.
+ * @name: iovar name.
+ */
+static void brcmf_feat_iovar_int_get(struct brcmf_if *ifp,
+				     enum brcmf_feat_id id, char *name)
+{
+	u32 data;
+	int err;
+
+	err = brcmf_fil_iovar_int_get(ifp, name, &data);
+	if (err == 0) {
+		brcmf_dbg(INFO, "enabling feature: %s\n", brcmf_feat_names[id]);
+		ifp->drvr->feat_flags |= BIT(id);
+	} else {
+		brcmf_dbg(TRACE, "%s feature check failed: %d\n",
+			  brcmf_feat_names[id], err);
+	}
+}
+
+/**
+ * brcmf_feat_iovar_int_set() - determine feature through iovar set.
+ *
+ * @ifp: interface to query.
+ * @id: feature id.
+ * @name: iovar name.
+ */
+static void brcmf_feat_iovar_int_set(struct brcmf_if *ifp,
+				     enum brcmf_feat_id id, char *name, u32 val)
+{
+	int err;
+
+	err = brcmf_fil_iovar_int_set(ifp, name, val);
+	if (err == 0) {
+		brcmf_dbg(INFO, "enabling feature: %s\n", brcmf_feat_names[id]);
+		ifp->drvr->feat_flags |= BIT(id);
+	} else {
+		brcmf_dbg(TRACE, "%s feature check failed: %d\n",
+			  brcmf_feat_names[id], err);
+	}
+}
+
+void brcmf_feat_attach(struct brcmf_pub *drvr)
+{
+	struct brcmf_if *ifp = brcmf_get_ifp(drvr, 0);
+
+	brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_MCHAN, "mchan");
+	brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_PNO, "pfn");
+	if (drvr->bus_if->wowl_supported)
+		brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl");
+	if (drvr->bus_if->chip != BRCM_CC_43362_CHIP_ID)
+		brcmf_feat_iovar_int_set(ifp, BRCMF_FEAT_MBSS, "mbss", 0);
+	brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_P2P, "p2p");
+
+	if (brcmf_feature_disable) {
+		brcmf_dbg(INFO, "Features: 0x%02x, disable: 0x%02x\n",
+			  ifp->drvr->feat_flags, brcmf_feature_disable);
+		ifp->drvr->feat_flags &= ~brcmf_feature_disable;
+	}
+
+	/* set chip related quirks */
+	switch (drvr->bus_if->chip) {
+	case BRCM_CC_43236_CHIP_ID:
+		drvr->chip_quirks |= BIT(BRCMF_FEAT_QUIRK_AUTO_AUTH);
+		break;
+	case BRCM_CC_4329_CHIP_ID:
+		drvr->chip_quirks |= BIT(BRCMF_FEAT_QUIRK_NEED_MPC);
+		break;
+	default:
+		/* no quirks */
+		break;
+	}
+
+	brcmf_debugfs_add_entry(drvr, "features", brcmf_feat_debugfs_read);
+}
+
+bool brcmf_feat_is_enabled(struct brcmf_if *ifp, enum brcmf_feat_id id)
+{
+	return (ifp->drvr->feat_flags & BIT(id));
+}
+
+bool brcmf_feat_is_quirk_enabled(struct brcmf_if *ifp,
+				 enum brcmf_feat_quirk quirk)
+{
+	return (ifp->drvr->chip_quirks & BIT(quirk));
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/brcm80211/brcmfmac/feature.h
new file mode 100644
index 0000000..6b381f7
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/feature.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef _BRCMF_FEATURE_H
+#define _BRCMF_FEATURE_H
+
+/*
+ * Features:
+ *
+ * MBSS: multiple BSSID support (e.g. guest network in AP mode).
+ * MCHAN: multi-channel for concurrent P2P.
+ * PNO: preferred network offload.
+ * WOWL: Wake-On-WLAN.
+ * P2P: peer-to-peer support.
+ */
+#define BRCMF_FEAT_LIST \
+	BRCMF_FEAT_DEF(MBSS) \
+	BRCMF_FEAT_DEF(MCHAN) \
+	BRCMF_FEAT_DEF(PNO) \
+	BRCMF_FEAT_DEF(WOWL) \
+	BRCMF_FEAT_DEF(P2P)
+/*
+ * Quirks:
+ *
+ * AUTO_AUTH: workaround needed for automatic authentication type.
+ * NEED_MPC: driver needs to disable MPC during scanning operation.
+ */
+#define BRCMF_QUIRK_LIST \
+	BRCMF_QUIRK_DEF(AUTO_AUTH) \
+	BRCMF_QUIRK_DEF(NEED_MPC)
+
+#define BRCMF_FEAT_DEF(_f) \
+	BRCMF_FEAT_ ## _f,
+/*
+ * expand feature list to enumeration.
+ */
+enum brcmf_feat_id {
+	BRCMF_FEAT_LIST
+	BRCMF_FEAT_LAST
+};
+#undef BRCMF_FEAT_DEF
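+/* With the feature list above this expands to:
+ *
+ *	enum brcmf_feat_id {
+ *		BRCMF_FEAT_MBSS,
+ *		BRCMF_FEAT_MCHAN,
+ *		BRCMF_FEAT_PNO,
+ *		BRCMF_FEAT_WOWL,
+ *		BRCMF_FEAT_P2P,
+ *		BRCMF_FEAT_LAST
+ *	};
+ *
+ * feature.c expands the same list into the brcmf_feat_names[] string array,
+ * keeping enum values and feature names in sync.
+ */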
+
+#define BRCMF_QUIRK_DEF(_q) \
+	BRCMF_FEAT_QUIRK_ ## _q,
+/*
+ * expand quirk list to enumeration.
+ */
+enum brcmf_feat_quirk {
+	BRCMF_QUIRK_LIST
+	BRCMF_FEAT_QUIRK_LAST
+};
+#undef BRCMF_QUIRK_DEF
+
+/**
+ * brcmf_feat_attach() - determine features and quirks.
+ *
+ * @drvr: driver instance.
+ */
+void brcmf_feat_attach(struct brcmf_pub *drvr);
+
+/**
+ * brcmf_feat_is_enabled() - query feature.
+ *
+ * @ifp: interface instance.
+ * @id: feature id to check.
+ *
+ * Return: true if feature is enabled; otherwise false.
+ */
+bool brcmf_feat_is_enabled(struct brcmf_if *ifp, enum brcmf_feat_id id);
+
+/**
+ * brcmf_feat_is_quirk_enabled() - query chip quirk.
+ *
+ * @ifp: interface instance.
+ * @quirk: quirk id to check.
+ *
+ * Return: true if quirk is enabled; otherwise false.
+ */
+bool brcmf_feat_is_quirk_enabled(struct brcmf_if *ifp,
+				 enum brcmf_feat_quirk quirk);
+
+#endif /* _BRCMF_FEATURE_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
new file mode 100644
index 0000000..4248f3c
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
@@ -0,0 +1,542 @@
+/*
+ * Copyright (c) 2013 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/bcm47xx_nvram.h>
+
+#include "debug.h"
+#include "firmware.h"
+
+#define BRCMF_FW_MAX_NVRAM_SIZE			64000
+#define BRCMF_FW_NVRAM_DEVPATH_LEN		19	/* devpath0=pcie/1/4/ */
+#define BRCMF_FW_NVRAM_PCIEDEV_LEN		10	/* pcie/1/4/ + \0 */
+
+char brcmf_firmware_path[BRCMF_FW_PATH_LEN];
+module_param_string(alternative_fw_path, brcmf_firmware_path,
+		    BRCMF_FW_PATH_LEN, 0440);
+
+enum nvram_parser_state {
+	IDLE,
+	KEY,
+	VALUE,
+	COMMENT,
+	END
+};
+
+/**
+ * struct nvram_parser - internal info for parser.
+ *
+ * @state: current parser state.
+ * @data: input buffer being parsed.
+ * @nvram: output buffer with parse result.
+ * @nvram_len: length of parse result.
+ * @line: current line.
+ * @column: current column in line.
+ * @pos: byte offset in input buffer.
+ * @entry: start position of key,value entry.
+ * @multi_dev_v1: detect pcie multi device v1 (compressed).
+ * @multi_dev_v2: detect pcie multi device v2.
+ */
+struct nvram_parser {
+	enum nvram_parser_state state;
+	const u8 *data;
+	u8 *nvram;
+	u32 nvram_len;
+	u32 line;
+	u32 column;
+	u32 pos;
+	u32 entry;
+	bool multi_dev_v1;
+	bool multi_dev_v2;
+};
+
+/**
+ * is_nvram_char() - check if char is a valid one for NVRAM entry
+ *
+ * It accepts all printable ASCII chars except for '#' which opens a comment.
+ * Please note that ' ' (space), while accepted, is not a valid key name char.
+ */
+static bool is_nvram_char(char c)
+{
+	/* comment marker excluded */
+	if (c == '#')
+		return false;
+
+	/* key and value may have any other readable character */
+	return (c >= 0x20 && c < 0x7f);
+}
+
+static bool is_whitespace(char c)
+{
+	return (c == ' ' || c == '\r' || c == '\n' || c == '\t');
+}
+
+static enum nvram_parser_state brcmf_nvram_handle_idle(struct nvram_parser *nvp)
+{
+	char c;
+
+	c = nvp->data[nvp->pos];
+	if (c == '\n')
+		return COMMENT;
+	if (is_whitespace(c))
+		goto proceed;
+	if (c == '#')
+		return COMMENT;
+	if (is_nvram_char(c)) {
+		nvp->entry = nvp->pos;
+		return KEY;
+	}
+	brcmf_dbg(INFO, "warning: ln=%d:col=%d: ignoring invalid character\n",
+		  nvp->line, nvp->column);
+proceed:
+	nvp->column++;
+	nvp->pos++;
+	return IDLE;
+}
+
+static enum nvram_parser_state brcmf_nvram_handle_key(struct nvram_parser *nvp)
+{
+	enum nvram_parser_state st = nvp->state;
+	char c;
+
+	c = nvp->data[nvp->pos];
+	if (c == '=') {
+		/* ignore RAW1 by treating as comment */
+		if (strncmp(&nvp->data[nvp->entry], "RAW1", 4) == 0)
+			st = COMMENT;
+		else
+			st = VALUE;
+		if (strncmp(&nvp->data[nvp->entry], "devpath", 7) == 0)
+			nvp->multi_dev_v1 = true;
+		if (strncmp(&nvp->data[nvp->entry], "pcie/", 5) == 0)
+			nvp->multi_dev_v2 = true;
+	} else if (!is_nvram_char(c) || c == ' ') {
+		brcmf_dbg(INFO, "warning: ln=%d:col=%d: '=' expected, skip invalid key entry\n",
+			  nvp->line, nvp->column);
+		return COMMENT;
+	}
+
+	nvp->column++;
+	nvp->pos++;
+	return st;
+}
+
+static enum nvram_parser_state
+brcmf_nvram_handle_value(struct nvram_parser *nvp)
+{
+	char c;
+	char *skv;
+	char *ekv;
+	u32 cplen;
+
+	c = nvp->data[nvp->pos];
+	if (!is_nvram_char(c)) {
+		/* key,value pair complete */
+		ekv = (char *)&nvp->data[nvp->pos];
+		skv = (char *)&nvp->data[nvp->entry];
+		cplen = ekv - skv;
+		if (nvp->nvram_len + cplen + 1 >= BRCMF_FW_MAX_NVRAM_SIZE)
+			return END;
+		/* copy to output buffer */
+		memcpy(&nvp->nvram[nvp->nvram_len], skv, cplen);
+		nvp->nvram_len += cplen;
+		nvp->nvram[nvp->nvram_len] = '\0';
+		nvp->nvram_len++;
+		return IDLE;
+	}
+	nvp->pos++;
+	nvp->column++;
+	return VALUE;
+}
+
+static enum nvram_parser_state
+brcmf_nvram_handle_comment(struct nvram_parser *nvp)
+{
+	char *eoc, *sol;
+
+	sol = (char *)&nvp->data[nvp->pos];
+	eoc = strchr(sol, '\n');
+	if (!eoc) {
+		eoc = strchr(sol, '\0');
+		if (!eoc)
+			return END;
+	}
+
+	/* consume the rest of the line, moving to the next line */
+	nvp->line++;
+	nvp->column = 1;
+	nvp->pos += (eoc - sol) + 1;
+	return IDLE;
+}
+
+static enum nvram_parser_state brcmf_nvram_handle_end(struct nvram_parser *nvp)
+{
+	/* final state */
+	return END;
+}
+
+static enum nvram_parser_state
+(*nv_parser_states[])(struct nvram_parser *nvp) = {
+	brcmf_nvram_handle_idle,
+	brcmf_nvram_handle_key,
+	brcmf_nvram_handle_value,
+	brcmf_nvram_handle_comment,
+	brcmf_nvram_handle_end
+};
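+/* Example walk-through: for the input line "boardtype=0x062b\n" the parser
+ * moves from IDLE to KEY at the first character, to VALUE at '=', and back
+ * to IDLE at the newline, at which point "boardtype=0x062b" plus a NUL
+ * terminator has been appended to nvp->nvram. Comment lines starting with
+ * '#' and malformed keys are consumed by the COMMENT handler.
+ */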
+
+static int brcmf_init_nvram_parser(struct nvram_parser *nvp,
+				   const u8 *data, size_t data_len)
+{
+	size_t size;
+
+	memset(nvp, 0, sizeof(*nvp));
+	nvp->data = data;
+	/* Limit to BRCMF_FW_MAX_NVRAM_SIZE; some files have many comments */
+	if (data_len > BRCMF_FW_MAX_NVRAM_SIZE)
+		size = BRCMF_FW_MAX_NVRAM_SIZE;
+	else
+		size = data_len;
+	/* Alloc for extra 0 byte + roundup by 4 + length field */
+	size += 1 + 3 + sizeof(u32);
+	nvp->nvram = kzalloc(size, GFP_KERNEL);
+	if (!nvp->nvram)
+		return -ENOMEM;
+
+	nvp->line = 1;
+	nvp->column = 1;
+	return 0;
+}
+
+/* brcmf_fw_strip_multi_v1(): some nvram files contain settings for multiple
+ * devices. Strip the data down to a single device; use domain_nr/bus_nr to
+ * determine which entries are to be returned. v1 is the version where nvram
+ * is stored compressed and "devpath" maps to an index for valid entries.
+ */
+static void brcmf_fw_strip_multi_v1(struct nvram_parser *nvp, u16 domain_nr,
+				    u16 bus_nr)
+{
+	/* Device path with a leading '=' key-value separator */
+	char pci_path[] = "=pci/?/?";
+	size_t pci_len;
+	char pcie_path[] = "=pcie/?/?";
+	size_t pcie_len;
+
+	u32 i, j;
+	bool found;
+	u8 *nvram;
+	u8 id;
+
+	nvram = kzalloc(nvp->nvram_len + 1 + 3 + sizeof(u32), GFP_KERNEL);
+	if (!nvram)
+		goto fail;
+
+	/* min length: devpath0=pcie/1/4/ + 0:x=y */
+	if (nvp->nvram_len < BRCMF_FW_NVRAM_DEVPATH_LEN + 6)
+		goto fail;
+
+	/* First search for the devpathX entry and see if it is the
+	 * configuration for domain_nr/bus_nr. Search the complete nvp.
+	 */
+	snprintf(pci_path, sizeof(pci_path), "=pci/%d/%d", domain_nr,
+		 bus_nr);
+	pci_len = strlen(pci_path);
+	snprintf(pcie_path, sizeof(pcie_path), "=pcie/%d/%d", domain_nr,
+		 bus_nr);
+	pcie_len = strlen(pcie_path);
+	found = false;
+	i = 0;
+	while (i < nvp->nvram_len - BRCMF_FW_NVRAM_DEVPATH_LEN) {
+		/* Format: devpathX=pcie/Y/Z/
+		 * Y = domain_nr, Z = bus_nr, X = virtual ID
+		 */
+		if (strncmp(&nvp->nvram[i], "devpath", 7) == 0 &&
+		    (!strncmp(&nvp->nvram[i + 8], pci_path, pci_len) ||
+		     !strncmp(&nvp->nvram[i + 8], pcie_path, pcie_len))) {
+			id = nvp->nvram[i + 7] - '0';
+			found = true;
+			break;
+		}
+		while (nvp->nvram[i] != 0)
+			i++;
+		i++;
+	}
+	if (!found)
+		goto fail;
+
+	/* Now copy all valid entries, release old nvram and assign new one */
+	i = 0;
+	j = 0;
+	while (i < nvp->nvram_len) {
+		if ((nvp->nvram[i] - '0' == id) && (nvp->nvram[i + 1] == ':')) {
+			i += 2;
+			while (nvp->nvram[i] != 0) {
+				nvram[j] = nvp->nvram[i];
+				i++;
+				j++;
+			}
+			nvram[j] = 0;
+			j++;
+		}
+		while (nvp->nvram[i] != 0)
+			i++;
+		i++;
+	}
+	kfree(nvp->nvram);
+	nvp->nvram = nvram;
+	nvp->nvram_len = j;
+	return;
+
+fail:
+	kfree(nvram);
+	nvp->nvram_len = 0;
+}
+
+/* brcmf_fw_strip_multi_v2(): some nvram files contain settings for multiple
+ * devices. Strip the data down to a single device; use domain_nr/bus_nr to
+ * determine which entries are to be returned. v2 is the version where nvram
+ * is stored uncompressed and all relevant valid entries are identified by
+ * the pcie/domain_nr/bus_nr/ prefix.
+ */
+static void brcmf_fw_strip_multi_v2(struct nvram_parser *nvp, u16 domain_nr,
+				    u16 bus_nr)
+{
+	char prefix[BRCMF_FW_NVRAM_PCIEDEV_LEN];
+	size_t len;
+	u32 i, j;
+	u8 *nvram;
+
+	nvram = kzalloc(nvp->nvram_len + 1 + 3 + sizeof(u32), GFP_KERNEL);
+	if (!nvram)
+		goto fail;
+
+	/* Copy all valid entries, release old nvram and assign new one.
+	 * Valid entries are of type pcie/X/Y/ where X = domain_nr and
+	 * Y = bus_nr.
+	 */
+	snprintf(prefix, sizeof(prefix), "pcie/%d/%d/", domain_nr, bus_nr);
+	len = strlen(prefix);
+	i = 0;
+	j = 0;
+	while (i < nvp->nvram_len - len) {
+		if (strncmp(&nvp->nvram[i], prefix, len) == 0) {
+			i += len;
+			while (nvp->nvram[i] != 0) {
+				nvram[j] = nvp->nvram[i];
+				i++;
+				j++;
+			}
+			nvram[j] = 0;
+			j++;
+		}
+		while (nvp->nvram[i] != 0)
+			i++;
+		i++;
+	}
+	kfree(nvp->nvram);
+	nvp->nvram = nvram;
+	nvp->nvram_len = j;
+	return;
+fail:
+	kfree(nvram);
+	nvp->nvram_len = 0;
+}
+
+/* brcmf_fw_nvram_strip(): takes a buffer of "<var>=<value>\n" lines read from
+ * a file and ending in a NUL. Removes carriage returns, empty lines, comment
+ * lines, and converts newlines to NULs. Shortens the buffer as needed and
+ * pads with NULs. The end of the buffer is completed with a token identifying
+ * the length of the buffer.
+ */
+static void *brcmf_fw_nvram_strip(const u8 *data, size_t data_len,
+				  u32 *new_length, u16 domain_nr, u16 bus_nr)
+{
+	struct nvram_parser nvp;
+	u32 pad;
+	u32 token;
+	__le32 token_le;
+
+	if (brcmf_init_nvram_parser(&nvp, data, data_len) < 0)
+		return NULL;
+
+	while (nvp.pos < data_len) {
+		nvp.state = nv_parser_states[nvp.state](&nvp);
+		if (nvp.state == END)
+			break;
+	}
+	if (nvp.multi_dev_v1)
+		brcmf_fw_strip_multi_v1(&nvp, domain_nr, bus_nr);
+	else if (nvp.multi_dev_v2)
+		brcmf_fw_strip_multi_v2(&nvp, domain_nr, bus_nr);
+
+	if (nvp.nvram_len == 0) {
+		kfree(nvp.nvram);
+		return NULL;
+	}
+
+	pad = nvp.nvram_len;
+	*new_length = roundup(nvp.nvram_len + 1, 4);
+	while (pad != *new_length) {
+		nvp.nvram[pad] = 0;
+		pad++;
+	}
+
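+	/* The stripped image is terminated with a 32-bit token holding the
+	 * image length in 32-bit words in its lower half and the bitwise
+	 * complement of that length in its upper half.
+	 */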
+	token = *new_length / 4;
+	token = (~token << 16) | (token & 0x0000FFFF);
+	token_le = cpu_to_le32(token);
+
+	memcpy(&nvp.nvram[*new_length], &token_le, sizeof(token_le));
+	*new_length += sizeof(token_le);
+
+	return nvp.nvram;
+}
+
+void brcmf_fw_nvram_free(void *nvram)
+{
+	kfree(nvram);
+}
+
+struct brcmf_fw {
+	struct device *dev;
+	u16 flags;
+	const struct firmware *code;
+	const char *nvram_name;
+	u16 domain_nr;
+	u16 bus_nr;
+	void (*done)(struct device *dev, const struct firmware *fw,
+		     void *nvram_image, u32 nvram_len);
+};
+
+static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
+{
+	struct brcmf_fw *fwctx = ctx;
+	u32 nvram_length = 0;
+	void *nvram = NULL;
+	u8 *data = NULL;
+	size_t data_len;
+	bool raw_nvram;
+
+	brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev));
+	if (fw && fw->data) {
+		data = (u8 *)fw->data;
+		data_len = fw->size;
+		raw_nvram = false;
+	} else {
+		data = bcm47xx_nvram_get_contents(&data_len);
+		if (!data && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
+			goto fail;
+		raw_nvram = true;
+	}
+
+	if (data)
+		nvram = brcmf_fw_nvram_strip(data, data_len, &nvram_length,
+					     fwctx->domain_nr, fwctx->bus_nr);
+
+	if (raw_nvram)
+		bcm47xx_nvram_release_contents(data);
+	if (fw)
+		release_firmware(fw);
+	if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
+		goto fail;
+
+	fwctx->done(fwctx->dev, fwctx->code, nvram, nvram_length);
+	kfree(fwctx);
+	return;
+
+fail:
+	brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
+	release_firmware(fwctx->code);
+	device_release_driver(fwctx->dev);
+	kfree(fwctx);
+}
+
+static void brcmf_fw_request_code_done(const struct firmware *fw, void *ctx)
+{
+	struct brcmf_fw *fwctx = ctx;
+	int ret;
+
+	brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev));
+	if (!fw)
+		goto fail;
+
+	/* only requested code so done here */
+	if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM)) {
+		fwctx->done(fwctx->dev, fw, NULL, 0);
+		kfree(fwctx);
+		return;
+	}
+	fwctx->code = fw;
+	ret = request_firmware_nowait(THIS_MODULE, true, fwctx->nvram_name,
+				      fwctx->dev, GFP_KERNEL, fwctx,
+				      brcmf_fw_request_nvram_done);
+
+	if (!ret)
+		return;
+
+	brcmf_fw_request_nvram_done(NULL, fwctx);
+	return;
+
+fail:
+	brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
+	device_release_driver(fwctx->dev);
+	kfree(fwctx);
+}
+
+int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
+				const char *code, const char *nvram,
+				void (*fw_cb)(struct device *dev,
+					      const struct firmware *fw,
+					      void *nvram_image, u32 nvram_len),
+				u16 domain_nr, u16 bus_nr)
+{
+	struct brcmf_fw *fwctx;
+
+	brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(dev));
+	if (!fw_cb || !code)
+		return -EINVAL;
+
+	if ((flags & BRCMF_FW_REQUEST_NVRAM) && !nvram)
+		return -EINVAL;
+
+	fwctx = kzalloc(sizeof(*fwctx), GFP_KERNEL);
+	if (!fwctx)
+		return -ENOMEM;
+
+	fwctx->dev = dev;
+	fwctx->flags = flags;
+	fwctx->done = fw_cb;
+	if (flags & BRCMF_FW_REQUEST_NVRAM)
+		fwctx->nvram_name = nvram;
+	fwctx->domain_nr = domain_nr;
+	fwctx->bus_nr = bus_nr;
+
+	return request_firmware_nowait(THIS_MODULE, true, code, dev,
+				       GFP_KERNEL, fwctx,
+				       brcmf_fw_request_code_done);
+}
+
+int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
+			   const char *code, const char *nvram,
+			   void (*fw_cb)(struct device *dev,
+					 const struct firmware *fw,
+					 void *nvram_image, u32 nvram_len))
+{
+	return brcmf_fw_get_firmwares_pcie(dev, flags, code, nvram, fw_cb, 0,
+					   0);
+}
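+
+/* Typical use from a bus driver setup path (callback and firmware names
+ * below are illustrative):
+ *
+ *	static void fw_cb(struct device *dev, const struct firmware *fw,
+ *			  void *nvram, u32 nvram_len)
+ *	{
+ *		... hand fw and nvram over to the bus ...
+ *	}
+ *
+ *	err = brcmf_fw_get_firmwares(dev, BRCMF_FW_REQUEST_NVRAM |
+ *				     BRCMF_FW_REQ_NV_OPTIONAL,
+ *				     "brcm/fw.bin", "brcm/fw.txt", fw_cb);
+ *
+ * On success the callback receives the firmware and a (possibly NULL) nvram
+ * image and is responsible for releasing both, typically via
+ * release_firmware() and brcmf_fw_nvram_free().
+ */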
+
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/firmware.h b/drivers/net/wireless/brcm80211/brcmfmac/firmware.h
new file mode 100644
index 0000000..604dd48
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/firmware.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2013 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef BRCMFMAC_FIRMWARE_H
+#define BRCMFMAC_FIRMWARE_H
+
+#define BRCMF_FW_REQUEST		0x000F
+#define  BRCMF_FW_REQUEST_NVRAM		0x0001
+#define BRCMF_FW_REQ_FLAGS		0x00F0
+#define  BRCMF_FW_REQ_NV_OPTIONAL	0x0010
+
+#define	BRCMF_FW_PATH_LEN	256
+#define	BRCMF_FW_NAME_LEN	32
+
+extern char brcmf_firmware_path[];
+
+void brcmf_fw_nvram_free(void *nvram);
+/*
+ * Request firmware(s) asynchronously. When the asynchronous request
+ * fails it will not use the callback, but call device_release_driver()
+ * instead which will call the driver .remove() callback.
+ */
+int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
+				const char *code, const char *nvram,
+				void (*fw_cb)(struct device *dev,
+					      const struct firmware *fw,
+					      void *nvram_image, u32 nvram_len),
+				u16 domain_nr, u16 bus_nr);
+int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
+			   const char *code, const char *nvram,
+			   void (*fw_cb)(struct device *dev,
+					 const struct firmware *fw,
+					 void *nvram_image, u32 nvram_len));
+
+#endif /* BRCMFMAC_FIRMWARE_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
new file mode 100644
index 0000000..2ca783f
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
@@ -0,0 +1,504 @@
+/* Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <brcmu_utils.h>
+
+#include "core.h"
+#include "debug.h"
+#include "bus.h"
+#include "proto.h"
+#include "flowring.h"
+#include "msgbuf.h"
+#include "common.h"
+
+
+#define BRCMF_FLOWRING_HIGH		1024
+#define BRCMF_FLOWRING_LOW		(BRCMF_FLOWRING_HIGH - 256)
+#define BRCMF_FLOWRING_INVALID_IFIDX	0xff
+
+#define BRCMF_FLOWRING_HASH_AP(da, fifo, ifidx) (da[5] + fifo + ifidx * 16)
+#define BRCMF_FLOWRING_HASH_STA(fifo, ifidx) (fifo + ifidx * 16)
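+/* The hash index is stored in a u8 by the callers, so both expressions
+ * effectively index the BRCMF_FLOWRING_HASHSIZE (256) entry table modulo 256.
+ * For example (illustrative values), an AP-mode entry with da[5] = 0x2a,
+ * fifo 1 and ifidx 0 starts probing at index 0x2a + 1 + 0 = 0x2b.
+ */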
+
+static const u8 brcmf_flowring_prio2fifo[] = {
+	1,
+	0,
+	0,
+	1,
+	2,
+	2,
+	3,
+	3
+};
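+
+/* brcmf_flowring_prio2fifo maps an 802.1d priority (the array index, 0-7) to
+ * one of four fifos in the usual access-category grouping: priorities 1 and 2
+ * map to fifo 0, 0 and 3 to fifo 1, 4 and 5 to fifo 2, and 6 and 7 to fifo 3.
+ */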
+
+
+static bool
+brcmf_flowring_is_tdls_mac(struct brcmf_flowring *flow, u8 mac[ETH_ALEN])
+{
+	struct brcmf_flowring_tdls_entry *search;
+
+	search = flow->tdls_entry;
+
+	while (search) {
+		if (memcmp(search->mac, mac, ETH_ALEN) == 0)
+			return true;
+		search = search->next;
+	}
+
+	return false;
+}
+
+
+u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
+			  u8 prio, u8 ifidx)
+{
+	struct brcmf_flowring_hash *hash;
+	u8 hash_idx;
+	u32 i;
+	bool found;
+	bool sta;
+	u8 fifo;
+	u8 *mac;
+
+	fifo = brcmf_flowring_prio2fifo[prio];
+	sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT);
+	mac = da;
+	if ((!sta) && (is_multicast_ether_addr(da))) {
+		mac = (u8 *)ALLFFMAC;
+		fifo = 0;
+	}
+	if ((sta) && (flow->tdls_active) &&
+	    (brcmf_flowring_is_tdls_mac(flow, da))) {
+		sta = false;
+	}
+	hash_idx =  sta ? BRCMF_FLOWRING_HASH_STA(fifo, ifidx) :
+			  BRCMF_FLOWRING_HASH_AP(mac, fifo, ifidx);
+	found = false;
+	hash = flow->hash;
+	for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
+		if ((sta || (memcmp(hash[hash_idx].mac, mac, ETH_ALEN) == 0)) &&
+		    (hash[hash_idx].fifo == fifo) &&
+		    (hash[hash_idx].ifidx == ifidx)) {
+			found = true;
+			break;
+		}
+		hash_idx++;
+	}
+	if (found)
+		return hash[hash_idx].flowid;
+
+	return BRCMF_FLOWRING_INVALID_ID;
+}
+
+
+u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
+			  u8 prio, u8 ifidx)
+{
+	struct brcmf_flowring_ring *ring;
+	struct brcmf_flowring_hash *hash;
+	u8 hash_idx;
+	u32 i;
+	bool found;
+	u8 fifo;
+	bool sta;
+	u8 *mac;
+
+	fifo = brcmf_flowring_prio2fifo[prio];
+	sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT);
+	mac = da;
+	if ((!sta) && (is_multicast_ether_addr(da))) {
+		mac = (u8 *)ALLFFMAC;
+		fifo = 0;
+	}
+	if ((sta) && (flow->tdls_active) &&
+	    (brcmf_flowring_is_tdls_mac(flow, da))) {
+		sta = false;
+	}
+	hash_idx =  sta ? BRCMF_FLOWRING_HASH_STA(fifo, ifidx) :
+			  BRCMF_FLOWRING_HASH_AP(mac, fifo, ifidx);
+	found = false;
+	hash = flow->hash;
+	for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
+		if ((hash[hash_idx].ifidx == BRCMF_FLOWRING_INVALID_IFIDX) &&
+		    (is_zero_ether_addr(hash[hash_idx].mac))) {
+			found = true;
+			break;
+		}
+		hash_idx++;
+	}
+	if (found) {
+		for (i = 0; i < flow->nrofrings; i++) {
+			if (flow->rings[i] == NULL)
+				break;
+		}
+		if (i == flow->nrofrings)
+			return -ENOMEM;
+
+		ring = kzalloc(sizeof(*ring), GFP_ATOMIC);
+		if (!ring)
+			return -ENOMEM;
+
+		memcpy(hash[hash_idx].mac, mac, ETH_ALEN);
+		hash[hash_idx].fifo = fifo;
+		hash[hash_idx].ifidx = ifidx;
+		hash[hash_idx].flowid = i;
+
+		ring->hash_id = hash_idx;
+		ring->status = RING_CLOSED;
+		skb_queue_head_init(&ring->skblist);
+		flow->rings[i] = ring;
+
+		return i;
+	}
+	return BRCMF_FLOWRING_INVALID_ID;
+}
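+
+/* A caller normally tries brcmf_flowring_lookup() first and only creates a
+ * ring when no match exists, e.g. (sketch; eth, skb and ifidx stand for the
+ * caller's ethernet header, packet and interface index):
+ *
+ *	flowid = brcmf_flowring_lookup(flow, eth->h_dest, skb->priority, ifidx);
+ *	if (flowid == BRCMF_FLOWRING_INVALID_ID)
+ *		flowid = brcmf_flowring_create(flow, eth->h_dest,
+ *					       skb->priority, ifidx);
+ */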
+
+
+u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid)
+{
+	struct brcmf_flowring_ring *ring;
+
+	ring = flow->rings[flowid];
+
+	return flow->hash[ring->hash_id].fifo;
+}
+
+
+static void brcmf_flowring_block(struct brcmf_flowring *flow, u8 flowid,
+				 bool blocked)
+{
+	struct brcmf_flowring_ring *ring;
+	struct brcmf_bus *bus_if;
+	struct brcmf_pub *drvr;
+	struct brcmf_if *ifp;
+	bool currently_blocked;
+	int i;
+	u8 ifidx;
+	unsigned long flags;
+
+	spin_lock_irqsave(&flow->block_lock, flags);
+
+	ring = flow->rings[flowid];
+	if (ring->blocked == blocked) {
+		spin_unlock_irqrestore(&flow->block_lock, flags);
+		return;
+	}
+	ifidx = brcmf_flowring_ifidx_get(flow, flowid);
+
+	currently_blocked = false;
+	for (i = 0; i < flow->nrofrings; i++) {
+		if ((flow->rings[i]) && (i != flowid)) {
+			ring = flow->rings[i];
+			if ((ring->status == RING_OPEN) &&
+			    (brcmf_flowring_ifidx_get(flow, i) == ifidx)) {
+				if (ring->blocked) {
+					currently_blocked = true;
+					break;
+				}
+			}
+		}
+	}
+	flow->rings[flowid]->blocked = blocked;
+	if (currently_blocked) {
+		spin_unlock_irqrestore(&flow->block_lock, flags);
+		return;
+	}
+
+	bus_if = dev_get_drvdata(flow->dev);
+	drvr = bus_if->drvr;
+	ifp = brcmf_get_ifp(drvr, ifidx);
+	brcmf_txflowblock_if(ifp, BRCMF_NETIF_STOP_REASON_FLOW, blocked);
+
+	spin_unlock_irqrestore(&flow->block_lock, flags);
+}
+
+
+void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid)
+{
+	struct brcmf_flowring_ring *ring;
+	u8 hash_idx;
+	struct sk_buff *skb;
+
+	ring = flow->rings[flowid];
+	if (!ring)
+		return;
+	brcmf_flowring_block(flow, flowid, false);
+	hash_idx = ring->hash_id;
+	flow->hash[hash_idx].ifidx = BRCMF_FLOWRING_INVALID_IFIDX;
+	eth_zero_addr(flow->hash[hash_idx].mac);
+	flow->rings[flowid] = NULL;
+
+	skb = skb_dequeue(&ring->skblist);
+	while (skb) {
+		brcmu_pkt_buf_free_skb(skb);
+		skb = skb_dequeue(&ring->skblist);
+	}
+
+	kfree(ring);
+}
+
+
+u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
+			   struct sk_buff *skb)
+{
+	struct brcmf_flowring_ring *ring;
+
+	ring = flow->rings[flowid];
+
+	skb_queue_tail(&ring->skblist, skb);
+
+	if (!ring->blocked &&
+	    (skb_queue_len(&ring->skblist) > BRCMF_FLOWRING_HIGH)) {
+		brcmf_flowring_block(flow, flowid, true);
+		brcmf_dbg(MSGBUF, "Flowcontrol: BLOCK for ring %d\n", flowid);
+		/* Work around a possible race condition by re-checking the
+		 * queue length. Locking could also protect this, but taking
+		 * a lock on every enqueue and dequeue is undesirable. This
+		 * simple re-check resolves the race if it does occur.
+		 */
+		if (skb_queue_len(&ring->skblist) < BRCMF_FLOWRING_LOW)
+			brcmf_flowring_block(flow, flowid, false);
+	}
+	return skb_queue_len(&ring->skblist);
+}
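+
+/* With BRCMF_FLOWRING_HIGH at 1024 and BRCMF_FLOWRING_LOW at 768 this gives a
+ * simple hysteresis: netif traffic for the interface is blocked once a ring
+ * holds more than 1024 packets and re-opened by the dequeue path once the
+ * ring drains below 768.
+ */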
+
+
+struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid)
+{
+	struct brcmf_flowring_ring *ring;
+	struct sk_buff *skb;
+
+	ring = flow->rings[flowid];
+	if (ring->status != RING_OPEN)
+		return NULL;
+
+	skb = skb_dequeue(&ring->skblist);
+
+	if (ring->blocked &&
+	    (skb_queue_len(&ring->skblist) < BRCMF_FLOWRING_LOW)) {
+		brcmf_flowring_block(flow, flowid, false);
+		brcmf_dbg(MSGBUF, "Flowcontrol: OPEN for ring %d\n", flowid);
+	}
+
+	return skb;
+}
+
+
+void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid,
+			     struct sk_buff *skb)
+{
+	struct brcmf_flowring_ring *ring;
+
+	ring = flow->rings[flowid];
+
+	skb_queue_head(&ring->skblist, skb);
+}
+
+
+u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u8 flowid)
+{
+	struct brcmf_flowring_ring *ring;
+
+	ring = flow->rings[flowid];
+	if (!ring)
+		return 0;
+
+	if (ring->status != RING_OPEN)
+		return 0;
+
+	return skb_queue_len(&ring->skblist);
+}
+
+
+void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid)
+{
+	struct brcmf_flowring_ring *ring;
+
+	ring = flow->rings[flowid];
+	if (!ring) {
+		brcmf_err("Ring NULL for flowid %d\n", flowid);
+		return;
+	}
+
+	ring->status = RING_OPEN;
+}
+
+
+u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u8 flowid)
+{
+	struct brcmf_flowring_ring *ring;
+	u8 hash_idx;
+
+	ring = flow->rings[flowid];
+	hash_idx = ring->hash_id;
+
+	return flow->hash[hash_idx].ifidx;
+}
+
+
+struct brcmf_flowring *brcmf_flowring_attach(struct device *dev, u16 nrofrings)
+{
+	struct brcmf_flowring *flow;
+	u32 i;
+
+	flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+	if (flow) {
+		flow->dev = dev;
+		flow->nrofrings = nrofrings;
+		spin_lock_init(&flow->block_lock);
+		for (i = 0; i < ARRAY_SIZE(flow->addr_mode); i++)
+			flow->addr_mode[i] = ADDR_INDIRECT;
+		for (i = 0; i < ARRAY_SIZE(flow->hash); i++)
+			flow->hash[i].ifidx = BRCMF_FLOWRING_INVALID_IFIDX;
+		flow->rings = kcalloc(nrofrings, sizeof(*flow->rings),
+				      GFP_KERNEL);
+		if (!flow->rings) {
+			kfree(flow);
+			flow = NULL;
+		}
+	}
+
+	return flow;
+}
+
+
+void brcmf_flowring_detach(struct brcmf_flowring *flow)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
+	struct brcmf_pub *drvr = bus_if->drvr;
+	struct brcmf_flowring_tdls_entry *search;
+	struct brcmf_flowring_tdls_entry *remove;
+	u8 flowid;
+
+	for (flowid = 0; flowid < flow->nrofrings; flowid++) {
+		if (flow->rings[flowid])
+			brcmf_msgbuf_delete_flowring(drvr, flowid);
+	}
+
+	search = flow->tdls_entry;
+	while (search) {
+		remove = search;
+		search = search->next;
+		kfree(remove);
+	}
+	kfree(flow->rings);
+	kfree(flow);
+}
+
+
+void brcmf_flowring_configure_addr_mode(struct brcmf_flowring *flow, int ifidx,
+					enum proto_addr_mode addr_mode)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
+	struct brcmf_pub *drvr = bus_if->drvr;
+	u32 i;
+	u8 flowid;
+
+	if (flow->addr_mode[ifidx] != addr_mode) {
+		for (i = 0; i < ARRAY_SIZE(flow->hash); i++) {
+			if (flow->hash[i].ifidx == ifidx) {
+				flowid = flow->hash[i].flowid;
+				if (flow->rings[flowid]->status != RING_OPEN)
+					continue;
+				flow->rings[flowid]->status = RING_CLOSING;
+				brcmf_msgbuf_delete_flowring(drvr, flowid);
+			}
+		}
+		flow->addr_mode[ifidx] = addr_mode;
+	}
+}
+
+
+void brcmf_flowring_delete_peer(struct brcmf_flowring *flow, int ifidx,
+				u8 peer[ETH_ALEN])
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
+	struct brcmf_pub *drvr = bus_if->drvr;
+	struct brcmf_flowring_hash *hash;
+	struct brcmf_flowring_tdls_entry *prev;
+	struct brcmf_flowring_tdls_entry *search;
+	u32 i;
+	u8 flowid;
+	bool sta;
+
+	sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT);
+
+	search = flow->tdls_entry;
+	prev = NULL;
+	while (search) {
+		if (memcmp(search->mac, peer, ETH_ALEN) == 0) {
+			sta = false;
+			break;
+		}
+		prev = search;
+		search = search->next;
+	}
+
+	hash = flow->hash;
+	for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
+		if ((sta || (memcmp(hash[i].mac, peer, ETH_ALEN) == 0)) &&
+		    (hash[i].ifidx == ifidx)) {
+			flowid = flow->hash[i].flowid;
+			if (flow->rings[flowid]->status == RING_OPEN) {
+				flow->rings[flowid]->status = RING_CLOSING;
+				brcmf_msgbuf_delete_flowring(drvr, flowid);
+			}
+		}
+	}
+
+	if (search) {
+		if (prev)
+			prev->next = search->next;
+		else
+			flow->tdls_entry = search->next;
+		kfree(search);
+		if (flow->tdls_entry == NULL)
+			flow->tdls_active = false;
+	}
+}
+
+
+void brcmf_flowring_add_tdls_peer(struct brcmf_flowring *flow, int ifidx,
+				  u8 peer[ETH_ALEN])
+{
+	struct brcmf_flowring_tdls_entry *tdls_entry;
+	struct brcmf_flowring_tdls_entry *search;
+
+	tdls_entry = kzalloc(sizeof(*tdls_entry), GFP_ATOMIC);
+	if (tdls_entry == NULL)
+		return;
+
+	memcpy(tdls_entry->mac, peer, ETH_ALEN);
+	tdls_entry->next = NULL;
+	if (flow->tdls_entry == NULL) {
+		flow->tdls_entry = tdls_entry;
+	} else {
+		search = flow->tdls_entry;
+		if (memcmp(search->mac, peer, ETH_ALEN) == 0)
+			return;
+		while (search->next) {
+			search = search->next;
+			if (memcmp(search->mac, peer, ETH_ALEN) == 0)
+				return;
+		}
+		search->next = tdls_entry;
+	}
+
+	flow->tdls_active = true;
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/flowring.h b/drivers/net/wireless/brcm80211/brcmfmac/flowring.h
new file mode 100644
index 0000000..95fd1c9
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/flowring.h
@@ -0,0 +1,84 @@
+/* Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef BRCMFMAC_FLOWRING_H
+#define BRCMFMAC_FLOWRING_H
+
+
+#define BRCMF_FLOWRING_HASHSIZE		256
+#define BRCMF_FLOWRING_INVALID_ID	0xFFFFFFFF
+
+
+struct brcmf_flowring_hash {
+	u8 mac[ETH_ALEN];
+	u8 fifo;
+	u8 ifidx;
+	u8 flowid;
+};
+
+enum ring_status {
+	RING_CLOSED,
+	RING_CLOSING,
+	RING_OPEN
+};
+
+struct brcmf_flowring_ring {
+	u16 hash_id;
+	bool blocked;
+	enum ring_status status;
+	struct sk_buff_head skblist;
+};
+
+struct brcmf_flowring_tdls_entry {
+	u8 mac[ETH_ALEN];
+	struct brcmf_flowring_tdls_entry *next;
+};
+
+struct brcmf_flowring {
+	struct device *dev;
+	struct brcmf_flowring_hash hash[BRCMF_FLOWRING_HASHSIZE];
+	struct brcmf_flowring_ring **rings;
+	spinlock_t block_lock;
+	enum proto_addr_mode addr_mode[BRCMF_MAX_IFS];
+	u16 nrofrings;
+	bool tdls_active;
+	struct brcmf_flowring_tdls_entry *tdls_entry;
+};
+
+
+u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
+			  u8 prio, u8 ifidx);
+u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
+			  u8 prio, u8 ifidx);
+void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid);
+void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid);
+u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid);
+u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
+			   struct sk_buff *skb);
+struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid);
+void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid,
+			     struct sk_buff *skb);
+u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u8 flowid);
+u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u8 flowid);
+struct brcmf_flowring *brcmf_flowring_attach(struct device *dev, u16 nrofrings);
+void brcmf_flowring_detach(struct brcmf_flowring *flow);
+void brcmf_flowring_configure_addr_mode(struct brcmf_flowring *flow, int ifidx,
+					enum proto_addr_mode addr_mode);
+void brcmf_flowring_delete_peer(struct brcmf_flowring *flow, int ifidx,
+				u8 peer[ETH_ALEN]);
+void brcmf_flowring_add_tdls_peer(struct brcmf_flowring *flow, int ifidx,
+				  u8 peer[ETH_ALEN]);
+
+
+#endif /* BRCMFMAC_FLOWRING_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
new file mode 100644
index 0000000..3878b6f
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
@@ -0,0 +1,478 @@
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/netdevice.h>
+
+#include "brcmu_wifi.h"
+#include "brcmu_utils.h"
+
+#include "core.h"
+#include "debug.h"
+#include "tracepoint.h"
+#include "fwsignal.h"
+#include "fweh.h"
+#include "fwil.h"
+
+/**
+ * struct brcm_ethhdr - broadcom specific ether header.
+ *
+ * @subtype: subtype for this packet.
+ * @length: TODO: length of appended data.
+ * @version: version indication.
+ * @oui: OUI of this packet.
+ * @usr_subtype: subtype for this OUI.
+ */
+struct brcm_ethhdr {
+	__be16 subtype;
+	__be16 length;
+	u8 version;
+	u8 oui[3];
+	__be16 usr_subtype;
+} __packed;
+
+struct brcmf_event_msg_be {
+	__be16 version;
+	__be16 flags;
+	__be32 event_type;
+	__be32 status;
+	__be32 reason;
+	__be32 auth_type;
+	__be32 datalen;
+	u8 addr[ETH_ALEN];
+	char ifname[IFNAMSIZ];
+	u8 ifidx;
+	u8 bsscfgidx;
+} __packed;
+
+/**
+ * struct brcmf_event - contents of broadcom event packet.
+ *
+ * @eth: standard ether header.
+ * @hdr: broadcom specific ether header.
+ * @msg: common part of the actual event message.
+ */
+struct brcmf_event {
+	struct ethhdr eth;
+	struct brcm_ethhdr hdr;
+	struct brcmf_event_msg_be msg;
+} __packed;
+
+/**
+ * struct brcmf_fweh_queue_item - event item on event queue.
+ *
+ * @q: list element for queuing.
+ * @code: event code.
+ * @ifidx: interface index related to this event.
+ * @ifaddr: ethernet address for interface.
+ * @emsg: common parameters of the firmware event message.
+ * @data: event specific data part of the firmware event.
+ */
+struct brcmf_fweh_queue_item {
+	struct list_head q;
+	enum brcmf_fweh_event_code code;
+	u8 ifidx;
+	u8 ifaddr[ETH_ALEN];
+	struct brcmf_event_msg_be emsg;
+	u8 data[0];
+};
+
+/**
+ * struct brcmf_fweh_event_name - code, name mapping entry.
+ */
+struct brcmf_fweh_event_name {
+	enum brcmf_fweh_event_code code;
+	const char *name;
+};
+
+#ifdef DEBUG
+#define BRCMF_ENUM_DEF(id, val) \
+	{ val, #id },
+
+/* array for mapping code to event name */
+static struct brcmf_fweh_event_name fweh_event_names[] = {
+	BRCMF_FWEH_EVENT_ENUM_DEFLIST
+};
+#undef BRCMF_ENUM_DEF
+
+/**
+ * brcmf_fweh_event_name() - returns name for given event code.
+ *
+ * @code: code to lookup.
+ */
+static const char *brcmf_fweh_event_name(enum brcmf_fweh_event_code code)
+{
+	int i;
+	for (i = 0; i < ARRAY_SIZE(fweh_event_names); i++) {
+		if (fweh_event_names[i].code == code)
+			return fweh_event_names[i].name;
+	}
+	return "unknown";
+}
+#else
+static const char *brcmf_fweh_event_name(enum brcmf_fweh_event_code code)
+{
+	return "nodebug";
+}
+#endif
+
+/**
+ * brcmf_fweh_queue_event() - create and queue event.
+ *
+ * @fweh: firmware event handling info.
+ * @event: event queue entry.
+ */
+static void brcmf_fweh_queue_event(struct brcmf_fweh_info *fweh,
+				   struct brcmf_fweh_queue_item *event)
+{
+	ulong flags;
+
+	spin_lock_irqsave(&fweh->evt_q_lock, flags);
+	list_add_tail(&event->q, &fweh->event_q);
+	spin_unlock_irqrestore(&fweh->evt_q_lock, flags);
+	schedule_work(&fweh->event_work);
+}
+
+static int brcmf_fweh_call_event_handler(struct brcmf_if *ifp,
+					 enum brcmf_fweh_event_code code,
+					 struct brcmf_event_msg *emsg,
+					 void *data)
+{
+	struct brcmf_fweh_info *fweh;
+	int err = -EINVAL;
+
+	if (ifp) {
+		fweh = &ifp->drvr->fweh;
+
+		/* handle the event if valid interface and handler */
+		if (fweh->evt_handler[code])
+			err = fweh->evt_handler[code](ifp, emsg, data);
+		else
+			brcmf_err("unhandled event %d ignored\n", code);
+	} else {
+		brcmf_err("no interface object\n");
+	}
+	return err;
+}
+
+/**
+ * brcmf_fweh_handle_if_event() - handle IF event.
+ *
+ * @drvr: driver information object.
+ * @emsg: firmware event message.
+ * @data: event specific data.
+ */
+static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
+				       struct brcmf_event_msg *emsg,
+				       void *data)
+{
+	struct brcmf_if_event *ifevent = data;
+	struct brcmf_if *ifp;
+	bool is_p2pdev;
+	int err = 0;
+
+	brcmf_dbg(EVENT, "action: %u idx: %u bsscfg: %u flags: %u role: %u\n",
+		  ifevent->action, ifevent->ifidx, ifevent->bssidx,
+		  ifevent->flags, ifevent->role);
+
+	/* The P2P Device interface event must not be ignored contrary to what
+	 * firmware tells us. Older firmware uses p2p noif, with sta role.
+	 * This should be accepted when p2pdev_setup is ongoing. TDLS setup will
+	 * use the same ifevent and should be ignored.
+	 */
+	is_p2pdev = ((ifevent->flags & BRCMF_E_IF_FLAG_NOIF) &&
+		     (ifevent->role == BRCMF_E_IF_ROLE_P2P_CLIENT ||
+		      ((ifevent->role == BRCMF_E_IF_ROLE_STA) &&
+		       (drvr->fweh.p2pdev_setup_ongoing))));
+	if (!is_p2pdev && (ifevent->flags & BRCMF_E_IF_FLAG_NOIF)) {
+		brcmf_dbg(EVENT, "event can be ignored\n");
+		return;
+	}
+	if (ifevent->ifidx >= BRCMF_MAX_IFS) {
+		brcmf_err("invalid interface index: %u\n", ifevent->ifidx);
+		return;
+	}
+
+	ifp = drvr->iflist[ifevent->bssidx];
+
+	if (ifevent->action == BRCMF_E_IF_ADD) {
+		brcmf_dbg(EVENT, "adding %s (%pM)\n", emsg->ifname,
+			  emsg->addr);
+		ifp = brcmf_add_if(drvr, ifevent->bssidx, ifevent->ifidx,
+				   is_p2pdev, emsg->ifname, emsg->addr);
+		if (IS_ERR(ifp))
+			return;
+		if (!is_p2pdev)
+			brcmf_fws_add_interface(ifp);
+		if (!drvr->fweh.evt_handler[BRCMF_E_IF])
+			if (brcmf_net_attach(ifp, false) < 0)
+				return;
+	}
+
+	if (ifp && ifevent->action == BRCMF_E_IF_CHANGE)
+		brcmf_fws_reset_interface(ifp);
+
+	err = brcmf_fweh_call_event_handler(ifp, emsg->event_code, emsg, data);
+
+	if (ifp && ifevent->action == BRCMF_E_IF_DEL)
+		brcmf_remove_interface(ifp);
+}
+
+/**
+ * brcmf_fweh_dequeue_event() - get event from the queue.
+ *
+ * @fweh: firmware event handling info.
+ */
+static struct brcmf_fweh_queue_item *
+brcmf_fweh_dequeue_event(struct brcmf_fweh_info *fweh)
+{
+	struct brcmf_fweh_queue_item *event = NULL;
+	ulong flags;
+
+	spin_lock_irqsave(&fweh->evt_q_lock, flags);
+	if (!list_empty(&fweh->event_q)) {
+		event = list_first_entry(&fweh->event_q,
+					 struct brcmf_fweh_queue_item, q);
+		list_del(&event->q);
+	}
+	spin_unlock_irqrestore(&fweh->evt_q_lock, flags);
+
+	return event;
+}
+
+/**
+ * brcmf_fweh_event_worker() - firmware event worker.
+ *
+ * @work: worker object.
+ */
+static void brcmf_fweh_event_worker(struct work_struct *work)
+{
+	struct brcmf_pub *drvr;
+	struct brcmf_if *ifp;
+	struct brcmf_fweh_info *fweh;
+	struct brcmf_fweh_queue_item *event;
+	int err = 0;
+	struct brcmf_event_msg_be *emsg_be;
+	struct brcmf_event_msg emsg;
+
+	fweh = container_of(work, struct brcmf_fweh_info, event_work);
+	drvr = container_of(fweh, struct brcmf_pub, fweh);
+
+	while ((event = brcmf_fweh_dequeue_event(fweh))) {
+		brcmf_dbg(EVENT, "event %s (%u) ifidx %u bsscfg %u addr %pM\n",
+			  brcmf_fweh_event_name(event->code), event->code,
+			  event->emsg.ifidx, event->emsg.bsscfgidx,
+			  event->emsg.addr);
+
+		/* convert event message */
+		emsg_be = &event->emsg;
+		emsg.version = be16_to_cpu(emsg_be->version);
+		emsg.flags = be16_to_cpu(emsg_be->flags);
+		emsg.event_code = event->code;
+		emsg.status = be32_to_cpu(emsg_be->status);
+		emsg.reason = be32_to_cpu(emsg_be->reason);
+		emsg.auth_type = be32_to_cpu(emsg_be->auth_type);
+		emsg.datalen = be32_to_cpu(emsg_be->datalen);
+		memcpy(emsg.addr, emsg_be->addr, ETH_ALEN);
+		memcpy(emsg.ifname, emsg_be->ifname, sizeof(emsg.ifname));
+		emsg.ifidx = emsg_be->ifidx;
+		emsg.bsscfgidx = emsg_be->bsscfgidx;
+
+		brcmf_dbg(EVENT, "  version %u flags %u status %u reason %u\n",
+			  emsg.version, emsg.flags, emsg.status, emsg.reason);
+		brcmf_dbg_hex_dump(BRCMF_EVENT_ON(), event->data,
+				   min_t(u32, emsg.datalen, 64),
+				   "event payload, len=%d\n", emsg.datalen);
+
+		/* special handling of interface event */
+		if (event->code == BRCMF_E_IF) {
+			brcmf_fweh_handle_if_event(drvr, &emsg, event->data);
+			goto event_free;
+		}
+
+		if (event->code == BRCMF_E_TDLS_PEER_EVENT)
+			ifp = drvr->iflist[0];
+		else
+			ifp = drvr->iflist[emsg.bsscfgidx];
+		err = brcmf_fweh_call_event_handler(ifp, event->code, &emsg,
+						    event->data);
+		if (err) {
+			brcmf_err("event handler failed (%d)\n",
+				  event->code);
+			err = 0;
+		}
+event_free:
+		kfree(event);
+	}
+}
+
+/**
+ * brcmf_fweh_p2pdev_setup() - P2P device setup ongoing (or not).
+ *
+ * @ifp: ifp on which setup is taking place or finished.
+ * @ongoing: p2p device setup in progress (or not).
+ */
+void brcmf_fweh_p2pdev_setup(struct brcmf_if *ifp, bool ongoing)
+{
+	ifp->drvr->fweh.p2pdev_setup_ongoing = ongoing;
+}
+
+/**
+ * brcmf_fweh_attach() - initialize firmware event handling.
+ *
+ * @drvr: driver information object.
+ */
+void brcmf_fweh_attach(struct brcmf_pub *drvr)
+{
+	struct brcmf_fweh_info *fweh = &drvr->fweh;
+	INIT_WORK(&fweh->event_work, brcmf_fweh_event_worker);
+	spin_lock_init(&fweh->evt_q_lock);
+	INIT_LIST_HEAD(&fweh->event_q);
+}
+
+/**
+ * brcmf_fweh_detach() - cleanup firmware event handling.
+ *
+ * @drvr: driver information object.
+ */
+void brcmf_fweh_detach(struct brcmf_pub *drvr)
+{
+	struct brcmf_fweh_info *fweh = &drvr->fweh;
+	struct brcmf_if *ifp = brcmf_get_ifp(drvr, 0);
+	s8 eventmask[BRCMF_EVENTING_MASK_LEN];
+
+	if (ifp) {
+		/* clear all events */
+		memset(eventmask, 0, BRCMF_EVENTING_MASK_LEN);
+		(void)brcmf_fil_iovar_data_set(ifp, "event_msgs",
+					       eventmask,
+					       BRCMF_EVENTING_MASK_LEN);
+	}
+	/* cancel the worker */
+	cancel_work_sync(&fweh->event_work);
+	WARN_ON(!list_empty(&fweh->event_q));
+	memset(fweh->evt_handler, 0, sizeof(fweh->evt_handler));
+}
+
+/**
+ * brcmf_fweh_register() - register handler for given event code.
+ *
+ * @drvr: driver information object.
+ * @code: event code.
+ * @handler: handler for the given event code.
+ */
+int brcmf_fweh_register(struct brcmf_pub *drvr, enum brcmf_fweh_event_code code,
+			brcmf_fweh_handler_t handler)
+{
+	if (drvr->fweh.evt_handler[code]) {
+		brcmf_err("event code %d already registered\n", code);
+		return -ENOSPC;
+	}
+	drvr->fweh.evt_handler[code] = handler;
+	brcmf_dbg(TRACE, "event handler registered for %s\n",
+		  brcmf_fweh_event_name(code));
+	return 0;
+}
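+
+/* Example registration (the handler name is illustrative):
+ *
+ *	static int handle_link(struct brcmf_if *ifp,
+ *			       const struct brcmf_event_msg *emsg, void *data)
+ *	{
+ *		return 0;
+ *	}
+ *
+ *	brcmf_fweh_register(drvr, BRCMF_E_LINK, handle_link);
+ *
+ * Registered events only reach the firmware event mask after a subsequent
+ * brcmf_fweh_activate_events() call on the primary interface.
+ */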
+
+/**
+ * brcmf_fweh_unregister() - remove handler for given code.
+ *
+ * @drvr: driver information object.
+ * @code: event code.
+ */
+void brcmf_fweh_unregister(struct brcmf_pub *drvr,
+			   enum brcmf_fweh_event_code code)
+{
+	brcmf_dbg(TRACE, "event handler cleared for %s\n",
+		  brcmf_fweh_event_name(code));
+	drvr->fweh.evt_handler[code] = NULL;
+}
+
+/**
+ * brcmf_fweh_activate_events() - enables firmware events registered.
+ *
+ * @ifp: primary interface object.
+ */
+int brcmf_fweh_activate_events(struct brcmf_if *ifp)
+{
+	int i, err;
+	s8 eventmask[BRCMF_EVENTING_MASK_LEN];
+
+	for (i = 0; i < BRCMF_E_LAST; i++) {
+		if (ifp->drvr->fweh.evt_handler[i]) {
+			brcmf_dbg(EVENT, "enable event %s\n",
+				  brcmf_fweh_event_name(i));
+			setbit(eventmask, i);
+		}
+	}
+
+	/* want to handle IF event as well */
+	brcmf_dbg(EVENT, "enable event IF\n");
+	setbit(eventmask, BRCMF_E_IF);
+
+	err = brcmf_fil_iovar_data_set(ifp, "event_msgs",
+				       eventmask, BRCMF_EVENTING_MASK_LEN);
+	if (err)
+		brcmf_err("Set event_msgs error (%d)\n", err);
+
+	return err;
+}
+
+/**
+ * brcmf_fweh_process_event() - process skb as firmware event.
+ *
+ * @drvr: driver information object.
+ * @event_packet: event packet to process.
+ *
+ * If the packet buffer contains a firmware event message it will
+ * dispatch the event to a registered handler (using worker).
+ */
+void brcmf_fweh_process_event(struct brcmf_pub *drvr,
+			      struct brcmf_event *event_packet)
+{
+	enum brcmf_fweh_event_code code;
+	struct brcmf_fweh_info *fweh = &drvr->fweh;
+	struct brcmf_fweh_queue_item *event;
+	gfp_t alloc_flag = GFP_KERNEL;
+	void *data;
+	u32 datalen;
+
+	/* get event info */
+	code = get_unaligned_be32(&event_packet->msg.event_type);
+	datalen = get_unaligned_be32(&event_packet->msg.datalen);
+	data = &event_packet[1];
+
+	if (code >= BRCMF_E_LAST)
+		return;
+
+	if (code != BRCMF_E_IF && !fweh->evt_handler[code])
+		return;
+
+	if (in_interrupt())
+		alloc_flag = GFP_ATOMIC;
+
+	event = kzalloc(sizeof(*event) + datalen, alloc_flag);
+	if (!event)
+		return;
+
+	event->code = code;
+	event->ifidx = event_packet->msg.ifidx;
+
+	/* use memcpy to get aligned event message */
+	memcpy(&event->emsg, &event_packet->msg, sizeof(event->emsg));
+	memcpy(event->data, data, datalen);
+	memcpy(event->ifaddr, event_packet->eth.h_dest, ETH_ALEN);
+
+	brcmf_fweh_queue_event(fweh, event);
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
new file mode 100644
index 0000000..d9a9428
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
@@ -0,0 +1,289 @@
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+
+#ifndef FWEH_H_
+#define FWEH_H_
+
+#include <asm/unaligned.h>
+#include <linux/skbuff.h>
+#include <linux/if_ether.h>
+#include <linux/if.h>
+
+/* forward declarations */
+struct brcmf_pub;
+struct brcmf_if;
+struct brcmf_cfg80211_info;
+struct brcmf_event;
+
+/* list of firmware events */
+#define BRCMF_FWEH_EVENT_ENUM_DEFLIST \
+	BRCMF_ENUM_DEF(SET_SSID, 0) \
+	BRCMF_ENUM_DEF(JOIN, 1) \
+	BRCMF_ENUM_DEF(START, 2) \
+	BRCMF_ENUM_DEF(AUTH, 3) \
+	BRCMF_ENUM_DEF(AUTH_IND, 4) \
+	BRCMF_ENUM_DEF(DEAUTH, 5) \
+	BRCMF_ENUM_DEF(DEAUTH_IND, 6) \
+	BRCMF_ENUM_DEF(ASSOC, 7) \
+	BRCMF_ENUM_DEF(ASSOC_IND, 8) \
+	BRCMF_ENUM_DEF(REASSOC, 9) \
+	BRCMF_ENUM_DEF(REASSOC_IND, 10) \
+	BRCMF_ENUM_DEF(DISASSOC, 11) \
+	BRCMF_ENUM_DEF(DISASSOC_IND, 12) \
+	BRCMF_ENUM_DEF(QUIET_START, 13) \
+	BRCMF_ENUM_DEF(QUIET_END, 14) \
+	BRCMF_ENUM_DEF(BEACON_RX, 15) \
+	BRCMF_ENUM_DEF(LINK, 16) \
+	BRCMF_ENUM_DEF(MIC_ERROR, 17) \
+	BRCMF_ENUM_DEF(NDIS_LINK, 18) \
+	BRCMF_ENUM_DEF(ROAM, 19) \
+	BRCMF_ENUM_DEF(TXFAIL, 20) \
+	BRCMF_ENUM_DEF(PMKID_CACHE, 21) \
+	BRCMF_ENUM_DEF(RETROGRADE_TSF, 22) \
+	BRCMF_ENUM_DEF(PRUNE, 23) \
+	BRCMF_ENUM_DEF(AUTOAUTH, 24) \
+	BRCMF_ENUM_DEF(EAPOL_MSG, 25) \
+	BRCMF_ENUM_DEF(SCAN_COMPLETE, 26) \
+	BRCMF_ENUM_DEF(ADDTS_IND, 27) \
+	BRCMF_ENUM_DEF(DELTS_IND, 28) \
+	BRCMF_ENUM_DEF(BCNSENT_IND, 29) \
+	BRCMF_ENUM_DEF(BCNRX_MSG, 30) \
+	BRCMF_ENUM_DEF(BCNLOST_MSG, 31) \
+	BRCMF_ENUM_DEF(ROAM_PREP, 32) \
+	BRCMF_ENUM_DEF(PFN_NET_FOUND, 33) \
+	BRCMF_ENUM_DEF(PFN_NET_LOST, 34) \
+	BRCMF_ENUM_DEF(RESET_COMPLETE, 35) \
+	BRCMF_ENUM_DEF(JOIN_START, 36) \
+	BRCMF_ENUM_DEF(ROAM_START, 37) \
+	BRCMF_ENUM_DEF(ASSOC_START, 38) \
+	BRCMF_ENUM_DEF(IBSS_ASSOC, 39) \
+	BRCMF_ENUM_DEF(RADIO, 40) \
+	BRCMF_ENUM_DEF(PSM_WATCHDOG, 41) \
+	BRCMF_ENUM_DEF(PROBREQ_MSG, 44) \
+	BRCMF_ENUM_DEF(SCAN_CONFIRM_IND, 45) \
+	BRCMF_ENUM_DEF(PSK_SUP, 46) \
+	BRCMF_ENUM_DEF(COUNTRY_CODE_CHANGED, 47) \
+	BRCMF_ENUM_DEF(EXCEEDED_MEDIUM_TIME, 48) \
+	BRCMF_ENUM_DEF(ICV_ERROR, 49) \
+	BRCMF_ENUM_DEF(UNICAST_DECODE_ERROR, 50) \
+	BRCMF_ENUM_DEF(MULTICAST_DECODE_ERROR, 51) \
+	BRCMF_ENUM_DEF(TRACE, 52) \
+	BRCMF_ENUM_DEF(IF, 54) \
+	BRCMF_ENUM_DEF(P2P_DISC_LISTEN_COMPLETE, 55) \
+	BRCMF_ENUM_DEF(RSSI, 56) \
+	BRCMF_ENUM_DEF(EXTLOG_MSG, 58) \
+	BRCMF_ENUM_DEF(ACTION_FRAME, 59) \
+	BRCMF_ENUM_DEF(ACTION_FRAME_COMPLETE, 60) \
+	BRCMF_ENUM_DEF(PRE_ASSOC_IND, 61) \
+	BRCMF_ENUM_DEF(PRE_REASSOC_IND, 62) \
+	BRCMF_ENUM_DEF(CHANNEL_ADOPTED, 63) \
+	BRCMF_ENUM_DEF(AP_STARTED, 64) \
+	BRCMF_ENUM_DEF(DFS_AP_STOP, 65) \
+	BRCMF_ENUM_DEF(DFS_AP_RESUME, 66) \
+	BRCMF_ENUM_DEF(ESCAN_RESULT, 69) \
+	BRCMF_ENUM_DEF(ACTION_FRAME_OFF_CHAN_COMPLETE, 70) \
+	BRCMF_ENUM_DEF(PROBERESP_MSG, 71) \
+	BRCMF_ENUM_DEF(P2P_PROBEREQ_MSG, 72) \
+	BRCMF_ENUM_DEF(DCS_REQUEST, 73) \
+	BRCMF_ENUM_DEF(FIFO_CREDIT_MAP, 74) \
+	BRCMF_ENUM_DEF(ACTION_FRAME_RX, 75) \
+	BRCMF_ENUM_DEF(TDLS_PEER_EVENT, 92) \
+	BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127)
+
+#define BRCMF_ENUM_DEF(id, val) \
+	BRCMF_E_##id = (val),
+
+/* firmware event codes sent by the dongle */
+enum brcmf_fweh_event_code {
+	BRCMF_FWEH_EVENT_ENUM_DEFLIST
+	/* this determines event mask length which must match
+	 * minimum length check in device firmware so it is
+	 * hard-coded here.
+	 */
+	BRCMF_E_LAST = 139
+};
+#undef BRCMF_ENUM_DEF
+
+#define BRCMF_EVENTING_MASK_LEN		DIV_ROUND_UP(BRCMF_E_LAST, 8)
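+/* With BRCMF_E_LAST fixed at 139 this works out to an 18-byte event mask. */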
+
+/* flags field values in struct brcmf_event_msg */
+#define BRCMF_EVENT_MSG_LINK		0x01
+#define BRCMF_EVENT_MSG_FLUSHTXQ	0x02
+#define BRCMF_EVENT_MSG_GROUP		0x04
+
+/* status field values in struct brcmf_event_msg */
+#define BRCMF_E_STATUS_SUCCESS			0
+#define BRCMF_E_STATUS_FAIL			1
+#define BRCMF_E_STATUS_TIMEOUT			2
+#define BRCMF_E_STATUS_NO_NETWORKS		3
+#define BRCMF_E_STATUS_ABORT			4
+#define BRCMF_E_STATUS_NO_ACK			5
+#define BRCMF_E_STATUS_UNSOLICITED		6
+#define BRCMF_E_STATUS_ATTEMPT			7
+#define BRCMF_E_STATUS_PARTIAL			8
+#define BRCMF_E_STATUS_NEWSCAN			9
+#define BRCMF_E_STATUS_NEWASSOC			10
+#define BRCMF_E_STATUS_11HQUIET			11
+#define BRCMF_E_STATUS_SUPPRESS			12
+#define BRCMF_E_STATUS_NOCHANS			13
+#define BRCMF_E_STATUS_CS_ABORT			15
+#define BRCMF_E_STATUS_ERROR			16
+
+/* reason field values in struct brcmf_event_msg */
+#define BRCMF_E_REASON_INITIAL_ASSOC		0
+#define BRCMF_E_REASON_LOW_RSSI			1
+#define BRCMF_E_REASON_DEAUTH			2
+#define BRCMF_E_REASON_DISASSOC			3
+#define BRCMF_E_REASON_BCNS_LOST		4
+#define BRCMF_E_REASON_MINTXRATE		9
+#define BRCMF_E_REASON_TXFAIL			10
+
+#define BRCMF_E_REASON_LINK_BSSCFG_DIS		4
+#define BRCMF_E_REASON_FAST_ROAM_FAILED		5
+#define BRCMF_E_REASON_DIRECTED_ROAM		6
+#define BRCMF_E_REASON_TSPEC_REJECTED		7
+#define BRCMF_E_REASON_BETTER_AP		8
+
+#define BRCMF_E_REASON_TDLS_PEER_DISCOVERED	0
+#define BRCMF_E_REASON_TDLS_PEER_CONNECTED	1
+#define BRCMF_E_REASON_TDLS_PEER_DISCONNECTED	2
+
+/* action field values for brcmf_ifevent */
+#define BRCMF_E_IF_ADD				1
+#define BRCMF_E_IF_DEL				2
+#define BRCMF_E_IF_CHANGE			3
+
+/* flag field values for brcmf_ifevent */
+#define BRCMF_E_IF_FLAG_NOIF			1
+
+/* role field values for brcmf_ifevent */
+#define BRCMF_E_IF_ROLE_STA			0
+#define BRCMF_E_IF_ROLE_AP			1
+#define BRCMF_E_IF_ROLE_WDS			2
+#define BRCMF_E_IF_ROLE_P2P_GO			3
+#define BRCMF_E_IF_ROLE_P2P_CLIENT		4
+
+/*
+ * definitions for event packet validation.
+ */
+#define BRCMF_EVENT_OUI_OFFSET		19
+#define BRCM_OUI			"\x00\x10\x18"
+#define DOT11_OUI_LEN			3
+#define BCMILCP_BCM_SUBTYPE_EVENT	1
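+/* BRCMF_EVENT_OUI_OFFSET is the offset of brcm_ethhdr::oui within struct
+ * brcmf_event: 14 bytes of ethernet header plus the 2-byte subtype, 2-byte
+ * length and 1-byte version fields in front of it (14 + 2 + 2 + 1 = 19).
+ */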
+
+
+/**
+ * struct brcmf_event_msg - firmware event message.
+ *
+ * @version: version information.
+ * @flags: event flags.
+ * @event_code: firmware event code.
+ * @status: status information.
+ * @reason: reason code.
+ * @auth_type: authentication type.
+ * @datalen: length of event data buffer.
+ * @addr: ether address.
+ * @ifname: interface name.
+ * @ifidx: interface index.
+ * @bsscfgidx: bsscfg index.
+ */
+struct brcmf_event_msg {
+	u16 version;
+	u16 flags;
+	u32 event_code;
+	u32 status;
+	u32 reason;
+	s32 auth_type;
+	u32 datalen;
+	u8 addr[ETH_ALEN];
+	char ifname[IFNAMSIZ];
+	u8 ifidx;
+	u8 bsscfgidx;
+};
+
+struct brcmf_if_event {
+	u8 ifidx;
+	u8 action;
+	u8 flags;
+	u8 bssidx;
+	u8 role;
+};
+
+typedef int (*brcmf_fweh_handler_t)(struct brcmf_if *ifp,
+				    const struct brcmf_event_msg *evtmsg,
+				    void *data);
+
+/**
+ * struct brcmf_fweh_info - firmware event handling information.
+ *
+ * @p2pdev_setup_ongoing: P2P device creation in progress.
+ * @event_work: event worker.
+ * @evt_q_lock: lock for event queue protection.
+ * @event_q: event queue.
+ * @evt_handler: registered event handlers.
+ */
+struct brcmf_fweh_info {
+	bool p2pdev_setup_ongoing;
+	struct work_struct event_work;
+	spinlock_t evt_q_lock;
+	struct list_head event_q;
+	int (*evt_handler[BRCMF_E_LAST])(struct brcmf_if *ifp,
+					 const struct brcmf_event_msg *evtmsg,
+					 void *data);
+};
+
+void brcmf_fweh_attach(struct brcmf_pub *drvr);
+void brcmf_fweh_detach(struct brcmf_pub *drvr);
+int brcmf_fweh_register(struct brcmf_pub *drvr, enum brcmf_fweh_event_code code,
+			int (*handler)(struct brcmf_if *ifp,
+				       const struct brcmf_event_msg *evtmsg,
+				       void *data));
+void brcmf_fweh_unregister(struct brcmf_pub *drvr,
+			   enum brcmf_fweh_event_code code);
+int brcmf_fweh_activate_events(struct brcmf_if *ifp);
+void brcmf_fweh_process_event(struct brcmf_pub *drvr,
+			      struct brcmf_event *event_packet);
+void brcmf_fweh_p2pdev_setup(struct brcmf_if *ifp, bool ongoing);
+
+static inline void brcmf_fweh_process_skb(struct brcmf_pub *drvr,
+					  struct sk_buff *skb)
+{
+	struct brcmf_event *event_packet;
+	u8 *data;
+	u16 usr_stype;
+
+	/* only process events when protocol matches */
+	if (skb->protocol != cpu_to_be16(ETH_P_LINK_CTL))
+		return;
+
+	/* check for BRCM oui match */
+	event_packet = (struct brcmf_event *)skb_mac_header(skb);
+	data = (u8 *)event_packet;
+	data += BRCMF_EVENT_OUI_OFFSET;
+	if (memcmp(BRCM_OUI, data, DOT11_OUI_LEN))
+		return;
+
+	/* final match on usr_subtype */
+	data += DOT11_OUI_LEN;
+	usr_stype = get_unaligned_be16(data);
+	if (usr_stype != BCMILCP_BCM_SUBTYPE_EVENT)
+		return;
+
+	brcmf_fweh_process_event(drvr, event_packet);
+}
+
+#endif /* FWEH_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
new file mode 100644
index 0000000..dcfa0bb
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
@@ -0,0 +1,420 @@
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* FWIL is the Firmware Interface Layer. This module contains the support
+ * functions used to set and get variables to and from the firmware.
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <brcmu_utils.h>
+#include <brcmu_wifi.h>
+#include "core.h"
+#include "bus.h"
+#include "debug.h"
+#include "tracepoint.h"
+#include "fwil.h"
+#include "proto.h"
+
+
+#define MAX_HEX_DUMP_LEN	64
+
+#ifdef DEBUG
+static const char * const brcmf_fil_errstr[] = {
+	"BCME_OK",
+	"BCME_ERROR",
+	"BCME_BADARG",
+	"BCME_BADOPTION",
+	"BCME_NOTUP",
+	"BCME_NOTDOWN",
+	"BCME_NOTAP",
+	"BCME_NOTSTA",
+	"BCME_BADKEYIDX",
+	"BCME_RADIOOFF",
+	"BCME_NOTBANDLOCKED",
+	"BCME_NOCLK",
+	"BCME_BADRATESET",
+	"BCME_BADBAND",
+	"BCME_BUFTOOSHORT",
+	"BCME_BUFTOOLONG",
+	"BCME_BUSY",
+	"BCME_NOTASSOCIATED",
+	"BCME_BADSSIDLEN",
+	"BCME_OUTOFRANGECHAN",
+	"BCME_BADCHAN",
+	"BCME_BADADDR",
+	"BCME_NORESOURCE",
+	"BCME_UNSUPPORTED",
+	"BCME_BADLEN",
+	"BCME_NOTREADY",
+	"BCME_EPERM",
+	"BCME_NOMEM",
+	"BCME_ASSOCIATED",
+	"BCME_RANGE",
+	"BCME_NOTFOUND",
+	"BCME_WME_NOT_ENABLED",
+	"BCME_TSPEC_NOTFOUND",
+	"BCME_ACM_NOTSUPPORTED",
+	"BCME_NOT_WME_ASSOCIATION",
+	"BCME_SDIO_ERROR",
+	"BCME_DONGLE_DOWN",
+	"BCME_VERSION",
+	"BCME_TXFAIL",
+	"BCME_RXFAIL",
+	"BCME_NODEVICE",
+	"BCME_NMODE_DISABLED",
+	"BCME_NONRESIDENT",
+	"BCME_SCANREJECT",
+	"BCME_USAGE_ERROR",
+	"BCME_IOCTL_ERROR",
+	"BCME_SERIAL_PORT_ERR",
+	"BCME_DISABLED",
+	"BCME_DECERR",
+	"BCME_ENCERR",
+	"BCME_MICERR",
+	"BCME_REPLAY",
+	"BCME_IE_NOTFOUND",
+};
+
+static const char *brcmf_fil_get_errstr(u32 err)
+{
+	if (err >= ARRAY_SIZE(brcmf_fil_errstr))
+		return "(unknown)";
+
+	return brcmf_fil_errstr[err];
+}
+#else
+static const char *brcmf_fil_get_errstr(u32 err)
+{
+	return "";
+}
+#endif /* DEBUG */
+
+static s32
+brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set)
+{
+	struct brcmf_pub *drvr = ifp->drvr;
+	s32 err;
+
+	if (drvr->bus_if->state != BRCMF_BUS_UP) {
+		brcmf_err("bus is down. we have nothing to do.\n");
+		return -EIO;
+	}
+
+	if (data != NULL)
+		len = min_t(uint, len, BRCMF_DCMD_MAXLEN);
+	if (set)
+		err = brcmf_proto_set_dcmd(drvr, ifp->ifidx, cmd, data, len);
+	else
+		err = brcmf_proto_query_dcmd(drvr, ifp->ifidx, cmd, data, len);
+
+	if (err >= 0)
+		return 0;
+
+	brcmf_dbg(FIL, "Failed: %s (%d)\n",
+		  brcmf_fil_get_errstr((u32)(-err)), err);
+	return -EBADE;
+}
+
+s32
+brcmf_fil_cmd_data_set(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
+{
+	s32 err;
+
+	mutex_lock(&ifp->drvr->proto_block);
+
+	brcmf_dbg(FIL, "ifidx=%d, cmd=%d, len=%d\n", ifp->ifidx, cmd, len);
+	brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
+			   min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");
+
+	err = brcmf_fil_cmd_data(ifp, cmd, data, len, true);
+	mutex_unlock(&ifp->drvr->proto_block);
+
+	return err;
+}
+
+s32
+brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
+{
+	s32 err;
+
+	mutex_lock(&ifp->drvr->proto_block);
+	err = brcmf_fil_cmd_data(ifp, cmd, data, len, false);
+
+	brcmf_dbg(FIL, "ifidx=%d, cmd=%d, len=%d\n", ifp->ifidx, cmd, len);
+	brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
+			   min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");
+
+	mutex_unlock(&ifp->drvr->proto_block);
+
+	return err;
+}
+
+
+s32
+brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data)
+{
+	s32 err;
+	__le32 data_le = cpu_to_le32(data);
+
+	mutex_lock(&ifp->drvr->proto_block);
+	brcmf_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, data);
+	err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), true);
+	mutex_unlock(&ifp->drvr->proto_block);
+
+	return err;
+}
+
+s32
+brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data)
+{
+	s32 err;
+	__le32 data_le = cpu_to_le32(*data);
+
+	mutex_lock(&ifp->drvr->proto_block);
+	err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), false);
+	mutex_unlock(&ifp->drvr->proto_block);
+	*data = le32_to_cpu(data_le);
+	brcmf_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, *data);
+
+	return err;
+}
+
+static u32
+brcmf_create_iovar(char *name, const char *data, u32 datalen,
+		   char *buf, u32 buflen)
+{
+	u32 len;
+
+	len = strlen(name) + 1;
+
+	if ((len + datalen) > buflen)
+		return 0;
+
+	memcpy(buf, name, len);
+
+	/* append data onto the end of the name string */
+	if (data && datalen)
+		memcpy(&buf[len], data, datalen);
+
+	return len + datalen;
+}
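+
+/* The resulting buffer is the NUL-terminated iovar name followed directly by
+ * the parameter bytes. For example, setting the u32 iovar "mpc" to 1 yields
+ * the 8-byte buffer 'm' 'p' 'c' '\0' 0x01 0x00 0x00 0x00 (value little-endian).
+ */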
+
+
+s32
+brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, const void *data,
+			 u32 len)
+{
+	struct brcmf_pub *drvr = ifp->drvr;
+	s32 err;
+	u32 buflen;
+
+	mutex_lock(&drvr->proto_block);
+
+	brcmf_dbg(FIL, "ifidx=%d, name=%s, len=%d\n", ifp->ifidx, name, len);
+	brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
+			   min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");
+
+	buflen = brcmf_create_iovar(name, data, len, drvr->proto_buf,
+				    sizeof(drvr->proto_buf));
+	if (buflen) {
+		err = brcmf_fil_cmd_data(ifp, BRCMF_C_SET_VAR, drvr->proto_buf,
+					 buflen, true);
+	} else {
+		err = -EPERM;
+		brcmf_err("Creating iovar failed\n");
+	}
+
+	mutex_unlock(&drvr->proto_block);
+	return err;
+}
+
+s32
+brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data,
+			 u32 len)
+{
+	struct brcmf_pub *drvr = ifp->drvr;
+	s32 err;
+	u32 buflen;
+
+	mutex_lock(&drvr->proto_block);
+
+	buflen = brcmf_create_iovar(name, data, len, drvr->proto_buf,
+				    sizeof(drvr->proto_buf));
+	if (buflen) {
+		err = brcmf_fil_cmd_data(ifp, BRCMF_C_GET_VAR, drvr->proto_buf,
+					 buflen, false);
+		if (err == 0)
+			memcpy(data, drvr->proto_buf, len);
+	} else {
+		err = -EPERM;
+		brcmf_err("Creating iovar failed\n");
+	}
+
+	brcmf_dbg(FIL, "ifidx=%d, name=%s, len=%d\n", ifp->ifidx, name, len);
+	brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
+			   min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");
+
+	mutex_unlock(&drvr->proto_block);
+	return err;
+}
+
+s32
+brcmf_fil_iovar_int_set(struct brcmf_if *ifp, char *name, u32 data)
+{
+	__le32 data_le = cpu_to_le32(data);
+
+	return brcmf_fil_iovar_data_set(ifp, name, &data_le, sizeof(data_le));
+}
+
+s32
+brcmf_fil_iovar_int_get(struct brcmf_if *ifp, char *name, u32 *data)
+{
+	__le32 data_le = cpu_to_le32(*data);
+	s32 err;
+
+	err = brcmf_fil_iovar_data_get(ifp, name, &data_le, sizeof(data_le));
+	if (err == 0)
+		*data = le32_to_cpu(data_le);
+	return err;
+}
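+
+/* Example (iovar names as used elsewhere in this driver):
+ *
+ *	u32 wsec;
+ *
+ *	err = brcmf_fil_iovar_int_set(ifp, "mpc", 0);
+ *	if (!err)
+ *		err = brcmf_fil_iovar_int_get(ifp, "wsec", &wsec);
+ *
+ * Both helpers perform the cpu_to_le32()/le32_to_cpu() conversion internally.
+ */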
+
+static u32
+brcmf_create_bsscfg(s32 bssidx, char *name, char *data, u32 datalen, char *buf,
+		    u32 buflen)
+{
+	const s8 *prefix = "bsscfg:";
+	s8 *p;
+	u32 prefixlen;
+	u32 namelen;
+	u32 iolen;
+	__le32 bssidx_le;
+
+	if (bssidx == 0)
+		return brcmf_create_iovar(name, data, datalen, buf, buflen);
+
+	prefixlen = strlen(prefix);
+	namelen = strlen(name) + 1; /* length of iovar name + null */
+	iolen = prefixlen + namelen + sizeof(bssidx_le) + datalen;
+
+	if (buflen < iolen) {
+		brcmf_err("buffer is too short\n");
+		return 0;
+	}
+
+	p = buf;
+
+	/* copy prefix, no null */
+	memcpy(p, prefix, prefixlen);
+	p += prefixlen;
+
+	/* copy iovar name including null */
+	memcpy(p, name, namelen);
+	p += namelen;
+
+	/* bss config index as first data */
+	bssidx_le = cpu_to_le32(bssidx);
+	memcpy(p, &bssidx_le, sizeof(bssidx_le));
+	p += sizeof(bssidx_le);
+
+	/* parameter buffer follows */
+	if (datalen)
+		memcpy(p, data, datalen);
+
+	return iolen;
+}
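+
+/* For a non-zero bss index the buffer therefore looks like "bsscfg:<name>\0"
+ * followed by the 32-bit little-endian bss index and then the parameter data;
+ * for index 0 it degenerates to a plain iovar buffer.
+ */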
+
+s32
+brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, char *name,
+			  void *data, u32 len)
+{
+	struct brcmf_pub *drvr = ifp->drvr;
+	s32 err;
+	u32 buflen;
+
+	mutex_lock(&drvr->proto_block);
+
+	brcmf_dbg(FIL, "ifidx=%d, bssidx=%d, name=%s, len=%d\n", ifp->ifidx,
+		  ifp->bssidx, name, len);
+	brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
+			   min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");
+
+	buflen = brcmf_create_bsscfg(ifp->bssidx, name, data, len,
+				     drvr->proto_buf, sizeof(drvr->proto_buf));
+	if (buflen) {
+		err = brcmf_fil_cmd_data(ifp, BRCMF_C_SET_VAR, drvr->proto_buf,
+					 buflen, true);
+	} else {
+		err = -EPERM;
+		brcmf_err("Creating bsscfg failed\n");
+	}
+
+	mutex_unlock(&drvr->proto_block);
+	return err;
+}
+
+s32
+brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, char *name,
+			  void *data, u32 len)
+{
+	struct brcmf_pub *drvr = ifp->drvr;
+	s32 err;
+	u32 buflen;
+
+	mutex_lock(&drvr->proto_block);
+
+	buflen = brcmf_create_bsscfg(ifp->bssidx, name, data, len,
+				     drvr->proto_buf, sizeof(drvr->proto_buf));
+	if (buflen) {
+		err = brcmf_fil_cmd_data(ifp, BRCMF_C_GET_VAR, drvr->proto_buf,
+					 buflen, false);
+		if (err == 0)
+			memcpy(data, drvr->proto_buf, len);
+	} else {
+		err = -EPERM;
+		brcmf_err("Creating bsscfg failed\n");
+	}
+	brcmf_dbg(FIL, "ifidx=%d, bssidx=%d, name=%s, len=%d\n", ifp->ifidx,
+		  ifp->bssidx, name, len);
+	brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
+			   min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n");
+
+	mutex_unlock(&drvr->proto_block);
+	return err;
+
+}
+
+s32
+brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, char *name, u32 data)
+{
+	__le32 data_le = cpu_to_le32(data);
+
+	return brcmf_fil_bsscfg_data_set(ifp, name, &data_le,
+					 sizeof(data_le));
+}
+
+s32
+brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, char *name, u32 *data)
+{
+	__le32 data_le = cpu_to_le32(*data);
+	s32 err;
+
+	err = brcmf_fil_bsscfg_data_get(ifp, name, &data_le,
+					sizeof(data_le));
+	if (err == 0)
+		*data = le32_to_cpu(data_le);
+	return err;
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
new file mode 100644
index 0000000..b20fc0f
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _fwil_h_
+#define _fwil_h_
+
+/*******************************************************************************
+ * Dongle command codes that are interpreted by firmware
+ ******************************************************************************/
+#define BRCMF_C_GET_VERSION			1
+#define BRCMF_C_UP				2
+#define BRCMF_C_DOWN				3
+#define BRCMF_C_SET_PROMISC			10
+#define BRCMF_C_GET_RATE			12
+#define BRCMF_C_GET_INFRA			19
+#define BRCMF_C_SET_INFRA			20
+#define BRCMF_C_GET_AUTH			21
+#define BRCMF_C_SET_AUTH			22
+#define BRCMF_C_GET_BSSID			23
+#define BRCMF_C_GET_SSID			25
+#define BRCMF_C_SET_SSID			26
+#define BRCMF_C_TERMINATED			28
+#define BRCMF_C_GET_CHANNEL			29
+#define BRCMF_C_SET_CHANNEL			30
+#define BRCMF_C_GET_SRL				31
+#define BRCMF_C_SET_SRL				32
+#define BRCMF_C_GET_LRL				33
+#define BRCMF_C_SET_LRL				34
+#define BRCMF_C_GET_RADIO			37
+#define BRCMF_C_SET_RADIO			38
+#define BRCMF_C_GET_PHYTYPE			39
+#define BRCMF_C_SET_KEY				45
+#define BRCMF_C_GET_REGULATORY			46
+#define BRCMF_C_SET_REGULATORY			47
+#define BRCMF_C_SET_PASSIVE_SCAN		49
+#define BRCMF_C_SCAN				50
+#define BRCMF_C_SCAN_RESULTS			51
+#define BRCMF_C_DISASSOC			52
+#define BRCMF_C_REASSOC				53
+#define BRCMF_C_SET_ROAM_TRIGGER		55
+#define BRCMF_C_SET_ROAM_DELTA			57
+#define BRCMF_C_GET_BCNPRD			75
+#define BRCMF_C_SET_BCNPRD			76
+#define BRCMF_C_GET_DTIMPRD			77
+#define BRCMF_C_SET_DTIMPRD			78
+#define BRCMF_C_SET_COUNTRY			84
+#define BRCMF_C_GET_PM				85
+#define BRCMF_C_SET_PM				86
+#define BRCMF_C_GET_REVINFO			98
+#define BRCMF_C_GET_CURR_RATESET		114
+#define BRCMF_C_GET_AP				117
+#define BRCMF_C_SET_AP				118
+#define BRCMF_C_SET_SCB_AUTHORIZE		121
+#define BRCMF_C_SET_SCB_DEAUTHORIZE		122
+#define BRCMF_C_GET_RSSI			127
+#define BRCMF_C_GET_WSEC			133
+#define BRCMF_C_SET_WSEC			134
+#define BRCMF_C_GET_PHY_NOISE			135
+#define BRCMF_C_GET_BSS_INFO			136
+#define BRCMF_C_GET_BANDLIST			140
+#define BRCMF_C_SET_SCB_TIMEOUT			158
+#define BRCMF_C_GET_ASSOCLIST			159
+#define BRCMF_C_GET_PHYLIST			180
+#define BRCMF_C_SET_SCAN_CHANNEL_TIME		185
+#define BRCMF_C_SET_SCAN_UNASSOC_TIME		187
+#define BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON	201
+#define BRCMF_C_GET_VALID_CHANNELS		217
+#define BRCMF_C_GET_KEY_PRIMARY			235
+#define BRCMF_C_SET_KEY_PRIMARY			236
+#define BRCMF_C_SET_SCAN_PASSIVE_TIME		258
+#define BRCMF_C_GET_VAR				262
+#define BRCMF_C_SET_VAR				263
+
+s32 brcmf_fil_cmd_data_set(struct brcmf_if *ifp, u32 cmd, void *data, u32 len);
+s32 brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len);
+s32 brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data);
+s32 brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data);
+
+s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, const void *data,
+			     u32 len);
+s32 brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data,
+			     u32 len);
+s32 brcmf_fil_iovar_int_set(struct brcmf_if *ifp, char *name, u32 data);
+s32 brcmf_fil_iovar_int_get(struct brcmf_if *ifp, char *name, u32 *data);
+
+s32 brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, char *name, void *data,
+			      u32 len);
+s32 brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, char *name, void *data,
+			      u32 len);
+s32 brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, char *name, u32 data);
+s32 brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, char *name, u32 *data);
+
+#endif /* _fwil_h_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
new file mode 100644
index 0000000..4320c4c
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
@@ -0,0 +1,642 @@
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+
+#ifndef FWIL_TYPES_H_
+#define FWIL_TYPES_H_
+
+#include <linux/if_ether.h>
+
+
+#define BRCMF_FIL_ACTION_FRAME_SIZE	1800
+
+/* ARP Offload feature flags for arp_ol iovar */
+#define BRCMF_ARP_OL_AGENT		0x00000001
+#define BRCMF_ARP_OL_SNOOP		0x00000002
+#define BRCMF_ARP_OL_HOST_AUTO_REPLY	0x00000004
+#define BRCMF_ARP_OL_PEER_AUTO_REPLY	0x00000008
+
+#define	BRCMF_BSS_INFO_VERSION	109 /* curr ver of brcmf_bss_info_le struct */
+#define BRCMF_BSS_RSSI_ON_CHANNEL	0x0002
+
+#define BRCMF_STA_WME              0x00000002      /* WMM association */
+#define BRCMF_STA_AUTHE            0x00000008      /* Authenticated */
+#define BRCMF_STA_ASSOC            0x00000010      /* Associated */
+#define BRCMF_STA_AUTHO            0x00000020      /* Authorized */
+#define BRCMF_STA_SCBSTATS         0x00004000      /* Per STA debug stats */
+
+/* size of brcmf_scan_params not including variable length array */
+#define BRCMF_SCAN_PARAMS_FIXED_SIZE	64
+
+/* masks for channel and ssid count */
+#define BRCMF_SCAN_PARAMS_COUNT_MASK	0x0000ffff
+#define BRCMF_SCAN_PARAMS_NSSID_SHIFT	16
+
+/* scan type definitions */
+#define BRCMF_SCANTYPE_DEFAULT		0xFF
+#define BRCMF_SCANTYPE_ACTIVE		0
+#define BRCMF_SCANTYPE_PASSIVE		1
+
+/* primary (i.e. tx) key */
+#define BRCMF_PRIMARY_KEY		(1 << 1)
+#define DOT11_BSSTYPE_ANY		2
+#define BRCMF_ESCAN_REQ_VERSION		1
+
+#define BRCMF_MAXRATES_IN_SET		16	/* max # of rates in rateset */
+
+/* OBSS Coex Auto/On/Off */
+#define BRCMF_OBSS_COEX_AUTO		(-1)
+#define BRCMF_OBSS_COEX_OFF		0
+#define BRCMF_OBSS_COEX_ON		1
+
+/* WOWL bits */
+/* Wakeup on Magic packet: */
+#define BRCMF_WOWL_MAGIC		(1 << 0)
+/* Wakeup on Netpattern */
+#define BRCMF_WOWL_NET			(1 << 1)
+/* Wakeup on loss-of-link due to Disassoc/Deauth: */
+#define BRCMF_WOWL_DIS			(1 << 2)
+/* Wakeup on retrograde TSF: */
+#define BRCMF_WOWL_RETR			(1 << 3)
+/* Wakeup on loss of beacon: */
+#define BRCMF_WOWL_BCN			(1 << 4)
+/* Wakeup after test: */
+#define BRCMF_WOWL_TST			(1 << 5)
+/* Wakeup after PTK refresh: */
+#define BRCMF_WOWL_M1			(1 << 6)
+/* Wakeup after receipt of EAP-Identity Req: */
+#define BRCMF_WOWL_EAPID		(1 << 7)
+/* Wakeind via PME(0) or GPIO(1): */
+#define BRCMF_WOWL_PME_GPIO		(1 << 8)
+/* need tkip phase 1 key to be updated by the driver: */
+#define BRCMF_WOWL_NEEDTKIP1		(1 << 9)
+/* enable wakeup if GTK fails: */
+#define BRCMF_WOWL_GTK_FAILURE		(1 << 10)
+/* support extended magic packets: */
+#define BRCMF_WOWL_EXTMAGPAT		(1 << 11)
+/* support ARP/NS/keepalive offloading: */
+#define BRCMF_WOWL_ARPOFFLOAD		(1 << 12)
+/* read protocol version for EAPOL frames: */
+#define BRCMF_WOWL_WPA2			(1 << 13)
+/* If the bit is set, use key rotation: */
+#define BRCMF_WOWL_KEYROT		(1 << 14)
+/* If the bit is set, the received frame was a bcast frame: */
+#define BRCMF_WOWL_BCAST		(1 << 15)
+/* If the bit is set, scan offload is enabled: */
+#define BRCMF_WOWL_SCANOL		(1 << 16)
+/* Wakeup on tcpkeep alive timeout: */
+#define BRCMF_WOWL_TCPKEEP_TIME		(1 << 17)
+/* Wakeup on mDNS Conflict Resolution: */
+#define BRCMF_WOWL_MDNS_CONFLICT	(1 << 18)
+/* Wakeup on mDNS Service Connect: */
+#define BRCMF_WOWL_MDNS_SERVICE		(1 << 19)
+/* tcp keepalive got data: */
+#define BRCMF_WOWL_TCPKEEP_DATA		(1 << 20)
+/* Firmware died in wowl mode: */
+#define BRCMF_WOWL_FW_HALT		(1 << 21)
+/* Enable detection of radio button changes: */
+#define BRCMF_WOWL_ENAB_HWRADIO		(1 << 22)
+/* Offloads detected MIC failure(s): */
+#define BRCMF_WOWL_MIC_FAIL		(1 << 23)
+/* Wakeup in Unassociated state (Net/Magic Pattern): */
+#define BRCMF_WOWL_UNASSOC		(1 << 24)
+/* Wakeup if received matched secured pattern: */
+#define BRCMF_WOWL_SECURE		(1 << 25)
+/* Link Down indication in WoWL mode: */
+#define BRCMF_WOWL_LINKDOWN		(1 << 31)
+
+#define BRCMF_WOWL_MAXPATTERNS		8
+#define BRCMF_WOWL_MAXPATTERNSIZE	128
+
+#define BRCMF_COUNTRY_BUF_SZ		4
+#define BRCMF_ANT_MAX			4
+
+#define BRCMF_MAX_ASSOCLIST		128
+
+/* join preference types for join_pref iovar */
+enum brcmf_join_pref_types {
+	BRCMF_JOIN_PREF_RSSI = 1,
+	BRCMF_JOIN_PREF_WPA,
+	BRCMF_JOIN_PREF_BAND,
+	BRCMF_JOIN_PREF_RSSI_DELTA,
+};
+
+enum brcmf_fil_p2p_if_types {
+	BRCMF_FIL_P2P_IF_CLIENT,
+	BRCMF_FIL_P2P_IF_GO,
+	BRCMF_FIL_P2P_IF_DYNBCN_GO,
+	BRCMF_FIL_P2P_IF_DEV,
+};
+
+enum brcmf_wowl_pattern_type {
+	BRCMF_WOWL_PATTERN_TYPE_BITMAP = 0,
+	BRCMF_WOWL_PATTERN_TYPE_ARP,
+	BRCMF_WOWL_PATTERN_TYPE_NA
+};
+
+struct brcmf_fil_p2p_if_le {
+	u8 addr[ETH_ALEN];
+	__le16 type;
+	__le16 chspec;
+};
+
+struct brcmf_fil_chan_info_le {
+	__le32 hw_channel;
+	__le32 target_channel;
+	__le32 scan_channel;
+};
+
+struct brcmf_fil_action_frame_le {
+	u8	da[ETH_ALEN];
+	__le16	len;
+	__le32	packet_id;
+	u8	data[BRCMF_FIL_ACTION_FRAME_SIZE];
+};
+
+struct brcmf_fil_af_params_le {
+	__le32					channel;
+	__le32					dwell_time;
+	u8					bssid[ETH_ALEN];
+	u8					pad[2];
+	struct brcmf_fil_action_frame_le	action_frame;
+};
+
+struct brcmf_fil_bss_enable_le {
+	__le32 bsscfg_idx;
+	__le32 enable;
+};
+
+struct brcmf_fil_bwcap_le {
+	__le32 band;
+	__le32 bw_cap;
+};
+
+/**
+ * struct brcmf_tdls_iovar_le - common structure for TDLS iovars.
+ *
+ * @ea: ether address of peer station.
+ * @mode: mode value depending on specific tdls iovar.
+ * @chanspec: channel specification.
+ * @pad: unused (for future use).
+ */
+struct brcmf_tdls_iovar_le {
+	u8 ea[ETH_ALEN];		/* Station address */
+	u8 mode;			/* mode: depends on iovar */
+	__le16 chanspec;
+	__le32 pad;			/* future */
+};
+
+enum brcmf_tdls_manual_ep_ops {
+	BRCMF_TDLS_MANUAL_EP_CREATE = 1,
+	BRCMF_TDLS_MANUAL_EP_DELETE = 3,
+	BRCMF_TDLS_MANUAL_EP_DISCOVERY = 6
+};
+
+/* Pattern matching filter. Specifies an offset within received packets to
+ * start matching, the pattern to match, the size of the pattern, and a bitmask
+ * that indicates which bits within the pattern should be matched.
+ */
+struct brcmf_pkt_filter_pattern_le {
+	/*
+	 * Offset within received packet to start pattern matching.
+	 * Offset '0' is the first byte of the ethernet header.
+	 */
+	__le32 offset;
+	/* Size of the pattern.  Bitmask must be the same size.*/
+	__le32 size_bytes;
+	/*
+	 * Variable length mask and pattern data. mask starts at offset 0.
+	 * Pattern immediately follows mask.
+	 */
+	u8 mask_and_pattern[1];
+};
+
+/* IOVAR "pkt_filter_add" parameter. Used to install packet filters. */
+struct brcmf_pkt_filter_le {
+	__le32 id;		/* Unique filter id, specified by app. */
+	__le32 type;		/* Filter type (WL_PKT_FILTER_TYPE_xxx). */
+	__le32 negate_match;	/* Negate the result of filter matches */
+	union {			/* Filter definitions */
+		struct brcmf_pkt_filter_pattern_le pattern; /* Filter pattern */
+	} u;
+};
+
+/* IOVAR "pkt_filter_enable" parameter. */
+struct brcmf_pkt_filter_enable_le {
+	__le32 id;		/* Unique filter id */
+	__le32 enable;		/* Enable/disable bool */
+};
+
+/* BSS info structure
+ * Applications MUST CHECK ie_offset field and length field to access IEs and
+ * next bss_info structure in a vector (in struct brcmf_scan_results)
+ */
+struct brcmf_bss_info_le {
+	__le32 version;		/* version field */
+	__le32 length;		/* byte length of data in this record,
+				 * starting at version and including IEs
+				 */
+	u8 BSSID[ETH_ALEN];
+	__le16 beacon_period;	/* units are Kusec */
+	__le16 capability;	/* Capability information */
+	u8 SSID_len;
+	u8 SSID[32];
+	struct {
+		__le32 count;   /* # rates in this set */
+		u8 rates[16]; /* rates in 500kbps units w/hi bit set if basic */
+	} rateset;		/* supported rates */
+	__le16 chanspec;	/* chanspec for bss */
+	__le16 atim_window;	/* units are Kusec */
+	u8 dtim_period;	/* DTIM period */
+	__le16 RSSI;		/* receive signal strength (in dBm) */
+	s8 phy_noise;		/* noise (in dBm) */
+
+	u8 n_cap;		/* BSS is 802.11N Capable */
+	/* 802.11N BSS Capabilities (based on HT_CAP_*): */
+	__le32 nbss_cap;
+	u8 ctl_ch;		/* 802.11N BSS control channel number */
+	__le32 reserved32[1];	/* Reserved for expansion of BSS properties */
+	u8 flags;		/* flags */
+	u8 reserved[3];	/* Reserved for expansion of BSS properties */
+	u8 basic_mcs[MCSSET_LEN];	/* 802.11N BSS required MCS set */
+
+	__le16 ie_offset;	/* offset at which IEs start, from beginning */
+	__le32 ie_length;	/* byte length of Information Elements */
+	__le16 SNR;		/* average SNR during frame reception */
+	/* Add new fields here */
+	/* variable length Information Elements */
+};
+
+struct brcm_rateset_le {
+	/* # rates in this set */
+	__le32 count;
+	/* rates in 500kbps units w/hi bit set if basic */
+	u8 rates[BRCMF_MAXRATES_IN_SET];
+};
+
+struct brcmf_ssid {
+	u32 SSID_len;
+	unsigned char SSID[32];
+};
+
+struct brcmf_ssid_le {
+	__le32 SSID_len;
+	unsigned char SSID[32];
+};
+
+struct brcmf_scan_params_le {
+	struct brcmf_ssid_le ssid_le;	/* default: {0, ""} */
+	u8 bssid[ETH_ALEN];	/* default: bcast */
+	s8 bss_type;		/* default: any,
+				 * DOT11_BSSTYPE_ANY/INFRASTRUCTURE/INDEPENDENT
+				 */
+	u8 scan_type;	/* flags, 0 use default */
+	__le32 nprobes;	  /* -1 use default, number of probes per channel */
+	__le32 active_time;	/* -1 use default, dwell time per channel for
+				 * active scanning
+				 */
+	__le32 passive_time;	/* -1 use default, dwell time per channel
+				 * for passive scanning
+				 */
+	__le32 home_time;	/* -1 use default, dwell time for the
+				 * home channel between channel scans
+				 */
+	__le32 channel_num;	/* count of channels and ssids that follow
+				 *
+				 * low half is count of channels in
+				 * channel_list, 0 means default (use all
+				 * available channels)
+				 *
+				 * high half is entries in struct brcmf_ssid
+				 * array that follows channel_list, aligned for
+				 * s32 (4 bytes) meaning an odd channel count
+				 * implies a 2-byte pad between end of
+				 * channel_list and first ssid
+				 *
+				 * if ssid count is zero, single ssid in the
+				 * fixed parameter portion is assumed, otherwise
+				 * ssid in the fixed portion is ignored
+				 */
+	__le16 channel_list[1];	/* list of chanspecs */
+};
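+
+/* Illustrative sketch, not part of the original driver source: packing the
+ * channel and ssid counts into channel_num as described in the comment above,
+ * using BRCMF_SCAN_PARAMS_COUNT_MASK and BRCMF_SCAN_PARAMS_NSSID_SHIFT. The
+ * helper name is hypothetical and the block is compiled out.
+ */
+#if 0
+static __le32 example_pack_channel_num(u32 n_channels, u32 n_ssids)
+{
+	u32 packed;
+
+	/* low half: channel count, high half: ssid count */
+	packed = n_channels & BRCMF_SCAN_PARAMS_COUNT_MASK;
+	packed |= (n_ssids & BRCMF_SCAN_PARAMS_COUNT_MASK) <<
+		  BRCMF_SCAN_PARAMS_NSSID_SHIFT;
+	return cpu_to_le32(packed);
+}
+#endif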
+
+struct brcmf_scan_results {
+	u32 buflen;
+	u32 version;
+	u32 count;
+	struct brcmf_bss_info_le bss_info_le[];
+};
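+
+/* Illustrative sketch, not part of the original driver source: walking the
+ * bss_info vector in struct brcmf_scan_results. As noted in the comment above
+ * struct brcmf_bss_info_le, the ie_offset and length fields must be used to
+ * reach the IEs and the next record. The helper name is hypothetical and the
+ * block is compiled out.
+ */
+#if 0
+static void example_walk_bss_info(struct brcmf_scan_results *results)
+{
+	struct brcmf_bss_info_le *bi = results->bss_info_le;
+	u32 i;
+
+	for (i = 0; i < results->count; i++) {
+		u8 *ie = (u8 *)bi + le16_to_cpu(bi->ie_offset);
+		u32 ie_len = le32_to_cpu(bi->ie_length);
+
+		/* inspect SSID/IEs here using ie and ie_len */
+
+		/* length covers the whole record, so it gives the next one */
+		bi = (struct brcmf_bss_info_le *)((u8 *)bi +
+						  le32_to_cpu(bi->length));
+	}
+}
+#endif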
+
+struct brcmf_escan_params_le {
+	__le32 version;
+	__le16 action;
+	__le16 sync_id;
+	struct brcmf_scan_params_le params_le;
+};
+
+struct brcmf_escan_result_le {
+	__le32 buflen;
+	__le32 version;
+	__le16 sync_id;
+	__le16 bss_count;
+	struct brcmf_bss_info_le bss_info_le;
+};
+
+#define WL_ESCAN_RESULTS_FIXED_SIZE (sizeof(struct brcmf_escan_result_le) - \
+	sizeof(struct brcmf_bss_info_le))
+
+/* used for association with a specific BSSID and chanspec list */
+struct brcmf_assoc_params_le {
+	/* 00:00:00:00:00:00: broadcast scan */
+	u8 bssid[ETH_ALEN];
+	/* 0: all available channels, otherwise count of chanspecs in
+	 * chanspec_list */
+	__le32 chanspec_num;
+	/* list of chanspecs */
+	__le16 chanspec_list[1];
+};
+
+/**
+ * struct brcmf_join_pref_params - parameters for preferred join selection.
+ *
+ * @type: preference type (see enum brcmf_join_pref_types).
+ * @len: length of bytes following (currently always 2).
+ * @rssi_gain: signal gain for selection (only when @type is RSSI_DELTA).
+ * @band: band to which selection preference applies.
+ *	This is used if @type is BAND or RSSI_DELTA.
+ */
+struct brcmf_join_pref_params {
+	u8 type;
+	u8 len;
+	u8 rssi_gain;
+	u8 band;
+};
+
+/* used for join with or without a specific bssid and channel list */
+struct brcmf_join_params {
+	struct brcmf_ssid_le ssid_le;
+	struct brcmf_assoc_params_le params_le;
+};
+
+/* scan params for extended join */
+struct brcmf_join_scan_params_le {
+	u8 scan_type;		/* 0 use default, active or passive scan */
+	__le32 nprobes;		/* -1 use default, nr of probes per channel */
+	__le32 active_time;	/* -1 use default, dwell time per channel for
+				 * active scanning
+				 */
+	__le32 passive_time;	/* -1 use default, dwell time per channel
+				 * for passive scanning
+				 */
+	__le32 home_time;	/* -1 use default, dwell time for the home
+				 * channel between channel scans
+				 */
+};
+
+/* extended join params */
+struct brcmf_ext_join_params_le {
+	struct brcmf_ssid_le ssid_le;	/* {0, ""}: wildcard scan */
+	struct brcmf_join_scan_params_le scan_le;
+	struct brcmf_assoc_params_le assoc_le;
+};
+
+struct brcmf_wsec_key {
+	u32 index;		/* key index */
+	u32 len;		/* key length */
+	u8 data[WLAN_MAX_KEY_LEN];	/* key data */
+	u32 pad_1[18];
+	u32 algo;	/* CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */
+	u32 flags;	/* misc flags */
+	u32 pad_2[3];
+	u32 iv_initialized;	/* has IV been initialized already? */
+	u32 pad_3;
+	/* Rx IV */
+	struct {
+		u32 hi;	/* upper 32 bits of IV */
+		u16 lo;	/* lower 16 bits of IV */
+	} rxiv;
+	u32 pad_4[2];
+	u8 ea[ETH_ALEN];	/* per station */
+};
+
+/*
+ * dongle requires same struct as above but with fields in little endian order
+ */
+struct brcmf_wsec_key_le {
+	__le32 index;		/* key index */
+	__le32 len;		/* key length */
+	u8 data[WLAN_MAX_KEY_LEN];	/* key data */
+	__le32 pad_1[18];
+	__le32 algo;	/* CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */
+	__le32 flags;	/* misc flags */
+	__le32 pad_2[3];
+	__le32 iv_initialized;	/* has IV been initialized already? */
+	__le32 pad_3;
+	/* Rx IV */
+	struct {
+		__le32 hi;	/* upper 32 bits of IV */
+		__le16 lo;	/* lower 16 bits of IV */
+	} rxiv;
+	__le32 pad_4[2];
+	u8 ea[ETH_ALEN];	/* per station */
+};
+
+/* Used to get specific STA parameters */
+struct brcmf_scb_val_le {
+	__le32 val;
+	u8 ea[ETH_ALEN];
+};
+
+/* channel encoding */
+struct brcmf_channel_info_le {
+	__le32 hw_channel;
+	__le32 target_channel;
+	__le32 scan_channel;
+};
+
+struct brcmf_sta_info_le {
+	__le16 ver;		/* version of this struct */
+	__le16 len;		/* length in bytes of this structure */
+	__le16 cap;		/* sta's advertised capabilities */
+	__le32 flags;		/* flags defined below */
+	__le32 idle;		/* time since data pkt rx'd from sta */
+	u8 ea[ETH_ALEN];		/* Station address */
+	__le32 count;			/* # rates in this set */
+	u8 rates[BRCMF_MAXRATES_IN_SET];	/* rates in 500kbps units */
+						/* w/hi bit set if basic */
+	__le32 in;		/* seconds elapsed since associated */
+	__le32 listen_interval_inms; /* Min Listen interval in ms for STA */
+	__le32 tx_pkts;	/* # of packets transmitted */
+	__le32 tx_failures;	/* # of packets failed */
+	__le32 rx_ucast_pkts;	/* # of unicast packets received */
+	__le32 rx_mcast_pkts;	/* # of multicast packets received */
+	__le32 tx_rate;	/* Rate of last successful tx frame */
+	__le32 rx_rate;	/* Rate of last successful rx frame */
+	__le32 rx_decrypt_succeeds;	/* # of packets decrypted successfully */
+	__le32 rx_decrypt_failures;	/* # of packets that failed decryption */
+	__le32 tx_tot_pkts;    /* # of tx pkts (ucast + mcast) */
+	__le32 rx_tot_pkts;    /* # of data packets recvd (uni + mcast) */
+	__le32 tx_mcast_pkts;  /* # of mcast pkts txed */
+	__le64 tx_tot_bytes;   /* data bytes txed (ucast + mcast) */
+	__le64 rx_tot_bytes;   /* data bytes recvd (ucast + mcast) */
+	__le64 tx_ucast_bytes; /* data bytes txed (ucast) */
+	__le64 tx_mcast_bytes; /* # data bytes txed (mcast) */
+	__le64 rx_ucast_bytes; /* data bytes recvd (ucast) */
+	__le64 rx_mcast_bytes; /* data bytes recvd (mcast) */
+	s8 rssi[BRCMF_ANT_MAX];   /* per antenna rssi */
+	s8 nf[BRCMF_ANT_MAX];     /* per antenna noise floor */
+	__le16 aid;                    /* association ID */
+	__le16 ht_capabilities;        /* advertised ht caps */
+	__le16 vht_flags;              /* converted vht flags */
+	__le32 tx_pkts_retry_cnt;      /* # of frames where a retry was
+					 * exhausted.
+					 */
+	__le32 tx_pkts_retry_exhausted; /* # of user frames where a retry
+					 * was exhausted
+					 */
+	s8 rx_lastpkt_rssi[BRCMF_ANT_MAX]; /* Per antenna RSSI of last
+					    * received data frame.
+					    */
+	/* TX WLAN retry/failure statistics:
+	 * Separated for host requested frames and locally generated frames.
+	 * Includes unicast frames only, where retries/failures can be counted.
+	 */
+	__le32 tx_pkts_total;          /* # user frames sent successfully */
+	__le32 tx_pkts_retries;        /* # user frames retries */
+	__le32 tx_pkts_fw_total;       /* # FW generated sent successfully */
+	__le32 tx_pkts_fw_retries;     /* # retries for FW generated frames */
+	__le32 tx_pkts_fw_retry_exhausted;     /* # FW generated where a retry
+						* was exhausted
+						*/
+	__le32 rx_pkts_retried;        /* # rx with retry bit set */
+	__le32 tx_rate_fallback;       /* lowest fallback TX rate */
+};
+
+struct brcmf_chanspec_list {
+	__le32	count;		/* # of entries */
+	__le32	element[1];	/* variable length uint32 list */
+};
+
+/*
+ * WLC_E_PROBRESP_MSG
+ * WLC_E_P2P_PROBREQ_MSG
+ * WLC_E_ACTION_FRAME_RX
+ */
+struct brcmf_rx_mgmt_data {
+	__be16	version;
+	__be16	chanspec;
+	__be32	rssi;
+	__be32	mactime;
+	__be32	rate;
+};
+
+/**
+ * struct brcmf_fil_wowl_pattern_le - wowl pattern configuration struct.
+ *
+ * @cmd: "add", "del" or "clr".
+ * @masksize: Size of the mask in #of bytes
+ * @offset: Pattern byte offset in packet
+ * @patternoffset: Offset of start of pattern. Starting from field masksize.
+ * @patternsize: Size of the pattern itself in #of bytes
+ * @id: id
+ * @reasonsize: Size of the wakeup reason code
+ * @type: Type of pattern (enum brcmf_wowl_pattern_type)
+ */
+struct brcmf_fil_wowl_pattern_le {
+	u8	cmd[4];
+	__le32	masksize;
+	__le32	offset;
+	__le32	patternoffset;
+	__le32	patternsize;
+	__le32	id;
+	__le32	reasonsize;
+	__le32	type;
+	/* u8 mask[] - Mask follows the structure above */
+	/* u8 pattern[] - Pattern follows the mask is at 'patternoffset' */
+};
+
+struct brcmf_mbss_ssid_le {
+	__le32	bsscfgidx;
+	__le32	SSID_len;
+	unsigned char SSID[32];
+};
+
+/**
+ * struct brcmf_fil_country_le - country configuration structure.
+ *
+ * @country_abbrev: null-terminated country code used in the country IE.
+ * @rev: revision specifier for ccode. on set, -1 indicates unspecified.
+ * @ccode: null-terminated built-in country code.
+ */
+struct brcmf_fil_country_le {
+	char country_abbrev[BRCMF_COUNTRY_BUF_SZ];
+	__le32 rev;
+	char ccode[BRCMF_COUNTRY_BUF_SZ];
+};
+
+/**
+ * struct brcmf_rev_info_le - device revision info.
+ *
+ * @vendorid: PCI vendor id.
+ * @deviceid: device id of chip.
+ * @radiorev: radio revision.
+ * @chiprev: chip revision.
+ * @corerev: core revision.
+ * @boardid: board identifier (usu. PCI sub-device id).
+ * @boardvendor: board vendor (usu. PCI sub-vendor id).
+ * @boardrev: board revision.
+ * @driverrev: driver version.
+ * @ucoderev: microcode version.
+ * @bus: bus type.
+ * @chipnum: chip number.
+ * @phytype: phy type.
+ * @phyrev: phy revision.
+ * @anarev: anacore rev.
+ * @chippkg: chip package info.
+ * @nvramrev: nvram revision number.
+ */
+struct brcmf_rev_info_le {
+	__le32 vendorid;
+	__le32 deviceid;
+	__le32 radiorev;
+	__le32 chiprev;
+	__le32 corerev;
+	__le32 boardid;
+	__le32 boardvendor;
+	__le32 boardrev;
+	__le32 driverrev;
+	__le32 ucoderev;
+	__le32 bus;
+	__le32 chipnum;
+	__le32 phytype;
+	__le32 phyrev;
+	__le32 anarev;
+	__le32 chippkg;
+	__le32 nvramrev;
+};
+
+/**
+ * struct brcmf_assoclist_le - request assoc list.
+ *
+ * @count: indicates number of stations.
+ * @mac: MAC addresses of stations.
+ */
+struct brcmf_assoclist_le {
+	__le32 count;
+	u8 mac[BRCMF_MAX_ASSOCLIST][ETH_ALEN];
+};
+
+#endif /* FWIL_TYPES_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
new file mode 100644
index 0000000..086cac3
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
@@ -0,0 +1,2271 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/if_ether.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/err.h>
+#include <linux/jiffies.h>
+#include <net/cfg80211.h>
+
+#include <brcmu_utils.h>
+#include <brcmu_wifi.h>
+#include "core.h"
+#include "debug.h"
+#include "bus.h"
+#include "fwil.h"
+#include "fwil_types.h"
+#include "fweh.h"
+#include "fwsignal.h"
+#include "p2p.h"
+#include "cfg80211.h"
+#include "proto.h"
+
+/**
+ * DOC: Firmware Signalling
+ *
+ * Firmware can send signals to the host and vice versa; these are passed in
+ * the data packets using a TLV-based header. This signalling layer sits on
+ * top of the BDC bus protocol layer.
+ */
+
+/*
+ * Single definition for firmware-driver flow control TLVs.
+ *
+ * Each TLV is specified by BRCMF_FWS_TLV_DEF(name, ID, length).
+ * A length value of 0 indicates a variable-length TLV.
+ */
+#define BRCMF_FWS_TLV_DEFLIST \
+	BRCMF_FWS_TLV_DEF(MAC_OPEN, 1, 1) \
+	BRCMF_FWS_TLV_DEF(MAC_CLOSE, 2, 1) \
+	BRCMF_FWS_TLV_DEF(MAC_REQUEST_CREDIT, 3, 2) \
+	BRCMF_FWS_TLV_DEF(TXSTATUS, 4, 4) \
+	BRCMF_FWS_TLV_DEF(PKTTAG, 5, 4) \
+	BRCMF_FWS_TLV_DEF(MACDESC_ADD,	6, 8) \
+	BRCMF_FWS_TLV_DEF(MACDESC_DEL, 7, 8) \
+	BRCMF_FWS_TLV_DEF(RSSI, 8, 1) \
+	BRCMF_FWS_TLV_DEF(INTERFACE_OPEN, 9, 1) \
+	BRCMF_FWS_TLV_DEF(INTERFACE_CLOSE, 10, 1) \
+	BRCMF_FWS_TLV_DEF(FIFO_CREDITBACK, 11, 6) \
+	BRCMF_FWS_TLV_DEF(PENDING_TRAFFIC_BMP, 12, 2) \
+	BRCMF_FWS_TLV_DEF(MAC_REQUEST_PACKET, 13, 3) \
+	BRCMF_FWS_TLV_DEF(HOST_REORDER_RXPKTS, 14, 10) \
+	BRCMF_FWS_TLV_DEF(TRANS_ID, 18, 6) \
+	BRCMF_FWS_TLV_DEF(COMP_TXSTATUS, 19, 1) \
+	BRCMF_FWS_TLV_DEF(FILLER, 255, 0)
+
+/*
+ * enum brcmf_fws_tlv_type - definition of tlv identifiers.
+ */
+#define BRCMF_FWS_TLV_DEF(name, id, len) \
+	BRCMF_FWS_TYPE_ ## name =  id,
+enum brcmf_fws_tlv_type {
+	BRCMF_FWS_TLV_DEFLIST
+	BRCMF_FWS_TYPE_INVALID
+};
+#undef BRCMF_FWS_TLV_DEF
+
+/*
+ * enum brcmf_fws_tlv_len - definition of tlv lengths.
+ */
+#define BRCMF_FWS_TLV_DEF(name, id, len) \
+	BRCMF_FWS_TYPE_ ## name ## _LEN = (len),
+enum brcmf_fws_tlv_len {
+	BRCMF_FWS_TLV_DEFLIST
+};
+#undef BRCMF_FWS_TLV_DEF
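+
+/* Illustrative note, not part of the original driver source: for the entry
+ * BRCMF_FWS_TLV_DEF(MAC_OPEN, 1, 1) the two expansions above yield
+ *	BRCMF_FWS_TYPE_MAC_OPEN = 1,
+ *	BRCMF_FWS_TYPE_MAC_OPEN_LEN = (1),
+ * so the single list defines both the TLV identifier and its fixed length.
+ */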
+
+#ifdef DEBUG
+/*
+ * brcmf_fws_tlv_names - array of tlv names.
+ */
+#define BRCMF_FWS_TLV_DEF(name, id, len) \
+	{ id, #name },
+static struct {
+	enum brcmf_fws_tlv_type id;
+	const char *name;
+} brcmf_fws_tlv_names[] = {
+	BRCMF_FWS_TLV_DEFLIST
+};
+#undef BRCMF_FWS_TLV_DEF
+
+
+static const char *brcmf_fws_get_tlv_name(enum brcmf_fws_tlv_type id)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(brcmf_fws_tlv_names); i++)
+		if (brcmf_fws_tlv_names[i].id == id)
+			return brcmf_fws_tlv_names[i].name;
+
+	return "INVALID";
+}
+#else
+static const char *brcmf_fws_get_tlv_name(enum brcmf_fws_tlv_type id)
+{
+	return "NODEBUG";
+}
+#endif /* DEBUG */
+
+/*
+ * The PKTTAG tlv has additional bytes when firmware-signalling
+ * mode has REUSESEQ flag set.
+ */
+#define BRCMF_FWS_TYPE_SEQ_LEN				2
+
+/*
+ * flags used to enable tlv signalling from firmware.
+ */
+#define BRCMF_FWS_FLAGS_RSSI_SIGNALS			0x0001
+#define BRCMF_FWS_FLAGS_XONXOFF_SIGNALS			0x0002
+#define BRCMF_FWS_FLAGS_CREDIT_STATUS_SIGNALS		0x0004
+#define BRCMF_FWS_FLAGS_HOST_PROPTXSTATUS_ACTIVE	0x0008
+#define BRCMF_FWS_FLAGS_PSQ_GENERATIONFSM_ENABLE	0x0010
+#define BRCMF_FWS_FLAGS_PSQ_ZERO_BUFFER_ENABLE		0x0020
+#define BRCMF_FWS_FLAGS_HOST_RXREORDER_ACTIVE		0x0040
+
+#define BRCMF_FWS_MAC_DESC_TABLE_SIZE			32
+#define BRCMF_FWS_MAC_DESC_ID_INVALID			0xff
+
+#define BRCMF_FWS_HOSTIF_FLOWSTATE_OFF			0
+#define BRCMF_FWS_HOSTIF_FLOWSTATE_ON			1
+#define BRCMF_FWS_FLOWCONTROL_HIWATER			128
+#define BRCMF_FWS_FLOWCONTROL_LOWATER			64
+
+#define BRCMF_FWS_PSQ_PREC_COUNT		((BRCMF_FWS_FIFO_COUNT + 1) * 2)
+#define BRCMF_FWS_PSQ_LEN				256
+
+#define BRCMF_FWS_HTOD_FLAG_PKTFROMHOST			0x01
+#define BRCMF_FWS_HTOD_FLAG_PKT_REQUESTED		0x02
+
+#define BRCMF_FWS_RET_OK_NOSCHEDULE			0
+#define BRCMF_FWS_RET_OK_SCHEDULE			1
+
+#define BRCMF_FWS_MODE_REUSESEQ_SHIFT			3	/* seq reuse */
+#define BRCMF_FWS_MODE_SET_REUSESEQ(x, val)	((x) = \
+		((x) & ~(1 << BRCMF_FWS_MODE_REUSESEQ_SHIFT)) | \
+		(((val) & 1) << BRCMF_FWS_MODE_REUSESEQ_SHIFT))
+#define BRCMF_FWS_MODE_GET_REUSESEQ(x)	\
+		(((x) >> BRCMF_FWS_MODE_REUSESEQ_SHIFT) & 1)
+
+/**
+ * enum brcmf_fws_skb_state - indicates processing state of skb.
+ *
+ * @BRCMF_FWS_SKBSTATE_NEW: sk_buff is newly arrived in the driver.
+ * @BRCMF_FWS_SKBSTATE_DELAYED: sk_buff had to wait on queue.
+ * @BRCMF_FWS_SKBSTATE_SUPPRESSED: sk_buff has been suppressed by firmware.
+ * @BRCMF_FWS_SKBSTATE_TIM: allocated for TIM update info.
+ */
+enum brcmf_fws_skb_state {
+	BRCMF_FWS_SKBSTATE_NEW,
+	BRCMF_FWS_SKBSTATE_DELAYED,
+	BRCMF_FWS_SKBSTATE_SUPPRESSED,
+	BRCMF_FWS_SKBSTATE_TIM
+};
+
+/**
+ * struct brcmf_skbuff_cb - control buffer associated with skbuff.
+ *
+ * @bus_flags: 2 bytes reserved for bus specific parameters
+ * @if_flags: holds interface index and packet related flags.
+ * @htod: host to device packet identifier (used in PKTTAG tlv).
+ * @htod_seq: original 16-bit sequence number for each suppressed packet.
+ * @state: transmit state of the packet.
+ * @mac: descriptor related to destination for this packet.
+ *
+ * This information is stored in control buffer struct sk_buff::cb, which
+ * provides 48 bytes of storage so this structure should not exceed that.
+ */
+struct brcmf_skbuff_cb {
+	u16 bus_flags;
+	u16 if_flags;
+	u32 htod;
+	u16 htod_seq;
+	enum brcmf_fws_skb_state state;
+	struct brcmf_fws_mac_descriptor *mac;
+};
+
+/*
+ * macro casting skbuff control buffer to struct brcmf_skbuff_cb.
+ */
+#define brcmf_skbcb(skb)	((struct brcmf_skbuff_cb *)((skb)->cb))
+
+/*
+ * sk_buff control if flags
+ *
+ *	b[11]  - packet sent upon firmware request.
+ *	b[10]  - packet only contains signalling data.
+ *	b[9]   - packet is a tx packet.
+ *	b[8]   - packet used requested credit
+ *	b[7]   - interface in AP mode.
+ *	b[3:0] - interface index.
+ */
+#define BRCMF_SKB_IF_FLAGS_REQUESTED_MASK	0x0800
+#define BRCMF_SKB_IF_FLAGS_REQUESTED_SHIFT	11
+#define BRCMF_SKB_IF_FLAGS_SIGNAL_ONLY_MASK	0x0400
+#define BRCMF_SKB_IF_FLAGS_SIGNAL_ONLY_SHIFT	10
+#define BRCMF_SKB_IF_FLAGS_TRANSMIT_MASK        0x0200
+#define BRCMF_SKB_IF_FLAGS_TRANSMIT_SHIFT	9
+#define BRCMF_SKB_IF_FLAGS_REQ_CREDIT_MASK	0x0100
+#define BRCMF_SKB_IF_FLAGS_REQ_CREDIT_SHIFT	8
+#define BRCMF_SKB_IF_FLAGS_IF_AP_MASK		0x0080
+#define BRCMF_SKB_IF_FLAGS_IF_AP_SHIFT		7
+#define BRCMF_SKB_IF_FLAGS_INDEX_MASK		0x000f
+#define BRCMF_SKB_IF_FLAGS_INDEX_SHIFT		0
+
+#define brcmf_skb_if_flags_set_field(skb, field, value) \
+	brcmu_maskset16(&(brcmf_skbcb(skb)->if_flags), \
+			BRCMF_SKB_IF_FLAGS_ ## field ## _MASK, \
+			BRCMF_SKB_IF_FLAGS_ ## field ## _SHIFT, (value))
+#define brcmf_skb_if_flags_get_field(skb, field) \
+	brcmu_maskget16(brcmf_skbcb(skb)->if_flags, \
+			BRCMF_SKB_IF_FLAGS_ ## field ## _MASK, \
+			BRCMF_SKB_IF_FLAGS_ ## field ## _SHIFT)
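+
+/* Illustrative usage, not part of the original driver source: the interface
+ * index lives in b[3:0] of if_flags, so a caller would typically do
+ *	brcmf_skb_if_flags_set_field(skb, INDEX, ifidx);
+ *	ifidx = brcmf_skb_if_flags_get_field(skb, INDEX);
+ */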
+
+/*
+ * sk_buff control packet identifier
+ *
+ * 32-bit packet identifier used in PKTTAG tlv from host to dongle.
+ *
+ * - Generated at the host (e.g. dhd)
+ * - Seen as a generic sequence number by firmware except for the flags field.
+ *
+ * Generation	: b[31]	=> generation number for this packet [host->fw]
+ *			   OR, current generation number [fw->host]
+ * Flags	: b[30:27] => command, status flags
+ * FIFO-AC	: b[26:24] => AC-FIFO id
+ * h-slot	: b[23:8] => hanger-slot
+ * freerun	: b[7:0] => A free running counter
+ */
+#define BRCMF_SKB_HTOD_TAG_GENERATION_MASK		0x80000000
+#define BRCMF_SKB_HTOD_TAG_GENERATION_SHIFT		31
+#define BRCMF_SKB_HTOD_TAG_FLAGS_MASK			0x78000000
+#define BRCMF_SKB_HTOD_TAG_FLAGS_SHIFT			27
+#define BRCMF_SKB_HTOD_TAG_FIFO_MASK			0x07000000
+#define BRCMF_SKB_HTOD_TAG_FIFO_SHIFT			24
+#define BRCMF_SKB_HTOD_TAG_HSLOT_MASK			0x00ffff00
+#define BRCMF_SKB_HTOD_TAG_HSLOT_SHIFT			8
+#define BRCMF_SKB_HTOD_TAG_FREERUN_MASK			0x000000ff
+#define BRCMF_SKB_HTOD_TAG_FREERUN_SHIFT		0
+
+#define brcmf_skb_htod_tag_set_field(skb, field, value) \
+	brcmu_maskset32(&(brcmf_skbcb(skb)->htod), \
+			BRCMF_SKB_HTOD_TAG_ ## field ## _MASK, \
+			BRCMF_SKB_HTOD_TAG_ ## field ## _SHIFT, (value))
+#define brcmf_skb_htod_tag_get_field(skb, field) \
+	brcmu_maskget32(brcmf_skbcb(skb)->htod, \
+			BRCMF_SKB_HTOD_TAG_ ## field ## _MASK, \
+			BRCMF_SKB_HTOD_TAG_ ## field ## _SHIFT)
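+
+/* Illustrative usage, not part of the original driver source: the hanger slot
+ * of a tx packet is carried in b[23:8] of the PKTTAG, e.g.
+ *	brcmf_skb_htod_tag_set_field(skb, HSLOT, hslot);
+ *	hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
+ */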
+
+#define BRCMF_SKB_HTOD_SEQ_FROMFW_MASK			0x2000
+#define BRCMF_SKB_HTOD_SEQ_FROMFW_SHIFT			13
+#define BRCMF_SKB_HTOD_SEQ_FROMDRV_MASK			0x1000
+#define BRCMF_SKB_HTOD_SEQ_FROMDRV_SHIFT		12
+#define BRCMF_SKB_HTOD_SEQ_NR_MASK			0x0fff
+#define BRCMF_SKB_HTOD_SEQ_NR_SHIFT			0
+
+#define brcmf_skb_htod_seq_set_field(skb, field, value) \
+	brcmu_maskset16(&(brcmf_skbcb(skb)->htod_seq), \
+			BRCMF_SKB_HTOD_SEQ_ ## field ## _MASK, \
+			BRCMF_SKB_HTOD_SEQ_ ## field ## _SHIFT, (value))
+#define brcmf_skb_htod_seq_get_field(skb, field) \
+	brcmu_maskget16(brcmf_skbcb(skb)->htod_seq, \
+			BRCMF_SKB_HTOD_SEQ_ ## field ## _MASK, \
+			BRCMF_SKB_HTOD_SEQ_ ## field ## _SHIFT)
+
+#define BRCMF_FWS_TXSTAT_GENERATION_MASK	0x80000000
+#define BRCMF_FWS_TXSTAT_GENERATION_SHIFT	31
+#define BRCMF_FWS_TXSTAT_FLAGS_MASK		0x78000000
+#define BRCMF_FWS_TXSTAT_FLAGS_SHIFT		27
+#define BRCMF_FWS_TXSTAT_FIFO_MASK		0x07000000
+#define BRCMF_FWS_TXSTAT_FIFO_SHIFT		24
+#define BRCMF_FWS_TXSTAT_HSLOT_MASK		0x00FFFF00
+#define BRCMF_FWS_TXSTAT_HSLOT_SHIFT		8
+#define BRCMF_FWS_TXSTAT_FREERUN_MASK		0x000000FF
+#define BRCMF_FWS_TXSTAT_FREERUN_SHIFT		0
+
+#define brcmf_txstatus_get_field(txs, field) \
+	brcmu_maskget32(txs, BRCMF_FWS_TXSTAT_ ## field ## _MASK, \
+			BRCMF_FWS_TXSTAT_ ## field ## _SHIFT)
+
+/* How long to defer borrowing in jiffies */
+#define BRCMF_FWS_BORROW_DEFER_PERIOD		(HZ / 10)
+
+/**
+ * enum brcmf_fws_fifo - fifo indices used by dongle firmware.
+ *
+ * @BRCMF_FWS_FIFO_FIRST: first fifo, ie. background.
+ * @BRCMF_FWS_FIFO_AC_BK: fifo for background traffic.
+ * @BRCMF_FWS_FIFO_AC_BE: fifo for best-effort traffic.
+ * @BRCMF_FWS_FIFO_AC_VI: fifo for video traffic.
+ * @BRCMF_FWS_FIFO_AC_VO: fifo for voice traffic.
+ * @BRCMF_FWS_FIFO_BCMC: fifo for broadcast/multicast (AP only).
+ * @BRCMF_FWS_FIFO_ATIM: fifo for ATIM (AP only).
+ * @BRCMF_FWS_FIFO_COUNT: number of fifos.
+ */
+enum brcmf_fws_fifo {
+	BRCMF_FWS_FIFO_FIRST,
+	BRCMF_FWS_FIFO_AC_BK = BRCMF_FWS_FIFO_FIRST,
+	BRCMF_FWS_FIFO_AC_BE,
+	BRCMF_FWS_FIFO_AC_VI,
+	BRCMF_FWS_FIFO_AC_VO,
+	BRCMF_FWS_FIFO_BCMC,
+	BRCMF_FWS_FIFO_ATIM,
+	BRCMF_FWS_FIFO_COUNT
+};
+
+/**
+ * enum brcmf_fws_txstatus - txstatus flag values.
+ *
+ * @BRCMF_FWS_TXSTATUS_DISCARD:
+ *	host is free to discard the packet.
+ * @BRCMF_FWS_TXSTATUS_CORE_SUPPRESS:
+ *	802.11 core suppressed the packet.
+ * @BRCMF_FWS_TXSTATUS_FW_PS_SUPPRESS:
+ *	firmware suppressed the packet as the device is already in PS mode.
+ * @BRCMF_FWS_TXSTATUS_FW_TOSSED:
+ *	firmware tossed the packet.
+ * @BRCMF_FWS_TXSTATUS_HOST_TOSSED:
+ *	host tossed the packet.
+ */
+enum brcmf_fws_txstatus {
+	BRCMF_FWS_TXSTATUS_DISCARD,
+	BRCMF_FWS_TXSTATUS_CORE_SUPPRESS,
+	BRCMF_FWS_TXSTATUS_FW_PS_SUPPRESS,
+	BRCMF_FWS_TXSTATUS_FW_TOSSED,
+	BRCMF_FWS_TXSTATUS_HOST_TOSSED
+};
+
+enum brcmf_fws_fcmode {
+	BRCMF_FWS_FCMODE_NONE,
+	BRCMF_FWS_FCMODE_IMPLIED_CREDIT,
+	BRCMF_FWS_FCMODE_EXPLICIT_CREDIT
+};
+
+enum brcmf_fws_mac_desc_state {
+	BRCMF_FWS_STATE_OPEN = 1,
+	BRCMF_FWS_STATE_CLOSE
+};
+
+/**
+ * struct brcmf_fws_mac_descriptor - firmware signalling data per node/interface
+ *
+ * @occupied: slot is in use.
+ * @mac_handle: handle for mac entry determined by firmware.
+ * @interface_id: interface index.
+ * @state: current state.
+ * @suppressed: mac entry is suppressed.
+ * @generation: generation bit.
+ * @ac_bitmap: ac queue bitmap.
+ * @requested_credit: credits requested by firmware.
+ * @ea: ethernet address.
+ * @seq: per-node free-running sequence.
+ * @psq: power-save queue.
+ * @transit_count: packets in transit to firmware.
+ */
+struct brcmf_fws_mac_descriptor {
+	char name[16];
+	u8 occupied;
+	u8 mac_handle;
+	u8 interface_id;
+	u8 state;
+	bool suppressed;
+	u8 generation;
+	u8 ac_bitmap;
+	u8 requested_credit;
+	u8 requested_packet;
+	u8 ea[ETH_ALEN];
+	u8 seq[BRCMF_FWS_FIFO_COUNT];
+	struct pktq psq;
+	int transit_count;
+	int suppr_transit_count;
+	bool send_tim_signal;
+	u8 traffic_pending_bmp;
+	u8 traffic_lastreported_bmp;
+};
+
+#define BRCMF_FWS_HANGER_MAXITEMS	1024
+
+/**
+ * enum brcmf_fws_hanger_item_state - state of hanger item.
+ *
+ * @BRCMF_FWS_HANGER_ITEM_STATE_FREE: item is free for use.
+ * @BRCMF_FWS_HANGER_ITEM_STATE_INUSE: item is in use.
+ * @BRCMF_FWS_HANGER_ITEM_STATE_INUSE_SUPPRESSED: item was suppressed.
+ */
+enum brcmf_fws_hanger_item_state {
+	BRCMF_FWS_HANGER_ITEM_STATE_FREE = 1,
+	BRCMF_FWS_HANGER_ITEM_STATE_INUSE,
+	BRCMF_FWS_HANGER_ITEM_STATE_INUSE_SUPPRESSED
+};
+
+
+/**
+ * struct brcmf_fws_hanger_item - single entry for tx pending packet.
+ *
+ * @state: entry is either free or occupied.
+ * @pkt: packet itself.
+ */
+struct brcmf_fws_hanger_item {
+	enum brcmf_fws_hanger_item_state state;
+	struct sk_buff *pkt;
+};
+
+/**
+ * struct brcmf_fws_hanger - holds packets awaiting firmware txstatus.
+ *
+ * @pushed: packets pushed to await txstatus.
+ * @popped: packets popped upon handling txstatus.
+ * @failed_to_push: packets that could not be pushed.
+ * @failed_to_pop: packets that could not be popped.
+ * @failed_slotfind: packets for which no free entry could be found.
+ * @slot_pos: last returned item index for a free entry.
+ * @items: array of hanger items.
+ */
+struct brcmf_fws_hanger {
+	u32 pushed;
+	u32 popped;
+	u32 failed_to_push;
+	u32 failed_to_pop;
+	u32 failed_slotfind;
+	u32 slot_pos;
+	struct brcmf_fws_hanger_item items[BRCMF_FWS_HANGER_MAXITEMS];
+};
+
+struct brcmf_fws_macdesc_table {
+	struct brcmf_fws_mac_descriptor nodes[BRCMF_FWS_MAC_DESC_TABLE_SIZE];
+	struct brcmf_fws_mac_descriptor iface[BRCMF_MAX_IFS];
+	struct brcmf_fws_mac_descriptor other;
+};
+
+struct brcmf_fws_stats {
+	u32 tlv_parse_failed;
+	u32 tlv_invalid_type;
+	u32 header_only_pkt;
+	u32 header_pulls;
+	u32 pkt2bus;
+	u32 send_pkts[5];
+	u32 requested_sent[5];
+	u32 generic_error;
+	u32 mac_update_failed;
+	u32 mac_ps_update_failed;
+	u32 if_update_failed;
+	u32 packet_request_failed;
+	u32 credit_request_failed;
+	u32 rollback_success;
+	u32 rollback_failed;
+	u32 delayq_full_error;
+	u32 supprq_full_error;
+	u32 txs_indicate;
+	u32 txs_discard;
+	u32 txs_supp_core;
+	u32 txs_supp_ps;
+	u32 txs_tossed;
+	u32 txs_host_tossed;
+	u32 bus_flow_block;
+	u32 fws_flow_block;
+};
+
+struct brcmf_fws_info {
+	struct brcmf_pub *drvr;
+	spinlock_t spinlock;
+	ulong flags;
+	struct brcmf_fws_stats stats;
+	struct brcmf_fws_hanger hanger;
+	enum brcmf_fws_fcmode fcmode;
+	bool fw_signals;
+	bool bcmc_credit_check;
+	struct brcmf_fws_macdesc_table desc;
+	struct workqueue_struct *fws_wq;
+	struct work_struct fws_dequeue_work;
+	u32 fifo_enqpkt[BRCMF_FWS_FIFO_COUNT];
+	int fifo_credit[BRCMF_FWS_FIFO_COUNT];
+	int credits_borrowed[BRCMF_FWS_FIFO_AC_VO + 1];
+	int deq_node_pos[BRCMF_FWS_FIFO_COUNT];
+	u32 fifo_credit_map;
+	u32 fifo_delay_map;
+	unsigned long borrow_defer_timestamp;
+	bool bus_flow_blocked;
+	bool creditmap_received;
+	u8 mode;
+	bool avoid_queueing;
+};
+
+/*
+ * brcmf_fws_prio2fifo - mapping from 802.1d priority to firmware fifo index.
+ */
+static const int brcmf_fws_prio2fifo[] = {
+	BRCMF_FWS_FIFO_AC_BE,
+	BRCMF_FWS_FIFO_AC_BK,
+	BRCMF_FWS_FIFO_AC_BK,
+	BRCMF_FWS_FIFO_AC_BE,
+	BRCMF_FWS_FIFO_AC_VI,
+	BRCMF_FWS_FIFO_AC_VI,
+	BRCMF_FWS_FIFO_AC_VO,
+	BRCMF_FWS_FIFO_AC_VO
+};
+
+static int fcmode;
+module_param(fcmode, int, S_IRUSR);
+MODULE_PARM_DESC(fcmode, "mode of firmware signalled flow control");
+
+#define BRCMF_FWS_TLV_DEF(name, id, len) \
+	case BRCMF_FWS_TYPE_ ## name: \
+		return len;
+
+/**
+ * brcmf_fws_get_tlv_len() - returns defined length for given tlv id.
+ *
+ * @fws: firmware-signalling information.
+ * @id: identifier of the TLV.
+ *
+ * Return: the specified length for the given TLV; Otherwise -EINVAL.
+ */
+static int brcmf_fws_get_tlv_len(struct brcmf_fws_info *fws,
+				 enum brcmf_fws_tlv_type id)
+{
+	switch (id) {
+	BRCMF_FWS_TLV_DEFLIST
+	default:
+		fws->stats.tlv_invalid_type++;
+		break;
+	}
+	return -EINVAL;
+}
+#undef BRCMF_FWS_TLV_DEF
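+
+/* Illustrative usage, not part of the original driver source: with the list
+ * above, brcmf_fws_get_tlv_len(fws, BRCMF_FWS_TYPE_MAC_OPEN) returns 1, while
+ * an unknown id bumps fws->stats.tlv_invalid_type and returns -EINVAL.
+ */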
+
+static void brcmf_fws_lock(struct brcmf_fws_info *fws)
+		__acquires(&fws->spinlock)
+{
+	spin_lock_irqsave(&fws->spinlock, fws->flags);
+}
+
+static void brcmf_fws_unlock(struct brcmf_fws_info *fws)
+		__releases(&fws->spinlock)
+{
+	spin_unlock_irqrestore(&fws->spinlock, fws->flags);
+}
+
+static bool brcmf_fws_ifidx_match(struct sk_buff *skb, void *arg)
+{
+	u32 ifidx = brcmf_skb_if_flags_get_field(skb, INDEX);
+	return ifidx == *(int *)arg;
+}
+
+static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q,
+				int ifidx)
+{
+	bool (*matchfn)(struct sk_buff *, void *) = NULL;
+	struct sk_buff *skb;
+	int prec;
+
+	if (ifidx != -1)
+		matchfn = brcmf_fws_ifidx_match;
+	for (prec = 0; prec < q->num_prec; prec++) {
+		skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
+		while (skb) {
+			brcmu_pkt_buf_free_skb(skb);
+			skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
+		}
+	}
+}
+
+static void brcmf_fws_hanger_init(struct brcmf_fws_hanger *hanger)
+{
+	int i;
+
+	memset(hanger, 0, sizeof(*hanger));
+	for (i = 0; i < ARRAY_SIZE(hanger->items); i++)
+		hanger->items[i].state = BRCMF_FWS_HANGER_ITEM_STATE_FREE;
+}
+
+static u32 brcmf_fws_hanger_get_free_slot(struct brcmf_fws_hanger *h)
+{
+	u32 i;
+
+	i = (h->slot_pos + 1) % BRCMF_FWS_HANGER_MAXITEMS;
+
+	while (i != h->slot_pos) {
+		if (h->items[i].state == BRCMF_FWS_HANGER_ITEM_STATE_FREE) {
+			h->slot_pos = i;
+			goto done;
+		}
+		i++;
+		if (i == BRCMF_FWS_HANGER_MAXITEMS)
+			i = 0;
+	}
+	brcmf_err("all slots occupied\n");
+	h->failed_slotfind++;
+	i = BRCMF_FWS_HANGER_MAXITEMS;
+done:
+	return i;
+}
+
+static int brcmf_fws_hanger_pushpkt(struct brcmf_fws_hanger *h,
+				    struct sk_buff *pkt, u32 slot_id)
+{
+	if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS)
+		return -ENOENT;
+
+	if (h->items[slot_id].state != BRCMF_FWS_HANGER_ITEM_STATE_FREE) {
+		brcmf_err("slot is not free\n");
+		h->failed_to_push++;
+		return -EINVAL;
+	}
+
+	h->items[slot_id].state = BRCMF_FWS_HANGER_ITEM_STATE_INUSE;
+	h->items[slot_id].pkt = pkt;
+	h->pushed++;
+	return 0;
+}
+
+static inline int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h,
+					  u32 slot_id, struct sk_buff **pktout,
+					  bool remove_item)
+{
+	if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS)
+		return -ENOENT;
+
+	if (h->items[slot_id].state == BRCMF_FWS_HANGER_ITEM_STATE_FREE) {
+		brcmf_err("entry not in use\n");
+		h->failed_to_pop++;
+		return -EINVAL;
+	}
+
+	*pktout = h->items[slot_id].pkt;
+	if (remove_item) {
+		h->items[slot_id].state = BRCMF_FWS_HANGER_ITEM_STATE_FREE;
+		h->items[slot_id].pkt = NULL;
+		h->popped++;
+	}
+	return 0;
+}
+
+static int brcmf_fws_hanger_mark_suppressed(struct brcmf_fws_hanger *h,
+					    u32 slot_id)
+{
+	if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS)
+		return -ENOENT;
+
+	if (h->items[slot_id].state == BRCMF_FWS_HANGER_ITEM_STATE_FREE) {
+		brcmf_err("entry not in use\n");
+		return -EINVAL;
+	}
+
+	h->items[slot_id].state = BRCMF_FWS_HANGER_ITEM_STATE_INUSE_SUPPRESSED;
+	return 0;
+}
+
+static void brcmf_fws_hanger_cleanup(struct brcmf_fws_info *fws,
+				     bool (*fn)(struct sk_buff *, void *),
+				     int ifidx)
+{
+	struct brcmf_fws_hanger *h = &fws->hanger;
+	struct sk_buff *skb;
+	int i;
+	enum brcmf_fws_hanger_item_state s;
+
+	for (i = 0; i < ARRAY_SIZE(h->items); i++) {
+		s = h->items[i].state;
+		if (s == BRCMF_FWS_HANGER_ITEM_STATE_INUSE ||
+		    s == BRCMF_FWS_HANGER_ITEM_STATE_INUSE_SUPPRESSED) {
+			skb = h->items[i].pkt;
+			if (fn == NULL || fn(skb, &ifidx)) {
+				/* suppressed packets are freed from the psq */
+				if (s == BRCMF_FWS_HANGER_ITEM_STATE_INUSE)
+					brcmu_pkt_buf_free_skb(skb);
+				h->items[i].state =
+					BRCMF_FWS_HANGER_ITEM_STATE_FREE;
+			}
+		}
+	}
+}
+
+static void brcmf_fws_macdesc_set_name(struct brcmf_fws_info *fws,
+				       struct brcmf_fws_mac_descriptor *desc)
+{
+	if (desc == &fws->desc.other)
+		strlcpy(desc->name, "MAC-OTHER", sizeof(desc->name));
+	else if (desc->mac_handle)
+		scnprintf(desc->name, sizeof(desc->name), "MAC-%d:%d",
+			  desc->mac_handle, desc->interface_id);
+	else
+		scnprintf(desc->name, sizeof(desc->name), "MACIF:%d",
+			  desc->interface_id);
+}
+
+static void brcmf_fws_macdesc_init(struct brcmf_fws_mac_descriptor *desc,
+				   u8 *addr, u8 ifidx)
+{
+	brcmf_dbg(TRACE,
+		  "enter: desc %p ea=%pM, ifidx=%u\n", desc, addr, ifidx);
+	desc->occupied = 1;
+	desc->state = BRCMF_FWS_STATE_OPEN;
+	desc->requested_credit = 0;
+	desc->requested_packet = 0;
+	/* depending on use may need ifp->bssidx instead */
+	desc->interface_id = ifidx;
+	desc->ac_bitmap = 0xff; /* update this when handling APSD */
+	if (addr)
+		memcpy(&desc->ea[0], addr, ETH_ALEN);
+}
+
+static
+void brcmf_fws_macdesc_deinit(struct brcmf_fws_mac_descriptor *desc)
+{
+	brcmf_dbg(TRACE,
+		  "enter: ea=%pM, ifidx=%u\n", desc->ea, desc->interface_id);
+	desc->occupied = 0;
+	desc->state = BRCMF_FWS_STATE_CLOSE;
+	desc->requested_credit = 0;
+	desc->requested_packet = 0;
+}
+
+static struct brcmf_fws_mac_descriptor *
+brcmf_fws_macdesc_lookup(struct brcmf_fws_info *fws, u8 *ea)
+{
+	struct brcmf_fws_mac_descriptor *entry;
+	int i;
+
+	if (ea == NULL)
+		return ERR_PTR(-EINVAL);
+
+	entry = &fws->desc.nodes[0];
+	for (i = 0; i < ARRAY_SIZE(fws->desc.nodes); i++) {
+		if (entry->occupied && !memcmp(entry->ea, ea, ETH_ALEN))
+			return entry;
+		entry++;
+	}
+
+	return ERR_PTR(-ENOENT);
+}
+
+static struct brcmf_fws_mac_descriptor*
+brcmf_fws_macdesc_find(struct brcmf_fws_info *fws, struct brcmf_if *ifp, u8 *da)
+{
+	struct brcmf_fws_mac_descriptor *entry = &fws->desc.other;
+	bool multicast;
+
+	multicast = is_multicast_ether_addr(da);
+
+	/* Multicast destinations, STA and P2P clients get the interface entry.
+	 * For TDLS destinations STA/GC gets the MAC entry, as TDLS destinations
+	 * have their own entry.
+	 */
+	if (multicast && ifp->fws_desc) {
+		entry = ifp->fws_desc;
+		goto done;
+	}
+
+	entry = brcmf_fws_macdesc_lookup(fws, da);
+	if (IS_ERR(entry))
+		entry = ifp->fws_desc;
+
+done:
+	return entry;
+}
+
+static bool brcmf_fws_macdesc_closed(struct brcmf_fws_info *fws,
+				     struct brcmf_fws_mac_descriptor *entry,
+				     int fifo)
+{
+	struct brcmf_fws_mac_descriptor *if_entry;
+	bool closed;
+
+	/* for unique destination entries the related interface
+	 * may be closed.
+	 */
+	if (entry->mac_handle) {
+		if_entry = &fws->desc.iface[entry->interface_id];
+		if (if_entry->state == BRCMF_FWS_STATE_CLOSE)
+			return true;
+	}
+	/* an entry is closed when the state is closed and
+	 * the firmware did not request anything.
+	 */
+	closed = entry->state == BRCMF_FWS_STATE_CLOSE &&
+		 !entry->requested_credit && !entry->requested_packet;
+
+	/* Or firmware does not allow traffic for given fifo */
+	return closed || !(entry->ac_bitmap & BIT(fifo));
+}
+
+static void brcmf_fws_macdesc_cleanup(struct brcmf_fws_info *fws,
+				      struct brcmf_fws_mac_descriptor *entry,
+				      int ifidx)
+{
+	if (entry->occupied && (ifidx == -1 || ifidx == entry->interface_id)) {
+		brcmf_fws_psq_flush(fws, &entry->psq, ifidx);
+		entry->occupied = !!(entry->psq.len);
+	}
+}
+
+static void brcmf_fws_bus_txq_cleanup(struct brcmf_fws_info *fws,
+				      bool (*fn)(struct sk_buff *, void *),
+				      int ifidx)
+{
+	struct brcmf_fws_hanger_item *hi;
+	struct pktq *txq;
+	struct sk_buff *skb;
+	int prec;
+	u32 hslot;
+
+	txq = brcmf_bus_gettxq(fws->drvr->bus_if);
+	if (IS_ERR(txq)) {
+		brcmf_dbg(TRACE, "no txq to clean up\n");
+		return;
+	}
+
+	for (prec = 0; prec < txq->num_prec; prec++) {
+		skb = brcmu_pktq_pdeq_match(txq, prec, fn, &ifidx);
+		while (skb) {
+			hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
+			hi = &fws->hanger.items[hslot];
+			WARN_ON(skb != hi->pkt);
+			hi->state = BRCMF_FWS_HANGER_ITEM_STATE_FREE;
+			brcmu_pkt_buf_free_skb(skb);
+			skb = brcmu_pktq_pdeq_match(txq, prec, fn, &ifidx);
+		}
+	}
+}
+
+static void brcmf_fws_cleanup(struct brcmf_fws_info *fws, int ifidx)
+{
+	int i;
+	struct brcmf_fws_mac_descriptor *table;
+	bool (*matchfn)(struct sk_buff *, void *) = NULL;
+
+	if (fws == NULL)
+		return;
+
+	if (ifidx != -1)
+		matchfn = brcmf_fws_ifidx_match;
+
+	/* cleanup individual nodes */
+	table = &fws->desc.nodes[0];
+	for (i = 0; i < ARRAY_SIZE(fws->desc.nodes); i++)
+		brcmf_fws_macdesc_cleanup(fws, &table[i], ifidx);
+
+	brcmf_fws_macdesc_cleanup(fws, &fws->desc.other, ifidx);
+	brcmf_fws_bus_txq_cleanup(fws, matchfn, ifidx);
+	brcmf_fws_hanger_cleanup(fws, matchfn, ifidx);
+}
+
+static u8 brcmf_fws_hdrpush(struct brcmf_fws_info *fws, struct sk_buff *skb)
+{
+	struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
+	u8 *wlh;
+	u16 data_offset = 0;
+	u8 fillers;
+	__le32 pkttag = cpu_to_le32(brcmf_skbcb(skb)->htod);
+	__le16 pktseq = cpu_to_le16(brcmf_skbcb(skb)->htod_seq);
+
+	brcmf_dbg(TRACE, "enter: %s, idx=%d hslot=%d htod %X seq %X\n",
+		  entry->name, brcmf_skb_if_flags_get_field(skb, INDEX),
+		  (le32_to_cpu(pkttag) >> 8) & 0xffff,
+		  brcmf_skbcb(skb)->htod, brcmf_skbcb(skb)->htod_seq);
+	if (entry->send_tim_signal)
+		data_offset += 2 + BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN;
+	if (BRCMF_FWS_MODE_GET_REUSESEQ(fws->mode))
+		data_offset += BRCMF_FWS_TYPE_SEQ_LEN;
+	/* +2 is for Type[1] and Len[1] in TLV, plus TIM signal */
+	data_offset += 2 + BRCMF_FWS_TYPE_PKTTAG_LEN;
+	fillers = round_up(data_offset, 4) - data_offset;
+	data_offset += fillers;
+
+	skb_push(skb, data_offset);
+	wlh = skb->data;
+
+	wlh[0] = BRCMF_FWS_TYPE_PKTTAG;
+	wlh[1] = BRCMF_FWS_TYPE_PKTTAG_LEN;
+	memcpy(&wlh[2], &pkttag, sizeof(pkttag));
+	if (BRCMF_FWS_MODE_GET_REUSESEQ(fws->mode)) {
+		wlh[1] += BRCMF_FWS_TYPE_SEQ_LEN;
+		memcpy(&wlh[2 + BRCMF_FWS_TYPE_PKTTAG_LEN], &pktseq,
+		       sizeof(pktseq));
+	}
+	wlh += wlh[1] + 2;
+
+	if (entry->send_tim_signal) {
+		entry->send_tim_signal = 0;
+		wlh[0] = BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP;
+		wlh[1] = BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN;
+		wlh[2] = entry->mac_handle;
+		wlh[3] = entry->traffic_pending_bmp;
+		brcmf_dbg(TRACE, "adding TIM info: handle %d bmp 0x%X\n",
+			  entry->mac_handle, entry->traffic_pending_bmp);
+		wlh += BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN + 2;
+		entry->traffic_lastreported_bmp = entry->traffic_pending_bmp;
+	}
+	if (fillers)
+		memset(wlh, BRCMF_FWS_TYPE_FILLER, fillers);
+
+	return (u8)(data_offset >> 2);
+}
+
+static bool brcmf_fws_tim_update(struct brcmf_fws_info *fws,
+				 struct brcmf_fws_mac_descriptor *entry,
+				 int fifo, bool send_immediately)
+{
+	struct sk_buff *skb;
+	struct brcmf_skbuff_cb *skcb;
+	s32 err;
+	u32 len;
+	u8 data_offset;
+	int ifidx;
+
+	/* check delayedQ and suppressQ in one call using bitmap */
+	if (brcmu_pktq_mlen(&entry->psq, 3 << (fifo * 2)) == 0)
+		entry->traffic_pending_bmp &= ~NBITVAL(fifo);
+	else
+		entry->traffic_pending_bmp |= NBITVAL(fifo);
+
+	entry->send_tim_signal = false;
+	if (entry->traffic_lastreported_bmp != entry->traffic_pending_bmp)
+		entry->send_tim_signal = true;
+	if (send_immediately && entry->send_tim_signal &&
+	    entry->state == BRCMF_FWS_STATE_CLOSE) {
+		/* create a dummy packet and send that. The traffic          */
+		/* bitmap info will automatically be attached to that packet */
+		len = BRCMF_FWS_TYPE_PKTTAG_LEN + 2 +
+		      BRCMF_FWS_TYPE_SEQ_LEN +
+		      BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN + 2 +
+		      4 + fws->drvr->hdrlen;
+		skb = brcmu_pkt_buf_get_skb(len);
+		if (skb == NULL)
+			return false;
+		skb_pull(skb, len);
+		skcb = brcmf_skbcb(skb);
+		skcb->mac = entry;
+		skcb->state = BRCMF_FWS_SKBSTATE_TIM;
+		skcb->htod = 0;
+		skcb->htod_seq = 0;
+		data_offset = brcmf_fws_hdrpush(fws, skb);
+		ifidx = brcmf_skb_if_flags_get_field(skb, INDEX);
+		brcmf_fws_unlock(fws);
+		err = brcmf_proto_txdata(fws->drvr, ifidx, data_offset, skb);
+		brcmf_fws_lock(fws);
+		if (err)
+			brcmu_pkt_buf_free_skb(skb);
+		return true;
+	}
+	return false;
+}
+
+static void
+brcmf_fws_flow_control_check(struct brcmf_fws_info *fws, struct pktq *pq,
+			     u8 if_id)
+{
+	struct brcmf_if *ifp = brcmf_get_ifp(fws->drvr, if_id);
+
+	if (WARN_ON(!ifp))
+		return;
+
+	if ((ifp->netif_stop & BRCMF_NETIF_STOP_REASON_FWS_FC) &&
+	    pq->len <= BRCMF_FWS_FLOWCONTROL_LOWATER)
+		brcmf_txflowblock_if(ifp,
+				     BRCMF_NETIF_STOP_REASON_FWS_FC, false);
+	if (!(ifp->netif_stop & BRCMF_NETIF_STOP_REASON_FWS_FC) &&
+	    pq->len >= BRCMF_FWS_FLOWCONTROL_HIWATER) {
+		fws->stats.fws_flow_block++;
+		brcmf_txflowblock_if(ifp, BRCMF_NETIF_STOP_REASON_FWS_FC, true);
+	}
+	return;
+}
+
+static int brcmf_fws_rssi_indicate(struct brcmf_fws_info *fws, s8 rssi)
+{
+	brcmf_dbg(CTL, "rssi %d\n", rssi);
+	return 0;
+}
+
+static
+int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data)
+{
+	struct brcmf_fws_mac_descriptor *entry, *existing;
+	u8 mac_handle;
+	u8 ifidx;
+	u8 *addr;
+
+	mac_handle = *data++;
+	ifidx = *data++;
+	addr = data;
+
+	entry = &fws->desc.nodes[mac_handle & 0x1F];
+	if (type == BRCMF_FWS_TYPE_MACDESC_DEL) {
+		if (entry->occupied) {
+			brcmf_dbg(TRACE, "deleting %s mac %pM\n",
+				  entry->name, addr);
+			brcmf_fws_lock(fws);
+			brcmf_fws_macdesc_cleanup(fws, entry, -1);
+			brcmf_fws_macdesc_deinit(entry);
+			brcmf_fws_unlock(fws);
+		} else
+			fws->stats.mac_update_failed++;
+		return 0;
+	}
+
+	existing = brcmf_fws_macdesc_lookup(fws, addr);
+	if (IS_ERR(existing)) {
+		if (!entry->occupied) {
+			brcmf_fws_lock(fws);
+			entry->mac_handle = mac_handle;
+			brcmf_fws_macdesc_init(entry, addr, ifidx);
+			brcmf_fws_macdesc_set_name(fws, entry);
+			brcmu_pktq_init(&entry->psq, BRCMF_FWS_PSQ_PREC_COUNT,
+					BRCMF_FWS_PSQ_LEN);
+			brcmf_fws_unlock(fws);
+			brcmf_dbg(TRACE, "add %s mac %pM\n", entry->name, addr);
+		} else {
+			fws->stats.mac_update_failed++;
+		}
+	} else {
+		if (entry != existing) {
+			brcmf_dbg(TRACE, "copy mac %s\n", existing->name);
+			brcmf_fws_lock(fws);
+			memcpy(entry, existing,
+			       offsetof(struct brcmf_fws_mac_descriptor, psq));
+			entry->mac_handle = mac_handle;
+			brcmf_fws_macdesc_deinit(existing);
+			brcmf_fws_macdesc_set_name(fws, entry);
+			brcmf_fws_unlock(fws);
+			brcmf_dbg(TRACE, "relocate %s mac %pM\n", entry->name,
+				  addr);
+		} else {
+			brcmf_dbg(TRACE, "use existing\n");
+			WARN_ON(entry->mac_handle != mac_handle);
+			/* TODO: what should we do here: continue, reinit, .. */
+		}
+	}
+	return 0;
+}
+
+static int brcmf_fws_macdesc_state_indicate(struct brcmf_fws_info *fws,
+					    u8 type, u8 *data)
+{
+	struct brcmf_fws_mac_descriptor *entry;
+	u8 mac_handle;
+	int ret;
+
+	mac_handle = data[0];
+	entry = &fws->desc.nodes[mac_handle & 0x1F];
+	if (!entry->occupied) {
+		fws->stats.mac_ps_update_failed++;
+		return -ESRCH;
+	}
+	brcmf_fws_lock(fws);
+	/* a state update should wipe old credits */
+	entry->requested_credit = 0;
+	entry->requested_packet = 0;
+	if (type == BRCMF_FWS_TYPE_MAC_OPEN) {
+		entry->state = BRCMF_FWS_STATE_OPEN;
+		ret = BRCMF_FWS_RET_OK_SCHEDULE;
+	} else {
+		entry->state = BRCMF_FWS_STATE_CLOSE;
+		brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_BK, false);
+		brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_BE, false);
+		brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_VI, false);
+		brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_VO, true);
+		ret = BRCMF_FWS_RET_OK_NOSCHEDULE;
+	}
+	brcmf_fws_unlock(fws);
+	return ret;
+}
+
+static int brcmf_fws_interface_state_indicate(struct brcmf_fws_info *fws,
+					      u8 type, u8 *data)
+{
+	struct brcmf_fws_mac_descriptor *entry;
+	u8 ifidx;
+	int ret;
+
+	ifidx = data[0];
+
+	if (ifidx >= BRCMF_MAX_IFS) {
+		ret = -ERANGE;
+		goto fail;
+	}
+
+	entry = &fws->desc.iface[ifidx];
+	if (!entry->occupied) {
+		ret = -ESRCH;
+		goto fail;
+	}
+
+	brcmf_dbg(TRACE, "%s (%d): %s\n", brcmf_fws_get_tlv_name(type), type,
+		  entry->name);
+	brcmf_fws_lock(fws);
+	switch (type) {
+	case BRCMF_FWS_TYPE_INTERFACE_OPEN:
+		entry->state = BRCMF_FWS_STATE_OPEN;
+		ret = BRCMF_FWS_RET_OK_SCHEDULE;
+		break;
+	case BRCMF_FWS_TYPE_INTERFACE_CLOSE:
+		entry->state = BRCMF_FWS_STATE_CLOSE;
+		ret = BRCMF_FWS_RET_OK_NOSCHEDULE;
+		break;
+	default:
+		ret = -EINVAL;
+		brcmf_fws_unlock(fws);
+		goto fail;
+	}
+	brcmf_fws_unlock(fws);
+	return ret;
+
+fail:
+	fws->stats.if_update_failed++;
+	return ret;
+}
+
+static int brcmf_fws_request_indicate(struct brcmf_fws_info *fws, u8 type,
+				      u8 *data)
+{
+	struct brcmf_fws_mac_descriptor *entry;
+
+	entry = &fws->desc.nodes[data[1] & 0x1F];
+	if (!entry->occupied) {
+		if (type == BRCMF_FWS_TYPE_MAC_REQUEST_CREDIT)
+			fws->stats.credit_request_failed++;
+		else
+			fws->stats.packet_request_failed++;
+		return -ESRCH;
+	}
+
+	brcmf_dbg(TRACE, "%s (%d): %s cnt %d bmp %d\n",
+		  brcmf_fws_get_tlv_name(type), type, entry->name,
+		  data[0], data[2]);
+	brcmf_fws_lock(fws);
+	if (type == BRCMF_FWS_TYPE_MAC_REQUEST_CREDIT)
+		entry->requested_credit = data[0];
+	else
+		entry->requested_packet = data[0];
+
+	entry->ac_bitmap = data[2];
+	brcmf_fws_unlock(fws);
+	return BRCMF_FWS_RET_OK_SCHEDULE;
+}
+
+static void
+brcmf_fws_macdesc_use_req_credit(struct brcmf_fws_mac_descriptor *entry,
+				 struct sk_buff *skb)
+{
+	if (entry->requested_credit > 0) {
+		entry->requested_credit--;
+		brcmf_skb_if_flags_set_field(skb, REQUESTED, 1);
+		brcmf_skb_if_flags_set_field(skb, REQ_CREDIT, 1);
+		if (entry->state != BRCMF_FWS_STATE_CLOSE)
+			brcmf_err("requested credit set while mac not closed!\n");
+	} else if (entry->requested_packet > 0) {
+		entry->requested_packet--;
+		brcmf_skb_if_flags_set_field(skb, REQUESTED, 1);
+		brcmf_skb_if_flags_set_field(skb, REQ_CREDIT, 0);
+		if (entry->state != BRCMF_FWS_STATE_CLOSE)
+			brcmf_err("requested packet set while mac not closed!\n");
+	} else {
+		brcmf_skb_if_flags_set_field(skb, REQUESTED, 0);
+		brcmf_skb_if_flags_set_field(skb, REQ_CREDIT, 0);
+	}
+}
+
+static void brcmf_fws_macdesc_return_req_credit(struct sk_buff *skb)
+{
+	struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
+
+	if ((brcmf_skb_if_flags_get_field(skb, REQ_CREDIT)) &&
+	    (entry->state == BRCMF_FWS_STATE_CLOSE))
+		entry->requested_credit++;
+}
+
+static void brcmf_fws_return_credits(struct brcmf_fws_info *fws,
+				     u8 fifo, u8 credits)
+{
+	int lender_ac;
+	int *borrowed;
+	int *fifo_credit;
+
+	if (!credits)
+		return;
+
+	fws->fifo_credit_map |= 1 << fifo;
+
+	if ((fifo == BRCMF_FWS_FIFO_AC_BE) &&
+	    (fws->credits_borrowed[0])) {
+		for (lender_ac = BRCMF_FWS_FIFO_AC_VO; lender_ac >= 0;
+		     lender_ac--) {
+			borrowed = &fws->credits_borrowed[lender_ac];
+			if (*borrowed) {
+				fws->fifo_credit_map |= (1 << lender_ac);
+				fifo_credit = &fws->fifo_credit[lender_ac];
+				if (*borrowed >= credits) {
+					*borrowed -= credits;
+					*fifo_credit += credits;
+					return;
+				} else {
+					credits -= *borrowed;
+					*fifo_credit += *borrowed;
+					*borrowed = 0;
+				}
+			}
+		}
+	}
+
+	fws->fifo_credit[fifo] += credits;
+}
+
+static void brcmf_fws_schedule_deq(struct brcmf_fws_info *fws)
+{
+	/* only schedule dequeue when there are credits for delayed traffic */
+	if ((fws->fifo_credit_map & fws->fifo_delay_map) ||
+	    (!brcmf_fws_fc_active(fws) && fws->fifo_delay_map))
+		queue_work(fws->fws_wq, &fws->fws_dequeue_work);
+}
+
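+/* Enqueue the skb on the destination's power-save queue. Suppressed frames
+ * are inserted in free-running sequence order (handling the wrap at 256) to
+ * preserve the original transmit order; delayed frames are simply appended.
+ */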
+static int brcmf_fws_enq(struct brcmf_fws_info *fws,
+			 enum brcmf_fws_skb_state state, int fifo,
+			 struct sk_buff *p)
+{
+	int prec = 2 * fifo;
+	u32 *qfull_stat = &fws->stats.delayq_full_error;
+	struct brcmf_fws_mac_descriptor *entry;
+	struct pktq *pq;
+	struct sk_buff_head *queue;
+	struct sk_buff *p_head;
+	struct sk_buff *p_tail;
+	u32 fr_new;
+	u32 fr_compare;
+
+	entry = brcmf_skbcb(p)->mac;
+	if (entry == NULL) {
+		brcmf_err("no mac descriptor found for skb %p\n", p);
+		return -ENOENT;
+	}
+
+	brcmf_dbg(DATA, "enter: fifo %d skb %p\n", fifo, p);
+	if (state == BRCMF_FWS_SKBSTATE_SUPPRESSED) {
+		prec += 1;
+		qfull_stat = &fws->stats.supprq_full_error;
+
+		/* Fix out-of-order delivery of frames. Don't assume the
+		 * frame can be inserted at the end; look for the correct
+		 * position instead.
+		 */
+		pq = &entry->psq;
+		if (pktq_full(pq) || pktq_pfull(pq, prec)) {
+			*qfull_stat += 1;
+			return -ENFILE;
+		}
+		queue = &pq->q[prec].skblist;
+
+		p_head = skb_peek(queue);
+		p_tail = skb_peek_tail(queue);
+		fr_new = brcmf_skb_htod_tag_get_field(p, FREERUN);
+
+		while (p_head != p_tail) {
+			fr_compare = brcmf_skb_htod_tag_get_field(p_tail,
+								  FREERUN);
+			/* be sure to handle wrap of 256 */
+			if (((fr_new > fr_compare) &&
+			     ((fr_new - fr_compare) < 128)) ||
+			    ((fr_new < fr_compare) &&
+			     ((fr_compare - fr_new) > 128)))
+				break;
+			p_tail = skb_queue_prev(queue, p_tail);
+		}
+		/* Position found. Determine what to do */
+		if (p_tail == NULL) {
+			/* empty list */
+			__skb_queue_tail(queue, p);
+		} else {
+			fr_compare = brcmf_skb_htod_tag_get_field(p_tail,
+								  FREERUN);
+			if (((fr_new > fr_compare) &&
+			     ((fr_new - fr_compare) < 128)) ||
+			    ((fr_new < fr_compare) &&
+			     ((fr_compare - fr_new) > 128))) {
+				/* After tail */
+				__skb_queue_after(queue, p_tail, p);
+			} else {
+				/* Before tail */
+				__skb_insert(p, p_tail->prev, p_tail, queue);
+			}
+		}
+
+		/* Update queue counters and statistics */
+		pq->len++;
+		if (pq->hi_prec < prec)
+			pq->hi_prec = (u8) prec;
+	} else if (brcmu_pktq_penq(&entry->psq, prec, p) == NULL) {
+		*qfull_stat += 1;
+		return -ENFILE;
+	}
+
+	/* increment total enqueued packet count */
+	fws->fifo_delay_map |= 1 << fifo;
+	fws->fifo_enqpkt[fifo]++;
+
+	/* update the sk_buff state */
+	brcmf_skbcb(p)->state = state;
+
+	/*
+	 * A packet has been pushed so update traffic
+	 * availability bitmap, if applicable
+	 */
+	brcmf_fws_tim_update(fws, entry, fifo, true);
+	brcmf_fws_flow_control_check(fws, &entry->psq,
+				     brcmf_skb_if_flags_get_field(p, INDEX));
+	return 0;
+}
+
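+/* Dequeue the next eligible packet for the given FIFO, walking the MAC
+ * descriptors in round-robin order and skipping closed destinations.
+ */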
+static struct sk_buff *brcmf_fws_deq(struct brcmf_fws_info *fws, int fifo)
+{
+	struct brcmf_fws_mac_descriptor *table;
+	struct brcmf_fws_mac_descriptor *entry;
+	struct sk_buff *p;
+	int num_nodes;
+	int node_pos;
+	int prec_out;
+	int pmsk;
+	int i;
+
+	table = (struct brcmf_fws_mac_descriptor *)&fws->desc;
+	num_nodes = sizeof(fws->desc) / sizeof(struct brcmf_fws_mac_descriptor);
+	node_pos = fws->deq_node_pos[fifo];
+
+	for (i = 0; i < num_nodes; i++) {
+		entry = &table[(node_pos + i) % num_nodes];
+		if (!entry->occupied ||
+		    brcmf_fws_macdesc_closed(fws, entry, fifo))
+			continue;
+
+		if (entry->suppressed)
+			pmsk = 2;
+		else
+			pmsk = 3;
+		p = brcmu_pktq_mdeq(&entry->psq, pmsk << (fifo * 2), &prec_out);
+		if (p == NULL) {
+			if (entry->suppressed) {
+				if (entry->suppr_transit_count)
+					continue;
+				entry->suppressed = false;
+				p = brcmu_pktq_mdeq(&entry->psq,
+						    1 << (fifo * 2), &prec_out);
+			}
+		}
+		if (p == NULL)
+			continue;
+
+		brcmf_fws_macdesc_use_req_credit(entry, p);
+
+		/* move dequeue position to ensure fair round-robin */
+		fws->deq_node_pos[fifo] = (node_pos + i + 1) % num_nodes;
+		brcmf_fws_flow_control_check(fws, &entry->psq,
+					     brcmf_skb_if_flags_get_field(p,
+									  INDEX)
+					     );
+		/*
+		 * A packet has been picked up, update traffic
+		 * availability bitmap, if applicable
+		 */
+		brcmf_fws_tim_update(fws, entry, fifo, false);
+
+		/*
+		 * decrement total enqueued fifo packets and
+		 * clear delay bitmap if done.
+		 */
+		fws->fifo_enqpkt[fifo]--;
+		if (fws->fifo_enqpkt[fifo] == 0)
+			fws->fifo_delay_map &= ~(1 << fifo);
+		goto done;
+	}
+	p = NULL;
+done:
+	brcmf_dbg(DATA, "exit: fifo %d skb %p\n", fifo, p);
+	return p;
+}
+
+static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo,
+					 struct sk_buff *skb,
+					 u32 genbit, u16 seq)
+{
+	struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
+	u32 hslot;
+	int ret;
+
+	hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
+
+	/* this packet was suppressed */
+	if (!entry->suppressed) {
+		entry->suppressed = true;
+		entry->suppr_transit_count = entry->transit_count;
+		brcmf_dbg(DATA, "suppress %s: transit %d\n",
+			  entry->name, entry->transit_count);
+	}
+
+	entry->generation = genbit;
+
+	brcmf_skb_htod_tag_set_field(skb, GENERATION, genbit);
+	brcmf_skbcb(skb)->htod_seq = seq;
+	if (brcmf_skb_htod_seq_get_field(skb, FROMFW)) {
+		brcmf_skb_htod_seq_set_field(skb, FROMDRV, 1);
+		brcmf_skb_htod_seq_set_field(skb, FROMFW, 0);
+	} else {
+		brcmf_skb_htod_seq_set_field(skb, FROMDRV, 0);
+	}
+	ret = brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_SUPPRESSED, fifo, skb);
+
+	if (ret != 0) {
+		/* suppress queue is full, drop this packet */
+		brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb, true);
+	} else {
+		/* Mark suppressed to avoid a double free during wlfc cleanup */
+		brcmf_fws_hanger_mark_suppressed(&fws->hanger, hslot);
+	}
+
+	return ret;
+}
+
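+/* Process a tx status report: update statistics, fetch the packet from its
+ * hanger slot, return implied credits where applicable and either finalize
+ * the packet or re-queue it as suppressed.
+ */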
+static int
+brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
+		      u32 genbit, u16 seq)
+{
+	u32 fifo;
+	int ret;
+	bool remove_from_hanger = true;
+	struct sk_buff *skb;
+	struct brcmf_skbuff_cb *skcb;
+	struct brcmf_fws_mac_descriptor *entry = NULL;
+	struct brcmf_if *ifp;
+
+	brcmf_dbg(DATA, "flags %d\n", flags);
+
+	if (flags == BRCMF_FWS_TXSTATUS_DISCARD) {
+		fws->stats.txs_discard++;
+	} else if (flags == BRCMF_FWS_TXSTATUS_CORE_SUPPRESS) {
+		fws->stats.txs_supp_core++;
+		remove_from_hanger = false;
+	} else if (flags == BRCMF_FWS_TXSTATUS_FW_PS_SUPPRESS) {
+		fws->stats.txs_supp_ps++;
+		remove_from_hanger = false;
+	} else if (flags == BRCMF_FWS_TXSTATUS_FW_TOSSED) {
+		fws->stats.txs_tossed++;
+	} else if (flags == BRCMF_FWS_TXSTATUS_HOST_TOSSED) {
+		fws->stats.txs_host_tossed++;
+	} else {
+		brcmf_err("unexpected txstatus\n");
+	}
+
+	ret = brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb,
+				      remove_from_hanger);
+	if (ret != 0) {
+		brcmf_err("no packet in hanger slot: hslot=%d\n", hslot);
+		return ret;
+	}
+
+	skcb = brcmf_skbcb(skb);
+	entry = skcb->mac;
+	if (WARN_ON(!entry)) {
+		brcmu_pkt_buf_free_skb(skb);
+		return -EINVAL;
+	}
+	entry->transit_count--;
+	if (entry->suppressed && entry->suppr_transit_count)
+		entry->suppr_transit_count--;
+
+	brcmf_dbg(DATA, "%s flags %d htod %X seq %X\n", entry->name, flags,
+		  skcb->htod, seq);
+
+	/* pick up the implicit credit from this packet */
+	fifo = brcmf_skb_htod_tag_get_field(skb, FIFO);
+	if ((fws->fcmode == BRCMF_FWS_FCMODE_IMPLIED_CREDIT) ||
+	    (brcmf_skb_if_flags_get_field(skb, REQ_CREDIT)) ||
+	    (flags == BRCMF_FWS_TXSTATUS_HOST_TOSSED)) {
+		brcmf_fws_return_credits(fws, fifo, 1);
+		brcmf_fws_schedule_deq(fws);
+	}
+	brcmf_fws_macdesc_return_req_credit(skb);
+
+	ret = brcmf_proto_hdrpull(fws->drvr, false, skb, &ifp);
+	if (ret) {
+		brcmu_pkt_buf_free_skb(skb);
+		return -EINVAL;
+	}
+	if (!remove_from_hanger)
+		ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb,
+						    genbit, seq);
+	if (remove_from_hanger || ret)
+		brcmf_txfinalize(ifp, skb, true);
+
+	return 0;
+}
+
+static int brcmf_fws_fifocreditback_indicate(struct brcmf_fws_info *fws,
+					     u8 *data)
+{
+	int i;
+
+	if (fws->fcmode != BRCMF_FWS_FCMODE_EXPLICIT_CREDIT) {
+		brcmf_dbg(INFO, "ignored\n");
+		return BRCMF_FWS_RET_OK_NOSCHEDULE;
+	}
+
+	brcmf_dbg(DATA, "enter: data %pM\n", data);
+	brcmf_fws_lock(fws);
+	for (i = 0; i < BRCMF_FWS_FIFO_COUNT; i++)
+		brcmf_fws_return_credits(fws, i, data[i]);
+
+	brcmf_dbg(DATA, "map: credit %x delay %x\n", fws->fifo_credit_map,
+		  fws->fifo_delay_map);
+	brcmf_fws_unlock(fws);
+	return BRCMF_FWS_RET_OK_SCHEDULE;
+}
+
+static int brcmf_fws_txstatus_indicate(struct brcmf_fws_info *fws, u8 *data)
+{
+	__le32 status_le;
+	__le16 seq_le;
+	u32 status;
+	u32 hslot;
+	u32 genbit;
+	u8 flags;
+	u16 seq;
+
+	fws->stats.txs_indicate++;
+	memcpy(&status_le, data, sizeof(status_le));
+	status = le32_to_cpu(status_le);
+	flags = brcmf_txstatus_get_field(status, FLAGS);
+	hslot = brcmf_txstatus_get_field(status, HSLOT);
+	genbit = brcmf_txstatus_get_field(status, GENERATION);
+	if (BRCMF_FWS_MODE_GET_REUSESEQ(fws->mode)) {
+		memcpy(&seq_le, &data[BRCMF_FWS_TYPE_PKTTAG_LEN],
+		       sizeof(seq_le));
+		seq = le16_to_cpu(seq_le);
+	} else {
+		seq = 0;
+	}
+
+	brcmf_fws_lock(fws);
+	brcmf_fws_txs_process(fws, flags, hslot, genbit, seq);
+	brcmf_fws_unlock(fws);
+	return BRCMF_FWS_RET_OK_NOSCHEDULE;
+}
+
+static int brcmf_fws_dbg_seqnum_check(struct brcmf_fws_info *fws, u8 *data)
+{
+	__le32 timestamp;
+
+	memcpy(&timestamp, &data[2], sizeof(timestamp));
+	brcmf_dbg(CTL, "received: seq %d, timestamp %d\n", data[1],
+		  le32_to_cpu(timestamp));
+	return 0;
+}
+
+static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp,
+				       const struct brcmf_event_msg *e,
+				       void *data)
+{
+	struct brcmf_fws_info *fws = ifp->drvr->fws;
+	int i;
+	u8 *credits = data;
+
+	if (e->datalen < BRCMF_FWS_FIFO_COUNT) {
+		brcmf_err("event payload too small (%d)\n", e->datalen);
+		return -EINVAL;
+	}
+	if (fws->creditmap_received)
+		return 0;
+
+	fws->creditmap_received = true;
+
+	brcmf_dbg(TRACE, "enter: credits %pM\n", credits);
+	brcmf_fws_lock(fws);
+	for (i = 0; i < ARRAY_SIZE(fws->fifo_credit); i++) {
+		if (*credits)
+			fws->fifo_credit_map |= 1 << i;
+		else
+			fws->fifo_credit_map &= ~(1 << i);
+		fws->fifo_credit[i] = *credits++;
+	}
+	brcmf_fws_schedule_deq(fws);
+	brcmf_fws_unlock(fws);
+	return 0;
+}
+
+static int brcmf_fws_notify_bcmc_credit_support(struct brcmf_if *ifp,
+						const struct brcmf_event_msg *e,
+						void *data)
+{
+	struct brcmf_fws_info *fws = ifp->drvr->fws;
+
+	if (!fws)
+		return 0;
+
+	brcmf_fws_lock(fws);
+	fws->bcmc_credit_check = true;
+	brcmf_fws_unlock(fws);
+	return 0;
+}
+
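+/* Parse the signalling TLVs prepended to a received packet, dispatch each
+ * TLV to its handler, strip the signalling bytes from the skb and schedule
+ * a dequeue if any handler asked for one.
+ */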
+void brcmf_fws_hdrpull(struct brcmf_if *ifp, s16 siglen, struct sk_buff *skb)
+{
+	struct brcmf_skb_reorder_data *rd;
+	struct brcmf_fws_info *fws = ifp->drvr->fws;
+	u8 *signal_data;
+	s16 data_len;
+	u8 type;
+	u8 len;
+	u8 *data;
+	s32 status;
+	s32 err;
+
+	brcmf_dbg(HDRS, "enter: ifidx %d, skblen %u, sig %d\n",
+		  ifp->ifidx, skb->len, siglen);
+
+	WARN_ON(siglen > skb->len);
+
+	if (!siglen)
+		return;
+	/* if flow control disabled, skip to packet data and leave */
+	if ((!fws) || (!fws->fw_signals)) {
+		skb_pull(skb, siglen);
+		return;
+	}
+
+	fws->stats.header_pulls++;
+	data_len = siglen;
+	signal_data = skb->data;
+
+	status = BRCMF_FWS_RET_OK_NOSCHEDULE;
+	while (data_len > 0) {
+		/* extract tlv info */
+		type = signal_data[0];
+
+		/* FILLER type is actually not a TLV, but
+		 * a single byte that can be skipped.
+		 */
+		if (type == BRCMF_FWS_TYPE_FILLER) {
+			signal_data += 1;
+			data_len -= 1;
+			continue;
+		}
+		len = signal_data[1];
+		data = signal_data + 2;
+
+		brcmf_dbg(HDRS, "tlv type=%s (%d), len=%d (%d)\n",
+			  brcmf_fws_get_tlv_name(type), type, len,
+			  brcmf_fws_get_tlv_len(fws, type));
+
+		/* abort parsing when length invalid */
+		if (data_len < len + 2)
+			break;
+
+		if (len < brcmf_fws_get_tlv_len(fws, type))
+			break;
+
+		err = BRCMF_FWS_RET_OK_NOSCHEDULE;
+		switch (type) {
+		case BRCMF_FWS_TYPE_COMP_TXSTATUS:
+			break;
+		case BRCMF_FWS_TYPE_HOST_REORDER_RXPKTS:
+			rd = (struct brcmf_skb_reorder_data *)skb->cb;
+			rd->reorder = data;
+			break;
+		case BRCMF_FWS_TYPE_MACDESC_ADD:
+		case BRCMF_FWS_TYPE_MACDESC_DEL:
+			brcmf_fws_macdesc_indicate(fws, type, data);
+			break;
+		case BRCMF_FWS_TYPE_MAC_OPEN:
+		case BRCMF_FWS_TYPE_MAC_CLOSE:
+			err = brcmf_fws_macdesc_state_indicate(fws, type, data);
+			break;
+		case BRCMF_FWS_TYPE_INTERFACE_OPEN:
+		case BRCMF_FWS_TYPE_INTERFACE_CLOSE:
+			err = brcmf_fws_interface_state_indicate(fws, type,
+								 data);
+			break;
+		case BRCMF_FWS_TYPE_MAC_REQUEST_CREDIT:
+		case BRCMF_FWS_TYPE_MAC_REQUEST_PACKET:
+			err = brcmf_fws_request_indicate(fws, type, data);
+			break;
+		case BRCMF_FWS_TYPE_TXSTATUS:
+			brcmf_fws_txstatus_indicate(fws, data);
+			break;
+		case BRCMF_FWS_TYPE_FIFO_CREDITBACK:
+			err = brcmf_fws_fifocreditback_indicate(fws, data);
+			break;
+		case BRCMF_FWS_TYPE_RSSI:
+			brcmf_fws_rssi_indicate(fws, *data);
+			break;
+		case BRCMF_FWS_TYPE_TRANS_ID:
+			brcmf_fws_dbg_seqnum_check(fws, data);
+			break;
+		case BRCMF_FWS_TYPE_PKTTAG:
+		case BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP:
+		default:
+			fws->stats.tlv_invalid_type++;
+			break;
+		}
+		if (err == BRCMF_FWS_RET_OK_SCHEDULE)
+			status = BRCMF_FWS_RET_OK_SCHEDULE;
+		signal_data += len + 2;
+		data_len -= len + 2;
+	}
+
+	if (data_len != 0)
+		fws->stats.tlv_parse_failed++;
+
+	if (status == BRCMF_FWS_RET_OK_SCHEDULE)
+		brcmf_fws_schedule_deq(fws);
+
+	/* the signalling processing result does
+	 * not affect the actual Ethernet packet.
+	 */
+	skb_pull(skb, siglen);
+
+	/* this may be a signal-only packet */
+	if (skb->len == 0)
+		fws->stats.header_only_pkt++;
+}
+
+static u8 brcmf_fws_precommit_skb(struct brcmf_fws_info *fws, int fifo,
+				   struct sk_buff *p)
+{
+	struct brcmf_skbuff_cb *skcb = brcmf_skbcb(p);
+	struct brcmf_fws_mac_descriptor *entry = skcb->mac;
+	u8 flags;
+
+	if (skcb->state != BRCMF_FWS_SKBSTATE_SUPPRESSED)
+		brcmf_skb_htod_tag_set_field(p, GENERATION, entry->generation);
+	flags = BRCMF_FWS_HTOD_FLAG_PKTFROMHOST;
+	if (brcmf_skb_if_flags_get_field(p, REQUESTED)) {
+		/*
+		 * Indicate that this packet is being sent in response to an
+		 * explicit request from the firmware side.
+		 */
+		flags |= BRCMF_FWS_HTOD_FLAG_PKT_REQUESTED;
+	}
+	brcmf_skb_htod_tag_set_field(p, FLAGS, flags);
+	return brcmf_fws_hdrpush(fws, p);
+}
+
+static void brcmf_fws_rollback_toq(struct brcmf_fws_info *fws,
+				   struct sk_buff *skb, int fifo)
+{
+	struct brcmf_fws_mac_descriptor *entry;
+	struct sk_buff *pktout;
+	int qidx, hslot;
+	int rc = 0;
+
+	entry = brcmf_skbcb(skb)->mac;
+	if (entry->occupied) {
+		qidx = 2 * fifo;
+		if (brcmf_skbcb(skb)->state == BRCMF_FWS_SKBSTATE_SUPPRESSED)
+			qidx++;
+
+		pktout = brcmu_pktq_penq_head(&entry->psq, qidx, skb);
+		if (pktout == NULL) {
+			brcmf_err("%s queue %d full\n", entry->name, qidx);
+			rc = -ENOSPC;
+		}
+	} else {
+		brcmf_err("%s entry removed\n", entry->name);
+		rc = -ENOENT;
+	}
+
+	if (rc) {
+		fws->stats.rollback_failed++;
+		hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
+		brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED,
+				      hslot, 0, 0);
+	} else {
+		fws->stats.rollback_success++;
+		brcmf_fws_return_credits(fws, fifo, 1);
+		brcmf_fws_macdesc_return_req_credit(skb);
+	}
+}
+
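+/* Borrow a single transmit credit for the BE FIFO from the first AC that
+ * still has credits, unless borrowing is currently deferred.
+ */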
+static int brcmf_fws_borrow_credit(struct brcmf_fws_info *fws)
+{
+	int lender_ac;
+
+	if (time_after(fws->borrow_defer_timestamp, jiffies)) {
+		fws->fifo_credit_map &= ~(1 << BRCMF_FWS_FIFO_AC_BE);
+		return -ENAVAIL;
+	}
+
+	for (lender_ac = 0; lender_ac <= BRCMF_FWS_FIFO_AC_VO; lender_ac++) {
+		if (fws->fifo_credit[lender_ac]) {
+			fws->credits_borrowed[lender_ac]++;
+			fws->fifo_credit[lender_ac]--;
+			if (fws->fifo_credit[lender_ac] == 0)
+				fws->fifo_credit_map &= ~(1 << lender_ac);
+			fws->fifo_credit_map |= (1 << BRCMF_FWS_FIFO_AC_BE);
+			brcmf_dbg(DATA, "borrow credit from: %d\n", lender_ac);
+			return 0;
+		}
+	}
+	fws->fifo_credit_map &= ~(1 << BRCMF_FWS_FIFO_AC_BE);
+	return -ENAVAIL;
+}
+
+static int brcmf_fws_commit_skb(struct brcmf_fws_info *fws, int fifo,
+				struct sk_buff *skb)
+{
+	struct brcmf_skbuff_cb *skcb = brcmf_skbcb(skb);
+	struct brcmf_fws_mac_descriptor *entry;
+	int rc;
+	u8 ifidx;
+	u8 data_offset;
+
+	entry = skcb->mac;
+	if (IS_ERR(entry))
+		return PTR_ERR(entry);
+
+	data_offset = brcmf_fws_precommit_skb(fws, fifo, skb);
+	entry->transit_count++;
+	if (entry->suppressed)
+		entry->suppr_transit_count++;
+	ifidx = brcmf_skb_if_flags_get_field(skb, INDEX);
+	brcmf_fws_unlock(fws);
+	rc = brcmf_proto_txdata(fws->drvr, ifidx, data_offset, skb);
+	brcmf_fws_lock(fws);
+	brcmf_dbg(DATA, "%s flags %X htod %X bus_tx %d\n", entry->name,
+		  skcb->if_flags, skcb->htod, rc);
+	if (rc < 0) {
+		entry->transit_count--;
+		if (entry->suppressed)
+			entry->suppr_transit_count--;
+		(void)brcmf_proto_hdrpull(fws->drvr, false, skb, NULL);
+		goto rollback;
+	}
+
+	fws->stats.pkt2bus++;
+	fws->stats.send_pkts[fifo]++;
+	if (brcmf_skb_if_flags_get_field(skb, REQUESTED))
+		fws->stats.requested_sent[fifo]++;
+
+	return rc;
+
+rollback:
+	brcmf_fws_rollback_toq(fws, skb, fifo);
+	return rc;
+}
+
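+/* Reserve a hanger slot for the packet and fill the host-to-device tag
+ * (hanger slot, free-running sequence number and FIFO) in its control block.
+ */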
+static int brcmf_fws_assign_htod(struct brcmf_fws_info *fws, struct sk_buff *p,
+				  int fifo)
+{
+	struct brcmf_skbuff_cb *skcb = brcmf_skbcb(p);
+	int rc, hslot;
+
+	skcb->htod = 0;
+	skcb->htod_seq = 0;
+	hslot = brcmf_fws_hanger_get_free_slot(&fws->hanger);
+	brcmf_skb_htod_tag_set_field(p, HSLOT, hslot);
+	brcmf_skb_htod_tag_set_field(p, FREERUN, skcb->mac->seq[fifo]);
+	brcmf_skb_htod_tag_set_field(p, FIFO, fifo);
+	rc = brcmf_fws_hanger_pushpkt(&fws->hanger, p, hslot);
+	if (!rc)
+		skcb->mac->seq[fifo]++;
+	else
+		fws->stats.generic_error++;
+	return rc;
+}
+
+int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
+{
+	struct brcmf_pub *drvr = ifp->drvr;
+	struct brcmf_fws_info *fws = drvr->fws;
+	struct brcmf_skbuff_cb *skcb = brcmf_skbcb(skb);
+	struct ethhdr *eh = (struct ethhdr *)(skb->data);
+	int fifo = BRCMF_FWS_FIFO_BCMC;
+	bool multicast = is_multicast_ether_addr(eh->h_dest);
+	int rc = 0;
+
+	brcmf_dbg(DATA, "tx proto=0x%X\n", ntohs(eh->h_proto));
+	/* determine the priority */
+	if (!skb->priority)
+		skb->priority = cfg80211_classify8021d(skb, NULL);
+
+	drvr->tx_multicast += !!multicast;
+
+	if (fws->avoid_queueing) {
+		rc = brcmf_proto_txdata(drvr, ifp->ifidx, 0, skb);
+		if (rc < 0)
+			brcmf_txfinalize(ifp, skb, false);
+		return rc;
+	}
+
+	/* set control buffer information */
+	skcb->if_flags = 0;
+	skcb->state = BRCMF_FWS_SKBSTATE_NEW;
+	brcmf_skb_if_flags_set_field(skb, INDEX, ifp->ifidx);
+	if (!multicast)
+		fifo = brcmf_fws_prio2fifo[skb->priority];
+
+	brcmf_fws_lock(fws);
+	if (fifo != BRCMF_FWS_FIFO_AC_BE && fifo < BRCMF_FWS_FIFO_BCMC)
+		fws->borrow_defer_timestamp = jiffies +
+					      BRCMF_FWS_BORROW_DEFER_PERIOD;
+
+	skcb->mac = brcmf_fws_macdesc_find(fws, ifp, eh->h_dest);
+	brcmf_dbg(DATA, "%s mac %pM multi %d fifo %d\n", skcb->mac->name,
+		  eh->h_dest, multicast, fifo);
+	if (!brcmf_fws_assign_htod(fws, skb, fifo)) {
+		brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_DELAYED, fifo, skb);
+		brcmf_fws_schedule_deq(fws);
+	} else {
+		brcmf_err("drop skb: no hanger slot\n");
+		brcmf_txfinalize(ifp, skb, false);
+		rc = -ENOMEM;
+	}
+	brcmf_fws_unlock(fws);
+
+	return rc;
+}
+
+void brcmf_fws_reset_interface(struct brcmf_if *ifp)
+{
+	struct brcmf_fws_mac_descriptor *entry = ifp->fws_desc;
+
+	brcmf_dbg(TRACE, "enter: idx=%d\n", ifp->bssidx);
+	if (!entry)
+		return;
+
+	brcmf_fws_macdesc_init(entry, ifp->mac_addr, ifp->ifidx);
+}
+
+void brcmf_fws_add_interface(struct brcmf_if *ifp)
+{
+	struct brcmf_fws_info *fws = ifp->drvr->fws;
+	struct brcmf_fws_mac_descriptor *entry;
+
+	if (!ifp->ndev)
+		return;
+
+	entry = &fws->desc.iface[ifp->ifidx];
+	ifp->fws_desc = entry;
+	brcmf_fws_macdesc_init(entry, ifp->mac_addr, ifp->ifidx);
+	brcmf_fws_macdesc_set_name(fws, entry);
+	brcmu_pktq_init(&entry->psq, BRCMF_FWS_PSQ_PREC_COUNT,
+			BRCMF_FWS_PSQ_LEN);
+	brcmf_dbg(TRACE, "added %s\n", entry->name);
+}
+
+void brcmf_fws_del_interface(struct brcmf_if *ifp)
+{
+	struct brcmf_fws_mac_descriptor *entry = ifp->fws_desc;
+
+	if (!entry)
+		return;
+
+	brcmf_fws_lock(ifp->drvr->fws);
+	ifp->fws_desc = NULL;
+	brcmf_dbg(TRACE, "deleting %s\n", entry->name);
+	brcmf_fws_macdesc_deinit(entry);
+	brcmf_fws_cleanup(ifp->drvr->fws, ifp->ifidx);
+	brcmf_fws_unlock(ifp->drvr->fws);
+}
+
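+/* Worker draining the delayed queues. Without active flow control packets
+ * go straight to the protocol layer; otherwise they are committed per FIFO
+ * while credits last, with the BE FIFO allowed to borrow from other ACs.
+ */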
+static void brcmf_fws_dequeue_worker(struct work_struct *worker)
+{
+	struct brcmf_fws_info *fws;
+	struct brcmf_pub *drvr;
+	struct sk_buff *skb;
+	int fifo;
+	u32 hslot;
+	u32 ifidx;
+	int ret;
+
+	fws = container_of(worker, struct brcmf_fws_info, fws_dequeue_work);
+	drvr = fws->drvr;
+
+	brcmf_fws_lock(fws);
+	for (fifo = BRCMF_FWS_FIFO_BCMC; fifo >= 0 && !fws->bus_flow_blocked;
+	     fifo--) {
+		if (!brcmf_fws_fc_active(fws)) {
+			while ((skb = brcmf_fws_deq(fws, fifo)) != NULL) {
+				hslot = brcmf_skb_htod_tag_get_field(skb,
+								     HSLOT);
+				brcmf_fws_hanger_poppkt(&fws->hanger, hslot,
+							&skb, true);
+				ifidx = brcmf_skb_if_flags_get_field(skb,
+								     INDEX);
+				/* Use proto layer to send data frame */
+				brcmf_fws_unlock(fws);
+				ret = brcmf_proto_txdata(drvr, ifidx, 0, skb);
+				brcmf_fws_lock(fws);
+				if (ret < 0)
+					brcmf_txfinalize(brcmf_get_ifp(drvr,
+								       ifidx),
+							 skb, false);
+				if (fws->bus_flow_blocked)
+					break;
+			}
+			continue;
+		}
+		while ((fws->fifo_credit[fifo]) || ((!fws->bcmc_credit_check) &&
+		       (fifo == BRCMF_FWS_FIFO_BCMC))) {
+			skb = brcmf_fws_deq(fws, fifo);
+			if (!skb)
+				break;
+			fws->fifo_credit[fifo]--;
+			if (brcmf_fws_commit_skb(fws, fifo, skb))
+				break;
+			if (fws->bus_flow_blocked)
+				break;
+		}
+		if ((fifo == BRCMF_FWS_FIFO_AC_BE) &&
+		    (fws->fifo_credit[fifo] == 0) &&
+		    (!fws->bus_flow_blocked)) {
+			while (brcmf_fws_borrow_credit(fws) == 0) {
+				skb = brcmf_fws_deq(fws, fifo);
+				if (!skb) {
+					brcmf_fws_return_credits(fws, fifo, 1);
+					break;
+				}
+				if (brcmf_fws_commit_skb(fws, fifo, skb))
+					break;
+				if (fws->bus_flow_blocked)
+					break;
+			}
+		}
+	}
+	brcmf_fws_unlock(fws);
+}
+
+#ifdef DEBUG
+static int brcmf_debugfs_fws_stats_read(struct seq_file *seq, void *data)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(seq->private);
+	struct brcmf_fws_stats *fwstats = &bus_if->drvr->fws->stats;
+
+	seq_printf(seq,
+		   "header_pulls:      %u\n"
+		   "header_only_pkt:   %u\n"
+		   "tlv_parse_failed:  %u\n"
+		   "tlv_invalid_type:  %u\n"
+		   "mac_update_fails:  %u\n"
+		   "ps_update_fails:   %u\n"
+		   "if_update_fails:   %u\n"
+		   "pkt2bus:           %u\n"
+		   "generic_error:     %u\n"
+		   "rollback_success:  %u\n"
+		   "rollback_failed:   %u\n"
+		   "delayq_full:       %u\n"
+		   "supprq_full:       %u\n"
+		   "txs_indicate:      %u\n"
+		   "txs_discard:       %u\n"
+		   "txs_suppr_core:    %u\n"
+		   "txs_suppr_ps:      %u\n"
+		   "txs_tossed:        %u\n"
+		   "txs_host_tossed:   %u\n"
+		   "bus_flow_block:    %u\n"
+		   "fws_flow_block:    %u\n"
+		   "send_pkts:         BK:%u BE:%u VI:%u VO:%u BCMC:%u\n"
+		   "requested_sent:    BK:%u BE:%u VI:%u VO:%u BCMC:%u\n",
+		   fwstats->header_pulls,
+		   fwstats->header_only_pkt,
+		   fwstats->tlv_parse_failed,
+		   fwstats->tlv_invalid_type,
+		   fwstats->mac_update_failed,
+		   fwstats->mac_ps_update_failed,
+		   fwstats->if_update_failed,
+		   fwstats->pkt2bus,
+		   fwstats->generic_error,
+		   fwstats->rollback_success,
+		   fwstats->rollback_failed,
+		   fwstats->delayq_full_error,
+		   fwstats->supprq_full_error,
+		   fwstats->txs_indicate,
+		   fwstats->txs_discard,
+		   fwstats->txs_supp_core,
+		   fwstats->txs_supp_ps,
+		   fwstats->txs_tossed,
+		   fwstats->txs_host_tossed,
+		   fwstats->bus_flow_block,
+		   fwstats->fws_flow_block,
+		   fwstats->send_pkts[0], fwstats->send_pkts[1],
+		   fwstats->send_pkts[2], fwstats->send_pkts[3],
+		   fwstats->send_pkts[4],
+		   fwstats->requested_sent[0],
+		   fwstats->requested_sent[1],
+		   fwstats->requested_sent[2],
+		   fwstats->requested_sent[3],
+		   fwstats->requested_sent[4]);
+
+	return 0;
+}
+#else
+static int brcmf_debugfs_fws_stats_read(struct seq_file *seq, void *data)
+{
+	return 0;
+}
+#endif
+
+int brcmf_fws_init(struct brcmf_pub *drvr)
+{
+	struct brcmf_fws_info *fws;
+	struct brcmf_if *ifp;
+	u32 tlv = BRCMF_FWS_FLAGS_RSSI_SIGNALS;
+	int rc;
+	u32 mode;
+
+	drvr->fws = kzalloc(sizeof(*(drvr->fws)), GFP_KERNEL);
+	if (!drvr->fws) {
+		rc = -ENOMEM;
+		goto fail;
+	}
+
+	fws = drvr->fws;
+
+	spin_lock_init(&fws->spinlock);
+
+	/* set linkage back */
+	fws->drvr = drvr;
+	fws->fcmode = fcmode;
+
+	if ((drvr->bus_if->always_use_fws_queue == false) &&
+	    (fcmode == BRCMF_FWS_FCMODE_NONE)) {
+		fws->avoid_queueing = true;
+		brcmf_dbg(INFO, "FWS queueing will be avoided\n");
+		return 0;
+	}
+
+	fws->fws_wq = create_singlethread_workqueue("brcmf_fws_wq");
+	if (fws->fws_wq == NULL) {
+		brcmf_err("workqueue creation failed\n");
+		rc = -EBADF;
+		goto fail;
+	}
+	INIT_WORK(&fws->fws_dequeue_work, brcmf_fws_dequeue_worker);
+
+	/* enable firmware signalling if fcmode active */
+	if (fws->fcmode != BRCMF_FWS_FCMODE_NONE)
+		tlv |= BRCMF_FWS_FLAGS_XONXOFF_SIGNALS |
+		       BRCMF_FWS_FLAGS_CREDIT_STATUS_SIGNALS |
+		       BRCMF_FWS_FLAGS_HOST_PROPTXSTATUS_ACTIVE |
+		       BRCMF_FWS_FLAGS_HOST_RXREORDER_ACTIVE;
+
+	rc = brcmf_fweh_register(drvr, BRCMF_E_FIFO_CREDIT_MAP,
+				 brcmf_fws_notify_credit_map);
+	if (rc < 0) {
+		brcmf_err("register credit map handler failed\n");
+		goto fail;
+	}
+	rc = brcmf_fweh_register(drvr, BRCMF_E_BCMC_CREDIT_SUPPORT,
+				 brcmf_fws_notify_bcmc_credit_support);
+	if (rc < 0) {
+		brcmf_err("register bcmc credit handler failed\n");
+		brcmf_fweh_unregister(drvr, BRCMF_E_FIFO_CREDIT_MAP);
+		goto fail;
+	}
+
+	/* Setting the iovar may fail if the feature is unsupported,
+	 * so leave rc as-is and let driver initialization continue.
+	 * Set the mode back to none to indicate signalling is not enabled.
+	 */
+	fws->fw_signals = true;
+	ifp = brcmf_get_ifp(drvr, 0);
+	if (brcmf_fil_iovar_int_set(ifp, "tlv", tlv)) {
+		brcmf_err("failed to set bdcv2 tlv signaling\n");
+		fws->fcmode = BRCMF_FWS_FCMODE_NONE;
+		fws->fw_signals = false;
+	}
+
+	if (brcmf_fil_iovar_int_set(ifp, "ampdu_hostreorder", 1))
+		brcmf_dbg(INFO, "enabling AMPDU host-reorder failed\n");
+
+	/* Enable seq number reuse, if supported */
+	if (brcmf_fil_iovar_int_get(ifp, "wlfc_mode", &mode) == 0) {
+		if (BRCMF_FWS_MODE_GET_REUSESEQ(mode)) {
+			mode = 0;
+			BRCMF_FWS_MODE_SET_REUSESEQ(mode, 1);
+			if (brcmf_fil_iovar_int_set(ifp,
+						    "wlfc_mode", mode) == 0) {
+				BRCMF_FWS_MODE_SET_REUSESEQ(fws->mode, 1);
+			}
+		}
+	}
+
+	brcmf_fws_hanger_init(&fws->hanger);
+	brcmf_fws_macdesc_init(&fws->desc.other, NULL, 0);
+	brcmf_fws_macdesc_set_name(fws, &fws->desc.other);
+	brcmu_pktq_init(&fws->desc.other.psq, BRCMF_FWS_PSQ_PREC_COUNT,
+			BRCMF_FWS_PSQ_LEN);
+
+	/* create debugfs file for statistics */
+	brcmf_debugfs_add_entry(drvr, "fws_stats",
+				brcmf_debugfs_fws_stats_read);
+
+	brcmf_dbg(INFO, "%s bdcv2 tlv signaling [%x]\n",
+		  fws->fw_signals ? "enabled" : "disabled", tlv);
+	return 0;
+
+fail:
+	brcmf_fws_deinit(drvr);
+	return rc;
+}
+
+void brcmf_fws_deinit(struct brcmf_pub *drvr)
+{
+	struct brcmf_fws_info *fws = drvr->fws;
+
+	if (!fws)
+		return;
+
+	if (drvr->fws->fws_wq)
+		destroy_workqueue(drvr->fws->fws_wq);
+
+	/* cleanup */
+	brcmf_fws_lock(fws);
+	brcmf_fws_cleanup(fws, -1);
+	drvr->fws = NULL;
+	brcmf_fws_unlock(fws);
+
+	/* free top structure */
+	kfree(fws);
+}
+
+bool brcmf_fws_fc_active(struct brcmf_fws_info *fws)
+{
+	if (!fws->creditmap_received)
+		return false;
+
+	return fws->fcmode != BRCMF_FWS_FCMODE_NONE;
+}
+
+void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb)
+{
+	u32 hslot;
+
+	if (brcmf_skbcb(skb)->state == BRCMF_FWS_SKBSTATE_TIM) {
+		brcmu_pkt_buf_free_skb(skb);
+		return;
+	}
+	brcmf_fws_lock(fws);
+	hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
+	brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED, hslot, 0, 0);
+	brcmf_fws_unlock(fws);
+}
+
+void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked)
+{
+	struct brcmf_fws_info *fws = drvr->fws;
+
+	fws->bus_flow_blocked = flow_blocked;
+	if (!flow_blocked)
+		brcmf_fws_schedule_deq(fws);
+	else
+		fws->stats.bus_flow_block++;
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h
new file mode 100644
index 0000000..a36bac1
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+
+#ifndef FWSIGNAL_H_
+#define FWSIGNAL_H_
+
+int brcmf_fws_init(struct brcmf_pub *drvr);
+void brcmf_fws_deinit(struct brcmf_pub *drvr);
+bool brcmf_fws_fc_active(struct brcmf_fws_info *fws);
+void brcmf_fws_hdrpull(struct brcmf_if *ifp, s16 siglen, struct sk_buff *skb);
+int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb);
+
+void brcmf_fws_reset_interface(struct brcmf_if *ifp);
+void brcmf_fws_add_interface(struct brcmf_if *ifp);
+void brcmf_fws_del_interface(struct brcmf_if *ifp);
+void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb);
+void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked);
+
+#endif /* FWSIGNAL_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
new file mode 100644
index 0000000..44e618f
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
@@ -0,0 +1,1561 @@
+/* Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*******************************************************************************
+ * Communicates with the dongle by using dcmd codes.
+ * For certain dcmd codes, the dongle interprets string data from the host.
+ ******************************************************************************/
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+
+#include <brcmu_utils.h>
+#include <brcmu_wifi.h>
+
+#include "core.h"
+#include "debug.h"
+#include "proto.h"
+#include "msgbuf.h"
+#include "commonring.h"
+#include "flowring.h"
+#include "bus.h"
+#include "tracepoint.h"
+
+
+#define MSGBUF_IOCTL_RESP_TIMEOUT		2000
+
+#define MSGBUF_TYPE_GEN_STATUS			0x1
+#define MSGBUF_TYPE_RING_STATUS			0x2
+#define MSGBUF_TYPE_FLOW_RING_CREATE		0x3
+#define MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT	0x4
+#define MSGBUF_TYPE_FLOW_RING_DELETE		0x5
+#define MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT	0x6
+#define MSGBUF_TYPE_FLOW_RING_FLUSH		0x7
+#define MSGBUF_TYPE_FLOW_RING_FLUSH_CMPLT	0x8
+#define MSGBUF_TYPE_IOCTLPTR_REQ		0x9
+#define MSGBUF_TYPE_IOCTLPTR_REQ_ACK		0xA
+#define MSGBUF_TYPE_IOCTLRESP_BUF_POST		0xB
+#define MSGBUF_TYPE_IOCTL_CMPLT			0xC
+#define MSGBUF_TYPE_EVENT_BUF_POST		0xD
+#define MSGBUF_TYPE_WL_EVENT			0xE
+#define MSGBUF_TYPE_TX_POST			0xF
+#define MSGBUF_TYPE_TX_STATUS			0x10
+#define MSGBUF_TYPE_RXBUF_POST			0x11
+#define MSGBUF_TYPE_RX_CMPLT			0x12
+#define MSGBUF_TYPE_LPBK_DMAXFER		0x13
+#define MSGBUF_TYPE_LPBK_DMAXFER_CMPLT		0x14
+
+#define NR_TX_PKTIDS				2048
+#define NR_RX_PKTIDS				1024
+
+#define BRCMF_IOCTL_REQ_PKTID			0xFFFE
+
+#define BRCMF_MSGBUF_MAX_PKT_SIZE		2048
+#define BRCMF_MSGBUF_RXBUFPOST_THRESHOLD	32
+#define BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST	8
+#define BRCMF_MSGBUF_MAX_EVENTBUF_POST		8
+
+#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3	0x01
+#define BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT	5
+
+#define BRCMF_MSGBUF_TX_FLUSH_CNT1		32
+#define BRCMF_MSGBUF_TX_FLUSH_CNT2		96
+
+#define BRCMF_MSGBUF_DELAY_TXWORKER_THRS	96
+#define BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS	32
+#define BRCMF_MSGBUF_UPDATE_RX_PTR_THRS		48
+
+
+struct msgbuf_common_hdr {
+	u8				msgtype;
+	u8				ifidx;
+	u8				flags;
+	u8				rsvd0;
+	__le32				request_id;
+};
+
+struct msgbuf_buf_addr {
+	__le32				low_addr;
+	__le32				high_addr;
+};
+
+struct msgbuf_ioctl_req_hdr {
+	struct msgbuf_common_hdr	msg;
+	__le32				cmd;
+	__le16				trans_id;
+	__le16				input_buf_len;
+	__le16				output_buf_len;
+	__le16				rsvd0[3];
+	struct msgbuf_buf_addr		req_buf_addr;
+	__le32				rsvd1[2];
+};
+
+struct msgbuf_tx_msghdr {
+	struct msgbuf_common_hdr	msg;
+	u8				txhdr[ETH_HLEN];
+	u8				flags;
+	u8				seg_cnt;
+	struct msgbuf_buf_addr		metadata_buf_addr;
+	struct msgbuf_buf_addr		data_buf_addr;
+	__le16				metadata_buf_len;
+	__le16				data_len;
+	__le32				rsvd0;
+};
+
+struct msgbuf_rx_bufpost {
+	struct msgbuf_common_hdr	msg;
+	__le16				metadata_buf_len;
+	__le16				data_buf_len;
+	__le32				rsvd0;
+	struct msgbuf_buf_addr		metadata_buf_addr;
+	struct msgbuf_buf_addr		data_buf_addr;
+};
+
+struct msgbuf_rx_ioctl_resp_or_event {
+	struct msgbuf_common_hdr	msg;
+	__le16				host_buf_len;
+	__le16				rsvd0[3];
+	struct msgbuf_buf_addr		host_buf_addr;
+	__le32				rsvd1[4];
+};
+
+struct msgbuf_completion_hdr {
+	__le16				status;
+	__le16				flow_ring_id;
+};
+
+struct msgbuf_rx_event {
+	struct msgbuf_common_hdr	msg;
+	struct msgbuf_completion_hdr	compl_hdr;
+	__le16				event_data_len;
+	__le16				seqnum;
+	__le16				rsvd0[4];
+};
+
+struct msgbuf_ioctl_resp_hdr {
+	struct msgbuf_common_hdr	msg;
+	struct msgbuf_completion_hdr	compl_hdr;
+	__le16				resp_len;
+	__le16				trans_id;
+	__le32				cmd;
+	__le32				rsvd0;
+};
+
+struct msgbuf_tx_status {
+	struct msgbuf_common_hdr	msg;
+	struct msgbuf_completion_hdr	compl_hdr;
+	__le16				metadata_len;
+	__le16				tx_status;
+};
+
+struct msgbuf_rx_complete {
+	struct msgbuf_common_hdr	msg;
+	struct msgbuf_completion_hdr	compl_hdr;
+	__le16				metadata_len;
+	__le16				data_len;
+	__le16				data_offset;
+	__le16				flags;
+	__le32				rx_status_0;
+	__le32				rx_status_1;
+	__le32				rsvd0;
+};
+
+struct msgbuf_tx_flowring_create_req {
+	struct msgbuf_common_hdr	msg;
+	u8				da[ETH_ALEN];
+	u8				sa[ETH_ALEN];
+	u8				tid;
+	u8				if_flags;
+	__le16				flow_ring_id;
+	u8				tc;
+	u8				priority;
+	__le16				int_vector;
+	__le16				max_items;
+	__le16				len_item;
+	struct msgbuf_buf_addr		flow_ring_addr;
+};
+
+struct msgbuf_tx_flowring_delete_req {
+	struct msgbuf_common_hdr	msg;
+	__le16				flow_ring_id;
+	__le16				reason;
+	__le32				rsvd0[7];
+};
+
+struct msgbuf_flowring_create_resp {
+	struct msgbuf_common_hdr	msg;
+	struct msgbuf_completion_hdr	compl_hdr;
+	__le32				rsvd0[3];
+};
+
+struct msgbuf_flowring_delete_resp {
+	struct msgbuf_common_hdr	msg;
+	struct msgbuf_completion_hdr	compl_hdr;
+	__le32				rsvd0[3];
+};
+
+struct msgbuf_flowring_flush_resp {
+	struct msgbuf_common_hdr	msg;
+	struct msgbuf_completion_hdr	compl_hdr;
+	__le32				rsvd0[3];
+};
+
+struct brcmf_msgbuf_work_item {
+	struct list_head queue;
+	u32 flowid;
+	int ifidx;
+	u8 sa[ETH_ALEN];
+	u8 da[ETH_ALEN];
+};
+
+struct brcmf_msgbuf {
+	struct brcmf_pub *drvr;
+
+	struct brcmf_commonring **commonrings;
+	struct brcmf_commonring **flowrings;
+	dma_addr_t *flowring_dma_handle;
+	u16 nrof_flowrings;
+
+	u16 rx_dataoffset;
+	u32 max_rxbufpost;
+	u16 rx_metadata_offset;
+	u32 rxbufpost;
+
+	u32 max_ioctlrespbuf;
+	u32 cur_ioctlrespbuf;
+	u32 max_eventbuf;
+	u32 cur_eventbuf;
+
+	void *ioctbuf;
+	dma_addr_t ioctbuf_handle;
+	u32 ioctbuf_phys_hi;
+	u32 ioctbuf_phys_lo;
+	int ioctl_resp_status;
+	u32 ioctl_resp_ret_len;
+	u32 ioctl_resp_pktid;
+
+	u16 data_seq_no;
+	u16 ioctl_seq_no;
+	u32 reqid;
+	wait_queue_head_t ioctl_resp_wait;
+	bool ctl_completed;
+
+	struct brcmf_msgbuf_pktids *tx_pktids;
+	struct brcmf_msgbuf_pktids *rx_pktids;
+	struct brcmf_flowring *flow;
+
+	struct workqueue_struct *txflow_wq;
+	struct work_struct txflow_work;
+	unsigned long *flow_map;
+	unsigned long *txstatus_done_map;
+
+	struct work_struct flowring_work;
+	spinlock_t flowring_work_lock;
+	struct list_head work_queue;
+};
+
+struct brcmf_msgbuf_pktid {
+	atomic_t  allocated;
+	u16 data_offset;
+	struct sk_buff *skb;
+	dma_addr_t physaddr;
+};
+
+struct brcmf_msgbuf_pktids {
+	u32 array_size;
+	u32 last_allocated_idx;
+	enum dma_data_direction direction;
+	struct brcmf_msgbuf_pktid *array;
+};
+
+static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf);
+
+
+static struct brcmf_msgbuf_pktids *
+brcmf_msgbuf_init_pktids(u32 nr_array_entries,
+			 enum dma_data_direction direction)
+{
+	struct brcmf_msgbuf_pktid *array;
+	struct brcmf_msgbuf_pktids *pktids;
+
+	array = kcalloc(nr_array_entries, sizeof(*array), GFP_KERNEL);
+	if (!array)
+		return NULL;
+
+	pktids = kzalloc(sizeof(*pktids), GFP_KERNEL);
+	if (!pktids) {
+		kfree(array);
+		return NULL;
+	}
+	pktids->array = array;
+	pktids->array_size = nr_array_entries;
+	pktids->direction = direction;
+
+	return pktids;
+}
+
+
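+/* Map the skb for DMA and claim a free entry in the packet id array with an
+ * atomic compare-and-swap, searching from the last allocated index onwards.
+ */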
+static int
+brcmf_msgbuf_alloc_pktid(struct device *dev,
+			 struct brcmf_msgbuf_pktids *pktids,
+			 struct sk_buff *skb, u16 data_offset,
+			 dma_addr_t *physaddr, u32 *idx)
+{
+	struct brcmf_msgbuf_pktid *array;
+	u32 count;
+
+	array = pktids->array;
+
+	*physaddr = dma_map_single(dev, skb->data + data_offset,
+				   skb->len - data_offset, pktids->direction);
+
+	if (dma_mapping_error(dev, *physaddr)) {
+		brcmf_err("dma_map_single failed !!\n");
+		return -ENOMEM;
+	}
+
+	*idx = pktids->last_allocated_idx;
+
+	count = 0;
+	do {
+		(*idx)++;
+		if (*idx == pktids->array_size)
+			*idx = 0;
+		if (array[*idx].allocated.counter == 0)
+			if (atomic_cmpxchg(&array[*idx].allocated, 0, 1) == 0)
+				break;
+		count++;
+	} while (count < pktids->array_size);
+
+	if (count == pktids->array_size)
+		return -ENOMEM;
+
+	array[*idx].data_offset = data_offset;
+	array[*idx].physaddr = *physaddr;
+	array[*idx].skb = skb;
+
+	pktids->last_allocated_idx = *idx;
+
+	return 0;
+}
+
+
+static struct sk_buff *
+brcmf_msgbuf_get_pktid(struct device *dev, struct brcmf_msgbuf_pktids *pktids,
+		       u32 idx)
+{
+	struct brcmf_msgbuf_pktid *pktid;
+	struct sk_buff *skb;
+
+	if (idx >= pktids->array_size) {
+		brcmf_err("Invalid packet id %d (max %d)\n", idx,
+			  pktids->array_size);
+		return NULL;
+	}
+	if (pktids->array[idx].allocated.counter) {
+		pktid = &pktids->array[idx];
+		dma_unmap_single(dev, pktid->physaddr,
+				 pktid->skb->len - pktid->data_offset,
+				 pktids->direction);
+		skb = pktid->skb;
+		pktid->allocated.counter = 0;
+		return skb;
+	} else {
+		brcmf_err("Invalid packet id %d (not in use)\n", idx);
+	}
+
+	return NULL;
+}
+
+
+static void
+brcmf_msgbuf_release_array(struct device *dev,
+			   struct brcmf_msgbuf_pktids *pktids)
+{
+	struct brcmf_msgbuf_pktid *array;
+	struct brcmf_msgbuf_pktid *pktid;
+	u32 count;
+
+	array = pktids->array;
+	count = 0;
+	do {
+		if (array[count].allocated.counter) {
+			pktid = &array[count];
+			dma_unmap_single(dev, pktid->physaddr,
+					 pktid->skb->len - pktid->data_offset,
+					 pktids->direction);
+			brcmu_pkt_buf_free_skb(pktid->skb);
+		}
+		count++;
+	} while (count < pktids->array_size);
+
+	kfree(array);
+	kfree(pktids);
+}
+
+
+static void brcmf_msgbuf_release_pktids(struct brcmf_msgbuf *msgbuf)
+{
+	if (msgbuf->rx_pktids)
+		brcmf_msgbuf_release_array(msgbuf->drvr->bus_if->dev,
+					   msgbuf->rx_pktids);
+	if (msgbuf->tx_pktids)
+		brcmf_msgbuf_release_array(msgbuf->drvr->bus_if->dev,
+					   msgbuf->tx_pktids);
+}
+
+
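+/* Submit an ioctl request: reserve a slot on the control submit ring, copy
+ * the request payload into the preallocated ioctl buffer and hand the
+ * descriptor to the device.
+ */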
+static int brcmf_msgbuf_tx_ioctl(struct brcmf_pub *drvr, int ifidx,
+				 uint cmd, void *buf, uint len)
+{
+	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
+	struct brcmf_commonring *commonring;
+	struct msgbuf_ioctl_req_hdr *request;
+	u16 buf_len;
+	void *ret_ptr;
+	int err;
+
+	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
+	brcmf_commonring_lock(commonring);
+	ret_ptr = brcmf_commonring_reserve_for_write(commonring);
+	if (!ret_ptr) {
+		brcmf_err("Failed to reserve space in commonring\n");
+		brcmf_commonring_unlock(commonring);
+		return -ENOMEM;
+	}
+
+	msgbuf->reqid++;
+
+	request = (struct msgbuf_ioctl_req_hdr *)ret_ptr;
+	request->msg.msgtype = MSGBUF_TYPE_IOCTLPTR_REQ;
+	request->msg.ifidx = (u8)ifidx;
+	request->msg.flags = 0;
+	request->msg.request_id = cpu_to_le32(BRCMF_IOCTL_REQ_PKTID);
+	request->cmd = cpu_to_le32(cmd);
+	request->output_buf_len = cpu_to_le16(len);
+	request->trans_id = cpu_to_le16(msgbuf->reqid);
+
+	buf_len = min_t(u16, len, BRCMF_TX_IOCTL_MAX_MSG_SIZE);
+	request->input_buf_len = cpu_to_le16(buf_len);
+	request->req_buf_addr.high_addr = cpu_to_le32(msgbuf->ioctbuf_phys_hi);
+	request->req_buf_addr.low_addr = cpu_to_le32(msgbuf->ioctbuf_phys_lo);
+	if (buf)
+		memcpy(msgbuf->ioctbuf, buf, buf_len);
+	else
+		memset(msgbuf->ioctbuf, 0, buf_len);
+
+	err = brcmf_commonring_write_complete(commonring);
+	brcmf_commonring_unlock(commonring);
+
+	return err;
+}
+
+
+static int brcmf_msgbuf_ioctl_resp_wait(struct brcmf_msgbuf *msgbuf)
+{
+	return wait_event_timeout(msgbuf->ioctl_resp_wait,
+				  msgbuf->ctl_completed,
+				  msecs_to_jiffies(MSGBUF_IOCTL_RESP_TIMEOUT));
+}
+
+
+static void brcmf_msgbuf_ioctl_resp_wake(struct brcmf_msgbuf *msgbuf)
+{
+	msgbuf->ctl_completed = true;
+	if (waitqueue_active(&msgbuf->ioctl_resp_wait))
+		wake_up(&msgbuf->ioctl_resp_wait);
+}
+
+
+static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx,
+				   uint cmd, void *buf, uint len)
+{
+	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
+	struct sk_buff *skb = NULL;
+	int timeout;
+	int err;
+
+	brcmf_dbg(MSGBUF, "ifidx=%d, cmd=%d, len=%d\n", ifidx, cmd, len);
+	msgbuf->ctl_completed = false;
+	err = brcmf_msgbuf_tx_ioctl(drvr, ifidx, cmd, buf, len);
+	if (err)
+		return err;
+
+	timeout = brcmf_msgbuf_ioctl_resp_wait(msgbuf);
+	if (!timeout) {
+		brcmf_err("Timeout on response for query command\n");
+		return -EIO;
+	}
+
+	skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
+				     msgbuf->rx_pktids,
+				     msgbuf->ioctl_resp_pktid);
+	if (msgbuf->ioctl_resp_ret_len != 0) {
+		if (!skb)
+			return -EBADF;
+
+		memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ?
+				       len : msgbuf->ioctl_resp_ret_len);
+	}
+	brcmu_pkt_buf_free_skb(skb);
+
+	return msgbuf->ioctl_resp_status;
+}
+
+
+static int brcmf_msgbuf_set_dcmd(struct brcmf_pub *drvr, int ifidx,
+				 uint cmd, void *buf, uint len)
+{
+	return brcmf_msgbuf_query_dcmd(drvr, ifidx, cmd, buf, len);
+}
+
+
+static int brcmf_msgbuf_hdrpull(struct brcmf_pub *drvr, bool do_fws,
+				struct sk_buff *skb, struct brcmf_if **ifp)
+{
+	return -ENODEV;
+}
+
+
+static void
+brcmf_msgbuf_remove_flowring(struct brcmf_msgbuf *msgbuf, u16 flowid)
+{
+	u32 dma_sz;
+	void *dma_buf;
+
+	brcmf_dbg(MSGBUF, "Removing flowring %d\n", flowid);
+
+	dma_sz = BRCMF_H2D_TXFLOWRING_MAX_ITEM * BRCMF_H2D_TXFLOWRING_ITEMSIZE;
+	dma_buf = msgbuf->flowrings[flowid]->buf_addr;
+	dma_free_coherent(msgbuf->drvr->bus_if->dev, dma_sz, dma_buf,
+			  msgbuf->flowring_dma_handle[flowid]);
+
+	brcmf_flowring_delete(msgbuf->flow, flowid);
+}
+
+
+static struct brcmf_msgbuf_work_item *
+brcmf_msgbuf_dequeue_work(struct brcmf_msgbuf *msgbuf)
+{
+	struct brcmf_msgbuf_work_item *work = NULL;
+	ulong flags;
+
+	spin_lock_irqsave(&msgbuf->flowring_work_lock, flags);
+	if (!list_empty(&msgbuf->work_queue)) {
+		work = list_first_entry(&msgbuf->work_queue,
+					struct brcmf_msgbuf_work_item, queue);
+		list_del(&work->queue);
+	}
+	spin_unlock_irqrestore(&msgbuf->flowring_work_lock, flags);
+
+	return work;
+}
+
+
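+/* Allocate the DMA memory for a new flow ring and send a FLOW_RING_CREATE
+ * request to the device over the control submit ring.
+ */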
+static u32
+brcmf_msgbuf_flowring_create_worker(struct brcmf_msgbuf *msgbuf,
+				    struct brcmf_msgbuf_work_item *work)
+{
+	struct msgbuf_tx_flowring_create_req *create;
+	struct brcmf_commonring *commonring;
+	void *ret_ptr;
+	u32 flowid;
+	void *dma_buf;
+	u32 dma_sz;
+	u64 address;
+	int err;
+
+	flowid = work->flowid;
+	dma_sz = BRCMF_H2D_TXFLOWRING_MAX_ITEM * BRCMF_H2D_TXFLOWRING_ITEMSIZE;
+	dma_buf = dma_alloc_coherent(msgbuf->drvr->bus_if->dev, dma_sz,
+				     &msgbuf->flowring_dma_handle[flowid],
+				     GFP_KERNEL);
+	if (!dma_buf) {
+		brcmf_err("dma_alloc_coherent failed\n");
+		brcmf_flowring_delete(msgbuf->flow, flowid);
+		return BRCMF_FLOWRING_INVALID_ID;
+	}
+
+	brcmf_commonring_config(msgbuf->flowrings[flowid],
+				BRCMF_H2D_TXFLOWRING_MAX_ITEM,
+				BRCMF_H2D_TXFLOWRING_ITEMSIZE, dma_buf);
+
+	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
+	brcmf_commonring_lock(commonring);
+	ret_ptr = brcmf_commonring_reserve_for_write(commonring);
+	if (!ret_ptr) {
+		brcmf_err("Failed to reserve space in commonring\n");
+		brcmf_commonring_unlock(commonring);
+		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
+		return BRCMF_FLOWRING_INVALID_ID;
+	}
+
+	create = (struct msgbuf_tx_flowring_create_req *)ret_ptr;
+	create->msg.msgtype = MSGBUF_TYPE_FLOW_RING_CREATE;
+	create->msg.ifidx = work->ifidx;
+	create->msg.request_id = 0;
+	create->tid = brcmf_flowring_tid(msgbuf->flow, flowid);
+	create->flow_ring_id = cpu_to_le16(flowid +
+					   BRCMF_NROF_H2D_COMMON_MSGRINGS);
+	memcpy(create->sa, work->sa, ETH_ALEN);
+	memcpy(create->da, work->da, ETH_ALEN);
+	address = (u64)msgbuf->flowring_dma_handle[flowid];
+	create->flow_ring_addr.high_addr = cpu_to_le32(address >> 32);
+	create->flow_ring_addr.low_addr = cpu_to_le32(address & 0xffffffff);
+	create->max_items = cpu_to_le16(BRCMF_H2D_TXFLOWRING_MAX_ITEM);
+	create->len_item = cpu_to_le16(BRCMF_H2D_TXFLOWRING_ITEMSIZE);
+
+	brcmf_dbg(MSGBUF, "Send Flow Create Req flow ID %d for peer %pM prio %d ifindex %d\n",
+		  flowid, work->da, create->tid, work->ifidx);
+
+	err = brcmf_commonring_write_complete(commonring);
+	brcmf_commonring_unlock(commonring);
+	if (err) {
+		brcmf_err("Failed to write commonring\n");
+		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
+		return BRCMF_FLOWRING_INVALID_ID;
+	}
+
+	return flowid;
+}
+
+
+static void brcmf_msgbuf_flowring_worker(struct work_struct *work)
+{
+	struct brcmf_msgbuf *msgbuf;
+	struct brcmf_msgbuf_work_item *create;
+
+	msgbuf = container_of(work, struct brcmf_msgbuf, flowring_work);
+
+	while ((create = brcmf_msgbuf_dequeue_work(msgbuf))) {
+		brcmf_msgbuf_flowring_create_worker(msgbuf, create);
+		kfree(create);
+	}
+}
+
+
+static u32 brcmf_msgbuf_flowring_create(struct brcmf_msgbuf *msgbuf, int ifidx,
+					struct sk_buff *skb)
+{
+	struct brcmf_msgbuf_work_item *create;
+	struct ethhdr *eh = (struct ethhdr *)(skb->data);
+	u32 flowid;
+	ulong flags;
+
+	create = kzalloc(sizeof(*create), GFP_ATOMIC);
+	if (create == NULL)
+		return BRCMF_FLOWRING_INVALID_ID;
+
+	flowid = brcmf_flowring_create(msgbuf->flow, eh->h_dest,
+				       skb->priority, ifidx);
+	if (flowid == BRCMF_FLOWRING_INVALID_ID) {
+		kfree(create);
+		return flowid;
+	}
+
+	create->flowid = flowid;
+	create->ifidx = ifidx;
+	memcpy(create->sa, eh->h_source, ETH_ALEN);
+	memcpy(create->da, eh->h_dest, ETH_ALEN);
+
+	spin_lock_irqsave(&msgbuf->flowring_work_lock, flags);
+	list_add_tail(&create->queue, &msgbuf->work_queue);
+	spin_unlock_irqrestore(&msgbuf->flowring_work_lock, flags);
+	schedule_work(&msgbuf->flowring_work);
+
+	return flowid;
+}
+
+
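+/* Drain the flow ring queue: DMA-map each skb, claim a packet id and post a
+ * TX_POST descriptor on the corresponding flow ring, flushing the ring to
+ * the device in batches.
+ */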
+static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u8 flowid)
+{
+	struct brcmf_flowring *flow = msgbuf->flow;
+	struct brcmf_commonring *commonring;
+	void *ret_ptr;
+	u32 count;
+	struct sk_buff *skb;
+	dma_addr_t physaddr;
+	u32 pktid;
+	struct msgbuf_tx_msghdr *tx_msghdr;
+	u64 address;
+
+	commonring = msgbuf->flowrings[flowid];
+	if (!brcmf_commonring_write_available(commonring))
+		return;
+
+	brcmf_commonring_lock(commonring);
+
+	count = BRCMF_MSGBUF_TX_FLUSH_CNT2 - BRCMF_MSGBUF_TX_FLUSH_CNT1;
+	while (brcmf_flowring_qlen(flow, flowid)) {
+		skb = brcmf_flowring_dequeue(flow, flowid);
+		if (skb == NULL) {
+			brcmf_err("No SKB, but qlen %d\n",
+				  brcmf_flowring_qlen(flow, flowid));
+			break;
+		}
+		skb_orphan(skb);
+		if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
+					     msgbuf->tx_pktids, skb, ETH_HLEN,
+					     &physaddr, &pktid)) {
+			brcmf_flowring_reinsert(flow, flowid, skb);
+			brcmf_err("No PKTID available !!\n");
+			break;
+		}
+		ret_ptr = brcmf_commonring_reserve_for_write(commonring);
+		if (!ret_ptr) {
+			brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
+					       msgbuf->tx_pktids, pktid);
+			brcmf_flowring_reinsert(flow, flowid, skb);
+			break;
+		}
+		count++;
+
+		tx_msghdr = (struct msgbuf_tx_msghdr *)ret_ptr;
+
+		tx_msghdr->msg.msgtype = MSGBUF_TYPE_TX_POST;
+		tx_msghdr->msg.request_id = cpu_to_le32(pktid);
+		tx_msghdr->msg.ifidx = brcmf_flowring_ifidx_get(flow, flowid);
+		tx_msghdr->flags = BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3;
+		tx_msghdr->flags |= (skb->priority & 0x07) <<
+				    BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT;
+		tx_msghdr->seg_cnt = 1;
+		memcpy(tx_msghdr->txhdr, skb->data, ETH_HLEN);
+		tx_msghdr->data_len = cpu_to_le16(skb->len - ETH_HLEN);
+		address = (u64)physaddr;
+		tx_msghdr->data_buf_addr.high_addr = cpu_to_le32(address >> 32);
+		tx_msghdr->data_buf_addr.low_addr =
+			cpu_to_le32(address & 0xffffffff);
+		tx_msghdr->metadata_buf_len = 0;
+		tx_msghdr->metadata_buf_addr.high_addr = 0;
+		tx_msghdr->metadata_buf_addr.low_addr = 0;
+		atomic_inc(&commonring->outstanding_tx);
+		if (count >= BRCMF_MSGBUF_TX_FLUSH_CNT2) {
+			brcmf_commonring_write_complete(commonring);
+			count = 0;
+		}
+	}
+	if (count)
+		brcmf_commonring_write_complete(commonring);
+	brcmf_commonring_unlock(commonring);
+}
+
+
+static void brcmf_msgbuf_txflow_worker(struct work_struct *worker)
+{
+	struct brcmf_msgbuf *msgbuf;
+	u32 flowid;
+
+	msgbuf = container_of(worker, struct brcmf_msgbuf, txflow_work);
+	for_each_set_bit(flowid, msgbuf->flow_map, msgbuf->nrof_flowrings) {
+		clear_bit(flowid, msgbuf->flow_map);
+		brcmf_msgbuf_txflow(msgbuf, flowid);
+	}
+}
+
+
+static int brcmf_msgbuf_schedule_txdata(struct brcmf_msgbuf *msgbuf, u32 flowid,
+					bool force)
+{
+	struct brcmf_commonring *commonring;
+
+	set_bit(flowid, msgbuf->flow_map);
+	commonring = msgbuf->flowrings[flowid];
+	if ((force) || (atomic_read(&commonring->outstanding_tx) <
+			BRCMF_MSGBUF_DELAY_TXWORKER_THRS))
+		queue_work(msgbuf->txflow_wq, &msgbuf->txflow_work);
+
+	return 0;
+}
+
+
+static int brcmf_msgbuf_txdata(struct brcmf_pub *drvr, int ifidx,
+			       u8 offset, struct sk_buff *skb)
+{
+	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
+	struct brcmf_flowring *flow = msgbuf->flow;
+	struct ethhdr *eh = (struct ethhdr *)(skb->data);
+	u32 flowid;
+	u32 queue_count;
+	bool force;
+
+	flowid = brcmf_flowring_lookup(flow, eh->h_dest, skb->priority, ifidx);
+	if (flowid == BRCMF_FLOWRING_INVALID_ID) {
+		flowid = brcmf_msgbuf_flowring_create(msgbuf, ifidx, skb);
+		if (flowid == BRCMF_FLOWRING_INVALID_ID)
+			return -ENOMEM;
+	}
+	queue_count = brcmf_flowring_enqueue(flow, flowid, skb);
+	force = ((queue_count % BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) == 0);
+	brcmf_msgbuf_schedule_txdata(msgbuf, flowid, force);
+
+	return 0;
+}
+
+
+static void
+brcmf_msgbuf_configure_addr_mode(struct brcmf_pub *drvr, int ifidx,
+				 enum proto_addr_mode addr_mode)
+{
+	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
+
+	brcmf_flowring_configure_addr_mode(msgbuf->flow, ifidx, addr_mode);
+}
+
+
+static void
+brcmf_msgbuf_delete_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
+{
+	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
+
+	brcmf_flowring_delete_peer(msgbuf->flow, ifidx, peer);
+}
+
+
+static void
+brcmf_msgbuf_add_tdls_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
+{
+	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
+
+	brcmf_flowring_add_tdls_peer(msgbuf->flow, ifidx, peer);
+}
+
+
+static void
+brcmf_msgbuf_process_ioctl_complete(struct brcmf_msgbuf *msgbuf, void *buf)
+{
+	struct msgbuf_ioctl_resp_hdr *ioctl_resp;
+
+	ioctl_resp = (struct msgbuf_ioctl_resp_hdr *)buf;
+
+	msgbuf->ioctl_resp_status =
+			(s16)le16_to_cpu(ioctl_resp->compl_hdr.status);
+	msgbuf->ioctl_resp_ret_len = le16_to_cpu(ioctl_resp->resp_len);
+	msgbuf->ioctl_resp_pktid = le32_to_cpu(ioctl_resp->msg.request_id);
+
+	brcmf_msgbuf_ioctl_resp_wake(msgbuf);
+
+	if (msgbuf->cur_ioctlrespbuf)
+		msgbuf->cur_ioctlrespbuf--;
+	brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf);
+}
+
+
+static void
+brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf)
+{
+	struct brcmf_commonring *commonring;
+	struct msgbuf_tx_status *tx_status;
+	u32 idx;
+	struct sk_buff *skb;
+	u16 flowid;
+
+	tx_status = (struct msgbuf_tx_status *)buf;
+	idx = le32_to_cpu(tx_status->msg.request_id);
+	flowid = le16_to_cpu(tx_status->compl_hdr.flow_ring_id);
+	flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS;
+	skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
+				     msgbuf->tx_pktids, idx);
+	if (!skb)
+		return;
+
+	set_bit(flowid, msgbuf->txstatus_done_map);
+	commonring = msgbuf->flowrings[flowid];
+	atomic_dec(&commonring->outstanding_tx);
+
+	brcmf_txfinalize(brcmf_get_ifp(msgbuf->drvr, tx_status->msg.ifidx),
+			 skb, true);
+}
+
+
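+/* Post up to 'count' receive buffers to the device by reserving descriptors
+ * on the rxpost submit ring and handing over DMA-mapped skbs; returns the
+ * number of buffers actually posted.
+ */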
+static u32 brcmf_msgbuf_rxbuf_data_post(struct brcmf_msgbuf *msgbuf, u32 count)
+{
+	struct brcmf_commonring *commonring;
+	void *ret_ptr;
+	struct sk_buff *skb;
+	u16 alloced;
+	u32 pktlen;
+	dma_addr_t physaddr;
+	struct msgbuf_rx_bufpost *rx_bufpost;
+	u64 address;
+	u32 pktid;
+	u32 i;
+
+	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_RXPOST_SUBMIT];
+	ret_ptr = brcmf_commonring_reserve_for_write_multiple(commonring,
+							      count,
+							      &alloced);
+	if (!ret_ptr) {
+		brcmf_dbg(MSGBUF, "Failed to reserve space in commonring\n");
+		return 0;
+	}
+
+	for (i = 0; i < alloced; i++) {
+		rx_bufpost = (struct msgbuf_rx_bufpost *)ret_ptr;
+		memset(rx_bufpost, 0, sizeof(*rx_bufpost));
+
+		skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE);
+
+		if (skb == NULL) {
+			brcmf_err("Failed to alloc SKB\n");
+			brcmf_commonring_write_cancel(commonring, alloced - i);
+			break;
+		}
+
+		pktlen = skb->len;
+		if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
+					     msgbuf->rx_pktids, skb, 0,
+					     &physaddr, &pktid)) {
+			dev_kfree_skb_any(skb);
+			brcmf_err("No PKTID available !!\n");
+			brcmf_commonring_write_cancel(commonring, alloced - i);
+			break;
+		}
+
+		if (msgbuf->rx_metadata_offset) {
+			address = (u64)physaddr;
+			rx_bufpost->metadata_buf_len =
+				cpu_to_le16(msgbuf->rx_metadata_offset);
+			rx_bufpost->metadata_buf_addr.high_addr =
+				cpu_to_le32(address >> 32);
+			rx_bufpost->metadata_buf_addr.low_addr =
+				cpu_to_le32(address & 0xffffffff);
+
+			skb_pull(skb, msgbuf->rx_metadata_offset);
+			pktlen = skb->len;
+			physaddr += msgbuf->rx_metadata_offset;
+		}
+		rx_bufpost->msg.msgtype = MSGBUF_TYPE_RXBUF_POST;
+		rx_bufpost->msg.request_id = cpu_to_le32(pktid);
+
+		address = (u64)physaddr;
+		rx_bufpost->data_buf_len = cpu_to_le16((u16)pktlen);
+		rx_bufpost->data_buf_addr.high_addr =
+			cpu_to_le32(address >> 32);
+		rx_bufpost->data_buf_addr.low_addr =
+			cpu_to_le32(address & 0xffffffff);
+
+		ret_ptr += brcmf_commonring_len_item(commonring);
+	}
+
+	if (i)
+		brcmf_commonring_write_complete(commonring);
+
+	return i;
+}
+
+
+static void
+brcmf_msgbuf_rxbuf_data_fill(struct brcmf_msgbuf *msgbuf)
+{
+	u32 fillbufs;
+	u32 retcount;
+
+	fillbufs = msgbuf->max_rxbufpost - msgbuf->rxbufpost;
+
+	while (fillbufs) {
+		retcount = brcmf_msgbuf_rxbuf_data_post(msgbuf, fillbufs);
+		if (!retcount)
+			break;
+		msgbuf->rxbufpost += retcount;
+		fillbufs -= retcount;
+	}
+}
+
+
+static void
+brcmf_msgbuf_update_rxbufpost_count(struct brcmf_msgbuf *msgbuf, u16 rxcnt)
+{
+	msgbuf->rxbufpost -= rxcnt;
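+	/* Refill once the number of posted rx buffers has dropped at least
+	 * BRCMF_MSGBUF_RXBUFPOST_THRESHOLD below the maximum.
+	 */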
+	if (msgbuf->rxbufpost <= (msgbuf->max_rxbufpost -
+				  BRCMF_MSGBUF_RXBUFPOST_THRESHOLD))
+		brcmf_msgbuf_rxbuf_data_fill(msgbuf);
+}
+
+
+static u32
+brcmf_msgbuf_rxbuf_ctrl_post(struct brcmf_msgbuf *msgbuf, bool event_buf,
+			     u32 count)
+{
+	struct brcmf_commonring *commonring;
+	void *ret_ptr;
+	struct sk_buff *skb;
+	u16 alloced;
+	u32 pktlen;
+	dma_addr_t physaddr;
+	struct msgbuf_rx_ioctl_resp_or_event *rx_bufpost;
+	u64 address;
+	u32 pktid;
+	u32 i;
+
+	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
+	brcmf_commonring_lock(commonring);
+	ret_ptr = brcmf_commonring_reserve_for_write_multiple(commonring,
+							      count,
+							      &alloced);
+	if (!ret_ptr) {
+		brcmf_err("Failed to reserve space in commonring\n");
+		brcmf_commonring_unlock(commonring);
+		return 0;
+	}
+
+	for (i = 0; i < alloced; i++) {
+		rx_bufpost = (struct msgbuf_rx_ioctl_resp_or_event *)ret_ptr;
+		memset(rx_bufpost, 0, sizeof(*rx_bufpost));
+
+		skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE);
+
+		if (skb == NULL) {
+			brcmf_err("Failed to alloc SKB\n");
+			brcmf_commonring_write_cancel(commonring, alloced - i);
+			break;
+		}
+
+		pktlen = skb->len;
+		if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
+					     msgbuf->rx_pktids, skb, 0,
+					     &physaddr, &pktid)) {
+			dev_kfree_skb_any(skb);
+			brcmf_err("No PKTID available !!\n");
+			brcmf_commonring_write_cancel(commonring, alloced - i);
+			break;
+		}
+		if (event_buf)
+			rx_bufpost->msg.msgtype = MSGBUF_TYPE_EVENT_BUF_POST;
+		else
+			rx_bufpost->msg.msgtype =
+				MSGBUF_TYPE_IOCTLRESP_BUF_POST;
+		rx_bufpost->msg.request_id = cpu_to_le32(pktid);
+
+		address = (u64)physaddr;
+		rx_bufpost->host_buf_len = cpu_to_le16((u16)pktlen);
+		rx_bufpost->host_buf_addr.high_addr =
+			cpu_to_le32(address >> 32);
+		rx_bufpost->host_buf_addr.low_addr =
+			cpu_to_le32(address & 0xffffffff);
+
+		ret_ptr += brcmf_commonring_len_item(commonring);
+	}
+
+	if (i)
+		brcmf_commonring_write_complete(commonring);
+
+	brcmf_commonring_unlock(commonring);
+
+	return i;
+}
+
+
+static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf)
+{
+	u32 count;
+
+	count = msgbuf->max_ioctlrespbuf - msgbuf->cur_ioctlrespbuf;
+	count = brcmf_msgbuf_rxbuf_ctrl_post(msgbuf, false, count);
+	msgbuf->cur_ioctlrespbuf += count;
+}
+
+
+static void brcmf_msgbuf_rxbuf_event_post(struct brcmf_msgbuf *msgbuf)
+{
+	u32 count;
+
+	count = msgbuf->max_eventbuf - msgbuf->cur_eventbuf;
+	count = brcmf_msgbuf_rxbuf_ctrl_post(msgbuf, true, count);
+	msgbuf->cur_eventbuf += count;
+}
+
+
+static void
+brcmf_msgbuf_rx_skb(struct brcmf_msgbuf *msgbuf, struct sk_buff *skb,
+		    u8 ifidx)
+{
+	struct brcmf_if *ifp;
+
+	ifp = brcmf_get_ifp(msgbuf->drvr, ifidx);
+	if (!ifp || !ifp->ndev) {
+		brcmf_err("Received pkt for invalid ifidx %d\n", ifidx);
+		brcmu_pkt_buf_free_skb(skb);
+		return;
+	}
+	brcmf_netif_rx(ifp, skb);
+}
+
+
+static void brcmf_msgbuf_process_event(struct brcmf_msgbuf *msgbuf, void *buf)
+{
+	struct msgbuf_rx_event *event;
+	u32 idx;
+	u16 buflen;
+	struct sk_buff *skb;
+
+	event = (struct msgbuf_rx_event *)buf;
+	idx = le32_to_cpu(event->msg.request_id);
+	buflen = le16_to_cpu(event->event_data_len);
+
+	if (msgbuf->cur_eventbuf)
+		msgbuf->cur_eventbuf--;
+	brcmf_msgbuf_rxbuf_event_post(msgbuf);
+
+	skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
+				     msgbuf->rx_pktids, idx);
+	if (!skb)
+		return;
+
+	if (msgbuf->rx_dataoffset)
+		skb_pull(skb, msgbuf->rx_dataoffset);
+
+	skb_trim(skb, buflen);
+
+	brcmf_msgbuf_rx_skb(msgbuf, skb, event->msg.ifidx);
+}
+
+
+static void
+brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
+{
+	struct msgbuf_rx_complete *rx_complete;
+	struct sk_buff *skb;
+	u16 data_offset;
+	u16 buflen;
+	u32 idx;
+
+	brcmf_msgbuf_update_rxbufpost_count(msgbuf, 1);
+
+	rx_complete = (struct msgbuf_rx_complete *)buf;
+	data_offset = le16_to_cpu(rx_complete->data_offset);
+	buflen = le16_to_cpu(rx_complete->data_len);
+	idx = le32_to_cpu(rx_complete->msg.request_id);
+
+	skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
+				     msgbuf->rx_pktids, idx);
+	if (!skb)
+		return;
+
+	if (data_offset)
+		skb_pull(skb, data_offset);
+	else if (msgbuf->rx_dataoffset)
+		skb_pull(skb, msgbuf->rx_dataoffset);
+
+	skb_trim(skb, buflen);
+
+	brcmf_msgbuf_rx_skb(msgbuf, skb, rx_complete->msg.ifidx);
+}
+
+
+static void
+brcmf_msgbuf_process_flow_ring_create_response(struct brcmf_msgbuf *msgbuf,
+					       void *buf)
+{
+	struct msgbuf_flowring_create_resp *flowring_create_resp;
+	u16 status;
+	u16 flowid;
+
+	flowring_create_resp = (struct msgbuf_flowring_create_resp *)buf;
+
+	flowid = le16_to_cpu(flowring_create_resp->compl_hdr.flow_ring_id);
+	flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS;
+	status =  le16_to_cpu(flowring_create_resp->compl_hdr.status);
+
+	if (status) {
+		brcmf_err("Flowring creation failed, code %d\n", status);
+		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
+		return;
+	}
+	brcmf_dbg(MSGBUF, "Flowring %d Create response status %d\n", flowid,
+		  status);
+
+	brcmf_flowring_open(msgbuf->flow, flowid);
+
+	brcmf_msgbuf_schedule_txdata(msgbuf, flowid, true);
+}
+
+
+static void
+brcmf_msgbuf_process_flow_ring_delete_response(struct brcmf_msgbuf *msgbuf,
+					       void *buf)
+{
+	struct msgbuf_flowring_delete_resp *flowring_delete_resp;
+	u16 status;
+	u16 flowid;
+
+	flowring_delete_resp = (struct msgbuf_flowring_delete_resp *)buf;
+
+	flowid = le16_to_cpu(flowring_delete_resp->compl_hdr.flow_ring_id);
+	flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS;
+	status =  le16_to_cpu(flowring_delete_resp->compl_hdr.status);
+
+	if (status) {
+		brcmf_err("Flowring deletion failed, code %d\n", status);
+		brcmf_flowring_delete(msgbuf->flow, flowid);
+		return;
+	}
+	brcmf_dbg(MSGBUF, "Flowring %d Delete response status %d\n", flowid,
+		  status);
+
+	brcmf_msgbuf_remove_flowring(msgbuf, flowid);
+}
+
+
+static void brcmf_msgbuf_process_msgtype(struct brcmf_msgbuf *msgbuf, void *buf)
+{
+	struct msgbuf_common_hdr *msg;
+
+	msg = (struct msgbuf_common_hdr *)buf;
+	switch (msg->msgtype) {
+	case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT:
+		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT\n");
+		brcmf_msgbuf_process_flow_ring_create_response(msgbuf, buf);
+		break;
+	case MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT:
+		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT\n");
+		brcmf_msgbuf_process_flow_ring_delete_response(msgbuf, buf);
+		break;
+	case MSGBUF_TYPE_IOCTLPTR_REQ_ACK:
+		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_IOCTLPTR_REQ_ACK\n");
+		break;
+	case MSGBUF_TYPE_IOCTL_CMPLT:
+		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_IOCTL_CMPLT\n");
+		brcmf_msgbuf_process_ioctl_complete(msgbuf, buf);
+		break;
+	case MSGBUF_TYPE_WL_EVENT:
+		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_WL_EVENT\n");
+		brcmf_msgbuf_process_event(msgbuf, buf);
+		break;
+	case MSGBUF_TYPE_TX_STATUS:
+		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_TX_STATUS\n");
+		brcmf_msgbuf_process_txstatus(msgbuf, buf);
+		break;
+	case MSGBUF_TYPE_RX_CMPLT:
+		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_RX_CMPLT\n");
+		brcmf_msgbuf_process_rx_complete(msgbuf, buf);
+		break;
+	default:
+		brcmf_err("Unsupported msgtype %d\n", msg->msgtype);
+		break;
+	}
+}
+
+
+static void brcmf_msgbuf_process_rx(struct brcmf_msgbuf *msgbuf,
+				    struct brcmf_commonring *commonring)
+{
+	void *buf;
+	u16 count;
+	u16 processed;
+
+again:
+	buf = brcmf_commonring_get_read_ptr(commonring, &count);
+	if (buf == NULL)
+		return;
+
+	processed = 0;
+	while (count) {
+		brcmf_msgbuf_process_msgtype(msgbuf,
+					     buf + msgbuf->rx_dataoffset);
+		buf += brcmf_commonring_len_item(commonring);
+		processed++;
+		if (processed == BRCMF_MSGBUF_UPDATE_RX_PTR_THRS) {
+			brcmf_commonring_read_complete(commonring, processed);
+			processed = 0;
+		}
+		count--;
+	}
+	if (processed)
+		brcmf_commonring_read_complete(commonring, processed);
+
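+	/* A read pointer of 0 here means the read just wrapped at the end of
+	 * the ring; more items may already be waiting at the start, so go
+	 * around once more.
+	 */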
+	if (commonring->r_ptr == 0)
+		goto again;
+}
+
+
+int brcmf_proto_msgbuf_rx_trigger(struct device *dev)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_pub *drvr = bus_if->drvr;
+	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
+	struct brcmf_commonring *commonring;
+	void *buf;
+	u32 flowid;
+	int qlen;
+
+	buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE];
+	brcmf_msgbuf_process_rx(msgbuf, buf);
+	buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_TX_COMPLETE];
+	brcmf_msgbuf_process_rx(msgbuf, buf);
+	buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_CONTROL_COMPLETE];
+	brcmf_msgbuf_process_rx(msgbuf, buf);
+
+	for_each_set_bit(flowid, msgbuf->txstatus_done_map,
+			 msgbuf->nrof_flowrings) {
+		clear_bit(flowid, msgbuf->txstatus_done_map);
+		commonring = msgbuf->flowrings[flowid];
+		qlen = brcmf_flowring_qlen(msgbuf->flow, flowid);
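+		/* Re-kick the tx worker when a sizeable backlog is queued for
+		 * this flowring, or when there is any backlog and the
+		 * completions above drained the outstanding tx count below
+		 * the trickle threshold.
+		 */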
+		if ((qlen > BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) ||
+		    ((qlen) && (atomic_read(&commonring->outstanding_tx) <
+				BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS)))
+			brcmf_msgbuf_schedule_txdata(msgbuf, flowid, true);
+	}
+
+	return 0;
+}
+
+
+void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid)
+{
+	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
+	struct msgbuf_tx_flowring_delete_req *delete;
+	struct brcmf_commonring *commonring;
+	void *ret_ptr;
+	u8 ifidx;
+	int err;
+
+	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
+	brcmf_commonring_lock(commonring);
+	ret_ptr = brcmf_commonring_reserve_for_write(commonring);
+	if (!ret_ptr) {
+		brcmf_err("FW unaware, flowring will be removed !!\n");
+		brcmf_commonring_unlock(commonring);
+		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
+		return;
+	}
+
+	delete = (struct msgbuf_tx_flowring_delete_req *)ret_ptr;
+
+	ifidx = brcmf_flowring_ifidx_get(msgbuf->flow, flowid);
+
+	delete->msg.msgtype = MSGBUF_TYPE_FLOW_RING_DELETE;
+	delete->msg.ifidx = ifidx;
+	delete->msg.request_id = 0;
+
+	delete->flow_ring_id = cpu_to_le16(flowid +
+					   BRCMF_NROF_H2D_COMMON_MSGRINGS);
+	delete->reason = 0;
+
+	brcmf_dbg(MSGBUF, "Send Flow Delete Req flow ID %d, ifindex %d\n",
+		  flowid, ifidx);
+
+	err = brcmf_commonring_write_complete(commonring);
+	brcmf_commonring_unlock(commonring);
+	if (err) {
+		brcmf_err("Failed to submit RING_DELETE, flowring will be removed\n");
+		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
+	}
+}
+
+#ifdef DEBUG
+static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(seq->private);
+	struct brcmf_pub *drvr = bus_if->drvr;
+	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
+	struct brcmf_commonring *commonring;
+	u16 i;
+	struct brcmf_flowring_ring *ring;
+	struct brcmf_flowring_hash *hash;
+
+	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
+	seq_printf(seq, "h2d_ctl_submit: rp %4u, wp %4u, depth %4u\n",
+		   commonring->r_ptr, commonring->w_ptr, commonring->depth);
+	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_RXPOST_SUBMIT];
+	seq_printf(seq, "h2d_rx_submit:  rp %4u, wp %4u, depth %4u\n",
+		   commonring->r_ptr, commonring->w_ptr, commonring->depth);
+	commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_CONTROL_COMPLETE];
+	seq_printf(seq, "d2h_ctl_cmplt:  rp %4u, wp %4u, depth %4u\n",
+		   commonring->r_ptr, commonring->w_ptr, commonring->depth);
+	commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_TX_COMPLETE];
+	seq_printf(seq, "d2h_tx_cmplt:   rp %4u, wp %4u, depth %4u\n",
+		   commonring->r_ptr, commonring->w_ptr, commonring->depth);
+	commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE];
+	seq_printf(seq, "d2h_rx_cmplt:   rp %4u, wp %4u, depth %4u\n",
+		   commonring->r_ptr, commonring->w_ptr, commonring->depth);
+
+	seq_printf(seq, "\nh2d_flowrings: depth %u\n",
+		   BRCMF_H2D_TXFLOWRING_MAX_ITEM);
+	seq_puts(seq, "Active flowrings:\n");
+	hash = msgbuf->flow->hash;
+	for (i = 0; i < msgbuf->flow->nrofrings; i++) {
+		if (!msgbuf->flow->rings[i])
+			continue;
+		ring = msgbuf->flow->rings[i];
+		if (ring->status != RING_OPEN)
+			continue;
+		commonring = msgbuf->flowrings[i];
+		hash = &msgbuf->flow->hash[ring->hash_id];
+		seq_printf(seq, "id %3u: rp %4u, wp %4u, qlen %4u, blocked %u\n"
+				"        ifidx %u, fifo %u, da %pM\n",
+				i, commonring->r_ptr, commonring->w_ptr,
+				skb_queue_len(&ring->skblist), ring->blocked,
+				hash->ifidx, hash->fifo, hash->mac);
+	}
+
+	return 0;
+}
+#else
+static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data)
+{
+	return 0;
+}
+#endif
+
+int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
+{
+	struct brcmf_bus_msgbuf *if_msgbuf;
+	struct brcmf_msgbuf *msgbuf;
+	u64 address;
+	u32 count;
+
+	if_msgbuf = drvr->bus_if->msgbuf;
+	msgbuf = kzalloc(sizeof(*msgbuf), GFP_KERNEL);
+	if (!msgbuf)
+		goto fail;
+
+	msgbuf->txflow_wq = create_singlethread_workqueue("msgbuf_txflow");
+	if (msgbuf->txflow_wq == NULL) {
+		brcmf_err("workqueue creation failed\n");
+		goto fail;
+	}
+	INIT_WORK(&msgbuf->txflow_work, brcmf_msgbuf_txflow_worker);
+	count = BITS_TO_LONGS(if_msgbuf->nrof_flowrings);
+	count = count * sizeof(unsigned long);
+	msgbuf->flow_map = kzalloc(count, GFP_KERNEL);
+	if (!msgbuf->flow_map)
+		goto fail;
+
+	msgbuf->txstatus_done_map = kzalloc(count, GFP_KERNEL);
+	if (!msgbuf->txstatus_done_map)
+		goto fail;
+
+	msgbuf->drvr = drvr;
+	msgbuf->ioctbuf = dma_alloc_coherent(drvr->bus_if->dev,
+					     BRCMF_TX_IOCTL_MAX_MSG_SIZE,
+					     &msgbuf->ioctbuf_handle,
+					     GFP_KERNEL);
+	if (!msgbuf->ioctbuf)
+		goto fail;
+	address = (u64)msgbuf->ioctbuf_handle;
+	msgbuf->ioctbuf_phys_hi = address >> 32;
+	msgbuf->ioctbuf_phys_lo = address & 0xffffffff;
+
+	drvr->proto->hdrpull = brcmf_msgbuf_hdrpull;
+	drvr->proto->query_dcmd = brcmf_msgbuf_query_dcmd;
+	drvr->proto->set_dcmd = brcmf_msgbuf_set_dcmd;
+	drvr->proto->txdata = brcmf_msgbuf_txdata;
+	drvr->proto->configure_addr_mode = brcmf_msgbuf_configure_addr_mode;
+	drvr->proto->delete_peer = brcmf_msgbuf_delete_peer;
+	drvr->proto->add_tdls_peer = brcmf_msgbuf_add_tdls_peer;
+	drvr->proto->pd = msgbuf;
+
+	init_waitqueue_head(&msgbuf->ioctl_resp_wait);
+
+	msgbuf->commonrings =
+		(struct brcmf_commonring **)if_msgbuf->commonrings;
+	msgbuf->flowrings = (struct brcmf_commonring **)if_msgbuf->flowrings;
+	msgbuf->nrof_flowrings = if_msgbuf->nrof_flowrings;
+	msgbuf->flowring_dma_handle = kzalloc(msgbuf->nrof_flowrings *
+		sizeof(*msgbuf->flowring_dma_handle), GFP_KERNEL);
+	if (!msgbuf->flowring_dma_handle)
+		goto fail;
+
+	msgbuf->rx_dataoffset = if_msgbuf->rx_dataoffset;
+	msgbuf->max_rxbufpost = if_msgbuf->max_rxbufpost;
+
+	msgbuf->max_ioctlrespbuf = BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST;
+	msgbuf->max_eventbuf = BRCMF_MSGBUF_MAX_EVENTBUF_POST;
+
+	msgbuf->tx_pktids = brcmf_msgbuf_init_pktids(NR_TX_PKTIDS,
+						     DMA_TO_DEVICE);
+	if (!msgbuf->tx_pktids)
+		goto fail;
+	msgbuf->rx_pktids = brcmf_msgbuf_init_pktids(NR_RX_PKTIDS,
+						     DMA_FROM_DEVICE);
+	if (!msgbuf->rx_pktids)
+		goto fail;
+
+	msgbuf->flow = brcmf_flowring_attach(drvr->bus_if->dev,
+					     if_msgbuf->nrof_flowrings);
+	if (!msgbuf->flow)
+		goto fail;
+
+	brcmf_dbg(MSGBUF, "Feeding buffers, rx data %d, rx event %d, rx ioctl resp %d\n",
+		  msgbuf->max_rxbufpost, msgbuf->max_eventbuf,
+		  msgbuf->max_ioctlrespbuf);
+	count = 0;
+	do {
+		brcmf_msgbuf_rxbuf_data_fill(msgbuf);
+		if (msgbuf->max_rxbufpost != msgbuf->rxbufpost)
+			msleep(10);
+		else
+			break;
+		count++;
+	} while (count < 10);
+	brcmf_msgbuf_rxbuf_event_post(msgbuf);
+	brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf);
+
+	INIT_WORK(&msgbuf->flowring_work, brcmf_msgbuf_flowring_worker);
+	spin_lock_init(&msgbuf->flowring_work_lock);
+	INIT_LIST_HEAD(&msgbuf->work_queue);
+
+	brcmf_debugfs_add_entry(drvr, "msgbuf_stats", brcmf_msgbuf_stats_read);
+
+	return 0;
+
+fail:
+	if (msgbuf) {
+		kfree(msgbuf->flow_map);
+		kfree(msgbuf->txstatus_done_map);
+		brcmf_msgbuf_release_pktids(msgbuf);
+		kfree(msgbuf->flowring_dma_handle);
+		if (msgbuf->ioctbuf)
+			dma_free_coherent(drvr->bus_if->dev,
+					  BRCMF_TX_IOCTL_MAX_MSG_SIZE,
+					  msgbuf->ioctbuf,
+					  msgbuf->ioctbuf_handle);
+		kfree(msgbuf);
+	}
+	return -ENOMEM;
+}
+
+
+void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr)
+{
+	struct brcmf_msgbuf *msgbuf;
+	struct brcmf_msgbuf_work_item *work;
+
+	brcmf_dbg(TRACE, "Enter\n");
+	if (drvr->proto->pd) {
+		msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
+		cancel_work_sync(&msgbuf->flowring_work);
+		while (!list_empty(&msgbuf->work_queue)) {
+			work = list_first_entry(&msgbuf->work_queue,
+						struct brcmf_msgbuf_work_item,
+						queue);
+			list_del(&work->queue);
+			kfree(work);
+		}
+		kfree(msgbuf->flow_map);
+		kfree(msgbuf->txstatus_done_map);
+		if (msgbuf->txflow_wq)
+			destroy_workqueue(msgbuf->txflow_wq);
+
+		brcmf_flowring_detach(msgbuf->flow);
+		dma_free_coherent(drvr->bus_if->dev,
+				  BRCMF_TX_IOCTL_MAX_MSG_SIZE,
+				  msgbuf->ioctbuf, msgbuf->ioctbuf_handle);
+		brcmf_msgbuf_release_pktids(msgbuf);
+		kfree(msgbuf->flowring_dma_handle);
+		kfree(msgbuf);
+		drvr->proto->pd = NULL;
+	}
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h
new file mode 100644
index 0000000..3d513e4
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h
@@ -0,0 +1,47 @@
+/* Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef BRCMFMAC_MSGBUF_H
+#define BRCMFMAC_MSGBUF_H
+
+#ifdef CONFIG_BRCMFMAC_PROTO_MSGBUF
+
+#define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM	64
+#define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM	512
+#define BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM	64
+#define BRCMF_D2H_MSGRING_TX_COMPLETE_MAX_ITEM		1024
+#define BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM		512
+#define BRCMF_H2D_TXFLOWRING_MAX_ITEM			512
+
+#define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE	40
+#define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE	32
+#define BRCMF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE	24
+#define BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE		16
+#define BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE		32
+#define BRCMF_H2D_TXFLOWRING_ITEMSIZE			48
+
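+/*
+ * For illustration only: each ring buffer holds MAX_ITEM items of ITEMSIZE
+ * bytes, e.g. an H2D tx flowring holds 512 items of 48 bytes (24576 bytes).
+ */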
+
+int brcmf_proto_msgbuf_rx_trigger(struct device *dev);
+void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid);
+int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr);
+void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr);
+#else
+static inline int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
+{
+	return 0;
+}
+static inline void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr) {}
+#endif
+
+#endif /* BRCMFMAC_MSGBUF_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/of.c b/drivers/net/wireless/brcm80211/brcmfmac/of.c
new file mode 100644
index 0000000..03f35e0
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/of.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/mmc/card.h>
+#include <linux/platform_data/brcmfmac-sdio.h>
+#include <linux/mmc/sdio_func.h>
+
+#include <defs.h>
+#include "debug.h"
+#include "sdio.h"
+
+void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev)
+{
+	struct device *dev = sdiodev->dev;
+	struct device_node *np = dev->of_node;
+	int irq;
+	u32 irqf;
+	u32 val;
+
+	if (!np || !of_device_is_compatible(np, "brcm,bcm4329-fmac"))
+		return;
+
+	sdiodev->pdata = devm_kzalloc(dev, sizeof(*sdiodev->pdata), GFP_KERNEL);
+	if (!sdiodev->pdata)
+		return;
+
+	if (of_property_read_u32(np, "brcm,drive-strength", &val) == 0)
+		sdiodev->pdata->drive_strength = val;
+
+	/* make sure there are interrupts defined in the node */
+	if (!of_find_property(np, "interrupts", NULL))
+		return;
+
+	irq = irq_of_parse_and_map(np, 0);
+	if (!irq) {
+		brcmf_err("interrupt could not be mapped\n");
+		return;
+	}
+	irqf = irqd_get_trigger_type(irq_get_irq_data(irq));
+
+	sdiodev->pdata->oob_irq_supported = true;
+	sdiodev->pdata->oob_irq_nr = irq;
+	sdiodev->pdata->oob_irq_flags = irqf;
+}
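+
+/*
+ * Purely illustrative (hypothetical) device tree node that brcmf_of_probe()
+ * would parse; the property values below are made up:
+ *
+ *	wifi@1 {
+ *		compatible = "brcm,bcm4329-fmac";
+ *		brcm,drive-strength = <10>;
+ *		interrupt-parent = <&gpio1>;
+ *		interrupts = <24 8>;
+ *	};
+ *
+ * Only the compatible string is required; the drive strength and the
+ * out-of-band interrupt properties are optional, matching the checks above.
+ */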
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/of.h b/drivers/net/wireless/brcm80211/brcmfmac/of.h
new file mode 100644
index 0000000..5f7c355
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/of.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifdef CONFIG_OF
+void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev);
+#else
+static inline void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev)
+{
+}
+#endif /* CONFIG_OF */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
new file mode 100644
index 0000000..d224b3d
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
@@ -0,0 +1,2401 @@
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <net/cfg80211.h>
+
+#include <brcmu_wifi.h>
+#include <brcmu_utils.h>
+#include <defs.h>
+#include "core.h"
+#include "debug.h"
+#include "fwil.h"
+#include "fwil_types.h"
+#include "p2p.h"
+#include "cfg80211.h"
+
+/* parameters used for p2p escan */
+#define P2PAPI_SCAN_NPROBES 1
+#define P2PAPI_SCAN_DWELL_TIME_MS 80
+#define P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS 40
+#define P2PAPI_SCAN_HOME_TIME_MS 60
+#define P2PAPI_SCAN_NPROBS_TIME_MS 30
+#define P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS 100
+#define WL_SCAN_CONNECT_DWELL_TIME_MS 200
+#define WL_SCAN_JOIN_PROBE_INTERVAL_MS 20
+
+#define BRCMF_P2P_WILDCARD_SSID		"DIRECT-"
+#define BRCMF_P2P_WILDCARD_SSID_LEN	(sizeof(BRCMF_P2P_WILDCARD_SSID) - 1)
+
+#define SOCIAL_CHAN_1		1
+#define SOCIAL_CHAN_2		6
+#define SOCIAL_CHAN_3		11
+#define IS_P2P_SOCIAL_CHANNEL(channel) ((channel == SOCIAL_CHAN_1) || \
+					 (channel == SOCIAL_CHAN_2) || \
+					 (channel == SOCIAL_CHAN_3))
+#define BRCMF_P2P_TEMP_CHAN	SOCIAL_CHAN_3
+#define SOCIAL_CHAN_CNT		3
+#define AF_PEER_SEARCH_CNT	2
+
+#define BRCMF_SCB_TIMEOUT_VALUE	20
+
+#define P2P_VER			9	/* P2P version: 9=WiFi P2P v1.0 */
+#define P2P_PUB_AF_CATEGORY	0x04
+#define P2P_PUB_AF_ACTION	0x09
+#define P2P_AF_CATEGORY		0x7f
+#define P2P_OUI			"\x50\x6F\x9A"	/* P2P OUI */
+#define P2P_OUI_LEN		3		/* P2P OUI length */
+
+/* Action Frame Constants */
+#define DOT11_ACTION_HDR_LEN	2	/* action frame category + action */
+#define DOT11_ACTION_CAT_OFF	0	/* category offset */
+#define DOT11_ACTION_ACT_OFF	1	/* action offset */
+
+#define P2P_AF_DWELL_TIME		200
+#define P2P_AF_MIN_DWELL_TIME		100
+#define P2P_AF_MED_DWELL_TIME		400
+#define P2P_AF_LONG_DWELL_TIME		1000
+#define P2P_AF_TX_MAX_RETRY		1
+#define P2P_AF_MAX_WAIT_TIME		2000
+#define P2P_INVALID_CHANNEL		-1
+#define P2P_CHANNEL_SYNC_RETRY		5
+#define P2P_AF_FRM_SCAN_MAX_WAIT	1500
+#define P2P_DEFAULT_SLEEP_TIME_VSDB	200
+
+/* WiFi P2P Public Action Frame OUI Subtypes */
+#define P2P_PAF_GON_REQ		0	/* Group Owner Negotiation Req */
+#define P2P_PAF_GON_RSP		1	/* Group Owner Negotiation Rsp */
+#define P2P_PAF_GON_CONF	2	/* Group Owner Negotiation Confirm */
+#define P2P_PAF_INVITE_REQ	3	/* P2P Invitation Request */
+#define P2P_PAF_INVITE_RSP	4	/* P2P Invitation Response */
+#define P2P_PAF_DEVDIS_REQ	5	/* Device Discoverability Request */
+#define P2P_PAF_DEVDIS_RSP	6	/* Device Discoverability Response */
+#define P2P_PAF_PROVDIS_REQ	7	/* Provision Discovery Request */
+#define P2P_PAF_PROVDIS_RSP	8	/* Provision Discovery Response */
+#define P2P_PAF_SUBTYPE_INVALID	255	/* Invalid Subtype */
+
+/* WiFi P2P Action Frame OUI Subtypes */
+#define P2P_AF_NOTICE_OF_ABSENCE	0	/* Notice of Absence */
+#define P2P_AF_PRESENCE_REQ		1	/* P2P Presence Request */
+#define P2P_AF_PRESENCE_RSP		2	/* P2P Presence Response */
+#define P2P_AF_GO_DISC_REQ		3	/* GO Discoverability Request */
+
+/* P2P Service Discovery related */
+#define P2PSD_ACTION_CATEGORY		0x04	/* Public action frame */
+#define P2PSD_ACTION_ID_GAS_IREQ	0x0a	/* GAS Initial Request AF */
+#define P2PSD_ACTION_ID_GAS_IRESP	0x0b	/* GAS Initial Response AF */
+#define P2PSD_ACTION_ID_GAS_CREQ	0x0c	/* GAS Comeback Request AF */
+#define P2PSD_ACTION_ID_GAS_CRESP	0x0d	/* GAS Comeback Response AF */
+
+/**
+ * struct brcmf_p2p_disc_st_le - set discovery state in firmware.
+ *
+ * @state: requested discovery state (see enum brcmf_p2p_disc_state).
+ * @chspec: channel parameter for %WL_P2P_DISC_ST_LISTEN state.
+ * @dwell: dwell time in ms for %WL_P2P_DISC_ST_LISTEN state.
+ */
+struct brcmf_p2p_disc_st_le {
+	u8 state;
+	__le16 chspec;
+	__le16 dwell;
+};
+
+/**
+ * enum brcmf_p2p_disc_state - P2P discovery state values
+ *
+ * @WL_P2P_DISC_ST_SCAN: P2P discovery with wildcard SSID and P2P IE.
+ * @WL_P2P_DISC_ST_LISTEN: P2P discovery off-channel for specified time.
+ * @WL_P2P_DISC_ST_SEARCH: P2P discovery with P2P wildcard SSID and P2P IE.
+ */
+enum brcmf_p2p_disc_state {
+	WL_P2P_DISC_ST_SCAN,
+	WL_P2P_DISC_ST_LISTEN,
+	WL_P2P_DISC_ST_SEARCH
+};
+
+/**
+ * struct brcmf_p2p_scan_le - P2P specific scan request.
+ *
+ * @type: type of scan method requested (values: 'E' or 'S').
+ * @reserved: reserved (ignored).
+ * @eparams: parameters used for type 'E'.
+ * @sparams: parameters used for type 'S'.
+ */
+struct brcmf_p2p_scan_le {
+	u8 type;
+	u8 reserved[3];
+	union {
+		struct brcmf_escan_params_le eparams;
+		struct brcmf_scan_params_le sparams;
+	};
+};
+
+/**
+ * struct brcmf_p2p_pub_act_frame - WiFi P2P Public Action Frame
+ *
+ * @category: P2P_PUB_AF_CATEGORY
+ * @action: P2P_PUB_AF_ACTION
+ * @oui[3]: P2P_OUI
+ * @oui_type: OUI type - P2P_VER
+ * @subtype: OUI subtype - P2P_TYPE_*
+ * @dialog_token: nonzero, identifies req/rsp transaction
+ * @elts[1]: Variable length information elements.
+ */
+struct brcmf_p2p_pub_act_frame {
+	u8	category;
+	u8	action;
+	u8	oui[3];
+	u8	oui_type;
+	u8	subtype;
+	u8	dialog_token;
+	u8	elts[1];
+};
+
+/**
+ * struct brcmf_p2p_action_frame - WiFi P2P Action Frame
+ *
+ * @category: P2P_AF_CATEGORY
+ * @oui[3]: OUI - P2P_OUI
+ * @type: OUI Type - P2P_VER
+ * @subtype: OUI Subtype - P2P_AF_*
+ * @dialog_token: nonzero, identifies req/resp transaction
+ * @elts[1]: Variable length information elements.
+ */
+struct brcmf_p2p_action_frame {
+	u8	category;
+	u8	oui[3];
+	u8	type;
+	u8	subtype;
+	u8	dialog_token;
+	u8	elts[1];
+};
+
+/**
+ * struct brcmf_p2psd_gas_pub_act_frame - Wi-Fi GAS Public Action Frame
+ *
+ * @category: 0x04 Public Action Frame
+ * @action: 0x6c Advertisement Protocol
+ * @dialog_token: nonzero, identifies req/rsp transaction
+ * @query_data[1]: variable length Query Data (SD GAS ireq/iresp).
+ */
+struct brcmf_p2psd_gas_pub_act_frame {
+	u8	category;
+	u8	action;
+	u8	dialog_token;
+	u8	query_data[1];
+};
+
+/**
+ * struct brcmf_config_af_params - Action Frame Parameters for tx.
+ *
+ * @mpc_onoff: to send the action frame successfully, mpc may have to be
+ *             turned off; 0: off, 1: on, (-1): do nothing.
+ * @search_channel: true: search the peer's channel to send the af on.
+ * @extra_listen: keep the dwell time to get the af response frame.
+ */
+struct brcmf_config_af_params {
+	s32 mpc_onoff;
+	bool search_channel;
+	bool extra_listen;
+};
+
+/**
+ * brcmf_p2p_is_pub_action() - true if p2p public type frame.
+ *
+ * @frame: action frame data.
+ * @frame_len: length of action frame data.
+ *
+ * Determine if action frame is p2p public action type
+ */
+static bool brcmf_p2p_is_pub_action(void *frame, u32 frame_len)
+{
+	struct brcmf_p2p_pub_act_frame *pact_frm;
+
+	if (frame == NULL)
+		return false;
+
+	pact_frm = (struct brcmf_p2p_pub_act_frame *)frame;
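+	/* elts[] is a one byte placeholder for the variable length IEs, hence
+	 * the "- 1" below: a frame carrying no IEs is still acceptable.
+	 */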
+	if (frame_len < sizeof(struct brcmf_p2p_pub_act_frame) - 1)
+		return false;
+
+	if (pact_frm->category == P2P_PUB_AF_CATEGORY &&
+	    pact_frm->action == P2P_PUB_AF_ACTION &&
+	    pact_frm->oui_type == P2P_VER &&
+	    memcmp(pact_frm->oui, P2P_OUI, P2P_OUI_LEN) == 0)
+		return true;
+
+	return false;
+}
+
+/**
+ * brcmf_p2p_is_p2p_action() - true if p2p action type frame.
+ *
+ * @frame: action frame data.
+ * @frame_len: length of action frame data.
+ *
+ * Determine if action frame is p2p action type
+ */
+static bool brcmf_p2p_is_p2p_action(void *frame, u32 frame_len)
+{
+	struct brcmf_p2p_action_frame *act_frm;
+
+	if (frame == NULL)
+		return false;
+
+	act_frm = (struct brcmf_p2p_action_frame *)frame;
+	if (frame_len < sizeof(struct brcmf_p2p_action_frame) - 1)
+		return false;
+
+	if (act_frm->category == P2P_AF_CATEGORY &&
+	    act_frm->type  == P2P_VER &&
+	    memcmp(act_frm->oui, P2P_OUI, P2P_OUI_LEN) == 0)
+		return true;
+
+	return false;
+}
+
+/**
+ * brcmf_p2p_is_gas_action() - true if p2p gas action type frame.
+ *
+ * @frame: action frame data.
+ * @frame_len: length of action frame data.
+ *
+ * Determine if action frame is p2p gas action type
+ */
+static bool brcmf_p2p_is_gas_action(void *frame, u32 frame_len)
+{
+	struct brcmf_p2psd_gas_pub_act_frame *sd_act_frm;
+
+	if (frame == NULL)
+		return false;
+
+	sd_act_frm = (struct brcmf_p2psd_gas_pub_act_frame *)frame;
+	if (frame_len < sizeof(struct brcmf_p2psd_gas_pub_act_frame) - 1)
+		return false;
+
+	if (sd_act_frm->category != P2PSD_ACTION_CATEGORY)
+		return false;
+
+	if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IREQ ||
+	    sd_act_frm->action == P2PSD_ACTION_ID_GAS_IRESP ||
+	    sd_act_frm->action == P2PSD_ACTION_ID_GAS_CREQ ||
+	    sd_act_frm->action == P2PSD_ACTION_ID_GAS_CRESP)
+		return true;
+
+	return false;
+}
+
+/**
+ * brcmf_p2p_print_actframe() - debug print routine.
+ *
+ * @tx: true if the frame is to be transmitted, false if it was received.
+ * @frame: action frame data.
+ * @frame_len: length of action frame data.
+ *
+ * Print information about the p2p action frame
+ */
+
+#ifdef DEBUG
+
+static void brcmf_p2p_print_actframe(bool tx, void *frame, u32 frame_len)
+{
+	struct brcmf_p2p_pub_act_frame *pact_frm;
+	struct brcmf_p2p_action_frame *act_frm;
+	struct brcmf_p2psd_gas_pub_act_frame *sd_act_frm;
+
+	if (!frame || frame_len <= 2)
+		return;
+
+	if (brcmf_p2p_is_pub_action(frame, frame_len)) {
+		pact_frm = (struct brcmf_p2p_pub_act_frame *)frame;
+		switch (pact_frm->subtype) {
+		case P2P_PAF_GON_REQ:
+			brcmf_dbg(TRACE, "%s P2P Group Owner Negotiation Req Frame\n",
+				  (tx) ? "TX" : "RX");
+			break;
+		case P2P_PAF_GON_RSP:
+			brcmf_dbg(TRACE, "%s P2P Group Owner Negotiation Rsp Frame\n",
+				  (tx) ? "TX" : "RX");
+			break;
+		case P2P_PAF_GON_CONF:
+			brcmf_dbg(TRACE, "%s P2P Group Owner Negotiation Confirm Frame\n",
+				  (tx) ? "TX" : "RX");
+			break;
+		case P2P_PAF_INVITE_REQ:
+			brcmf_dbg(TRACE, "%s P2P Invitation Request  Frame\n",
+				  (tx) ? "TX" : "RX");
+			break;
+		case P2P_PAF_INVITE_RSP:
+			brcmf_dbg(TRACE, "%s P2P Invitation Response Frame\n",
+				  (tx) ? "TX" : "RX");
+			break;
+		case P2P_PAF_DEVDIS_REQ:
+			brcmf_dbg(TRACE, "%s P2P Device Discoverability Request Frame\n",
+				  (tx) ? "TX" : "RX");
+			break;
+		case P2P_PAF_DEVDIS_RSP:
+			brcmf_dbg(TRACE, "%s P2P Device Discoverability Response Frame\n",
+				  (tx) ? "TX" : "RX");
+			break;
+		case P2P_PAF_PROVDIS_REQ:
+			brcmf_dbg(TRACE, "%s P2P Provision Discovery Request Frame\n",
+				  (tx) ? "TX" : "RX");
+			break;
+		case P2P_PAF_PROVDIS_RSP:
+			brcmf_dbg(TRACE, "%s P2P Provision Discovery Response Frame\n",
+				  (tx) ? "TX" : "RX");
+			break;
+		default:
+			brcmf_dbg(TRACE, "%s Unknown P2P Public Action Frame\n",
+				  (tx) ? "TX" : "RX");
+			break;
+		}
+	} else if (brcmf_p2p_is_p2p_action(frame, frame_len)) {
+		act_frm = (struct brcmf_p2p_action_frame *)frame;
+		switch (act_frm->subtype) {
+		case P2P_AF_NOTICE_OF_ABSENCE:
+			brcmf_dbg(TRACE, "%s P2P Notice of Absence Frame\n",
+				  (tx) ? "TX" : "RX");
+			break;
+		case P2P_AF_PRESENCE_REQ:
+			brcmf_dbg(TRACE, "%s P2P Presence Request Frame\n",
+				  (tx) ? "TX" : "RX");
+			break;
+		case P2P_AF_PRESENCE_RSP:
+			brcmf_dbg(TRACE, "%s P2P Presence Response Frame\n",
+				  (tx) ? "TX" : "RX");
+			break;
+		case P2P_AF_GO_DISC_REQ:
+			brcmf_dbg(TRACE, "%s P2P Discoverability Request Frame\n",
+				  (tx) ? "TX" : "RX");
+			break;
+		default:
+			brcmf_dbg(TRACE, "%s Unknown P2P Action Frame\n",
+				  (tx) ? "TX" : "RX");
+		}
+
+	} else if (brcmf_p2p_is_gas_action(frame, frame_len)) {
+		sd_act_frm = (struct brcmf_p2psd_gas_pub_act_frame *)frame;
+		switch (sd_act_frm->action) {
+		case P2PSD_ACTION_ID_GAS_IREQ:
+			brcmf_dbg(TRACE, "%s P2P GAS Initial Request\n",
+				  (tx) ? "TX" : "RX");
+			break;
+		case P2PSD_ACTION_ID_GAS_IRESP:
+			brcmf_dbg(TRACE, "%s P2P GAS Initial Response\n",
+				  (tx) ? "TX" : "RX");
+			break;
+		case P2PSD_ACTION_ID_GAS_CREQ:
+			brcmf_dbg(TRACE, "%s P2P GAS Comeback Request\n",
+				  (tx) ? "TX" : "RX");
+			break;
+		case P2PSD_ACTION_ID_GAS_CRESP:
+			brcmf_dbg(TRACE, "%s P2P GAS Comeback Response\n",
+				  (tx) ? "TX" : "RX");
+			break;
+		default:
+			brcmf_dbg(TRACE, "%s Unknown P2P GAS Frame\n",
+				  (tx) ? "TX" : "RX");
+			break;
+		}
+	}
+}
+
+#else
+
+static void brcmf_p2p_print_actframe(bool tx, void *frame, u32 frame_len)
+{
+}
+
+#endif
+
+
+/**
+ * brcmf_p2p_set_firmware() - prepare firmware for peer-to-peer operation.
+ *
+ * @ifp: ifp to use for iovars (primary).
+ * @p2p_mac: mac address to configure for p2p_da_override
+ */
+static int brcmf_p2p_set_firmware(struct brcmf_if *ifp, u8 *p2p_mac)
+{
+	s32 ret = 0;
+
+	brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1);
+	brcmf_fil_iovar_int_set(ifp, "apsta", 1);
+	brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1);
+
+	/* In case of COB type, the firmware has a default mac address. After
+	 * initializing the firmware, we have to set the current mac address
+	 * in the firmware as the P2P device address. This must be done with
+	 * discovery disabled.
+	 */
+	brcmf_fil_iovar_int_set(ifp, "p2p_disc", 0);
+
+	ret = brcmf_fil_iovar_data_set(ifp, "p2p_da_override", p2p_mac,
+				       ETH_ALEN);
+	if (ret)
+		brcmf_err("failed to update device address ret %d\n", ret);
+
+	return ret;
+}
+
+/**
+ * brcmf_p2p_generate_bss_mac() - derive mac addresses for P2P.
+ *
+ * @p2p: P2P specific data.
+ * @dev_addr: optional device address.
+ *
+ * P2P needs mac addresses for P2P device and interface. If no device
+ * address is specified, these are derived from the primary net device, i.e.
+ * the permanent ethernet address of the device.
+ */
+static void brcmf_p2p_generate_bss_mac(struct brcmf_p2p_info *p2p, u8 *dev_addr)
+{
+	struct brcmf_if *pri_ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
+	bool local_admin = false;
+
+	if (!dev_addr || is_zero_ether_addr(dev_addr)) {
+		dev_addr = pri_ifp->mac_addr;
+		local_admin = true;
+	}
+
+	/* Generate the P2P Device Address.  This consists of the device's
+	 * primary MAC address with the locally administered bit set.
+	 */
+	memcpy(p2p->dev_addr, dev_addr, ETH_ALEN);
+	if (local_admin)
+		p2p->dev_addr[0] |= 0x02;
+
+	/* Generate the P2P Interface Address.  If the discovery and connection
+	 * BSSCFGs need to simultaneously co-exist, then this address must be
+	 * different from the P2P Device Address, but also locally administered.
+	 */
+	memcpy(p2p->int_addr, p2p->dev_addr, ETH_ALEN);
+	p2p->int_addr[0] |= 0x02;
+	p2p->int_addr[4] ^= 0x80;
+}
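+
+/*
+ * Worked example for the derived case (no device address given), using a
+ * hypothetical primary MAC of 00:90:4c:12:34:56: the P2P Device Address
+ * becomes 02:90:4c:12:34:56 (locally administered bit set) and the P2P
+ * Interface Address becomes 02:90:4c:12:b4:56 (byte 4 XORed with 0x80).
+ */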
+
+/**
+ * brcmf_p2p_scan_is_p2p_request() - is cfg80211 scan request a P2P scan.
+ *
+ * @request: the scan request as received from cfg80211.
+ *
+ * returns true if one of the ssids in the request matches the
+ * P2P wildcard ssid; otherwise returns false.
+ */
+static bool brcmf_p2p_scan_is_p2p_request(struct cfg80211_scan_request *request)
+{
+	struct cfg80211_ssid *ssids = request->ssids;
+	int i;
+
+	for (i = 0; i < request->n_ssids; i++) {
+		if (ssids[i].ssid_len != BRCMF_P2P_WILDCARD_SSID_LEN)
+			continue;
+
+		brcmf_dbg(INFO, "comparing ssid \"%s\"", ssids[i].ssid);
+		if (!memcmp(BRCMF_P2P_WILDCARD_SSID, ssids[i].ssid,
+			    BRCMF_P2P_WILDCARD_SSID_LEN))
+			return true;
+	}
+	return false;
+}
+
+/**
+ * brcmf_p2p_set_discover_state - set discover state in firmware.
+ *
+ * @ifp: low-level interface object.
+ * @state: discover state to set.
+ * @chanspec: channel parameters (for state @WL_P2P_DISC_ST_LISTEN only).
+ * @listen_ms: duration to listen (for state @WL_P2P_DISC_ST_LISTEN only).
+ */
+static s32 brcmf_p2p_set_discover_state(struct brcmf_if *ifp, u8 state,
+					u16 chanspec, u16 listen_ms)
+{
+	struct brcmf_p2p_disc_st_le discover_state;
+	s32 ret = 0;
+	brcmf_dbg(TRACE, "enter\n");
+
+	discover_state.state = state;
+	discover_state.chspec = cpu_to_le16(chanspec);
+	discover_state.dwell = cpu_to_le16(listen_ms);
+	ret = brcmf_fil_bsscfg_data_set(ifp, "p2p_state", &discover_state,
+					sizeof(discover_state));
+	return ret;
+}
+
+/**
+ * brcmf_p2p_deinit_discovery() - disable P2P device discovery.
+ *
+ * @p2p: P2P specific data.
+ *
+ * Resets the discovery state and disables it in firmware.
+ */
+static s32 brcmf_p2p_deinit_discovery(struct brcmf_p2p_info *p2p)
+{
+	struct brcmf_cfg80211_vif *vif;
+
+	brcmf_dbg(TRACE, "enter\n");
+
+	/* Set the discovery state to SCAN */
+	vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+	(void)brcmf_p2p_set_discover_state(vif->ifp, WL_P2P_DISC_ST_SCAN, 0, 0);
+
+	/* Disable P2P discovery in the firmware */
+	vif = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif;
+	(void)brcmf_fil_iovar_int_set(vif->ifp, "p2p_disc", 0);
+
+	return 0;
+}
+
+/**
+ * brcmf_p2p_enable_discovery() - initialize and configure discovery.
+ *
+ * @p2p: P2P specific data.
+ *
+ * Initializes the discovery device and configures the virtual interface.
+ */
+static int brcmf_p2p_enable_discovery(struct brcmf_p2p_info *p2p)
+{
+	struct brcmf_cfg80211_vif *vif;
+	s32 ret = 0;
+
+	brcmf_dbg(TRACE, "enter\n");
+	vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+	if (!vif) {
+		brcmf_err("P2P config device not available\n");
+		ret = -EPERM;
+		goto exit;
+	}
+
+	if (test_bit(BRCMF_P2P_STATUS_ENABLED, &p2p->status)) {
+		brcmf_dbg(INFO, "P2P config device already configured\n");
+		goto exit;
+	}
+
+	/* Re-initialize P2P Discovery in the firmware */
+	vif = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif;
+	ret = brcmf_fil_iovar_int_set(vif->ifp, "p2p_disc", 1);
+	if (ret < 0) {
+		brcmf_err("set p2p_disc error\n");
+		goto exit;
+	}
+	vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+	ret = brcmf_p2p_set_discover_state(vif->ifp, WL_P2P_DISC_ST_SCAN, 0, 0);
+	if (ret < 0) {
+		brcmf_err("unable to set WL_P2P_DISC_ST_SCAN\n");
+		goto exit;
+	}
+
+	/*
+	 * Set wsec to any non-zero value in the discovery bsscfg
+	 * to ensure our P2P probe responses have the privacy bit
+	 * set in the 802.11 WPA IE. Some peer devices may not
+	 * initiate WPS with us if this bit is not set.
+	 */
+	ret = brcmf_fil_bsscfg_int_set(vif->ifp, "wsec", AES_ENABLED);
+	if (ret < 0) {
+		brcmf_err("wsec error %d\n", ret);
+		goto exit;
+	}
+
+	set_bit(BRCMF_P2P_STATUS_ENABLED, &p2p->status);
+exit:
+	return ret;
+}
+
+/**
+ * brcmf_p2p_escan() - initiate a P2P scan.
+ *
+ * @p2p: P2P specific data.
+ * @num_chans: number of channels to scan.
+ * @chanspecs: channel parameters for @num_chans channels.
+ * @search_state: P2P discover state to use.
+ * @action: scan action to pass to firmware.
+ * @bss_type: type of P2P bss.
+ */
+static s32 brcmf_p2p_escan(struct brcmf_p2p_info *p2p, u32 num_chans,
+			   u16 chanspecs[], s32 search_state, u16 action,
+			   enum p2p_bss_type bss_type)
+{
+	s32 ret = 0;
+	s32 memsize = offsetof(struct brcmf_p2p_scan_le,
+			       eparams.params_le.channel_list);
+	s32 nprobes;
+	s32 active;
+	u32 i;
+	u8 *memblk;
+	struct brcmf_cfg80211_vif *vif;
+	struct brcmf_p2p_scan_le *p2p_params;
+	struct brcmf_scan_params_le *sparams;
+	struct brcmf_ssid ssid;
+
+	memsize += num_chans * sizeof(__le16);
+	memblk = kzalloc(memsize, GFP_KERNEL);
+	if (!memblk)
+		return -ENOMEM;
+
+	vif = p2p->bss_idx[bss_type].vif;
+	if (vif == NULL) {
+		brcmf_err("no vif for bss type %d\n", bss_type);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	switch (search_state) {
+	case WL_P2P_DISC_ST_SEARCH:
+		/*
+		 * If we are in SEARCH STATE, we don't need to set the SSID
+		 * explicitly because the dongle uses the P2P WILDCARD
+		 * internally by default.
+		 */
+		/* use null ssid */
+		ssid.SSID_len = 0;
+		memset(ssid.SSID, 0, sizeof(ssid.SSID));
+		break;
+	case WL_P2P_DISC_ST_SCAN:
+		/*
+		 * wpa_supplicant issues the p2p_find command with type social
+		 * or progressive. For progressive we need to set the ssid to
+		 * the P2P WILDCARD, because otherwise we would just do a
+		 * broadcast scan.
+		 */
+		ssid.SSID_len = BRCMF_P2P_WILDCARD_SSID_LEN;
+		memcpy(ssid.SSID, BRCMF_P2P_WILDCARD_SSID, ssid.SSID_len);
+		break;
+	default:
+		brcmf_err(" invalid search state %d\n", search_state);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	brcmf_p2p_set_discover_state(vif->ifp, search_state, 0, 0);
+
+	/*
+	 * set p2p scan parameters.
+	 */
+	p2p_params = (struct brcmf_p2p_scan_le *)memblk;
+	p2p_params->type = 'E';
+
+	/* determine the scan engine parameters */
+	sparams = &p2p_params->eparams.params_le;
+	sparams->bss_type = DOT11_BSSTYPE_ANY;
+	if (p2p->cfg->active_scan)
+		sparams->scan_type = 0;
+	else
+		sparams->scan_type = 1;
+
+	eth_broadcast_addr(sparams->bssid);
+	if (ssid.SSID_len)
+		memcpy(sparams->ssid_le.SSID, ssid.SSID, ssid.SSID_len);
+	sparams->ssid_le.SSID_len = cpu_to_le32(ssid.SSID_len);
+	sparams->home_time = cpu_to_le32(P2PAPI_SCAN_HOME_TIME_MS);
+
+	/*
+	 * SOCIAL_CHAN_CNT + 1 takes care of the Progressive scan
+	 * supported by the supplicant.
+	 */
+	if (num_chans == SOCIAL_CHAN_CNT || num_chans == (SOCIAL_CHAN_CNT + 1))
+		active = P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS;
+	else if (num_chans == AF_PEER_SEARCH_CNT)
+		active = P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS;
+	else if (brcmf_get_vif_state_any(p2p->cfg, BRCMF_VIF_STATUS_CONNECTED))
+		active = -1;
+	else
+		active = P2PAPI_SCAN_DWELL_TIME_MS;
+
+	/* Override scan params to find a peer for a connection */
+	if (num_chans == 1) {
+		active = WL_SCAN_CONNECT_DWELL_TIME_MS;
+		/* WAR to sync with presence period of VSDB GO.
+		 * send probe request more frequently
+		 */
+		nprobes = active / WL_SCAN_JOIN_PROBE_INTERVAL_MS;
+	} else {
+		nprobes = active / P2PAPI_SCAN_NPROBS_TIME_MS;
+	}
+
+	if (nprobes <= 0)
+		nprobes = 1;
+
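+	/* e.g. a single channel connect scan ends up with active = 200 ms and
+	 * nprobes = 200 / 20 = 10 probe requests.
+	 */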
+	brcmf_dbg(INFO, "nprobes # %d, active_time %d\n", nprobes, active);
+	sparams->active_time = cpu_to_le32(active);
+	sparams->nprobes = cpu_to_le32(nprobes);
+	sparams->passive_time = cpu_to_le32(-1);
+	sparams->channel_num = cpu_to_le32(num_chans &
+					   BRCMF_SCAN_PARAMS_COUNT_MASK);
+	for (i = 0; i < num_chans; i++)
+		sparams->channel_list[i] = cpu_to_le16(chanspecs[i]);
+
+	/* set the escan specific parameters */
+	p2p_params->eparams.version = cpu_to_le32(BRCMF_ESCAN_REQ_VERSION);
+	p2p_params->eparams.action =  cpu_to_le16(action);
+	p2p_params->eparams.sync_id = cpu_to_le16(0x1234);
+	/* perform p2p scan on primary device */
+	ret = brcmf_fil_bsscfg_data_set(vif->ifp, "p2p_scan", memblk, memsize);
+	if (!ret)
+		set_bit(BRCMF_SCAN_STATUS_BUSY, &p2p->cfg->scan_status);
+exit:
+	kfree(memblk);
+	return ret;
+}
+
+/**
+ * brcmf_p2p_run_escan() - escan callback for peer-to-peer.
+ *
+ * @cfg: driver private data for cfg80211 interface.
+ * @ifp: interface for which the scan is requested.
+ * @request: scan request from cfg80211.
+ * @action: scan action.
+ *
+ * Determines the P2P discovery state based on the scan request parameters and
+ * validates the channels in the request.
+ */
+static s32 brcmf_p2p_run_escan(struct brcmf_cfg80211_info *cfg,
+			       struct brcmf_if *ifp,
+			       struct cfg80211_scan_request *request,
+			       u16 action)
+{
+	struct brcmf_p2p_info *p2p = &cfg->p2p;
+	s32 err = 0;
+	s32 search_state = WL_P2P_DISC_ST_SCAN;
+	struct brcmf_cfg80211_vif *vif;
+	struct net_device *dev = NULL;
+	int i, num_nodfs = 0;
+	u16 *chanspecs;
+
+	brcmf_dbg(TRACE, "enter\n");
+
+	if (!request) {
+		err = -EINVAL;
+		goto exit;
+	}
+
+	if (request->n_channels) {
+		chanspecs = kcalloc(request->n_channels, sizeof(*chanspecs),
+				    GFP_KERNEL);
+		if (!chanspecs) {
+			err = -ENOMEM;
+			goto exit;
+		}
+		vif = p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif;
+		if (vif)
+			dev = vif->wdev.netdev;
+		if (request->n_channels == 3 &&
+		    request->channels[0]->hw_value == SOCIAL_CHAN_1 &&
+		    request->channels[1]->hw_value == SOCIAL_CHAN_2 &&
+		    request->channels[2]->hw_value == SOCIAL_CHAN_3) {
+			/* SOCIAL CHANNELS 1, 6, 11 */
+			search_state = WL_P2P_DISC_ST_SEARCH;
+			brcmf_dbg(INFO, "P2P SEARCH PHASE START\n");
+		} else if (dev != NULL &&
+			   vif->wdev.iftype == NL80211_IFTYPE_P2P_GO) {
+			/* If you are already a GO, then do SEARCH only */
+			brcmf_dbg(INFO, "Already a GO. Do SEARCH Only\n");
+			search_state = WL_P2P_DISC_ST_SEARCH;
+		} else {
+			brcmf_dbg(INFO, "P2P SCAN STATE START\n");
+		}
+
+		/*
+		 * no P2P scanning on passive or DFS channels.
+		 */
+		for (i = 0; i < request->n_channels; i++) {
+			struct ieee80211_channel *chan = request->channels[i];
+
+			if (chan->flags & (IEEE80211_CHAN_RADAR |
+					   IEEE80211_CHAN_NO_IR))
+				continue;
+
+			chanspecs[i] = channel_to_chanspec(&p2p->cfg->d11inf,
+							   chan);
+			brcmf_dbg(INFO, "%d: chan=%d, channel spec=%x\n",
+				  num_nodfs, chan->hw_value, chanspecs[i]);
+			num_nodfs++;
+		}
+		err = brcmf_p2p_escan(p2p, num_nodfs, chanspecs, search_state,
+				      action, P2PAPI_BSSCFG_DEVICE);
+		kfree(chanspecs);
+	}
+exit:
+	if (err)
+		brcmf_err("error (%d)\n", err);
+	return err;
+}
+
+
+/**
+ * brcmf_p2p_find_listen_channel() - find listen channel in ie string.
+ *
+ * @ie: string of information elements.
+ * @ie_len: length of string.
+ *
+ * Scan the ie for the p2p ie and look for the listen channel attribute
+ * (attribute 6). If available, determine the channel and return it.
+ */
+static s32 brcmf_p2p_find_listen_channel(const u8 *ie, u32 ie_len)
+{
+	u8 channel_ie[5];
+	s32 listen_channel;
+	s32 err;
+
+	err = cfg80211_get_p2p_attr(ie, ie_len,
+				    IEEE80211_P2P_ATTR_LISTEN_CHANNEL,
+				    channel_ie, sizeof(channel_ie));
+	if (err < 0)
+		return err;
+
+	/* listen channel subel length format:     */
+	/* 3(country) + 1(op. class) + 1(chan num) */
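+	/* e.g. channel_ie = { 'X', 'X', 0x04, 81, 6 } yields listen channel 6
+	 * (values purely illustrative).
+	 */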
+	listen_channel = (s32)channel_ie[3 + 1];
+
+	if (listen_channel == SOCIAL_CHAN_1 ||
+	    listen_channel == SOCIAL_CHAN_2 ||
+	    listen_channel == SOCIAL_CHAN_3) {
+		brcmf_dbg(INFO, "Found my Listen Channel %d\n", listen_channel);
+		return listen_channel;
+	}
+
+	return -EPERM;
+}
+
+
+/**
+ * brcmf_p2p_scan_prep() - prepare scan based on request.
+ *
+ * @wiphy: wiphy device.
+ * @request: scan request from cfg80211.
+ * @vif: vif on which scan request is to be executed.
+ *
+ * Prepare the scan appropriately for the type of scan requested. Overrides the
+ * escan .run() callback for peer-to-peer scanning.
+ */
+int brcmf_p2p_scan_prep(struct wiphy *wiphy,
+			struct cfg80211_scan_request *request,
+			struct brcmf_cfg80211_vif *vif)
+{
+	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+	struct brcmf_p2p_info *p2p = &cfg->p2p;
+	int err = 0;
+
+	if (brcmf_p2p_scan_is_p2p_request(request)) {
+		/* find my listen channel */
+		err = brcmf_p2p_find_listen_channel(request->ie,
+						    request->ie_len);
+		if (err < 0)
+			return err;
+
+		p2p->afx_hdl.my_listen_chan = err;
+
+		clear_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status);
+		brcmf_dbg(INFO, "P2P: GO_NEG_PHASE status cleared\n");
+
+		err = brcmf_p2p_enable_discovery(p2p);
+		if (err)
+			return err;
+
+		vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+
+		/* override the escan .run() callback. */
+		cfg->escan_info.run = brcmf_p2p_run_escan;
+	}
+	err = brcmf_vif_set_mgmt_ie(vif, BRCMF_VNDR_IE_PRBREQ_FLAG,
+				    request->ie, request->ie_len);
+	return err;
+}
+
+
+/**
+ * brcmf_p2p_discover_listen() - set firmware to discover listen state.
+ *
+ * @p2p: p2p device.
+ * @channel: channel nr for discover listen.
+ * @duration: time in ms to stay on channel.
+ *
+ */
+static s32
+brcmf_p2p_discover_listen(struct brcmf_p2p_info *p2p, u16 channel, u32 duration)
+{
+	struct brcmf_cfg80211_vif *vif;
+	struct brcmu_chan ch;
+	s32 err = 0;
+
+	vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+	if (!vif) {
+		brcmf_err("Discovery is not set, so we have nothing to do\n");
+		err = -EPERM;
+		goto exit;
+	}
+
+	if (test_bit(BRCMF_P2P_STATUS_DISCOVER_LISTEN, &p2p->status)) {
+		brcmf_err("Previous LISTEN is not completed yet\n");
+		/* WAR: return OK to prevent a cookie mismatch in wpa_supplicant */
+		goto exit;
+	}
+
+	ch.chnum = channel;
+	ch.bw = BRCMU_CHAN_BW_20;
+	p2p->cfg->d11inf.encchspec(&ch);
+	err = brcmf_p2p_set_discover_state(vif->ifp, WL_P2P_DISC_ST_LISTEN,
+					   ch.chspec, (u16)duration);
+	if (!err) {
+		set_bit(BRCMF_P2P_STATUS_DISCOVER_LISTEN, &p2p->status);
+		p2p->remain_on_channel_cookie++;
+	}
+exit:
+	return err;
+}
+
+
+/**
+ * brcmf_p2p_remain_on_channel() - put device on channel and stay there.
+ *
+ * @wiphy: wiphy device.
+ * @wdev: wireless device.
+ * @channel: channel to stay on.
+ * @duration: time in ms to remain on channel.
+ * @cookie: set to a value identifying this listen request.
+ *
+ */
+int brcmf_p2p_remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
+				struct ieee80211_channel *channel,
+				unsigned int duration, u64 *cookie)
+{
+	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+	struct brcmf_p2p_info *p2p = &cfg->p2p;
+	s32 err;
+	u16 channel_nr;
+
+	channel_nr = ieee80211_frequency_to_channel(channel->center_freq);
+	brcmf_dbg(TRACE, "Enter, channel: %d, duration ms (%d)\n", channel_nr,
+		  duration);
+
+	err = brcmf_p2p_enable_discovery(p2p);
+	if (err)
+		goto exit;
+	err = brcmf_p2p_discover_listen(p2p, channel_nr, duration);
+	if (err)
+		goto exit;
+
+	memcpy(&p2p->remain_on_channel, channel, sizeof(*channel));
+	*cookie = p2p->remain_on_channel_cookie;
+	cfg80211_ready_on_channel(wdev, *cookie, channel, duration, GFP_KERNEL);
+
+exit:
+	return err;
+}
+
+
+/**
+ * brcmf_p2p_notify_listen_complete() - p2p listen has completed.
+ *
+ * @ifp: interface control.
+ * @e: event message. Not used, kept to match the fweh event dispatcher.
+ * @data: payload of message. Not used.
+ *
+ */
+int brcmf_p2p_notify_listen_complete(struct brcmf_if *ifp,
+				     const struct brcmf_event_msg *e,
+				     void *data)
+{
+	struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+	struct brcmf_p2p_info *p2p = &cfg->p2p;
+
+	brcmf_dbg(TRACE, "Enter\n");
+	if (test_and_clear_bit(BRCMF_P2P_STATUS_DISCOVER_LISTEN,
+			       &p2p->status)) {
+		if (test_and_clear_bit(BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN,
+				       &p2p->status)) {
+			clear_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME,
+				  &p2p->status);
+			brcmf_dbg(INFO, "Listen DONE, wake up wait_next_af\n");
+			complete(&p2p->wait_next_af);
+		}
+
+		cfg80211_remain_on_channel_expired(&ifp->vif->wdev,
+						   p2p->remain_on_channel_cookie,
+						   &p2p->remain_on_channel,
+						   GFP_KERNEL);
+	}
+	return 0;
+}
+
+
+/**
+ * brcmf_p2p_cancel_remain_on_channel() - cancel p2p listen state.
+ *
+ * @ifp: interface control.
+ *
+ */
+void brcmf_p2p_cancel_remain_on_channel(struct brcmf_if *ifp)
+{
+	if (!ifp)
+		return;
+	brcmf_p2p_set_discover_state(ifp, WL_P2P_DISC_ST_SCAN, 0, 0);
+	brcmf_p2p_notify_listen_complete(ifp, NULL, NULL);
+}
+
+
+/**
+ * brcmf_p2p_act_frm_search() - search function for action frame.
+ *
+ * @p2p: p2p device.
+ * @channel: channel on which the action frame is to be transmitted.
+ *
+ * Search function to reach a common channel on which to send the action
+ * frame. When channel is 0, all social channels will be used to send the af.
+ */
+static s32 brcmf_p2p_act_frm_search(struct brcmf_p2p_info *p2p, u16 channel)
+{
+	s32 err;
+	u32 channel_cnt;
+	u16 *default_chan_list;
+	u32 i;
+	struct brcmu_chan ch;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	if (channel)
+		channel_cnt = AF_PEER_SEARCH_CNT;
+	else
+		channel_cnt = SOCIAL_CHAN_CNT;
+	default_chan_list = kzalloc(channel_cnt * sizeof(*default_chan_list),
+				    GFP_KERNEL);
+	if (default_chan_list == NULL) {
+		brcmf_err("channel list allocation failed\n");
+		err = -ENOMEM;
+		goto exit;
+	}
+	ch.bw = BRCMU_CHAN_BW_20;
+	if (channel) {
+		ch.chnum = channel;
+		p2p->cfg->d11inf.encchspec(&ch);
+		/* insert same channel to the chan_list */
+		for (i = 0; i < channel_cnt; i++)
+			default_chan_list[i] = ch.chspec;
+	} else {
+		ch.chnum = SOCIAL_CHAN_1;
+		p2p->cfg->d11inf.encchspec(&ch);
+		default_chan_list[0] = ch.chspec;
+		ch.chnum = SOCIAL_CHAN_2;
+		p2p->cfg->d11inf.encchspec(&ch);
+		default_chan_list[1] = ch.chspec;
+		ch.chnum = SOCIAL_CHAN_3;
+		p2p->cfg->d11inf.encchspec(&ch);
+		default_chan_list[2] = ch.chspec;
+	}
+	err = brcmf_p2p_escan(p2p, channel_cnt, default_chan_list,
+			      WL_P2P_DISC_ST_SEARCH, WL_ESCAN_ACTION_START,
+			      P2PAPI_BSSCFG_DEVICE);
+	kfree(default_chan_list);
+exit:
+	return err;
+}
+
+
+/**
+ * brcmf_p2p_afx_handler() - afx worker thread.
+ *
+ * @work: the afx_work member of struct afx_hdl.
+ *
+ */
+static void brcmf_p2p_afx_handler(struct work_struct *work)
+{
+	struct afx_hdl *afx_hdl = container_of(work, struct afx_hdl, afx_work);
+	struct brcmf_p2p_info *p2p = container_of(afx_hdl,
+						  struct brcmf_p2p_info,
+						  afx_hdl);
+	s32 err;
+
+	if (!afx_hdl->is_active)
+		return;
+
+	if (afx_hdl->is_listen && afx_hdl->my_listen_chan)
+		/* 100ms ~ 300ms */
+		err = brcmf_p2p_discover_listen(p2p, afx_hdl->my_listen_chan,
+						100 * (1 + prandom_u32() % 3));
+	else
+		err = brcmf_p2p_act_frm_search(p2p, afx_hdl->peer_listen_chan);
+
+	if (err) {
+		brcmf_err("ERROR occurred! value is (%d)\n", err);
+		if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
+			     &p2p->status))
+			complete(&afx_hdl->act_frm_scan);
+	}
+}
+
+
+/**
+ * brcmf_p2p_af_searching_channel() - search channel.
+ *
+ * @p2p: p2p device info struct.
+ *
+ */
+static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p)
+{
+	struct afx_hdl *afx_hdl = &p2p->afx_hdl;
+	struct brcmf_cfg80211_vif *pri_vif;
+	unsigned long duration;
+	s32 retry;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	pri_vif = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif;
+
+	reinit_completion(&afx_hdl->act_frm_scan);
+	set_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status);
+	afx_hdl->is_active = true;
+	afx_hdl->peer_chan = P2P_INVALID_CHANNEL;
+
+	/* Loop to wait until we find a peer's channel or the
+	 * pending action frame tx is cancelled.
+	 */
+	retry = 0;
+	duration = msecs_to_jiffies(P2P_AF_FRM_SCAN_MAX_WAIT);
+	while ((retry < P2P_CHANNEL_SYNC_RETRY) &&
+	       (afx_hdl->peer_chan == P2P_INVALID_CHANNEL)) {
+		afx_hdl->is_listen = false;
+		brcmf_dbg(TRACE, "Scheduling action frame for sending.. (%d)\n",
+			  retry);
+		/* search peer on peer's listen channel */
+		schedule_work(&afx_hdl->afx_work);
+		wait_for_completion_timeout(&afx_hdl->act_frm_scan, duration);
+		if ((afx_hdl->peer_chan != P2P_INVALID_CHANNEL) ||
+		    (!test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
+			       &p2p->status)))
+			break;
+
+		if (afx_hdl->my_listen_chan) {
+			brcmf_dbg(TRACE, "Scheduling listen peer, channel=%d\n",
+				  afx_hdl->my_listen_chan);
+			/* listen on my listen channel */
+			afx_hdl->is_listen = true;
+			schedule_work(&afx_hdl->afx_work);
+			wait_for_completion_timeout(&afx_hdl->act_frm_scan,
+						    duration);
+		}
+		if ((afx_hdl->peer_chan != P2P_INVALID_CHANNEL) ||
+		    (!test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
+			       &p2p->status)))
+			break;
+		retry++;
+
+		/* if sta is connected or connecting, sleep for a while before
+		 * retry af tx or finding a peer
+		 */
+		if (test_bit(BRCMF_VIF_STATUS_CONNECTED, &pri_vif->sme_state) ||
+		    test_bit(BRCMF_VIF_STATUS_CONNECTING, &pri_vif->sme_state))
+			msleep(P2P_DEFAULT_SLEEP_TIME_VSDB);
+	}
+
+	brcmf_dbg(TRACE, "Completed search/listen peer_chan=%d\n",
+		  afx_hdl->peer_chan);
+	afx_hdl->is_active = false;
+
+	clear_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status);
+
+	return afx_hdl->peer_chan;
+}
+
+
+/**
+ * brcmf_p2p_scan_finding_common_channel() - was escan used for finding channel
+ *
+ * @cfg: common configuration struct.
+ * @bi: bss info struct, result from scan.
+ *
+ */
+bool brcmf_p2p_scan_finding_common_channel(struct brcmf_cfg80211_info *cfg,
+					   struct brcmf_bss_info_le *bi)
+{
+	struct brcmf_p2p_info *p2p = &cfg->p2p;
+	struct afx_hdl *afx_hdl = &p2p->afx_hdl;
+	struct brcmu_chan ch;
+	u8 *ie;
+	s32 err;
+	u8 p2p_dev_addr[ETH_ALEN];
+
+	if (!test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status))
+		return false;
+
+	if (bi == NULL) {
+		brcmf_dbg(TRACE, "ACTION FRAME SCAN Done\n");
+		if (afx_hdl->peer_chan == P2P_INVALID_CHANNEL)
+			complete(&afx_hdl->act_frm_scan);
+		return true;
+	}
+
+	ie = ((u8 *)bi) + le16_to_cpu(bi->ie_offset);
+	memset(p2p_dev_addr, 0, sizeof(p2p_dev_addr));
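+	/* The peer's P2P device address may be carried in either the Device
+	 * Info attribute or the Device ID attribute, so try both.
+	 */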
+	err = cfg80211_get_p2p_attr(ie, le32_to_cpu(bi->ie_length),
+				    IEEE80211_P2P_ATTR_DEVICE_INFO,
+				    p2p_dev_addr, sizeof(p2p_dev_addr));
+	if (err < 0)
+		err = cfg80211_get_p2p_attr(ie, le32_to_cpu(bi->ie_length),
+					    IEEE80211_P2P_ATTR_DEVICE_ID,
+					    p2p_dev_addr, sizeof(p2p_dev_addr));
+	if ((err >= 0) &&
+	    (ether_addr_equal(p2p_dev_addr, afx_hdl->tx_dst_addr))) {
+		if (!bi->ctl_ch) {
+			ch.chspec = le16_to_cpu(bi->chanspec);
+			cfg->d11inf.decchspec(&ch);
+			bi->ctl_ch = ch.chnum;
+		}
+		afx_hdl->peer_chan = bi->ctl_ch;
+		brcmf_dbg(TRACE, "ACTION FRAME SCAN : Peer %pM found, channel : %d\n",
+			  afx_hdl->tx_dst_addr, afx_hdl->peer_chan);
+		complete(&afx_hdl->act_frm_scan);
+	}
+	return true;
+}
+
+/**
+ * brcmf_p2p_stop_wait_next_action_frame() - finish scan if af tx complete.
+ *
+ * @cfg: common configuration struct.
+ *
+ */
+static void
+brcmf_p2p_stop_wait_next_action_frame(struct brcmf_cfg80211_info *cfg)
+{
+	struct brcmf_p2p_info *p2p = &cfg->p2p;
+	struct brcmf_if *ifp = cfg->escan_info.ifp;
+
+	if (test_bit(BRCMF_P2P_STATUS_SENDING_ACT_FRAME, &p2p->status) &&
+	    (test_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status) ||
+	     test_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status))) {
+		brcmf_dbg(TRACE, "*** Wake UP ** abort actframe iovar\n");
+		/* if channel is not zero, "actframe" uses off channel scan.
+		 * So abort scan for off channel completion.
+		 */
+		if (p2p->af_sent_channel)
+			brcmf_notify_escan_complete(cfg, ifp, true, true);
+	} else if (test_bit(BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN,
+			    &p2p->status)) {
+		brcmf_dbg(TRACE, "*** Wake UP ** abort listen for next af frame\n");
+		/* So abort scan to cancel listen */
+		brcmf_notify_escan_complete(cfg, ifp, true, true);
+	}
+}
+
+
+/**
+ * brcmf_p2p_gon_req_collision() - check for a GO negotiation collision.
+ *
+ * @p2p: p2p device info struct.
+ * @mac: source (peer) address of the received GO negotiation request.
+ *
+ * Return: true if the received action frame is to be dropped.
+ */
+static bool
+brcmf_p2p_gon_req_collision(struct brcmf_p2p_info *p2p, u8 *mac)
+{
+	struct brcmf_cfg80211_info *cfg = p2p->cfg;
+	struct brcmf_if *ifp;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	if (!test_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, &p2p->status) ||
+	    !p2p->gon_req_action)
+		return false;
+
+	brcmf_dbg(TRACE, "GO Negotiation Request COLLISION !!!\n");
+	/* If the sa (peer) addr is less than the da (my) addr, this device
+	 * processes the peer's gon request and blocks sending its own gon req.
+	 * Otherwise (sa addr > da addr), this device keeps its own gon request
+	 * and drops the gon req received from the peer.
+	 */
+	ifp = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif->ifp;
+	if (memcmp(mac, ifp->mac_addr, ETH_ALEN) < 0) {
+		brcmf_dbg(INFO, "Block transmit gon req !!!\n");
+		p2p->block_gon_req_tx = true;
+		/* if we are finding a common channel for sending af,
+		 * do not scan more to block to send current gon req
+		 */
+		if (test_and_clear_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
+				       &p2p->status))
+			complete(&p2p->afx_hdl.act_frm_scan);
+		if (test_and_clear_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME,
+				       &p2p->status))
+			brcmf_p2p_stop_wait_next_action_frame(cfg);
+		return false;
+	}
+
+	/* drop gon request of peer to process gon request by this device. */
+	brcmf_dbg(INFO, "Drop received gon req !!!\n");
+
+	return true;
+}
+
+
+/**
+ * brcmf_p2p_notify_action_frame_rx() - received action frame.
+ *
+ * @ifp: interface control.
+ * @e: event message. Not used, to make it usable for fweh event dispatcher.
+ * @data: payload of message, containing action frame data.
+ *
+ */
+int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
+				     const struct brcmf_event_msg *e,
+				     void *data)
+{
+	struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+	struct brcmf_p2p_info *p2p = &cfg->p2p;
+	struct afx_hdl *afx_hdl = &p2p->afx_hdl;
+	struct wireless_dev *wdev;
+	u32 mgmt_frame_len = e->datalen - sizeof(struct brcmf_rx_mgmt_data);
+	struct brcmf_rx_mgmt_data *rxframe = (struct brcmf_rx_mgmt_data *)data;
+	u8 *frame = (u8 *)(rxframe + 1);
+	struct brcmf_p2p_pub_act_frame *act_frm;
+	struct brcmf_p2psd_gas_pub_act_frame *sd_act_frm;
+	struct brcmu_chan ch;
+	struct ieee80211_mgmt *mgmt_frame;
+	s32 freq;
+	u16 mgmt_type;
+	u8 action;
+
+	ch.chspec = be16_to_cpu(rxframe->chanspec);
+	cfg->d11inf.decchspec(&ch);
+	/* Check if wpa_supplicant has registered for this frame */
+	brcmf_dbg(INFO, "ifp->vif->mgmt_rx_reg %04x\n", ifp->vif->mgmt_rx_reg);
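+	/* e.g. IEEE80211_STYPE_ACTION (0x00d0) yields mgmt_type 0xd, so the
+	 * check below tests BIT(13) of the subtype registration bitmap.
+	 */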
+	mgmt_type = (IEEE80211_STYPE_ACTION & IEEE80211_FCTL_STYPE) >> 4;
+	if ((ifp->vif->mgmt_rx_reg & BIT(mgmt_type)) == 0)
+		return 0;
+
+	brcmf_p2p_print_actframe(false, frame, mgmt_frame_len);
+
+	action = P2P_PAF_SUBTYPE_INVALID;
+	if (brcmf_p2p_is_pub_action(frame, mgmt_frame_len)) {
+		act_frm = (struct brcmf_p2p_pub_act_frame *)frame;
+		action = act_frm->subtype;
+		if ((action == P2P_PAF_GON_REQ) &&
+		    (brcmf_p2p_gon_req_collision(p2p, (u8 *)e->addr))) {
+			if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
+				     &p2p->status) &&
+			    (ether_addr_equal(afx_hdl->tx_dst_addr, e->addr))) {
+				afx_hdl->peer_chan = ch.chnum;
+				brcmf_dbg(INFO, "GON request: Peer found, channel=%d\n",
+					  afx_hdl->peer_chan);
+				complete(&afx_hdl->act_frm_scan);
+			}
+			return 0;
+		}
+		/* After complete GO Negotiation, roll back to mpc mode */
+		if ((action == P2P_PAF_GON_CONF) ||
+		    (action == P2P_PAF_PROVDIS_RSP))
+			brcmf_set_mpc(ifp, 1);
+		if (action == P2P_PAF_GON_CONF) {
+			brcmf_dbg(TRACE, "P2P: GO_NEG_PHASE status cleared\n");
+			clear_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status);
+		}
+	} else if (brcmf_p2p_is_gas_action(frame, mgmt_frame_len)) {
+		sd_act_frm = (struct brcmf_p2psd_gas_pub_act_frame *)frame;
+		action = sd_act_frm->action;
+	}
+
+	if (test_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, &p2p->status) &&
+	    (p2p->next_af_subtype == action)) {
+		brcmf_dbg(TRACE, "We got a right next frame! (%d)\n", action);
+		clear_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME,
+			  &p2p->status);
+		/* Stop waiting for next AF. */
+		brcmf_p2p_stop_wait_next_action_frame(cfg);
+	}
+
+	mgmt_frame = kzalloc(offsetof(struct ieee80211_mgmt, u) +
+			     mgmt_frame_len, GFP_KERNEL);
+	if (!mgmt_frame) {
+		brcmf_err("No memory available for action frame\n");
+		return -ENOMEM;
+	}
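+	/* Rebuild a minimal 802.11 management header around the raw action
+	 * frame payload so it can be handed to cfg80211_rx_mgmt() below.
+	 */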
+	memcpy(mgmt_frame->da, ifp->mac_addr, ETH_ALEN);
+	brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BSSID, mgmt_frame->bssid,
+			       ETH_ALEN);
+	memcpy(mgmt_frame->sa, e->addr, ETH_ALEN);
+	mgmt_frame->frame_control = cpu_to_le16(IEEE80211_STYPE_ACTION);
+	memcpy(&mgmt_frame->u, frame, mgmt_frame_len);
+	mgmt_frame_len += offsetof(struct ieee80211_mgmt, u);
+
+	freq = ieee80211_channel_to_frequency(ch.chnum,
+					      ch.band == BRCMU_CHAN_BAND_2G ?
+					      IEEE80211_BAND_2GHZ :
+					      IEEE80211_BAND_5GHZ);
+
+	wdev = &ifp->vif->wdev;
+	cfg80211_rx_mgmt(wdev, freq, 0, (u8 *)mgmt_frame, mgmt_frame_len, 0);
+
+	kfree(mgmt_frame);
+	return 0;
+}
+
+
+/**
+ * brcmf_p2p_notify_action_tx_complete() - transmit action frame complete
+ *
+ * @ifp: interface control.
+ * @e: event message. Not used, to make it usable for fweh event dispatcher.
+ * @data: not used.
+ *
+ */
+int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp,
+					const struct brcmf_event_msg *e,
+					void *data)
+{
+	struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+	struct brcmf_p2p_info *p2p = &cfg->p2p;
+
+	brcmf_dbg(INFO, "Enter: event %s, status=%d\n",
+		  e->event_code == BRCMF_E_ACTION_FRAME_OFF_CHAN_COMPLETE ?
+		  "ACTION_FRAME_OFF_CHAN_COMPLETE" : "ACTION_FRAME_COMPLETE",
+		  e->status);
+
+	if (!test_bit(BRCMF_P2P_STATUS_SENDING_ACT_FRAME, &p2p->status))
+		return 0;
+
+	if (e->event_code == BRCMF_E_ACTION_FRAME_COMPLETE) {
+		if (e->status == BRCMF_E_STATUS_SUCCESS)
+			set_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED,
+				&p2p->status);
+		else {
+			set_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status);
+			/* If there is no ack, we don't need to wait for
+			 * WLC_E_ACTION_FRAME_OFFCHAN_COMPLETE event
+			 */
+			brcmf_p2p_stop_wait_next_action_frame(cfg);
+		}
+
+	} else {
+		complete(&p2p->send_af_done);
+	}
+	return 0;
+}
+
+
+/**
+ * brcmf_p2p_tx_action_frame() - send action frame over fil.
+ *
+ * @p2p: p2p info struct for vif.
+ * @af_params: action frame data/info.
+ *
+ * Send an action frame immediately without doing channel synchronization.
+ *
+ * This function waits for a completion event before returning.
+ * The WLC_E_ACTION_FRAME_COMPLETE event will be received when the action
+ * frame is transmitted.
+ */
+static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p,
+				     struct brcmf_fil_af_params_le *af_params)
+{
+	struct brcmf_cfg80211_vif *vif;
+	s32 err = 0;
+	s32 timeout = 0;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	reinit_completion(&p2p->send_af_done);
+	clear_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status);
+	clear_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status);
+
+	vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+	err = brcmf_fil_bsscfg_data_set(vif->ifp, "actframe", af_params,
+					sizeof(*af_params));
+	if (err) {
+		brcmf_err(" sending action frame has failed\n");
+		goto exit;
+	}
+
+	p2p->af_sent_channel = le32_to_cpu(af_params->channel);
+	p2p->af_tx_sent_jiffies = jiffies;
+
+	timeout = wait_for_completion_timeout(&p2p->send_af_done,
+					msecs_to_jiffies(P2P_AF_MAX_WAIT_TIME));
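+	/* send_af_done is completed from brcmf_p2p_notify_action_tx_complete()
+	 * when the off-channel action frame sequence finishes in firmware.
+	 */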
+
+	if (test_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status)) {
+		brcmf_dbg(TRACE, "TX action frame operation is success\n");
+	} else {
+		err = -EIO;
+		brcmf_dbg(TRACE, "TX action frame operation has failed\n");
+	}
+	/* clear status bit for action tx */
+	clear_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status);
+	clear_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status);
+
+exit:
+	return err;
+}
+
+
+/**
+ * brcmf_p2p_pub_af_tx() - public action frame tx routine.
+ *
+ * @cfg: driver private data for cfg80211 interface.
+ * @af_params: action frame data/info.
+ * @config_af_params: configuration data for action frame.
+ *
+ * Routine which transmits a public type action frame.
+ */
+static s32 brcmf_p2p_pub_af_tx(struct brcmf_cfg80211_info *cfg,
+			       struct brcmf_fil_af_params_le *af_params,
+			       struct brcmf_config_af_params *config_af_params)
+{
+	struct brcmf_p2p_info *p2p = &cfg->p2p;
+	struct brcmf_fil_action_frame_le *action_frame;
+	struct brcmf_p2p_pub_act_frame *act_frm;
+	s32 err = 0;
+	u16 ie_len;
+
+	action_frame = &af_params->action_frame;
+	act_frm = (struct brcmf_p2p_pub_act_frame *)(action_frame->data);
+
+	config_af_params->extra_listen = true;
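+	/* P2P public action frame subtypes are numbered such that a response
+	 * directly follows its request (e.g. GON_REQ -> GON_RSP), which is
+	 * why next_af_subtype can simply be set to subtype + 1 below.
+	 */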
+
+	switch (act_frm->subtype) {
+	case P2P_PAF_GON_REQ:
+		brcmf_dbg(TRACE, "P2P: GO_NEG_PHASE status set\n");
+		set_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status);
+		config_af_params->mpc_onoff = 0;
+		config_af_params->search_channel = true;
+		p2p->next_af_subtype = act_frm->subtype + 1;
+		p2p->gon_req_action = true;
+		/* increase dwell time to wait for RESP frame */
+		af_params->dwell_time = cpu_to_le32(P2P_AF_MED_DWELL_TIME);
+		break;
+	case P2P_PAF_GON_RSP:
+		p2p->next_af_subtype = act_frm->subtype + 1;
+		/* increase dwell time to wait for CONF frame */
+		af_params->dwell_time = cpu_to_le32(P2P_AF_MED_DWELL_TIME);
+		break;
+	case P2P_PAF_GON_CONF:
+		/* If we reached GO Neg confirmation, reset the filter */
+		brcmf_dbg(TRACE, "P2P: GO_NEG_PHASE status cleared\n");
+		clear_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status);
+		/* turn on mpc again if go nego is done */
+		config_af_params->mpc_onoff = 1;
+		/* minimize dwell time */
+		af_params->dwell_time = cpu_to_le32(P2P_AF_MIN_DWELL_TIME);
+		config_af_params->extra_listen = false;
+		break;
+	case P2P_PAF_INVITE_REQ:
+		config_af_params->search_channel = true;
+		p2p->next_af_subtype = act_frm->subtype + 1;
+		/* increase dwell time */
+		af_params->dwell_time = cpu_to_le32(P2P_AF_MED_DWELL_TIME);
+		break;
+	case P2P_PAF_INVITE_RSP:
+		/* minimize dwell time */
+		af_params->dwell_time = cpu_to_le32(P2P_AF_MIN_DWELL_TIME);
+		config_af_params->extra_listen = false;
+		break;
+	case P2P_PAF_DEVDIS_REQ:
+		config_af_params->search_channel = true;
+		p2p->next_af_subtype = act_frm->subtype + 1;
+		/* maximize dwell time to wait for RESP frame */
+		af_params->dwell_time = cpu_to_le32(P2P_AF_LONG_DWELL_TIME);
+		break;
+	case P2P_PAF_DEVDIS_RSP:
+		/* minimize dwell time */
+		af_params->dwell_time = cpu_to_le32(P2P_AF_MIN_DWELL_TIME);
+		config_af_params->extra_listen = false;
+		break;
+	case P2P_PAF_PROVDIS_REQ:
+		ie_len = le16_to_cpu(action_frame->len) -
+			 offsetof(struct brcmf_p2p_pub_act_frame, elts);
+		if (cfg80211_get_p2p_attr(&act_frm->elts[0], ie_len,
+					  IEEE80211_P2P_ATTR_GROUP_ID,
+					  NULL, 0) < 0)
+			config_af_params->search_channel = true;
+		config_af_params->mpc_onoff = 0;
+		p2p->next_af_subtype = act_frm->subtype + 1;
+		/* increase dwell time to wait for RESP frame */
+		af_params->dwell_time = cpu_to_le32(P2P_AF_MED_DWELL_TIME);
+		break;
+	case P2P_PAF_PROVDIS_RSP:
+		/* wpa_supplicant sends go nego req right after prov disc */
+		p2p->next_af_subtype = P2P_PAF_GON_REQ;
+		/* increase dwell time to MED level */
+		af_params->dwell_time = cpu_to_le32(P2P_AF_MED_DWELL_TIME);
+		config_af_params->extra_listen = false;
+		break;
+	default:
+		brcmf_err("Unknown p2p pub act frame subtype: %d\n",
+			  act_frm->subtype);
+		err = -EINVAL;
+	}
+	return err;
+}
+
+/**
+ * brcmf_p2p_send_action_frame() - send action frame.
+ *
+ * @cfg: driver private data for cfg80211 interface.
+ * @ndev: net device to transmit on.
+ * @af_params: configuration data for action frame.
+ */
+bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
+				 struct net_device *ndev,
+				 struct brcmf_fil_af_params_le *af_params)
+{
+	struct brcmf_p2p_info *p2p = &cfg->p2p;
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct brcmf_fil_action_frame_le *action_frame;
+	struct brcmf_config_af_params config_af_params;
+	struct afx_hdl *afx_hdl = &p2p->afx_hdl;
+	u16 action_frame_len;
+	bool ack = false;
+	u8 category;
+	u8 action;
+	s32 tx_retry;
+	s32 extra_listen_time;
+	uint delta_ms;
+
+	action_frame = &af_params->action_frame;
+	action_frame_len = le16_to_cpu(action_frame->len);
+
+	brcmf_p2p_print_actframe(true, action_frame->data, action_frame_len);
+
+	/* Add the default dwell time. Dwell time to stay off-channel */
+	/* to wait for a response action frame after transmitting a   */
+	/* GO Negotiation action frame                                */
+	af_params->dwell_time = cpu_to_le32(P2P_AF_DWELL_TIME);
+
+	category = action_frame->data[DOT11_ACTION_CAT_OFF];
+	action = action_frame->data[DOT11_ACTION_ACT_OFF];
+
+	/* initialize variables */
+	p2p->next_af_subtype = P2P_PAF_SUBTYPE_INVALID;
+	p2p->gon_req_action = false;
+
+	/* config parameters */
+	config_af_params.mpc_onoff = -1;
+	config_af_params.search_channel = false;
+	config_af_params.extra_listen = false;
+
+	if (brcmf_p2p_is_pub_action(action_frame->data, action_frame_len)) {
+		/* p2p public action frame process */
+		if (brcmf_p2p_pub_af_tx(cfg, af_params, &config_af_params)) {
+			/* Just send unknown subtype frame with */
+			/* default parameters.                  */
+			brcmf_err("P2P Public action frame, unknown subtype.\n");
+		}
+	} else if (brcmf_p2p_is_gas_action(action_frame->data,
+					   action_frame_len)) {
+		/* service discovery process */
+		if (action == P2PSD_ACTION_ID_GAS_IREQ ||
+		    action == P2PSD_ACTION_ID_GAS_CREQ) {
+			/* configure service discovery query frame */
+			config_af_params.search_channel = true;
+
+			/* save next af subtype to cancel */
+			/* remaining dwell time           */
+			p2p->next_af_subtype = action + 1;
+
+			af_params->dwell_time =
+				cpu_to_le32(P2P_AF_MED_DWELL_TIME);
+		} else if (action == P2PSD_ACTION_ID_GAS_IRESP ||
+			   action == P2PSD_ACTION_ID_GAS_CRESP) {
+			/* configure service discovery response frame */
+			af_params->dwell_time =
+				cpu_to_le32(P2P_AF_MIN_DWELL_TIME);
+		} else {
+			brcmf_err("Unknown action type: %d\n", action);
+			goto exit;
+		}
+	} else if (brcmf_p2p_is_p2p_action(action_frame->data,
+					   action_frame_len)) {
+		/* do not configure anything. it will be */
+		/* sent with a default configuration     */
+	} else {
+		brcmf_err("Unknown Frame: category 0x%x, action 0x%x\n",
+			  category, action);
+		return false;
+	}
+
+	/* if connecting on primary iface, sleep for a while before sending
+	 * af tx for VSDB
+	 */
+	if (test_bit(BRCMF_VIF_STATUS_CONNECTING,
+		     &p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->sme_state))
+		msleep(50);
+
+	/* if scan is ongoing, abort current scan. */
+	if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status))
+		brcmf_abort_scanning(cfg);
+
+	memcpy(afx_hdl->tx_dst_addr, action_frame->da, ETH_ALEN);
+
+	/* To make sure to send successfully action frame, turn off mpc */
+	if (config_af_params.mpc_onoff == 0)
+		brcmf_set_mpc(ifp, 0);
+
+	/* set status and destination address before sending af */
+	if (p2p->next_af_subtype != P2P_PAF_SUBTYPE_INVALID) {
+		/* set status to cancel the remaining dwell time in rx process */
+		set_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, &p2p->status);
+	}
+
+	p2p->af_sent_channel = 0;
+	set_bit(BRCMF_P2P_STATUS_SENDING_ACT_FRAME, &p2p->status);
+	/* validate channel and p2p ies */
+	if (config_af_params.search_channel &&
+	    IS_P2P_SOCIAL_CHANNEL(le32_to_cpu(af_params->channel)) &&
+	    p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif->saved_ie.probe_req_ie_len) {
+		afx_hdl = &p2p->afx_hdl;
+		afx_hdl->peer_listen_chan = le32_to_cpu(af_params->channel);
+
+		if (brcmf_p2p_af_searching_channel(p2p) ==
+							P2P_INVALID_CHANNEL) {
+			brcmf_err("Couldn't find peer's channel.\n");
+			goto exit;
+		}
+
+		/* Abort scan even for VSDB scenarios. Scan gets aborted in
+		 * firmware but after the check of piggyback algorithm. To take
+		 * care of the current piggyback algorithm, let's abort the
+		 * scan here itself.
+		 */
+		brcmf_notify_escan_complete(cfg, ifp, true, true);
+
+		/* update channel */
+		af_params->channel = cpu_to_le32(afx_hdl->peer_chan);
+	}
+
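+	/* Retry the transmit up to P2P_AF_TX_MAX_RETRY times; ack reflects
+	 * whether the firmware reported the frame as acknowledged. A detected
+	 * GO negotiation collision (block_gon_req_tx) stops the retries.
+	 */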
+	tx_retry = 0;
+	while (!p2p->block_gon_req_tx &&
+	       (ack == false) && (tx_retry < P2P_AF_TX_MAX_RETRY)) {
+		ack = !brcmf_p2p_tx_action_frame(p2p, af_params);
+		tx_retry++;
+	}
+	if (ack == false) {
+		brcmf_err("Failed to send Action Frame(retry %d)\n", tx_retry);
+		clear_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status);
+	}
+
+exit:
+	clear_bit(BRCMF_P2P_STATUS_SENDING_ACT_FRAME, &p2p->status);
+
+	/* WAR: sometimes dongle does not keep the dwell time of 'actframe'.
+	 * If we couldn't get the next action response frame and the dongle does
+	 * not keep the dwell time, go to listen state again to get next action
+	 * response frame.
+	 */
+	if (ack && config_af_params.extra_listen && !p2p->block_gon_req_tx &&
+	    test_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, &p2p->status) &&
+	    p2p->af_sent_channel == afx_hdl->my_listen_chan) {
+		delta_ms = jiffies_to_msecs(jiffies - p2p->af_tx_sent_jiffies);
+		if (le32_to_cpu(af_params->dwell_time) > delta_ms)
+			extra_listen_time = le32_to_cpu(af_params->dwell_time) -
+					    delta_ms;
+		else
+			extra_listen_time = 0;
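+		/* Spend whatever is left of the requested dwell time listening
+		 * again: the firmware listen below gets a 100ms margin and the
+		 * host waits another 100ms on top of that so it outlasts the
+		 * firmware listen period.
+		 */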
+		if (extra_listen_time > 50) {
+			set_bit(BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN,
+				&p2p->status);
+			brcmf_dbg(INFO, "Wait more time! actual af time:%d, calculated extra listen:%d\n",
+				  le32_to_cpu(af_params->dwell_time),
+				  extra_listen_time);
+			extra_listen_time += 100;
+			if (!brcmf_p2p_discover_listen(p2p,
+						       p2p->af_sent_channel,
+						       extra_listen_time)) {
+				unsigned long duration;
+
+				extra_listen_time += 100;
+				duration = msecs_to_jiffies(extra_listen_time);
+				wait_for_completion_timeout(&p2p->wait_next_af,
+							    duration);
+			}
+			clear_bit(BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN,
+				  &p2p->status);
+		}
+	}
+
+	if (p2p->block_gon_req_tx) {
+		/* if ack is true, the supplicant will wait more time (100ms),
+		 * so we return it as a success to get that extra time.
+		 */
+		p2p->block_gon_req_tx = false;
+		ack = true;
+	}
+
+	clear_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, &p2p->status);
+	/* if all done, turn mpc on again */
+	if (config_af_params.mpc_onoff == 1)
+		brcmf_set_mpc(ifp, 1);
+
+	return ack;
+}
+
+/**
+ * brcmf_p2p_notify_rx_mgmt_p2p_probereq() - Event handler for p2p probe req.
+ *
+ * @ifp: interface pointer for which event was received.
+ * @e: event message.
+ * @data: payload of event message (probe request).
+ */
+s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp,
+					  const struct brcmf_event_msg *e,
+					  void *data)
+{
+	struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+	struct brcmf_p2p_info *p2p = &cfg->p2p;
+	struct afx_hdl *afx_hdl = &p2p->afx_hdl;
+	struct brcmf_cfg80211_vif *vif = ifp->vif;
+	struct brcmf_rx_mgmt_data *rxframe = (struct brcmf_rx_mgmt_data *)data;
+	u16 chanspec = be16_to_cpu(rxframe->chanspec);
+	struct brcmu_chan ch;
+	u8 *mgmt_frame;
+	u32 mgmt_frame_len;
+	s32 freq;
+	u16 mgmt_type;
+
+	brcmf_dbg(INFO, "Enter: event %d reason %d\n", e->event_code,
+		  e->reason);
+
+	ch.chspec = be16_to_cpu(rxframe->chanspec);
+	cfg->d11inf.decchspec(&ch);
+
+	if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status) &&
+	    (ether_addr_equal(afx_hdl->tx_dst_addr, e->addr))) {
+		afx_hdl->peer_chan = ch.chnum;
+		brcmf_dbg(INFO, "PROBE REQUEST: Peer found, channel=%d\n",
+			  afx_hdl->peer_chan);
+		complete(&afx_hdl->act_frm_scan);
+	}
+
+	/* Firmware sends us two probe responses for each idx, one per bsscfg. */
+	/* At the moment anything but bsscfgidx 0 is passed up to supplicant   */
+	if (e->bsscfgidx == 0)
+		return 0;
+
+	/* Filter any P2P probe reqs arriving during the GO-NEG Phase */
+	if (test_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status)) {
+		brcmf_dbg(INFO, "Filtering P2P probe_req in GO-NEG phase\n");
+		return 0;
+	}
+
+	/* Check if wpa_supplicant has registered for this frame */
+	brcmf_dbg(INFO, "vif->mgmt_rx_reg %04x\n", vif->mgmt_rx_reg);
+	mgmt_type = (IEEE80211_STYPE_PROBE_REQ & IEEE80211_FCTL_STYPE) >> 4;
+	if ((vif->mgmt_rx_reg & BIT(mgmt_type)) == 0)
+		return 0;
+
+	mgmt_frame = (u8 *)(rxframe + 1);
+	mgmt_frame_len = e->datalen - sizeof(*rxframe);
+	freq = ieee80211_channel_to_frequency(ch.chnum,
+					      ch.band == BRCMU_CHAN_BAND_2G ?
+					      IEEE80211_BAND_2GHZ :
+					      IEEE80211_BAND_5GHZ);
+
+	cfg80211_rx_mgmt(&vif->wdev, freq, 0, mgmt_frame, mgmt_frame_len, 0);
+
+	brcmf_dbg(INFO, "mgmt_frame_len (%d) , e->datalen (%d), chanspec (%04x), freq (%d)\n",
+		  mgmt_frame_len, e->datalen, chanspec, freq);
+
+	return 0;
+}
+
+
+/**
+ * brcmf_p2p_get_current_chanspec() - Get current operation channel.
+ *
+ * @p2p: P2P specific data.
+ * @chanspec: chanspec to be returned.
+ */
+static void brcmf_p2p_get_current_chanspec(struct brcmf_p2p_info *p2p,
+					   u16 *chanspec)
+{
+	struct brcmf_if *ifp;
+	u8 mac_addr[ETH_ALEN];
+	struct brcmu_chan ch;
+	struct brcmf_bss_info_le *bi;
+	u8 *buf;
+
+	ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
+
+	if (brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BSSID, mac_addr,
+				   ETH_ALEN) == 0) {
+		buf = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
+		if (buf != NULL) {
+			*(__le32 *)buf = cpu_to_le32(WL_BSS_INFO_MAX);
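+			/* The first 32-bit word tells the firmware the buffer
+			 * size; the returned bss info follows right after it.
+			 */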
+			if (brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BSS_INFO,
+						   buf, WL_BSS_INFO_MAX) == 0) {
+				bi = (struct brcmf_bss_info_le *)(buf + 4);
+				*chanspec = le16_to_cpu(bi->chanspec);
+				kfree(buf);
+				return;
+			}
+			kfree(buf);
+		}
+	}
+	/* Use default channel for P2P */
+	ch.chnum = BRCMF_P2P_TEMP_CHAN;
+	ch.bw = BRCMU_CHAN_BW_20;
+	p2p->cfg->d11inf.encchspec(&ch);
+	*chanspec = ch.chspec;
+}
+
+/**
+ * brcmf_p2p_ifchange() - change a P2P interface role.
+ *
+ * @cfg: driver private data for cfg80211 interface.
+ * @if_type: interface type to change the connection bsscfg to.
+ *
+ * Returns 0 on success.
+ */
+int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg,
+		       enum brcmf_fil_p2p_if_types if_type)
+{
+	struct brcmf_p2p_info *p2p = &cfg->p2p;
+	struct brcmf_cfg80211_vif *vif;
+	struct brcmf_fil_p2p_if_le if_request;
+	s32 err;
+	u16 chanspec;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	vif = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif;
+	if (!vif) {
+		brcmf_err("vif for P2PAPI_BSSCFG_PRIMARY does not exist\n");
+		return -EPERM;
+	}
+	brcmf_notify_escan_complete(cfg, vif->ifp, true, true);
+	vif = p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif;
+	if (!vif) {
+		brcmf_err("vif for P2PAPI_BSSCFG_CONNECTION does not exist\n");
+		return -EPERM;
+	}
+	brcmf_set_mpc(vif->ifp, 0);
+
+	/* In concurrency case, STA may be already associated in a particular */
+	/* channel. so retrieve the current channel of primary interface and  */
+	/* then start the virtual interface on that.                          */
+	brcmf_p2p_get_current_chanspec(p2p, &chanspec);
+
+	if_request.type = cpu_to_le16((u16)if_type);
+	if_request.chspec = cpu_to_le16(chanspec);
+	memcpy(if_request.addr, p2p->int_addr, sizeof(if_request.addr));
+
+	brcmf_cfg80211_arm_vif_event(cfg, vif);
+	err = brcmf_fil_iovar_data_set(vif->ifp, "p2p_ifupd", &if_request,
+				       sizeof(if_request));
+	if (err) {
+		brcmf_err("p2p_ifupd FAILED, err=%d\n", err);
+		brcmf_cfg80211_arm_vif_event(cfg, NULL);
+		return err;
+	}
+	err = brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_CHANGE,
+						    msecs_to_jiffies(1500));
+	brcmf_cfg80211_arm_vif_event(cfg, NULL);
+	if (!err)  {
+		brcmf_err("No BRCMF_E_IF_CHANGE event received\n");
+		return -EIO;
+	}
+
+	err = brcmf_fil_cmd_int_set(vif->ifp, BRCMF_C_SET_SCB_TIMEOUT,
+				    BRCMF_SCB_TIMEOUT_VALUE);
+
+	return err;
+}
+
+static int brcmf_p2p_request_p2p_if(struct brcmf_p2p_info *p2p,
+				    struct brcmf_if *ifp, u8 ea[ETH_ALEN],
+				    enum brcmf_fil_p2p_if_types iftype)
+{
+	struct brcmf_fil_p2p_if_le if_request;
+	int err;
+	u16 chanspec;
+
+	/* we need a default channel */
+	brcmf_p2p_get_current_chanspec(p2p, &chanspec);
+
+	/* fill the firmware request */
+	memcpy(if_request.addr, ea, ETH_ALEN);
+	if_request.type = cpu_to_le16((u16)iftype);
+	if_request.chspec = cpu_to_le16(chanspec);
+
+	err = brcmf_fil_iovar_data_set(ifp, "p2p_ifadd", &if_request,
+				       sizeof(if_request));
+	if (err)
+		return err;
+
+	return err;
+}
+
+static int brcmf_p2p_disable_p2p_if(struct brcmf_cfg80211_vif *vif)
+{
+	struct brcmf_cfg80211_info *cfg = wdev_to_cfg(&vif->wdev);
+	struct net_device *pri_ndev = cfg_to_ndev(cfg);
+	struct brcmf_if *ifp = netdev_priv(pri_ndev);
+	u8 *addr = vif->wdev.netdev->dev_addr;
+
+	return brcmf_fil_iovar_data_set(ifp, "p2p_ifdis", addr, ETH_ALEN);
+}
+
+static int brcmf_p2p_release_p2p_if(struct brcmf_cfg80211_vif *vif)
+{
+	struct brcmf_cfg80211_info *cfg = wdev_to_cfg(&vif->wdev);
+	struct net_device *pri_ndev = cfg_to_ndev(cfg);
+	struct brcmf_if *ifp = netdev_priv(pri_ndev);
+	u8 *addr = vif->wdev.netdev->dev_addr;
+
+	return brcmf_fil_iovar_data_set(ifp, "p2p_ifdel", addr, ETH_ALEN);
+}
+
+/**
+ * brcmf_p2p_create_p2pdev() - create a P2P_DEVICE virtual interface.
+ *
+ * @p2p: P2P specific data.
+ * @wiphy: wiphy device of new interface.
+ * @addr: mac address for this new interface.
+ */
+static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p,
+						    struct wiphy *wiphy,
+						    u8 *addr)
+{
+	struct brcmf_cfg80211_vif *p2p_vif;
+	struct brcmf_if *p2p_ifp;
+	struct brcmf_if *pri_ifp;
+	int err;
+	u32 bssidx;
+
+	if (p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif)
+		return ERR_PTR(-ENOSPC);
+
+	p2p_vif = brcmf_alloc_vif(p2p->cfg, NL80211_IFTYPE_P2P_DEVICE,
+				  false);
+	if (IS_ERR(p2p_vif)) {
+		brcmf_err("could not create discovery vif\n");
+		return (struct wireless_dev *)p2p_vif;
+	}
+
+	pri_ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
+	brcmf_p2p_generate_bss_mac(p2p, addr);
+	brcmf_p2p_set_firmware(pri_ifp, p2p->dev_addr);
+
+	brcmf_cfg80211_arm_vif_event(p2p->cfg, p2p_vif);
+	brcmf_fweh_p2pdev_setup(pri_ifp, true);
+
+	/* Initialize P2P Discovery in the firmware */
+	err = brcmf_fil_iovar_int_set(pri_ifp, "p2p_disc", 1);
+	if (err < 0) {
+		brcmf_err("set p2p_disc error\n");
+		brcmf_fweh_p2pdev_setup(pri_ifp, false);
+		brcmf_cfg80211_arm_vif_event(p2p->cfg, NULL);
+		goto fail;
+	}
+
+	/* wait for firmware event */
+	err = brcmf_cfg80211_wait_vif_event_timeout(p2p->cfg, BRCMF_E_IF_ADD,
+						    msecs_to_jiffies(1500));
+	brcmf_cfg80211_arm_vif_event(p2p->cfg, NULL);
+	brcmf_fweh_p2pdev_setup(pri_ifp, false);
+	if (!err) {
+		brcmf_err("timeout occurred\n");
+		err = -EIO;
+		goto fail;
+	}
+
+	/* discovery interface created */
+	p2p_ifp = p2p_vif->ifp;
+	p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = p2p_vif;
+	memcpy(p2p_ifp->mac_addr, p2p->dev_addr, ETH_ALEN);
+	memcpy(&p2p_vif->wdev.address, p2p->dev_addr, sizeof(p2p->dev_addr));
+
+	/* verify bsscfg index for P2P discovery */
+	err = brcmf_fil_iovar_int_get(pri_ifp, "p2p_dev", &bssidx);
+	if (err < 0) {
+		brcmf_err("retrieving discover bsscfg index failed\n");
+		goto fail;
+	}
+
+	WARN_ON(p2p_ifp->bssidx != bssidx);
+
+	init_completion(&p2p->send_af_done);
+	INIT_WORK(&p2p->afx_hdl.afx_work, brcmf_p2p_afx_handler);
+	init_completion(&p2p->afx_hdl.act_frm_scan);
+	init_completion(&p2p->wait_next_af);
+
+	return &p2p_vif->wdev;
+
+fail:
+	brcmf_free_vif(p2p_vif);
+	return ERR_PTR(err);
+}
+
+/**
+ * brcmf_p2p_add_vif() - create a new P2P virtual interface.
+ *
+ * @wiphy: wiphy device of new interface.
+ * @name: name of the new interface.
+ * @name_assign_type: origin of the interface name
+ * @type: nl80211 interface type.
+ * @flags: not used.
+ * @params: contains mac address for P2P device.
+ */
+struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
+				       unsigned char name_assign_type,
+				       enum nl80211_iftype type, u32 *flags,
+				       struct vif_params *params)
+{
+	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+	struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
+	struct brcmf_cfg80211_vif *vif;
+	enum brcmf_fil_p2p_if_types iftype;
+	int err;
+
+	if (brcmf_cfg80211_vif_event_armed(cfg))
+		return ERR_PTR(-EBUSY);
+
+	brcmf_dbg(INFO, "adding vif \"%s\" (type=%d)\n", name, type);
+
+	switch (type) {
+	case NL80211_IFTYPE_P2P_CLIENT:
+		iftype = BRCMF_FIL_P2P_IF_CLIENT;
+		break;
+	case NL80211_IFTYPE_P2P_GO:
+		iftype = BRCMF_FIL_P2P_IF_GO;
+		break;
+	case NL80211_IFTYPE_P2P_DEVICE:
+		return brcmf_p2p_create_p2pdev(&cfg->p2p, wiphy,
+					       params->macaddr);
+	default:
+		return ERR_PTR(-EOPNOTSUPP);
+	}
+
+	vif = brcmf_alloc_vif(cfg, type, false);
+	if (IS_ERR(vif))
+		return (struct wireless_dev *)vif;
+	brcmf_cfg80211_arm_vif_event(cfg, vif);
+
+	err = brcmf_p2p_request_p2p_if(&cfg->p2p, ifp, cfg->p2p.int_addr,
+				       iftype);
+	if (err) {
+		brcmf_cfg80211_arm_vif_event(cfg, NULL);
+		goto fail;
+	}
+
+	/* wait for firmware event */
+	err = brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_ADD,
+						    msecs_to_jiffies(1500));
+	brcmf_cfg80211_arm_vif_event(cfg, NULL);
+	if (!err) {
+		brcmf_err("timeout occurred\n");
+		err = -EIO;
+		goto fail;
+	}
+
+	/* interface created in firmware */
+	ifp = vif->ifp;
+	if (!ifp) {
+		brcmf_err("no if pointer provided\n");
+		err = -ENOENT;
+		goto fail;
+	}
+
+	strncpy(ifp->ndev->name, name, sizeof(ifp->ndev->name) - 1);
+	ifp->ndev->name_assign_type = name_assign_type;
+	err = brcmf_net_attach(ifp, true);
+	if (err) {
+		brcmf_err("Registering netdevice failed\n");
+		goto fail;
+	}
+
+	cfg->p2p.bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = vif;
+	/* Disable firmware roaming for P2P interface  */
+	brcmf_fil_iovar_int_set(ifp, "roam_off", 1);
+	if (iftype == BRCMF_FIL_P2P_IF_GO) {
+		/* set station timeout for p2p */
+		brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCB_TIMEOUT,
+				      BRCMF_SCB_TIMEOUT_VALUE);
+	}
+	return &ifp->vif->wdev;
+
+fail:
+	brcmf_free_vif(vif);
+	return ERR_PTR(err);
+}
+
+/**
+ * brcmf_p2p_del_vif() - delete a P2P virtual interface.
+ *
+ * @wiphy: wiphy device of interface.
+ * @wdev: wireless device of interface.
+ */
+int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+	struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
+	struct brcmf_p2p_info *p2p = &cfg->p2p;
+	struct brcmf_cfg80211_vif *vif;
+	unsigned long jiffie_timeout = msecs_to_jiffies(1500);
+	bool wait_for_disable = false;
+	int err;
+
+	brcmf_dbg(TRACE, "delete P2P vif\n");
+	vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
+
+	brcmf_cfg80211_arm_vif_event(cfg, vif);
+	switch (vif->wdev.iftype) {
+	case NL80211_IFTYPE_P2P_CLIENT:
+		if (test_bit(BRCMF_VIF_STATUS_DISCONNECTING, &vif->sme_state))
+			wait_for_disable = true;
+		break;
+
+	case NL80211_IFTYPE_P2P_GO:
+		if (!brcmf_p2p_disable_p2p_if(vif))
+			wait_for_disable = true;
+		break;
+
+	case NL80211_IFTYPE_P2P_DEVICE:
+		if (!p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif)
+			return 0;
+		brcmf_p2p_cancel_remain_on_channel(vif->ifp);
+		brcmf_p2p_deinit_discovery(p2p);
+		break;
+
+	default:
+		return -ENOTSUPP;
+	}
+
+	clear_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status);
+	brcmf_dbg(INFO, "P2P: GO_NEG_PHASE status cleared\n");
+
+	if (wait_for_disable)
+		wait_for_completion_timeout(&cfg->vif_disabled,
+					    msecs_to_jiffies(500));
+
+	err = 0;
+	if (vif->wdev.iftype != NL80211_IFTYPE_P2P_DEVICE) {
+		brcmf_vif_clear_mgmt_ies(vif);
+		err = brcmf_p2p_release_p2p_if(vif);
+	}
+	if (!err) {
+		/* wait for firmware event */
+		err = brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_DEL,
+							    jiffie_timeout);
+		if (!err)
+			err = -EIO;
+		else
+			err = 0;
+	}
+	if (err)
+		brcmf_remove_interface(vif->ifp);
+
+	brcmf_cfg80211_arm_vif_event(cfg, NULL);
+	if (vif->wdev.iftype != NL80211_IFTYPE_P2P_DEVICE)
+		p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = NULL;
+
+	return err;
+}
+
+void brcmf_p2p_ifp_removed(struct brcmf_if *ifp)
+{
+	struct brcmf_cfg80211_info *cfg;
+	struct brcmf_cfg80211_vif *vif;
+
+	brcmf_dbg(INFO, "P2P: device interface removed\n");
+	vif = ifp->vif;
+	cfg = wdev_to_cfg(&vif->wdev);
+	cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
+	rtnl_lock();
+	cfg80211_unregister_wdev(&vif->wdev);
+	rtnl_unlock();
+	brcmf_free_vif(vif);
+}
+
+int brcmf_p2p_start_device(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+	struct brcmf_p2p_info *p2p = &cfg->p2p;
+	struct brcmf_cfg80211_vif *vif;
+	int err;
+
+	vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
+	mutex_lock(&cfg->usr_sync);
+	err = brcmf_p2p_enable_discovery(p2p);
+	if (!err)
+		set_bit(BRCMF_VIF_STATUS_READY, &vif->sme_state);
+	mutex_unlock(&cfg->usr_sync);
+	return err;
+}
+
+void brcmf_p2p_stop_device(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+	struct brcmf_p2p_info *p2p = &cfg->p2p;
+	struct brcmf_cfg80211_vif *vif;
+
+	vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
+	/* This call can be the result of the unregister_wdev call. In that
+	 * case we don't want to do anything anymore. Just return. The config
+	 * vif will have been cleared at this point.
+	 */
+	if (p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif == vif) {
+		mutex_lock(&cfg->usr_sync);
+		/* Set the discovery state to SCAN */
+		(void)brcmf_p2p_set_discover_state(vif->ifp,
+						   WL_P2P_DISC_ST_SCAN, 0, 0);
+		brcmf_abort_scanning(cfg);
+		clear_bit(BRCMF_VIF_STATUS_READY, &vif->sme_state);
+		mutex_unlock(&cfg->usr_sync);
+	}
+}
+
+/**
+ * brcmf_p2p_attach() - attach for P2P.
+ *
+ * @cfg: driver private data for cfg80211 interface.
+ * @p2pdev_forced: create p2p device interface at attach.
+ */
+s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg, bool p2pdev_forced)
+{
+	struct brcmf_p2p_info *p2p;
+	struct brcmf_if *pri_ifp;
+	s32 err = 0;
+	void *err_ptr;
+
+	p2p = &cfg->p2p;
+	p2p->cfg = cfg;
+
+	pri_ifp = brcmf_get_ifp(cfg->pub, 0);
+	p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif = pri_ifp->vif;
+
+	if (p2pdev_forced) {
+		err_ptr = brcmf_p2p_create_p2pdev(p2p, NULL, NULL);
+		if (IS_ERR(err_ptr)) {
+			brcmf_err("P2P device creation failed.\n");
+			err = PTR_ERR(err_ptr);
+		}
+	} else {
+		p2p->p2pdev_dynamically = true;
+	}
+	return err;
+}
+
+/**
+ * brcmf_p2p_detach() - detach P2P.
+ *
+ * @p2p: P2P specific data.
+ */
+void brcmf_p2p_detach(struct brcmf_p2p_info *p2p)
+{
+	struct brcmf_cfg80211_vif *vif;
+
+	vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+	if (vif != NULL) {
+		brcmf_p2p_cancel_remain_on_channel(vif->ifp);
+		brcmf_p2p_deinit_discovery(p2p);
+		brcmf_remove_interface(vif->ifp);
+	}
+	/* just set it all to zero */
+	memset(p2p, 0, sizeof(*p2p));
+}
+
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.h b/drivers/net/wireless/brcm80211/brcmfmac/p2p.h
new file mode 100644
index 0000000..5d49059
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.h
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef WL_CFGP2P_H_
+#define WL_CFGP2P_H_
+
+#include <net/cfg80211.h>
+
+struct brcmf_cfg80211_info;
+
+/**
+ * enum p2p_bss_type - different type of BSS configurations.
+ *
+ * @P2PAPI_BSSCFG_PRIMARY: maps to driver's primary bsscfg.
+ * @P2PAPI_BSSCFG_DEVICE: maps to driver's P2P device discovery bsscfg.
+ * @P2PAPI_BSSCFG_CONNECTION: maps to driver's P2P connection bsscfg.
+ * @P2PAPI_BSSCFG_MAX: used for range checking.
+ */
+enum p2p_bss_type {
+	P2PAPI_BSSCFG_PRIMARY, /* maps to driver's primary bsscfg */
+	P2PAPI_BSSCFG_DEVICE, /* maps to driver's P2P device discovery bsscfg */
+	P2PAPI_BSSCFG_CONNECTION, /* maps to driver's P2P connection bsscfg */
+	P2PAPI_BSSCFG_MAX
+};
+
+/**
+ * struct p2p_bss - peer-to-peer bss related information.
+ *
+ * @vif: virtual interface of this P2P bss.
+ * @private_data: TBD
+ */
+struct p2p_bss {
+	struct brcmf_cfg80211_vif *vif;
+	void *private_data;
+};
+
+/**
+ * enum brcmf_p2p_status - P2P specific dongle status.
+ *
+ * @BRCMF_P2P_STATUS_IF_ADD: peer-to-peer vif add sent to dongle.
+ * @BRCMF_P2P_STATUS_IF_DEL: NOT-USED?
+ * @BRCMF_P2P_STATUS_IF_DELETING: peer-to-peer vif delete sent to dongle.
+ * @BRCMF_P2P_STATUS_IF_CHANGING: peer-to-peer vif change sent to dongle.
+ * @BRCMF_P2P_STATUS_IF_CHANGED: peer-to-peer vif change completed on dongle.
+ * @BRCMF_P2P_STATUS_ACTION_TX_COMPLETED: action frame tx completed.
+ * @BRCMF_P2P_STATUS_ACTION_TX_NOACK: action frame tx not acked.
+ * @BRCMF_P2P_STATUS_GO_NEG_PHASE: P2P GO negotiation ongoing.
+ * @BRCMF_P2P_STATUS_DISCOVER_LISTEN: P2P listen, remaining on channel.
+ * @BRCMF_P2P_STATUS_SENDING_ACT_FRAME: In the process of sending action frame.
+ * @BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN: extra listen time for af tx.
+ * @BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME: waiting for action frame response.
+ * @BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL: search channel for AF active.
+ */
+enum brcmf_p2p_status {
+	BRCMF_P2P_STATUS_ENABLED,
+	BRCMF_P2P_STATUS_IF_ADD,
+	BRCMF_P2P_STATUS_IF_DEL,
+	BRCMF_P2P_STATUS_IF_DELETING,
+	BRCMF_P2P_STATUS_IF_CHANGING,
+	BRCMF_P2P_STATUS_IF_CHANGED,
+	BRCMF_P2P_STATUS_ACTION_TX_COMPLETED,
+	BRCMF_P2P_STATUS_ACTION_TX_NOACK,
+	BRCMF_P2P_STATUS_GO_NEG_PHASE,
+	BRCMF_P2P_STATUS_DISCOVER_LISTEN,
+	BRCMF_P2P_STATUS_SENDING_ACT_FRAME,
+	BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN,
+	BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME,
+	BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL
+};
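+
+/* The values above are used as bit numbers in the @status field of
+ * struct brcmf_p2p_info and are tested/changed with test_bit(), set_bit()
+ * and clear_bit().
+ */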
+
+/**
+ * struct afx_hdl - action frame off channel storage.
+ *
+ * @afx_work: worker thread for searching channel
+ * @act_frm_scan: thread synchronizing struct.
+ * @is_active: channel searching active.
+ * @peer_chan: channel on which the peer was found.
+ * @is_listen: sets mode for afx worker.
+ * @my_listen_chan: this device's own listen channel.
+ * @peer_listen_chan: remote peer's listen channel.
+ * @tx_dst_addr: mac address where tx af should be sent to.
+ */
+struct afx_hdl {
+	struct work_struct afx_work;
+	struct completion act_frm_scan;
+	bool is_active;
+	s32 peer_chan;
+	bool is_listen;
+	u16 my_listen_chan;
+	u16 peer_listen_chan;
+	u8 tx_dst_addr[ETH_ALEN];
+};
+
+/**
+ * struct brcmf_p2p_info - p2p specific driver information.
+ *
+ * @cfg: driver private data for cfg80211 interface.
+ * @status: status of P2P (see enum brcmf_p2p_status).
+ * @dev_addr: P2P device address.
+ * @int_addr: P2P interface address.
+ * @bss_idx: information for the P2P bss types.
+ * @listen_timer: timer for @WL_P2P_DISC_ST_LISTEN discover state.
+ * @ssid: ssid for P2P GO.
+ * @listen_channel: channel for @WL_P2P_DISC_ST_LISTEN discover state.
+ * @remain_on_channel: contains copy of struct used by cfg80211.
+ * @remain_on_channel_cookie: cookie counter for remain on channel cmd
+ * @next_af_subtype: expected action frame subtype.
+ * @send_af_done: indication that action frame tx is complete.
+ * @afx_hdl: action frame search handler info.
+ * @af_sent_channel: channel action frame is sent.
+ * @af_tx_sent_jiffies: jiffies time when af tx was transmitted.
+ * @wait_next_af: thread synchronizing struct.
+ * @gon_req_action: about to send go negotiation request frame.
+ * @block_gon_req_tx: drop tx go negotiation request frame.
+ * @p2pdev_dynamically: true when the p2p device interface is created by the
+ *	supplicant instead of being forced at module load.
+ */
+struct brcmf_p2p_info {
+	struct brcmf_cfg80211_info *cfg;
+	unsigned long status;
+	u8 dev_addr[ETH_ALEN];
+	u8 int_addr[ETH_ALEN];
+	struct p2p_bss bss_idx[P2PAPI_BSSCFG_MAX];
+	struct timer_list listen_timer;
+	struct brcmf_ssid ssid;
+	u8 listen_channel;
+	struct ieee80211_channel remain_on_channel;
+	u32 remain_on_channel_cookie;
+	u8 next_af_subtype;
+	struct completion send_af_done;
+	struct afx_hdl afx_hdl;
+	u32 af_sent_channel;
+	unsigned long af_tx_sent_jiffies;
+	struct completion wait_next_af;
+	bool gon_req_action;
+	bool block_gon_req_tx;
+	bool p2pdev_dynamically;
+};
+
+s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg, bool p2pdev_forced);
+void brcmf_p2p_detach(struct brcmf_p2p_info *p2p);
+struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
+				       unsigned char name_assign_type,
+				       enum nl80211_iftype type, u32 *flags,
+				       struct vif_params *params);
+int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev);
+int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg,
+		       enum brcmf_fil_p2p_if_types if_type);
+void brcmf_p2p_ifp_removed(struct brcmf_if *ifp);
+int brcmf_p2p_start_device(struct wiphy *wiphy, struct wireless_dev *wdev);
+void brcmf_p2p_stop_device(struct wiphy *wiphy, struct wireless_dev *wdev);
+int brcmf_p2p_scan_prep(struct wiphy *wiphy,
+			struct cfg80211_scan_request *request,
+			struct brcmf_cfg80211_vif *vif);
+int brcmf_p2p_remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
+				struct ieee80211_channel *channel,
+				unsigned int duration, u64 *cookie);
+int brcmf_p2p_notify_listen_complete(struct brcmf_if *ifp,
+				     const struct brcmf_event_msg *e,
+				     void *data);
+void brcmf_p2p_cancel_remain_on_channel(struct brcmf_if *ifp);
+int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
+				     const struct brcmf_event_msg *e,
+				     void *data);
+int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp,
+					const struct brcmf_event_msg *e,
+					void *data);
+bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
+				 struct net_device *ndev,
+				 struct brcmf_fil_af_params_le *af_params);
+bool brcmf_p2p_scan_finding_common_channel(struct brcmf_cfg80211_info *cfg,
+					   struct brcmf_bss_info_le *bi);
+s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp,
+					  const struct brcmf_event_msg *e,
+					  void *data);
+#endif /* WL_CFGP2P_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
new file mode 100644
index 0000000..83d8042
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
@@ -0,0 +1,2107 @@
+/* Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/bcma/bcma.h>
+#include <linux/sched.h>
+#include <asm/unaligned.h>
+
+#include <soc.h>
+#include <chipcommon.h>
+#include <brcmu_utils.h>
+#include <brcmu_wifi.h>
+#include <brcm_hw_ids.h>
+
+#include "debug.h"
+#include "bus.h"
+#include "commonring.h"
+#include "msgbuf.h"
+#include "pcie.h"
+#include "firmware.h"
+#include "chip.h"
+
+
+enum brcmf_pcie_state {
+	BRCMFMAC_PCIE_STATE_DOWN,
+	BRCMFMAC_PCIE_STATE_UP
+};
+
+
+#define BRCMF_PCIE_43602_FW_NAME		"brcm/brcmfmac43602-pcie.bin"
+#define BRCMF_PCIE_43602_NVRAM_NAME		"brcm/brcmfmac43602-pcie.txt"
+#define BRCMF_PCIE_4350_FW_NAME			"brcm/brcmfmac4350-pcie.bin"
+#define BRCMF_PCIE_4350_NVRAM_NAME		"brcm/brcmfmac4350-pcie.txt"
+#define BRCMF_PCIE_4356_FW_NAME			"brcm/brcmfmac4356-pcie.bin"
+#define BRCMF_PCIE_4356_NVRAM_NAME		"brcm/brcmfmac4356-pcie.txt"
+#define BRCMF_PCIE_43570_FW_NAME		"brcm/brcmfmac43570-pcie.bin"
+#define BRCMF_PCIE_43570_NVRAM_NAME		"brcm/brcmfmac43570-pcie.txt"
+#define BRCMF_PCIE_4358_FW_NAME			"brcm/brcmfmac4358-pcie.bin"
+#define BRCMF_PCIE_4358_NVRAM_NAME		"brcm/brcmfmac4358-pcie.txt"
+#define BRCMF_PCIE_4365_FW_NAME			"brcm/brcmfmac4365b-pcie.bin"
+#define BRCMF_PCIE_4365_NVRAM_NAME		"brcm/brcmfmac4365b-pcie.txt"
+#define BRCMF_PCIE_4366_FW_NAME			"brcm/brcmfmac4366b-pcie.bin"
+#define BRCMF_PCIE_4366_NVRAM_NAME		"brcm/brcmfmac4366b-pcie.txt"
+#define BRCMF_PCIE_4371_FW_NAME			"brcm/brcmfmac4371-pcie.bin"
+#define BRCMF_PCIE_4371_NVRAM_NAME		"brcm/brcmfmac4371-pcie.txt"
+
+#define BRCMF_PCIE_FW_UP_TIMEOUT		2000 /* msec */
+
+#define BRCMF_PCIE_TCM_MAP_SIZE			(4096 * 1024)
+#define BRCMF_PCIE_REG_MAP_SIZE			(32 * 1024)
+
+/* backplane address space accessed by BAR0 */
+#define	BRCMF_PCIE_BAR0_WINDOW			0x80
+#define BRCMF_PCIE_BAR0_REG_SIZE		0x1000
+#define	BRCMF_PCIE_BAR0_WRAPPERBASE		0x70
+
+#define BRCMF_PCIE_BAR0_WRAPBASE_DMP_OFFSET	0x1000
+#define BRCMF_PCIE_BARO_PCIE_ENUM_OFFSET	0x2000
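+
+/* The BAR0 window register lives in PCI config space (offset 0x80) and
+ * selects which backplane region is currently mapped through BAR0.
+ */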
+
+#define BRCMF_PCIE_ARMCR4REG_BANKIDX		0x40
+#define BRCMF_PCIE_ARMCR4REG_BANKPDA		0x4C
+
+#define BRCMF_PCIE_REG_INTSTATUS		0x90
+#define BRCMF_PCIE_REG_INTMASK			0x94
+#define BRCMF_PCIE_REG_SBMBX			0x98
+
+#define BRCMF_PCIE_REG_LINK_STATUS_CTRL		0xBC
+
+#define BRCMF_PCIE_PCIE2REG_INTMASK		0x24
+#define BRCMF_PCIE_PCIE2REG_MAILBOXINT		0x48
+#define BRCMF_PCIE_PCIE2REG_MAILBOXMASK		0x4C
+#define BRCMF_PCIE_PCIE2REG_CONFIGADDR		0x120
+#define BRCMF_PCIE_PCIE2REG_CONFIGDATA		0x124
+#define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX		0x140
+
+#define BRCMF_PCIE_GENREV1			1
+#define BRCMF_PCIE_GENREV2			2
+
+#define BRCMF_PCIE2_INTA			0x01
+#define BRCMF_PCIE2_INTB			0x02
+
+#define BRCMF_PCIE_INT_0			0x01
+#define BRCMF_PCIE_INT_1			0x02
+#define BRCMF_PCIE_INT_DEF			(BRCMF_PCIE_INT_0 | \
+						 BRCMF_PCIE_INT_1)
+
+#define BRCMF_PCIE_MB_INT_FN0_0			0x0100
+#define BRCMF_PCIE_MB_INT_FN0_1			0x0200
+#define	BRCMF_PCIE_MB_INT_D2H0_DB0		0x10000
+#define	BRCMF_PCIE_MB_INT_D2H0_DB1		0x20000
+#define	BRCMF_PCIE_MB_INT_D2H1_DB0		0x40000
+#define	BRCMF_PCIE_MB_INT_D2H1_DB1		0x80000
+#define	BRCMF_PCIE_MB_INT_D2H2_DB0		0x100000
+#define	BRCMF_PCIE_MB_INT_D2H2_DB1		0x200000
+#define	BRCMF_PCIE_MB_INT_D2H3_DB0		0x400000
+#define	BRCMF_PCIE_MB_INT_D2H3_DB1		0x800000
+
+#define BRCMF_PCIE_MB_INT_D2H_DB		(BRCMF_PCIE_MB_INT_D2H0_DB0 | \
+						 BRCMF_PCIE_MB_INT_D2H0_DB1 | \
+						 BRCMF_PCIE_MB_INT_D2H1_DB0 | \
+						 BRCMF_PCIE_MB_INT_D2H1_DB1 | \
+						 BRCMF_PCIE_MB_INT_D2H2_DB0 | \
+						 BRCMF_PCIE_MB_INT_D2H2_DB1 | \
+						 BRCMF_PCIE_MB_INT_D2H3_DB0 | \
+						 BRCMF_PCIE_MB_INT_D2H3_DB1)
+
+#define BRCMF_PCIE_MIN_SHARED_VERSION		5
+#define BRCMF_PCIE_MAX_SHARED_VERSION		5
+#define BRCMF_PCIE_SHARED_VERSION_MASK		0x00FF
+#define BRCMF_PCIE_SHARED_DMA_INDEX		0x10000
+#define BRCMF_PCIE_SHARED_DMA_2B_IDX		0x100000
+
+#define BRCMF_PCIE_FLAGS_HTOD_SPLIT		0x4000
+#define BRCMF_PCIE_FLAGS_DTOH_SPLIT		0x8000
+
+#define BRCMF_SHARED_MAX_RXBUFPOST_OFFSET	34
+#define BRCMF_SHARED_RING_BASE_OFFSET		52
+#define BRCMF_SHARED_RX_DATAOFFSET_OFFSET	36
+#define BRCMF_SHARED_CONSOLE_ADDR_OFFSET	20
+#define BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET	40
+#define BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET	44
+#define BRCMF_SHARED_RING_INFO_ADDR_OFFSET	48
+#define BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET	52
+#define BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET	56
+#define BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET	64
+#define BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET	68
+
+#define BRCMF_RING_H2D_RING_COUNT_OFFSET	0
+#define BRCMF_RING_D2H_RING_COUNT_OFFSET	1
+#define BRCMF_RING_H2D_RING_MEM_OFFSET		4
+#define BRCMF_RING_H2D_RING_STATE_OFFSET	8
+
+#define BRCMF_RING_MEM_BASE_ADDR_OFFSET		8
+#define BRCMF_RING_MAX_ITEM_OFFSET		4
+#define BRCMF_RING_LEN_ITEMS_OFFSET		6
+#define BRCMF_RING_MEM_SZ			16
+#define BRCMF_RING_STATE_SZ			8
+
+#define BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET	4
+#define BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET	8
+#define BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET	12
+#define BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET	16
+#define BRCMF_SHARED_RING_H2D_WP_HADDR_OFFSET	20
+#define BRCMF_SHARED_RING_H2D_RP_HADDR_OFFSET	28
+#define BRCMF_SHARED_RING_D2H_WP_HADDR_OFFSET	36
+#define BRCMF_SHARED_RING_D2H_RP_HADDR_OFFSET	44
+#define BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET	0
+#define BRCMF_SHARED_RING_MAX_SUB_QUEUES	52
+
+#define BRCMF_DEF_MAX_RXBUFPOST			255
+
+#define BRCMF_CONSOLE_BUFADDR_OFFSET		8
+#define BRCMF_CONSOLE_BUFSIZE_OFFSET		12
+#define BRCMF_CONSOLE_WRITEIDX_OFFSET		16
+
+#define BRCMF_DMA_D2H_SCRATCH_BUF_LEN		8
+#define BRCMF_DMA_D2H_RINGUPD_BUF_LEN		1024
+
+#define BRCMF_D2H_DEV_D3_ACK			0x00000001
+#define BRCMF_D2H_DEV_DS_ENTER_REQ		0x00000002
+#define BRCMF_D2H_DEV_DS_EXIT_NOTE		0x00000004
+
+#define BRCMF_H2D_HOST_D3_INFORM		0x00000001
+#define BRCMF_H2D_HOST_DS_ACK			0x00000002
+#define BRCMF_H2D_HOST_D0_INFORM_IN_USE		0x00000008
+#define BRCMF_H2D_HOST_D0_INFORM		0x00000010
+
+#define BRCMF_PCIE_MBDATA_TIMEOUT		2000
+
+#define BRCMF_PCIE_CFGREG_STATUS_CMD		0x4
+#define BRCMF_PCIE_CFGREG_PM_CSR		0x4C
+#define BRCMF_PCIE_CFGREG_MSI_CAP		0x58
+#define BRCMF_PCIE_CFGREG_MSI_ADDR_L		0x5C
+#define BRCMF_PCIE_CFGREG_MSI_ADDR_H		0x60
+#define BRCMF_PCIE_CFGREG_MSI_DATA		0x64
+#define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL	0xBC
+#define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2	0xDC
+#define BRCMF_PCIE_CFGREG_RBAR_CTRL		0x228
+#define BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1	0x248
+#define BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG	0x4E0
+#define BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG	0x4F4
+#define BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB	3
+
+
+MODULE_FIRMWARE(BRCMF_PCIE_43602_FW_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_43602_NVRAM_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4350_FW_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4350_NVRAM_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4356_FW_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4356_NVRAM_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_43570_FW_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_43570_NVRAM_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4358_FW_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4358_NVRAM_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4365_FW_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4365_NVRAM_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4366_FW_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4366_NVRAM_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4371_FW_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4371_NVRAM_NAME);
+
+
+struct brcmf_pcie_console {
+	u32 base_addr;
+	u32 buf_addr;
+	u32 bufsize;
+	u32 read_idx;
+	u8 log_str[256];
+	u8 log_idx;
+};
+
+struct brcmf_pcie_shared_info {
+	u32 tcm_base_address;
+	u32 flags;
+	struct brcmf_pcie_ringbuf *commonrings[BRCMF_NROF_COMMON_MSGRINGS];
+	struct brcmf_pcie_ringbuf *flowrings;
+	u16 max_rxbufpost;
+	u32 nrof_flowrings;
+	u32 rx_dataoffset;
+	u32 htod_mb_data_addr;
+	u32 dtoh_mb_data_addr;
+	u32 ring_info_addr;
+	struct brcmf_pcie_console console;
+	void *scratch;
+	dma_addr_t scratch_dmahandle;
+	void *ringupd;
+	dma_addr_t ringupd_dmahandle;
+};
+
+struct brcmf_pcie_core_info {
+	u32 base;
+	u32 wrapbase;
+};
+
+struct brcmf_pciedev_info {
+	enum brcmf_pcie_state state;
+	bool in_irq;
+	bool irq_requested;
+	struct pci_dev *pdev;
+	char fw_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN];
+	char nvram_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN];
+	void __iomem *regs;
+	void __iomem *tcm;
+	u32 tcm_size;
+	u32 ram_base;
+	u32 ram_size;
+	struct brcmf_chip *ci;
+	u32 coreid;
+	u32 generic_corerev;
+	struct brcmf_pcie_shared_info shared;
+	void (*ringbell)(struct brcmf_pciedev_info *devinfo);
+	wait_queue_head_t mbdata_resp_wait;
+	bool mbdata_completed;
+	bool irq_allocated;
+	bool wowl_enabled;
+	u8 dma_idx_sz;
+	void *idxbuf;
+	u32 idxbuf_sz;
+	dma_addr_t idxbuf_dmahandle;
+	u16 (*read_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset);
+	void (*write_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
+			  u16 value);
+};
+
+struct brcmf_pcie_ringbuf {
+	struct brcmf_commonring commonring;
+	dma_addr_t dma_handle;
+	u32 w_idx_addr;
+	u32 r_idx_addr;
+	struct brcmf_pciedev_info *devinfo;
+	u8 id;
+};
+
+
+static const u32 brcmf_ring_max_item[BRCMF_NROF_COMMON_MSGRINGS] = {
+	BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM,
+	BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM,
+	BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM,
+	BRCMF_D2H_MSGRING_TX_COMPLETE_MAX_ITEM,
+	BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM
+};
+
+static const u32 brcmf_ring_itemsize[BRCMF_NROF_COMMON_MSGRINGS] = {
+	BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE,
+	BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE,
+	BRCMF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE,
+	BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE,
+	BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE
+};
+
+
+static u32
+brcmf_pcie_read_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset)
+{
+	void __iomem *address = devinfo->regs + reg_offset;
+
+	return (ioread32(address));
+}
+
+
+static void
+brcmf_pcie_write_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset,
+		       u32 value)
+{
+	void __iomem *address = devinfo->regs + reg_offset;
+
+	iowrite32(value, address);
+}
+
+
+static u8
+brcmf_pcie_read_tcm8(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
+{
+	void __iomem *address = devinfo->tcm + mem_offset;
+
+	return (ioread8(address));
+}
+
+
+static u16
+brcmf_pcie_read_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
+{
+	void __iomem *address = devinfo->tcm + mem_offset;
+
+	return (ioread16(address));
+}
+
+
+static void
+brcmf_pcie_write_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
+		       u16 value)
+{
+	void __iomem *address = devinfo->tcm + mem_offset;
+
+	iowrite16(value, address);
+}
+
+
+static u16
+brcmf_pcie_read_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
+{
+	u16 *address = devinfo->idxbuf + mem_offset;
+
+	return (*(address));
+}
+
+
+static void
+brcmf_pcie_write_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
+		     u16 value)
+{
+	u16 *address = devinfo->idxbuf + mem_offset;
+
+	*(address) = value;
+}
+
+
+static u32
+brcmf_pcie_read_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
+{
+	void __iomem *address = devinfo->tcm + mem_offset;
+
+	return (ioread32(address));
+}
+
+
+static void
+brcmf_pcie_write_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
+		       u32 value)
+{
+	void __iomem *address = devinfo->tcm + mem_offset;
+
+	iowrite32(value, address);
+}
+
+
+static u32
+brcmf_pcie_read_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
+{
+	void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset;
+
+	return (ioread32(addr));
+}
+
+
+static void
+brcmf_pcie_write_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
+		       u32 value)
+{
+	void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset;
+
+	iowrite32(value, addr);
+}
+
+
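+/* Copy a host buffer into device TCM. The MMIO access width (32, 16 or
+ * 8 bit) is chosen from the alignment checks below on the destination,
+ * the source and the length.
+ */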
+static void
+brcmf_pcie_copy_mem_todev(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
+			  void *srcaddr, u32 len)
+{
+	void __iomem *address = devinfo->tcm + mem_offset;
+	__le32 *src32;
+	__le16 *src16;
+	u8 *src8;
+
+	if (((ulong)address & 4) || ((ulong)srcaddr & 4) || (len & 4)) {
+		if (((ulong)address & 2) || ((ulong)srcaddr & 2) || (len & 2)) {
+			src8 = (u8 *)srcaddr;
+			while (len) {
+				iowrite8(*src8, address);
+				address++;
+				src8++;
+				len--;
+			}
+		} else {
+			len = len / 2;
+			src16 = (__le16 *)srcaddr;
+			while (len) {
+				iowrite16(le16_to_cpu(*src16), address);
+				address += 2;
+				src16++;
+				len--;
+			}
+		}
+	} else {
+		len = len / 4;
+		src32 = (__le32 *)srcaddr;
+		while (len) {
+			iowrite32(le32_to_cpu(*src32), address);
+			address += 4;
+			src32++;
+			len--;
+		}
+	}
+}
+
+
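+/* Counterpart of brcmf_pcie_copy_mem_todev(): copy from device TCM into a
+ * host buffer, again selecting the access width from the alignment checks.
+ */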
+static void
+brcmf_pcie_copy_dev_tomem(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
+			  void *dstaddr, u32 len)
+{
+	void __iomem *address = devinfo->tcm + mem_offset;
+	__le32 *dst32;
+	__le16 *dst16;
+	u8 *dst8;
+
+	if (((ulong)address & 4) || ((ulong)dstaddr & 4) || (len & 4)) {
+		if (((ulong)address & 2) || ((ulong)dstaddr & 2) || (len & 2)) {
+			dst8 = (u8 *)dstaddr;
+			while (len) {
+				*dst8 = ioread8(address);
+				address++;
+				dst8++;
+				len--;
+			}
+		} else {
+			len = len / 2;
+			dst16 = (__le16 *)dstaddr;
+			while (len) {
+				*dst16 = cpu_to_le16(ioread16(address));
+				address += 2;
+				dst16++;
+				len--;
+			}
+		}
+	} else {
+		len = len / 4;
+		dst32 = (__le32 *)dstaddr;
+		while (len) {
+			*dst32 = cpu_to_le32(ioread32(address));
+			address += 4;
+			dst32++;
+			len--;
+		}
+	}
+}
+
+
+#define WRITECC32(devinfo, reg, value) brcmf_pcie_write_reg32(devinfo, \
+		CHIPCREGOFFS(reg), value)
+
+
+static void
+brcmf_pcie_select_core(struct brcmf_pciedev_info *devinfo, u16 coreid)
+{
+	const struct pci_dev *pdev = devinfo->pdev;
+	struct brcmf_core *core;
+	u32 bar0_win;
+
+	core = brcmf_chip_get_core(devinfo->ci, coreid);
+	if (core) {
+		bar0_win = core->base;
+		pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, bar0_win);
+		if (pci_read_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW,
+					  &bar0_win) == 0) {
+			if (bar0_win != core->base) {
+				bar0_win = core->base;
+				pci_write_config_dword(pdev,
+						       BRCMF_PCIE_BAR0_WINDOW,
+						       bar0_win);
+			}
+		}
+	} else {
+		brcmf_err("Unsupported core selected %x\n", coreid);
+	}
+}
+
+
+static void brcmf_pcie_reset_device(struct brcmf_pciedev_info *devinfo)
+{
+	struct brcmf_core *core;
+	u16 cfg_offset[] = { BRCMF_PCIE_CFGREG_STATUS_CMD,
+			     BRCMF_PCIE_CFGREG_PM_CSR,
+			     BRCMF_PCIE_CFGREG_MSI_CAP,
+			     BRCMF_PCIE_CFGREG_MSI_ADDR_L,
+			     BRCMF_PCIE_CFGREG_MSI_ADDR_H,
+			     BRCMF_PCIE_CFGREG_MSI_DATA,
+			     BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2,
+			     BRCMF_PCIE_CFGREG_RBAR_CTRL,
+			     BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1,
+			     BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG,
+			     BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG };
+	u32 i;
+	u32 val;
+	u32 lsc;
+
+	if (!devinfo->ci)
+		return;
+
+	/* Disable ASPM */
+	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
+	pci_read_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
+			      &lsc);
+	val = lsc & (~BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB);
+	pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
+			       val);
+
+	/* Watchdog reset */
+	brcmf_pcie_select_core(devinfo, BCMA_CORE_CHIPCOMMON);
+	WRITECC32(devinfo, watchdog, 4);
+	msleep(100);
+
+	/* Restore ASPM */
+	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
+	pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
+			       lsc);
+
+	core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_PCIE2);
+	if (core->rev <= 13) {
+		for (i = 0; i < ARRAY_SIZE(cfg_offset); i++) {
+			brcmf_pcie_write_reg32(devinfo,
+					       BRCMF_PCIE_PCIE2REG_CONFIGADDR,
+					       cfg_offset[i]);
+			val = brcmf_pcie_read_reg32(devinfo,
+				BRCMF_PCIE_PCIE2REG_CONFIGDATA);
+			brcmf_dbg(PCIE, "config offset 0x%04x, value 0x%04x\n",
+				  cfg_offset[i], val);
+			brcmf_pcie_write_reg32(devinfo,
+					       BRCMF_PCIE_PCIE2REG_CONFIGDATA,
+					       val);
+		}
+	}
+}
+
+
+static void brcmf_pcie_attach(struct brcmf_pciedev_info *devinfo)
+{
+	u32 config;
+
+	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
+	/* BAR1 window may not be sized properly */
+	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
+	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR, 0x4e0);
+	config = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA);
+	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, config);
+
+	device_wakeup_enable(&devinfo->pdev->dev);
+}
+
+
+static int brcmf_pcie_enter_download_state(struct brcmf_pciedev_info *devinfo)
+{
+	if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
+		brcmf_pcie_select_core(devinfo, BCMA_CORE_ARM_CR4);
+		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
+				       5);
+		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA,
+				       0);
+		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
+				       7);
+		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA,
+				       0);
+	}
+	return 0;
+}
+
+
+static int brcmf_pcie_exit_download_state(struct brcmf_pciedev_info *devinfo,
+					  u32 resetintr)
+{
+	struct brcmf_core *core;
+
+	if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
+		core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_INTERNAL_MEM);
+		brcmf_chip_resetcore(core, 0, 0, 0);
+	}
+
+	return !brcmf_chip_set_active(devinfo->ci, resetintr);
+}
+
+
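+/* Write host-to-device mailbox data into TCM. If a previous transaction is
+ * still pending, poll for up to about a second before giving up. After the
+ * data is written, the device is signalled through the SBMBX config register.
+ */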
+static int
+brcmf_pcie_send_mb_data(struct brcmf_pciedev_info *devinfo, u32 htod_mb_data)
+{
+	struct brcmf_pcie_shared_info *shared;
+	u32 addr;
+	u32 cur_htod_mb_data;
+	u32 i;
+
+	shared = &devinfo->shared;
+	addr = shared->htod_mb_data_addr;
+	cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
+
+	if (cur_htod_mb_data != 0)
+		brcmf_dbg(PCIE, "MB transaction is already pending 0x%04x\n",
+			  cur_htod_mb_data);
+
+	i = 0;
+	while (cur_htod_mb_data != 0) {
+		msleep(10);
+		i++;
+		if (i > 100)
+			return -EIO;
+		cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
+	}
+
+	brcmf_pcie_write_tcm32(devinfo, addr, htod_mb_data);
+	pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
+	pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
+
+	return 0;
+}
+
+
+static void brcmf_pcie_handle_mb_data(struct brcmf_pciedev_info *devinfo)
+{
+	struct brcmf_pcie_shared_info *shared;
+	u32 addr;
+	u32 dtoh_mb_data;
+
+	shared = &devinfo->shared;
+	addr = shared->dtoh_mb_data_addr;
+	dtoh_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
+
+	if (!dtoh_mb_data)
+		return;
+
+	brcmf_pcie_write_tcm32(devinfo, addr, 0);
+
+	brcmf_dbg(PCIE, "D2H_MB_DATA: 0x%04x\n", dtoh_mb_data);
+	if (dtoh_mb_data & BRCMF_D2H_DEV_DS_ENTER_REQ)  {
+		brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP REQ\n");
+		brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_DS_ACK);
+		brcmf_dbg(PCIE, "D2H_MB_DATA: sent DEEP SLEEP ACK\n");
+	}
+	if (dtoh_mb_data & BRCMF_D2H_DEV_DS_EXIT_NOTE)
+		brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP EXIT\n");
+	if (dtoh_mb_data & BRCMF_D2H_DEV_D3_ACK) {
+		brcmf_dbg(PCIE, "D2H_MB_DATA: D3 ACK\n");
+		if (waitqueue_active(&devinfo->mbdata_resp_wait)) {
+			devinfo->mbdata_completed = true;
+			wake_up(&devinfo->mbdata_resp_wait);
+		}
+	}
+}
+
+
+static void brcmf_pcie_bus_console_init(struct brcmf_pciedev_info *devinfo)
+{
+	struct brcmf_pcie_shared_info *shared;
+	struct brcmf_pcie_console *console;
+	u32 addr;
+
+	shared = &devinfo->shared;
+	console = &shared->console;
+	addr = shared->tcm_base_address + BRCMF_SHARED_CONSOLE_ADDR_OFFSET;
+	console->base_addr = brcmf_pcie_read_tcm32(devinfo, addr);
+
+	addr = console->base_addr + BRCMF_CONSOLE_BUFADDR_OFFSET;
+	console->buf_addr = brcmf_pcie_read_tcm32(devinfo, addr);
+	addr = console->base_addr + BRCMF_CONSOLE_BUFSIZE_OFFSET;
+	console->bufsize = brcmf_pcie_read_tcm32(devinfo, addr);
+
+	brcmf_dbg(FWCON, "Console: base %x, buf %x, size %d\n",
+		  console->base_addr, console->buf_addr, console->bufsize);
+}
+
+
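+/* Drain the firmware console ring buffer in TCM and emit complete lines
+ * through pr_debug(). Only active when firmware console logging is enabled.
+ */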
+static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo)
+{
+	struct brcmf_pcie_console *console;
+	u32 addr;
+	u8 ch;
+	u32 newidx;
+
+	if (!BRCMF_FWCON_ON())
+		return;
+
+	console = &devinfo->shared.console;
+	addr = console->base_addr + BRCMF_CONSOLE_WRITEIDX_OFFSET;
+	newidx = brcmf_pcie_read_tcm32(devinfo, addr);
+	while (newidx != console->read_idx) {
+		addr = console->buf_addr + console->read_idx;
+		ch = brcmf_pcie_read_tcm8(devinfo, addr);
+		console->read_idx++;
+		if (console->read_idx == console->bufsize)
+			console->read_idx = 0;
+		if (ch == '\r')
+			continue;
+		console->log_str[console->log_idx] = ch;
+		console->log_idx++;
+		if ((ch != '\n') &&
+		    (console->log_idx == (sizeof(console->log_str) - 2))) {
+			ch = '\n';
+			console->log_str[console->log_idx] = ch;
+			console->log_idx++;
+		}
+		if (ch == '\n') {
+			console->log_str[console->log_idx] = 0;
+			pr_debug("CONSOLE: %s", console->log_str);
+			console->log_idx = 0;
+		}
+	}
+}
+
+
+static __used void brcmf_pcie_ringbell_v1(struct brcmf_pciedev_info *devinfo)
+{
+	u32 reg_value;
+
+	brcmf_dbg(PCIE, "RING !\n");
+	reg_value = brcmf_pcie_read_reg32(devinfo,
+					  BRCMF_PCIE_PCIE2REG_MAILBOXINT);
+	reg_value |= BRCMF_PCIE2_INTB;
+	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
+			       reg_value);
+}
+
+
+static void brcmf_pcie_ringbell_v2(struct brcmf_pciedev_info *devinfo)
+{
+	brcmf_dbg(PCIE, "RING !\n");
+	/* Any arbitrary value will do; let's use 1 */
+	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_H2D_MAILBOX, 1);
+}
+
+
+static void brcmf_pcie_intr_disable(struct brcmf_pciedev_info *devinfo)
+{
+	if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1)
+		pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_INTMASK,
+				       0);
+	else
+		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK,
+				       0);
+}
+
+
+static void brcmf_pcie_intr_enable(struct brcmf_pciedev_info *devinfo)
+{
+	if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1)
+		pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_INTMASK,
+				       BRCMF_PCIE_INT_DEF);
+	else
+		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK,
+				       BRCMF_PCIE_MB_INT_D2H_DB |
+				       BRCMF_PCIE_MB_INT_FN0_0 |
+				       BRCMF_PCIE_MB_INT_FN0_1);
+}
+
+
+static irqreturn_t brcmf_pcie_quick_check_isr_v1(int irq, void *arg)
+{
+	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
+	u32 status;
+
+	status = 0;
+	pci_read_config_dword(devinfo->pdev, BRCMF_PCIE_REG_INTSTATUS, &status);
+	if (status) {
+		brcmf_pcie_intr_disable(devinfo);
+		brcmf_dbg(PCIE, "Enter\n");
+		return IRQ_WAKE_THREAD;
+	}
+	return IRQ_NONE;
+}
+
+
+static irqreturn_t brcmf_pcie_quick_check_isr_v2(int irq, void *arg)
+{
+	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
+
+	if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT)) {
+		brcmf_pcie_intr_disable(devinfo);
+		brcmf_dbg(PCIE, "Enter\n");
+		return IRQ_WAKE_THREAD;
+	}
+	return IRQ_NONE;
+}
+
+
+static irqreturn_t brcmf_pcie_isr_thread_v1(int irq, void *arg)
+{
+	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
+	const struct pci_dev *pdev = devinfo->pdev;
+	u32 status;
+
+	devinfo->in_irq = true;
+	status = 0;
+	pci_read_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, &status);
+	brcmf_dbg(PCIE, "Enter %x\n", status);
+	if (status) {
+		pci_write_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, status);
+		if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
+			brcmf_proto_msgbuf_rx_trigger(&devinfo->pdev->dev);
+	}
+	if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
+		brcmf_pcie_intr_enable(devinfo);
+	devinfo->in_irq = false;
+	return IRQ_HANDLED;
+}
+
+
+static irqreturn_t brcmf_pcie_isr_thread_v2(int irq, void *arg)
+{
+	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
+	u32 status;
+
+	devinfo->in_irq = true;
+	status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
+	brcmf_dbg(PCIE, "Enter %x\n", status);
+	if (status) {
+		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
+				       status);
+		if (status & (BRCMF_PCIE_MB_INT_FN0_0 |
+			      BRCMF_PCIE_MB_INT_FN0_1))
+			brcmf_pcie_handle_mb_data(devinfo);
+		if (status & BRCMF_PCIE_MB_INT_D2H_DB) {
+			if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
+				brcmf_proto_msgbuf_rx_trigger(
+							&devinfo->pdev->dev);
+		}
+	}
+	brcmf_pcie_bus_console_read(devinfo);
+	if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
+		brcmf_pcie_intr_enable(devinfo);
+	devinfo->in_irq = false;
+	return IRQ_HANDLED;
+}
+
+
+static int brcmf_pcie_request_irq(struct brcmf_pciedev_info *devinfo)
+{
+	struct pci_dev *pdev;
+
+	pdev = devinfo->pdev;
+
+	brcmf_pcie_intr_disable(devinfo);
+
+	brcmf_dbg(PCIE, "Enter\n");
+	/* is this a v1 or v2 implementation? */
+	devinfo->irq_requested = false;
+	pci_enable_msi(pdev);
+	if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1) {
+		if (request_threaded_irq(pdev->irq,
+					 brcmf_pcie_quick_check_isr_v1,
+					 brcmf_pcie_isr_thread_v1,
+					 IRQF_SHARED, "brcmf_pcie_intr",
+					 devinfo)) {
+			pci_disable_msi(pdev);
+			brcmf_err("Failed to request IRQ %d\n", pdev->irq);
+			return -EIO;
+		}
+	} else {
+		if (request_threaded_irq(pdev->irq,
+					 brcmf_pcie_quick_check_isr_v2,
+					 brcmf_pcie_isr_thread_v2,
+					 IRQF_SHARED, "brcmf_pcie_intr",
+					 devinfo)) {
+			pci_disable_msi(pdev);
+			brcmf_err("Failed to request IRQ %d\n", pdev->irq);
+			return -EIO;
+		}
+	}
+	devinfo->irq_requested = true;
+	devinfo->irq_allocated = true;
+	return 0;
+}
+
+
+static void brcmf_pcie_release_irq(struct brcmf_pciedev_info *devinfo)
+{
+	struct pci_dev *pdev;
+	u32 status;
+	u32 count;
+
+	if (!devinfo->irq_allocated)
+		return;
+
+	pdev = devinfo->pdev;
+
+	brcmf_pcie_intr_disable(devinfo);
+	if (!devinfo->irq_requested)
+		return;
+	devinfo->irq_requested = false;
+	free_irq(pdev->irq, devinfo);
+	pci_disable_msi(pdev);
+
+	msleep(50);
+	count = 0;
+	while ((devinfo->in_irq) && (count < 20)) {
+		msleep(50);
+		count++;
+	}
+	if (devinfo->in_irq)
+		brcmf_err("Still in IRQ (processing) !!!\n");
+
+	if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1) {
+		status = 0;
+		pci_read_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, &status);
+		pci_write_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, status);
+	} else {
+		status = brcmf_pcie_read_reg32(devinfo,
+					       BRCMF_PCIE_PCIE2REG_MAILBOXINT);
+		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
+				       status);
+	}
+	devinfo->irq_allocated = false;
+}
+
+
+static int brcmf_pcie_ring_mb_write_rptr(void *ctx)
+{
+	struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
+	struct brcmf_pciedev_info *devinfo = ring->devinfo;
+	struct brcmf_commonring *commonring = &ring->commonring;
+
+	if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
+		return -EIO;
+
+	brcmf_dbg(PCIE, "W r_ptr %d (%d), ring %d\n", commonring->r_ptr,
+		  commonring->w_ptr, ring->id);
+
+	devinfo->write_ptr(devinfo, ring->r_idx_addr, commonring->r_ptr);
+
+	return 0;
+}
+
+
+static int brcmf_pcie_ring_mb_write_wptr(void *ctx)
+{
+	struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
+	struct brcmf_pciedev_info *devinfo = ring->devinfo;
+	struct brcmf_commonring *commonring = &ring->commonring;
+
+	if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
+		return -EIO;
+
+	brcmf_dbg(PCIE, "W w_ptr %d (%d), ring %d\n", commonring->w_ptr,
+		  commonring->r_ptr, ring->id);
+
+	devinfo->write_ptr(devinfo, ring->w_idx_addr, commonring->w_ptr);
+
+	return 0;
+}
+
+
+static int brcmf_pcie_ring_mb_ring_bell(void *ctx)
+{
+	struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
+	struct brcmf_pciedev_info *devinfo = ring->devinfo;
+
+	if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
+		return -EIO;
+
+	devinfo->ringbell(devinfo);
+
+	return 0;
+}
+
+
+static int brcmf_pcie_ring_mb_update_rptr(void *ctx)
+{
+	struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
+	struct brcmf_pciedev_info *devinfo = ring->devinfo;
+	struct brcmf_commonring *commonring = &ring->commonring;
+
+	if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
+		return -EIO;
+
+	commonring->r_ptr = devinfo->read_ptr(devinfo, ring->r_idx_addr);
+
+	brcmf_dbg(PCIE, "R r_ptr %d (%d), ring %d\n", commonring->r_ptr,
+		  commonring->w_ptr, ring->id);
+
+	return 0;
+}
+
+
+static int brcmf_pcie_ring_mb_update_wptr(void *ctx)
+{
+	struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
+	struct brcmf_pciedev_info *devinfo = ring->devinfo;
+	struct brcmf_commonring *commonring = &ring->commonring;
+
+	if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
+		return -EIO;
+
+	commonring->w_ptr = devinfo->read_ptr(devinfo, ring->w_idx_addr);
+
+	brcmf_dbg(PCIE, "R w_ptr %d (%d), ring %d\n", commonring->w_ptr,
+		  commonring->r_ptr, ring->id);
+
+	return 0;
+}
+
+
+static void *
+brcmf_pcie_init_dmabuffer_for_device(struct brcmf_pciedev_info *devinfo,
+				     u32 size, u32 tcm_dma_phys_addr,
+				     dma_addr_t *dma_handle)
+{
+	void *ring;
+	u64 address;
+
+	ring = dma_alloc_coherent(&devinfo->pdev->dev, size, dma_handle,
+				  GFP_KERNEL);
+	if (!ring)
+		return NULL;
+
+	address = (u64)*dma_handle;
+	brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr,
+			       address & 0xffffffff);
+	brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr + 4, address >> 32);
+
+	memset(ring, 0, size);
+
+	return (ring);
+}
+
+
+static struct brcmf_pcie_ringbuf *
+brcmf_pcie_alloc_dma_and_ring(struct brcmf_pciedev_info *devinfo, u32 ring_id,
+			      u32 tcm_ring_phys_addr)
+{
+	void *dma_buf;
+	dma_addr_t dma_handle;
+	struct brcmf_pcie_ringbuf *ring;
+	u32 size;
+	u32 addr;
+
+	size = brcmf_ring_max_item[ring_id] * brcmf_ring_itemsize[ring_id];
+	dma_buf = brcmf_pcie_init_dmabuffer_for_device(devinfo, size,
+			tcm_ring_phys_addr + BRCMF_RING_MEM_BASE_ADDR_OFFSET,
+			&dma_handle);
+	if (!dma_buf)
+		return NULL;
+
+	addr = tcm_ring_phys_addr + BRCMF_RING_MAX_ITEM_OFFSET;
+	brcmf_pcie_write_tcm16(devinfo, addr, brcmf_ring_max_item[ring_id]);
+	addr = tcm_ring_phys_addr + BRCMF_RING_LEN_ITEMS_OFFSET;
+	brcmf_pcie_write_tcm16(devinfo, addr, brcmf_ring_itemsize[ring_id]);
+
+	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+	if (!ring) {
+		dma_free_coherent(&devinfo->pdev->dev, size, dma_buf,
+				  dma_handle);
+		return NULL;
+	}
+	brcmf_commonring_config(&ring->commonring, brcmf_ring_max_item[ring_id],
+				brcmf_ring_itemsize[ring_id], dma_buf);
+	ring->dma_handle = dma_handle;
+	ring->devinfo = devinfo;
+	brcmf_commonring_register_cb(&ring->commonring,
+				     brcmf_pcie_ring_mb_ring_bell,
+				     brcmf_pcie_ring_mb_update_rptr,
+				     brcmf_pcie_ring_mb_update_wptr,
+				     brcmf_pcie_ring_mb_write_rptr,
+				     brcmf_pcie_ring_mb_write_wptr, ring);
+
+	return (ring);
+}
+
+
+static void brcmf_pcie_release_ringbuffer(struct device *dev,
+					  struct brcmf_pcie_ringbuf *ring)
+{
+	void *dma_buf;
+	u32 size;
+
+	if (!ring)
+		return;
+
+	dma_buf = ring->commonring.buf_addr;
+	if (dma_buf) {
+		size = ring->commonring.depth * ring->commonring.item_len;
+		dma_free_coherent(dev, size, dma_buf, ring->dma_handle);
+	}
+	kfree(ring);
+}
+
+
+static void brcmf_pcie_release_ringbuffers(struct brcmf_pciedev_info *devinfo)
+{
+	u32 i;
+
+	for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++) {
+		brcmf_pcie_release_ringbuffer(&devinfo->pdev->dev,
+					      devinfo->shared.commonrings[i]);
+		devinfo->shared.commonrings[i] = NULL;
+	}
+	kfree(devinfo->shared.flowrings);
+	devinfo->shared.flowrings = NULL;
+	if (devinfo->idxbuf) {
+		dma_free_coherent(&devinfo->pdev->dev,
+				  devinfo->idxbuf_sz,
+				  devinfo->idxbuf,
+				  devinfo->idxbuf_dmahandle);
+		devinfo->idxbuf = NULL;
+	}
+}
+
+
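+/* Set up the common message rings and the flowring bookkeeping. Ring
+ * read/write indices either live in device TCM or, when the firmware
+ * supports DMA indices, in a host buffer whose DMA address is handed to
+ * the firmware here.
+ */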
+static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
+{
+	struct brcmf_pcie_ringbuf *ring;
+	struct brcmf_pcie_ringbuf *rings;
+	u32 ring_addr;
+	u32 d2h_w_idx_ptr;
+	u32 d2h_r_idx_ptr;
+	u32 h2d_w_idx_ptr;
+	u32 h2d_r_idx_ptr;
+	u32 addr;
+	u32 ring_mem_ptr;
+	u32 i;
+	u64 address;
+	u32 bufsz;
+	u16 max_sub_queues;
+	u8 idx_offset;
+
+	ring_addr = devinfo->shared.ring_info_addr;
+	brcmf_dbg(PCIE, "Base ring addr = 0x%08x\n", ring_addr);
+	addr = ring_addr + BRCMF_SHARED_RING_MAX_SUB_QUEUES;
+	max_sub_queues = brcmf_pcie_read_tcm16(devinfo, addr);
+
+	if (devinfo->dma_idx_sz != 0) {
+		bufsz = (BRCMF_NROF_D2H_COMMON_MSGRINGS + max_sub_queues) *
+			devinfo->dma_idx_sz * 2;
+		devinfo->idxbuf = dma_alloc_coherent(&devinfo->pdev->dev, bufsz,
+						     &devinfo->idxbuf_dmahandle,
+						     GFP_KERNEL);
+		if (!devinfo->idxbuf)
+			devinfo->dma_idx_sz = 0;
+	}
+
+	if (devinfo->dma_idx_sz == 0) {
+		addr = ring_addr + BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET;
+		d2h_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
+		addr = ring_addr + BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET;
+		d2h_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
+		addr = ring_addr + BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET;
+		h2d_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
+		addr = ring_addr + BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET;
+		h2d_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
+		idx_offset = sizeof(u32);
+		devinfo->write_ptr = brcmf_pcie_write_tcm16;
+		devinfo->read_ptr = brcmf_pcie_read_tcm16;
+		brcmf_dbg(PCIE, "Using TCM indices\n");
+	} else {
+		memset(devinfo->idxbuf, 0, bufsz);
+		devinfo->idxbuf_sz = bufsz;
+		idx_offset = devinfo->dma_idx_sz;
+		devinfo->write_ptr = brcmf_pcie_write_idx;
+		devinfo->read_ptr = brcmf_pcie_read_idx;
+
+		h2d_w_idx_ptr = 0;
+		addr = ring_addr + BRCMF_SHARED_RING_H2D_WP_HADDR_OFFSET;
+		address = (u64)devinfo->idxbuf_dmahandle;
+		brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
+		brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
+
+		h2d_r_idx_ptr = h2d_w_idx_ptr + max_sub_queues * idx_offset;
+		addr = ring_addr + BRCMF_SHARED_RING_H2D_RP_HADDR_OFFSET;
+		address += max_sub_queues * idx_offset;
+		brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
+		brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
+
+		d2h_w_idx_ptr = h2d_r_idx_ptr + max_sub_queues * idx_offset;
+		addr = ring_addr + BRCMF_SHARED_RING_D2H_WP_HADDR_OFFSET;
+		address += max_sub_queues * idx_offset;
+		brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
+		brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
+
+		d2h_r_idx_ptr = d2h_w_idx_ptr +
+				BRCMF_NROF_D2H_COMMON_MSGRINGS * idx_offset;
+		addr = ring_addr + BRCMF_SHARED_RING_D2H_RP_HADDR_OFFSET;
+		address += BRCMF_NROF_D2H_COMMON_MSGRINGS * idx_offset;
+		brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
+		brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
+		brcmf_dbg(PCIE, "Using host memory indices\n");
+	}
+
+	addr = ring_addr + BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET;
+	ring_mem_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
+
+	for (i = 0; i < BRCMF_NROF_H2D_COMMON_MSGRINGS; i++) {
+		ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
+		if (!ring)
+			goto fail;
+		ring->w_idx_addr = h2d_w_idx_ptr;
+		ring->r_idx_addr = h2d_r_idx_ptr;
+		ring->id = i;
+		devinfo->shared.commonrings[i] = ring;
+
+		h2d_w_idx_ptr += idx_offset;
+		h2d_r_idx_ptr += idx_offset;
+		ring_mem_ptr += BRCMF_RING_MEM_SZ;
+	}
+
+	for (i = BRCMF_NROF_H2D_COMMON_MSGRINGS;
+	     i < BRCMF_NROF_COMMON_MSGRINGS; i++) {
+		ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
+		if (!ring)
+			goto fail;
+		ring->w_idx_addr = d2h_w_idx_ptr;
+		ring->r_idx_addr = d2h_r_idx_ptr;
+		ring->id = i;
+		devinfo->shared.commonrings[i] = ring;
+
+		d2h_w_idx_ptr += idx_offset;
+		d2h_r_idx_ptr += idx_offset;
+		ring_mem_ptr += BRCMF_RING_MEM_SZ;
+	}
+
+	devinfo->shared.nrof_flowrings =
+			max_sub_queues - BRCMF_NROF_H2D_COMMON_MSGRINGS;
+	rings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(*ring),
+			GFP_KERNEL);
+	if (!rings)
+		goto fail;
+
+	brcmf_dbg(PCIE, "Nr of flowrings is %d\n",
+		  devinfo->shared.nrof_flowrings);
+
+	for (i = 0; i < devinfo->shared.nrof_flowrings; i++) {
+		ring = &rings[i];
+		ring->devinfo = devinfo;
+		ring->id = i + BRCMF_NROF_COMMON_MSGRINGS;
+		brcmf_commonring_register_cb(&ring->commonring,
+					     brcmf_pcie_ring_mb_ring_bell,
+					     brcmf_pcie_ring_mb_update_rptr,
+					     brcmf_pcie_ring_mb_update_wptr,
+					     brcmf_pcie_ring_mb_write_rptr,
+					     brcmf_pcie_ring_mb_write_wptr,
+					     ring);
+		ring->w_idx_addr = h2d_w_idx_ptr;
+		ring->r_idx_addr = h2d_r_idx_ptr;
+		h2d_w_idx_ptr += idx_offset;
+		h2d_r_idx_ptr += idx_offset;
+	}
+	devinfo->shared.flowrings = rings;
+
+	return 0;
+
+fail:
+	brcmf_err("Allocating ring buffers failed\n");
+	brcmf_pcie_release_ringbuffers(devinfo);
+	return -ENOMEM;
+}
+
+
+static void
+brcmf_pcie_release_scratchbuffers(struct brcmf_pciedev_info *devinfo)
+{
+	if (devinfo->shared.scratch)
+		dma_free_coherent(&devinfo->pdev->dev,
+				  BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
+				  devinfo->shared.scratch,
+				  devinfo->shared.scratch_dmahandle);
+	if (devinfo->shared.ringupd)
+		dma_free_coherent(&devinfo->pdev->dev,
+				  BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
+				  devinfo->shared.ringupd,
+				  devinfo->shared.ringupd_dmahandle);
+}
+
+static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
+{
+	u64 address;
+	u32 addr;
+
+	devinfo->shared.scratch = dma_alloc_coherent(&devinfo->pdev->dev,
+		BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
+		&devinfo->shared.scratch_dmahandle, GFP_KERNEL);
+	if (!devinfo->shared.scratch)
+		goto fail;
+
+	memset(devinfo->shared.scratch, 0, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
+
+	addr = devinfo->shared.tcm_base_address +
+	       BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET;
+	address = (u64)devinfo->shared.scratch_dmahandle;
+	brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
+	brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
+	addr = devinfo->shared.tcm_base_address +
+	       BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET;
+	brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
+
+	devinfo->shared.ringupd = dma_alloc_coherent(&devinfo->pdev->dev,
+		BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
+		&devinfo->shared.ringupd_dmahandle, GFP_KERNEL);
+	if (!devinfo->shared.ringupd)
+		goto fail;
+
+	memset(devinfo->shared.ringupd, 0, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
+
+	addr = devinfo->shared.tcm_base_address +
+	       BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET;
+	address = (u64)devinfo->shared.ringupd_dmahandle;
+	brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
+	brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
+	addr = devinfo->shared.tcm_base_address +
+	       BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET;
+	brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
+	return 0;
+
+fail:
+	brcmf_err("Allocating scratch buffers failed\n");
+	brcmf_pcie_release_scratchbuffers(devinfo);
+	return -ENOMEM;
+}
+
+
+static void brcmf_pcie_down(struct device *dev)
+{
+}
+
+
+static int brcmf_pcie_tx(struct device *dev, struct sk_buff *skb)
+{
+	return 0;
+}
+
+
+static int brcmf_pcie_tx_ctlpkt(struct device *dev, unsigned char *msg,
+				uint len)
+{
+	return 0;
+}
+
+
+static int brcmf_pcie_rx_ctlpkt(struct device *dev, unsigned char *msg,
+				uint len)
+{
+	return 0;
+}
+
+
+static void brcmf_pcie_wowl_config(struct device *dev, bool enabled)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
+	struct brcmf_pciedev_info *devinfo = buspub->devinfo;
+
+	brcmf_dbg(PCIE, "Configuring WOWL, enabled=%d\n", enabled);
+	devinfo->wowl_enabled = enabled;
+	if (enabled)
+		device_set_wakeup_enable(&devinfo->pdev->dev, true);
+	else
+		device_set_wakeup_enable(&devinfo->pdev->dev, false);
+}
+
+
+static size_t brcmf_pcie_get_ramsize(struct device *dev)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
+	struct brcmf_pciedev_info *devinfo = buspub->devinfo;
+
+	return devinfo->ci->ramsize - devinfo->ci->srsize;
+}
+
+
+static int brcmf_pcie_get_memdump(struct device *dev, void *data, size_t len)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
+	struct brcmf_pciedev_info *devinfo = buspub->devinfo;
+
+	brcmf_dbg(PCIE, "dump at 0x%08X: len=%zu\n", devinfo->ci->rambase, len);
+	brcmf_pcie_copy_dev_tomem(devinfo, devinfo->ci->rambase, data, len);
+	return 0;
+}
+
+
+static struct brcmf_bus_ops brcmf_pcie_bus_ops = {
+	.txdata = brcmf_pcie_tx,
+	.stop = brcmf_pcie_down,
+	.txctl = brcmf_pcie_tx_ctlpkt,
+	.rxctl = brcmf_pcie_rx_ctlpkt,
+	.wowl_config = brcmf_pcie_wowl_config,
+	.get_ramsize = brcmf_pcie_get_ramsize,
+	.get_memdump = brcmf_pcie_get_memdump,
+};
+
+
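+/* Parse the shared RAM area written by the firmware: protocol version,
+ * DMA index support, rx buffer parameters, mailbox addresses and the
+ * ring info location. Also initializes the firmware console.
+ */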
+static int
+brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo,
+			       u32 sharedram_addr)
+{
+	struct brcmf_pcie_shared_info *shared;
+	u32 addr;
+	u32 version;
+
+	shared = &devinfo->shared;
+	shared->tcm_base_address = sharedram_addr;
+
+	shared->flags = brcmf_pcie_read_tcm32(devinfo, sharedram_addr);
+	version = shared->flags & BRCMF_PCIE_SHARED_VERSION_MASK;
+	brcmf_dbg(PCIE, "PCIe protocol version %d\n", version);
+	if ((version > BRCMF_PCIE_MAX_SHARED_VERSION) ||
+	    (version < BRCMF_PCIE_MIN_SHARED_VERSION)) {
+		brcmf_err("Unsupported PCIE version %d\n", version);
+		return -EINVAL;
+	}
+
+	/* check whether the firmware supports DMA indices */
+	if (shared->flags & BRCMF_PCIE_SHARED_DMA_INDEX) {
+		if (shared->flags & BRCMF_PCIE_SHARED_DMA_2B_IDX)
+			devinfo->dma_idx_sz = sizeof(u16);
+		else
+			devinfo->dma_idx_sz = sizeof(u32);
+	}
+
+	addr = sharedram_addr + BRCMF_SHARED_MAX_RXBUFPOST_OFFSET;
+	shared->max_rxbufpost = brcmf_pcie_read_tcm16(devinfo, addr);
+	if (shared->max_rxbufpost == 0)
+		shared->max_rxbufpost = BRCMF_DEF_MAX_RXBUFPOST;
+
+	addr = sharedram_addr + BRCMF_SHARED_RX_DATAOFFSET_OFFSET;
+	shared->rx_dataoffset = brcmf_pcie_read_tcm32(devinfo, addr);
+
+	addr = sharedram_addr + BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET;
+	shared->htod_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);
+
+	addr = sharedram_addr + BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET;
+	shared->dtoh_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);
+
+	addr = sharedram_addr + BRCMF_SHARED_RING_INFO_ADDR_OFFSET;
+	shared->ring_info_addr = brcmf_pcie_read_tcm32(devinfo, addr);
+
+	brcmf_dbg(PCIE, "max rx buf post %d, rx dataoffset %d\n",
+		  shared->max_rxbufpost, shared->rx_dataoffset);
+
+	brcmf_pcie_bus_console_init(devinfo);
+
+	return 0;
+}
+
+
+static int brcmf_pcie_get_fwnames(struct brcmf_pciedev_info *devinfo)
+{
+	char *fw_name;
+	char *nvram_name;
+	uint fw_len, nv_len;
+	char end;
+
+	brcmf_dbg(PCIE, "Enter, chip 0x%04x chiprev %d\n", devinfo->ci->chip,
+		  devinfo->ci->chiprev);
+
+	switch (devinfo->ci->chip) {
+	case BRCM_CC_43602_CHIP_ID:
+		fw_name = BRCMF_PCIE_43602_FW_NAME;
+		nvram_name = BRCMF_PCIE_43602_NVRAM_NAME;
+		break;
+	case BRCM_CC_4350_CHIP_ID:
+		fw_name = BRCMF_PCIE_4350_FW_NAME;
+		nvram_name = BRCMF_PCIE_4350_NVRAM_NAME;
+		break;
+	case BRCM_CC_4356_CHIP_ID:
+		fw_name = BRCMF_PCIE_4356_FW_NAME;
+		nvram_name = BRCMF_PCIE_4356_NVRAM_NAME;
+		break;
+	case BRCM_CC_43567_CHIP_ID:
+	case BRCM_CC_43569_CHIP_ID:
+	case BRCM_CC_43570_CHIP_ID:
+		fw_name = BRCMF_PCIE_43570_FW_NAME;
+		nvram_name = BRCMF_PCIE_43570_NVRAM_NAME;
+		break;
+	case BRCM_CC_4358_CHIP_ID:
+		fw_name = BRCMF_PCIE_4358_FW_NAME;
+		nvram_name = BRCMF_PCIE_4358_NVRAM_NAME;
+		break;
+	case BRCM_CC_4365_CHIP_ID:
+		fw_name = BRCMF_PCIE_4365_FW_NAME;
+		nvram_name = BRCMF_PCIE_4365_NVRAM_NAME;
+		break;
+	case BRCM_CC_4366_CHIP_ID:
+		fw_name = BRCMF_PCIE_4366_FW_NAME;
+		nvram_name = BRCMF_PCIE_4366_NVRAM_NAME;
+		break;
+	case BRCM_CC_4371_CHIP_ID:
+		fw_name = BRCMF_PCIE_4371_FW_NAME;
+		nvram_name = BRCMF_PCIE_4371_NVRAM_NAME;
+		break;
+	default:
+		brcmf_err("Unsupported chip 0x%04x\n", devinfo->ci->chip);
+		return -ENODEV;
+	}
+
+	fw_len = sizeof(devinfo->fw_name) - 1;
+	nv_len = sizeof(devinfo->nvram_name) - 1;
+	/* check if firmware path is provided by module parameter */
+	if (brcmf_firmware_path[0] != '\0') {
+		strncpy(devinfo->fw_name, brcmf_firmware_path, fw_len);
+		strncpy(devinfo->nvram_name, brcmf_firmware_path, nv_len);
+		fw_len -= strlen(devinfo->fw_name);
+		nv_len -= strlen(devinfo->nvram_name);
+
+		end = brcmf_firmware_path[strlen(brcmf_firmware_path) - 1];
+		if (end != '/') {
+			strncat(devinfo->fw_name, "/", fw_len);
+			strncat(devinfo->nvram_name, "/", nv_len);
+			fw_len--;
+			nv_len--;
+		}
+	}
+	strncat(devinfo->fw_name, fw_name, fw_len);
+	strncat(devinfo->nvram_name, nvram_name, nv_len);
+
+	return 0;
+}
+
+
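+/* Download firmware (and optional NVRAM) into device RAM: halt the ARM,
+ * copy the image to the RAM base, place NVRAM at the end of RAM, clear
+ * the last RAM word, restart the ARM and then poll that word until the
+ * firmware writes the shared area address into it.
+ */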
+static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
+					const struct firmware *fw, void *nvram,
+					u32 nvram_len)
+{
+	u32 sharedram_addr;
+	u32 sharedram_addr_written;
+	u32 loop_counter;
+	int err;
+	u32 address;
+	u32 resetintr;
+
+	devinfo->ringbell = brcmf_pcie_ringbell_v2;
+	devinfo->generic_corerev = BRCMF_PCIE_GENREV2;
+
+	brcmf_dbg(PCIE, "Halt ARM.\n");
+	err = brcmf_pcie_enter_download_state(devinfo);
+	if (err)
+		return err;
+
+	brcmf_dbg(PCIE, "Download FW %s\n", devinfo->fw_name);
+	brcmf_pcie_copy_mem_todev(devinfo, devinfo->ci->rambase,
+				  (void *)fw->data, fw->size);
+
+	resetintr = get_unaligned_le32(fw->data);
+	release_firmware(fw);
+
+	/* Clear the last 4 bytes of RAM; the firmware writes the shared
+	 * area address there, which identifies when the FW is running.
+	 */
+	brcmf_pcie_write_ram32(devinfo, devinfo->ci->ramsize - 4, 0);
+
+	if (nvram) {
+		brcmf_dbg(PCIE, "Download NVRAM %s\n", devinfo->nvram_name);
+		address = devinfo->ci->rambase + devinfo->ci->ramsize -
+			  nvram_len;
+		brcmf_pcie_copy_mem_todev(devinfo, address, nvram, nvram_len);
+		brcmf_fw_nvram_free(nvram);
+	} else {
+		brcmf_dbg(PCIE, "No matching NVRAM file found %s\n",
+			  devinfo->nvram_name);
+	}
+
+	sharedram_addr_written = brcmf_pcie_read_ram32(devinfo,
+						       devinfo->ci->ramsize -
+						       4);
+	brcmf_dbg(PCIE, "Bring ARM in running state\n");
+	err = brcmf_pcie_exit_download_state(devinfo, resetintr);
+	if (err)
+		return err;
+
+	brcmf_dbg(PCIE, "Wait for FW init\n");
+	sharedram_addr = sharedram_addr_written;
+	loop_counter = BRCMF_PCIE_FW_UP_TIMEOUT / 50;
+	while ((sharedram_addr == sharedram_addr_written) && (loop_counter)) {
+		msleep(50);
+		sharedram_addr = brcmf_pcie_read_ram32(devinfo,
+						       devinfo->ci->ramsize -
+						       4);
+		loop_counter--;
+	}
+	if (sharedram_addr == sharedram_addr_written) {
+		brcmf_err("FW failed to initialize\n");
+		return -ENODEV;
+	}
+	brcmf_dbg(PCIE, "Shared RAM addr: 0x%08x\n", sharedram_addr);
+
+	return (brcmf_pcie_init_share_ram_info(devinfo, sharedram_addr));
+}
+
+
+static int brcmf_pcie_get_resource(struct brcmf_pciedev_info *devinfo)
+{
+	struct pci_dev *pdev;
+	int err;
+	phys_addr_t  bar0_addr, bar1_addr;
+	ulong bar1_size;
+
+	pdev = devinfo->pdev;
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		brcmf_err("pci_enable_device failed err=%d\n", err);
+		return err;
+	}
+
+	pci_set_master(pdev);
+
+	/* Bar-0 mapped address */
+	bar0_addr = pci_resource_start(pdev, 0);
+	/* Bar-1 mapped address */
+	bar1_addr = pci_resource_start(pdev, 2);
+	/* read Bar-1 mapped memory range */
+	bar1_size = pci_resource_len(pdev, 2);
+	if ((bar1_size == 0) || (bar1_addr == 0)) {
+		brcmf_err("BAR1 Not enabled, device size=%ld, addr=%#016llx\n",
+			  bar1_size, (unsigned long long)bar1_addr);
+		return -EINVAL;
+	}
+
+	devinfo->regs = ioremap_nocache(bar0_addr, BRCMF_PCIE_REG_MAP_SIZE);
+	devinfo->tcm = ioremap_nocache(bar1_addr, BRCMF_PCIE_TCM_MAP_SIZE);
+	devinfo->tcm_size = BRCMF_PCIE_TCM_MAP_SIZE;
+
+	if (!devinfo->regs || !devinfo->tcm) {
+		brcmf_err("ioremap() failed (%p,%p)\n", devinfo->regs,
+			  devinfo->tcm);
+		return -EINVAL;
+	}
+	brcmf_dbg(PCIE, "Phys addr : reg space = %p base addr %#016llx\n",
+		  devinfo->regs, (unsigned long long)bar0_addr);
+	brcmf_dbg(PCIE, "Phys addr : mem space = %p base addr %#016llx\n",
+		  devinfo->tcm, (unsigned long long)bar1_addr);
+
+	return 0;
+}
+
+
+static void brcmf_pcie_release_resource(struct brcmf_pciedev_info *devinfo)
+{
+	if (devinfo->tcm)
+		iounmap(devinfo->tcm);
+	if (devinfo->regs)
+		iounmap(devinfo->regs);
+
+	pci_disable_device(devinfo->pdev);
+}
+
+
+static int brcmf_pcie_attach_bus(struct device *dev)
+{
+	int ret;
+
+	/* Attach to the common driver interface */
+	ret = brcmf_attach(dev);
+	if (ret) {
+		brcmf_err("brcmf_attach failed\n");
+	} else {
+		ret = brcmf_bus_start(dev);
+		if (ret)
+			brcmf_err("dongle is not responding\n");
+	}
+
+	return ret;
+}
+
+
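+/* Move the BAR0 window so that the given backplane address becomes
+ * accessible and return the offset of that address within the window.
+ */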
+static u32 brcmf_pcie_buscore_prep_addr(const struct pci_dev *pdev, u32 addr)
+{
+	u32 ret_addr;
+
+	ret_addr = addr & (BRCMF_PCIE_BAR0_REG_SIZE - 1);
+	addr &= ~(BRCMF_PCIE_BAR0_REG_SIZE - 1);
+	pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, addr);
+
+	return ret_addr;
+}
+
+
+static u32 brcmf_pcie_buscore_read32(void *ctx, u32 addr)
+{
+	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
+
+	addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr);
+	return brcmf_pcie_read_reg32(devinfo, addr);
+}
+
+
+static void brcmf_pcie_buscore_write32(void *ctx, u32 addr, u32 value)
+{
+	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
+
+	addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr);
+	brcmf_pcie_write_reg32(devinfo, addr, value);
+}
+
+
+static int brcmf_pcie_buscoreprep(void *ctx)
+{
+	return brcmf_pcie_get_resource(ctx);
+}
+
+
+static int brcmf_pcie_buscore_reset(void *ctx, struct brcmf_chip *chip)
+{
+	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
+	u32 val;
+
+	devinfo->ci = chip;
+	brcmf_pcie_reset_device(devinfo);
+
+	val = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
+	if (val != 0xffffffff)
+		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
+				       val);
+
+	return 0;
+}
+
+
+static void brcmf_pcie_buscore_activate(void *ctx, struct brcmf_chip *chip,
+					u32 rstvec)
+{
+	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
+
+	brcmf_pcie_write_tcm32(devinfo, 0, rstvec);
+}
+
+
+static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
+	.prepare = brcmf_pcie_buscoreprep,
+	.reset = brcmf_pcie_buscore_reset,
+	.activate = brcmf_pcie_buscore_activate,
+	.read32 = brcmf_pcie_buscore_read32,
+	.write32 = brcmf_pcie_buscore_write32,
+};
+
+static void brcmf_pcie_setup(struct device *dev, const struct firmware *fw,
+			     void *nvram, u32 nvram_len)
+{
+	struct brcmf_bus *bus = dev_get_drvdata(dev);
+	struct brcmf_pciedev *pcie_bus_dev = bus->bus_priv.pcie;
+	struct brcmf_pciedev_info *devinfo = pcie_bus_dev->devinfo;
+	struct brcmf_commonring **flowrings;
+	int ret;
+	u32 i;
+
+	brcmf_pcie_attach(devinfo);
+
+	ret = brcmf_pcie_download_fw_nvram(devinfo, fw, nvram, nvram_len);
+	if (ret)
+		goto fail;
+
+	devinfo->state = BRCMFMAC_PCIE_STATE_UP;
+
+	ret = brcmf_pcie_init_ringbuffers(devinfo);
+	if (ret)
+		goto fail;
+
+	ret = brcmf_pcie_init_scratchbuffers(devinfo);
+	if (ret)
+		goto fail;
+
+	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
+	ret = brcmf_pcie_request_irq(devinfo);
+	if (ret)
+		goto fail;
+
+	/* hook the commonrings into the bus structure. */
+	for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++)
+		bus->msgbuf->commonrings[i] =
+				&devinfo->shared.commonrings[i]->commonring;
+
+	flowrings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(*flowrings),
+			    GFP_KERNEL);
+	if (!flowrings)
+		goto fail;
+
+	for (i = 0; i < devinfo->shared.nrof_flowrings; i++)
+		flowrings[i] = &devinfo->shared.flowrings[i].commonring;
+	bus->msgbuf->flowrings = flowrings;
+
+	bus->msgbuf->rx_dataoffset = devinfo->shared.rx_dataoffset;
+	bus->msgbuf->max_rxbufpost = devinfo->shared.max_rxbufpost;
+	bus->msgbuf->nrof_flowrings = devinfo->shared.nrof_flowrings;
+
+	init_waitqueue_head(&devinfo->mbdata_resp_wait);
+
+	brcmf_pcie_intr_enable(devinfo);
+	if (brcmf_pcie_attach_bus(bus->dev) == 0)
+		return;
+
+	brcmf_pcie_bus_console_read(devinfo);
+
+fail:
+	device_release_driver(dev);
+}
+
+static int
+brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	int ret;
+	struct brcmf_pciedev_info *devinfo;
+	struct brcmf_pciedev *pcie_bus_dev;
+	struct brcmf_bus *bus;
+	u16 domain_nr;
+	u16 bus_nr;
+
+	domain_nr = pci_domain_nr(pdev->bus) + 1;
+	bus_nr = pdev->bus->number;
+	brcmf_dbg(PCIE, "Enter %x:%x (%d/%d)\n", pdev->vendor, pdev->device,
+		  domain_nr, bus_nr);
+
+	ret = -ENOMEM;
+	devinfo = kzalloc(sizeof(*devinfo), GFP_KERNEL);
+	if (devinfo == NULL)
+		return ret;
+
+	devinfo->pdev = pdev;
+	pcie_bus_dev = NULL;
+	devinfo->ci = brcmf_chip_attach(devinfo, &brcmf_pcie_buscore_ops);
+	if (IS_ERR(devinfo->ci)) {
+		ret = PTR_ERR(devinfo->ci);
+		devinfo->ci = NULL;
+		goto fail;
+	}
+
+	pcie_bus_dev = kzalloc(sizeof(*pcie_bus_dev), GFP_KERNEL);
+	if (pcie_bus_dev == NULL) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
+	if (!bus) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+	bus->msgbuf = kzalloc(sizeof(*bus->msgbuf), GFP_KERNEL);
+	if (!bus->msgbuf) {
+		ret = -ENOMEM;
+		kfree(bus);
+		goto fail;
+	}
+
+	/* hook it all together. */
+	pcie_bus_dev->devinfo = devinfo;
+	pcie_bus_dev->bus = bus;
+	bus->dev = &pdev->dev;
+	bus->bus_priv.pcie = pcie_bus_dev;
+	bus->ops = &brcmf_pcie_bus_ops;
+	bus->proto_type = BRCMF_PROTO_MSGBUF;
+	bus->chip = devinfo->coreid;
+	bus->wowl_supported = pci_pme_capable(pdev, PCI_D3hot);
+	dev_set_drvdata(&pdev->dev, bus);
+
+	ret = brcmf_pcie_get_fwnames(devinfo);
+	if (ret)
+		goto fail_bus;
+
+	ret = brcmf_fw_get_firmwares_pcie(bus->dev, BRCMF_FW_REQUEST_NVRAM |
+						    BRCMF_FW_REQ_NV_OPTIONAL,
+					  devinfo->fw_name, devinfo->nvram_name,
+					  brcmf_pcie_setup, domain_nr, bus_nr);
+	if (ret == 0)
+		return 0;
+fail_bus:
+	kfree(bus->msgbuf);
+	kfree(bus);
+fail:
+	brcmf_err("failed %x:%x\n", pdev->vendor, pdev->device);
+	brcmf_pcie_release_resource(devinfo);
+	if (devinfo->ci)
+		brcmf_chip_detach(devinfo->ci);
+	kfree(pcie_bus_dev);
+	kfree(devinfo);
+	return ret;
+}
+
+
+static void
+brcmf_pcie_remove(struct pci_dev *pdev)
+{
+	struct brcmf_pciedev_info *devinfo;
+	struct brcmf_bus *bus;
+
+	brcmf_dbg(PCIE, "Enter\n");
+
+	bus = dev_get_drvdata(&pdev->dev);
+	if (bus == NULL)
+		return;
+
+	devinfo = bus->bus_priv.pcie->devinfo;
+
+	devinfo->state = BRCMFMAC_PCIE_STATE_DOWN;
+	if (devinfo->ci)
+		brcmf_pcie_intr_disable(devinfo);
+
+	brcmf_detach(&pdev->dev);
+
+	kfree(bus->bus_priv.pcie);
+	kfree(bus->msgbuf->flowrings);
+	kfree(bus->msgbuf);
+	kfree(bus);
+
+	brcmf_pcie_release_irq(devinfo);
+	brcmf_pcie_release_scratchbuffers(devinfo);
+	brcmf_pcie_release_ringbuffers(devinfo);
+	brcmf_pcie_reset_device(devinfo);
+	brcmf_pcie_release_resource(devinfo);
+
+	if (devinfo->ci)
+		brcmf_chip_detach(devinfo->ci);
+
+	kfree(devinfo);
+	dev_set_drvdata(&pdev->dev, NULL);
+}
+
+
+#ifdef CONFIG_PM
+
+
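+/* Suspend: tell the firmware the host is entering D3 and wait for its ack
+ * via the device-to-host mailbox. Without WOWL (or if saving PCI state
+ * fails) the device is torn down instead of being put to sleep.
+ */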
+static int brcmf_pcie_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct brcmf_pciedev_info *devinfo;
+	struct brcmf_bus *bus;
+	int err;
+
+	brcmf_dbg(PCIE, "Enter, state=%d, pdev=%p\n", state.event, pdev);
+
+	bus = dev_get_drvdata(&pdev->dev);
+	devinfo = bus->bus_priv.pcie->devinfo;
+
+	brcmf_bus_change_state(bus, BRCMF_BUS_DOWN);
+
+	devinfo->mbdata_completed = false;
+	brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D3_INFORM);
+
+	wait_event_timeout(devinfo->mbdata_resp_wait,
+			   devinfo->mbdata_completed,
+			   msecs_to_jiffies(BRCMF_PCIE_MBDATA_TIMEOUT));
+	if (!devinfo->mbdata_completed) {
+		brcmf_err("Timeout on response for entering D3 substate\n");
+		return -EIO;
+	}
+	brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D0_INFORM_IN_USE);
+
+	err = pci_save_state(pdev);
+	if (err)
+		brcmf_err("pci_save_state failed, err=%d\n", err);
+	if ((err) || (!devinfo->wowl_enabled)) {
+		brcmf_chip_detach(devinfo->ci);
+		devinfo->ci = NULL;
+		brcmf_pcie_remove(pdev);
+		return 0;
+	}
+
+	return pci_prepare_to_sleep(pdev);
+}
+
+static int brcmf_pcie_resume(struct pci_dev *pdev)
+{
+	struct brcmf_pciedev_info *devinfo;
+	struct brcmf_bus *bus;
+	int err;
+
+	bus = dev_get_drvdata(&pdev->dev);
+	brcmf_dbg(PCIE, "Enter, pdev=%p, bus=%p\n", pdev, bus);
+
+	err = pci_set_power_state(pdev, PCI_D0);
+	if (err) {
+		brcmf_err("pci_set_power_state failed, err=%d\n", err);
+		goto cleanup;
+	}
+	pci_restore_state(pdev);
+	pci_enable_wake(pdev, PCI_D3hot, false);
+	pci_enable_wake(pdev, PCI_D3cold, false);
+
+	/* Check if the device is still up and running; if so, we are ready */
+	if (bus) {
+		devinfo = bus->bus_priv.pcie->devinfo;
+		if (brcmf_pcie_read_reg32(devinfo,
+					  BRCMF_PCIE_PCIE2REG_INTMASK) != 0) {
+			if (brcmf_pcie_send_mb_data(devinfo,
+						    BRCMF_H2D_HOST_D0_INFORM))
+				goto cleanup;
+			brcmf_dbg(PCIE, "Hot resume, continue....\n");
+			brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
+			brcmf_bus_change_state(bus, BRCMF_BUS_UP);
+			brcmf_pcie_intr_enable(devinfo);
+			return 0;
+		}
+	}
+
+cleanup:
+	if (bus) {
+		devinfo = bus->bus_priv.pcie->devinfo;
+		brcmf_chip_detach(devinfo->ci);
+		devinfo->ci = NULL;
+		brcmf_pcie_remove(pdev);
+	}
+	err = brcmf_pcie_probe(pdev, NULL);
+	if (err)
+		brcmf_err("probe after resume failed, err=%d\n", err);
+
+	return err;
+}
+
+
+#endif /* CONFIG_PM */
+
+
+#define BRCMF_PCIE_DEVICE(dev_id)	{ BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\
+	PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }
+
+static struct pci_device_id brcmf_pcie_devid_table[] = {
+	BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID),
+	BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
+	BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
+	BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
+	BRCMF_PCIE_DEVICE(BRCM_PCIE_4358_DEVICE_ID),
+	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_DEVICE_ID),
+	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID),
+	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID),
+	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_RAW_DEVICE_ID),
+	BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_DEVICE_ID),
+	BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_2G_DEVICE_ID),
+	BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_5G_DEVICE_ID),
+	BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_DEVICE_ID),
+	BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_2G_DEVICE_ID),
+	BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_5G_DEVICE_ID),
+	BRCMF_PCIE_DEVICE(BRCM_PCIE_4371_DEVICE_ID),
+	{ /* end: all zeroes */ }
+};
+
+
+MODULE_DEVICE_TABLE(pci, brcmf_pcie_devid_table);
+
+
+static struct pci_driver brcmf_pciedrvr = {
+	.node = {},
+	.name = KBUILD_MODNAME,
+	.id_table = brcmf_pcie_devid_table,
+	.probe = brcmf_pcie_probe,
+	.remove = brcmf_pcie_remove,
+#ifdef CONFIG_PM
+	.suspend = brcmf_pcie_suspend,
+	.resume = brcmf_pcie_resume
+#endif /* CONFIG_PM */
+};
+
+
+void brcmf_pcie_register(void)
+{
+	int err;
+
+	brcmf_dbg(PCIE, "Enter\n");
+	err = pci_register_driver(&brcmf_pciedrvr);
+	if (err)
+		brcmf_err("PCIE driver registration failed, err=%d\n", err);
+}
+
+
+void brcmf_pcie_exit(void)
+{
+	brcmf_dbg(PCIE, "Enter\n");
+	pci_unregister_driver(&brcmf_pciedrvr);
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/pcie.h b/drivers/net/wireless/brcm80211/brcmfmac/pcie.h
new file mode 100644
index 0000000..6edaaf8
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/pcie.h
@@ -0,0 +1,29 @@
+/* Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef BRCMFMAC_PCIE_H
+#define BRCMFMAC_PCIE_H
+
+
+struct brcmf_pciedev {
+	struct brcmf_bus *bus;
+	struct brcmf_pciedev_info *devinfo;
+};
+
+
+void brcmf_pcie_exit(void);
+void brcmf_pcie_register(void);
+
+
+#endif /* BRCMFMAC_PCIE_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/proto.c b/drivers/net/wireless/brcm80211/brcmfmac/proto.c
new file mode 100644
index 0000000..26b68c3
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/proto.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2013 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+
+#include <brcmu_wifi.h>
+#include "core.h"
+#include "bus.h"
+#include "debug.h"
+#include "proto.h"
+#include "bcdc.h"
+#include "msgbuf.h"
+
+
+int brcmf_proto_attach(struct brcmf_pub *drvr)
+{
+	struct brcmf_proto *proto;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	proto = kzalloc(sizeof(*proto), GFP_ATOMIC);
+	if (!proto)
+		goto fail;
+
+	drvr->proto = proto;
+
+	if (drvr->bus_if->proto_type == BRCMF_PROTO_BCDC) {
+		if (brcmf_proto_bcdc_attach(drvr))
+			goto fail;
+	} else if (drvr->bus_if->proto_type == BRCMF_PROTO_MSGBUF) {
+		if (brcmf_proto_msgbuf_attach(drvr))
+			goto fail;
+	} else {
+		brcmf_err("Unsupported proto type %d\n",
+			  drvr->bus_if->proto_type);
+		goto fail;
+	}
+	if ((proto->txdata == NULL) || (proto->hdrpull == NULL) ||
+	    (proto->query_dcmd == NULL) || (proto->set_dcmd == NULL) ||
+	    (proto->configure_addr_mode == NULL) ||
+	    (proto->delete_peer == NULL) || (proto->add_tdls_peer == NULL)) {
+		brcmf_err("Not all proto handlers have been installed\n");
+		goto fail;
+	}
+	return 0;
+
+fail:
+	kfree(proto);
+	drvr->proto = NULL;
+	return -ENOMEM;
+}
+
+void brcmf_proto_detach(struct brcmf_pub *drvr)
+{
+	brcmf_dbg(TRACE, "Enter\n");
+
+	if (drvr->proto) {
+		if (drvr->bus_if->proto_type == BRCMF_PROTO_BCDC)
+			brcmf_proto_bcdc_detach(drvr);
+		else if (drvr->bus_if->proto_type == BRCMF_PROTO_MSGBUF)
+			brcmf_proto_msgbuf_detach(drvr);
+		kfree(drvr->proto);
+		drvr->proto = NULL;
+	}
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/proto.h b/drivers/net/wireless/brcm80211/brcmfmac/proto.h
new file mode 100644
index 0000000..d55119d
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/proto.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2013 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef BRCMFMAC_PROTO_H
+#define BRCMFMAC_PROTO_H
+
+
+enum proto_addr_mode {
+	ADDR_INDIRECT	= 0,
+	ADDR_DIRECT
+};
+
+
+struct brcmf_proto {
+	int (*hdrpull)(struct brcmf_pub *drvr, bool do_fws,
+		       struct sk_buff *skb, struct brcmf_if **ifp);
+	int (*query_dcmd)(struct brcmf_pub *drvr, int ifidx, uint cmd,
+			  void *buf, uint len);
+	int (*set_dcmd)(struct brcmf_pub *drvr, int ifidx, uint cmd, void *buf,
+			uint len);
+	int (*txdata)(struct brcmf_pub *drvr, int ifidx, u8 offset,
+		      struct sk_buff *skb);
+	void (*configure_addr_mode)(struct brcmf_pub *drvr, int ifidx,
+				    enum proto_addr_mode addr_mode);
+	void (*delete_peer)(struct brcmf_pub *drvr, int ifidx,
+			    u8 peer[ETH_ALEN]);
+	void (*add_tdls_peer)(struct brcmf_pub *drvr, int ifidx,
+			      u8 peer[ETH_ALEN]);
+	void *pd;
+};
+
+
+int brcmf_proto_attach(struct brcmf_pub *drvr);
+void brcmf_proto_detach(struct brcmf_pub *drvr);
+
+static inline int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws,
+				      struct sk_buff *skb,
+				      struct brcmf_if **ifp)
+{
+	struct brcmf_if *tmp = NULL;
+
+	/* ensure the protocol layer is always called with a non-NULL,
+	 * initialized pointer.
+	 */
+	if (ifp)
+		*ifp = NULL;
+	else
+		ifp = &tmp;
+	return drvr->proto->hdrpull(drvr, do_fws, skb, ifp);
+}
+static inline int brcmf_proto_query_dcmd(struct brcmf_pub *drvr, int ifidx,
+					 uint cmd, void *buf, uint len)
+{
+	return drvr->proto->query_dcmd(drvr, ifidx, cmd, buf, len);
+}
+static inline int brcmf_proto_set_dcmd(struct brcmf_pub *drvr, int ifidx,
+				       uint cmd, void *buf, uint len)
+{
+	return drvr->proto->set_dcmd(drvr, ifidx, cmd, buf, len);
+}
+static inline int brcmf_proto_txdata(struct brcmf_pub *drvr, int ifidx,
+				     u8 offset, struct sk_buff *skb)
+{
+	return drvr->proto->txdata(drvr, ifidx, offset, skb);
+}
+static inline void
+brcmf_proto_configure_addr_mode(struct brcmf_pub *drvr, int ifidx,
+				enum proto_addr_mode addr_mode)
+{
+	drvr->proto->configure_addr_mode(drvr, ifidx, addr_mode);
+}
+static inline void
+brcmf_proto_delete_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
+{
+	drvr->proto->delete_peer(drvr, ifidx, peer);
+}
+static inline void
+brcmf_proto_add_tdls_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
+{
+	drvr->proto->add_tdls_peer(drvr, ifidx, peer);
+}
+
+
+#endif /* BRCMFMAC_PROTO_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
new file mode 100644
index 0000000..7e74ac3
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
@@ -0,0 +1,4376 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/types.h>
+#include <linux/atomic.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/printk.h>
+#include <linux/pci_ids.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/card.h>
+#include <linux/semaphore.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/bcma/bcma.h>
+#include <linux/debugfs.h>
+#include <linux/vmalloc.h>
+#include <linux/platform_data/brcmfmac-sdio.h>
+#include <linux/moduleparam.h>
+#include <asm/unaligned.h>
+#include <defs.h>
+#include <brcmu_wifi.h>
+#include <brcmu_utils.h>
+#include <brcm_hw_ids.h>
+#include <soc.h>
+#include "sdio.h"
+#include "chip.h"
+#include "firmware.h"
+
+#define DCMD_RESP_TIMEOUT	2000	/* In milliseconds */
+#define CTL_DONE_TIMEOUT	2000	/* In milliseconds */
+
+#ifdef DEBUG
+
+#define BRCMF_TRAP_INFO_SIZE	80
+
+#define CBUF_LEN	(128)
+
+/* Device console log buffer state */
+#define CONSOLE_BUFFER_MAX	2024
+
+struct rte_log_le {
+	__le32 buf;		/* Can't be pointer on (64-bit) hosts */
+	__le32 buf_size;
+	__le32 idx;
+	char *_buf_compat;	/* Redundant pointer for backward compat. */
+};
+
+struct rte_console {
+	/* Virtual UART
+	 * When there is no UART (e.g. Quickturn),
+	 * the host should write a complete
+	 * input line directly into cbuf and then write
+	 * the length into vcons_in.
+	 * This may also be used when there is a real UART
+	 * (at risk of conflicting with
+	 * the real UART).  vcons_out is currently unused.
+	 */
+	uint vcons_in;
+	uint vcons_out;
+
+	/* Output (logging) buffer
+	 * Console output is written to a ring buffer log_buf at index log_idx.
+	 * The host may read the output when it sees log_idx advance.
+	 * Output will be lost if the output wraps around faster than the host
+	 * polls.
+	 */
+	struct rte_log_le log_le;
+
+	/* Console input line buffer
+	 * Characters are read one at a time into cbuf
+	 * until <CR> is received, then
+	 * the buffer is processed as a command line.
+	 * Also used for virtual UART.
+	 */
+	uint cbuf_idx;
+	char cbuf[CBUF_LEN];
+};
+
+#endif				/* DEBUG */
+#include <chipcommon.h>
+
+#include "bus.h"
+#include "debug.h"
+#include "tracepoint.h"
+
+#define TXQLEN		2048	/* bulk tx queue length */
+#define TXHI		(TXQLEN - 256)	/* turn on flow control above TXHI */
+#define TXLOW		(TXHI - 256)	/* turn off flow control below TXLOW */
+#define PRIOMASK	7
+
+#define TXRETRIES	2	/* # of retries for tx frames */
+
+#define BRCMF_RXBOUND	50	/* Default for max rx frames in
+				 one scheduling */
+
+#define BRCMF_TXBOUND	20	/* Default for max tx frames in
+				 one scheduling */
+
+#define BRCMF_TXMINMAX	1	/* Max tx frames if rx still pending */
+
+#define MEMBLOCK	2048	/* Block size used for downloading
+				 of dongle image */
+#define MAX_DATA_BUF	(32 * 1024)	/* Must be large enough to hold
+				 biggest possible glom */
+
+#define BRCMF_FIRSTREAD	(1 << 6)
+
+#define BRCMF_CONSOLE	10	/* watchdog interval to poll console */
+
+/* SBSDIO_DEVICE_CTL */
+
+/* 1: device will assert busy signal when receiving CMD53 */
+#define SBSDIO_DEVCTL_SETBUSY		0x01
+/* 1: assertion of sdio interrupt is synchronous to the sdio clock */
+#define SBSDIO_DEVCTL_SPI_INTR_SYNC	0x02
+/* 1: mask all interrupts to host except the chipActive (rev 8) */
+#define SBSDIO_DEVCTL_CA_INT_ONLY	0x04
+/* 1: isolate internal sdio signals, put external pads in tri-state; requires
+ * sdio bus power cycle to clear (rev 9) */
+#define SBSDIO_DEVCTL_PADS_ISO		0x08
+/* Force SD->SB reset mapping (rev 11) */
+#define SBSDIO_DEVCTL_SB_RST_CTL	0x30
+/*   Determined by CoreControl bit */
+#define SBSDIO_DEVCTL_RST_CORECTL	0x00
+/*   Force backplane reset */
+#define SBSDIO_DEVCTL_RST_BPRESET	0x10
+/*   Force no backplane reset */
+#define SBSDIO_DEVCTL_RST_NOBPRESET	0x20
+
+/* direct(mapped) cis space */
+
+/* MAPPED common CIS address */
+#define SBSDIO_CIS_BASE_COMMON		0x1000
+/* maximum bytes in one CIS */
+#define SBSDIO_CIS_SIZE_LIMIT		0x200
+/* cis offset addr is < 17 bits */
+#define SBSDIO_CIS_OFT_ADDR_MASK	0x1FFFF
+
+/* manfid tuple length, include tuple, link bytes */
+#define SBSDIO_CIS_MANFID_TUPLE_LEN	6
+
+#define CORE_BUS_REG(base, field) \
+		(base + offsetof(struct sdpcmd_regs, field))
+
+/* SDIO function 1 register CHIPCLKCSR */
+/* Force ALP request to backplane */
+#define SBSDIO_FORCE_ALP		0x01
+/* Force HT request to backplane */
+#define SBSDIO_FORCE_HT			0x02
+/* Force ILP request to backplane */
+#define SBSDIO_FORCE_ILP		0x04
+/* Make ALP ready (power up xtal) */
+#define SBSDIO_ALP_AVAIL_REQ		0x08
+/* Make HT ready (power up PLL) */
+#define SBSDIO_HT_AVAIL_REQ		0x10
+/* Squelch clock requests from HW */
+#define SBSDIO_FORCE_HW_CLKREQ_OFF	0x20
+/* Status: ALP is ready */
+#define SBSDIO_ALP_AVAIL		0x40
+/* Status: HT is ready */
+#define SBSDIO_HT_AVAIL			0x80
+#define SBSDIO_CSR_MASK			0x1F
+#define SBSDIO_AVBITS		(SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
+#define SBSDIO_ALPAV(regval)	((regval) & SBSDIO_AVBITS)
+#define SBSDIO_HTAV(regval)	(((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
+#define SBSDIO_ALPONLY(regval)	(SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
+#define SBSDIO_CLKAV(regval, alponly) \
+	(SBSDIO_ALPAV(regval) && (alponly ? 1 : SBSDIO_HTAV(regval)))
+
+/* intstatus */
+#define I_SMB_SW0	(1 << 0)	/* To SB Mail S/W interrupt 0 */
+#define I_SMB_SW1	(1 << 1)	/* To SB Mail S/W interrupt 1 */
+#define I_SMB_SW2	(1 << 2)	/* To SB Mail S/W interrupt 2 */
+#define I_SMB_SW3	(1 << 3)	/* To SB Mail S/W interrupt 3 */
+#define I_SMB_SW_MASK	0x0000000f	/* To SB Mail S/W interrupts mask */
+#define I_SMB_SW_SHIFT	0	/* To SB Mail S/W interrupts shift */
+#define I_HMB_SW0	(1 << 4)	/* To Host Mail S/W interrupt 0 */
+#define I_HMB_SW1	(1 << 5)	/* To Host Mail S/W interrupt 1 */
+#define I_HMB_SW2	(1 << 6)	/* To Host Mail S/W interrupt 2 */
+#define I_HMB_SW3	(1 << 7)	/* To Host Mail S/W interrupt 3 */
+#define I_HMB_SW_MASK	0x000000f0	/* To Host Mail S/W interrupts mask */
+#define I_HMB_SW_SHIFT	4	/* To Host Mail S/W interrupts shift */
+#define I_WR_OOSYNC	(1 << 8)	/* Write Frame Out Of Sync */
+#define I_RD_OOSYNC	(1 << 9)	/* Read Frame Out Of Sync */
+#define	I_PC		(1 << 10)	/* descriptor error */
+#define	I_PD		(1 << 11)	/* data error */
+#define	I_DE		(1 << 12)	/* Descriptor protocol Error */
+#define	I_RU		(1 << 13)	/* Receive descriptor Underflow */
+#define	I_RO		(1 << 14)	/* Receive fifo Overflow */
+#define	I_XU		(1 << 15)	/* Transmit fifo Underflow */
+#define	I_RI		(1 << 16)	/* Receive Interrupt */
+#define I_BUSPWR	(1 << 17)	/* SDIO Bus Power Change (rev 9) */
+#define I_XMTDATA_AVAIL (1 << 23)	/* bits in fifo */
+#define	I_XI		(1 << 24)	/* Transmit Interrupt */
+#define I_RF_TERM	(1 << 25)	/* Read Frame Terminate */
+#define I_WF_TERM	(1 << 26)	/* Write Frame Terminate */
+#define I_PCMCIA_XU	(1 << 27)	/* PCMCIA Transmit FIFO Underflow */
+#define I_SBINT		(1 << 28)	/* sbintstatus Interrupt */
+#define I_CHIPACTIVE	(1 << 29)	/* chip from doze to active state */
+#define I_SRESET	(1 << 30)	/* CCCR RES interrupt */
+#define I_IOE2		(1U << 31)	/* CCCR IOE2 Bit Changed */
+#define	I_ERRORS	(I_PC | I_PD | I_DE | I_RU | I_RO | I_XU)
+#define I_DMA		(I_RI | I_XI | I_ERRORS)
+
+/* corecontrol */
+#define CC_CISRDY		(1 << 0)	/* CIS Ready */
+#define CC_BPRESEN		(1 << 1)	/* CCCR RES signal */
+#define CC_F2RDY		(1 << 2)	/* set CCCR IOR2 bit */
+#define CC_CLRPADSISO		(1 << 3)	/* clear SDIO pads isolation */
+#define CC_XMTDATAAVAIL_MODE	(1 << 4)
+#define CC_XMTDATAAVAIL_CTRL	(1 << 5)
+
+/* SDA_FRAMECTRL */
+#define SFC_RF_TERM	(1 << 0)	/* Read Frame Terminate */
+#define SFC_WF_TERM	(1 << 1)	/* Write Frame Terminate */
+#define SFC_CRC4WOOS	(1 << 2)	/* CRC error for write out of sync */
+#define SFC_ABORTALL	(1 << 3)	/* Abort all in-progress frames */
+
+/*
+ * Software allocation of To SB Mailbox resources
+ */
+
+/* tosbmailbox bits corresponding to intstatus bits */
+#define SMB_NAK		(1 << 0)	/* Frame NAK */
+#define SMB_INT_ACK	(1 << 1)	/* Host Interrupt ACK */
+#define SMB_USE_OOB	(1 << 2)	/* Use OOB Wakeup */
+#define SMB_DEV_INT	(1 << 3)	/* Miscellaneous Interrupt */
+
+/* tosbmailboxdata */
+#define SMB_DATA_VERSION_SHIFT	16	/* host protocol version */
+
+/*
+ * Software allocation of To Host Mailbox resources
+ */
+
+/* intstatus bits */
+#define I_HMB_FC_STATE	I_HMB_SW0	/* Flow Control State */
+#define I_HMB_FC_CHANGE	I_HMB_SW1	/* Flow Control State Changed */
+#define I_HMB_FRAME_IND	I_HMB_SW2	/* Frame Indication */
+#define I_HMB_HOST_INT	I_HMB_SW3	/* Miscellaneous Interrupt */
+
+/* tohostmailboxdata */
+#define HMB_DATA_NAKHANDLED	1	/* retransmit NAK'd frame */
+#define HMB_DATA_DEVREADY	2	/* talk to host after enable */
+#define HMB_DATA_FC		4	/* per prio flowcontrol update flag */
+#define HMB_DATA_FWREADY	8	/* fw ready for protocol activity */
+
+#define HMB_DATA_FCDATA_MASK	0xff000000
+#define HMB_DATA_FCDATA_SHIFT	24
+
+#define HMB_DATA_VERSION_MASK	0x00ff0000
+#define HMB_DATA_VERSION_SHIFT	16
+
+/*
+ * Software-defined protocol header
+ */
+
+/* Current protocol version */
+#define SDPCM_PROT_VERSION	4
+
+/*
+ * Shared structure between dongle and the host.
+ * The structure contains pointers to trap or assert information.
+ */
+#define SDPCM_SHARED_VERSION       0x0003
+#define SDPCM_SHARED_VERSION_MASK  0x00FF
+#define SDPCM_SHARED_ASSERT_BUILT  0x0100
+#define SDPCM_SHARED_ASSERT        0x0200
+#define SDPCM_SHARED_TRAP          0x0400
+
+/* Space for header read, limit for data packets */
+#define MAX_HDR_READ	(1 << 6)
+#define MAX_RX_DATASZ	2048
+
+/* Bump up limit on waiting for HT to account for first startup;
+ * if the image is doing a CRC calculation before programming the PMU
+ * for HT availability, it could take a couple hundred ms more, so
+ * max out at 1 second (1000000 us).
+ */
+#undef PMU_MAX_TRANSITION_DLY
+#define PMU_MAX_TRANSITION_DLY 1000000
+
+/* Value for ChipClockCSR during initial setup */
+#define BRCMF_INIT_CLKCTL1	(SBSDIO_FORCE_HW_CLKREQ_OFF |	\
+					SBSDIO_ALP_AVAIL_REQ)
+
+/* Flags for SDH calls */
+#define F2SYNC	(SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
+
+#define BRCMF_IDLE_ACTIVE	0	/* Do not request any SD clock change
+					 * when idle
+					 */
+#define BRCMF_IDLE_INTERVAL	1
+
+#define KSO_WAIT_US 50
+#define MAX_KSO_ATTEMPTS (PMU_MAX_TRANSITION_DLY/KSO_WAIT_US)
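+/* With KSO_WAIT_US = 50 us this allows up to 20000 polls, i.e. roughly one
+ * second of udelay() time in brcmf_sdio_kso_control() before a sleep/wake
+ * transition is given up on.
+ */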
+
+/*
+ * Conversion of 802.1D priority to precedence level
+ */
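+/* With the brcmu_wifi.h values (PRIO_8021D_BE = 0, PRIO_8021D_NONE = 2) the
+ * XOR with 2 below simply swaps those two priorities; every other priority
+ * maps straight through to its own precedence level.
+ */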
+static uint prio2prec(u32 prio)
+{
+	return (prio == PRIO_8021D_NONE || prio == PRIO_8021D_BE) ?
+	       (prio^2) : prio;
+}
+
+#ifdef DEBUG
+/* Device console log buffer state */
+struct brcmf_console {
+	uint count;		/* Poll interval msec counter */
+	uint log_addr;		/* Log struct address (fixed) */
+	struct rte_log_le log_le;	/* Log struct (host copy) */
+	uint bufsize;		/* Size of log buffer */
+	u8 *buf;		/* Log buffer (host copy) */
+	uint last;		/* Last buffer read index */
+};
+
+struct brcmf_trap_info {
+	__le32		type;
+	__le32		epc;
+	__le32		cpsr;
+	__le32		spsr;
+	__le32		r0;	/* a1 */
+	__le32		r1;	/* a2 */
+	__le32		r2;	/* a3 */
+	__le32		r3;	/* a4 */
+	__le32		r4;	/* v1 */
+	__le32		r5;	/* v2 */
+	__le32		r6;	/* v3 */
+	__le32		r7;	/* v4 */
+	__le32		r8;	/* v5 */
+	__le32		r9;	/* sb/v6 */
+	__le32		r10;	/* sl/v7 */
+	__le32		r11;	/* fp/v8 */
+	__le32		r12;	/* ip */
+	__le32		r13;	/* sp */
+	__le32		r14;	/* lr */
+	__le32		pc;	/* r15 */
+};
+#endif				/* DEBUG */
+
+struct sdpcm_shared {
+	u32 flags;
+	u32 trap_addr;
+	u32 assert_exp_addr;
+	u32 assert_file_addr;
+	u32 assert_line;
+	u32 console_addr;	/* Address of struct rte_console */
+	u32 msgtrace_addr;
+	u8 tag[32];
+	u32 brpt_addr;
+};
+
+struct sdpcm_shared_le {
+	__le32 flags;
+	__le32 trap_addr;
+	__le32 assert_exp_addr;
+	__le32 assert_file_addr;
+	__le32 assert_line;
+	__le32 console_addr;	/* Address of struct rte_console */
+	__le32 msgtrace_addr;
+	u8 tag[32];
+	__le32 brpt_addr;
+};
+
+/* dongle SDIO bus specific header info */
+struct brcmf_sdio_hdrinfo {
+	u8 seq_num;
+	u8 channel;
+	u16 len;
+	u16 len_left;
+	u16 len_nxtfrm;
+	u8 dat_offset;
+	bool lastfrm;
+	u16 tail_pad;
+};
+
+/*
+ * hold counter variables
+ */
+struct brcmf_sdio_count {
+	uint intrcount;		/* Count of device interrupt callbacks */
+	uint lastintrs;		/* Count as of last watchdog timer */
+	uint pollcnt;		/* Count of active polls */
+	uint regfails;		/* Count of R_REG failures */
+	uint tx_sderrs;		/* Count of tx attempts with sd errors */
+	uint fcqueued;		/* Tx packets that got queued */
+	uint rxrtx;		/* Count of rtx requests (NAK to dongle) */
+	uint rx_toolong;	/* Received frames too long to handle */
+	uint rxc_errors;	/* SDIO errors when reading control frames */
+	uint rx_hdrfail;	/* SDIO errors on header reads */
+	uint rx_badhdr;		/* Bad received headers (roosync?) */
+	uint rx_badseq;		/* Mismatched rx sequence number */
+	uint fc_rcvd;		/* Number of flow-control events received */
+	uint fc_xoff;		/* Number which turned on flow-control */
+	uint fc_xon;		/* Number which turned off flow-control */
+	uint rxglomfail;	/* Failed deglom attempts */
+	uint rxglomframes;	/* Number of glom frames (superframes) */
+	uint rxglompkts;	/* Number of packets from glom frames */
+	uint f2rxhdrs;		/* Number of header reads */
+	uint f2rxdata;		/* Number of frame data reads */
+	uint f2txdata;		/* Number of f2 frame writes */
+	uint f1regdata;		/* Number of f1 register accesses */
+	uint tickcnt;		/* Number of times the watchdog has been scheduled */
+	ulong tx_ctlerrs;	/* Err of sending ctrl frames */
+	ulong tx_ctlpkts;	/* Ctrl frames sent to dongle */
+	ulong rx_ctlerrs;	/* Err of processing rx ctrl frames */
+	ulong rx_ctlpkts;	/* Ctrl frames processed from dongle */
+	ulong rx_readahead_cnt;	/* packets where header read-ahead was used */
+};
+
+/* misc chip info needed by some of the routines */
+/* Private data for SDIO bus interaction */
+struct brcmf_sdio {
+	struct brcmf_sdio_dev *sdiodev;	/* sdio device handler */
+	struct brcmf_chip *ci;	/* Chip info struct */
+
+	u32 hostintmask;	/* Copy of Host Interrupt Mask */
+	atomic_t intstatus;	/* Intstatus bits (events) pending */
+	atomic_t fcstate;	/* State of dongle flow-control */
+
+	uint blocksize;		/* Block size of SDIO transfers */
+	uint roundup;		/* Max roundup limit */
+
+	struct pktq txq;	/* Queue length used for flow-control */
+	u8 flowcontrol;	/* per prio flow control bitmask */
+	u8 tx_seq;		/* Transmit sequence number (next) */
+	u8 tx_max;		/* Maximum transmit sequence allowed */
+
+	u8 *hdrbuf;		/* buffer for handling rx frame */
+	u8 *rxhdr;		/* Header of current rx frame (in hdrbuf) */
+	u8 rx_seq;		/* Receive sequence number (expected) */
+	struct brcmf_sdio_hdrinfo cur_read;
+				/* info of current read frame */
+	bool rxskip;		/* Skip receive (awaiting NAK ACK) */
+	bool rxpending;		/* Data frame pending in dongle */
+
+	uint rxbound;		/* Rx frames to read before resched */
+	uint txbound;		/* Tx frames to send before resched */
+	uint txminmax;
+
+	struct sk_buff *glomd;	/* Packet containing glomming descriptor */
+	struct sk_buff_head glom; /* Packet list for glommed superframe */
+	uint glomerr;		/* Glom packet read errors */
+
+	u8 *rxbuf;		/* Buffer for receiving control packets */
+	uint rxblen;		/* Allocated length of rxbuf */
+	u8 *rxctl;		/* Aligned pointer into rxbuf */
+	u8 *rxctl_orig;		/* pointer for freeing rxctl */
+	uint rxlen;		/* Length of valid data in buffer */
+	spinlock_t rxctl_lock;	/* protection lock for ctrl frame resources */
+
+	u8 sdpcm_ver;	/* Bus protocol reported by dongle */
+
+	bool intr;		/* Use interrupts */
+	bool poll;		/* Use polling */
+	atomic_t ipend;		/* Device interrupt is pending */
+	uint spurious;		/* Count of spurious interrupts */
+	uint pollrate;		/* Ticks between device polls */
+	uint polltick;		/* Tick counter */
+
+#ifdef DEBUG
+	uint console_interval;
+	struct brcmf_console console;	/* Console output polling support */
+	uint console_addr;	/* Console address from shared struct */
+#endif				/* DEBUG */
+
+	uint clkstate;		/* State of sd and backplane clock(s) */
+	s32 idletime;		/* Control for activity timeout */
+	s32 idlecount;		/* Activity timeout counter */
+	s32 idleclock;		/* How to set bus driver when idle */
+	bool rxflow_mode;	/* Rx flow control mode */
+	bool rxflow;		/* Is rx flow control on */
+	bool alp_only;		/* Don't use HT clock (ALP only) */
+
+	u8 *ctrl_frame_buf;
+	u16 ctrl_frame_len;
+	bool ctrl_frame_stat;
+	int ctrl_frame_err;
+
+	spinlock_t txq_lock;		/* protect bus->txq */
+	wait_queue_head_t ctrl_wait;
+	wait_queue_head_t dcmd_resp_wait;
+
+	struct timer_list timer;
+	struct completion watchdog_wait;
+	struct task_struct *watchdog_tsk;
+	bool wd_timer_valid;
+	uint save_ms;
+
+	struct workqueue_struct *brcmf_wq;
+	struct work_struct datawork;
+	bool dpc_triggered;
+	bool dpc_running;
+
+	bool txoff;		/* Transmit flow-controlled */
+	struct brcmf_sdio_count sdcnt;
+	bool sr_enabled; /* SaveRestore enabled */
+	bool sleeping;
+
+	u8 tx_hdrlen;		/* sdio bus header length for tx packet */
+	bool txglom;		/* host tx glomming enable flag */
+	u16 head_align;		/* buffer pointer alignment */
+	u16 sgentry_align;	/* scatter-gather buffer alignment */
+};
+
+/* clkstate */
+#define CLK_NONE	0
+#define CLK_SDONLY	1
+#define CLK_PENDING	2
+#define CLK_AVAIL	3
+
+#ifdef DEBUG
+static int qcount[NUMPRIO];
+#endif				/* DEBUG */
+
+#define DEFAULT_SDIO_DRIVE_STRENGTH	6	/* in milliamps */
+
+#define RETRYCHAN(chan) ((chan) == SDPCM_EVENT_CHANNEL)
+
+/* Retry count for register access failures */
+static const uint retry_limit = 2;
+
+/* Limit on rounding up frames */
+static const uint max_roundup = 512;
+
+#define ALIGNMENT  4
+
+enum brcmf_sdio_frmtype {
+	BRCMF_SDIO_FT_NORMAL,
+	BRCMF_SDIO_FT_SUPER,
+	BRCMF_SDIO_FT_SUB,
+};
+
+#define SDIOD_DRVSTR_KEY(chip, pmu)     (((chip) << 16) | (pmu))
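+/* The key packs the chip id into the upper 16 bits and the PMU revision into
+ * the lower 16 bits so that a single value can select one of the
+ * drive-strength tables below.
+ */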
+
+/* SDIO Pad drive strength to select value mappings */
+struct sdiod_drive_str {
+	u8 strength;	/* Pad Drive Strength in mA */
+	u8 sel;		/* Chip-specific select value */
+};
+
+/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8V) */
+static const struct sdiod_drive_str sdiod_drvstr_tab1_1v8[] = {
+	{32, 0x6},
+	{26, 0x7},
+	{22, 0x4},
+	{16, 0x5},
+	{12, 0x2},
+	{8, 0x3},
+	{4, 0x0},
+	{0, 0x1}
+};
+
+/* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */
+static const struct sdiod_drive_str sdiod_drive_strength_tab5_1v8[] = {
+	{6, 0x7},
+	{5, 0x6},
+	{4, 0x5},
+	{3, 0x4},
+	{2, 0x2},
+	{1, 0x1},
+	{0, 0x0}
+};
+
+/* SDIO Drive Strength to sel value table for PMU Rev 17 (1.8v) */
+static const struct sdiod_drive_str sdiod_drvstr_tab6_1v8[] = {
+	{3, 0x3},
+	{2, 0x2},
+	{1, 0x1},
+	{0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for 43143 PMU Rev 17 (3.3V) */
+static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
+	{16, 0x7},
+	{12, 0x5},
+	{8,  0x3},
+	{4,  0x1}
+};
+
+#define BCM43143_FIRMWARE_NAME		"brcm/brcmfmac43143-sdio.bin"
+#define BCM43143_NVRAM_NAME		"brcm/brcmfmac43143-sdio.txt"
+#define BCM43241B0_FIRMWARE_NAME	"brcm/brcmfmac43241b0-sdio.bin"
+#define BCM43241B0_NVRAM_NAME		"brcm/brcmfmac43241b0-sdio.txt"
+#define BCM43241B4_FIRMWARE_NAME	"brcm/brcmfmac43241b4-sdio.bin"
+#define BCM43241B4_NVRAM_NAME		"brcm/brcmfmac43241b4-sdio.txt"
+#define BCM43241B5_FIRMWARE_NAME	"brcm/brcmfmac43241b5-sdio.bin"
+#define BCM43241B5_NVRAM_NAME		"brcm/brcmfmac43241b5-sdio.txt"
+#define BCM4329_FIRMWARE_NAME		"brcm/brcmfmac4329-sdio.bin"
+#define BCM4329_NVRAM_NAME		"brcm/brcmfmac4329-sdio.txt"
+#define BCM4330_FIRMWARE_NAME		"brcm/brcmfmac4330-sdio.bin"
+#define BCM4330_NVRAM_NAME		"brcm/brcmfmac4330-sdio.txt"
+#define BCM4334_FIRMWARE_NAME		"brcm/brcmfmac4334-sdio.bin"
+#define BCM4334_NVRAM_NAME		"brcm/brcmfmac4334-sdio.txt"
+#define BCM43340_FIRMWARE_NAME		"brcm/brcmfmac43340-sdio.bin"
+#define BCM43340_NVRAM_NAME		"brcm/brcmfmac43340-sdio.txt"
+#define BCM4335_FIRMWARE_NAME		"brcm/brcmfmac4335-sdio.bin"
+#define BCM4335_NVRAM_NAME		"brcm/brcmfmac4335-sdio.txt"
+#define BCM43362_FIRMWARE_NAME		"brcm/brcmfmac43362-sdio.bin"
+#define BCM43362_NVRAM_NAME		"brcm/brcmfmac43362-sdio.txt"
+#define BCM4339_FIRMWARE_NAME		"brcm/brcmfmac4339-sdio.bin"
+#define BCM4339_NVRAM_NAME		"brcm/brcmfmac4339-sdio.txt"
+#define BCM43430_FIRMWARE_NAME		"brcm/brcmfmac43430-sdio.bin"
+#define BCM43430_NVRAM_NAME		"brcm/brcmfmac43430-sdio.txt"
+#define BCM43455_FIRMWARE_NAME		"brcm/brcmfmac43455-sdio.bin"
+#define BCM43455_NVRAM_NAME		"brcm/brcmfmac43455-sdio.txt"
+#define BCM4354_FIRMWARE_NAME		"brcm/brcmfmac4354-sdio.bin"
+#define BCM4354_NVRAM_NAME		"brcm/brcmfmac4354-sdio.txt"
+
+MODULE_FIRMWARE(BCM43143_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM43143_NVRAM_NAME);
+MODULE_FIRMWARE(BCM43241B0_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM43241B0_NVRAM_NAME);
+MODULE_FIRMWARE(BCM43241B4_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM43241B4_NVRAM_NAME);
+MODULE_FIRMWARE(BCM43241B5_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM43241B5_NVRAM_NAME);
+MODULE_FIRMWARE(BCM4329_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM4329_NVRAM_NAME);
+MODULE_FIRMWARE(BCM4330_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM4330_NVRAM_NAME);
+MODULE_FIRMWARE(BCM4334_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM4334_NVRAM_NAME);
+MODULE_FIRMWARE(BCM43340_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM43340_NVRAM_NAME);
+MODULE_FIRMWARE(BCM4335_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM4335_NVRAM_NAME);
+MODULE_FIRMWARE(BCM43362_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM43362_NVRAM_NAME);
+MODULE_FIRMWARE(BCM4339_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM4339_NVRAM_NAME);
+MODULE_FIRMWARE(BCM43430_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM43430_NVRAM_NAME);
+MODULE_FIRMWARE(BCM43455_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM43455_NVRAM_NAME);
+MODULE_FIRMWARE(BCM4354_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM4354_NVRAM_NAME);
+
+struct brcmf_firmware_names {
+	u32 chipid;
+	u32 revmsk;
+	const char *bin;
+	const char *nv;
+};
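+/* revmsk is a bitmask of supported chip revisions: an entry matches when
+ * BIT(chiprev) is set in it, see brcmf_sdio_get_fwnames() below.
+ */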
+
+enum brcmf_firmware_type {
+	BRCMF_FIRMWARE_BIN,
+	BRCMF_FIRMWARE_NVRAM
+};
+
+#define BRCMF_FIRMWARE_NVRAM(name) \
+	name ## _FIRMWARE_NAME, name ## _NVRAM_NAME
+
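+/* Firmware/NVRAM file names per chip. BRCMF_FIRMWARE_NVRAM(BCM4339), for
+ * example, expands to the pair BCM4339_FIRMWARE_NAME, BCM4339_NVRAM_NAME.
+ */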
+static const struct brcmf_firmware_names brcmf_fwname_data[] = {
+	{ BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43143) },
+	{ BRCM_CC_43241_CHIP_ID, 0x0000001F, BRCMF_FIRMWARE_NVRAM(BCM43241B0) },
+	{ BRCM_CC_43241_CHIP_ID, 0x00000020, BRCMF_FIRMWARE_NVRAM(BCM43241B4) },
+	{ BRCM_CC_43241_CHIP_ID, 0xFFFFFFC0, BRCMF_FIRMWARE_NVRAM(BCM43241B5) },
+	{ BRCM_CC_4329_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4329) },
+	{ BRCM_CC_4330_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4330) },
+	{ BRCM_CC_4334_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4334) },
+	{ BRCM_CC_43340_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43340) },
+	{ BRCM_CC_4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) },
+	{ BRCM_CC_43362_CHIP_ID, 0xFFFFFFFE, BRCMF_FIRMWARE_NVRAM(BCM43362) },
+	{ BRCM_CC_4339_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4339) },
+	{ BRCM_CC_43430_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43430) },
+	{ BRCM_CC_4345_CHIP_ID, 0xFFFFFFC0, BRCMF_FIRMWARE_NVRAM(BCM43455) },
+	{ BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4354) }
+};
+
+static int brcmf_sdio_get_fwnames(struct brcmf_chip *ci,
+				  struct brcmf_sdio_dev *sdiodev)
+{
+	int i;
+	char end;
+
+	for (i = 0; i < ARRAY_SIZE(brcmf_fwname_data); i++) {
+		if (brcmf_fwname_data[i].chipid == ci->chip &&
+		    brcmf_fwname_data[i].revmsk & BIT(ci->chiprev))
+			break;
+	}
+
+	if (i == ARRAY_SIZE(brcmf_fwname_data)) {
+		brcmf_err("Unknown chipid %d [%d]\n", ci->chip, ci->chiprev);
+		return -ENODEV;
+	}
+
+	/* check if firmware path is provided by module parameter */
+	if (brcmf_firmware_path[0] != '\0') {
+		strlcpy(sdiodev->fw_name, brcmf_firmware_path,
+			sizeof(sdiodev->fw_name));
+		strlcpy(sdiodev->nvram_name, brcmf_firmware_path,
+			sizeof(sdiodev->nvram_name));
+
+		end = brcmf_firmware_path[strlen(brcmf_firmware_path) - 1];
+		if (end != '/') {
+			strlcat(sdiodev->fw_name, "/",
+				sizeof(sdiodev->fw_name));
+			strlcat(sdiodev->nvram_name, "/",
+				sizeof(sdiodev->nvram_name));
+		}
+	}
+	strlcat(sdiodev->fw_name, brcmf_fwname_data[i].bin,
+		sizeof(sdiodev->fw_name));
+	strlcat(sdiodev->nvram_name, brcmf_fwname_data[i].nv,
+		sizeof(sdiodev->nvram_name));
+
+	return 0;
+}
+
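+/* Advance the packet data pointer to the next @align boundary (discarding the
+ * leading pad bytes) and trim the packet to @len bytes of payload.
+ */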
+static void pkt_align(struct sk_buff *p, int len, int align)
+{
+	uint datalign;
+	datalign = (unsigned long)(p->data);
+	datalign = roundup(datalign, (align)) - datalign;
+	if (datalign)
+		skb_pull(p, datalign);
+	__skb_trim(p, len);
+}
+
+/* To check if there's window offered */
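+/* The window is (tx_max - tx_seq) modulo 256: frames may be sent while the u8
+ * difference is non-zero and below 0x80. E.g. tx_seq = 250 and tx_max = 4
+ * give (u8)(4 - 250) = 10, so ten more frames fit before the host must wait
+ * for the dongle to advance tx_max.
+ */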
+static bool data_ok(struct brcmf_sdio *bus)
+{
+	return (u8)(bus->tx_max - bus->tx_seq) != 0 &&
+	       ((u8)(bus->tx_max - bus->tx_seq) & 0x80) == 0;
+}
+
+/*
+ * Reads a register in the SDIO hardware block. This block occupies a series of
+ * addresses on the 32-bit backplane bus.
+ */
+static int r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
+{
+	struct brcmf_core *core;
+	int ret;
+
+	core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
+	*regvar = brcmf_sdiod_regrl(bus->sdiodev, core->base + offset, &ret);
+
+	return ret;
+}
+
+static int w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
+{
+	struct brcmf_core *core;
+	int ret;
+
+	core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
+	brcmf_sdiod_regwl(bus->sdiodev, core->base + reg_offset, regval, &ret);
+
+	return ret;
+}
+
+static int
+brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
+{
+	u8 wr_val = 0, rd_val, cmp_val, bmask;
+	int err = 0;
+	int try_cnt = 0;
+
+	brcmf_dbg(TRACE, "Enter: on=%d\n", on);
+
+	wr_val = (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
+	/* 1st KSO write goes to AOS wake up core if device is asleep  */
+	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
+			  wr_val, &err);
+
+	if (on) {
+		/* device WAKEUP through KSO:
+		 * write bit 0 & read back until
+		 * both bits 0 (kso bit) & 1 (dev on status) are set
+		 */
+		cmp_val = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK |
+			  SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK;
+		bmask = cmp_val;
+		usleep_range(2000, 3000);
+	} else {
+		/* Put device to sleep, turn off KSO */
+		cmp_val = 0;
+		/* only check for bit0, bit1(dev on status) may not
+		 * get cleared right away
+		 */
+		bmask = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK;
+	}
+
+	do {
+		/* reliable KSO bit set/clr:
+		 * the sdiod sleep write access is synced to PMU 32khz clk
+		 * just one write attempt may fail,
+		 * read it back until it matches written value
+		 */
+		rd_val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
+					   &err);
+		if (((rd_val & bmask) == cmp_val) && !err)
+			break;
+
+		udelay(KSO_WAIT_US);
+		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
+				  wr_val, &err);
+	} while (try_cnt++ < MAX_KSO_ATTEMPTS);
+
+	if (try_cnt > 2)
+		brcmf_dbg(SDIO, "try_cnt=%d rd_val=0x%x err=%d\n", try_cnt,
+			  rd_val, err);
+
+	if (try_cnt > MAX_KSO_ATTEMPTS)
+		brcmf_err("max tries: rd_val=0x%x err=%d\n", rd_val, err);
+
+	return err;
+}
+
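+/* Interrupts the host acts upon: the four to-host mailbox software interrupts
+ * plus the doze-to-active (chip active) notification.
+ */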
+#define HOSTINTMASK		(I_HMB_SW_MASK | I_CHIPACTIVE)
+
+/* Turn backplane clock on or off */
+static int brcmf_sdio_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
+{
+	int err;
+	u8 clkctl, clkreq, devctl;
+	unsigned long timeout;
+
+	brcmf_dbg(SDIO, "Enter\n");
+
+	clkctl = 0;
+
+	if (bus->sr_enabled) {
+		bus->clkstate = (on ? CLK_AVAIL : CLK_SDONLY);
+		return 0;
+	}
+
+	if (on) {
+		/* Request HT Avail */
+		clkreq =
+		    bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
+
+		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+				  clkreq, &err);
+		if (err) {
+			brcmf_err("HT Avail request error: %d\n", err);
+			return -EBADE;
+		}
+
+		/* Check current status */
+		clkctl = brcmf_sdiod_regrb(bus->sdiodev,
+					   SBSDIO_FUNC1_CHIPCLKCSR, &err);
+		if (err) {
+			brcmf_err("HT Avail read error: %d\n", err);
+			return -EBADE;
+		}
+
+		/* Go to pending and await interrupt if appropriate */
+		if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) {
+			/* Allow only clock-available interrupt */
+			devctl = brcmf_sdiod_regrb(bus->sdiodev,
+						   SBSDIO_DEVICE_CTL, &err);
+			if (err) {
+				brcmf_err("Devctl error setting CA: %d\n",
+					  err);
+				return -EBADE;
+			}
+
+			devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
+			brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+					  devctl, &err);
+			brcmf_dbg(SDIO, "CLKCTL: set PENDING\n");
+			bus->clkstate = CLK_PENDING;
+
+			return 0;
+		} else if (bus->clkstate == CLK_PENDING) {
+			/* Cancel CA-only interrupt filter */
+			devctl = brcmf_sdiod_regrb(bus->sdiodev,
+						   SBSDIO_DEVICE_CTL, &err);
+			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+			brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+					  devctl, &err);
+		}
+
+		/* Otherwise, wait here (polling) for HT Avail */
+		timeout = jiffies +
+			  msecs_to_jiffies(PMU_MAX_TRANSITION_DLY/1000);
+		while (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
+			clkctl = brcmf_sdiod_regrb(bus->sdiodev,
+						   SBSDIO_FUNC1_CHIPCLKCSR,
+						   &err);
+			if (time_after(jiffies, timeout))
+				break;
+			else
+				usleep_range(5000, 10000);
+		}
+		if (err) {
+			brcmf_err("HT Avail request error: %d\n", err);
+			return -EBADE;
+		}
+		if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
+			brcmf_err("HT Avail timeout (%d): clkctl 0x%02x\n",
+				  PMU_MAX_TRANSITION_DLY, clkctl);
+			return -EBADE;
+		}
+
+		/* Mark clock available */
+		bus->clkstate = CLK_AVAIL;
+		brcmf_dbg(SDIO, "CLKCTL: turned ON\n");
+
+#if defined(DEBUG)
+		if (!bus->alp_only) {
+			if (SBSDIO_ALPONLY(clkctl))
+				brcmf_err("HT Clock should be on\n");
+		}
+#endif				/* defined (DEBUG) */
+
+	} else {
+		clkreq = 0;
+
+		if (bus->clkstate == CLK_PENDING) {
+			/* Cancel CA-only interrupt filter */
+			devctl = brcmf_sdiod_regrb(bus->sdiodev,
+						   SBSDIO_DEVICE_CTL, &err);
+			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+			brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+					  devctl, &err);
+		}
+
+		bus->clkstate = CLK_SDONLY;
+		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+				  clkreq, &err);
+		brcmf_dbg(SDIO, "CLKCTL: turned OFF\n");
+		if (err) {
+			brcmf_err("Failed access turning clock off: %d\n",
+				  err);
+			return -EBADE;
+		}
+	}
+	return 0;
+}
+
+/* Change idle/active SD state */
+static int brcmf_sdio_sdclk(struct brcmf_sdio *bus, bool on)
+{
+	brcmf_dbg(SDIO, "Enter\n");
+
+	if (on)
+		bus->clkstate = CLK_SDONLY;
+	else
+		bus->clkstate = CLK_NONE;
+
+	return 0;
+}
+
+/* Transition SD and backplane clock readiness */
+static int brcmf_sdio_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
+{
+#ifdef DEBUG
+	uint oldstate = bus->clkstate;
+#endif				/* DEBUG */
+
+	brcmf_dbg(SDIO, "Enter\n");
+
+	/* Early exit if we're already there */
+	if (bus->clkstate == target)
+		return 0;
+
+	switch (target) {
+	case CLK_AVAIL:
+		/* Make sure SD clock is available */
+		if (bus->clkstate == CLK_NONE)
+			brcmf_sdio_sdclk(bus, true);
+		/* Now request HT Avail on the backplane */
+		brcmf_sdio_htclk(bus, true, pendok);
+		break;
+
+	case CLK_SDONLY:
+		/* Remove HT request, or bring up SD clock */
+		if (bus->clkstate == CLK_NONE)
+			brcmf_sdio_sdclk(bus, true);
+		else if (bus->clkstate == CLK_AVAIL)
+			brcmf_sdio_htclk(bus, false, false);
+		else
+			brcmf_err("request for %d -> %d\n",
+				  bus->clkstate, target);
+		break;
+
+	case CLK_NONE:
+		/* Make sure to remove HT request */
+		if (bus->clkstate == CLK_AVAIL)
+			brcmf_sdio_htclk(bus, false, false);
+		/* Now remove the SD clock */
+		brcmf_sdio_sdclk(bus, false);
+		break;
+	}
+#ifdef DEBUG
+	brcmf_dbg(SDIO, "%d -> %d\n", oldstate, bus->clkstate);
+#endif				/* DEBUG */
+
+	return 0;
+}
+
+static int
+brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
+{
+	int err = 0;
+	u8 clkcsr;
+
+	brcmf_dbg(SDIO, "Enter: request %s currently %s\n",
+		  (sleep ? "SLEEP" : "WAKE"),
+		  (bus->sleeping ? "SLEEP" : "WAKE"));
+
+	/* If SR is enabled control bus state with KSO */
+	if (bus->sr_enabled) {
+		/* Done if we're already in the requested state */
+		if (sleep == bus->sleeping)
+			goto end;
+
+		/* Going to sleep */
+		if (sleep) {
+			clkcsr = brcmf_sdiod_regrb(bus->sdiodev,
+						   SBSDIO_FUNC1_CHIPCLKCSR,
+						   &err);
+			if ((clkcsr & SBSDIO_CSR_MASK) == 0) {
+				brcmf_dbg(SDIO, "no clock, set ALP\n");
+				brcmf_sdiod_regwb(bus->sdiodev,
+						  SBSDIO_FUNC1_CHIPCLKCSR,
+						  SBSDIO_ALP_AVAIL_REQ, &err);
+			}
+			err = brcmf_sdio_kso_control(bus, false);
+		} else {
+			err = brcmf_sdio_kso_control(bus, true);
+		}
+		if (err) {
+			brcmf_err("error while changing bus sleep state %d\n",
+				  err);
+			goto done;
+		}
+	}
+
+end:
+	/* control clocks */
+	if (sleep) {
+		if (!bus->sr_enabled)
+			brcmf_sdio_clkctl(bus, CLK_NONE, pendok);
+	} else {
+		brcmf_sdio_clkctl(bus, CLK_AVAIL, pendok);
+		brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
+	}
+	bus->sleeping = sleep;
+	brcmf_dbg(SDIO, "new state %s\n",
+		  (sleep ? "SLEEP" : "WAKE"));
+done:
+	brcmf_dbg(SDIO, "Exit: err=%d\n", err);
+	return err;
+
+}
+
+#ifdef DEBUG
+static inline bool brcmf_sdio_valid_shared_address(u32 addr)
+{
+	return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
+}
+
+static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
+				 struct sdpcm_shared *sh)
+{
+	u32 addr = 0;
+	int rv;
+	u32 shaddr = 0;
+	struct sdpcm_shared_le sh_le;
+	__le32 addr_le;
+
+	sdio_claim_host(bus->sdiodev->func[1]);
+	brcmf_sdio_bus_sleep(bus, false, false);
+
+	/*
+	 * Read last word in socram to determine
+	 * address of sdpcm_shared structure
+	 */
+	shaddr = bus->ci->rambase + bus->ci->ramsize - 4;
+	if (!bus->ci->rambase && brcmf_chip_sr_capable(bus->ci))
+		shaddr -= bus->ci->srsize;
+	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr,
+			       (u8 *)&addr_le, 4);
+	if (rv < 0)
+		goto fail;
+
+	/*
+	 * Check if addr is valid.
+	 * NVRAM length at the end of memory should have been overwritten.
+	 */
+	addr = le32_to_cpu(addr_le);
+	if (!brcmf_sdio_valid_shared_address(addr)) {
+		brcmf_err("invalid sdpcm_shared address 0x%08X\n", addr);
+		rv = -EINVAL;
+		goto fail;
+	}
+
+	brcmf_dbg(INFO, "sdpcm_shared address 0x%08X\n", addr);
+
+	/* Read hndrte_shared structure */
+	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
+			       sizeof(struct sdpcm_shared_le));
+	if (rv < 0)
+		goto fail;
+
+	sdio_release_host(bus->sdiodev->func[1]);
+
+	/* Endianness */
+	sh->flags = le32_to_cpu(sh_le.flags);
+	sh->trap_addr = le32_to_cpu(sh_le.trap_addr);
+	sh->assert_exp_addr = le32_to_cpu(sh_le.assert_exp_addr);
+	sh->assert_file_addr = le32_to_cpu(sh_le.assert_file_addr);
+	sh->assert_line = le32_to_cpu(sh_le.assert_line);
+	sh->console_addr = le32_to_cpu(sh_le.console_addr);
+	sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr);
+
+	if ((sh->flags & SDPCM_SHARED_VERSION_MASK) > SDPCM_SHARED_VERSION) {
+		brcmf_err("sdpcm shared version unsupported: dhd %d dongle %d\n",
+			  SDPCM_SHARED_VERSION,
+			  sh->flags & SDPCM_SHARED_VERSION_MASK);
+		return -EPROTO;
+	}
+	return 0;
+
+fail:
+	brcmf_err("unable to obtain sdpcm_shared info: rv=%d (addr=0x%x)\n",
+		  rv, addr);
+	sdio_release_host(bus->sdiodev->func[1]);
+	return rv;
+}
+
+static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus)
+{
+	struct sdpcm_shared sh;
+
+	if (brcmf_sdio_readshared(bus, &sh) == 0)
+		bus->console_addr = sh.console_addr;
+}
+#else
+static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus)
+{
+}
+#endif /* DEBUG */
+
+static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
+{
+	u32 intstatus = 0;
+	u32 hmb_data;
+	u8 fcbits;
+	int ret;
+
+	brcmf_dbg(SDIO, "Enter\n");
+
+	/* Read mailbox data and ack that we did so */
+	ret = r_sdreg32(bus, &hmb_data,
+			offsetof(struct sdpcmd_regs, tohostmailboxdata));
+
+	if (ret == 0)
+		w_sdreg32(bus, SMB_INT_ACK,
+			  offsetof(struct sdpcmd_regs, tosbmailbox));
+	bus->sdcnt.f1regdata += 2;
+
+	/* Dongle recomposed rx frames, accept them again */
+	if (hmb_data & HMB_DATA_NAKHANDLED) {
+		brcmf_dbg(SDIO, "Dongle reports NAK handled, expect rtx of %d\n",
+			  bus->rx_seq);
+		if (!bus->rxskip)
+			brcmf_err("unexpected NAKHANDLED!\n");
+
+		bus->rxskip = false;
+		intstatus |= I_HMB_FRAME_IND;
+	}
+
+	/*
+	 * DEVREADY does not occur with gSPI.
+	 */
+	if (hmb_data & (HMB_DATA_DEVREADY | HMB_DATA_FWREADY)) {
+		bus->sdpcm_ver =
+		    (hmb_data & HMB_DATA_VERSION_MASK) >>
+		    HMB_DATA_VERSION_SHIFT;
+		if (bus->sdpcm_ver != SDPCM_PROT_VERSION)
+			brcmf_err("Version mismatch, dongle reports %d, "
+				  "expecting %d\n",
+				  bus->sdpcm_ver, SDPCM_PROT_VERSION);
+		else
+			brcmf_dbg(SDIO, "Dongle ready, protocol version %d\n",
+				  bus->sdpcm_ver);
+
+		/*
+		 * Retrieve console state address now that firmware should have
+		 * updated it.
+		 */
+		brcmf_sdio_get_console_addr(bus);
+	}
+
+	/*
+	 * Flow control has been moved into the RX headers and this out-of-band
+	 * method isn't used any more. It remains here only for backward
+	 * compatibility with older dongles.
+	 */
+	if (hmb_data & HMB_DATA_FC) {
+		fcbits = (hmb_data & HMB_DATA_FCDATA_MASK) >>
+							HMB_DATA_FCDATA_SHIFT;
+
+		if (fcbits & ~bus->flowcontrol)
+			bus->sdcnt.fc_xoff++;
+
+		if (bus->flowcontrol & ~fcbits)
+			bus->sdcnt.fc_xon++;
+
+		bus->sdcnt.fc_rcvd++;
+		bus->flowcontrol = fcbits;
+	}
+
+	/* Shouldn't be any others */
+	if (hmb_data & ~(HMB_DATA_DEVREADY |
+			 HMB_DATA_NAKHANDLED |
+			 HMB_DATA_FC |
+			 HMB_DATA_FWREADY |
+			 HMB_DATA_FCDATA_MASK | HMB_DATA_VERSION_MASK))
+		brcmf_err("Unknown mailbox data content: 0x%02x\n",
+			  hmb_data);
+
+	return intstatus;
+}
+
+static void brcmf_sdio_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
+{
+	uint retries = 0;
+	u16 lastrbc;
+	u8 hi, lo;
+	int err;
+
+	brcmf_err("%sterminate frame%s\n",
+		  abort ? "abort command, " : "",
+		  rtx ? ", send NAK" : "");
+
+	if (abort)
+		brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
+
+	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
+			  SFC_RF_TERM, &err);
+	bus->sdcnt.f1regdata++;
+
+	/* Wait until the packet has been flushed (device/FIFO stable) */
+	for (lastrbc = retries = 0xffff; retries > 0; retries--) {
+		hi = brcmf_sdiod_regrb(bus->sdiodev,
+				       SBSDIO_FUNC1_RFRAMEBCHI, &err);
+		lo = brcmf_sdiod_regrb(bus->sdiodev,
+				       SBSDIO_FUNC1_RFRAMEBCLO, &err);
+		bus->sdcnt.f1regdata += 2;
+
+		if ((hi == 0) && (lo == 0))
+			break;
+
+		if ((hi > (lastrbc >> 8)) && (lo > (lastrbc & 0x00ff))) {
+			brcmf_err("count growing: last 0x%04x now 0x%04x\n",
+				  lastrbc, (hi << 8) + lo);
+		}
+		lastrbc = (hi << 8) + lo;
+	}
+
+	if (!retries)
+		brcmf_err("count never zeroed: last 0x%04x\n", lastrbc);
+	else
+		brcmf_dbg(SDIO, "flush took %d iterations\n", 0xffff - retries);
+
+	if (rtx) {
+		bus->sdcnt.rxrtx++;
+		err = w_sdreg32(bus, SMB_NAK,
+				offsetof(struct sdpcmd_regs, tosbmailbox));
+
+		bus->sdcnt.f1regdata++;
+		if (err == 0)
+			bus->rxskip = true;
+	}
+
+	/* Clear partial in any case */
+	bus->cur_read.len = 0;
+}
+
+static void brcmf_sdio_txfail(struct brcmf_sdio *bus)
+{
+	struct brcmf_sdio_dev *sdiodev = bus->sdiodev;
+	u8 i, hi, lo;
+
+	/* On failure, abort the command and terminate the frame */
+	brcmf_err("sdio error, abort command and terminate frame\n");
+	bus->sdcnt.tx_sderrs++;
+
+	brcmf_sdiod_abort(sdiodev, SDIO_FUNC_2);
+	brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM, NULL);
+	bus->sdcnt.f1regdata++;
+
+	for (i = 0; i < 3; i++) {
+		hi = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+		lo = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_WFRAMEBCLO, NULL);
+		bus->sdcnt.f1regdata += 2;
+		if ((hi == 0) && (lo == 0))
+			break;
+	}
+}
+
+/* return total length of buffer chain */
+static uint brcmf_sdio_glom_len(struct brcmf_sdio *bus)
+{
+	struct sk_buff *p;
+	uint total;
+
+	total = 0;
+	skb_queue_walk(&bus->glom, p)
+		total += p->len;
+	return total;
+}
+
+static void brcmf_sdio_free_glom(struct brcmf_sdio *bus)
+{
+	struct sk_buff *cur, *next;
+
+	skb_queue_walk_safe(&bus->glom, cur, next) {
+		skb_unlink(cur, &bus->glom);
+		brcmu_pkt_buf_free_skb(cur);
+	}
+}
+
+/**
+ * brcmfmac sdio bus specific header
+ * This is the lowest-layer header wrapped around the packets transmitted
+ * between the host and the WiFi dongle; it carries information needed by the
+ * SDIO core and the firmware.
+ *
+ * It consists of 3 parts: hardware header, hardware extension header and
+ * software header
+ * hardware header (frame tag) - 4 bytes
+ * Byte 0~1: Frame length
+ * Byte 2~3: Checksum, bit-wise inverse of frame length
+ * hardware extension header - 8 bytes
+ * Tx glom mode only, N/A for Rx or normal Tx
+ * Byte 0~1: Packet length excluding hw frame tag
+ * Byte 2: Reserved
+ * Byte 3: Frame flags, bit 0: last frame indication
+ * Byte 4~5: Reserved
+ * Byte 6~7: Tail padding length
+ * software header - 8 bytes
+ * Byte 0: Rx/Tx sequence number
+ * Byte 1: 4 MSB Channel number, 4 LSB arbitrary flag
+ * Byte 2: Length of next data frame, reserved for Tx
+ * Byte 3: Data offset
+ * Byte 4: Flow control bits, reserved for Tx
+ * Byte 5: Maximum Tx sequence number allowed by firmware; Rx only, N/A for Tx
+ * Byte 6~7: Reserved
+ */
+#define SDPCM_HWHDR_LEN			4
+#define SDPCM_HWEXT_LEN			8
+#define SDPCM_SWHDR_LEN			8
+#define SDPCM_HDRLEN			(SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN)
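+/* Hardware header (frame tag) example: a 1536-byte frame carries
+ * len = 0x0600 and checksum = 0xf9ff (its bit-wise inverse), so
+ * len ^ checksum == 0xffff and brcmf_sdio_hdparse() accepts the tag.
+ */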
+/* software header */
+#define SDPCM_SEQ_MASK			0x000000ff
+#define SDPCM_SEQ_WRAP			256
+#define SDPCM_CHANNEL_MASK		0x00000f00
+#define SDPCM_CHANNEL_SHIFT		8
+#define SDPCM_CONTROL_CHANNEL		0	/* Control */
+#define SDPCM_EVENT_CHANNEL		1	/* Async Event Indication */
+#define SDPCM_DATA_CHANNEL		2	/* Data Xmit/Recv */
+#define SDPCM_GLOM_CHANNEL		3	/* Coalesced packets */
+#define SDPCM_TEST_CHANNEL		15	/* Test/debug packets */
+#define SDPCM_GLOMDESC(p)		(((u8 *)p)[1] & 0x80)
+#define SDPCM_NEXTLEN_MASK		0x00ff0000
+#define SDPCM_NEXTLEN_SHIFT		16
+#define SDPCM_DOFFSET_MASK		0xff000000
+#define SDPCM_DOFFSET_SHIFT		24
+#define SDPCM_FCMASK_MASK		0x000000ff
+#define SDPCM_WINDOW_MASK		0x0000ff00
+#define SDPCM_WINDOW_SHIFT		8
+
+static inline u8 brcmf_sdio_getdatoffset(u8 *swheader)
+{
+	u32 hdrvalue;
+	hdrvalue = *(u32 *)swheader;
+	return (u8)((hdrvalue & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT);
+}
+
+static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
+			      struct brcmf_sdio_hdrinfo *rd,
+			      enum brcmf_sdio_frmtype type)
+{
+	u16 len, checksum;
+	u8 rx_seq, fc, tx_seq_max;
+	u32 swheader;
+
+	trace_brcmf_sdpcm_hdr(SDPCM_RX, header);
+
+	/* hw header */
+	len = get_unaligned_le16(header);
+	checksum = get_unaligned_le16(header + sizeof(u16));
+	/* All zero means no more to read */
+	if (!(len | checksum)) {
+		bus->rxpending = false;
+		return -ENODATA;
+	}
+	if ((u16)(~(len ^ checksum))) {
+		brcmf_err("HW header checksum error\n");
+		bus->sdcnt.rx_badhdr++;
+		brcmf_sdio_rxfail(bus, false, false);
+		return -EIO;
+	}
+	if (len < SDPCM_HDRLEN) {
+		brcmf_err("HW header length error\n");
+		return -EPROTO;
+	}
+	if (type == BRCMF_SDIO_FT_SUPER &&
+	    (roundup(len, bus->blocksize) != rd->len)) {
+		brcmf_err("HW superframe header length error\n");
+		return -EPROTO;
+	}
+	if (type == BRCMF_SDIO_FT_SUB && len > rd->len) {
+		brcmf_err("HW subframe header length error\n");
+		return -EPROTO;
+	}
+	rd->len = len;
+
+	/* software header */
+	header += SDPCM_HWHDR_LEN;
+	swheader = le32_to_cpu(*(__le32 *)header);
+	if (type == BRCMF_SDIO_FT_SUPER && SDPCM_GLOMDESC(header)) {
+		brcmf_err("Glom descriptor found in superframe head\n");
+		rd->len = 0;
+		return -EINVAL;
+	}
+	rx_seq = (u8)(swheader & SDPCM_SEQ_MASK);
+	rd->channel = (swheader & SDPCM_CHANNEL_MASK) >> SDPCM_CHANNEL_SHIFT;
+	if (len > MAX_RX_DATASZ && rd->channel != SDPCM_CONTROL_CHANNEL &&
+	    type != BRCMF_SDIO_FT_SUPER) {
+		brcmf_err("HW header length too long\n");
+		bus->sdcnt.rx_toolong++;
+		brcmf_sdio_rxfail(bus, false, false);
+		rd->len = 0;
+		return -EPROTO;
+	}
+	if (type == BRCMF_SDIO_FT_SUPER && rd->channel != SDPCM_GLOM_CHANNEL) {
+		brcmf_err("Wrong channel for superframe\n");
+		rd->len = 0;
+		return -EINVAL;
+	}
+	if (type == BRCMF_SDIO_FT_SUB && rd->channel != SDPCM_DATA_CHANNEL &&
+	    rd->channel != SDPCM_EVENT_CHANNEL) {
+		brcmf_err("Wrong channel for subframe\n");
+		rd->len = 0;
+		return -EINVAL;
+	}
+	rd->dat_offset = brcmf_sdio_getdatoffset(header);
+	if (rd->dat_offset < SDPCM_HDRLEN || rd->dat_offset > rd->len) {
+		brcmf_err("seq %d: bad data offset\n", rx_seq);
+		bus->sdcnt.rx_badhdr++;
+		brcmf_sdio_rxfail(bus, false, false);
+		rd->len = 0;
+		return -ENXIO;
+	}
+	if (rd->seq_num != rx_seq) {
+		brcmf_err("seq %d: sequence number error, expect %d\n",
+			  rx_seq, rd->seq_num);
+		bus->sdcnt.rx_badseq++;
+		rd->seq_num = rx_seq;
+	}
+	/* no need to check the rest for a subframe */
+	if (type == BRCMF_SDIO_FT_SUB)
+		return 0;
+	rd->len_nxtfrm = (swheader & SDPCM_NEXTLEN_MASK) >> SDPCM_NEXTLEN_SHIFT;
+	if (rd->len_nxtfrm << 4 > MAX_RX_DATASZ) {
+		/* only warn for non-glom packets */
+		if (rd->channel != SDPCM_GLOM_CHANNEL)
+			brcmf_err("seq %d: next length error\n", rx_seq);
+		rd->len_nxtfrm = 0;
+	}
+	swheader = le32_to_cpu(*(__le32 *)(header + 4));
+	fc = swheader & SDPCM_FCMASK_MASK;
+	if (bus->flowcontrol != fc) {
+		if (~bus->flowcontrol & fc)
+			bus->sdcnt.fc_xoff++;
+		if (bus->flowcontrol & ~fc)
+			bus->sdcnt.fc_xon++;
+		bus->sdcnt.fc_rcvd++;
+		bus->flowcontrol = fc;
+	}
+	tx_seq_max = (swheader & SDPCM_WINDOW_MASK) >> SDPCM_WINDOW_SHIFT;
+	if ((u8)(tx_seq_max - bus->tx_seq) > 0x40) {
+		brcmf_err("seq %d: max tx seq number error\n", rx_seq);
+		tx_seq_max = bus->tx_seq + 2;
+	}
+	bus->tx_max = tx_seq_max;
+
+	return 0;
+}
+
+static inline void brcmf_sdio_update_hwhdr(u8 *header, u16 frm_length)
+{
+	*(__le16 *)header = cpu_to_le16(frm_length);
+	*(((__le16 *)header) + 1) = cpu_to_le16(~frm_length);
+}
+
+static void brcmf_sdio_hdpack(struct brcmf_sdio *bus, u8 *header,
+			      struct brcmf_sdio_hdrinfo *hd_info)
+{
+	u32 hdrval;
+	u8 hdr_offset;
+
+	brcmf_sdio_update_hwhdr(header, hd_info->len);
+	hdr_offset = SDPCM_HWHDR_LEN;
+
+	if (bus->txglom) {
+		hdrval = (hd_info->len - hdr_offset) | (hd_info->lastfrm << 24);
+		*((__le32 *)(header + hdr_offset)) = cpu_to_le32(hdrval);
+		hdrval = (u16)hd_info->tail_pad << 16;
+		*(((__le32 *)(header + hdr_offset)) + 1) = cpu_to_le32(hdrval);
+		hdr_offset += SDPCM_HWEXT_LEN;
+	}
+
+	hdrval = hd_info->seq_num;
+	hdrval |= (hd_info->channel << SDPCM_CHANNEL_SHIFT) &
+		  SDPCM_CHANNEL_MASK;
+	hdrval |= (hd_info->dat_offset << SDPCM_DOFFSET_SHIFT) &
+		  SDPCM_DOFFSET_MASK;
+	*((__le32 *)(header + hdr_offset)) = cpu_to_le32(hdrval);
+	*(((__le32 *)(header + hdr_offset)) + 1) = 0;
+	trace_brcmf_sdpcm_hdr(SDPCM_TX + !!(bus->txglom), header);
+}
+
+static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
+{
+	u16 dlen, totlen;
+	u8 *dptr, num = 0;
+	u16 sublen;
+	struct sk_buff *pfirst, *pnext;
+
+	int errcode;
+	u8 doff, sfdoff;
+
+	struct brcmf_sdio_hdrinfo rd_new;
+
+	/* If packets, issue read(s) and send up packet chain */
+	/* Return sequence numbers consumed? */
+
+	brcmf_dbg(SDIO, "start: glomd %p glom %p\n",
+		  bus->glomd, skb_peek(&bus->glom));
+
+	/* If there's a descriptor, generate the packet chain */
+	if (bus->glomd) {
+		pfirst = pnext = NULL;
+		dlen = (u16) (bus->glomd->len);
+		dptr = bus->glomd->data;
+		if (!dlen || (dlen & 1)) {
+			brcmf_err("bad glomd len(%d), ignore descriptor\n",
+				  dlen);
+			dlen = 0;
+		}
+
+		for (totlen = num = 0; dlen; num++) {
+			/* Get (and move past) next length */
+			sublen = get_unaligned_le16(dptr);
+			dlen -= sizeof(u16);
+			dptr += sizeof(u16);
+			if ((sublen < SDPCM_HDRLEN) ||
+			    ((num == 0) && (sublen < (2 * SDPCM_HDRLEN)))) {
+				brcmf_err("descriptor len %d bad: %d\n",
+					  num, sublen);
+				pnext = NULL;
+				break;
+			}
+			if (sublen % bus->sgentry_align) {
+				brcmf_err("sublen %d not multiple of %d\n",
+					  sublen, bus->sgentry_align);
+			}
+			totlen += sublen;
+
+			/* For last frame, adjust read len so total
+				 is a block multiple */
+			if (!dlen) {
+				sublen +=
+				    (roundup(totlen, bus->blocksize) - totlen);
+				totlen = roundup(totlen, bus->blocksize);
+			}
+
+			/* Allocate/chain packet for next subframe */
+			pnext = brcmu_pkt_buf_get_skb(sublen + bus->sgentry_align);
+			if (pnext == NULL) {
+				brcmf_err("bcm_pkt_buf_get_skb failed, num %d len %d\n",
+					  num, sublen);
+				break;
+			}
+			skb_queue_tail(&bus->glom, pnext);
+
+			/* Adhere to start alignment requirements */
+			pkt_align(pnext, sublen, bus->sgentry_align);
+		}
+
+		/* If all allocations succeeded, save packet chain
+			 in bus structure */
+		if (pnext) {
+			brcmf_dbg(GLOM, "allocated %d-byte packet chain for %d subframes\n",
+				  totlen, num);
+			if (BRCMF_GLOM_ON() && bus->cur_read.len &&
+			    totlen != bus->cur_read.len) {
+				brcmf_dbg(GLOM, "glomdesc mismatch: nextlen %d glomdesc %d rxseq %d\n",
+					  bus->cur_read.len, totlen, rxseq);
+			}
+			pfirst = pnext = NULL;
+		} else {
+			brcmf_sdio_free_glom(bus);
+			num = 0;
+		}
+
+		/* Done with descriptor packet */
+		brcmu_pkt_buf_free_skb(bus->glomd);
+		bus->glomd = NULL;
+		bus->cur_read.len = 0;
+	}
+
+	/* Ok -- either we just generated a packet chain,
+		 or had one from before */
+	if (!skb_queue_empty(&bus->glom)) {
+		if (BRCMF_GLOM_ON()) {
+			brcmf_dbg(GLOM, "try superframe read, packet chain:\n");
+			skb_queue_walk(&bus->glom, pnext) {
+				brcmf_dbg(GLOM, "    %p: %p len 0x%04x (%d)\n",
+					  pnext, (u8 *) (pnext->data),
+					  pnext->len, pnext->len);
+			}
+		}
+
+		pfirst = skb_peek(&bus->glom);
+		dlen = (u16) brcmf_sdio_glom_len(bus);
+
+		/* Do an SDIO read for the superframe.  Configurable iovar to
+		 * read directly into the chained packet, or allocate a large
+		 * packet and copy into the chain.
+		 */
+		sdio_claim_host(bus->sdiodev->func[1]);
+		errcode = brcmf_sdiod_recv_chain(bus->sdiodev,
+						 &bus->glom, dlen);
+		sdio_release_host(bus->sdiodev->func[1]);
+		bus->sdcnt.f2rxdata++;
+
+		/* On failure, kill the superframe, allow a couple retries */
+		if (errcode < 0) {
+			brcmf_err("glom read of %d bytes failed: %d\n",
+				  dlen, errcode);
+
+			sdio_claim_host(bus->sdiodev->func[1]);
+			if (bus->glomerr++ < 3) {
+				brcmf_sdio_rxfail(bus, true, true);
+			} else {
+				bus->glomerr = 0;
+				brcmf_sdio_rxfail(bus, true, false);
+				bus->sdcnt.rxglomfail++;
+				brcmf_sdio_free_glom(bus);
+			}
+			sdio_release_host(bus->sdiodev->func[1]);
+			return 0;
+		}
+
+		brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
+				   pfirst->data, min_t(int, pfirst->len, 48),
+				   "SUPERFRAME:\n");
+
+		rd_new.seq_num = rxseq;
+		rd_new.len = dlen;
+		sdio_claim_host(bus->sdiodev->func[1]);
+		errcode = brcmf_sdio_hdparse(bus, pfirst->data, &rd_new,
+					     BRCMF_SDIO_FT_SUPER);
+		sdio_release_host(bus->sdiodev->func[1]);
+		bus->cur_read.len = rd_new.len_nxtfrm << 4;
+
+		/* Remove superframe header, remember offset */
+		skb_pull(pfirst, rd_new.dat_offset);
+		sfdoff = rd_new.dat_offset;
+		num = 0;
+
+		/* Validate all the subframe headers */
+		skb_queue_walk(&bus->glom, pnext) {
+			/* stop when an invalid subframe is found */
+			if (errcode)
+				break;
+
+			rd_new.len = pnext->len;
+			rd_new.seq_num = rxseq++;
+			sdio_claim_host(bus->sdiodev->func[1]);
+			errcode = brcmf_sdio_hdparse(bus, pnext->data, &rd_new,
+						     BRCMF_SDIO_FT_SUB);
+			sdio_release_host(bus->sdiodev->func[1]);
+			brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
+					   pnext->data, 32, "subframe:\n");
+
+			num++;
+		}
+
+		if (errcode) {
+			/* Terminate frame on error, request
+				 a couple retries */
+			sdio_claim_host(bus->sdiodev->func[1]);
+			if (bus->glomerr++ < 3) {
+				/* Restore superframe header space */
+				skb_push(pfirst, sfdoff);
+				brcmf_sdio_rxfail(bus, true, true);
+			} else {
+				bus->glomerr = 0;
+				brcmf_sdio_rxfail(bus, true, false);
+				bus->sdcnt.rxglomfail++;
+				brcmf_sdio_free_glom(bus);
+			}
+			sdio_release_host(bus->sdiodev->func[1]);
+			bus->cur_read.len = 0;
+			return 0;
+		}
+
+		/* Basic SD framing looks ok - process each packet (header) */
+
+		skb_queue_walk_safe(&bus->glom, pfirst, pnext) {
+			dptr = (u8 *) (pfirst->data);
+			sublen = get_unaligned_le16(dptr);
+			doff = brcmf_sdio_getdatoffset(&dptr[SDPCM_HWHDR_LEN]);
+
+			brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
+					   dptr, pfirst->len,
+					   "Rx Subframe Data:\n");
+
+			__skb_trim(pfirst, sublen);
+			skb_pull(pfirst, doff);
+
+			if (pfirst->len == 0) {
+				skb_unlink(pfirst, &bus->glom);
+				brcmu_pkt_buf_free_skb(pfirst);
+				continue;
+			}
+
+			brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
+					   pfirst->data,
+					   min_t(int, pfirst->len, 32),
+					   "subframe %d to stack, %p (%p/%d) nxt/lnk %p/%p\n",
+					   bus->glom.qlen, pfirst, pfirst->data,
+					   pfirst->len, pfirst->next,
+					   pfirst->prev);
+			skb_unlink(pfirst, &bus->glom);
+			brcmf_rx_frame(bus->sdiodev->dev, pfirst);
+			bus->sdcnt.rxglompkts++;
+		}
+
+		bus->sdcnt.rxglomframes++;
+	}
+	return num;
+}
+
+static int brcmf_sdio_dcmd_resp_wait(struct brcmf_sdio *bus, uint *condition,
+				     bool *pending)
+{
+	DECLARE_WAITQUEUE(wait, current);
+	int timeout = msecs_to_jiffies(DCMD_RESP_TIMEOUT);
+
+	/* Wait until control frame is available */
+	add_wait_queue(&bus->dcmd_resp_wait, &wait);
+	set_current_state(TASK_INTERRUPTIBLE);
+
+	while (!(*condition) && (!signal_pending(current) && timeout))
+		timeout = schedule_timeout(timeout);
+
+	if (signal_pending(current))
+		*pending = true;
+
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(&bus->dcmd_resp_wait, &wait);
+
+	return timeout;
+}
+
+static int brcmf_sdio_dcmd_resp_wake(struct brcmf_sdio *bus)
+{
+	if (waitqueue_active(&bus->dcmd_resp_wait))
+		wake_up_interruptible(&bus->dcmd_resp_wait);
+
+	return 0;
+}
+static void
+brcmf_sdio_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
+{
+	uint rdlen, pad;
+	u8 *buf = NULL, *rbuf;
+	int sdret;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	if (bus->rxblen)
+		buf = vzalloc(bus->rxblen);
+	if (!buf)
+		goto done;
+
+	rbuf = bus->rxbuf;
+	pad = ((unsigned long)rbuf % bus->head_align);
+	if (pad)
+		rbuf += (bus->head_align - pad);
+
+	/* Copy the already-read portion over */
+	memcpy(buf, hdr, BRCMF_FIRSTREAD);
+	if (len <= BRCMF_FIRSTREAD)
+		goto gotpkt;
+
+	/* Raise rdlen to next SDIO block to avoid tail command */
+	rdlen = len - BRCMF_FIRSTREAD;
+	if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
+		pad = bus->blocksize - (rdlen % bus->blocksize);
+		if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
+		    ((len + pad) < bus->sdiodev->bus_if->maxctl))
+			rdlen += pad;
+	} else if (rdlen % bus->head_align) {
+		rdlen += bus->head_align - (rdlen % bus->head_align);
+	}
+
+	/* Drop if the read is too big or it exceeds our maximum */
+	if ((rdlen + BRCMF_FIRSTREAD) > bus->sdiodev->bus_if->maxctl) {
+		brcmf_err("%d-byte control read exceeds %d-byte buffer\n",
+			  rdlen, bus->sdiodev->bus_if->maxctl);
+		brcmf_sdio_rxfail(bus, false, false);
+		goto done;
+	}
+
+	if ((len - doff) > bus->sdiodev->bus_if->maxctl) {
+		brcmf_err("%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
+			  len, len - doff, bus->sdiodev->bus_if->maxctl);
+		bus->sdcnt.rx_toolong++;
+		brcmf_sdio_rxfail(bus, false, false);
+		goto done;
+	}
+
+	/* Read the remainder of the frame body */
+	sdret = brcmf_sdiod_recv_buf(bus->sdiodev, rbuf, rdlen);
+	bus->sdcnt.f2rxdata++;
+
+	/* Control frame failures need retransmission */
+	if (sdret < 0) {
+		brcmf_err("read %d control bytes failed: %d\n",
+			  rdlen, sdret);
+		bus->sdcnt.rxc_errors++;
+		brcmf_sdio_rxfail(bus, true, true);
+		goto done;
+	} else
+		memcpy(buf + BRCMF_FIRSTREAD, rbuf, rdlen);
+
+gotpkt:
+
+	brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
+			   buf, len, "RxCtrl:\n");
+
+	/* Point to valid data and indicate its length */
+	spin_lock_bh(&bus->rxctl_lock);
+	if (bus->rxctl) {
+		brcmf_err("last control frame is being processed.\n");
+		spin_unlock_bh(&bus->rxctl_lock);
+		vfree(buf);
+		goto done;
+	}
+	bus->rxctl = buf + doff;
+	bus->rxctl_orig = buf;
+	bus->rxlen = len - doff;
+	spin_unlock_bh(&bus->rxctl_lock);
+
+done:
+	/* Awake any waiters */
+	brcmf_sdio_dcmd_resp_wake(bus);
+}
+
+/* Pad read to blocksize for efficiency */
+static void brcmf_sdio_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
+{
+	if (bus->roundup && bus->blocksize && *rdlen > bus->blocksize) {
+		*pad = bus->blocksize - (*rdlen % bus->blocksize);
+		if (*pad <= bus->roundup && *pad < bus->blocksize &&
+		    *rdlen + *pad + BRCMF_FIRSTREAD < MAX_RX_DATASZ)
+			*rdlen += *pad;
+	} else if (*rdlen % bus->head_align) {
+		*rdlen += bus->head_align - (*rdlen % bus->head_align);
+	}
+}
+
+static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
+{
+	struct sk_buff *pkt;		/* Packet for event or data frames */
+	u16 pad;		/* Number of pad bytes to read */
+	uint rxleft = 0;	/* Remaining number of frames allowed */
+	int ret;		/* Return code from calls */
+	uint rxcount = 0;	/* Total frames read */
+	struct brcmf_sdio_hdrinfo *rd = &bus->cur_read, rd_new;
+	u8 head_read = 0;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	/* Not finished unless we encounter a "no more frames" indication */
+	bus->rxpending = true;
+
+	for (rd->seq_num = bus->rx_seq, rxleft = maxframes;
+	     !bus->rxskip && rxleft && bus->sdiodev->state == BRCMF_SDIOD_DATA;
+	     rd->seq_num++, rxleft--) {
+
+		/* Handle glomming separately */
+		if (bus->glomd || !skb_queue_empty(&bus->glom)) {
+			u8 cnt;
+			brcmf_dbg(GLOM, "calling rxglom: glomd %p, glom %p\n",
+				  bus->glomd, skb_peek(&bus->glom));
+			cnt = brcmf_sdio_rxglom(bus, rd->seq_num);
+			brcmf_dbg(GLOM, "rxglom returned %d\n", cnt);
+			rd->seq_num += cnt - 1;
+			rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1;
+			continue;
+		}
+
+		rd->len_left = rd->len;
+		/* read the header first when the frame length is unknown */
+		sdio_claim_host(bus->sdiodev->func[1]);
+		if (!rd->len) {
+			ret = brcmf_sdiod_recv_buf(bus->sdiodev,
+						   bus->rxhdr, BRCMF_FIRSTREAD);
+			bus->sdcnt.f2rxhdrs++;
+			if (ret < 0) {
+				brcmf_err("RXHEADER FAILED: %d\n",
+					  ret);
+				bus->sdcnt.rx_hdrfail++;
+				brcmf_sdio_rxfail(bus, true, true);
+				sdio_release_host(bus->sdiodev->func[1]);
+				continue;
+			}
+
+			brcmf_dbg_hex_dump(BRCMF_BYTES_ON() || BRCMF_HDRS_ON(),
+					   bus->rxhdr, SDPCM_HDRLEN,
+					   "RxHdr:\n");
+
+			if (brcmf_sdio_hdparse(bus, bus->rxhdr, rd,
+					       BRCMF_SDIO_FT_NORMAL)) {
+				sdio_release_host(bus->sdiodev->func[1]);
+				if (!bus->rxpending)
+					break;
+				else
+					continue;
+			}
+
+			if (rd->channel == SDPCM_CONTROL_CHANNEL) {
+				brcmf_sdio_read_control(bus, bus->rxhdr,
+							rd->len,
+							rd->dat_offset);
+				/* prepare the descriptor for the next read */
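+				/* (len_nxtfrm is reported in 16-byte units) */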
+				rd->len = rd->len_nxtfrm << 4;
+				rd->len_nxtfrm = 0;
+				/* treat all packets as events if we don't know */
+				rd->channel = SDPCM_EVENT_CHANNEL;
+				sdio_release_host(bus->sdiodev->func[1]);
+				continue;
+			}
+			rd->len_left = rd->len > BRCMF_FIRSTREAD ?
+				       rd->len - BRCMF_FIRSTREAD : 0;
+			head_read = BRCMF_FIRSTREAD;
+		}
+
+		brcmf_sdio_pad(bus, &pad, &rd->len_left);
+
+		pkt = brcmu_pkt_buf_get_skb(rd->len_left + head_read +
+					    bus->head_align);
+		if (!pkt) {
+			/* Give up on data, request rtx of events */
+			brcmf_err("brcmu_pkt_buf_get_skb failed\n");
+			brcmf_sdio_rxfail(bus, false,
+					    RETRYCHAN(rd->channel));
+			sdio_release_host(bus->sdiodev->func[1]);
+			continue;
+		}
+		skb_pull(pkt, head_read);
+		pkt_align(pkt, rd->len_left, bus->head_align);
+
+		ret = brcmf_sdiod_recv_pkt(bus->sdiodev, pkt);
+		bus->sdcnt.f2rxdata++;
+		sdio_release_host(bus->sdiodev->func[1]);
+
+		if (ret < 0) {
+			brcmf_err("read %d bytes from channel %d failed: %d\n",
+				  rd->len, rd->channel, ret);
+			brcmu_pkt_buf_free_skb(pkt);
+			sdio_claim_host(bus->sdiodev->func[1]);
+			brcmf_sdio_rxfail(bus, true,
+					    RETRYCHAN(rd->channel));
+			sdio_release_host(bus->sdiodev->func[1]);
+			continue;
+		}
+
+		if (head_read) {
+			skb_push(pkt, head_read);
+			memcpy(pkt->data, bus->rxhdr, head_read);
+			head_read = 0;
+		} else {
+			memcpy(bus->rxhdr, pkt->data, SDPCM_HDRLEN);
+			rd_new.seq_num = rd->seq_num;
+			sdio_claim_host(bus->sdiodev->func[1]);
+			if (brcmf_sdio_hdparse(bus, bus->rxhdr, &rd_new,
+					       BRCMF_SDIO_FT_NORMAL)) {
+				rd->len = 0;
+				brcmu_pkt_buf_free_skb(pkt);
+			}
+			bus->sdcnt.rx_readahead_cnt++;
+			if (rd->len != roundup(rd_new.len, 16)) {
+				brcmf_err("frame length mismatch: read %d, should be %d\n",
+					  rd->len,
+					  roundup(rd_new.len, 16) >> 4);
+				rd->len = 0;
+				brcmf_sdio_rxfail(bus, true, true);
+				sdio_release_host(bus->sdiodev->func[1]);
+				brcmu_pkt_buf_free_skb(pkt);
+				continue;
+			}
+			sdio_release_host(bus->sdiodev->func[1]);
+			rd->len_nxtfrm = rd_new.len_nxtfrm;
+			rd->channel = rd_new.channel;
+			rd->dat_offset = rd_new.dat_offset;
+
+			brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() &&
+					     BRCMF_DATA_ON()) &&
+					   BRCMF_HDRS_ON(),
+					   bus->rxhdr, SDPCM_HDRLEN,
+					   "RxHdr:\n");
+
+			if (rd_new.channel == SDPCM_CONTROL_CHANNEL) {
+				brcmf_err("readahead on control packet %d?\n",
+					  rd_new.seq_num);
+				/* Force retry w/normal header read */
+				rd->len = 0;
+				sdio_claim_host(bus->sdiodev->func[1]);
+				brcmf_sdio_rxfail(bus, false, true);
+				sdio_release_host(bus->sdiodev->func[1]);
+				brcmu_pkt_buf_free_skb(pkt);
+				continue;
+			}
+		}
+
+		brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
+				   pkt->data, rd->len, "Rx Data:\n");
+
+		/* Save superframe descriptor and allocate packet frame */
+		if (rd->channel == SDPCM_GLOM_CHANNEL) {
+			if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_HWHDR_LEN])) {
+				brcmf_dbg(GLOM, "glom descriptor, %d bytes:\n",
+					  rd->len);
+				brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
+						   pkt->data, rd->len,
+						   "Glom Data:\n");
+				__skb_trim(pkt, rd->len);
+				skb_pull(pkt, SDPCM_HDRLEN);
+				bus->glomd = pkt;
+			} else {
+				brcmf_err("%s: glom superframe w/o descriptor!\n",
+					  __func__);
+				sdio_claim_host(bus->sdiodev->func[1]);
+				brcmf_sdio_rxfail(bus, false, false);
+				sdio_release_host(bus->sdiodev->func[1]);
+			}
+			/* prepare the descriptor for the next read */
+			rd->len = rd->len_nxtfrm << 4;
+			rd->len_nxtfrm = 0;
+			/* treat all packets as events if we don't know */
+			rd->channel = SDPCM_EVENT_CHANNEL;
+			continue;
+		}
+
+		/* Fill in packet len and prio, deliver upward */
+		__skb_trim(pkt, rd->len);
+		skb_pull(pkt, rd->dat_offset);
+
+		/* prepare the descriptor for the next read */
+		rd->len = rd->len_nxtfrm << 4;
+		rd->len_nxtfrm = 0;
+		/* treat all packets as events if we don't know */
+		rd->channel = SDPCM_EVENT_CHANNEL;
+
+		if (pkt->len == 0) {
+			brcmu_pkt_buf_free_skb(pkt);
+			continue;
+		}
+
+		brcmf_rx_frame(bus->sdiodev->dev, pkt);
+	}
+
+	rxcount = maxframes - rxleft;
+	/* Message if we hit the limit */
+	if (!rxleft)
+		brcmf_dbg(DATA, "hit rx limit of %d frames\n", maxframes);
+	else
+		brcmf_dbg(DATA, "processed %d frames\n", rxcount);
+	/* Back off rxseq if awaiting rtx, update rx_seq */
+	if (bus->rxskip)
+		rd->seq_num--;
+	bus->rx_seq = rd->seq_num;
+
+	return rxcount;
+}
+
+static void
+brcmf_sdio_wait_event_wakeup(struct brcmf_sdio *bus)
+{
+	if (waitqueue_active(&bus->ctrl_wait))
+		wake_up_interruptible(&bus->ctrl_wait);
+	return;
+}
+
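+/* Align the packet data pointer to bus->head_align. Returns the number of
+ * padding bytes pushed in front of the data, or a negative error code.
+ */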
+static int brcmf_sdio_txpkt_hdalign(struct brcmf_sdio *bus, struct sk_buff *pkt)
+{
+	u16 head_pad;
+	u8 *dat_buf;
+
+	dat_buf = (u8 *)(pkt->data);
+
+	/* Check head padding */
+	head_pad = ((unsigned long)dat_buf % bus->head_align);
+	if (head_pad) {
+		if (skb_headroom(pkt) < head_pad) {
+			bus->sdiodev->bus_if->tx_realloc++;
+			head_pad = 0;
+			if (skb_cow(pkt, head_pad))
+				return -ENOMEM;
+		}
+		skb_push(pkt, head_pad);
+		dat_buf = (u8 *)(pkt->data);
+		memset(dat_buf, 0, head_pad + bus->tx_hdrlen);
+	}
+	return head_pad;
+}
+
+/**
+ * struct brcmf_skbuff_cb reserves the first two bytes of sk_buff::cb for
+ * bus layer usage.
+ */
+/* flag marking a dummy skb added for DMA alignment requirement */
+#define ALIGN_SKB_FLAG		0x8000
+/* bit mask of data length chopped from the previous packet */
+#define ALIGN_SKB_CHOP_LEN_MASK	0x7fff
+
+static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio *bus,
+				    struct sk_buff_head *pktq,
+				    struct sk_buff *pkt, u16 total_len)
+{
+	struct brcmf_sdio_dev *sdiodev;
+	struct sk_buff *pkt_pad;
+	u16 tail_pad, tail_chop, chain_pad;
+	unsigned int blksize;
+	bool lastfrm;
+	int ntail, ret;
+
+	sdiodev = bus->sdiodev;
+	blksize = sdiodev->func[SDIO_FUNC_2]->cur_blksize;
+	/* sg entry alignment should be a divisor of block size */
+	WARN_ON(blksize % bus->sgentry_align);
+
+	/* Check tail padding */
+	lastfrm = skb_queue_is_last(pktq, pkt);
+	tail_pad = 0;
+	tail_chop = pkt->len % bus->sgentry_align;
+	if (tail_chop)
+		tail_pad = bus->sgentry_align - tail_chop;
+	chain_pad = (total_len + tail_pad) % blksize;
+	if (lastfrm && chain_pad)
+		tail_pad += blksize - chain_pad;
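+	/* Either chain a dedicated padding skb carrying the chopped tail
+	 * bytes, or expand the packet itself to hold the tail padding.
+	 */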
+	if (skb_tailroom(pkt) < tail_pad && pkt->len > blksize) {
+		pkt_pad = brcmu_pkt_buf_get_skb(tail_pad + tail_chop +
+						bus->head_align);
+		if (pkt_pad == NULL)
+			return -ENOMEM;
+		ret = brcmf_sdio_txpkt_hdalign(bus, pkt_pad);
+		if (unlikely(ret < 0)) {
+			kfree_skb(pkt_pad);
+			return ret;
+		}
+		memcpy(pkt_pad->data,
+		       pkt->data + pkt->len - tail_chop,
+		       tail_chop);
+		*(u16 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop;
+		skb_trim(pkt, pkt->len - tail_chop);
+		skb_trim(pkt_pad, tail_pad + tail_chop);
+		__skb_queue_after(pktq, pkt, pkt_pad);
+	} else {
+		ntail = pkt->data_len + tail_pad -
+			(pkt->end - pkt->tail);
+		if (skb_cloned(pkt) || ntail > 0)
+			if (pskb_expand_head(pkt, 0, ntail, GFP_ATOMIC))
+				return -ENOMEM;
+		if (skb_linearize(pkt))
+			return -ENOMEM;
+		__skb_put(pkt, tail_pad);
+	}
+
+	return tail_pad;
+}
+
+/**
+ * brcmf_sdio_txpkt_prep - packet preparation for transmit
+ * @bus: brcmf_sdio structure pointer
+ * @pktq: packet list pointer
+ * @chan: virtual channel to transmit the packet
+ *
+ * Processes to be applied to the packet
+ *	- Align data buffer pointer
+ *	- Align data buffer length
+ *	- Prepare header
+ * Return: 0 on success, negative error code on failure
+ */
+static int
+brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
+		      uint chan)
+{
+	u16 head_pad, total_len;
+	struct sk_buff *pkt_next;
+	u8 txseq;
+	int ret;
+	struct brcmf_sdio_hdrinfo hd_info = {0};
+
+	txseq = bus->tx_seq;
+	total_len = 0;
+	skb_queue_walk(pktq, pkt_next) {
+		/* alignment packet inserted in previous
+		 * loop cycle can be skipped as it is
+		 * already properly aligned and does not
+		 * need an sdpcm header.
+		 */
+		if (*(u16 *)(pkt_next->cb) & ALIGN_SKB_FLAG)
+			continue;
+
+		/* align packet data pointer */
+		ret = brcmf_sdio_txpkt_hdalign(bus, pkt_next);
+		if (ret < 0)
+			return ret;
+		head_pad = (u16)ret;
+		if (head_pad)
+			memset(pkt_next->data + bus->tx_hdrlen, 0, head_pad);
+
+		total_len += pkt_next->len;
+
+		hd_info.len = pkt_next->len;
+		hd_info.lastfrm = skb_queue_is_last(pktq, pkt_next);
+		if (bus->txglom && pktq->qlen > 1) {
+			ret = brcmf_sdio_txpkt_prep_sg(bus, pktq,
+						       pkt_next, total_len);
+			if (ret < 0)
+				return ret;
+			hd_info.tail_pad = (u16)ret;
+			total_len += (u16)ret;
+		}
+
+		hd_info.channel = chan;
+		hd_info.dat_offset = head_pad + bus->tx_hdrlen;
+		hd_info.seq_num = txseq++;
+
+		/* Now fill the header */
+		brcmf_sdio_hdpack(bus, pkt_next->data, &hd_info);
+
+		if (BRCMF_BYTES_ON() &&
+		    ((BRCMF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) ||
+		     (BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL)))
+			brcmf_dbg_hex_dump(true, pkt_next->data, hd_info.len,
+					   "Tx Frame:\n");
+		else if (BRCMF_HDRS_ON())
+			brcmf_dbg_hex_dump(true, pkt_next->data,
+					   head_pad + bus->tx_hdrlen,
+					   "Tx Header:\n");
+	}
+	/* Hardware length tag of the first packet should be total
+	 * length of the chain (including padding)
+	 */
+	if (bus->txglom)
+		brcmf_sdio_update_hwhdr(pktq->next->data, total_len);
+	return 0;
+}
+
+/**
+ * brcmf_sdio_txpkt_postp - packet post processing for transmit
+ * @bus: brcmf_sdio structure pointer
+ * @pktq: packet list pointer
+ *
+ * Processes to be applied to the packet
+ *	- Remove head padding
+ *	- Remove tail padding
+ */
+static void
+brcmf_sdio_txpkt_postp(struct brcmf_sdio *bus, struct sk_buff_head *pktq)
+{
+	u8 *hdr;
+	u32 dat_offset;
+	u16 tail_pad;
+	u16 dummy_flags, chop_len;
+	struct sk_buff *pkt_next, *tmp, *pkt_prev;
+
+	skb_queue_walk_safe(pktq, pkt_next, tmp) {
+		dummy_flags = *(u16 *)(pkt_next->cb);
+		if (dummy_flags & ALIGN_SKB_FLAG) {
+			chop_len = dummy_flags & ALIGN_SKB_CHOP_LEN_MASK;
+			if (chop_len) {
+				pkt_prev = pkt_next->prev;
+				skb_put(pkt_prev, chop_len);
+			}
+			__skb_unlink(pkt_next, pktq);
+			brcmu_pkt_buf_free_skb(pkt_next);
+		} else {
+			hdr = pkt_next->data + bus->tx_hdrlen - SDPCM_SWHDR_LEN;
+			dat_offset = le32_to_cpu(*(__le32 *)hdr);
+			dat_offset = (dat_offset & SDPCM_DOFFSET_MASK) >>
+				     SDPCM_DOFFSET_SHIFT;
+			skb_pull(pkt_next, dat_offset);
+			if (bus->txglom) {
+				tail_pad = le16_to_cpu(*(__le16 *)(hdr - 2));
+				skb_trim(pkt_next, pkt_next->len - tail_pad);
+			}
+		}
+	}
+}
+
+/* Writes a HW/SW header into the packet and sends it. */
+/* Assumes: (a) header space already there, (b) caller holds lock */
+static int brcmf_sdio_txpkt(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
+			    uint chan)
+{
+	int ret;
+	struct sk_buff *pkt_next, *tmp;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	ret = brcmf_sdio_txpkt_prep(bus, pktq, chan);
+	if (ret)
+		goto done;
+
+	sdio_claim_host(bus->sdiodev->func[1]);
+	ret = brcmf_sdiod_send_pkt(bus->sdiodev, pktq);
+	bus->sdcnt.f2txdata++;
+
+	if (ret < 0)
+		brcmf_sdio_txfail(bus);
+
+	sdio_release_host(bus->sdiodev->func[1]);
+
+done:
+	brcmf_sdio_txpkt_postp(bus, pktq);
+	if (ret == 0)
+		bus->tx_seq = (bus->tx_seq + pktq->qlen) % SDPCM_SEQ_WRAP;
+	skb_queue_walk_safe(pktq, pkt_next, tmp) {
+		__skb_unlink(pkt_next, pktq);
+		brcmf_txcomplete(bus->sdiodev->dev, pkt_next, ret == 0);
+	}
+	return ret;
+}
+
+static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
+{
+	struct sk_buff *pkt;
+	struct sk_buff_head pktq;
+	u32 intstatus = 0;
+	int ret = 0, prec_out, i;
+	uint cnt = 0;
+	u8 tx_prec_map, pkt_num;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	tx_prec_map = ~bus->flowcontrol;
+
+	/* Send frames until the limit or some other event */
+	for (cnt = 0; (cnt < maxframes) && data_ok(bus);) {
+		pkt_num = 1;
+		if (bus->txglom)
+			pkt_num = min_t(u8, bus->tx_max - bus->tx_seq,
+					bus->sdiodev->txglomsz);
+		pkt_num = min_t(u32, pkt_num,
+				brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol));
+		__skb_queue_head_init(&pktq);
+		spin_lock_bh(&bus->txq_lock);
+		for (i = 0; i < pkt_num; i++) {
+			pkt = brcmu_pktq_mdeq(&bus->txq, tx_prec_map,
+					      &prec_out);
+			if (pkt == NULL)
+				break;
+			__skb_queue_tail(&pktq, pkt);
+		}
+		spin_unlock_bh(&bus->txq_lock);
+		if (i == 0)
+			break;
+
+		ret = brcmf_sdio_txpkt(bus, &pktq, SDPCM_DATA_CHANNEL);
+
+		cnt += i;
+
+		/* In poll mode, need to check for other events */
+		if (!bus->intr) {
+			/* Check device status, signal pending interrupt */
+			sdio_claim_host(bus->sdiodev->func[1]);
+			ret = r_sdreg32(bus, &intstatus,
+					offsetof(struct sdpcmd_regs,
+						 intstatus));
+			sdio_release_host(bus->sdiodev->func[1]);
+			bus->sdcnt.f2txdata++;
+			if (ret != 0)
+				break;
+			if (intstatus & bus->hostintmask)
+				atomic_set(&bus->ipend, 1);
+		}
+	}
+
+	/* Unblock network stack flow control if the tx queue has drained */
+	if ((bus->sdiodev->state == BRCMF_SDIOD_DATA) &&
+	    bus->txoff && (pktq_len(&bus->txq) < TXLOW)) {
+		bus->txoff = false;
+		brcmf_txflowblock(bus->sdiodev->dev, false);
+	}
+
+	return cnt;
+}
+
+static int brcmf_sdio_tx_ctrlframe(struct brcmf_sdio *bus, u8 *frame, u16 len)
+{
+	u8 doff;
+	u16 pad;
+	uint retries = 0;
+	struct brcmf_sdio_hdrinfo hd_info = {0};
+	int ret;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	/* Back the pointer to make room for bus header */
+	frame -= bus->tx_hdrlen;
+	len += bus->tx_hdrlen;
+
+	/* Add alignment padding (optional for ctl frames) */
+	doff = ((unsigned long)frame % bus->head_align);
+	if (doff) {
+		frame -= doff;
+		len += doff;
+		memset(frame + bus->tx_hdrlen, 0, doff);
+	}
+
+	/* Round send length to next SDIO block */
+	pad = 0;
+	if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
+		pad = bus->blocksize - (len % bus->blocksize);
+		if ((pad > bus->roundup) || (pad >= bus->blocksize))
+			pad = 0;
+	} else if (len % bus->head_align) {
+		pad = bus->head_align - (len % bus->head_align);
+	}
+	len += pad;
+
+	hd_info.len = len - pad;
+	hd_info.channel = SDPCM_CONTROL_CHANNEL;
+	hd_info.dat_offset = doff + bus->tx_hdrlen;
+	hd_info.seq_num = bus->tx_seq;
+	hd_info.lastfrm = true;
+	hd_info.tail_pad = pad;
+	brcmf_sdio_hdpack(bus, frame, &hd_info);
+
+	if (bus->txglom)
+		brcmf_sdio_update_hwhdr(frame, len);
+
+	brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
+			   frame, len, "Tx Frame:\n");
+	brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() && BRCMF_CTL_ON()) &&
+			   BRCMF_HDRS_ON(),
+			   frame, min_t(u16, len, 16), "TxHdr:\n");
+
+	do {
+		ret = brcmf_sdiod_send_buf(bus->sdiodev, frame, len);
+
+		if (ret < 0)
+			brcmf_sdio_txfail(bus);
+		else
+			bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
+	} while (ret < 0 && retries++ < TXRETRIES);
+
+	return ret;
+}
+
+static void brcmf_sdio_bus_stop(struct device *dev)
+{
+	u32 local_hostintmask;
+	u8 saveclk;
+	int err;
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+	struct brcmf_sdio *bus = sdiodev->bus;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	if (bus->watchdog_tsk) {
+		send_sig(SIGTERM, bus->watchdog_tsk, 1);
+		kthread_stop(bus->watchdog_tsk);
+		bus->watchdog_tsk = NULL;
+	}
+
+	if (sdiodev->state != BRCMF_SDIOD_NOMEDIUM) {
+		sdio_claim_host(sdiodev->func[1]);
+
+		/* Enable clock for device interrupts */
+		brcmf_sdio_bus_sleep(bus, false, false);
+
+		/* Disable and clear interrupts at the chip level also */
+		w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask));
+		local_hostintmask = bus->hostintmask;
+		bus->hostintmask = 0;
+
+		/* Force backplane clocks to assure F2 interrupt propagates */
+		saveclk = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+					    &err);
+		if (!err)
+			brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+					  (saveclk | SBSDIO_FORCE_HT), &err);
+		if (err)
+			brcmf_err("Failed to force clock for F2: err %d\n",
+				  err);
+
+		/* Turn off the bus (F2), free any pending packets */
+		brcmf_dbg(INTR, "disable SDIO interrupts\n");
+		sdio_disable_func(sdiodev->func[SDIO_FUNC_2]);
+
+		/* Clear any pending interrupts now that F2 is disabled */
+		w_sdreg32(bus, local_hostintmask,
+			  offsetof(struct sdpcmd_regs, intstatus));
+
+		sdio_release_host(sdiodev->func[1]);
+	}
+	/* Clear the data packet queues */
+	brcmu_pktq_flush(&bus->txq, true, NULL, NULL);
+
+	/* Clear any held glomming stuff */
+	brcmu_pkt_buf_free_skb(bus->glomd);
+	brcmf_sdio_free_glom(bus);
+
+	/* Clear rx control and wake any waiters */
+	spin_lock_bh(&bus->rxctl_lock);
+	bus->rxlen = 0;
+	spin_unlock_bh(&bus->rxctl_lock);
+	brcmf_sdio_dcmd_resp_wake(bus);
+
+	/* Reset some F2 state stuff */
+	bus->rxskip = false;
+	bus->tx_seq = bus->rx_seq = 0;
+}
+
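+/* Re-enable the out-of-band interrupt if it was disabled and no
+ * interrupt is pending.
+ */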
+static inline void brcmf_sdio_clrintr(struct brcmf_sdio *bus)
+{
+	unsigned long flags;
+
+	if (bus->sdiodev->oob_irq_requested) {
+		spin_lock_irqsave(&bus->sdiodev->irq_en_lock, flags);
+		if (!bus->sdiodev->irq_en && !atomic_read(&bus->ipend)) {
+			enable_irq(bus->sdiodev->pdata->oob_irq_nr);
+			bus->sdiodev->irq_en = true;
+		}
+		spin_unlock_irqrestore(&bus->sdiodev->irq_en_lock, flags);
+	}
+}
+
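+/* Read and acknowledge the SDIO core interrupt status, accumulating the
+ * masked bits into bus->intstatus and latching the flow-control state.
+ */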
+static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
+{
+	struct brcmf_core *buscore;
+	u32 addr;
+	unsigned long val;
+	int ret;
+
+	buscore = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
+	addr = buscore->base + offsetof(struct sdpcmd_regs, intstatus);
+
+	val = brcmf_sdiod_regrl(bus->sdiodev, addr, &ret);
+	bus->sdcnt.f1regdata++;
+	if (ret != 0)
+		return ret;
+
+	val &= bus->hostintmask;
+	atomic_set(&bus->fcstate, !!(val & I_HMB_FC_STATE));
+
+	/* Clear interrupts */
+	if (val) {
+		brcmf_sdiod_regwl(bus->sdiodev, addr, val, &ret);
+		bus->sdcnt.f1regdata++;
+		atomic_or(val, &bus->intstatus);
+	}
+
+	return ret;
+}
+
+static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
+{
+	u32 newstatus = 0;
+	unsigned long intstatus;
+	uint txlimit = bus->txbound;	/* Tx frames to send before resched */
+	uint framecnt;			/* Temporary counter of tx/rx frames */
+	int err = 0;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	sdio_claim_host(bus->sdiodev->func[1]);
+
+	/* If waiting for HTAVAIL, check status */
+	if (!bus->sr_enabled && bus->clkstate == CLK_PENDING) {
+		u8 clkctl, devctl = 0;
+
+#ifdef DEBUG
+		/* Check for inconsistent device control */
+		devctl = brcmf_sdiod_regrb(bus->sdiodev,
+					   SBSDIO_DEVICE_CTL, &err);
+#endif				/* DEBUG */
+
+		/* Read CSR, if clock on switch to AVAIL, else ignore */
+		clkctl = brcmf_sdiod_regrb(bus->sdiodev,
+					   SBSDIO_FUNC1_CHIPCLKCSR, &err);
+
+		brcmf_dbg(SDIO, "DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n",
+			  devctl, clkctl);
+
+		if (SBSDIO_HTAV(clkctl)) {
+			devctl = brcmf_sdiod_regrb(bus->sdiodev,
+						   SBSDIO_DEVICE_CTL, &err);
+			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+			brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+					  devctl, &err);
+			bus->clkstate = CLK_AVAIL;
+		}
+	}
+
+	/* Make sure backplane clock is on */
+	brcmf_sdio_bus_sleep(bus, false, true);
+
+	/* Pending interrupt indicates new device status */
+	if (atomic_read(&bus->ipend) > 0) {
+		atomic_set(&bus->ipend, 0);
+		err = brcmf_sdio_intr_rstatus(bus);
+	}
+
+	/* Start with leftover status bits */
+	intstatus = atomic_xchg(&bus->intstatus, 0);
+
+	/* Handle flow-control change: read new state in case our ack
+	 * crossed another change interrupt.  If change still set, assume
+	 * FC ON for safety, let next loop through do the debounce.
+	 */
+	if (intstatus & I_HMB_FC_CHANGE) {
+		intstatus &= ~I_HMB_FC_CHANGE;
+		err = w_sdreg32(bus, I_HMB_FC_CHANGE,
+				offsetof(struct sdpcmd_regs, intstatus));
+
+		err = r_sdreg32(bus, &newstatus,
+				offsetof(struct sdpcmd_regs, intstatus));
+		bus->sdcnt.f1regdata += 2;
+		atomic_set(&bus->fcstate,
+			   !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE)));
+		intstatus |= (newstatus & bus->hostintmask);
+	}
+
+	/* Handle host mailbox indication */
+	if (intstatus & I_HMB_HOST_INT) {
+		intstatus &= ~I_HMB_HOST_INT;
+		intstatus |= brcmf_sdio_hostmail(bus);
+	}
+
+	sdio_release_host(bus->sdiodev->func[1]);
+
+	/* Generally don't ask for these, can get CRC errors... */
+	if (intstatus & I_WR_OOSYNC) {
+		brcmf_err("Dongle reports WR_OOSYNC\n");
+		intstatus &= ~I_WR_OOSYNC;
+	}
+
+	if (intstatus & I_RD_OOSYNC) {
+		brcmf_err("Dongle reports RD_OOSYNC\n");
+		intstatus &= ~I_RD_OOSYNC;
+	}
+
+	if (intstatus & I_SBINT) {
+		brcmf_err("Dongle reports SBINT\n");
+		intstatus &= ~I_SBINT;
+	}
+
+	/* Would be active due to wake-wlan in gSPI */
+	if (intstatus & I_CHIPACTIVE) {
+		brcmf_dbg(INFO, "Dongle reports CHIPACTIVE\n");
+		intstatus &= ~I_CHIPACTIVE;
+	}
+
+	/* Ignore frame indications if rxskip is set */
+	if (bus->rxskip)
+		intstatus &= ~I_HMB_FRAME_IND;
+
+	/* On frame indication, read available frames */
+	if ((intstatus & I_HMB_FRAME_IND) && (bus->clkstate == CLK_AVAIL)) {
+		brcmf_sdio_readframes(bus, bus->rxbound);
+		if (!bus->rxpending)
+			intstatus &= ~I_HMB_FRAME_IND;
+	}
+
+	/* Keep still-pending events for next scheduling */
+	if (intstatus)
+		atomic_or(intstatus, &bus->intstatus);
+
+	brcmf_sdio_clrintr(bus);
+
+	if (bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL) &&
+	    data_ok(bus)) {
+		sdio_claim_host(bus->sdiodev->func[1]);
+		if (bus->ctrl_frame_stat) {
+			err = brcmf_sdio_tx_ctrlframe(bus,  bus->ctrl_frame_buf,
+						      bus->ctrl_frame_len);
+			bus->ctrl_frame_err = err;
+			wmb();
+			bus->ctrl_frame_stat = false;
+		}
+		sdio_release_host(bus->sdiodev->func[1]);
+		brcmf_sdio_wait_event_wakeup(bus);
+	}
+	/* Send queued frames (limit to txminmax if rx may still be pending) */
+	if ((bus->clkstate == CLK_AVAIL) && !atomic_read(&bus->fcstate) &&
+	    brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit &&
+	    data_ok(bus)) {
+		framecnt = bus->rxpending ? min(txlimit, bus->txminmax) :
+					    txlimit;
+		brcmf_sdio_sendfromq(bus, framecnt);
+	}
+
+	if ((bus->sdiodev->state != BRCMF_SDIOD_DATA) || (err != 0)) {
+		brcmf_err("failed backplane access over SDIO, halting operation\n");
+		atomic_set(&bus->intstatus, 0);
+		if (bus->ctrl_frame_stat) {
+			sdio_claim_host(bus->sdiodev->func[1]);
+			if (bus->ctrl_frame_stat) {
+				bus->ctrl_frame_err = -ENODEV;
+				wmb();
+				bus->ctrl_frame_stat = false;
+				brcmf_sdio_wait_event_wakeup(bus);
+			}
+			sdio_release_host(bus->sdiodev->func[1]);
+		}
+	} else if (atomic_read(&bus->intstatus) ||
+		   atomic_read(&bus->ipend) > 0 ||
+		   (!atomic_read(&bus->fcstate) &&
+		    brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
+		    data_ok(bus))) {
+		bus->dpc_triggered = true;
+	}
+}
+
+static struct pktq *brcmf_sdio_bus_gettxq(struct device *dev)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+	struct brcmf_sdio *bus = sdiodev->bus;
+
+	return &bus->txq;
+}
+
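+/* Enqueue a packet on its precedence queue. When the queue limits are hit,
+ * a strictly lower-precedence packet may be evicted from the tail;
+ * otherwise the new packet is refused.
+ */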
+static bool brcmf_sdio_prec_enq(struct pktq *q, struct sk_buff *pkt, int prec)
+{
+	struct sk_buff *p;
+	int eprec = -1;		/* precedence to evict from */
+
+	/* Fast case, precedence queue is not full and we are also not
+	 * exceeding total queue length
+	 */
+	if (!pktq_pfull(q, prec) && !pktq_full(q)) {
+		brcmu_pktq_penq(q, prec, pkt);
+		return true;
+	}
+
+	/* Determine precedence from which to evict packet, if any */
+	if (pktq_pfull(q, prec)) {
+		eprec = prec;
+	} else if (pktq_full(q)) {
+		p = brcmu_pktq_peek_tail(q, &eprec);
+		if (eprec > prec)
+			return false;
+	}
+
+	/* Evict if needed */
+	if (eprec >= 0) {
+		/* Detect queueing to unconfigured precedence */
+		if (eprec == prec)
+			return false;	/* refuse newer (incoming) packet */
+		/* Evict packet according to discard policy */
+		p = brcmu_pktq_pdeq_tail(q, eprec);
+		if (p == NULL)
+			brcmf_err("brcmu_pktq_pdeq_tail() failed\n");
+		brcmu_pkt_buf_free_skb(p);
+	}
+
+	/* Enqueue */
+	p = brcmu_pktq_penq(q, prec, pkt);
+	if (p == NULL)
+		brcmf_err("brcmu_pktq_penq() failed\n");
+
+	return p != NULL;
+}
+
+static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
+{
+	int ret = -EBADE;
+	uint prec;
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+	struct brcmf_sdio *bus = sdiodev->bus;
+
+	brcmf_dbg(TRACE, "Enter: pkt: data %p len %d\n", pkt->data, pkt->len);
+	if (sdiodev->state != BRCMF_SDIOD_DATA)
+		return -EIO;
+
+	/* Add space for the header */
+	skb_push(pkt, bus->tx_hdrlen);
+	/* precondition: IS_ALIGNED((unsigned long)(pkt->data), 2) */
+
+	prec = prio2prec((pkt->priority & PRIOMASK));
+
+	/* Check for existing queue, current flow-control,
+	 * pending event, or pending clock
+	 */
+	brcmf_dbg(TRACE, "deferring pktq len %d\n", pktq_len(&bus->txq));
+	bus->sdcnt.fcqueued++;
+
+	/* Priority-based enqueue */
+	spin_lock_bh(&bus->txq_lock);
+	/* reset bus_flags in packet cb */
+	*(u16 *)(pkt->cb) = 0;
+	if (!brcmf_sdio_prec_enq(&bus->txq, pkt, prec)) {
+		skb_pull(pkt, bus->tx_hdrlen);
+		brcmf_err("out of bus->txq !!!\n");
+		ret = -ENOSR;
+	} else {
+		ret = 0;
+	}
+
+	if (pktq_len(&bus->txq) >= TXHI) {
+		bus->txoff = true;
+		brcmf_txflowblock(dev, true);
+	}
+	spin_unlock_bh(&bus->txq_lock);
+
+#ifdef DEBUG
+	if (pktq_plen(&bus->txq, prec) > qcount[prec])
+		qcount[prec] = pktq_plen(&bus->txq, prec);
+#endif
+
+	brcmf_sdio_trigger_dpc(bus);
+	return ret;
+}
+
+#ifdef DEBUG
+#define CONSOLE_LINE_MAX	192
+
+static int brcmf_sdio_readconsole(struct brcmf_sdio *bus)
+{
+	struct brcmf_console *c = &bus->console;
+	u8 line[CONSOLE_LINE_MAX], ch;
+	u32 n, idx, addr;
+	int rv;
+
+	/* Don't do anything until FWREADY updates console address */
+	if (bus->console_addr == 0)
+		return 0;
+
+	/* Read console log struct */
+	addr = bus->console_addr + offsetof(struct rte_console, log_le);
+	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&c->log_le,
+			       sizeof(c->log_le));
+	if (rv < 0)
+		return rv;
+
+	/* Allocate console buffer (one time only) */
+	if (c->buf == NULL) {
+		c->bufsize = le32_to_cpu(c->log_le.buf_size);
+		c->buf = kmalloc(c->bufsize, GFP_ATOMIC);
+		if (c->buf == NULL)
+			return -ENOMEM;
+	}
+
+	idx = le32_to_cpu(c->log_le.idx);
+
+	/* Protect against corrupt value */
+	if (idx > c->bufsize)
+		return -EBADE;
+
+	/* Skip reading the console buffer if the index pointer
+	 * has not moved
+	 */
+	if (idx == c->last)
+		return 0;
+
+	/* Read the console buffer */
+	addr = le32_to_cpu(c->log_le.buf);
+	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, c->buf, c->bufsize);
+	if (rv < 0)
+		return rv;
+
+	while (c->last != idx) {
+		for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
+			if (c->last == idx) {
+				/* This would output a partial line.
+				 * Instead, back up the buffer pointer and
+				 * output this line next time around.
+				 */
+				if (c->last >= n)
+					c->last -= n;
+				else
+					c->last = c->bufsize - n;
+				goto break2;
+			}
+			ch = c->buf[c->last];
+			c->last = (c->last + 1) % c->bufsize;
+			if (ch == '\n')
+				break;
+			line[n] = ch;
+		}
+
+		if (n > 0) {
+			if (line[n - 1] == '\r')
+				n--;
+			line[n] = 0;
+			pr_debug("CONSOLE: %s\n", line);
+		}
+	}
+break2:
+
+	return 0;
+}
+#endif				/* DEBUG */
+
+static int
+brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+	struct brcmf_sdio *bus = sdiodev->bus;
+	int ret;
+
+	brcmf_dbg(TRACE, "Enter\n");
+	if (sdiodev->state != BRCMF_SDIOD_DATA)
+		return -EIO;
+
+	/* Send from dpc */
+	bus->ctrl_frame_buf = msg;
+	bus->ctrl_frame_len = msglen;
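+	/* make buffer and length visible before the DPC sees the flag */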
+	wmb();
+	bus->ctrl_frame_stat = true;
+
+	brcmf_sdio_trigger_dpc(bus);
+	wait_event_interruptible_timeout(bus->ctrl_wait, !bus->ctrl_frame_stat,
+					 msecs_to_jiffies(CTL_DONE_TIMEOUT));
+	ret = 0;
+	if (bus->ctrl_frame_stat) {
+		sdio_claim_host(bus->sdiodev->func[1]);
+		if (bus->ctrl_frame_stat) {
+			brcmf_dbg(SDIO, "ctrl_frame timeout\n");
+			bus->ctrl_frame_stat = false;
+			ret = -ETIMEDOUT;
+		}
+		sdio_release_host(bus->sdiodev->func[1]);
+	}
+	if (!ret) {
+		brcmf_dbg(SDIO, "ctrl_frame complete, err=%d\n",
+			  bus->ctrl_frame_err);
+		rmb();
+		ret = bus->ctrl_frame_err;
+	}
+
+	if (ret)
+		bus->sdcnt.tx_ctlerrs++;
+	else
+		bus->sdcnt.tx_ctlpkts++;
+
+	return ret;
+}
+
+#ifdef DEBUG
+static int brcmf_sdio_dump_console(struct seq_file *seq, struct brcmf_sdio *bus,
+				   struct sdpcm_shared *sh)
+{
+	u32 addr, console_ptr, console_size, console_index;
+	char *conbuf = NULL;
+	__le32 sh_val;
+	int rv;
+
+	/* obtain console information from device memory */
+	addr = sh->console_addr + offsetof(struct rte_console, log_le);
+	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr,
+			       (u8 *)&sh_val, sizeof(u32));
+	if (rv < 0)
+		return rv;
+	console_ptr = le32_to_cpu(sh_val);
+
+	addr = sh->console_addr + offsetof(struct rte_console, log_le.buf_size);
+	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr,
+			       (u8 *)&sh_val, sizeof(u32));
+	if (rv < 0)
+		return rv;
+	console_size = le32_to_cpu(sh_val);
+
+	addr = sh->console_addr + offsetof(struct rte_console, log_le.idx);
+	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr,
+			       (u8 *)&sh_val, sizeof(u32));
+	if (rv < 0)
+		return rv;
+	console_index = le32_to_cpu(sh_val);
+
+	/* allocate buffer for console data */
+	if (console_size <= CONSOLE_BUFFER_MAX)
+		conbuf = vzalloc(console_size+1);
+
+	if (!conbuf)
+		return -ENOMEM;
+
+	/* obtain the console data from device */
+	conbuf[console_size] = '\0';
+	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, console_ptr, (u8 *)conbuf,
+			       console_size);
+	if (rv < 0)
+		goto done;
+
+	rv = seq_write(seq, conbuf + console_index,
+		       console_size - console_index);
+	if (rv < 0)
+		goto done;
+
+	if (console_index > 0)
+		rv = seq_write(seq, conbuf, console_index - 1);
+
+done:
+	vfree(conbuf);
+	return rv;
+}
+
+static int brcmf_sdio_trap_info(struct seq_file *seq, struct brcmf_sdio *bus,
+				struct sdpcm_shared *sh)
+{
+	int error;
+	struct brcmf_trap_info tr;
+
+	if ((sh->flags & SDPCM_SHARED_TRAP) == 0) {
+		brcmf_dbg(INFO, "no trap in firmware\n");
+		return 0;
+	}
+
+	error = brcmf_sdiod_ramrw(bus->sdiodev, false, sh->trap_addr, (u8 *)&tr,
+				  sizeof(struct brcmf_trap_info));
+	if (error < 0)
+		return error;
+
+	seq_printf(seq,
+		   "dongle trap info: type 0x%x @ epc 0x%08x\n"
+		   "  cpsr 0x%08x spsr 0x%08x sp 0x%08x\n"
+		   "  lr   0x%08x pc   0x%08x offset 0x%x\n"
+		   "  r0   0x%08x r1   0x%08x r2 0x%08x r3 0x%08x\n"
+		   "  r4   0x%08x r5   0x%08x r6 0x%08x r7 0x%08x\n",
+		   le32_to_cpu(tr.type), le32_to_cpu(tr.epc),
+		   le32_to_cpu(tr.cpsr), le32_to_cpu(tr.spsr),
+		   le32_to_cpu(tr.r13), le32_to_cpu(tr.r14),
+		   le32_to_cpu(tr.pc), sh->trap_addr,
+		   le32_to_cpu(tr.r0), le32_to_cpu(tr.r1),
+		   le32_to_cpu(tr.r2), le32_to_cpu(tr.r3),
+		   le32_to_cpu(tr.r4), le32_to_cpu(tr.r5),
+		   le32_to_cpu(tr.r6), le32_to_cpu(tr.r7));
+
+	return 0;
+}
+
+static int brcmf_sdio_assert_info(struct seq_file *seq, struct brcmf_sdio *bus,
+				  struct sdpcm_shared *sh)
+{
+	int error = 0;
+	char file[80] = "?";
+	char expr[80] = "<???>";
+
+	if ((sh->flags & SDPCM_SHARED_ASSERT_BUILT) == 0) {
+		brcmf_dbg(INFO, "firmware not built with -assert\n");
+		return 0;
+	} else if ((sh->flags & SDPCM_SHARED_ASSERT) == 0) {
+		brcmf_dbg(INFO, "no assert in dongle\n");
+		return 0;
+	}
+
+	sdio_claim_host(bus->sdiodev->func[1]);
+	if (sh->assert_file_addr != 0) {
+		error = brcmf_sdiod_ramrw(bus->sdiodev, false,
+					  sh->assert_file_addr, (u8 *)file, 80);
+		if (error < 0)
+			return error;
+	}
+	if (sh->assert_exp_addr != 0) {
+		error = brcmf_sdiod_ramrw(bus->sdiodev, false,
+					  sh->assert_exp_addr, (u8 *)expr, 80);
+		if (error < 0)
+			return error;
+	}
+	sdio_release_host(bus->sdiodev->func[1]);
+
+	seq_printf(seq, "dongle assert: %s:%d: assert(%s)\n",
+		   file, sh->assert_line, expr);
+	return 0;
+}
+
+static int brcmf_sdio_checkdied(struct brcmf_sdio *bus)
+{
+	int error;
+	struct sdpcm_shared sh;
+
+	error = brcmf_sdio_readshared(bus, &sh);
+
+	if (error < 0)
+		return error;
+
+	if ((sh.flags & SDPCM_SHARED_ASSERT_BUILT) == 0)
+		brcmf_dbg(INFO, "firmware not built with -assert\n");
+	else if (sh.flags & SDPCM_SHARED_ASSERT)
+		brcmf_err("assertion in dongle\n");
+
+	if (sh.flags & SDPCM_SHARED_TRAP)
+		brcmf_err("firmware trap in dongle\n");
+
+	return 0;
+}
+
+static int brcmf_sdio_died_dump(struct seq_file *seq, struct brcmf_sdio *bus)
+{
+	int error = 0;
+	struct sdpcm_shared sh;
+
+	error = brcmf_sdio_readshared(bus, &sh);
+	if (error < 0)
+		goto done;
+
+	error = brcmf_sdio_assert_info(seq, bus, &sh);
+	if (error < 0)
+		goto done;
+
+	error = brcmf_sdio_trap_info(seq, bus, &sh);
+	if (error < 0)
+		goto done;
+
+	error = brcmf_sdio_dump_console(seq, bus, &sh);
+
+done:
+	return error;
+}
+
+static int brcmf_sdio_forensic_read(struct seq_file *seq, void *data)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(seq->private);
+	struct brcmf_sdio *bus = bus_if->bus_priv.sdio->bus;
+
+	return brcmf_sdio_died_dump(seq, bus);
+}
+
+static int brcmf_debugfs_sdio_count_read(struct seq_file *seq, void *data)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(seq->private);
+	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+	struct brcmf_sdio_count *sdcnt = &sdiodev->bus->sdcnt;
+
+	seq_printf(seq,
+		   "intrcount:    %u\nlastintrs:    %u\n"
+		   "pollcnt:      %u\nregfails:     %u\n"
+		   "tx_sderrs:    %u\nfcqueued:     %u\n"
+		   "rxrtx:        %u\nrx_toolong:   %u\n"
+		   "rxc_errors:   %u\nrx_hdrfail:   %u\n"
+		   "rx_badhdr:    %u\nrx_badseq:    %u\n"
+		   "fc_rcvd:      %u\nfc_xoff:      %u\n"
+		   "fc_xon:       %u\nrxglomfail:   %u\n"
+		   "rxglomframes: %u\nrxglompkts:   %u\n"
+		   "f2rxhdrs:     %u\nf2rxdata:     %u\n"
+		   "f2txdata:     %u\nf1regdata:    %u\n"
+		   "tickcnt:      %u\ntx_ctlerrs:   %lu\n"
+		   "tx_ctlpkts:   %lu\nrx_ctlerrs:   %lu\n"
+		   "rx_ctlpkts:   %lu\nrx_readahead: %lu\n",
+		   sdcnt->intrcount, sdcnt->lastintrs,
+		   sdcnt->pollcnt, sdcnt->regfails,
+		   sdcnt->tx_sderrs, sdcnt->fcqueued,
+		   sdcnt->rxrtx, sdcnt->rx_toolong,
+		   sdcnt->rxc_errors, sdcnt->rx_hdrfail,
+		   sdcnt->rx_badhdr, sdcnt->rx_badseq,
+		   sdcnt->fc_rcvd, sdcnt->fc_xoff,
+		   sdcnt->fc_xon, sdcnt->rxglomfail,
+		   sdcnt->rxglomframes, sdcnt->rxglompkts,
+		   sdcnt->f2rxhdrs, sdcnt->f2rxdata,
+		   sdcnt->f2txdata, sdcnt->f1regdata,
+		   sdcnt->tickcnt, sdcnt->tx_ctlerrs,
+		   sdcnt->tx_ctlpkts, sdcnt->rx_ctlerrs,
+		   sdcnt->rx_ctlpkts, sdcnt->rx_readahead_cnt);
+
+	return 0;
+}
+
+static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
+{
+	struct brcmf_pub *drvr = bus->sdiodev->bus_if->drvr;
+	struct dentry *dentry = brcmf_debugfs_get_devdir(drvr);
+
+	if (IS_ERR_OR_NULL(dentry))
+		return;
+
+	bus->console_interval = BRCMF_CONSOLE;
+
+	brcmf_debugfs_add_entry(drvr, "forensics", brcmf_sdio_forensic_read);
+	brcmf_debugfs_add_entry(drvr, "counters",
+				brcmf_debugfs_sdio_count_read);
+	debugfs_create_u32("console_interval", 0644, dentry,
+			   &bus->console_interval);
+}
+#else
+static int brcmf_sdio_checkdied(struct brcmf_sdio *bus)
+{
+	return 0;
+}
+
+static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
+{
+}
+#endif /* DEBUG */
+
+static int
+brcmf_sdio_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
+{
+	int timeleft;
+	uint rxlen = 0;
+	bool pending;
+	u8 *buf;
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+	struct brcmf_sdio *bus = sdiodev->bus;
+
+	brcmf_dbg(TRACE, "Enter\n");
+	if (sdiodev->state != BRCMF_SDIOD_DATA)
+		return -EIO;
+
+	/* Wait until control frame is available */
+	timeleft = brcmf_sdio_dcmd_resp_wait(bus, &bus->rxlen, &pending);
+
+	spin_lock_bh(&bus->rxctl_lock);
+	rxlen = bus->rxlen;
+	memcpy(msg, bus->rxctl, min(msglen, rxlen));
+	bus->rxctl = NULL;
+	buf = bus->rxctl_orig;
+	bus->rxctl_orig = NULL;
+	bus->rxlen = 0;
+	spin_unlock_bh(&bus->rxctl_lock);
+	vfree(buf);
+
+	if (rxlen) {
+		brcmf_dbg(CTL, "resumed on rxctl frame, got %d expected %d\n",
+			  rxlen, msglen);
+	} else if (timeleft == 0) {
+		brcmf_err("resumed on timeout\n");
+		brcmf_sdio_checkdied(bus);
+	} else if (pending) {
+		brcmf_dbg(CTL, "cancelled\n");
+		return -ERESTARTSYS;
+	} else {
+		brcmf_dbg(CTL, "resumed for unknown reason?\n");
+		brcmf_sdio_checkdied(bus);
+	}
+
+	if (rxlen)
+		bus->sdcnt.rx_ctlpkts++;
+	else
+		bus->sdcnt.rx_ctlerrs++;
+
+	return rxlen ? (int)rxlen : -ETIMEDOUT;
+}
+
+#ifdef DEBUG
+static bool
+brcmf_sdio_verifymemory(struct brcmf_sdio_dev *sdiodev, u32 ram_addr,
+			u8 *ram_data, uint ram_sz)
+{
+	char *ram_cmp;
+	int err;
+	bool ret = true;
+	int address;
+	int offset;
+	int len;
+
+	/* read back and verify */
+	brcmf_dbg(INFO, "Compare RAM dl & ul at 0x%08x; size=%d\n", ram_addr,
+		  ram_sz);
+	ram_cmp = kmalloc(MEMBLOCK, GFP_KERNEL);
+	/* skip verification if the compare buffer cannot be allocated */
+	if (!ram_cmp)
+		return true;
+
+	address = ram_addr;
+	offset = 0;
+	while (offset < ram_sz) {
+		len = ((offset + MEMBLOCK) < ram_sz) ? MEMBLOCK :
+		      ram_sz - offset;
+		err = brcmf_sdiod_ramrw(sdiodev, false, address, ram_cmp, len);
+		if (err) {
+			brcmf_err("error %d on reading %d membytes at 0x%08x\n",
+				  err, len, address);
+			ret = false;
+			break;
+		} else if (memcmp(ram_cmp, &ram_data[offset], len)) {
+			brcmf_err("Downloaded RAM image is corrupted, block offset is %d, len is %d\n",
+				  offset, len);
+			ret = false;
+			break;
+		}
+		offset += len;
+		address += len;
+	}
+
+	kfree(ram_cmp);
+
+	return ret;
+}
+#else	/* DEBUG */
+static bool
+brcmf_sdio_verifymemory(struct brcmf_sdio_dev *sdiodev, u32 ram_addr,
+			u8 *ram_data, uint ram_sz)
+{
+	return true;
+}
+#endif	/* DEBUG */
+
+static int brcmf_sdio_download_code_file(struct brcmf_sdio *bus,
+					 const struct firmware *fw)
+{
+	int err;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	err = brcmf_sdiod_ramrw(bus->sdiodev, true, bus->ci->rambase,
+				(u8 *)fw->data, fw->size);
+	if (err)
+		brcmf_err("error %d on writing %d membytes at 0x%08x\n",
+			  err, (int)fw->size, bus->ci->rambase);
+	else if (!brcmf_sdio_verifymemory(bus->sdiodev, bus->ci->rambase,
+					  (u8 *)fw->data, fw->size))
+		err = -EIO;
+
+	return err;
+}
+
+static int brcmf_sdio_download_nvram(struct brcmf_sdio *bus,
+				     void *vars, u32 varsz)
+{
+	int address;
+	int err;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
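+	/* the nvram image is placed at the end of device RAM */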
+	address = bus->ci->ramsize - varsz + bus->ci->rambase;
+	err = brcmf_sdiod_ramrw(bus->sdiodev, true, address, vars, varsz);
+	if (err)
+		brcmf_err("error %d on writing %d nvram bytes at 0x%08x\n",
+			  err, varsz, address);
+	else if (!brcmf_sdio_verifymemory(bus->sdiodev, address, vars, varsz))
+		err = -EIO;
+
+	return err;
+}
+
+static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
+					const struct firmware *fw,
+					void *nvram, u32 nvlen)
+{
+	int bcmerror = -EFAULT;
+	u32 rstvec;
+
+	sdio_claim_host(bus->sdiodev->func[1]);
+	brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
+
+	rstvec = get_unaligned_le32(fw->data);
+	brcmf_dbg(SDIO, "firmware rstvec: %x\n", rstvec);
+
+	bcmerror = brcmf_sdio_download_code_file(bus, fw);
+	release_firmware(fw);
+	if (bcmerror) {
+		brcmf_err("dongle image file download failed\n");
+		brcmf_fw_nvram_free(nvram);
+		goto err;
+	}
+
+	bcmerror = brcmf_sdio_download_nvram(bus, nvram, nvlen);
+	brcmf_fw_nvram_free(nvram);
+	if (bcmerror) {
+		brcmf_err("dongle nvram file download failed\n");
+		goto err;
+	}
+
+	/* Take arm out of reset */
+	if (!brcmf_chip_set_active(bus->ci, rstvec)) {
+		brcmf_err("error getting out of ARM core reset\n");
+		goto err;
+	}
+
+	/* Allow full data communication using DPC from now on. */
+	brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DATA);
+	bcmerror = 0;
+
+err:
+	brcmf_sdio_clkctl(bus, CLK_SDONLY, false);
+	sdio_release_host(bus->sdiodev->func[1]);
+	return bcmerror;
+}
+
+static void brcmf_sdio_sr_init(struct brcmf_sdio *bus)
+{
+	int err = 0;
+	u8 val;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, &err);
+	if (err) {
+		brcmf_err("error reading SBSDIO_FUNC1_WAKEUPCTRL\n");
+		return;
+	}
+
+	val |= 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT;
+	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, val, &err);
+	if (err) {
+		brcmf_err("error writing SBSDIO_FUNC1_WAKEUPCTRL\n");
+		return;
+	}
+
+	/* Add CMD14 Support */
+	brcmf_sdiod_regwb(bus->sdiodev, SDIO_CCCR_BRCM_CARDCAP,
+			  (SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT |
+			   SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT),
+			  &err);
+	if (err) {
+		brcmf_err("error writing SDIO_CCCR_BRCM_CARDCAP\n");
+		return;
+	}
+
+	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+			  SBSDIO_FORCE_HT, &err);
+	if (err) {
+		brcmf_err("error writing SBSDIO_FUNC1_CHIPCLKCSR\n");
+		return;
+	}
+
+	/* set flag */
+	bus->sr_enabled = true;
+	brcmf_dbg(INFO, "SR enabled\n");
+}
+
+/* enable KSO bit */
+static int brcmf_sdio_kso_init(struct brcmf_sdio *bus)
+{
+	u8 val;
+	int err = 0;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	/* KSO bit added in SDIO core rev 12 */
+	if (brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV)->rev < 12)
+		return 0;
+
+	val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, &err);
+	if (err) {
+		brcmf_err("error reading SBSDIO_FUNC1_SLEEPCSR\n");
+		return err;
+	}
+
+	if (!(val & SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) {
+		val |= (SBSDIO_FUNC1_SLEEPCSR_KSO_EN <<
+			SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
+		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
+				  val, &err);
+		if (err) {
+			brcmf_err("error writing SBSDIO_FUNC1_SLEEPCSR\n");
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+static int brcmf_sdio_bus_preinit(struct device *dev)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+	struct brcmf_sdio *bus = sdiodev->bus;
+	uint pad_size;
+	u32 value;
+	int err;
+
+	/* the commands below use the terms tx and rx from
+	 * a device perspective, i.e. bus:txglom affects the
+	 * bus transfers from device to host.
+	 */
+	if (brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV)->rev < 12) {
+		/* for sdio core rev < 12, disable txgloming */
+		value = 0;
+		err = brcmf_iovar_data_set(dev, "bus:txglom", &value,
+					   sizeof(u32));
+	} else {
+		/* otherwise, set txglomalign */
+		value = 4;
+		if (sdiodev->pdata)
+			value = sdiodev->pdata->sd_sgentry_align;
+		/* SDIO ADMA requires at least 32-bit alignment */
+		value = max_t(u32, value, 4);
+		err = brcmf_iovar_data_set(dev, "bus:txglomalign", &value,
+					   sizeof(u32));
+	}
+
+	if (err < 0)
+		goto done;
+
+	bus->tx_hdrlen = SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN;
+	if (sdiodev->sg_support) {
+		bus->txglom = false;
+		value = 1;
+		pad_size = bus->sdiodev->func[2]->cur_blksize << 1;
+		err = brcmf_iovar_data_set(bus->sdiodev->dev, "bus:rxglom",
+					   &value, sizeof(u32));
+		if (err < 0) {
+			/* bus:rxglom is allowed to fail */
+			err = 0;
+		} else {
+			bus->txglom = true;
+			bus->tx_hdrlen += SDPCM_HWEXT_LEN;
+		}
+	}
+	brcmf_bus_add_txhdrlen(bus->sdiodev->dev, bus->tx_hdrlen);
+
+done:
+	return err;
+}
+
+static size_t brcmf_sdio_bus_get_ramsize(struct device *dev)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+	struct brcmf_sdio *bus = sdiodev->bus;
+
+	return bus->ci->ramsize - bus->ci->srsize;
+}
+
+static int brcmf_sdio_bus_get_memdump(struct device *dev, void *data,
+				      size_t mem_size)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+	struct brcmf_sdio *bus = sdiodev->bus;
+	int err;
+	int address;
+	int offset;
+	int len;
+
+	brcmf_dbg(INFO, "dump at 0x%08x: size=%zu\n", bus->ci->rambase,
+		  mem_size);
+
+	address = bus->ci->rambase;
+	offset = err = 0;
+	sdio_claim_host(sdiodev->func[1]);
+	while (offset < mem_size) {
+		len = ((offset + MEMBLOCK) < mem_size) ? MEMBLOCK :
+		      mem_size - offset;
+		err = brcmf_sdiod_ramrw(sdiodev, false, address, data, len);
+		if (err) {
+			brcmf_err("error %d on reading %d membytes at 0x%08x\n",
+				  err, len, address);
+			goto done;
+		}
+		data += len;
+		offset += len;
+		address += len;
+	}
+
+done:
+	sdio_release_host(sdiodev->func[1]);
+	return err;
+}
+
+void brcmf_sdio_trigger_dpc(struct brcmf_sdio *bus)
+{
+	if (!bus->dpc_triggered) {
+		bus->dpc_triggered = true;
+		queue_work(bus->brcmf_wq, &bus->datawork);
+	}
+}
+
+void brcmf_sdio_isr(struct brcmf_sdio *bus)
+{
+	brcmf_dbg(TRACE, "Enter\n");
+
+	if (!bus) {
+		brcmf_err("bus is null pointer, exiting\n");
+		return;
+	}
+
+	/* Count the interrupt call */
+	bus->sdcnt.intrcount++;
+	if (in_interrupt())
+		atomic_set(&bus->ipend, 1);
+	else
+		if (brcmf_sdio_intr_rstatus(bus)) {
+			brcmf_err("failed backplane access\n");
+		}
+
+	/* Disable additional interrupts (is this needed now?) */
+	if (!bus->intr)
+		brcmf_err("isr w/o interrupt configured!\n");
+
+	bus->dpc_triggered = true;
+	queue_work(bus->brcmf_wq, &bus->datawork);
+}
+
+static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
+{
+	brcmf_dbg(TIMER, "Enter\n");
+
+	/* Poll period: check device if appropriate. */
+	if (!bus->sr_enabled &&
+	    bus->poll && (++bus->polltick >= bus->pollrate)) {
+		u32 intstatus = 0;
+
+		/* Reset poll tick */
+		bus->polltick = 0;
+
+		/* Check device if no interrupts */
+		if (!bus->intr ||
+		    (bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) {
+
+			if (!bus->dpc_triggered) {
+				u8 devpend;
+
+				sdio_claim_host(bus->sdiodev->func[1]);
+				devpend = brcmf_sdiod_regrb(bus->sdiodev,
+							    SDIO_CCCR_INTx,
+							    NULL);
+				sdio_release_host(bus->sdiodev->func[1]);
+				intstatus = devpend & (INTR_STATUS_FUNC1 |
+						       INTR_STATUS_FUNC2);
+			}
+
+			/* If there is something, make like the ISR and
+			 * schedule the DPC
+			 */
+			if (intstatus) {
+				bus->sdcnt.pollcnt++;
+				atomic_set(&bus->ipend, 1);
+
+				bus->dpc_triggered = true;
+				queue_work(bus->brcmf_wq, &bus->datawork);
+			}
+		}
+
+		/* Update interrupt tracking */
+		bus->sdcnt.lastintrs = bus->sdcnt.intrcount;
+	}
+#ifdef DEBUG
+	/* Poll for console output periodically */
+	if (bus->sdiodev->state == BRCMF_SDIOD_DATA && BRCMF_FWCON_ON() &&
+	    bus->console_interval != 0) {
+		bus->console.count += BRCMF_WD_POLL_MS;
+		if (bus->console.count >= bus->console_interval) {
+			bus->console.count -= bus->console_interval;
+			sdio_claim_host(bus->sdiodev->func[1]);
+			/* Make sure backplane clock is on */
+			brcmf_sdio_bus_sleep(bus, false, false);
+			if (brcmf_sdio_readconsole(bus) < 0)
+				/* stop on error */
+				bus->console_interval = 0;
+			sdio_release_host(bus->sdiodev->func[1]);
+		}
+	}
+#endif				/* DEBUG */
+
+	/* On idle timeout clear activity flag and/or turn off clock */
+	if (!bus->dpc_triggered) {
+		rmb();
+		if ((!bus->dpc_running) && (bus->idletime > 0) &&
+		    (bus->clkstate == CLK_AVAIL)) {
+			bus->idlecount++;
+			if (bus->idlecount > bus->idletime) {
+				brcmf_dbg(SDIO, "idle\n");
+				sdio_claim_host(bus->sdiodev->func[1]);
+				brcmf_sdio_wd_timer(bus, 0);
+				bus->idlecount = 0;
+				brcmf_sdio_bus_sleep(bus, true, false);
+				sdio_release_host(bus->sdiodev->func[1]);
+			}
+		} else {
+			bus->idlecount = 0;
+		}
+	} else {
+		bus->idlecount = 0;
+	}
+}
+
+static void brcmf_sdio_dataworker(struct work_struct *work)
+{
+	struct brcmf_sdio *bus = container_of(work, struct brcmf_sdio,
+					      datawork);
+
+	bus->dpc_running = true;
+	wmb();
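+	/* rerun the DPC while work was flagged during the last pass */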
+	while (ACCESS_ONCE(bus->dpc_triggered)) {
+		bus->dpc_triggered = false;
+		brcmf_sdio_dpc(bus);
+		bus->idlecount = 0;
+	}
+	bus->dpc_running = false;
+	if (brcmf_sdiod_freezing(bus->sdiodev)) {
+		brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DOWN);
+		brcmf_sdiod_try_freeze(bus->sdiodev);
+		brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DATA);
+	}
+}
+
+static void
+brcmf_sdio_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
+			     struct brcmf_chip *ci, u32 drivestrength)
+{
+	const struct sdiod_drive_str *str_tab = NULL;
+	u32 str_mask;
+	u32 str_shift;
+	u32 base;
+	u32 i;
+	u32 drivestrength_sel = 0;
+	u32 cc_data_temp;
+	u32 addr;
+
+	if (!(ci->cc_caps & CC_CAP_PMU))
+		return;
+
+	switch (SDIOD_DRVSTR_KEY(ci->chip, ci->pmurev)) {
+	case SDIOD_DRVSTR_KEY(BRCM_CC_4330_CHIP_ID, 12):
+		str_tab = sdiod_drvstr_tab1_1v8;
+		str_mask = 0x00003800;
+		str_shift = 11;
+		break;
+	case SDIOD_DRVSTR_KEY(BRCM_CC_4334_CHIP_ID, 17):
+		str_tab = sdiod_drvstr_tab6_1v8;
+		str_mask = 0x00001800;
+		str_shift = 11;
+		break;
+	case SDIOD_DRVSTR_KEY(BRCM_CC_43143_CHIP_ID, 17):
+		/* note: 43143 does not support tristate */
+		i = ARRAY_SIZE(sdiod_drvstr_tab2_3v3) - 1;
+		if (drivestrength >= sdiod_drvstr_tab2_3v3[i].strength) {
+			str_tab = sdiod_drvstr_tab2_3v3;
+			str_mask = 0x00000007;
+			str_shift = 0;
+		} else
+			brcmf_err("Invalid SDIO Drive strength for chip %s, strength=%d\n",
+				  ci->name, drivestrength);
+		break;
+	case SDIOD_DRVSTR_KEY(BRCM_CC_43362_CHIP_ID, 13):
+		str_tab = sdiod_drive_strength_tab5_1v8;
+		str_mask = 0x00003800;
+		str_shift = 11;
+		break;
+	default:
+		brcmf_err("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
+			  ci->name, ci->chiprev, ci->pmurev);
+		break;
+	}
+
+	if (str_tab != NULL) {
+		for (i = 0; str_tab[i].strength != 0; i++) {
+			if (drivestrength >= str_tab[i].strength) {
+				drivestrength_sel = str_tab[i].sel;
+				break;
+			}
+		}
+		base = brcmf_chip_get_chipcommon(ci)->base;
+		addr = CORE_CC_REG(base, chipcontrol_addr);
+		brcmf_sdiod_regwl(sdiodev, addr, 1, NULL);
+		cc_data_temp = brcmf_sdiod_regrl(sdiodev, addr, NULL);
+		cc_data_temp &= ~str_mask;
+		drivestrength_sel <<= str_shift;
+		cc_data_temp |= drivestrength_sel;
+		brcmf_sdiod_regwl(sdiodev, addr, cc_data_temp, NULL);
+
+		brcmf_dbg(INFO, "SDIO: %d mA (req=%d mA) drive strength selected, set to 0x%08x\n",
+			  str_tab[i].strength, drivestrength, cc_data_temp);
+	}
+}
+
+static int brcmf_sdio_buscoreprep(void *ctx)
+{
+	struct brcmf_sdio_dev *sdiodev = ctx;
+	int err = 0;
+	u8 clkval, clkset;
+
+	/* Try forcing SDIO core to do ALPAvail request only */
+	clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
+	brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+	if (err) {
+		brcmf_err("error writing for HT off\n");
+		return err;
+	}
+
+	/* If register supported, wait for ALPAvail and then force ALP */
+	/* This may take up to 15 milliseconds */
+	clkval = brcmf_sdiod_regrb(sdiodev,
+				   SBSDIO_FUNC1_CHIPCLKCSR, NULL);
+
+	if ((clkval & ~SBSDIO_AVBITS) != clkset) {
+		brcmf_err("ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
+			  clkset, clkval);
+		return -EACCES;
+	}
+
+	SPINWAIT(((clkval = brcmf_sdiod_regrb(sdiodev,
+					      SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
+			!SBSDIO_ALPAV(clkval)),
+			PMU_MAX_TRANSITION_DLY);
+	if (!SBSDIO_ALPAV(clkval)) {
+		brcmf_err("timeout on ALPAV wait, clkval 0x%02x\n",
+			  clkval);
+		return -EBUSY;
+	}
+
+	clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
+	brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+	udelay(65);
+
+	/* Also, disable the extra SDIO pull-ups */
+	brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
+
+	return 0;
+}
+
+static void brcmf_sdio_buscore_activate(void *ctx, struct brcmf_chip *chip,
+					u32 rstvec)
+{
+	struct brcmf_sdio_dev *sdiodev = ctx;
+	struct brcmf_core *core;
+	u32 reg_addr;
+
+	/* clear all interrupts */
+	core = brcmf_chip_get_core(chip, BCMA_CORE_SDIO_DEV);
+	reg_addr = core->base + offsetof(struct sdpcmd_regs, intstatus);
+	brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
+
+	if (rstvec)
+		/* Write reset vector to address 0 */
+		brcmf_sdiod_ramrw(sdiodev, true, 0, (void *)&rstvec,
+				  sizeof(rstvec));
+}
+
+static u32 brcmf_sdio_buscore_read32(void *ctx, u32 addr)
+{
+	struct brcmf_sdio_dev *sdiodev = ctx;
+	u32 val, rev;
+
+	val = brcmf_sdiod_regrl(sdiodev, addr, NULL);
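+	/* 4335 and 4339 share an SDIO device id; report rev 2 and later
+	 * parts as 4339.
+	 */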
+	if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
+	    addr == CORE_CC_REG(SI_ENUM_BASE, chipid)) {
+		rev = (val & CID_REV_MASK) >> CID_REV_SHIFT;
+		if (rev >= 2) {
+			val &= ~CID_ID_MASK;
+			val |= BRCM_CC_4339_CHIP_ID;
+		}
+	}
+	return val;
+}
+
+static void brcmf_sdio_buscore_write32(void *ctx, u32 addr, u32 val)
+{
+	struct brcmf_sdio_dev *sdiodev = ctx;
+
+	brcmf_sdiod_regwl(sdiodev, addr, val, NULL);
+}
+
+static const struct brcmf_buscore_ops brcmf_sdio_buscore_ops = {
+	.prepare = brcmf_sdio_buscoreprep,
+	.activate = brcmf_sdio_buscore_activate,
+	.read32 = brcmf_sdio_buscore_read32,
+	.write32 = brcmf_sdio_buscore_write32,
+};
+
+static bool
+brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
+{
+	u8 clkctl = 0;
+	int err = 0;
+	int reg_addr;
+	u32 reg_val;
+	u32 drivestrength;
+
+	sdio_claim_host(bus->sdiodev->func[1]);
+
+	pr_debug("F1 signature read @0x18000000=0x%4x\n",
+		 brcmf_sdiod_regrl(bus->sdiodev, SI_ENUM_BASE, NULL));
+
+	/*
+	 * Force PLL off until brcmf_chip_attach()
+	 * programs PLL control regs
+	 */
+
+	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+			  BRCMF_INIT_CLKCTL1, &err);
+	if (!err)
+		clkctl = brcmf_sdiod_regrb(bus->sdiodev,
+					   SBSDIO_FUNC1_CHIPCLKCSR, &err);
+
+	if (err || ((clkctl & ~SBSDIO_AVBITS) != BRCMF_INIT_CLKCTL1)) {
+		brcmf_err("ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n",
+			  err, BRCMF_INIT_CLKCTL1, clkctl);
+		goto fail;
+	}
+
+	bus->ci = brcmf_chip_attach(bus->sdiodev, &brcmf_sdio_buscore_ops);
+	if (IS_ERR(bus->ci)) {
+		brcmf_err("brcmf_chip_attach failed!\n");
+		bus->ci = NULL;
+		goto fail;
+	}
+
+	if (brcmf_sdio_kso_init(bus)) {
+		brcmf_err("error enabling KSO\n");
+		goto fail;
+	}
+
+	if ((bus->sdiodev->pdata) && (bus->sdiodev->pdata->drive_strength))
+		drivestrength = bus->sdiodev->pdata->drive_strength;
+	else
+		drivestrength = DEFAULT_SDIO_DRIVE_STRENGTH;
+	brcmf_sdio_drivestrengthinit(bus->sdiodev, bus->ci, drivestrength);
+
+	/* Set card control so an SDIO card reset does a WLAN backplane reset */
+	reg_val = brcmf_sdiod_regrb(bus->sdiodev,
+				    SDIO_CCCR_BRCM_CARDCTRL, &err);
+	if (err)
+		goto fail;
+
+	reg_val |= SDIO_CCCR_BRCM_CARDCTRL_WLANRESET;
+
+	brcmf_sdiod_regwb(bus->sdiodev,
+			  SDIO_CCCR_BRCM_CARDCTRL, reg_val, &err);
+	if (err)
+		goto fail;
+
+	/* set PMUControl so a backplane reset does PMU state reload */
+	reg_addr = CORE_CC_REG(brcmf_chip_get_chipcommon(bus->ci)->base,
+			       pmucontrol);
+	reg_val = brcmf_sdiod_regrl(bus->sdiodev, reg_addr, &err);
+	if (err)
+		goto fail;
+
+	reg_val |= (BCMA_CC_PMU_CTL_RES_RELOAD << BCMA_CC_PMU_CTL_RES_SHIFT);
+
+	brcmf_sdiod_regwl(bus->sdiodev, reg_addr, reg_val, &err);
+	if (err)
+		goto fail;
+
+	sdio_release_host(bus->sdiodev->func[1]);
+
+	brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN);
+
+	/* allocate header buffer */
+	bus->hdrbuf = kzalloc(MAX_HDR_READ + bus->head_align, GFP_KERNEL);
+	if (!bus->hdrbuf)
+		return false;
+	/* Locate an appropriately-aligned portion of hdrbuf */
+	bus->rxhdr = (u8 *) roundup((unsigned long)&bus->hdrbuf[0],
+				    bus->head_align);
+
+	/* Set the poll and/or interrupt flags */
+	bus->intr = true;
+	bus->poll = false;
+	if (bus->poll)
+		bus->pollrate = 1;
+
+	return true;
+
+fail:
+	sdio_release_host(bus->sdiodev->func[1]);
+	return false;
+}
+
+static int
+brcmf_sdio_watchdog_thread(void *data)
+{
+	struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
+	int wait;
+
+	allow_signal(SIGTERM);
+	/* Run until signal received */
+	brcmf_sdiod_freezer_count(bus->sdiodev);
+	while (1) {
+		if (kthread_should_stop())
+			break;
+		brcmf_sdiod_freezer_uncount(bus->sdiodev);
+		wait = wait_for_completion_interruptible(&bus->watchdog_wait);
+		brcmf_sdiod_freezer_count(bus->sdiodev);
+		brcmf_sdiod_try_freeze(bus->sdiodev);
+		if (!wait) {
+			brcmf_sdio_bus_watchdog(bus);
+			/* Count the tick for reference */
+			bus->sdcnt.tickcnt++;
+			reinit_completion(&bus->watchdog_wait);
+		} else
+			break;
+	}
+	return 0;
+}
+
+static void
+brcmf_sdio_watchdog(unsigned long data)
+{
+	struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
+
+	if (bus->watchdog_tsk) {
+		complete(&bus->watchdog_wait);
+		/* Reschedule the watchdog */
+		if (bus->wd_timer_valid)
+			mod_timer(&bus->timer,
+				  jiffies + msecs_to_jiffies(BRCMF_WD_POLL_MS));
+	}
+}
+
+static struct brcmf_bus_ops brcmf_sdio_bus_ops = {
+	.stop = brcmf_sdio_bus_stop,
+	.preinit = brcmf_sdio_bus_preinit,
+	.txdata = brcmf_sdio_bus_txdata,
+	.txctl = brcmf_sdio_bus_txctl,
+	.rxctl = brcmf_sdio_bus_rxctl,
+	.gettxq = brcmf_sdio_bus_gettxq,
+	.wowl_config = brcmf_sdio_wowl_config,
+	.get_ramsize = brcmf_sdio_bus_get_ramsize,
+	.get_memdump = brcmf_sdio_bus_get_memdump,
+};
+
+static void brcmf_sdio_firmware_callback(struct device *dev,
+					 const struct firmware *code,
+					 void *nvram, u32 nvram_len)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+	struct brcmf_sdio *bus = sdiodev->bus;
+	int err = 0;
+	u8 saveclk;
+
+	brcmf_dbg(TRACE, "Enter: dev=%s\n", dev_name(dev));
+
+	if (!bus_if->drvr)
+		return;
+
+	/* try to download image and nvram to the dongle */
+	bus->alp_only = true;
+	err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len);
+	if (err)
+		goto fail;
+	bus->alp_only = false;
+
+	/* Start the watchdog timer */
+	bus->sdcnt.tickcnt = 0;
+	brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
+
+	sdio_claim_host(sdiodev->func[1]);
+
+	/* Make sure backplane clock is on, needed to generate F2 interrupt */
+	brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
+	if (bus->clkstate != CLK_AVAIL)
+		goto release;
+
+	/* Force clocks on backplane to be sure F2 interrupt propagates */
+	saveclk = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+	if (!err) {
+		brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+				  (saveclk | SBSDIO_FORCE_HT), &err);
+	}
+	if (err) {
+		brcmf_err("Failed to force clock for F2: err %d\n", err);
+		goto release;
+	}
+
+	/* Enable function 2 (frame transfers) */
+	w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT,
+		  offsetof(struct sdpcmd_regs, tosbmailboxdata));
+	err = sdio_enable_func(sdiodev->func[SDIO_FUNC_2]);
+
+	brcmf_dbg(INFO, "enable F2: err=%d\n", err);
+
+	/* If F2 successfully enabled, set core and enable interrupts */
+	if (!err) {
+		/* Set up the interrupt mask and enable interrupts */
+		bus->hostintmask = HOSTINTMASK;
+		w_sdreg32(bus, bus->hostintmask,
+			  offsetof(struct sdpcmd_regs, hostintmask));
+
+		brcmf_sdiod_regwb(sdiodev, SBSDIO_WATERMARK, 8, &err);
+	} else {
+		/* Disable F2 again */
+		sdio_disable_func(sdiodev->func[SDIO_FUNC_2]);
+		goto release;
+	}
+
+	if (brcmf_chip_sr_capable(bus->ci)) {
+		brcmf_sdio_sr_init(bus);
+	} else {
+		/* Restore previous clock setting */
+		brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+				  saveclk, &err);
+	}
+
+	if (err == 0) {
+		err = brcmf_sdiod_intr_register(sdiodev);
+		if (err != 0)
+			brcmf_err("intr register failed:%d\n", err);
+	}
+
+	/* If we didn't come up, turn off backplane clock */
+	if (err != 0)
+		brcmf_sdio_clkctl(bus, CLK_NONE, false);
+
+	sdio_release_host(sdiodev->func[1]);
+
+	err = brcmf_bus_start(dev);
+	if (err != 0) {
+		brcmf_err("dongle is not responding\n");
+		goto fail;
+	}
+	return;
+
+release:
+	sdio_release_host(sdiodev->func[1]);
+fail:
+	brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err);
+	device_release_driver(dev);
+}
+
+struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
+{
+	int ret;
+	struct brcmf_sdio *bus;
+	struct workqueue_struct *wq;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	/* Allocate private bus interface state */
+	bus = kzalloc(sizeof(struct brcmf_sdio), GFP_ATOMIC);
+	if (!bus)
+		goto fail;
+
+	bus->sdiodev = sdiodev;
+	sdiodev->bus = bus;
+	skb_queue_head_init(&bus->glom);
+	bus->txbound = BRCMF_TXBOUND;
+	bus->rxbound = BRCMF_RXBOUND;
+	bus->txminmax = BRCMF_TXMINMAX;
+	bus->tx_seq = SDPCM_SEQ_WRAP - 1;
+
+	/* platform specific configuration:
+	 *   alignments must be at least 4 bytes for ADMA
+	 */
+	bus->head_align = ALIGNMENT;
+	bus->sgentry_align = ALIGNMENT;
+	if (sdiodev->pdata) {
+		if (sdiodev->pdata->sd_head_align > ALIGNMENT)
+			bus->head_align = sdiodev->pdata->sd_head_align;
+		if (sdiodev->pdata->sd_sgentry_align > ALIGNMENT)
+			bus->sgentry_align = sdiodev->pdata->sd_sgentry_align;
+	}
+
+	/* single-threaded workqueue */
+	wq = alloc_ordered_workqueue("brcmf_wq/%s", WQ_MEM_RECLAIM,
+				     dev_name(&sdiodev->func[1]->dev));
+	if (!wq) {
+		brcmf_err("insufficient memory to create txworkqueue\n");
+		goto fail;
+	}
+	brcmf_sdiod_freezer_count(sdiodev);
+	INIT_WORK(&bus->datawork, brcmf_sdio_dataworker);
+	bus->brcmf_wq = wq;
+
+	/* attempt to attach to the dongle */
+	if (!(brcmf_sdio_probe_attach(bus))) {
+		brcmf_err("brcmf_sdio_probe_attach failed\n");
+		goto fail;
+	}
+
+	spin_lock_init(&bus->rxctl_lock);
+	spin_lock_init(&bus->txq_lock);
+	init_waitqueue_head(&bus->ctrl_wait);
+	init_waitqueue_head(&bus->dcmd_resp_wait);
+
+	/* Set up the watchdog timer */
+	init_timer(&bus->timer);
+	bus->timer.data = (unsigned long)bus;
+	bus->timer.function = brcmf_sdio_watchdog;
+
+	/* Initialize watchdog thread */
+	init_completion(&bus->watchdog_wait);
+	bus->watchdog_tsk = kthread_run(brcmf_sdio_watchdog_thread,
+					bus, "brcmf_wdog/%s",
+					dev_name(&sdiodev->func[1]->dev));
+	if (IS_ERR(bus->watchdog_tsk)) {
+		pr_warn("brcmf_watchdog thread failed to start\n");
+		bus->watchdog_tsk = NULL;
+	}
+	/* Initialize DPC thread */
+	bus->dpc_triggered = false;
+	bus->dpc_running = false;
+
+	/* Assign bus interface call back */
+	bus->sdiodev->bus_if->dev = bus->sdiodev->dev;
+	bus->sdiodev->bus_if->ops = &brcmf_sdio_bus_ops;
+	bus->sdiodev->bus_if->chip = bus->ci->chip;
+	bus->sdiodev->bus_if->chiprev = bus->ci->chiprev;
+
+	/* default sdio bus header length for tx packet */
+	bus->tx_hdrlen = SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN;
+
+	/* Attach to the common layer, reserve hdr space */
+	ret = brcmf_attach(bus->sdiodev->dev);
+	if (ret != 0) {
+		brcmf_err("brcmf_attach failed\n");
+		goto fail;
+	}
+
+	/* Query the F2 block size, set roundup accordingly */
+	bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
+	bus->roundup = min(max_roundup, bus->blocksize);
+
+	/* Allocate buffers */
+	if (bus->sdiodev->bus_if->maxctl) {
+		bus->sdiodev->bus_if->maxctl += bus->roundup;
+		bus->rxblen =
+		    roundup((bus->sdiodev->bus_if->maxctl + SDPCM_HDRLEN),
+			    ALIGNMENT) + bus->head_align;
+		bus->rxbuf = kmalloc(bus->rxblen, GFP_ATOMIC);
+		if (!(bus->rxbuf)) {
+			brcmf_err("rxbuf allocation failed\n");
+			goto fail;
+		}
+	}
+
+	sdio_claim_host(bus->sdiodev->func[1]);
+
+	/* Disable F2 to clear any intermediate frame state on the dongle */
+	sdio_disable_func(bus->sdiodev->func[SDIO_FUNC_2]);
+
+	bus->rxflow = false;
+
+	/* Done with backplane-dependent accesses, can drop clock... */
+	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
+
+	sdio_release_host(bus->sdiodev->func[1]);
+
+	/* ...and initialize clock/power states */
+	bus->clkstate = CLK_SDONLY;
+	bus->idletime = BRCMF_IDLE_INTERVAL;
+	bus->idleclock = BRCMF_IDLE_ACTIVE;
+
+	/* SR state */
+	bus->sr_enabled = false;
+
+	brcmf_sdio_debugfs_create(bus);
+	brcmf_dbg(INFO, "completed!!\n");
+
+	ret = brcmf_sdio_get_fwnames(bus->ci, sdiodev);
+	if (ret)
+		goto fail;
+
+	ret = brcmf_fw_get_firmwares(sdiodev->dev, BRCMF_FW_REQUEST_NVRAM,
+				     sdiodev->fw_name, sdiodev->nvram_name,
+				     brcmf_sdio_firmware_callback);
+	if (ret != 0) {
+		brcmf_err("async firmware request failed: %d\n", ret);
+		goto fail;
+	}
+
+	return bus;
+
+fail:
+	brcmf_sdio_remove(bus);
+	return NULL;
+}
+
+/* Detach and free everything */
+void brcmf_sdio_remove(struct brcmf_sdio *bus)
+{
+	brcmf_dbg(TRACE, "Enter\n");
+
+	if (bus) {
+		/* De-register interrupt handler */
+		brcmf_sdiod_intr_unregister(bus->sdiodev);
+
+		brcmf_detach(bus->sdiodev->dev);
+
+		cancel_work_sync(&bus->datawork);
+		if (bus->brcmf_wq)
+			destroy_workqueue(bus->brcmf_wq);
+
+		if (bus->ci) {
+			if (bus->sdiodev->state != BRCMF_SDIOD_NOMEDIUM) {
+				sdio_claim_host(bus->sdiodev->func[1]);
+				brcmf_sdio_wd_timer(bus, 0);
+				brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
+				/* Leave the device in state where it is
+				 * 'passive'. This is done by resetting all
+				 * necessary cores.
+				 */
+				msleep(20);
+				brcmf_chip_set_passive(bus->ci);
+				brcmf_sdio_clkctl(bus, CLK_NONE, false);
+				sdio_release_host(bus->sdiodev->func[1]);
+			}
+			brcmf_chip_detach(bus->ci);
+		}
+
+		kfree(bus->rxbuf);
+		kfree(bus->hdrbuf);
+		kfree(bus);
+	}
+
+	brcmf_dbg(TRACE, "Disconnected\n");
+}
+
+void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, uint wdtick)
+{
+	/* Totally stop the timer */
+	if (!wdtick && bus->wd_timer_valid) {
+		del_timer_sync(&bus->timer);
+		bus->wd_timer_valid = false;
+		bus->save_ms = wdtick;
+		return;
+	}
+
+	/* don't start the wd until fw is loaded */
+	if (bus->sdiodev->state != BRCMF_SDIOD_DATA)
+		return;
+
+	if (wdtick) {
+		if (bus->save_ms != BRCMF_WD_POLL_MS) {
+			if (bus->wd_timer_valid)
+				/* Stop timer and restart at new value */
+				del_timer_sync(&bus->timer);
+
+			/* Create timer again when watchdog period is
+			 * dynamically changed or in the first instance
+			 */
+			bus->timer.expires =
+				jiffies + msecs_to_jiffies(BRCMF_WD_POLL_MS);
+			add_timer(&bus->timer);
+
+		} else {
+			/* Re-arm the timer at the last watchdog period */
+			mod_timer(&bus->timer,
+				jiffies + msecs_to_jiffies(BRCMF_WD_POLL_MS));
+		}
+
+		bus->wd_timer_valid = true;
+		bus->save_ms = wdtick;
+	}
+}
+
+int brcmf_sdio_sleep(struct brcmf_sdio *bus, bool sleep)
+{
+	int ret;
+
+	sdio_claim_host(bus->sdiodev->func[1]);
+	ret = brcmf_sdio_bus_sleep(bus, sleep, false);
+	sdio_release_host(bus->sdiodev->func[1]);
+
+	return ret;
+}
+
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio.h
new file mode 100644
index 0000000..7328478
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio.h
@@ -0,0 +1,377 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef	BRCMFMAC_SDIO_H
+#define	BRCMFMAC_SDIO_H
+
+#include <linux/skbuff.h>
+#include <linux/firmware.h>
+#include "firmware.h"
+
+#define SDIO_FUNC_0		0
+#define SDIO_FUNC_1		1
+#define SDIO_FUNC_2		2
+
+#define SDIOD_FBR_SIZE		0x100
+
+/* io_en */
+#define SDIO_FUNC_ENABLE_1	0x02
+#define SDIO_FUNC_ENABLE_2	0x04
+
+/* io_rdys */
+#define SDIO_FUNC_READY_1	0x02
+#define SDIO_FUNC_READY_2	0x04
+
+/* intr_status */
+#define INTR_STATUS_FUNC1	0x2
+#define INTR_STATUS_FUNC2	0x4
+
+/* Maximum number of I/O funcs */
+#define SDIOD_MAX_IOFUNCS	7
+
+/* mask of register map */
+#define REG_F0_REG_MASK		0x7FF
+#define REG_F1_MISC_MASK	0x1FFFF
+
+/* as of sdiod rev 0, supports 3 functions */
+#define SBSDIO_NUM_FUNCTION		3
+
+/* function 0 vendor specific CCCR registers */
+#define SDIO_CCCR_BRCM_CARDCAP			0xf0
+#define SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT	0x02
+#define SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT	0x04
+#define SDIO_CCCR_BRCM_CARDCAP_CMD_NODEC	0x08
+#define SDIO_CCCR_BRCM_CARDCTRL		0xf1
+#define SDIO_CCCR_BRCM_CARDCTRL_WLANRESET	0x02
+#define SDIO_CCCR_BRCM_SEPINT			0xf2
+
+#define  SDIO_SEPINT_MASK		0x01
+#define  SDIO_SEPINT_OE			0x02
+#define  SDIO_SEPINT_ACT_HI		0x04
+
+/* function 1 miscellaneous registers */
+
+/* sprom command and status */
+#define SBSDIO_SPROM_CS			0x10000
+/* sprom info register */
+#define SBSDIO_SPROM_INFO		0x10001
+/* sprom indirect access data byte 0 */
+#define SBSDIO_SPROM_DATA_LOW		0x10002
+/* sprom indirect access data byte 1 */
+#define SBSDIO_SPROM_DATA_HIGH		0x10003
+/* sprom indirect access addr byte 0 */
+#define SBSDIO_SPROM_ADDR_LOW		0x10004
+/* gpio select */
+#define SBSDIO_GPIO_SELECT		0x10005
+/* gpio output */
+#define SBSDIO_GPIO_OUT			0x10006
+/* gpio enable */
+#define SBSDIO_GPIO_EN			0x10007
+/* rev < 7, watermark for sdio device */
+#define SBSDIO_WATERMARK		0x10008
+/* control busy signal generation */
+#define SBSDIO_DEVICE_CTL		0x10009
+
+/* SB Address Window Low (b15) */
+#define SBSDIO_FUNC1_SBADDRLOW		0x1000A
+/* SB Address Window Mid (b23:b16) */
+#define SBSDIO_FUNC1_SBADDRMID		0x1000B
+/* SB Address Window High (b31:b24)    */
+#define SBSDIO_FUNC1_SBADDRHIGH		0x1000C
+/* Frame Control (frame term/abort) */
+#define SBSDIO_FUNC1_FRAMECTRL		0x1000D
+/* ChipClockCSR (ALP/HT ctl/status) */
+#define SBSDIO_FUNC1_CHIPCLKCSR		0x1000E
+/* SdioPullUp (on cmd, d0-d2) */
+#define SBSDIO_FUNC1_SDIOPULLUP		0x1000F
+/* Write Frame Byte Count Low */
+#define SBSDIO_FUNC1_WFRAMEBCLO		0x10019
+/* Write Frame Byte Count High */
+#define SBSDIO_FUNC1_WFRAMEBCHI		0x1001A
+/* Read Frame Byte Count Low */
+#define SBSDIO_FUNC1_RFRAMEBCLO		0x1001B
+/* Read Frame Byte Count High */
+#define SBSDIO_FUNC1_RFRAMEBCHI		0x1001C
+/* MesBusyCtl (rev 11) */
+#define SBSDIO_FUNC1_MESBUSYCTRL	0x1001D
+/* Sdio Core Rev 12 */
+#define SBSDIO_FUNC1_WAKEUPCTRL		0x1001E
+#define SBSDIO_FUNC1_WCTRL_ALPWAIT_MASK		0x1
+#define SBSDIO_FUNC1_WCTRL_ALPWAIT_SHIFT	0
+#define SBSDIO_FUNC1_WCTRL_HTWAIT_MASK		0x2
+#define SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT		1
+#define SBSDIO_FUNC1_SLEEPCSR		0x1001F
+#define SBSDIO_FUNC1_SLEEPCSR_KSO_MASK		0x1
+#define SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT		0
+#define SBSDIO_FUNC1_SLEEPCSR_KSO_EN		1
+#define SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK	0x2
+#define SBSDIO_FUNC1_SLEEPCSR_DEVON_SHIFT	1
+
+#define SBSDIO_FUNC1_MISC_REG_START	0x10000	/* f1 misc register start */
+#define SBSDIO_FUNC1_MISC_REG_LIMIT	0x1001F	/* f1 misc register end */
+
+/* function 1 OCP space */
+
+/* sb offset addr is <= 15 bits, 32k */
+#define SBSDIO_SB_OFT_ADDR_MASK		0x07FFF
+#define SBSDIO_SB_OFT_ADDR_LIMIT	0x08000
+/* with b15, maps to 32-bit SB access */
+#define SBSDIO_SB_ACCESS_2_4B_FLAG	0x08000
+
+/* valid bits in SBSDIO_FUNC1_SBADDRxxx regs */
+
+#define SBSDIO_SBADDRLOW_MASK		0x80	/* Valid bits in SBADDRLOW */
+#define SBSDIO_SBADDRMID_MASK		0xff	/* Valid bits in SBADDRMID */
+#define SBSDIO_SBADDRHIGH_MASK		0xffU	/* Valid bits in SBADDRHIGH */
+/* Address bits from SBADDR regs */
+#define SBSDIO_SBWINDOW_MASK		0xffff8000
+
+#define SDIOH_READ              0	/* Read request */
+#define SDIOH_WRITE             1	/* Write request */
+
+#define SDIOH_DATA_FIX          0	/* Fixed addressing */
+#define SDIOH_DATA_INC          1	/* Incremental addressing */
+
+/* internal return code */
+#define SUCCESS	0
+#define ERROR	1
+
+/* Packet alignment for most efficient SDIO (can change based on platform) */
+#define BRCMF_SDALIGN	(1 << 6)
+
+/* watchdog polling interval in ms */
+#define BRCMF_WD_POLL_MS	10
+
+/**
+ * enum brcmf_sdiod_state - the state of the bus.
+ *
+ * @BRCMF_SDIOD_DOWN: Device can be accessed, no DPC.
+ * @BRCMF_SDIOD_DATA: Ready for data transfers, DPC enabled.
+ * @BRCMF_SDIOD_NOMEDIUM: No medium access to dongle possible.
+ */
+enum brcmf_sdiod_state {
+	BRCMF_SDIOD_DOWN,
+	BRCMF_SDIOD_DATA,
+	BRCMF_SDIOD_NOMEDIUM
+};
+
+struct brcmf_sdreg {
+	int func;
+	int offset;
+	int value;
+};
+
+struct brcmf_sdio;
+struct brcmf_sdiod_freezer;
+
+struct brcmf_sdio_dev {
+	struct sdio_func *func[SDIO_MAX_FUNCS];
+	u8 num_funcs;			/* Supported funcs on client */
+	u32 sbwad;			/* Save backplane window address */
+	struct brcmf_sdio *bus;
+	struct device *dev;
+	struct brcmf_bus *bus_if;
+	struct brcmfmac_sdio_platform_data *pdata;
+	bool oob_irq_requested;
+	bool irq_en;			/* irq enable flags */
+	spinlock_t irq_en_lock;
+	bool irq_wake;			/* irq wake enable flags */
+	bool sg_support;
+	uint max_request_size;
+	ushort max_segment_count;
+	uint max_segment_size;
+	uint txglomsz;
+	struct sg_table sgtable;
+	char fw_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN];
+	char nvram_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN];
+	bool wowl_enabled;
+	enum brcmf_sdiod_state state;
+	struct brcmf_sdiod_freezer *freezer;
+};
+
+/* sdio core registers */
+struct sdpcmd_regs {
+	u32 corecontrol;		/* 0x00, rev8 */
+	u32 corestatus;			/* rev8 */
+	u32 PAD[1];
+	u32 biststatus;			/* rev8 */
+
+	/* PCMCIA access */
+	u16 pcmciamesportaladdr;	/* 0x010, rev8 */
+	u16 PAD[1];
+	u16 pcmciamesportalmask;	/* rev8 */
+	u16 PAD[1];
+	u16 pcmciawrframebc;		/* rev8 */
+	u16 PAD[1];
+	u16 pcmciaunderflowtimer;	/* rev8 */
+	u16 PAD[1];
+
+	/* interrupt */
+	u32 intstatus;			/* 0x020, rev8 */
+	u32 hostintmask;		/* rev8 */
+	u32 intmask;			/* rev8 */
+	u32 sbintstatus;		/* rev8 */
+	u32 sbintmask;			/* rev8 */
+	u32 funcintmask;		/* rev4 */
+	u32 PAD[2];
+	u32 tosbmailbox;		/* 0x040, rev8 */
+	u32 tohostmailbox;		/* rev8 */
+	u32 tosbmailboxdata;		/* rev8 */
+	u32 tohostmailboxdata;		/* rev8 */
+
+	/* synchronized access to registers in SDIO clock domain */
+	u32 sdioaccess;			/* 0x050, rev8 */
+	u32 PAD[3];
+
+	/* PCMCIA frame control */
+	u8 pcmciaframectrl;		/* 0x060, rev8 */
+	u8 PAD[3];
+	u8 pcmciawatermark;		/* rev8 */
+	u8 PAD[155];
+
+	/* interrupt batching control */
+	u32 intrcvlazy;			/* 0x100, rev8 */
+	u32 PAD[3];
+
+	/* counters */
+	u32 cmd52rd;			/* 0x110, rev8 */
+	u32 cmd52wr;			/* rev8 */
+	u32 cmd53rd;			/* rev8 */
+	u32 cmd53wr;			/* rev8 */
+	u32 abort;			/* rev8 */
+	u32 datacrcerror;		/* rev8 */
+	u32 rdoutofsync;		/* rev8 */
+	u32 wroutofsync;		/* rev8 */
+	u32 writebusy;			/* rev8 */
+	u32 readwait;			/* rev8 */
+	u32 readterm;			/* rev8 */
+	u32 writeterm;			/* rev8 */
+	u32 PAD[40];
+	u32 clockctlstatus;		/* rev8 */
+	u32 PAD[7];
+
+	u32 PAD[128];			/* DMA engines */
+
+	/* SDIO/PCMCIA CIS region */
+	char cis[512];			/* 0x400-0x5ff, rev6 */
+
+	/* PCMCIA function control registers */
+	char pcmciafcr[256];		/* 0x600-6ff, rev6 */
+	u16 PAD[55];
+
+	/* PCMCIA backplane access */
+	u16 backplanecsr;		/* 0x76E, rev6 */
+	u16 backplaneaddr0;		/* rev6 */
+	u16 backplaneaddr1;		/* rev6 */
+	u16 backplaneaddr2;		/* rev6 */
+	u16 backplaneaddr3;		/* rev6 */
+	u16 backplanedata0;		/* rev6 */
+	u16 backplanedata1;		/* rev6 */
+	u16 backplanedata2;		/* rev6 */
+	u16 backplanedata3;		/* rev6 */
+	u16 PAD[31];
+
+	/* sprom "size" & "blank" info */
+	u16 spromstatus;		/* 0x7BE, rev2 */
+	u32 PAD[464];
+
+	u16 PAD[0x80];
+};
+
+/* Register/deregister interrupt handler. */
+int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev);
+
+/* sdio device register access interface */
+u8 brcmf_sdiod_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
+u32 brcmf_sdiod_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
+void brcmf_sdiod_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr, u8 data,
+		       int *ret);
+void brcmf_sdiod_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr, u32 data,
+		       int *ret);
+
+/* Buffer transfer to/from device (client) core via cmd53.
+ *   pkt/pktq: packet, or queue of packets, to transfer to/from the device
+ *   buf:      pointer to memory data buffer
+ *   nbytes:   number of bytes to transfer to/from buf
+ *   totlen:   total length expected when receiving a packet chain
+ * Returns 0 or error code.
+ * NOTE: Async operation is not currently supported.
+ */
+int brcmf_sdiod_send_pkt(struct brcmf_sdio_dev *sdiodev,
+			 struct sk_buff_head *pktq);
+int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes);
+
+int brcmf_sdiod_recv_pkt(struct brcmf_sdio_dev *sdiodev, struct sk_buff *pkt);
+int brcmf_sdiod_recv_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes);
+int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
+			   struct sk_buff_head *pktq, uint totlen);
+
+/* Flags bits */
+
+/* Four-byte target (backplane) width (vs. two-byte) */
+#define SDIO_REQ_4BYTE	0x1
+/* Fixed address (FIFO) (vs. incrementing address) */
+#define SDIO_REQ_FIXED	0x2
+
+/* Read/write to memory block (F1, no FIFO) via CMD53 (sync only).
+ *   write:    false for read, true for write
+ *   address:  direct SDIO address
+ *   data:     pointer to memory data buffer
+ *   size:     number of bytes to transfer to/from data
+ * Returns 0 or error code.
+ */
+int brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
+		      u8 *data, uint size);
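+
+/* Example (sketch): brcmf_sdio_buscore_activate() uses this helper to write
+ * the firmware reset vector into device RAM at backplane address 0; with a
+ * hypothetical local u32 "rstvec" the call would look like:
+ *
+ *	int err;
+ *
+ *	err = brcmf_sdiod_ramrw(sdiodev, true, 0, (u8 *)&rstvec,
+ *				sizeof(rstvec));
+ *	if (err)
+ *		brcmf_err("reset vector write failed, err=%d\n", err);
+ */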
+
+/* Issue an abort to the specified function */
+int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
+void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
+			      enum brcmf_sdiod_state state);
+#ifdef CONFIG_PM_SLEEP
+bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev);
+void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev);
+void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev);
+void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev);
+#else
+static inline bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev)
+{
+	return false;
+}
+static inline void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev)
+{
+}
+static inline void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev)
+{
+}
+static inline void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev)
+{
+}
+#endif /* CONFIG_PM_SLEEP */
+
+struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev);
+void brcmf_sdio_remove(struct brcmf_sdio *bus);
+void brcmf_sdio_isr(struct brcmf_sdio *bus);
+
+void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, uint wdtick);
+void brcmf_sdio_wowl_config(struct device *dev, bool enabled);
+int brcmf_sdio_sleep(struct brcmf_sdio *bus, bool sleep);
+void brcmf_sdio_trigger_dpc(struct brcmf_sdio *bus);
+
+#endif /* BRCMFMAC_SDIO_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.c b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.c
new file mode 100644
index 0000000..a10f35c
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h> /* bug in tracepoint.h, it should include this */
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "tracepoint.h"
+
+void __brcmf_err(const char *func, const char *fmt, ...)
+{
+	struct va_format vaf = {
+		.fmt = fmt,
+	};
+	va_list args;
+
+	va_start(args, fmt);
+	vaf.va = &args;
+	pr_err("%s: %pV", func, &vaf);
+	trace_brcmf_err(func, &vaf);
+	va_end(args);
+}
+
+#endif
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h
new file mode 100644
index 0000000..4d7d51f
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2013 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#if !defined(BRCMF_TRACEPOINT_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define BRCMF_TRACEPOINT_H_
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#ifndef CONFIG_BRCM_TRACING
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(...)
+
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(evt_class, name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+
+#endif /* CONFIG_BRCM_TRACING */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM	brcmfmac
+
+#define MAX_MSG_LEN		100
+
+TRACE_EVENT(brcmf_err,
+	TP_PROTO(const char *func, struct va_format *vaf),
+	TP_ARGS(func, vaf),
+	TP_STRUCT__entry(
+		__string(func, func)
+		__dynamic_array(char, msg, MAX_MSG_LEN)
+	),
+	TP_fast_assign(
+		__assign_str(func, func);
+		WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+				       MAX_MSG_LEN, vaf->fmt,
+				       *vaf->va) >= MAX_MSG_LEN);
+	),
+	TP_printk("%s: %s", __get_str(func), __get_str(msg))
+);
+
+TRACE_EVENT(brcmf_dbg,
+	TP_PROTO(u32 level, const char *func, struct va_format *vaf),
+	TP_ARGS(level, func, vaf),
+	TP_STRUCT__entry(
+		__field(u32, level)
+		__string(func, func)
+		__dynamic_array(char, msg, MAX_MSG_LEN)
+	),
+	TP_fast_assign(
+		__entry->level = level;
+		__assign_str(func, func);
+		WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+				       MAX_MSG_LEN, vaf->fmt,
+				       *vaf->va) >= MAX_MSG_LEN);
+	),
+	TP_printk("%s: %s", __get_str(func), __get_str(msg))
+);
+
+TRACE_EVENT(brcmf_hexdump,
+	TP_PROTO(void *data, size_t len),
+	TP_ARGS(data, len),
+	TP_STRUCT__entry(
+		__field(unsigned long, len)
+		__field(unsigned long, addr)
+		__dynamic_array(u8, hdata, len)
+	),
+	TP_fast_assign(
+		__entry->len = len;
+		__entry->addr = (unsigned long)data;
+		memcpy(__get_dynamic_array(hdata), data, len);
+	),
+	TP_printk("hexdump [addr=%lx, length=%lu]", __entry->addr, __entry->len)
+);
+
+TRACE_EVENT(brcmf_bcdchdr,
+	TP_PROTO(void *data),
+	TP_ARGS(data),
+	TP_STRUCT__entry(
+		__field(u8, flags)
+		__field(u8, prio)
+		__field(u8, flags2)
+		__field(u32, siglen)
+		__dynamic_array(u8, signal, *((u8 *)data + 3) * 4)
+	),
+	TP_fast_assign(
+		__entry->flags = *(u8 *)data;
+		__entry->prio = *((u8 *)data + 1);
+		__entry->flags2 = *((u8 *)data + 2);
+		__entry->siglen = *((u8 *)data + 3) * 4;
+		memcpy(__get_dynamic_array(signal),
+		       (u8 *)data + 4, __entry->siglen);
+	),
+	TP_printk("bcdc: prio=%d siglen=%d", __entry->prio, __entry->siglen)
+);
+
+#ifndef SDPCM_RX
+#define SDPCM_RX	0
+#endif
+#ifndef SDPCM_TX
+#define SDPCM_TX	1
+#endif
+#ifndef SDPCM_GLOM
+#define SDPCM_GLOM	2
+#endif
+
+TRACE_EVENT(brcmf_sdpcm_hdr,
+	TP_PROTO(u8 dir, void *data),
+	TP_ARGS(dir, data),
+	TP_STRUCT__entry(
+		__field(u8, dir)
+		__field(u16, len)
+		__dynamic_array(u8, hdr, dir == SDPCM_GLOM ? 20 : 12)
+	),
+	TP_fast_assign(
+		memcpy(__get_dynamic_array(hdr), data, dir == SDPCM_GLOM ? 20 : 12);
+		__entry->len = *(u8 *)data | (*((u8 *)data + 1) << 8);
+		__entry->dir = dir;
+	),
+	TP_printk("sdpcm: %s len %u, seq %d",
+		  __entry->dir == SDPCM_RX ? "RX" : "TX",
+		  __entry->len, ((u8 *)__get_dynamic_array(hdr))[4])
+);
+
+#ifdef CONFIG_BRCM_TRACING
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE tracepoint
+
+#include <trace/define_trace.h>
+
+#endif /* CONFIG_BRCM_TRACING */
+
+#endif /* BRCMF_TRACEPOINT_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
new file mode 100644
index 0000000..689e64d
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -0,0 +1,1535 @@
+/*
+ * Copyright (c) 2011 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <linux/usb.h>
+#include <linux/vmalloc.h>
+
+#include <brcmu_utils.h>
+#include <brcm_hw_ids.h>
+#include <brcmu_wifi.h>
+#include "bus.h"
+#include "debug.h"
+#include "firmware.h"
+#include "usb.h"
+
+
+#define IOCTL_RESP_TIMEOUT		2000
+
+#define BRCMF_USB_RESET_GETVER_SPINWAIT	100	/* in units of ms */
+#define BRCMF_USB_RESET_GETVER_LOOP_CNT	10
+
+#define BRCMF_POSTBOOT_ID		0xA123  /* ID to detect if dongle
+						   has booted up */
+#define BRCMF_USB_NRXQ			50
+#define BRCMF_USB_NTXQ			50
+
+#define BRCMF_USB_CBCTL_WRITE		0
+#define BRCMF_USB_CBCTL_READ		1
+#define BRCMF_USB_MAX_PKT_SIZE		1600
+
+#define BRCMF_USB_43143_FW_NAME		"brcm/brcmfmac43143.bin"
+#define BRCMF_USB_43236_FW_NAME		"brcm/brcmfmac43236b.bin"
+#define BRCMF_USB_43242_FW_NAME		"brcm/brcmfmac43242a.bin"
+#define BRCMF_USB_43569_FW_NAME		"brcm/brcmfmac43569.bin"
+
+#define TRX_MAGIC		0x30524448	/* "HDR0" */
+#define TRX_MAX_OFFSET		3		/* Max number of file offsets */
+#define TRX_UNCOMP_IMAGE	0x20		/* Trx holds uncompressed img */
+#define TRX_RDL_CHUNK		1500		/* size of each dl transfer */
+#define TRX_OFFSETS_DLFWLEN_IDX	0
+
+/* Control messages: bRequest values */
+#define DL_GETSTATE	0	/* returns the rdl_state_t struct */
+#define DL_CHECK_CRC	1	/* currently unused */
+#define DL_GO		2	/* execute downloaded image */
+#define DL_START	3	/* initialize dl state */
+#define DL_REBOOT	4	/* reboot the device in 2 seconds */
+#define DL_GETVER	5	/* returns the bootrom_id_t struct */
+#define DL_GO_PROTECTED	6	/* execute the downloaded code and set reset
+				 * event to occur in 2 seconds.  It is the
+				 * responsibility of the downloaded code to
+				 * clear this event
+				 */
+#define DL_EXEC		7	/* jump to a supplied address */
+#define DL_RESETCFG	8	/* To support single enum on dongle
+				 * - Not used by bootloader
+				 */
+#define DL_DEFER_RESP_OK 9	/* Potentially defer the response to setup
+				 * if resp unavailable
+				 */
+
+/* states */
+#define DL_WAITING	0	/* waiting to rx first pkt */
+#define DL_READY	1	/* hdr was good, waiting for more of the
+				 * compressed image
+				 */
+#define DL_BAD_HDR	2	/* hdr was corrupted */
+#define DL_BAD_CRC	3	/* compressed image was corrupted */
+#define DL_RUNNABLE	4	/* download was successful, waiting for go cmd */
+#define DL_START_FAIL	5	/* failed to initialize correctly */
+#define DL_NVRAM_TOOBIG	6	/* host specified nvram data exceeds DL_NVRAM
+				 * value
+				 */
+#define DL_IMAGE_TOOBIG	7	/* firmware image too big */
+
+
+struct trx_header_le {
+	__le32 magic;		/* "HDR0" */
+	__le32 len;		/* Length of file including header */
+	__le32 crc32;		/* CRC from flag_version to end of file */
+	__le32 flag_version;	/* 0:15 flags, 16:31 version */
+	__le32 offsets[TRX_MAX_OFFSET];	/* Offsets of partitions from start of
+					 * header
+					 */
+};
+
+struct rdl_state_le {
+	__le32 state;
+	__le32 bytes;
+};
+
+struct bootrom_id_le {
+	__le32 chip;		/* Chip id */
+	__le32 chiprev;		/* Chip rev */
+	__le32 ramsize;		/* Size of RAM */
+	__le32 remapbase;	/* Current remap base address */
+	__le32 boardtype;	/* Type of board */
+	__le32 boardrev;	/* Board revision */
+};
+
+struct brcmf_usb_image {
+	struct list_head list;
+	s8 *fwname;
+	u8 *image;
+	int image_len;
+};
+
+struct brcmf_usbdev_info {
+	struct brcmf_usbdev bus_pub; /* MUST BE FIRST */
+	spinlock_t qlock;
+	struct list_head rx_freeq;
+	struct list_head rx_postq;
+	struct list_head tx_freeq;
+	struct list_head tx_postq;
+	uint rx_pipe, tx_pipe;
+
+	int rx_low_watermark;
+	int tx_low_watermark;
+	int tx_high_watermark;
+	int tx_freecount;
+	bool tx_flowblock;
+	spinlock_t tx_flowblock_lock;
+
+	struct brcmf_usbreq *tx_reqs;
+	struct brcmf_usbreq *rx_reqs;
+
+	const u8 *image;	/* buffer for combined fw and nvram */
+	int image_len;
+
+	struct usb_device *usbdev;
+	struct device *dev;
+	struct mutex dev_init_lock;
+
+	int ctl_in_pipe, ctl_out_pipe;
+	struct urb *ctl_urb; /* URB for control endpoint */
+	struct usb_ctrlrequest ctl_write;
+	struct usb_ctrlrequest ctl_read;
+	u32 ctl_urb_actual_length;
+	int ctl_urb_status;
+	int ctl_completed;
+	wait_queue_head_t ioctl_resp_wait;
+	ulong ctl_op;
+	u8 ifnum;
+
+	struct urb *bulk_urb; /* used for FW download */
+
+	bool wowl_enabled;
+};
+
+static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo,
+				struct brcmf_usbreq  *req);
+
+static struct brcmf_usbdev *brcmf_usb_get_buspub(struct device *dev)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	return bus_if->bus_priv.usb;
+}
+
+static struct brcmf_usbdev_info *brcmf_usb_get_businfo(struct device *dev)
+{
+	return brcmf_usb_get_buspub(dev)->devinfo;
+}
+
+static int brcmf_usb_ioctl_resp_wait(struct brcmf_usbdev_info *devinfo)
+{
+	return wait_event_timeout(devinfo->ioctl_resp_wait,
+				  devinfo->ctl_completed,
+				  msecs_to_jiffies(IOCTL_RESP_TIMEOUT));
+}
+
+static void brcmf_usb_ioctl_resp_wake(struct brcmf_usbdev_info *devinfo)
+{
+	if (waitqueue_active(&devinfo->ioctl_resp_wait))
+		wake_up(&devinfo->ioctl_resp_wait);
+}
+
+static void
+brcmf_usb_ctl_complete(struct brcmf_usbdev_info *devinfo, int type, int status)
+{
+	brcmf_dbg(USB, "Enter, status=%d\n", status);
+
+	if (unlikely(devinfo == NULL))
+		return;
+
+	if (type == BRCMF_USB_CBCTL_READ) {
+		if (status == 0)
+			devinfo->bus_pub.stats.rx_ctlpkts++;
+		else
+			devinfo->bus_pub.stats.rx_ctlerrs++;
+	} else if (type == BRCMF_USB_CBCTL_WRITE) {
+		if (status == 0)
+			devinfo->bus_pub.stats.tx_ctlpkts++;
+		else
+			devinfo->bus_pub.stats.tx_ctlerrs++;
+	}
+
+	devinfo->ctl_urb_status = status;
+	devinfo->ctl_completed = true;
+	brcmf_usb_ioctl_resp_wake(devinfo);
+}
+
+static void
+brcmf_usb_ctlread_complete(struct urb *urb)
+{
+	struct brcmf_usbdev_info *devinfo =
+		(struct brcmf_usbdev_info *)urb->context;
+
+	brcmf_dbg(USB, "Enter\n");
+	devinfo->ctl_urb_actual_length = urb->actual_length;
+	brcmf_usb_ctl_complete(devinfo, BRCMF_USB_CBCTL_READ,
+		urb->status);
+}
+
+static void
+brcmf_usb_ctlwrite_complete(struct urb *urb)
+{
+	struct brcmf_usbdev_info *devinfo =
+		(struct brcmf_usbdev_info *)urb->context;
+
+	brcmf_dbg(USB, "Enter\n");
+	brcmf_usb_ctl_complete(devinfo, BRCMF_USB_CBCTL_WRITE,
+		urb->status);
+}
+
+static int
+brcmf_usb_send_ctl(struct brcmf_usbdev_info *devinfo, u8 *buf, int len)
+{
+	int ret;
+	u16 size;
+
+	brcmf_dbg(USB, "Enter\n");
+	if (devinfo == NULL || buf == NULL ||
+	    len == 0 || devinfo->ctl_urb == NULL)
+		return -EINVAL;
+
+	size = len;
+	devinfo->ctl_write.wLength = cpu_to_le16p(&size);
+	devinfo->ctl_urb->transfer_buffer_length = size;
+	devinfo->ctl_urb_status = 0;
+	devinfo->ctl_urb_actual_length = 0;
+
+	usb_fill_control_urb(devinfo->ctl_urb,
+		devinfo->usbdev,
+		devinfo->ctl_out_pipe,
+		(unsigned char *) &devinfo->ctl_write,
+		buf, size,
+		(usb_complete_t)brcmf_usb_ctlwrite_complete,
+		devinfo);
+
+	ret = usb_submit_urb(devinfo->ctl_urb, GFP_ATOMIC);
+	if (ret < 0)
+		brcmf_err("usb_submit_urb failed %d\n", ret);
+
+	return ret;
+}
+
+static int
+brcmf_usb_recv_ctl(struct brcmf_usbdev_info *devinfo, u8 *buf, int len)
+{
+	int ret;
+	u16 size;
+
+	brcmf_dbg(USB, "Enter\n");
+	if ((devinfo == NULL) || (buf == NULL) || (len == 0)
+		|| (devinfo->ctl_urb == NULL))
+		return -EINVAL;
+
+	size = len;
+	devinfo->ctl_read.wLength = cpu_to_le16p(&size);
+	devinfo->ctl_urb->transfer_buffer_length = size;
+
+	devinfo->ctl_read.bRequestType = USB_DIR_IN
+		| USB_TYPE_CLASS | USB_RECIP_INTERFACE;
+	devinfo->ctl_read.bRequest = 1;
+
+	usb_fill_control_urb(devinfo->ctl_urb,
+		devinfo->usbdev,
+		devinfo->ctl_in_pipe,
+		(unsigned char *) &devinfo->ctl_read,
+		buf, size,
+		(usb_complete_t)brcmf_usb_ctlread_complete,
+		devinfo);
+
+	ret = usb_submit_urb(devinfo->ctl_urb, GFP_ATOMIC);
+	if (ret < 0)
+		brcmf_err("usb_submit_urb failed %d\n", ret);
+
+	return ret;
+}
+
+static int brcmf_usb_tx_ctlpkt(struct device *dev, u8 *buf, u32 len)
+{
+	int err = 0;
+	int timeout = 0;
+	struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
+
+	brcmf_dbg(USB, "Enter\n");
+	if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP)
+		return -EIO;
+
+	if (test_and_set_bit(0, &devinfo->ctl_op))
+		return -EIO;
+
+	devinfo->ctl_completed = false;
+	err = brcmf_usb_send_ctl(devinfo, buf, len);
+	if (err) {
+		brcmf_err("send ctl failed, err=%d len=%d\n", err, len);
+		clear_bit(0, &devinfo->ctl_op);
+		return err;
+	}
+	timeout = brcmf_usb_ioctl_resp_wait(devinfo);
+	clear_bit(0, &devinfo->ctl_op);
+	if (!timeout) {
+		brcmf_err("Txctl wait timed out\n");
+		err = -EIO;
+	}
+	return err;
+}
+
+static int brcmf_usb_rx_ctlpkt(struct device *dev, u8 *buf, u32 len)
+{
+	int err = 0;
+	int timeout = 0;
+	struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
+
+	brcmf_dbg(USB, "Enter\n");
+	if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP)
+		return -EIO;
+
+	if (test_and_set_bit(0, &devinfo->ctl_op))
+		return -EIO;
+
+	devinfo->ctl_completed = false;
+	err = brcmf_usb_recv_ctl(devinfo, buf, len);
+	if (err) {
+		brcmf_err("recv ctl failed, err=%d len=%d\n", err, len);
+		clear_bit(0, &devinfo->ctl_op);
+		return err;
+	}
+	timeout = brcmf_usb_ioctl_resp_wait(devinfo);
+	err = devinfo->ctl_urb_status;
+	clear_bit(0, &devinfo->ctl_op);
+	if (!timeout) {
+		brcmf_err("rxctl wait timed out\n");
+		err = -EIO;
+	}
+	if (!err)
+		return devinfo->ctl_urb_actual_length;
+	else
+		return err;
+}
+
+static struct brcmf_usbreq *brcmf_usb_deq(struct brcmf_usbdev_info *devinfo,
+					  struct list_head *q, int *counter)
+{
+	unsigned long flags;
+	struct brcmf_usbreq  *req;
+	spin_lock_irqsave(&devinfo->qlock, flags);
+	if (list_empty(q)) {
+		spin_unlock_irqrestore(&devinfo->qlock, flags);
+		return NULL;
+	}
+	req = list_entry(q->next, struct brcmf_usbreq, list);
+	list_del_init(q->next);
+	if (counter)
+		(*counter)--;
+	spin_unlock_irqrestore(&devinfo->qlock, flags);
+	return req;
+
+}
+
+static void brcmf_usb_enq(struct brcmf_usbdev_info *devinfo,
+			  struct list_head *q, struct brcmf_usbreq *req,
+			  int *counter)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&devinfo->qlock, flags);
+	list_add_tail(&req->list, q);
+	if (counter)
+		(*counter)++;
+	spin_unlock_irqrestore(&devinfo->qlock, flags);
+}
+
+static struct brcmf_usbreq *
+brcmf_usbdev_qinit(struct list_head *q, int qsize)
+{
+	int i;
+	struct brcmf_usbreq *req, *reqs;
+
+	reqs = kcalloc(qsize, sizeof(struct brcmf_usbreq), GFP_ATOMIC);
+	if (reqs == NULL)
+		return NULL;
+
+	req = reqs;
+
+	for (i = 0; i < qsize; i++) {
+		req->urb = usb_alloc_urb(0, GFP_ATOMIC);
+		if (!req->urb)
+			goto fail;
+
+		INIT_LIST_HEAD(&req->list);
+		list_add_tail(&req->list, q);
+		req++;
+	}
+	return reqs;
+fail:
+	brcmf_err("usb_alloc_urb failed\n");
+	while (!list_empty(q)) {
+		req = list_entry(q->next, struct brcmf_usbreq, list);
+		if (req)
+			usb_free_urb(req->urb);
+		list_del(q->next);
+	}
+	return NULL;
+
+}
+
+static void brcmf_usb_free_q(struct list_head *q, bool pending)
+{
+	struct brcmf_usbreq *req, *next;
+	int i = 0;
+	list_for_each_entry_safe(req, next, q, list) {
+		if (!req->urb) {
+			brcmf_err("bad req\n");
+			break;
+		}
+		i++;
+		if (pending) {
+			usb_kill_urb(req->urb);
+		} else {
+			usb_free_urb(req->urb);
+			list_del_init(&req->list);
+		}
+	}
+}
+
+static void brcmf_usb_del_fromq(struct brcmf_usbdev_info *devinfo,
+				struct brcmf_usbreq *req)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&devinfo->qlock, flags);
+	list_del_init(&req->list);
+	spin_unlock_irqrestore(&devinfo->qlock, flags);
+}
+
+
+static void brcmf_usb_tx_complete(struct urb *urb)
+{
+	struct brcmf_usbreq *req = (struct brcmf_usbreq *)urb->context;
+	struct brcmf_usbdev_info *devinfo = req->devinfo;
+	unsigned long flags;
+
+	brcmf_dbg(USB, "Enter, urb->status=%d, skb=%p\n", urb->status,
+		  req->skb);
+	brcmf_usb_del_fromq(devinfo, req);
+
+	brcmf_txcomplete(devinfo->dev, req->skb, urb->status == 0);
+	req->skb = NULL;
+	brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req, &devinfo->tx_freecount);
+	spin_lock_irqsave(&devinfo->tx_flowblock_lock, flags);
+	if (devinfo->tx_freecount > devinfo->tx_high_watermark &&
+		devinfo->tx_flowblock) {
+		brcmf_txflowblock(devinfo->dev, false);
+		devinfo->tx_flowblock = false;
+	}
+	spin_unlock_irqrestore(&devinfo->tx_flowblock_lock, flags);
+}
+
+static void brcmf_usb_rx_complete(struct urb *urb)
+{
+	struct brcmf_usbreq  *req = (struct brcmf_usbreq *)urb->context;
+	struct brcmf_usbdev_info *devinfo = req->devinfo;
+	struct sk_buff *skb;
+
+	brcmf_dbg(USB, "Enter, urb->status=%d\n", urb->status);
+	brcmf_usb_del_fromq(devinfo, req);
+	skb = req->skb;
+	req->skb = NULL;
+
+	/* zero length packets indicate usb "failure". Do not refill */
+	if (urb->status != 0 || !urb->actual_length) {
+		brcmu_pkt_buf_free_skb(skb);
+		brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
+		return;
+	}
+
+	if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP) {
+		skb_put(skb, urb->actual_length);
+		brcmf_rx_frame(devinfo->dev, skb);
+		brcmf_usb_rx_refill(devinfo, req);
+	} else {
+		brcmu_pkt_buf_free_skb(skb);
+		brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
+	}
+	return;
+
+}
+
+static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo,
+				struct brcmf_usbreq  *req)
+{
+	struct sk_buff *skb;
+	int ret;
+
+	if (!req || !devinfo)
+		return;
+
+	skb = dev_alloc_skb(devinfo->bus_pub.bus_mtu);
+	if (!skb) {
+		brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
+		return;
+	}
+	req->skb = skb;
+
+	usb_fill_bulk_urb(req->urb, devinfo->usbdev, devinfo->rx_pipe,
+			  skb->data, skb_tailroom(skb), brcmf_usb_rx_complete,
+			  req);
+	req->devinfo = devinfo;
+	brcmf_usb_enq(devinfo, &devinfo->rx_postq, req, NULL);
+
+	ret = usb_submit_urb(req->urb, GFP_ATOMIC);
+	if (ret) {
+		brcmf_usb_del_fromq(devinfo, req);
+		brcmu_pkt_buf_free_skb(req->skb);
+		req->skb = NULL;
+		brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
+	}
+	return;
+}
+
+static void brcmf_usb_rx_fill_all(struct brcmf_usbdev_info *devinfo)
+{
+	struct brcmf_usbreq *req;
+
+	if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP) {
+		brcmf_err("bus is not up, state=%d\n", devinfo->bus_pub.state);
+		return;
+	}
+	while ((req = brcmf_usb_deq(devinfo, &devinfo->rx_freeq, NULL)) != NULL)
+		brcmf_usb_rx_refill(devinfo, req);
+}
+
+static void
+brcmf_usb_state_change(struct brcmf_usbdev_info *devinfo, int state)
+{
+	struct brcmf_bus *bcmf_bus = devinfo->bus_pub.bus;
+	int old_state;
+
+	brcmf_dbg(USB, "Enter, current state=%d, new state=%d\n",
+		  devinfo->bus_pub.state, state);
+
+	if (devinfo->bus_pub.state == state)
+		return;
+
+	old_state = devinfo->bus_pub.state;
+	devinfo->bus_pub.state = state;
+
+	/* update state of upper layer */
+	if (state == BRCMFMAC_USB_STATE_DOWN) {
+		brcmf_dbg(USB, "DBUS is down\n");
+		brcmf_bus_change_state(bcmf_bus, BRCMF_BUS_DOWN);
+	} else if (state == BRCMFMAC_USB_STATE_UP) {
+		brcmf_dbg(USB, "DBUS is up\n");
+		brcmf_bus_change_state(bcmf_bus, BRCMF_BUS_UP);
+	} else {
+		brcmf_dbg(USB, "DBUS current state=%d\n", state);
+	}
+}
+
+static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
+{
+	struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
+	struct brcmf_usbreq  *req;
+	int ret;
+	unsigned long flags;
+
+	brcmf_dbg(USB, "Enter, skb=%p\n", skb);
+	if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP) {
+		ret = -EIO;
+		goto fail;
+	}
+
+	req = brcmf_usb_deq(devinfo, &devinfo->tx_freeq,
+					&devinfo->tx_freecount);
+	if (!req) {
+		brcmf_err("no req to send\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	req->skb = skb;
+	req->devinfo = devinfo;
+	usb_fill_bulk_urb(req->urb, devinfo->usbdev, devinfo->tx_pipe,
+			  skb->data, skb->len, brcmf_usb_tx_complete, req);
+	req->urb->transfer_flags |= URB_ZERO_PACKET;
+	brcmf_usb_enq(devinfo, &devinfo->tx_postq, req, NULL);
+	ret = usb_submit_urb(req->urb, GFP_ATOMIC);
+	if (ret) {
+		brcmf_err("brcmf_usb_tx usb_submit_urb FAILED\n");
+		brcmf_usb_del_fromq(devinfo, req);
+		req->skb = NULL;
+		brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req,
+			      &devinfo->tx_freecount);
+		goto fail;
+	}
+
+	spin_lock_irqsave(&devinfo->tx_flowblock_lock, flags);
+	if (devinfo->tx_freecount < devinfo->tx_low_watermark &&
+	    !devinfo->tx_flowblock) {
+		brcmf_txflowblock(dev, true);
+		devinfo->tx_flowblock = true;
+	}
+	spin_unlock_irqrestore(&devinfo->tx_flowblock_lock, flags);
+	return 0;
+
+fail:
+	return ret;
+}
+
+
+static int brcmf_usb_up(struct device *dev)
+{
+	struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
+
+	brcmf_dbg(USB, "Enter\n");
+	if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP)
+		return 0;
+
+	/* Success, indicate devinfo is fully up */
+	brcmf_usb_state_change(devinfo, BRCMFMAC_USB_STATE_UP);
+
+	if (devinfo->ctl_urb) {
+		devinfo->ctl_in_pipe = usb_rcvctrlpipe(devinfo->usbdev, 0);
+		devinfo->ctl_out_pipe = usb_sndctrlpipe(devinfo->usbdev, 0);
+
+		/* CTL Write */
+		devinfo->ctl_write.bRequestType =
+			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
+		devinfo->ctl_write.bRequest = 0;
+		devinfo->ctl_write.wValue = cpu_to_le16(0);
+		devinfo->ctl_write.wIndex = cpu_to_le16(devinfo->ifnum);
+
+		/* CTL Read */
+		devinfo->ctl_read.bRequestType =
+			USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
+		devinfo->ctl_read.bRequest = 1;
+		devinfo->ctl_read.wValue = cpu_to_le16(0);
+		devinfo->ctl_read.wIndex = cpu_to_le16(devinfo->ifnum);
+	}
+	brcmf_usb_rx_fill_all(devinfo);
+	return 0;
+}
+
+static void brcmf_cancel_all_urbs(struct brcmf_usbdev_info *devinfo)
+{
+	if (devinfo->ctl_urb)
+		usb_kill_urb(devinfo->ctl_urb);
+	if (devinfo->bulk_urb)
+		usb_kill_urb(devinfo->bulk_urb);
+	brcmf_usb_free_q(&devinfo->tx_postq, true);
+	brcmf_usb_free_q(&devinfo->rx_postq, true);
+}
+
+static void brcmf_usb_down(struct device *dev)
+{
+	struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
+
+	brcmf_dbg(USB, "Enter\n");
+	if (devinfo == NULL)
+		return;
+
+	if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_DOWN)
+		return;
+
+	brcmf_usb_state_change(devinfo, BRCMFMAC_USB_STATE_DOWN);
+
+	brcmf_cancel_all_urbs(devinfo);
+}
+
+static void
+brcmf_usb_sync_complete(struct urb *urb)
+{
+	struct brcmf_usbdev_info *devinfo =
+			(struct brcmf_usbdev_info *)urb->context;
+
+	devinfo->ctl_completed = true;
+	brcmf_usb_ioctl_resp_wake(devinfo);
+}
+
+static int brcmf_usb_dl_cmd(struct brcmf_usbdev_info *devinfo, u8 cmd,
+			    void *buffer, int buflen)
+{
+	int ret;
+	char *tmpbuf;
+	u16 size;
+
+	if ((!devinfo) || (devinfo->ctl_urb == NULL))
+		return -EINVAL;
+
+	tmpbuf = kmalloc(buflen, GFP_ATOMIC);
+	if (!tmpbuf)
+		return -ENOMEM;
+
+	size = buflen;
+	devinfo->ctl_urb->transfer_buffer_length = size;
+
+	devinfo->ctl_read.wLength = cpu_to_le16p(&size);
+	devinfo->ctl_read.bRequestType = USB_DIR_IN | USB_TYPE_VENDOR |
+		USB_RECIP_INTERFACE;
+	devinfo->ctl_read.bRequest = cmd;
+
+	usb_fill_control_urb(devinfo->ctl_urb,
+		devinfo->usbdev,
+		usb_rcvctrlpipe(devinfo->usbdev, 0),
+		(unsigned char *) &devinfo->ctl_read,
+		(void *) tmpbuf, size,
+		(usb_complete_t)brcmf_usb_sync_complete, devinfo);
+
+	devinfo->ctl_completed = false;
+	ret = usb_submit_urb(devinfo->ctl_urb, GFP_ATOMIC);
+	if (ret < 0) {
+		brcmf_err("usb_submit_urb failed %d\n", ret);
+		goto finalize;
+	}
+
+	if (!brcmf_usb_ioctl_resp_wait(devinfo)) {
+		usb_kill_urb(devinfo->ctl_urb);
+		ret = -ETIMEDOUT;
+	} else {
+		memcpy(buffer, tmpbuf, buflen);
+	}
+
+finalize:
+	kfree(tmpbuf);
+	return ret;
+}
+
+static bool
+brcmf_usb_dlneeded(struct brcmf_usbdev_info *devinfo)
+{
+	struct bootrom_id_le id;
+	u32 chipid, chiprev;
+
+	brcmf_dbg(USB, "Enter\n");
+
+	if (devinfo == NULL)
+		return false;
+
+	/* Check if firmware downloaded already by querying runtime ID */
+	id.chip = cpu_to_le32(0xDEAD);
+	brcmf_usb_dl_cmd(devinfo, DL_GETVER, &id, sizeof(id));
+
+	chipid = le32_to_cpu(id.chip);
+	chiprev = le32_to_cpu(id.chiprev);
+
+	if ((chipid & 0x4300) == 0x4300)
+		brcmf_dbg(USB, "chip %x rev 0x%x\n", chipid, chiprev);
+	else
+		brcmf_dbg(USB, "chip %d rev 0x%x\n", chipid, chiprev);
+	if (chipid == BRCMF_POSTBOOT_ID) {
+		brcmf_dbg(USB, "firmware already downloaded\n");
+		brcmf_usb_dl_cmd(devinfo, DL_RESETCFG, &id, sizeof(id));
+		return false;
+	} else {
+		devinfo->bus_pub.devid = chipid;
+		devinfo->bus_pub.chiprev = chiprev;
+	}
+	return true;
+}
+
+static int
+brcmf_usb_resetcfg(struct brcmf_usbdev_info *devinfo)
+{
+	struct bootrom_id_le id;
+	u32 loop_cnt;
+	int err;
+
+	brcmf_dbg(USB, "Enter\n");
+
+	loop_cnt = 0;
+	do {
+		mdelay(BRCMF_USB_RESET_GETVER_SPINWAIT);
+		loop_cnt++;
+		id.chip = cpu_to_le32(0xDEAD);       /* Get the ID */
+		err = brcmf_usb_dl_cmd(devinfo, DL_GETVER, &id, sizeof(id));
+		if ((err) && (err != -ETIMEDOUT))
+			return err;
+		if (id.chip == cpu_to_le32(BRCMF_POSTBOOT_ID))
+			break;
+	} while (loop_cnt < BRCMF_USB_RESET_GETVER_LOOP_CNT);
+
+	if (id.chip == cpu_to_le32(BRCMF_POSTBOOT_ID)) {
+		brcmf_dbg(USB, "postboot chip 0x%x/rev 0x%x\n",
+			  le32_to_cpu(id.chip), le32_to_cpu(id.chiprev));
+
+		brcmf_usb_dl_cmd(devinfo, DL_RESETCFG, &id, sizeof(id));
+		return 0;
+	} else {
+		brcmf_err("Cannot talk to Dongle. Firmware is not UP, %d ms\n",
+			  BRCMF_USB_RESET_GETVER_SPINWAIT * loop_cnt);
+		return -EINVAL;
+	}
+}
+
+
+static int
+brcmf_usb_dl_send_bulk(struct brcmf_usbdev_info *devinfo, void *buffer, int len)
+{
+	int ret;
+
+	if ((devinfo == NULL) || (devinfo->bulk_urb == NULL))
+		return -EINVAL;
+
+	/* Prepare the URB */
+	usb_fill_bulk_urb(devinfo->bulk_urb, devinfo->usbdev,
+			  devinfo->tx_pipe, buffer, len,
+			  (usb_complete_t)brcmf_usb_sync_complete, devinfo);
+
+	devinfo->bulk_urb->transfer_flags |= URB_ZERO_PACKET;
+
+	devinfo->ctl_completed = false;
+	ret = usb_submit_urb(devinfo->bulk_urb, GFP_ATOMIC);
+	if (ret) {
+		brcmf_err("usb_submit_urb failed %d\n", ret);
+		return ret;
+	}
+	ret = brcmf_usb_ioctl_resp_wait(devinfo);
+	return (ret == 0);
+}
+
+static int
+brcmf_usb_dl_writeimage(struct brcmf_usbdev_info *devinfo, u8 *fw, int fwlen)
+{
+	unsigned int sendlen, sent, dllen;
+	char *bulkchunk = NULL, *dlpos;
+	struct rdl_state_le state;
+	u32 rdlstate, rdlbytes;
+	int err = 0;
+
+	brcmf_dbg(USB, "Enter, fw %p, len %d\n", fw, fwlen);
+
+	bulkchunk = kmalloc(TRX_RDL_CHUNK, GFP_ATOMIC);
+	if (bulkchunk == NULL) {
+		err = -ENOMEM;
+		goto fail;
+	}
+
+	/* 1) Prepare USB boot loader for runtime image */
+	brcmf_usb_dl_cmd(devinfo, DL_START, &state, sizeof(state));
+
+	rdlstate = le32_to_cpu(state.state);
+	rdlbytes = le32_to_cpu(state.bytes);
+
+	/* 2) Check we are in the Waiting state */
+	if (rdlstate != DL_WAITING) {
+		brcmf_err("Failed to DL_START\n");
+		err = -EINVAL;
+		goto fail;
+	}
+	sent = 0;
+	dlpos = fw;
+	dllen = fwlen;
+
+	/* 3) Send the image down in chunks and poll the download state */
+	while (rdlbytes != dllen) {
+		/* Wait until the usb device reports it received all
+		 * the bytes we sent */
+		if ((rdlbytes == sent) && (rdlbytes != dllen)) {
+			if ((dllen-sent) < TRX_RDL_CHUNK)
+				sendlen = dllen-sent;
+			else
+				sendlen = TRX_RDL_CHUNK;
+
+			/* simply avoid having to send a ZLP by ensuring we
+			 * never have an even multiple of 64
+			 */
+			if (!(sendlen % 64))
+				sendlen -= 4;
+
+			/* send data */
+			memcpy(bulkchunk, dlpos, sendlen);
+			if (brcmf_usb_dl_send_bulk(devinfo, bulkchunk,
+						   sendlen)) {
+				brcmf_err("send_bulk failed\n");
+				err = -EINVAL;
+				goto fail;
+			}
+
+			dlpos += sendlen;
+			sent += sendlen;
+		}
+		err = brcmf_usb_dl_cmd(devinfo, DL_GETSTATE, &state,
+				       sizeof(state));
+		if (err) {
+			brcmf_err("DL_GETSTATE Failed\n");
+			goto fail;
+		}
+
+		rdlstate = le32_to_cpu(state.state);
+		rdlbytes = le32_to_cpu(state.bytes);
+
+		/* restart if an error is reported */
+		if (rdlstate == DL_BAD_HDR || rdlstate == DL_BAD_CRC) {
+			brcmf_err("Bad Hdr or Bad CRC state %d\n",
+				  rdlstate);
+			err = -EINVAL;
+			goto fail;
+		}
+	}
+
+fail:
+	kfree(bulkchunk);
+	brcmf_dbg(USB, "Exit, err=%d\n", err);
+	return err;
+}
+
+static int brcmf_usb_dlstart(struct brcmf_usbdev_info *devinfo, u8 *fw, int len)
+{
+	int err;
+
+	brcmf_dbg(USB, "Enter\n");
+
+	if (devinfo == NULL)
+		return -EINVAL;
+
+	if (devinfo->bus_pub.devid == 0xDEAD)
+		return -EINVAL;
+
+	err = brcmf_usb_dl_writeimage(devinfo, fw, len);
+	if (err == 0)
+		devinfo->bus_pub.state = BRCMFMAC_USB_STATE_DL_DONE;
+	else
+		devinfo->bus_pub.state = BRCMFMAC_USB_STATE_DL_FAIL;
+	brcmf_dbg(USB, "Exit, err=%d\n", err);
+
+	return err;
+}
+
+static int brcmf_usb_dlrun(struct brcmf_usbdev_info *devinfo)
+{
+	struct rdl_state_le state;
+
+	brcmf_dbg(USB, "Enter\n");
+	if (!devinfo)
+		return -EINVAL;
+
+	if (devinfo->bus_pub.devid == 0xDEAD)
+		return -EINVAL;
+
+	/* Check we are runnable */
+	state.state = 0;
+	brcmf_usb_dl_cmd(devinfo, DL_GETSTATE, &state, sizeof(state));
+
+	/* Start the image */
+	if (state.state == cpu_to_le32(DL_RUNNABLE)) {
+		if (brcmf_usb_dl_cmd(devinfo, DL_GO, &state, sizeof(state)))
+			return -ENODEV;
+		if (brcmf_usb_resetcfg(devinfo))
+			return -ENODEV;
+		/* The dongle may now re-enumerate. */
+	} else {
+		brcmf_err("Dongle not runnable\n");
+		return -EINVAL;
+	}
+	brcmf_dbg(USB, "Exit\n");
+	return 0;
+}
+
+static bool brcmf_usb_chip_support(int chipid, int chiprev)
+{
+	switch (chipid) {
+	case BRCM_CC_43143_CHIP_ID:
+		return true;
+	case BRCM_CC_43235_CHIP_ID:
+	case BRCM_CC_43236_CHIP_ID:
+	case BRCM_CC_43238_CHIP_ID:
+		return (chiprev == 3);
+	case BRCM_CC_43242_CHIP_ID:
+		return true;
+	case BRCM_CC_43566_CHIP_ID:
+	case BRCM_CC_43569_CHIP_ID:
+		return true;
+	default:
+		break;
+	}
+	return false;
+}
+
+static int
+brcmf_usb_fw_download(struct brcmf_usbdev_info *devinfo)
+{
+	int devid, chiprev;
+	int err;
+
+	brcmf_dbg(USB, "Enter\n");
+	if (devinfo == NULL)
+		return -ENODEV;
+
+	devid = devinfo->bus_pub.devid;
+	chiprev = devinfo->bus_pub.chiprev;
+
+	if (!brcmf_usb_chip_support(devid, chiprev)) {
+		brcmf_err("unsupported chip %d rev %d\n",
+			  devid, chiprev);
+		return -EINVAL;
+	}
+
+	if (!devinfo->image) {
+		brcmf_err("No firmware!\n");
+		return -ENOENT;
+	}
+
+	err = brcmf_usb_dlstart(devinfo,
+		(u8 *)devinfo->image, devinfo->image_len);
+	if (err == 0)
+		err = brcmf_usb_dlrun(devinfo);
+	return err;
+}
+
+
+static void brcmf_usb_detach(struct brcmf_usbdev_info *devinfo)
+{
+	brcmf_dbg(USB, "Enter, devinfo %p\n", devinfo);
+
+	/* free the URBS */
+	brcmf_usb_free_q(&devinfo->rx_freeq, false);
+	brcmf_usb_free_q(&devinfo->tx_freeq, false);
+
+	usb_free_urb(devinfo->ctl_urb);
+	usb_free_urb(devinfo->bulk_urb);
+
+	kfree(devinfo->tx_reqs);
+	kfree(devinfo->rx_reqs);
+}
+
+
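+/* Validate the TRX firmware header; return the total download length
+ * (header plus image) for an uncompressed image, or -1 on any mismatch.
+ */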
+static int check_file(const u8 *headers)
+{
+	struct trx_header_le *trx;
+	int actual_len = -1;
+
+	brcmf_dbg(USB, "Enter\n");
+	/* Extract trx header */
+	trx = (struct trx_header_le *) headers;
+	if (trx->magic != cpu_to_le32(TRX_MAGIC))
+		return -1;
+
+	headers += sizeof(struct trx_header_le);
+
+	if (le32_to_cpu(trx->flag_version) & TRX_UNCOMP_IMAGE) {
+		actual_len = le32_to_cpu(trx->offsets[TRX_OFFSETS_DLFWLEN_IDX]);
+		return actual_len + sizeof(struct trx_header_le);
+	}
+	return -1;
+}
+
+static const char *brcmf_usb_get_fwname(struct brcmf_usbdev_info *devinfo)
+{
+	switch (devinfo->bus_pub.devid) {
+	case BRCM_CC_43143_CHIP_ID:
+		return BRCMF_USB_43143_FW_NAME;
+	case BRCM_CC_43235_CHIP_ID:
+	case BRCM_CC_43236_CHIP_ID:
+	case BRCM_CC_43238_CHIP_ID:
+		return BRCMF_USB_43236_FW_NAME;
+	case BRCM_CC_43242_CHIP_ID:
+		return BRCMF_USB_43242_FW_NAME;
+	case BRCM_CC_43566_CHIP_ID:
+	case BRCM_CC_43569_CHIP_ID:
+		return BRCMF_USB_43569_FW_NAME;
+	default:
+		return NULL;
+	}
+}
+
+
+static
+struct brcmf_usbdev *brcmf_usb_attach(struct brcmf_usbdev_info *devinfo,
+				      int nrxq, int ntxq)
+{
+	brcmf_dbg(USB, "Enter\n");
+
+	devinfo->bus_pub.nrxq = nrxq;
+	devinfo->rx_low_watermark = nrxq / 2;
+	devinfo->bus_pub.devinfo = devinfo;
+	devinfo->bus_pub.ntxq = ntxq;
+	devinfo->bus_pub.state = BRCMFMAC_USB_STATE_DOWN;
+
+	/* flow control when too many tx urbs posted */
+	devinfo->tx_low_watermark = ntxq / 4;
+	devinfo->tx_high_watermark = devinfo->tx_low_watermark * 3;
+	devinfo->bus_pub.bus_mtu = BRCMF_USB_MAX_PKT_SIZE;
+
+	/* Initialize other structure content */
+	init_waitqueue_head(&devinfo->ioctl_resp_wait);
+
+	/* Initialize the spinlocks */
+	spin_lock_init(&devinfo->qlock);
+	spin_lock_init(&devinfo->tx_flowblock_lock);
+
+	INIT_LIST_HEAD(&devinfo->rx_freeq);
+	INIT_LIST_HEAD(&devinfo->rx_postq);
+
+	INIT_LIST_HEAD(&devinfo->tx_freeq);
+	INIT_LIST_HEAD(&devinfo->tx_postq);
+
+	devinfo->tx_flowblock = false;
+
+	devinfo->rx_reqs = brcmf_usbdev_qinit(&devinfo->rx_freeq, nrxq);
+	if (!devinfo->rx_reqs)
+		goto error;
+
+	devinfo->tx_reqs = brcmf_usbdev_qinit(&devinfo->tx_freeq, ntxq);
+	if (!devinfo->tx_reqs)
+		goto error;
+	devinfo->tx_freecount = ntxq;
+
+	devinfo->ctl_urb = usb_alloc_urb(0, GFP_ATOMIC);
+	if (!devinfo->ctl_urb) {
+		brcmf_err("usb_alloc_urb (ctl) failed\n");
+		goto error;
+	}
+	devinfo->bulk_urb = usb_alloc_urb(0, GFP_ATOMIC);
+	if (!devinfo->bulk_urb) {
+		brcmf_err("usb_alloc_urb (bulk) failed\n");
+		goto error;
+	}
+
+	return &devinfo->bus_pub;
+
+error:
+	brcmf_err("failed!\n");
+	brcmf_usb_detach(devinfo);
+	return NULL;
+}
+
+static void brcmf_usb_wowl_config(struct device *dev, bool enabled)
+{
+	struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
+
+	brcmf_dbg(USB, "Configuring WOWL, enabled=%d\n", enabled);
+	devinfo->wowl_enabled = enabled;
+	device_set_wakeup_enable(devinfo->dev, enabled);
+}
+
+static struct brcmf_bus_ops brcmf_usb_bus_ops = {
+	.txdata = brcmf_usb_tx,
+	.stop = brcmf_usb_down,
+	.txctl = brcmf_usb_tx_ctlpkt,
+	.rxctl = brcmf_usb_rx_ctlpkt,
+	.wowl_config = brcmf_usb_wowl_config,
+};
+
+static int brcmf_usb_bus_setup(struct brcmf_usbdev_info *devinfo)
+{
+	int ret;
+
+	/* Attach to the common driver interface */
+	ret = brcmf_attach(devinfo->dev);
+	if (ret) {
+		brcmf_err("brcmf_attach failed\n");
+		return ret;
+	}
+
+	ret = brcmf_usb_up(devinfo->dev);
+	if (ret)
+		goto fail;
+
+	ret = brcmf_bus_start(devinfo->dev);
+	if (ret)
+		goto fail;
+
+	return 0;
+fail:
+	brcmf_detach(devinfo->dev);
+	return ret;
+}
+
+static void brcmf_usb_probe_phase2(struct device *dev,
+				   const struct firmware *fw,
+				   void *nvram, u32 nvlen)
+{
+	struct brcmf_bus *bus = dev_get_drvdata(dev);
+	struct brcmf_usbdev_info *devinfo;
+	int ret;
+
+	brcmf_dbg(USB, "Start fw downloading\n");
+
+	devinfo = bus->bus_priv.usb->devinfo;
+	ret = check_file(fw->data);
+	if (ret < 0) {
+		brcmf_err("invalid firmware\n");
+		release_firmware(fw);
+		goto error;
+	}
+
+	devinfo->image = fw->data;
+	devinfo->image_len = fw->size;
+
+	ret = brcmf_usb_fw_download(devinfo);
+	release_firmware(fw);
+	if (ret)
+		goto error;
+
+	ret = brcmf_usb_bus_setup(devinfo);
+	if (ret)
+		goto error;
+
+	mutex_unlock(&devinfo->dev_init_lock);
+	return;
+error:
+	brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), ret);
+	mutex_unlock(&devinfo->dev_init_lock);
+	device_release_driver(dev);
+}
+
+static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
+{
+	struct brcmf_bus *bus = NULL;
+	struct brcmf_usbdev *bus_pub = NULL;
+	struct device *dev = devinfo->dev;
+	int ret;
+
+	brcmf_dbg(USB, "Enter\n");
+	bus_pub = brcmf_usb_attach(devinfo, BRCMF_USB_NRXQ, BRCMF_USB_NTXQ);
+	if (!bus_pub)
+		return -ENODEV;
+
+	bus = kzalloc(sizeof(struct brcmf_bus), GFP_ATOMIC);
+	if (!bus) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	bus->dev = dev;
+	bus_pub->bus = bus;
+	bus->bus_priv.usb = bus_pub;
+	dev_set_drvdata(dev, bus);
+	bus->ops = &brcmf_usb_bus_ops;
+	bus->proto_type = BRCMF_PROTO_BCDC;
+	bus->always_use_fws_queue = true;
+#ifdef CONFIG_PM
+	bus->wowl_supported = true;
+#endif
+
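+	/* A dongle that already runs firmware (see the BCMFW entry in the
+	 * device id table) can be brought up directly, no download needed.
+	 */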
+	if (!brcmf_usb_dlneeded(devinfo)) {
+		ret = brcmf_usb_bus_setup(devinfo);
+		if (ret)
+			goto fail;
+		/* we are done */
+		mutex_unlock(&devinfo->dev_init_lock);
+		return 0;
+	}
+	bus->chip = bus_pub->devid;
+	bus->chiprev = bus_pub->chiprev;
+
+	/* request firmware here */
+	ret = brcmf_fw_get_firmwares(dev, 0, brcmf_usb_get_fwname(devinfo),
+				     NULL, brcmf_usb_probe_phase2);
+	if (ret) {
+		brcmf_err("firmware request failed: %d\n", ret);
+		goto fail;
+	}
+
+	return 0;
+
+fail:
+	/* Release resources in reverse order */
+	kfree(bus);
+	brcmf_usb_detach(devinfo);
+	return ret;
+}
+
+static void
+brcmf_usb_disconnect_cb(struct brcmf_usbdev_info *devinfo)
+{
+	if (!devinfo)
+		return;
+	brcmf_dbg(USB, "Enter, bus_pub %p\n", devinfo);
+
+	brcmf_detach(devinfo->dev);
+	kfree(devinfo->bus_pub.bus);
+	brcmf_usb_detach(devinfo);
+}
+
+static int
+brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
+{
+	struct usb_device *usb = interface_to_usbdev(intf);
+	struct brcmf_usbdev_info *devinfo;
+	struct usb_interface_descriptor	*desc;
+	struct usb_endpoint_descriptor *endpoint;
+	int ret = 0;
+	u32 num_of_eps;
+	u8 endpoint_num, ep;
+
+	brcmf_dbg(USB, "Enter 0x%04x:0x%04x\n", id->idVendor, id->idProduct);
+
+	devinfo = kzalloc(sizeof(*devinfo), GFP_ATOMIC);
+	if (devinfo == NULL)
+		return -ENOMEM;
+
+	devinfo->usbdev = usb;
+	devinfo->dev = &usb->dev;
+	/* Take an init lock, to protect against disconnect while still
+	 * loading. Necessary because of the asynchronous firmware load
+	 * construction.
+	 */
+	mutex_init(&devinfo->dev_init_lock);
+	mutex_lock(&devinfo->dev_init_lock);
+
+	usb_set_intfdata(intf, devinfo);
+
+	/* Check that the device supports only one configuration */
+	if (usb->descriptor.bNumConfigurations != 1) {
+		brcmf_err("Number of configurations: %d not supported\n",
+			  usb->descriptor.bNumConfigurations);
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	if ((usb->descriptor.bDeviceClass != USB_CLASS_VENDOR_SPEC) &&
+	    (usb->descriptor.bDeviceClass != USB_CLASS_MISC) &&
+	    (usb->descriptor.bDeviceClass != USB_CLASS_WIRELESS_CONTROLLER)) {
+		brcmf_err("Device class: 0x%x not supported\n",
+			  usb->descriptor.bDeviceClass);
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	desc = &intf->altsetting[0].desc;
+	if ((desc->bInterfaceClass != USB_CLASS_VENDOR_SPEC) ||
+	    (desc->bInterfaceSubClass != 2) ||
+	    (desc->bInterfaceProtocol != 0xff)) {
+		brcmf_err("non WLAN interface %d: 0x%x:0x%x:0x%x\n",
+			  desc->bInterfaceNumber, desc->bInterfaceClass,
+			  desc->bInterfaceSubClass, desc->bInterfaceProtocol);
+		ret = -ENODEV;
+		goto fail;
+	}
+
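+	/* Use the first bulk IN and bulk OUT endpoints for data transfers */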
+	num_of_eps = desc->bNumEndpoints;
+	for (ep = 0; ep < num_of_eps; ep++) {
+		endpoint = &intf->altsetting[0].endpoint[ep].desc;
+		endpoint_num = usb_endpoint_num(endpoint);
+		if (!usb_endpoint_xfer_bulk(endpoint))
+			continue;
+		if (usb_endpoint_dir_in(endpoint)) {
+			if (!devinfo->rx_pipe)
+				devinfo->rx_pipe =
+					usb_rcvbulkpipe(usb, endpoint_num);
+		} else {
+			if (!devinfo->tx_pipe)
+				devinfo->tx_pipe =
+					usb_sndbulkpipe(usb, endpoint_num);
+		}
+	}
+	if (devinfo->rx_pipe == 0) {
+		brcmf_err("No RX (in) Bulk EP found\n");
+		ret = -ENODEV;
+		goto fail;
+	}
+	if (devinfo->tx_pipe == 0) {
+		brcmf_err("No TX (out) Bulk EP found\n");
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	devinfo->ifnum = desc->bInterfaceNumber;
+
+	if (usb->speed == USB_SPEED_SUPER)
+		brcmf_dbg(USB, "Broadcom super speed USB WLAN interface detected\n");
+	else if (usb->speed == USB_SPEED_HIGH)
+		brcmf_dbg(USB, "Broadcom high speed USB WLAN interface detected\n");
+	else
+		brcmf_dbg(USB, "Broadcom full speed USB WLAN interface detected\n");
+
+	ret = brcmf_usb_probe_cb(devinfo);
+	if (ret)
+		goto fail;
+
+	/* Success */
+	return 0;
+
+fail:
+	mutex_unlock(&devinfo->dev_init_lock);
+	kfree(devinfo);
+	usb_set_intfdata(intf, NULL);
+	return ret;
+}
+
+static void
+brcmf_usb_disconnect(struct usb_interface *intf)
+{
+	struct brcmf_usbdev_info *devinfo;
+
+	brcmf_dbg(USB, "Enter\n");
+	devinfo = (struct brcmf_usbdev_info *)usb_get_intfdata(intf);
+
+	if (devinfo) {
+		mutex_lock(&devinfo->dev_init_lock);
+		/* Make sure that devinfo still exists. Firmware probe routines
+		 * may have released the device and cleared the intfdata.
+		 */
+		if (!usb_get_intfdata(intf))
+			goto done;
+
+		brcmf_usb_disconnect_cb(devinfo);
+		kfree(devinfo);
+	}
+done:
+	brcmf_dbg(USB, "Exit\n");
+}
+
+/*
+ * only need to signal the bus being down and update the state.
+ */
+static int brcmf_usb_suspend(struct usb_interface *intf, pm_message_t state)
+{
+	struct usb_device *usb = interface_to_usbdev(intf);
+	struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
+
+	brcmf_dbg(USB, "Enter\n");
+	devinfo->bus_pub.state = BRCMFMAC_USB_STATE_SLEEP;
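+	/* With WOWL enabled only the pending URBs are cancelled; without it
+	 * the whole driver stack is detached.
+	 */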
+	if (devinfo->wowl_enabled)
+		brcmf_cancel_all_urbs(devinfo);
+	else
+		brcmf_detach(&usb->dev);
+	return 0;
+}
+
+/*
+ * (re-) start the bus.
+ */
+static int brcmf_usb_resume(struct usb_interface *intf)
+{
+	struct usb_device *usb = interface_to_usbdev(intf);
+	struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
+
+	brcmf_dbg(USB, "Enter\n");
+	if (!devinfo->wowl_enabled)
+		return brcmf_usb_bus_setup(devinfo);
+
+	devinfo->bus_pub.state = BRCMFMAC_USB_STATE_UP;
+	brcmf_usb_rx_fill_all(devinfo);
+	return 0;
+}
+
+static int brcmf_usb_reset_resume(struct usb_interface *intf)
+{
+	struct usb_device *usb = interface_to_usbdev(intf);
+	struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
+
+	brcmf_dbg(USB, "Enter\n");
+
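+	/* After a reset the firmware must be downloaded again, so re-request
+	 * it and rerun the normal bringup path.
+	 */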
+	return brcmf_fw_get_firmwares(&usb->dev, 0,
+				      brcmf_usb_get_fwname(devinfo), NULL,
+				      brcmf_usb_probe_phase2);
+}
+
+#define BRCMF_USB_DEVICE(dev_id)	\
+	{ USB_DEVICE(BRCM_USB_VENDOR_ID_BROADCOM, dev_id) }
+
+static struct usb_device_id brcmf_usb_devid_table[] = {
+	BRCMF_USB_DEVICE(BRCM_USB_43143_DEVICE_ID),
+	BRCMF_USB_DEVICE(BRCM_USB_43236_DEVICE_ID),
+	BRCMF_USB_DEVICE(BRCM_USB_43242_DEVICE_ID),
+	BRCMF_USB_DEVICE(BRCM_USB_43569_DEVICE_ID),
+	/* special entry for device with firmware loaded and running */
+	BRCMF_USB_DEVICE(BRCM_USB_BCMFW_DEVICE_ID),
+	{ /* end: all zeroes */ }
+};
+
+MODULE_DEVICE_TABLE(usb, brcmf_usb_devid_table);
+MODULE_FIRMWARE(BRCMF_USB_43143_FW_NAME);
+MODULE_FIRMWARE(BRCMF_USB_43236_FW_NAME);
+MODULE_FIRMWARE(BRCMF_USB_43242_FW_NAME);
+MODULE_FIRMWARE(BRCMF_USB_43569_FW_NAME);
+
+static struct usb_driver brcmf_usbdrvr = {
+	.name = KBUILD_MODNAME,
+	.probe = brcmf_usb_probe,
+	.disconnect = brcmf_usb_disconnect,
+	.id_table = brcmf_usb_devid_table,
+	.suspend = brcmf_usb_suspend,
+	.resume = brcmf_usb_resume,
+	.reset_resume = brcmf_usb_reset_resume,
+	.supports_autosuspend = 1,
+	.disable_hub_initiated_lpm = 1,
+};
+
+static int brcmf_usb_reset_device(struct device *dev, void *notused)
+{
+	/* The device passed in is the usb interface, so we
+	 * need to use its parent here.
+	 */
+	brcmf_dev_reset(dev->parent);
+	return 0;
+}
+
+void brcmf_usb_exit(void)
+{
+	struct device_driver *drv = &brcmf_usbdrvr.drvwrap.driver;
+	int ret;
+
+	brcmf_dbg(USB, "Enter\n");
+	ret = driver_for_each_device(drv, NULL, NULL,
+				     brcmf_usb_reset_device);
+	usb_deregister(&brcmf_usbdrvr);
+}
+
+void brcmf_usb_register(void)
+{
+	brcmf_dbg(USB, "Enter\n");
+	usb_register(&brcmf_usbdrvr);
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.h b/drivers/net/wireless/brcm80211/brcmfmac/usb.h
new file mode 100644
index 0000000..f483a8c
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2011 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef BRCMFMAC_USB_H
+#define BRCMFMAC_USB_H
+
+enum brcmf_usb_state {
+	BRCMFMAC_USB_STATE_DOWN,
+	BRCMFMAC_USB_STATE_DL_FAIL,
+	BRCMFMAC_USB_STATE_DL_DONE,
+	BRCMFMAC_USB_STATE_UP,
+	BRCMFMAC_USB_STATE_SLEEP
+};
+
+struct brcmf_stats {
+	u32 tx_ctlpkts;
+	u32 tx_ctlerrs;
+	u32 rx_ctlpkts;
+	u32 rx_ctlerrs;
+};
+
+struct brcmf_usbdev {
+	struct brcmf_bus *bus;
+	struct brcmf_usbdev_info *devinfo;
+	enum brcmf_usb_state state;
+	struct brcmf_stats stats;
+	int ntxq, nrxq, rxsize;
+	u32 bus_mtu;
+	int devid;
+	int chiprev; /* chip revision number */
+};
+
+/* IO Request Block (IRB) */
+struct brcmf_usbreq {
+	struct list_head list;
+	struct brcmf_usbdev_info *devinfo;
+	struct urb *urb;
+	struct sk_buff  *skb;
+};
+
+#endif /* BRCMFMAC_USB_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/vendor.c b/drivers/net/wireless/brcm80211/brcmfmac/vendor.c
new file mode 100644
index 0000000..8eff275
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/vendor.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/vmalloc.h>
+#include <net/cfg80211.h>
+#include <net/netlink.h>
+
+#include <brcmu_wifi.h>
+#include "fwil_types.h"
+#include "core.h"
+#include "p2p.h"
+#include "debug.h"
+#include "cfg80211.h"
+#include "vendor.h"
+#include "fwil.h"
+
+static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy,
+						 struct wireless_dev *wdev,
+						 const void *data, int len)
+{
+	struct brcmf_cfg80211_vif *vif;
+	struct brcmf_if *ifp;
+	const struct brcmf_vndr_dcmd_hdr *cmdhdr = data;
+	struct sk_buff *reply;
+	int ret, payload, ret_len;
+	void *dcmd_buf = NULL, *wr_pointer;
+	u16 msglen, maxmsglen = PAGE_SIZE - 0x100;
+
+	if (len < sizeof(*cmdhdr)) {
+		brcmf_err("vendor command too short: %d\n", len);
+		return -EINVAL;
+	}
+
+	vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
+	ifp = vif->ifp;
+
+	brcmf_dbg(TRACE, "ifidx=%d, cmd=%d\n", ifp->ifidx, cmdhdr->cmd);
+
+	if (cmdhdr->offset > len) {
+		brcmf_err("bad buffer offset %d > %d\n", cmdhdr->offset, len);
+		return -EINVAL;
+	}
+
+	len -= cmdhdr->offset;
+	ret_len = cmdhdr->len;
+	if (ret_len > 0 || len > 0) {
+		if (len > BRCMF_DCMD_MAXLEN) {
+			brcmf_err("oversize input buffer %d\n", len);
+			len = BRCMF_DCMD_MAXLEN;
+		}
+		if (ret_len > BRCMF_DCMD_MAXLEN) {
+			brcmf_err("oversize return buffer %d\n", ret_len);
+			ret_len = BRCMF_DCMD_MAXLEN;
+		}
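+		/* one buffer serves both the request payload and the reply;
+		 * size it for the larger of the two plus a terminating NUL
+		 */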
+		payload = max(ret_len, len) + 1;
+		dcmd_buf = vzalloc(payload);
+		if (NULL == dcmd_buf)
+			return -ENOMEM;
+
+		memcpy(dcmd_buf, (void *)cmdhdr + cmdhdr->offset, len);
+		*(char *)(dcmd_buf + len) = '\0';
+	}
+
+	if (cmdhdr->set)
+		ret = brcmf_fil_cmd_data_set(ifp, cmdhdr->cmd, dcmd_buf,
+					     ret_len);
+	else
+		ret = brcmf_fil_cmd_data_get(ifp, cmdhdr->cmd, dcmd_buf,
+					     ret_len);
+	if (ret != 0)
+		goto exit;
+
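+	/* Return the response in chunks of at most maxmsglen bytes; each
+	 * chunk is a separate vendor-command reply carrying the data and its
+	 * length.
+	 */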
+	wr_pointer = dcmd_buf;
+	while (ret_len > 0) {
+		msglen = ret_len > maxmsglen ? maxmsglen : ret_len;
+		ret_len -= msglen;
+		payload = msglen + sizeof(msglen);
+		reply = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, payload);
+		if (NULL == reply) {
+			ret = -ENOMEM;
+			break;
+		}
+
+		if (nla_put(reply, BRCMF_NLATTR_DATA, msglen, wr_pointer) ||
+		    nla_put_u16(reply, BRCMF_NLATTR_LEN, msglen)) {
+			kfree_skb(reply);
+			ret = -ENOBUFS;
+			break;
+		}
+
+		ret = cfg80211_vendor_cmd_reply(reply);
+		if (ret)
+			break;
+
+		wr_pointer += msglen;
+	}
+
+exit:
+	vfree(dcmd_buf);
+
+	return ret;
+}
+
+const struct wiphy_vendor_command brcmf_vendor_cmds[] = {
+	{
+		{
+			.vendor_id = BROADCOM_OUI,
+			.subcmd = BRCMF_VNDR_CMDS_DCMD
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV |
+			 WIPHY_VENDOR_CMD_NEED_NETDEV,
+		.doit = brcmf_cfg80211_vndr_cmds_dcmd_handler
+	},
+};
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/vendor.h b/drivers/net/wireless/brcm80211/brcmfmac/vendor.h
new file mode 100644
index 0000000..061b7bf
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/vendor.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _vendor_h_
+#define _vendor_h_
+
+#define BROADCOM_OUI	0x001018
+
+enum brcmf_vndr_cmds {
+	BRCMF_VNDR_CMDS_UNSPEC,
+	BRCMF_VNDR_CMDS_DCMD,
+	BRCMF_VNDR_CMDS_LAST
+};
+
+/**
+ * enum brcmf_nlattrs - nl80211 message attributes
+ *
+ * @BRCMF_NLATTR_LEN: message body length
+ * @BRCMF_NLATTR_DATA: message body
+ */
+enum brcmf_nlattrs {
+	BRCMF_NLATTR_UNSPEC,
+
+	BRCMF_NLATTR_LEN,
+	BRCMF_NLATTR_DATA,
+
+	__BRCMF_NLATTR_AFTER_LAST,
+	BRCMF_NLATTR_MAX = __BRCMF_NLATTR_AFTER_LAST - 1
+};
+
+/**
+ * struct brcmf_vndr_dcmd_hdr - message header for cfg80211 vendor command dcmd
+ *				support
+ *
+ * @cmd: common dongle cmd definition
+ * @len: length of the expected return buffer
+ * @offset: offset of data buffer
+ * @set: get or set request (optional)
+ * @magic: magic number for verification
+ */
+struct brcmf_vndr_dcmd_hdr {
+	uint cmd;
+	int len;
+	uint offset;
+	uint set;
+	uint magic;
+};
+
+extern const struct wiphy_vendor_command brcmf_vendor_cmds[];
+
+#endif /* _vendor_h_ */