Add cpmodem_shim

Change-Id: I469fe04efac7b6636f9d2d1a8ae93c6c1ac30dc1
diff --git a/cpmodem_shim/Kbuild b/cpmodem_shim/Kbuild
new file mode 100644
index 0000000..880321d
--- /dev/null
+++ b/cpmodem_shim/Kbuild
@@ -0,0 +1,2 @@
+obj-m	+= cpmodem_shim.o
+ccflags-y += $(EXTRA_CFLAGS)
diff --git a/cpmodem_shim/Makefile b/cpmodem_shim/Makefile
new file mode 100644
index 0000000..bf58d2a
--- /dev/null
+++ b/cpmodem_shim/Makefile
@@ -0,0 +1,35 @@
+# Makefile for cpmodem_shim net_device hooks.
+
+ROOT ?= $(abspath ../../..)
+include $(ROOT)/Makefile.config
+
+VERBOSITY := $(if $(filter $(MAKEVERBOSE),yes),1,0)
+EXTRA_CFLAGS := -I$(CURDIR)/include
+
+#uncomment for debugging
+#EXTRA_CFLAGS += -O0
+#EXTRA_CFLAGS += -g
+
+unexport CC CPP LD CFLAGS CPPFLAGS LDFLAGS AR RANLIB
+
+# Grab the chipset, platform and memory defines from CPDEFINES. PRODUCT_INFO_CHIPSET can arrive in two forms (a bare int or a single-quoted string):
+#  -DPRODUCT_INFO_CHIPSET=1234 or -DPRODUCT_INFO_CHIPSET='1234'
+#  PRODUCT_INFO_MEMORY is always an int, so it can be used directly.
+#  PRODUCT_PLATFORM is a bare symbol, so double quotes must be added to turn it into a string.
+# The first patsubst removes the single quotes; the next two add escaped double quotes so the code can always treat the define as a C string.
+CPNEWDEFINES := $(filter -DPRODUCT_INFO_CHIPSET=% -DPRODUCT_INFO_MEMORY=% -DPRODUCT_PLATFORM=%, $(CPDEFINES))
+CPNEWDEFINES := $(patsubst -DPRODUCT_INFO_CHIPSET='%', -DPRODUCT_INFO_CHIPSET=%, $(CPNEWDEFINES))
+CPNEWDEFINES := $(patsubst -DPRODUCT_INFO_CHIPSET=%, -DPRODUCT_INFO_CHIPSET=\\\"%\\\", $(CPNEWDEFINES))
+CPNEWDEFINES := $(patsubst -DPRODUCT_PLATFORM=%, -DPRODUCT_PLATFORM=\\\"%\\\", $(CPNEWDEFINES))
+EXTRA_CFLAGS += $(CPNEWDEFINES)
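+# As a worked example (illustrative values, not taken from a real product config):
+#   -DPRODUCT_INFO_CHIPSET='9615' first becomes -DPRODUCT_INFO_CHIPSET=9615 and then
+#   -DPRODUCT_INFO_CHIPSET=\\\"9615\\\"; -DPRODUCT_PLATFORM=cheetah becomes
+#   -DPRODUCT_PLATFORM=\\\"cheetah\\\". After make/shell unescaping, the compiler
+#   sees the defines as the C string literals "9615" and "cheetah".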
+
+default: install
+
+all:
+	$(MAKE) -C $(CPKERN) V=$(VERBOSITY) M=$(CURDIR) EXTRA_CFLAGS="$(EXTRA_CFLAGS)"
+
+install: all
+	$(MAKE) -C $(CPKERN) M=$(CURDIR) INSTALL_MOD_PATH=$(ROOT)/$(IMGDIR)/rootfs ARCH=$(ARCH)  modules_install
+
+clean:
+	$(MAKE) -C $(CPKERN) V=$(VERBOSITY) M=$(CURDIR) clean
diff --git a/cpmodem_shim/cpmodem_shim.c b/cpmodem_shim/cpmodem_shim.c
new file mode 100755
index 0000000..0ad3061
--- /dev/null
+++ b/cpmodem_shim/cpmodem_shim.c
@@ -0,0 +1,6555 @@
+/*
+ * FILE NAME cpmodem_shim.c
+ *
+ * BRIEF MODULE DESCRIPTION
+ *  Frankendriver - USB to Ethernet, IP, or PPP, controlled via a block driver.
+ *
+ *  Author: CradlePoint Technology, Inc.  <source@cradlepoint.com>
+ *  		Ben Kendall <benk@cradlepoint.com>
+ *  		Cory Atkin <catkin@cradlepoint.com>
+ *
+ * Copyright 2012, CradlePoint Technology, Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2
+ *  as published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to:
+ *  Free Software Foundation
+ *  51 Franklin Street, Fifth Floor
+ *  Boston, MA  02111-1301  USA
+ */
+
+
+// Necessary includes for device drivers
+#include <linux/module.h>   	// Needed by all modules 
+#include <linux/kernel.h>   	// Needed for KERN_xxxx 
+#include <linux/init.h> 		// Needed for the macros 
+#include <linux/cdev.h>
+#include <linux/slab.h> 		// kmalloc()
+#include <linux/fs.h>   		// everything... 
+#include <linux/poll.h>
+#include <linux/errno.h>		// error codes
+#include <linux/types.h>		// size_t
+#include <linux/proc_fs.h>
+#include <linux/fcntl.h>		
+#include <linux/skbuff.h>   	
+#include <linux/list.h>
+#include <linux/if_ether.h>
+#include <linux/if_arp.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/ip.h>
+#include <net/addrconf.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/spinlock.h>
+#include <linux/ktime.h>
+/* #include <asm/system.h>  	// cli(), *_flags */
+#include <asm/uaccess.h>		// copy_from/to_user
+#include <linux/usb.h>
+#include <linux/version.h>      // LINUX_VERSION_CODE
+#include <cpmodem_shim.h>
+#include <cpmodem_wrapper.h>
+
+
+//#define KERNEL_2_6_21 // uncomment for the 2.6.21 kernel; leave commented out for the 3.0.29 kernel
+/*********************************************** logging and debug ************************************************/
+
+#define RUNTIME_DEBUG_TRACE (1 << 0)
+#define RUNTIME_DEBUG_INFO (1 << 1)
+#define RUNTIME_DEBUG_WARN (1 << 2)
+#define RUNTIME_DEBUG_ERROR (1 << 3)
+#define RUNTIME_LOG 0
+#define RUNTIME_ASSERT -1
+
+//#undef RUNTIME_DEBUG
+//#define RUNTIME_DEBUG ( /*RUNTIME_DEBUG_TRACE |*/  RUNTIME_DEBUG_INFO | RUNTIME_DEBUG_WARN | RUNTIME_DEBUG_ERROR )
+
+
+static int cp_lkm_log_level = 0;
+ 
+#ifdef RUNTIME_DEBUG
+static const char *cp_lkm_shim_runtime_debug_level_str[] = {
+	"ASSERT",
+	"TRACE",
+	"INFO",
+	"WARN",
+	"ERROR",
+};
+#else
+static const char *cp_lkm_shim_debug_log_level_str[] = {
+	"ASSERT",
+	"ERROR",
+	"WARN",
+	"INFO",
+	"TRACE",
+	"PRINTF"
+};
+#endif
+
+static int cp_out_get_level_index(int level)
+{
+	int level_index = 0;
+	while (level) {
+		level = level >> 1;
+		level_index++;
+	}
+	return level_index;
+}
+
+static void cp_out(int level, const char * file, int line, const char *fmt, ...)
+{
+	int file_str_len = 0;
+	char *file_pos = (char *)file; 
+	char *fmt1;
+	va_list arg;
+	int level_index = 0;
+	const char *level_str = NULL;
+	const char *kernel_lvl_str = NULL;
+
+	if (level>0) { // level of 0 is LOG and -1 is ASSERT - always output
+		level_index = cp_out_get_level_index(level);
+
+#ifdef RUNTIME_DEBUG
+		if (!(RUNTIME_DEBUG & level)) {
+			return;
+		}
+		level_str = cp_lkm_shim_runtime_debug_level_str[level_index];
+#else
+		if (!(cp_lkm_log_level & level)) {
+			return;
+		}
+		level_str = cp_lkm_shim_debug_log_level_str[level_index];
+#endif
+	}
+
+
+	switch(level) {
+	case RUNTIME_DEBUG_TRACE:
+		kernel_lvl_str = KERN_INFO;
+		break;
+	case RUNTIME_DEBUG_INFO: 
+		kernel_lvl_str = KERN_INFO;
+		break;
+	case RUNTIME_DEBUG_WARN: 
+		kernel_lvl_str = KERN_WARNING;
+		break;
+	case RUNTIME_DEBUG_ERROR:
+		kernel_lvl_str = KERN_ERR;
+		break;
+	case RUNTIME_LOG:      
+		kernel_lvl_str = KERN_INFO;
+		break;
+	case RUNTIME_ASSERT:  
+		kernel_lvl_str = KERN_ERR;
+		break;
+	default:
+		kernel_lvl_str = KERN_INFO;
+		break;
+	}
+
+
+	va_start(arg, fmt);
+
+	if (file) {
+		char *pos = (char *)file;
+		while ((pos = strchr(pos, '/'))) {
+			pos++;
+			file_pos = pos;
+		}
+
+		file_str_len = strlen(file_pos);
+	}
+
+	fmt1 = kmalloc(strlen(fmt) + file_str_len + 12 + 6 + 2, GFP_ATOMIC); // +6 for debug type indication, +2 for linux syslog level
+	if (!fmt1) {
+		return;
+	}
+	if (level_str) {
+		if (file) {
+			sprintf(fmt1, "%s%6s  %s(%4d):%s\n", kernel_lvl_str, level_str, file_pos, line, fmt);
+		} else {
+			sprintf(fmt1, "%s%6s  %s\n", kernel_lvl_str, level_str, fmt);
+		}
+	} else {
+		if (file) {
+			sprintf(fmt1, "%s%s(%4d):%s\n", kernel_lvl_str, file_pos, line, fmt);
+		} else {
+			sprintf(fmt1, "%s%s\n", kernel_lvl_str, fmt);
+		}
+	}
+	vprintk(fmt1, arg);
+	kfree(fmt1);
+	va_end(arg);
+}
+
+#ifdef RUNTIME_DEBUG
+// assert is always defined if RUNTIME_DEBUG is defined
+// bad idea to kill things in kernel, so we just print the assert msg and keep going
+#define DEBUG_ASSERT(a, args...) \
+	if (!(a)) { \
+		printk(KERN_ERR "\n!!! CPMODEM_SHIM ASSERT !!!\n"); \
+		cp_out(RUNTIME_ASSERT, __FILE__, __LINE__, args); \
+		dump_stack(); \
+	}
+#define DEBUG_TRACE(args...) cp_out(RUNTIME_DEBUG_TRACE, __FILE__, __LINE__, args)
+#define DEBUG_INFO(args...)  cp_out(RUNTIME_DEBUG_INFO, __FILE__, __LINE__, args)
+#define DEBUG_WARN(args...)  cp_out(RUNTIME_DEBUG_WARN, __FILE__, __LINE__, args)
+#define DEBUG_ERROR(args...) cp_out(RUNTIME_DEBUG_ERROR, __FILE__, __LINE__, args)
+#else
+#define DEBUG_ASSERT(a, args...)
+#define DEBUG_TRACE(args...)	cp_out(LOG_DEBUG_LEVEL_TRACE, __FILE__, __LINE__, args)
+#define DEBUG_INFO(args...) 	cp_out(LOG_DEBUG_LEVEL_INFO, __FILE__, __LINE__, args)
+#define DEBUG_WARN(args...) 	cp_out(LOG_DEBUG_LEVEL_WARN, __FILE__, __LINE__, args)
+#define DEBUG_ERROR(args...)	cp_out(LOG_DEBUG_LEVEL_ERROR, __FILE__, __LINE__, args)
+#define DEBUG_PRINTF(args...)   cp_out(LOG_DEBUG_LEVEL_PRINTF, __FILE__, __LINE__, args)
+#endif
+
+#define LOG(args...) cp_out(RUNTIME_LOG, NULL, 0, args)
+
+/*********************************************** general definitions and helper functions *************************/
+
+// Queued read message: a header plus payload skb, held until user space reads it
+struct cp_lkm_read_msg {
+	struct cp_lkm_msg_hdr hdr;
+	struct sk_buff *skb;
+	struct list_head list;
+};
+
+struct cp_lkm_common_ctx {
+	u8 open_cnt;
+
+	// read operation members
+	wait_queue_head_t inq;
+	struct list_head read_list;    
+	spinlock_t  read_list_lock;
+	bool reading_data;
+	bool q_waiting;
+	// write operation members
+	struct sk_buff *write_skb;
+
+	int (*open)(struct cp_lkm_common_ctx *ctx); // called at open
+	int (*close)(struct cp_lkm_common_ctx *ctx); // called at close
+	int (*handle_msg)(struct cp_lkm_common_ctx *ctx, struct cp_lkm_msg_hdr *hdr, struct sk_buff *skb); // called at write
+	int (*handle_ioctl)(struct cp_lkm_common_ctx *ctx, int cmd, void *k_argp); // called at ioctl
+};
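+
+/*
+ * A subsystem embeds this ctx as its first member and fills in the ops before
+ * the char device is used. A hypothetical sketch of the wiring for the usb
+ * manager (the real assignments happen in init code not shown here):
+ *
+ *   cp_lkm_usb_mgr.common.open         = cp_lkm_usb_open;
+ *   cp_lkm_usb_mgr.common.close        = cp_lkm_usb_close;
+ *   cp_lkm_usb_mgr.common.handle_msg   = cp_lkm_usb_handle_msg;
+ *   cp_lkm_usb_mgr.common.handle_ioctl = cp_lkm_usb_handle_ioctl;
+ */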
+
+
+int cp_lkm_open(struct inode *inode, struct file *filp);
+int cp_lkm_release(struct inode *inode, struct file *filp);
+ssize_t cp_lkm_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos);
+ssize_t cp_lkm_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos);
+#ifdef KERNEL_2_6_21
+int cp_lkm_ioctl (struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg);
+#else
+long cp_lkm_ioctl (struct file *filp, unsigned int cmd, unsigned long arg);
+#endif
+unsigned int cp_lkm_poll(struct file *filp, struct poll_table_struct *);
+
+static void cp_lkm_common_ctx_init(struct cp_lkm_common_ctx *common);
+static void cp_lkm_cleanup_msg_list(struct cp_lkm_common_ctx *common);
+static int cp_lkm_post_message(struct cp_lkm_common_ctx *mgr, struct cp_lkm_msg_hdr* hdr, struct sk_buff *skb);
+
+/* Structure that declares the usual file
+	access functions */
+struct file_operations cp_lkm_fops = {
+	.owner = THIS_MODULE,
+	.read = cp_lkm_read,
+	.write = cp_lkm_write,
+#ifdef KERNEL_2_6_21
+	.ioctl = cp_lkm_ioctl,
+#else
+	.unlocked_ioctl = cp_lkm_ioctl,
+#endif
+	.open = cp_lkm_open,
+	.poll = cp_lkm_poll,
+	.release = cp_lkm_release
+};
+
+static int major;
+static struct device *cp_lkm_dev[2];
+static struct class *cp_lkm_class;
+
+#define CP_LKM_USB_MGR_MINOR 0
+#define CP_LKM_PM_MGR_MINOR 1
+#define CP_LKM_ITER         3000   //CP_LKM_ITER * CP_LKM_TIMEOUT_MS = 30000 or 30 seconds
+#define CP_LKM_TIMEOUT_MS   10
+
+typedef int (*cp_lkm_data_transfer_t)(void *ctx, struct sk_buff *skb);
+typedef void (*cp_lkm_data_hdr_size_t)(void *ctx, int wrapper_hdr_size, int *hdr_size, int* hdr_offset);
+typedef int (*cp_lkm_poll_t)(void *ctx, int budget);
+typedef void (*cp_lkm_schedule_t)(void *ctx);
+typedef void (*cp_lkm_complete_t)(void *ctx);
+typedef int (*cp_lkm_msg_t)(void *ctx);
+struct cp_lkm_edi {
+	//values provided by usb side, called by pm side
+	cp_lkm_data_transfer_t usb_send;
+	void *usb_send_ctx;
+	
+	//value provided by pm side, called by usb side
+	cp_lkm_msg_t pm_send_pause;    //called by usb to pause the network q
+	cp_lkm_msg_t pm_send_resume;   //called by usb to resume the network q
+	cp_lkm_data_transfer_t pm_recv;
+	cp_lkm_data_hdr_size_t pm_get_hdr_size;  //ask pm how much space it needs for headers
+	void *pm_recv_ctx;
+
+	void *pm_stats64_ctx;
+};
+
+static int cp_lkm_pm_usb_link(struct cp_lkm_edi *edi, int pm_unique_id, int link);
+
+struct cp_lkm_pm_stats64 {
+	u64	rx_packets;	   
+	u64	tx_packets;	   
+	u64	rx_bytes;	   
+	u64	tx_bytes;	   
+	u64	rx_errors;	   
+	u64	tx_errors;	   
+	u64	rx_dropped;	   
+	u64	tx_dropped;	   
+
+	u64	rx_over_errors;	   
+
+	struct u64_stats_sync syncp;
+};
+
+struct cp_lkm_pm_common {
+	int unique_id;
+	u32 attached;
+	cp_lkm_pm_type_t type;
+	struct net_device *net_dev;
+	struct cp_lkm_edi *edi;
+	struct list_head filter_list;
+	u32 filter_drop_cnt;
+
+	// keep these in pm context so dual sim hidden unplug/plug do not affect the stats
+	struct cp_lkm_pm_stats64 *pcpu_stats64;
+
+	int pm_link_count;  	 //token used to prevent xmit and poll from being called while we are linking or unlinking; -1 = unlinking, so block xmit and poll
+	spinlock_t pm_link_lock; //lock to protect getting and releasing the pm_link_count token
+
+	struct list_head list;
+};
+
+//static void cp_lkm_pm_update_stats64(struct cp_lkm_pm_stats64 *stats, u64 *field, u64 incr);
+#define UPDATE_STATS(stats_ctx, field, incr) do { \
+												if (stats_ctx) { \
+													struct cp_lkm_pm_stats64 *stats = this_cpu_ptr(((struct cp_lkm_pm_common *)stats_ctx)->pcpu_stats64); \
+													if (stats) { \
+														u64_stats_update_begin(&stats->syncp); \
+														stats->field += incr; \
+														u64_stats_update_end(&stats->syncp); \
+													} \
+												} \
+											 } while (0)
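+
+/*
+ * Typical (illustrative) use from a receive path, counting one delivered
+ * packet; pm_stats64_ctx is the void* stats context stashed in the edi:
+ *
+ *   UPDATE_STATS(cpdev->edi->pm_stats64_ctx, rx_packets, 1);
+ *   UPDATE_STATS(cpdev->edi->pm_stats64_ctx, rx_bytes, skb->len);
+ */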
+
+//Keep these commented out for release
+//static int dbg_memleak_timer_started = 0;
+//static struct timer_list dbg_memleak_timer;  
+//static spinlock_t dbg_state_lock;
+//static int dbg_state_init = 0;
+//static int g_dbg_memalloc_cnt = 0;
+//static int g_stuck_cnt = 0;
+//static int g_stuck_chk = 0;
+//static int g_unlink_cnt = 0;
+
+typedef size_t ref_t;
+typedef void (*memref_final_method_t)(void *buf);
+struct memref {
+	memref_final_method_t mfree;
+	atomic_t refs;
+};
+
+ 
+void *memref_alloc(size_t size, memref_final_method_t mfree)
+{
+	struct memref *ptr;
+
+	ptr = (struct memref *)kmalloc(sizeof(struct memref) + size, GFP_ATOMIC);
+	if (!ptr) {
+		return NULL;
+	}
+	//g_dbg_memalloc_cnt++;
+	ptr->mfree = mfree;
+	atomic_set(&ptr->refs, 1);
+
+	return (ptr + 1);
+}
+
+void *memref_alloc_and_zero(size_t size, memref_final_method_t mfree)
+{
+	void *ptr;
+
+	ptr = memref_alloc(size, mfree);
+	if (!ptr) {
+		return NULL;
+	}
+
+	memset(ptr, 0x00, size);
+
+	return ptr;
+}
+
+static void *memref_ref(void *buf)
+{
+	struct memref *mb;
+
+	if (!buf) {
+		return NULL;
+	}
+
+	mb = (struct memref *)(buf) - 1;
+
+//  if (0 == atomic_read(&mb->refs)) {
+//  	DEBUG_INFO("%s() !refs", __FUNCTION__);
+//  	  return NULL;
+//    }
+
+	atomic_inc(&mb->refs);
+
+	return buf;
+}
+
+#if 0
+static ref_t memref_cnt(void *buf)
+{
+	struct memref *mb;
+
+	if (!buf) {
+		return 0;
+	}
+
+	mb = (struct memref *)(buf) - 1;
+	return atomic_read(&mb->refs);
+}
+#endif
+
+static ref_t memref_deref(void *buf)
+{
+	struct memref *mb;
+
+	if (!buf) {
+		return 0;
+	}
+
+	mb = (struct memref *)(buf) - 1;
+
+//  if (0 == atomic_read(&mb->refs)) {
+//  	DEBUG_INFO("%s() !refs", __FUNCTION__);
+//  	  return NULL;
+//    }
+
+	if (atomic_dec_and_test(&mb->refs)) {
+		//g_dbg_memalloc_cnt--;
+		if (mb->mfree) {
+			mb->mfree(buf);
+		}
+		kfree(mb);
+		return 0;
+	}
+
+	return atomic_read(&mb->refs);
+}
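+
+/*
+ * Illustrative lifetime of a memref'd object (struct foo and foo_finalize are
+ * hypothetical, not defined in this file):
+ *
+ *   struct foo *f = memref_alloc_and_zero(sizeof(struct foo), foo_finalize);
+ *   memref_ref(f);    // second holder (e.g. a second list), refs == 2
+ *   memref_deref(f);  // refs == 1, object still alive
+ *   memref_deref(f);  // refs == 0, foo_finalize(f) runs, then memory is freed
+ */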
+
+/*
+ * Generic helper that repeatedly calls a function until it either succeeds or the delay and
+ * iterations have been exhausted. Optionally it can throw a kernel panic on failure.
+ *
+ * ctx1, ctx2 - the contexts to pass into do_fun
+ * do_fun     - the function to call until it returns success
+ * delay_ms   - the amount of time to delay between calls to do_fun on failure
+ * iter       - the maximum number of times to call do_fun
+ * die_str    - if we should panic on failure, the message to display
+ *
+ * If die_str is provided, this function panics rather than returning on failure;
+ * otherwise it returns the result of the final call to do_fun.
+ * Note: maximum total wait time is delay_ms * iter
+*/
+typedef bool (*do_function_t)(void* ctx1, void* ctx2);
+bool cp_lkm_do_or_die(void* ctx1, void*ctx2, do_function_t do_fun, u32 delay_ms, u32 iter, const char* die_str)
+{
+	bool done = false;
+	//set_current_state(TASK_UNINTERRUPTIBLE);
+	while (!done && iter) {
+		iter--;
+		done = do_fun(ctx1,ctx2);
+		if (!done) {
+			msleep(delay_ms);
+			//schedule_timeout(msecs_to_jiffies(delay_ms));
+			//set_current_state(TASK_UNINTERRUPTIBLE);
+		}
+	}
+	if(!done && die_str) {
+		panic(die_str);
+		//BUG_ON()
+	}
+	//set_current_state(TASK_RUNNING);
+	return done;
+}
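+
+/*
+ * Example call (this mirrors the unplug path later in this file): retry the
+ * queue-empty check every CP_LKM_TIMEOUT_MS for CP_LKM_ITER iterations and
+ * panic if the queues never drain:
+ *
+ *   cp_lkm_do_or_die(cpbdev, NULL, cp_lkm_usb_shuter_down_do_empty_queues,
+ *                    CP_LKM_TIMEOUT_MS, CP_LKM_ITER, "Failed to empty cpdev queues");
+ */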
+
+/*******************************  kernel module USB/Wrapper functionality ********************************* 
+ *  
+ * The shim has multiple entry points. It can be pumped by hw interrupts, software interrupts, or threads. 
+ * The trick to getting the shim to work properly is knowing from which contexts the different functions can be called 
+ * and what you can do in that context. 
+ *  
+ * The biggest concern is to make sure we aren't nulling out a function or instance pointer in one context while another 
+ * context is using it. Pointers are changed when linking or unlinking to the protocol manager or when the device unplugs. 
+ * For link/unlink or unplug, we need to make sure all other processing has been blocked or stopped. We use a combination of 
+ * tokens and spinlocks to achieve this. 
+ *  
+ * Another complexity is dealing with multi-core processors such as we have in some routers. With multi-core you can have 
+ * a hw interrupt, software interrupt or thread running on one core and a hw interrupt, soft interrupt, or thread running on 
+ * another at the same time. In addition, the same soft interrupt code can run on both cores at the same time. 
+ * With single core, the hw int would block the thread. The shim was originally designed for a single-core system, so a lot of work 
+ * has been put into verifying multi-core works. 
+ *  
+ * Single core: We can be pumped by:
+ *  	Hardware interrupt  	 - all interrupts disabled, can't be preempted
+ *  	Software interrupt  	 - hw interrupts not disabled, can be preempted by hw interrupt
+ *  	Thread or other process  - can be preempted by hw or sw interrupt.
+ *  
+ * Multi core: all bets are off. Everything can run at the same time so you have to be very careful with locks and tokens to not corrupt 
+ *  		   variables and to not run functions reentrantly.
+ *  
+ * Here are the specific contexts (threads, processes) that pump us:
+ *   1. USB on a hardware interrupt context. This happens on tx and rx done (all interrupts disabled, schedule callbacks and get out fast)
+ *   2. USB on the hub thread.  This happens on unplug  (can sleep or pause, but be careful because it stops all USB system hub processing)
+ *   3. Kernel workqueue thread  (our own callback, can sleep or pause, but be careful, it stops all the kernel workqueue processing)
+ *   4. tasklet or timer soft interrupt context (our own callbacks on sw interrupt, hw interrupts enabled, can't sleep or do pause)
+ *   5. ioctl or device write on a kernel thread (this is cpusb in app space talking to us, runs on a thread, can be preempted in multi-core)
+ *   6. network (send from network side, runs as a software interrupt)
+ * 
+ *   Which functions are called in which contexts and what they do:
+ *   #1 - cp_lkm_usb_xmit_complete  - called by usb layer when transmit is done in hw interrupt context
+ *  								throw transfer in done q, on success, schedule tasklet or NAPI poll (#4) by calling
+ *  								cp_lkm_usb_done_and_defer_data() for data packets or cp_lkm_usb_done_and_defer_other() for non-data pkts.
+ *  								On error schedule kevent (#3) by calling cp_lkm_usb_defer_kevent()
+ *  	  cp_lkm_usb_recv_complete  - called by usb layer when recv is done in hw interrupt context
+ *  								throw transfer in done q, schedule tasklet or NAPI poll (#4), on error schedule kevent (#3)
+ * 
+ *   #2 - cp_lkm_usb_probe  	  - called when the usb hub layer detects a plug, called on hub thread context
+ *  	  cp_lkm_usb_disconnect   - called when the usb hub layer detects an unplug, called on hub thread context
+ *  								schedule mgr_kevent to clean up
+ * 
+ *   #3 - cp_lkm_usb_kevent 	  - scheduled by tx and rx complete (#1) on USB halt errors or out of memory failure. Is a workqueue thread
+ *  								clears the halts, sees if memory available. On success, schedules the tasklet or NAPI poll(#4)
+ * 
+ *   #4 - cp_lkm_usb_process_data_done_tasklet - Scheduled by rx or tx complete (#1). Runs in soft int context. This function is used when we
+ *  											 are using a non-NAPI compliant protocol manager (i.e. PPP). It processes recv'd pkts and sends
+ *  											 them onto the protocol manager, frees all sent skb's and restock more recv urbs to the USB layer.
+ *  	  cp_lkm_usb_process_other_done_tasklet - Same as the first one except it is scheduled anytime we recv a pkt that needs to go to the common
+ *  	                                         modem stack instead of to the network stack (ctrl, status or diagnostics pkt)
+ *  
+ *   #5 - cp_lkm_usb_handle_ioctl - ioctl mux function called by the kernel when the app ioctl is called
+ *  								calls the appropriate mux function
+ *  	  cp_lkm_usb_plug_intf    - called by ioctl mux to register a device. Register a usb driver to catch
+ *  								the plug event from the usb stack
+ *  	  cp_lkm_usb_open_intf    - called by ioctl mux to indicate the data channel is active. This causes us to
+ *  								mux all data packets to the network stack instead of up to cpusb in app space
+ *  	  cp_lkm_usb_close_intf   - called by ioctl mux to indicate the data connection has gone down.
+ *  								This causes us to mux all packets up to cpusb in app space instead of to network
+ *  								
+ *  	  cp_lkm_usb_unplug_intf  - called by ioctl mux. Releases the interface, deregisters the usb driver, cleans up memory
+ *  	  cp_lkm_usb_handle_msg   - called by the device driver write function. This is how cpusb sends us usb packets that
+ *  								we need to send to usb
+ *   #6 - cp_lkm_usb_start_xmit   - called by the network interface
+ *  								sends a transmit to the usb layer
+*/
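+
+/*
+ * A rough sketch of the hw-irq to soft-irq deferral pattern described above
+ * (pseudo-code only; the real completion and tasklet handlers are defined
+ * later in this file):
+ *
+ *   // #1, hw interrupt context: just queue the finished skb and schedule
+ *   skb_queue_tail(&cpbdev->data_rx_done, skb);
+ *   tasklet_schedule(&cpbdev->data_process_tasklet);
+ *
+ *   // #4, soft interrupt context: drain the done queue
+ *   while ((skb = skb_dequeue(&cpbdev->data_rx_done)) != NULL)
+ *       // process and hand the pkt to the protocol manager, restock rx urbs
+ */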
+
+
+struct cp_lkm_usb_dev;
+struct cp_lkm_usb_base_dev;
+
+
+/* we record the state for each of our queued skbs */
+enum skb_state {
+	illegal = 0,
+	out_start,     // start a data or other transmit
+	out_done,      // data or other transmit done
+	in_data_start, // start a recv (either data or other)
+	in_data_done,  // recv data done
+	in_data_cleanup,
+	in_other_start,
+	in_other_done, // recv other done
+	in_other_cleanup,
+	ctrl_start,    // start a usb ctrl transfer
+	ctrl_done,     // usb ctrl transfer finished
+	unlink_start   // telling usb to give our urb back
+};
+
+#define EVENT_TX_HALT    0
+#define EVENT_RX_HALT    1
+#define EVENT_RX_MEMORY  2
+#define EVENT_STS_SPLIT  3
+#define EVENT_LINK_RESET 4
+
+//These are standard USB defines
+#define  UE_BULK		 0x02
+#define  UE_INTERRUPT    0x03
+
+#define MAX_INTF_EPS 10
+
+#define CP_LKM_USB_RECV   0x01
+#define CP_LKM_USB_LISTEN 0x02
+
+struct cp_lkm_base_ep
+{
+	struct list_head list;     // for inserting in the cpbdev list of base endpoints
+	struct list_head eps;      // list of cloned endpoints based off this one
+	struct cp_lkm_usb_base_dev* cpbdev; // pointer back to the cpdev this endpoint belongs to
+	int ep_num; 			   // endpoint number
+	unsigned long err_flags;   // errors on the ep (halt, no mem)
+	int con_flags;  		   //connection flags (recv, listen)
+	int q_cnt;  			   //number of urbs down at the lower layer
+	int type;   			   //ep type (interrupt, bulk etc)
+	int max_transfer_size;
+	int pipe;
+	int interval;   		   // interval for interrupt end points
+};
+
+struct cp_lkm_ep
+{
+	struct list_head list_bep;    // for being inserted into the bep's list of eps
+	struct list_head list_cpdev;  // for being inserted into the cpdev's list of eps
+	struct cp_lkm_base_ep* bep;   // pointer to this ep's base endpoint
+	struct cp_lkm_usb_dev* cpdev; // pointer back to the cpdev this endpoint belongs to
+	int con_flags;  		   //connection flags (recv, listen)
+	int ep_num; 			   // duplicated from base endpoint for convenience
+};
+
+/* This struct gets stored in skb->cb, which is currently a 48 byte buffer.
+   The size of this struct must never be bigger than 48 bytes.
+*/
+struct skb_data {   
+	//if pointers and ints are 64 bits (8 bytes) then this is 48 bytes currently and
+	//no other variables can be added
+	struct urb  *urb;
+	struct cp_lkm_usb_base_dev *cpbdev;
+	struct cp_lkm_base_ep* bep;
+	enum skb_state state;
+	int status;
+	int unique_id;   //id of cpdev that sent the tx pkt
+};
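+
+/* A compile-time guard along these lines would enforce the size limit noted
+ * above (not in the original source; BUILD_BUG_ON comes from <linux/bug.h>
+ * and must sit inside a function, e.g. module init):
+ *
+ *   BUILD_BUG_ON(sizeof(struct skb_data) > sizeof(((struct sk_buff *)0)->cb));
+ */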
+
+#define MAX_USB_DRVR_NAME 10
+#define USB_DRVR_FRMT_STR "cpusb%d"
+
+struct cp_lkm_usb_base_dev
+{
+	struct list_head list;  	  //for inserting in global dev list
+	struct list_head cpdev_list;  //list of cpdevs cloned from this base dev
+	struct list_head in_bep_list;     // list of base in endpoints
+	struct list_head out_bep_list;     // list of base out endpoints
+	int data_in_bep_num;   //data in ep number
+	int data_out_bep_num;  //data out ep number
+
+	struct usb_driver* usb_driver;  		 
+	struct usb_device_id* usb_id_table;  
+	int vid;							   
+	int pid;							   
+	int intf_num;   					   
+	int alt_intf_num;
+	int usb_bus;
+	int usb_addr;
+	int feature_flags;
+	int base_id;                  //unique id of the first clone to plug
+	cp_lkm_usb_state_t base_state;   					
+
+	struct sk_buff_head in_q;     //recv skb's are stored here while down at usb waiting to be filled with recv data 
+	struct sk_buff_head out_q;    //send skb's are stored here while down at usb waiting to be transmitted
+	struct sk_buff_head ctrlq;    //ctrl skb's are stored here while down at usb waiting to be filled or transmitted
+	struct sk_buff_head data_tx_done;  //tx skb's are stored here while waiting to be freed
+	struct sk_buff_head data_rx_done;  //recv and ctrl skb's are stored here while waiting to have recv data processed
+	struct sk_buff_head other_done;    //completed non-data ('other') skb's are stored here while waiting to be processed and freed
+
+	u32 data_q_len; 				   // holds count of data pkts (both rx and tx) needing to be processed
+	spinlock_t data_q_lock; 		   // lock to keep data_q_len sync'd
+	spinlock_t processing_state_lock;
+	cp_lkm_usb_process_state_t processing_state;
+	spinlock_t other_state_lock;
+	cp_lkm_usb_process_state_t other_state;
+	bool scheduled;  //tasklet has been scheduled to process pending work
+
+	struct tasklet_struct other_process_tasklet;
+	struct tasklet_struct data_process_tasklet;
+
+	int rx_schedule_threshold;
+	int tx_schedule_threshold;
+	int tx_resume_threshold;
+
+	struct work_struct  kevent;
+	char usb_drvr_name[MAX_USB_DRVR_NAME];
+	void* wrapper_ctxt;
+	int wrapper_hdr_size;
+	int pm_hdr_size;
+	int pm_hdr_offset;
+
+	struct usb_interface* intf;
+	struct usb_device *udev;
+
+	int plug_result;
+	bool disconnect_wait;
+
+	struct timer_list rx_delay;  
+
+	int tx_usb_q_count;
+	bool tx_paused;
+
+	struct timer_list usb_pause_stuck_timer;  
+	int tx_proc_cnt;   //how many data tx pkts have we successfully sent
+	int tx_proc_cnt_at_pause; //how many data tx pkts we had sent when we paused
+
+	#if 0
+	//debug stuff, comment out
+	//unsigned int dbg_total_stuck_cnt;  
+	//unsigned int dbg_total_tx_at_stuck_cnt;  
+	//unsigned int dbg_total_tx_proc;
+	#endif
+};
+
+struct cp_lkm_usb_dev
+{
+	//init at open  	  
+	struct cp_lkm_usb_base_dev* cpbdev;
+	int unique_id;  	
+	int pm_id;  				 
+	int clone_num;
+	int mux_id;
+		  
+	cp_lkm_usb_state_t state;   					
+	struct list_head list;  	  //for inserting in base dev list
+
+	struct cp_lkm_edi* edi;
+
+	struct list_head in_ep_list;     //list of in endpoints on the dev
+	struct list_head out_ep_list;     //list of out endpoints on the dev
+	int data_in_ep_num;   //data in ep number
+	int data_out_ep_num;  //data out ep number
+
+	//for debug
+	#if 0
+	struct timer_list dbg_timer;
+	unsigned int dbg_total_rx_irq;
+	unsigned int dbg_total_tx_irq;
+	unsigned int dbg_total_rx_proc;
+	unsigned int dbg_total_d_done;
+	unsigned int dbg_total_o_done;
+	unsigned int dbg_total_pause;
+	unsigned int dbg_total_resume;
+	unsigned int dbg_total_max_work;
+	unsigned int dbg_total_timeout;
+	unsigned int dbg_total_budget;
+	unsigned int dbg_total_o_tasklet;
+	unsigned int dbg_total_d_resched;
+	unsigned int dbg_total_wq_sched;
+	unsigned int dbg_total_napi_sched;
+	unsigned int dbg_total_tasklet_sched;
+	unsigned int dbg_total_d_comp;
+	//unsigned int dbg_total_ic;
+	//unsigned int dbg_total_tc;
+	unsigned int dbg_total_rx_qlen;
+	unsigned int dbg_total_tx_qlen;
+	unsigned int dbg_total_num_hybrid_t;
+	unsigned int dbg_total_num_normal_t;
+	unsigned int dbg_total_num_hybrid;
+	unsigned int dbg_total_num_normal;
+	unsigned int dbg_total_num_d_timers;
+	unsigned int dbg_total_sch_sk;
+	#endif
+};
+
+struct cp_lkm_usb_ctx 
+{
+	struct cp_lkm_common_ctx common;
+	struct list_head dev_list;
+	spinlock_t  lock;    //used to protect access to dev_list from different instances. Also used to coordinate thread accesses from usb and cpmodem layers.
+						 //when one thread grabs the lock, no other threads can run (soft and hw IRQs can still run). The usb hub unplug handler runs on a thread.
+						 //this means once a thread grabs the lock it is guaranteed the modem can't unplug out from under it while it is doing its thing.
+};
+
+//static void cp_lkm_usb_dbg_memleak_timer (unsigned long param);
+//static void cp_lkm_usb_dbg_timer (unsigned long param);
+
+enum {
+	CP_LKM_STUCK_INIT = 0,
+	CP_LKM_STUCK_START,
+	CP_LKM_STUCK_STOP,
+	CP_LKM_STUCK_DEINIT
+};
+static void cp_lkm_usb_stuck_check(struct cp_lkm_usb_base_dev* cpbdev, int action);
+static void cp_lkm_usb_pause_stuck_timer(unsigned long param);
+
+static void cp_lkm_usb_delay_timer (unsigned long param);
+static void cp_lkm_usb_kevent (struct work_struct *work);
+static int cp_lkm_usb_open(struct cp_lkm_common_ctx *ctx);
+static int cp_lkm_usb_close(struct cp_lkm_common_ctx *ctx);
+static int cp_lkm_usb_handle_ioctl(struct cp_lkm_common_ctx *ctx, int cmd, void *k_argp);
+static int cp_lkm_usb_handle_msg(struct cp_lkm_common_ctx *ctx, struct cp_lkm_msg_hdr *hdr, struct sk_buff *skb);
+
+static int cp_lkm_usb_start_xmit (void *ctx, struct sk_buff *skb);
+static int cp_lkm_usb_start_xmit_common(void *ctx, struct sk_buff *skb, int src, struct cp_lkm_ep* ep);
+static void cp_lkm_usb_xmit_complete (struct urb *urb);
+static int cp_lkm_usb_submit_recv (struct cp_lkm_usb_base_dev* cpbdev, struct urb *urb, gfp_t flags, struct cp_lkm_base_ep* bep, bool data);
+static void cp_lkm_usb_recv_complete (struct urb *urb);
+
+static void cp_lkm_usb_other_recv_process (struct cp_lkm_usb_base_dev* cpbdev, struct sk_buff *skb_in);
+static void cp_lkm_usb_data_recv_process (struct cp_lkm_usb_base_dev* cpbdev, struct sk_buff *skb);
+static void cp_lkm_usb_ctrl_process (struct cp_lkm_usb_base_dev* cpbdev, struct sk_buff *skb_in);
+
+static int cp_lkm_usb_close_intf(struct cp_lkm_usb_close_intf* ci);
+static int cp_lkm_usb_unlink_urbs (struct cp_lkm_usb_base_dev *cpbdev, struct sk_buff_head *q, struct cp_lkm_base_ep* bep);
+
+static void cp_lkm_usb_process_other_done_tasklet (unsigned long param);
+static void cp_lkm_usb_process_data_done_tasklet (unsigned long param);
+static void cp_lkm_usb_rx_data_restock (struct cp_lkm_usb_base_dev* cpdev);
+static void cp_lkm_usb_rx_other_restock (struct cp_lkm_usb_base_dev* cpbdev);
+static void cp_lkm_usb_defer_kevent (struct cp_lkm_usb_base_dev* cpbdev, struct cp_lkm_base_ep* bep, int work);
+static bool cp_lkm_schedule_data_process(struct cp_lkm_usb_base_dev* cpbdev, bool if_data, bool is_resume, bool have_lock);
+
+static void cp_lkm_schedule_rx_restock(struct cp_lkm_usb_base_dev* cpbdev, struct cp_lkm_base_ep* bep);
+static int cp_lkm_usb_start_ctrl_xmit(void *ctx, struct sk_buff *skb_in);
+static int cp_lkm_usb_have_data(struct cp_lkm_usb_base_dev *cpbdev);
+
+static struct cp_lkm_usb_ctx cp_lkm_usb_mgr;
+
+// Knobs we can tweak on a processor-by-processor basis to maximize performance.
+// Dummy values are filled in here so we don't get warnings about using uninitialized variables.
+static int CP_LKM_PM_NAPI_WEIGHT = 0;       //budget we register with NAPI (max number of pkts it thinks we will process). 
+static int CP_LKM_USB_NAPI_MAX_WORK = 0;    //actual number of pkts we will process (we're not entirely honest with NAPI)
+static int CP_LKM_USB_MAX_RX_QLEN = 0;      //max number of rx data URBs we allow to flow in the shim (we alloc these)
+static int CP_LKM_USB_MAX_OTHER_QLEN = 0;    //max number of rx urbs on non-data endpoints
+static int CP_LKM_USB_TX_PAUSE_Q_PKTS = 0;     //max number of tx data URBs we allow to flow in the shim (alloc'd by network stack, we control this by pausing)
+static int CP_LKM_USB_TX_RESUME_Q_PKTS = 0;     //un-pause network at this number
+//static int CP_LKM_USB_TX_RESUME_Q_PKTS_HYBRID = 0;  //un-pause network at this number when in hybrid mode with pkt counting
+static int CP_LKM_USB_TX_SCHED_CNT = 0;      //How many done tx's we allow to accumulate before scheduling cleanup in normal mode
+//static int CP_LKM_USB_TX_SCHED_CNT_HYBRID = 0; //How many done tx's we allow to accumulate before scheduling cleanup in hybrid mode with pkt counting
+static int CP_LKM_USB_RX_SCHED_CNT = 0;      //How many done rx's we allow to accumulate before scheduling processing in normal mode
+//static int CP_LKM_USB_RX_SCHED_CNT_HYBRID = 0; //How many done rx's we allow to accumulate before scheduling processing in hybrid mode with pkt counting
+static int CP_LKM_USB_RESTOCK_MULTIPLE = 0;  //How many rx URBs we should restock as we process them (0 means don't restock as we go, 1 means every one, 2 means 1 out of every 2 etc)
+//static int CP_LKM_USB_DATA_MAX_PPS = 0;   //Packets per second that will cause us to transition from normal to hybrid mode when using pkt counting
+//static int 	CP_LKM_USB_DATA_MIN_PPS = 0;  //packets per second that will cause us to transition from hybrid back to normal when using pkt counting
+static int CP_LKM_USB_TASKLET_CNT = 0;    //in hybrid mode, schedule tasklet on cnts 0 to this number
+static int CP_LKM_USB_WORKQUEUE_CNT = 0;  //in hybrid mode, schedule workqueue on cnts CP_LKM_USB_TASKLET_CNT to this number, then start cnt over 
+static int CP_LKM_USB_PROCESS_DIVISOR = 0; //times to loop through the process loop, doing pkts/divisor pkts each time. Set to 1 to only process what was there when entering 
+//Broadcom EHCI controllers have issues we need to work around
+static int cp_lkm_is_broadcom = 0;
+
+#define CP_LKM_USB_PAUSED_CNT 5000
+
+//TODO remove
+#if 0
+static int g_dbg_data_skballoc_cnt = 0;
+static int g_dbg_other_skballoc_cnt = 0;
+static int g_dbg_ctrl_skballoc_cnt = 0;
+static int g_dbg_xmit_skballoc_cnt = 0;
+static int g_dbg_urballoc_cnt = 0;
+static int g_dbg_unplug_cnt = 0;
+static void cp_lkm_usb_urb_cnt(int inc)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&dbg_state_lock, flags);
+	g_dbg_urballoc_cnt += inc;
+	spin_unlock_irqrestore(&dbg_state_lock, flags); //release lock so interrupts can resume firing
+}
+static void cp_lkm_usb_cnts(int state, int inc)
+{
+	#if 1
+	unsigned long flags;
+	spin_lock_irqsave(&dbg_state_lock, flags);
+
+	switch (state) {
+		case in_other_start:
+		case in_other_done:
+		case in_other_cleanup:
+			g_dbg_other_skballoc_cnt+=inc;
+			break;
+		case ctrl_start:
+		case ctrl_done:
+			g_dbg_ctrl_skballoc_cnt+=inc;
+			break;
+		case out_start:
+		case out_done:
+			g_dbg_xmit_skballoc_cnt+=inc;
+			break;
+		case in_data_start:
+		case in_data_done:
+		case in_data_cleanup:
+			g_dbg_data_skballoc_cnt+=inc;
+			break;
+		case unlink_start:
+			g_dbg_unplug_cnt+=inc;
+			break;
+		default:
+			printk("!!clean: unknown skb state: %d\n",state);
+			break;
+	}
+	spin_unlock_irqrestore(&dbg_state_lock, flags); 
+	#endif
+}
+#endif
+
+static struct cp_lkm_usb_dev* cp_lkm_usb_find_muxed_dev(struct cp_lkm_usb_base_dev* cpbdev, int mux_id)
+{
+	struct list_head *pos;
+	list_for_each(pos, &cpbdev->cpdev_list){
+		struct cp_lkm_usb_dev* cpdev = list_entry(pos, struct cp_lkm_usb_dev, list);
+		//printk("%s() cpdev: %p, cpdev->mux_id: %d\n", __FUNCTION__, cpdev, cpdev->mux_id);
+		if(cpdev->mux_id == mux_id) {
+			return cpdev;
+		}
+	}
+	return NULL;
+}
+
+static struct cp_lkm_usb_dev* cp_lkm_usb_find_dev(int uniqueid)
+{
+	struct list_head *bpos;
+	struct list_head *pos;
+	list_for_each(bpos, &cp_lkm_usb_mgr.dev_list){
+		struct cp_lkm_usb_base_dev* cpbdev = list_entry(bpos, struct cp_lkm_usb_base_dev, list);
+		list_for_each(pos, &cpbdev->cpdev_list){
+			struct cp_lkm_usb_dev* cpdev = list_entry(pos, struct cp_lkm_usb_dev, list);
+			if(cpdev->unique_id == uniqueid) {
+				return cpdev;
+			}
+		}
+	}
+	return NULL;
+}
+
+#define CP_LKM_DEV_MATCH_ALL 1
+#define CP_LKM_DEV_MATCH_BUS_ADDR_ONLY 2
+
+// Find base device from its bus, addr and unique id
+static struct cp_lkm_usb_base_dev* cp_lkm_usb_find_base_dev(int bus, int addr, int unique_id, int match)
+{
+	struct list_head *pos;
+	struct list_head *bpos;
+	list_for_each(bpos, &cp_lkm_usb_mgr.dev_list){
+		struct cp_lkm_usb_base_dev* cpbdev = list_entry(bpos, struct cp_lkm_usb_base_dev, list);
+		if(cpbdev->usb_bus == bus && cpbdev->usb_addr == addr) {
+			if (match == CP_LKM_DEV_MATCH_BUS_ADDR_ONLY) {
+				return cpbdev;
+			}
+			if (cpbdev->base_id == unique_id) {
+				//matches the base_id so don't need to look further
+				return cpbdev;
+			}
+			//look to see if matches the unique_id of one of the cpdevs (only hit this case when running clones)
+			list_for_each(pos, &cpbdev->cpdev_list){
+				struct cp_lkm_usb_dev* cpdev = list_entry(pos, struct cp_lkm_usb_dev, list);
+				if (cpdev->unique_id == unique_id) {
+					return cpbdev;
+				}
+			}
+		}
+	}
+	return NULL;
+}
+
+/*
+static struct cp_lkm_usb_dev* cp_lkm_usb_get_head_dev(void)
+{
+	struct list_head *bpos;
+	struct list_head *pos;
+	list_for_each(bpos, &cp_lkm_usb_mgr.dev_list){
+		struct cp_lkm_usb_base_dev* cpbdev = list_entry(bpos, struct cp_lkm_usb_base_dev, list);
+		list_for_each(pos, &cpbdev->cpdev_list){
+			struct cp_lkm_usb_dev* cpdev = list_entry(pos, struct cp_lkm_usb_dev, list);
+			return cpdev;
+		}
+	}
+	return NULL;
+}
+*/
+
+// pause or unpause all cpdevs associated with this cpbdev
+static void cp_lkm_usb_dev_pause(struct cp_lkm_usb_base_dev* cpbdev, bool pause)
+{
+	struct list_head *pos;
+
+	list_for_each(pos, &cpbdev->cpdev_list){
+		struct cp_lkm_usb_dev* cpdev = list_entry(pos, struct cp_lkm_usb_dev, list);
+		if (pause) {
+			if(cpdev->edi->pm_send_pause) {
+				cpdev->edi->pm_send_pause(cpdev->edi->pm_recv_ctx);
+				//cpdev->dbg_total_pause++;
+			}
+		}
+		else{
+			if (cpdev->edi->pm_send_resume) {
+				//cpdev->dbg_total_resume++;
+				cpdev->edi->pm_send_resume(cpdev->edi->pm_recv_ctx);
+			}
+		}
+	}
+	cpbdev->tx_paused = pause;
+}
+
+static void cp_lkm_usb_clean_list(struct sk_buff_head* list)
+{
+	struct sk_buff *skb;
+	struct skb_data *entry;
+
+	while((skb = skb_dequeue(list)) != NULL){
+		DEBUG_TRACE("%s() found a straggler", __FUNCTION__);
+		entry = (struct skb_data *) skb->cb;
+		if(entry->urb) {
+			//cp_lkm_usb_urb_cnt(-1);
+			usb_free_urb (entry->urb);
+		}
+		//cp_lkm_usb_cnts(entry->state, -1);
+		dev_kfree_skb_any(skb);
+	}
+}
+
+static void cp_lkm_usb_mark_as_dead(struct cp_lkm_usb_dev* cpdev)
+{
+	cpdev->edi->usb_send_ctx = NULL;
+	if(cpdev->state != CP_LKM_USB_DEAD) {
+		LOG("Device with id:%d unplugged", cpdev->unique_id);
+	}
+	cpdev->state = CP_LKM_USB_DEAD;
+}
+
+static void cp_lkm_usb_mark_base_as_dead(struct cp_lkm_usb_base_dev* cpbdev)
+{
+	cpbdev->base_state = CP_LKM_USB_DEAD;
+}
+
+static struct cp_lkm_base_ep* cp_lkm_usb_get_bep(struct cp_lkm_usb_base_dev* cpbdev, int ep_num)
+{
+	struct cp_lkm_base_ep* bep = NULL;
+	struct list_head *entry, *nxt, *head;
+
+	if(USB_DIR_IN & ep_num) {
+		//printk("%s() search IN list for ep_num: 0x%x\n", __FUNCTION__, ep_num);
+		head = &cpbdev->in_bep_list;
+	}
+	else{
+		//printk("%s() search OUT list for ep_num: 0x%x\n", __FUNCTION__, ep_num);
+		head = &cpbdev->out_bep_list;
+	}
+
+	list_for_each_safe(entry, nxt, head) {
+		bep = list_entry(entry, struct cp_lkm_base_ep, list);
+		if (bep->ep_num == ep_num) {
+			//printk("%s() found ep_num: %d\n", __FUNCTION__, ep_num);
+			return bep;
+		}
+	}
+	//printk("%s() didn't find ep_num: %d\n", __FUNCTION__,ep_num);
+
+	return NULL;
+}
+
+static struct cp_lkm_ep* cp_lkm_usb_get_ep(struct cp_lkm_usb_dev* cpdev, int ep_num)
+{
+	struct cp_lkm_ep* ep = NULL;
+	struct list_head *entry, *nxt, *head;
+
+	if(USB_DIR_IN & ep_num) {
+		//printk("%s() search IN list for ep_num: 0x%x\n", __FUNCTION__, ep_num);
+		head = &cpdev->in_ep_list;
+	}
+	else{
+		//printk("%s() search OUT list for ep_num: 0x%x\n", __FUNCTION__, ep_num);
+		head = &cpdev->out_ep_list;
+	}
+
+	list_for_each_safe(entry, nxt, head) {
+		ep = list_entry(entry, struct cp_lkm_ep, list_cpdev);
+		if (ep->ep_num == ep_num) {
+			//printk("%s() found ep_num: %d\n", __FUNCTION__, ep_num);
+			return ep;
+		}
+	}
+	//printk("%s() didn't find ep_num: %d\n", __FUNCTION__,ep_num);
+
+	return NULL;
+}
+
+static void cp_lkm_usb_bep_finalize(void *arg)
+{
+	struct cp_lkm_base_ep* bep = (struct cp_lkm_base_ep*)arg;
+	struct list_head *entry, *nxt;
+	struct cp_lkm_ep *ep;
+
+	//printk("%s() start\n", __FUNCTION__);
+	//todo remove
+	//del_timer_sync(&cpdev->dbg_timer);
+
+	//printk("%s() - free eps\n",__FUNCTION__);
+	list_for_each_safe(entry, nxt, &bep->eps) {
+		ep = list_entry(entry, struct cp_lkm_ep, list_bep);
+		//printk("%s() - free ep: %p from bep: %p\n",__FUNCTION__,ep,bep);
+		list_del(&ep->list_bep);
+		memref_deref(ep);
+	}
+
+}
+
+static void cp_lkm_usb_ep_finalize(void *arg)
+{
+	//struct cp_lkm_ep* ep = (struct cp_lkm_ep*)arg;
+	//printk("%s() - free ep: %p, ep_num: 0x%x\n",__FUNCTION__,arg ,ep->ep_num);
+}
+
+static struct cp_lkm_ep* cp_lkm_usb_create_ep(struct cp_lkm_usb_dev* cpdev, int ep_num)
+{
+	struct cp_lkm_ep* ep;
+	struct cp_lkm_base_ep* bep;
+	struct cp_lkm_usb_base_dev* cpbdev;
+
+	DEBUG_ASSERT(cpdev, "cpdev is null");
+	cpbdev = cpdev->cpbdev;
+	DEBUG_ASSERT(cpbdev, "cpbdev is null");
+
+	//see if already exists first
+	ep = cp_lkm_usb_get_ep(cpdev, ep_num);
+	if(ep) {
+		DEBUG_TRACE("%s() ep: %p already exists", __FUNCTION__, ep);
+		//printk("%s() ep: 0x%x already exists\n", __FUNCTION__, ep_num);
+		return ep;
+	}
+	//printk("%s() - create new ep, cpdev: %p, ep_num: 0x%x\n",__FUNCTION__,cpdev, ep_num);
+
+	//Need to create new ep and possibly a new bep. We will alloc and init everything first and
+	//then if that all works, we will put everything in its proper place (in lists and stuff)
+	ep = memref_alloc_and_zero(sizeof(struct cp_lkm_ep), cp_lkm_usb_ep_finalize);
+	if(!ep) {
+		DEBUG_ERROR("%s() failed to alloc new ep", __FUNCTION__);
+		return NULL;
+	}
+	INIT_LIST_HEAD(&ep->list_bep);
+	INIT_LIST_HEAD(&ep->list_cpdev);
+	ep->ep_num = ep_num;
+
+	//may need to create a new base ep if this is the first time we've seen this endpoint number and direction
+	//this is always the case for non-cloned interfaces
+	bep = cp_lkm_usb_get_bep(cpbdev, ep_num);
+	if (!bep) {
+		bep = memref_alloc_and_zero(sizeof(struct cp_lkm_base_ep), cp_lkm_usb_bep_finalize);
+		if(!bep) {
+			DEBUG_ERROR("%s() failed to alloc new ep", __FUNCTION__);
+			memref_deref(ep);
+			return NULL;
+		}
+		//printk("%s() - create new bep: %p, cpbdev: %p, ep_num: 0x%x\n",__FUNCTION__,bep, cpbdev, ep_num);
+		bep->ep_num = ep_num;
+		bep->cpbdev = cpbdev;
+		INIT_LIST_HEAD(&bep->list);
+		INIT_LIST_HEAD(&bep->eps);
+		if(USB_DIR_IN & ep_num) {
+			list_add_tail(&bep->list, &cpbdev->in_bep_list);
+		}
+		else{
+			list_add_tail(&bep->list, &cpbdev->out_bep_list);
+		}
+	}
+
+	//if we get here, everything alloc'd ok, so we can insert things in the lists and stuff
+
+	// Each ep will have two memrefs, one from the alloc which is for entry in the cpdev list
+	// and another for entry into the bep list. This way the ep won't be freed until it is removed 
+	// from both lists at unplug time
+	ep->cpdev = cpdev;
+	ep->bep = bep;
+	if(USB_DIR_IN & ep_num) {
+		list_add_tail(&ep->list_cpdev, &cpdev->in_ep_list);
+	}
+	else{
+		list_add_tail(&ep->list_cpdev, &cpdev->out_ep_list);
+	}
+	memref_ref(ep);
+	list_add_tail(&ep->list_bep, &bep->eps);
+	return ep;
+
+}
+
+// cp_lkm_usb_plug_intf is called by cpusb via the ioctl. It registers a driver for the interface.
+// This probe function is then called by the lower usb layer so we can claim that interface. 
+int cp_lkm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
+{
+	struct cp_lkm_usb_base_dev* cpbdev;
+	struct usb_device* udev;
+	struct usb_host_interface* interface;
+	int unique_id;
+	//unsigned long flags;
+	int rc;
+	uintptr_t tmp_uid;
+
+	usb_get_intf(intf);
+
+	//printk("%s()\n",__FUNCTION__);
+
+	udev = interface_to_usbdev (intf);
+	interface = intf->cur_altsetting;
+
+	unique_id = (int)id->driver_info;
+	tmp_uid = unique_id;
+	spin_lock(&cp_lkm_usb_mgr.lock);
+
+	// Error scenario to watch for here:
+	//   1. Device unplugs and replugs before the upper app detects the unplug and calls our unplug_intf. In
+	//  	this case this driver is still registered and will get the new probe (we don't want this, we want the app driver
+	//  	to get the plug and claim the device originally).  When disconnect happens we set the state to DEAD. If we get
+	//  	a probe on a dead device, don't take it.
+	cpbdev = cp_lkm_usb_find_base_dev(udev->bus->busnum, udev->devnum, unique_id, CP_LKM_DEV_MATCH_ALL);
+	if(!cpbdev || cpbdev->base_state == CP_LKM_USB_DEAD) {
+		spin_unlock(&cp_lkm_usb_mgr.lock);
+
+		DEBUG_TRACE("%s() no cpdev or already dead", __FUNCTION__);
+		return -ENXIO;
+	}
+
+	//make sure it is for our device (match the usb addresses)
+	//printk("%s() id: %d ouraddr:%d, probeaddr:%d, ourintf:%d, probeintf:%d!\n", __FUNCTION__, unique_id,
+	 //		 cpbdev->usb_addr,udev->devnum,cpbdev->intf_num,interface->desc.bInterfaceNumber);
+	if(cpbdev->usb_bus != udev->bus->busnum || cpbdev->usb_addr != udev->devnum || cpbdev->intf_num != interface->desc.bInterfaceNumber) {
+		spin_unlock(&cp_lkm_usb_mgr.lock);
+
+		DEBUG_TRACE("%s() reject ourbus: %d, probebus: %d, ouraddr:%d, probeaddr:%d, ourintf:%d, probeintf:%d!", __FUNCTION__,
+				   cpbdev->usb_bus, udev->bus->busnum, cpbdev->usb_addr,udev->devnum,cpbdev->intf_num,interface->desc.bInterfaceNumber);
+		return -ENXIO;
+	}
+	cpbdev->intf = intf;
+	cpbdev->udev = udev;
+
+	spin_unlock(&cp_lkm_usb_mgr.lock);
+
+	if(cpbdev->alt_intf_num) {
+		rc = usb_set_interface(udev, cpbdev->intf_num, cpbdev->alt_intf_num);
+		if(rc) {
+			DEBUG_ERROR("%s() set intf failed :%d", __FUNCTION__,rc);
+			cpbdev->plug_result = -1; //only set this on failure, not reject
+			return -1;
+		}
+	}
+
+	spin_lock(&cp_lkm_usb_mgr.lock);
+	cpbdev->base_state = CP_LKM_USB_CTRL;
+
+	usb_set_intfdata(intf, (void*)tmp_uid);
+	usb_get_dev (udev);
+	memref_ref(cpbdev);
+	spin_unlock(&cp_lkm_usb_mgr.lock);
+
+	cp_lkm_usb_stuck_check(cpbdev, CP_LKM_STUCK_INIT);
+
+	//throughput control stuff
+	cpbdev->rx_schedule_threshold = CP_LKM_USB_RX_SCHED_CNT;
+	cpbdev->tx_schedule_threshold = CP_LKM_USB_TX_SCHED_CNT;
+	cpbdev->tx_resume_threshold = CP_LKM_USB_TX_RESUME_Q_PKTS;
+
+	
+	//todo remove
+	//if (!dbg_memleak_timer_started) {
+	//	dbg_memleak_timer_started = 1;
+	//	dbg_memleak_timer.function = cp_lkm_usb_dbg_memleak_timer;
+	//	dbg_memleak_timer.data = 0;
+
+	//	init_timer(&dbg_memleak_timer);
+	//	mod_timer(&dbg_memleak_timer, jiffies + msecs_to_jiffies(20000));
+	//}
+	//if (dbg_state_init == 0) {
+	//	spin_lock_init(&dbg_state_lock);
+	//	dbg_state_init = 1;
+	//}
+
+
+
+	DEBUG_TRACE("%s() probe done", __FUNCTION__);
+	return 0;
+}
+
+static bool cp_lkm_usb_shuter_down_do_pm_unlink(void* ctx1, void* ctx2)
+{
+	struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev*)ctx1;
+	struct cp_lkm_usb_dev* cpdev;
+	struct list_head *pos;
+	unsigned long flags;
+	//Unlink from the pm and disable the data state machine 
+	bool done = false;
+	spin_lock_irqsave(&cpbdev->processing_state_lock, flags);
+	if(cpbdev->processing_state == USB_PROCESS_STATE_IDLE){
+		cpbdev->processing_state = USB_PROCESS_STATE_PAUSED; //data soft interrupt handlers now won't run
+
+		spin_lock(&cpbdev->data_q_lock);
+		cpbdev->data_q_len = CP_LKM_USB_PAUSED_CNT;
+		spin_unlock(&cpbdev->data_q_lock);  //usb hw interrupts now won't schedule soft interrupt handlers
+
+		spin_unlock_irqrestore(&cpbdev->processing_state_lock, flags); //release lock so interrupts can resume firing
+		//unlink the pm side for all cpdevs associated with this cpbdev. Once this returns we are guaranteed not to get any new xmit skb's from the pm
+		list_for_each(pos, &cpbdev->cpdev_list){
+			cpdev = list_entry(pos, struct cp_lkm_usb_dev, list);
+			LOG("Unlink cpdev: %p from pm", cpdev);
+			cp_lkm_pm_usb_link(cpdev->edi, cpdev->pm_id, 0);
+			cpdev->edi->usb_send_ctx = NULL;
+		}
+
+		spin_lock_irqsave(&cpbdev->processing_state_lock, flags);
+		done = true;
+	}
+	spin_unlock_irqrestore(&cpbdev->processing_state_lock, flags);
+	return done;
+}
+
+static bool cp_lkm_usb_shuter_down_do_other_tasklet(void* ctx1, void* ctx2)
+{
+	struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev*)ctx1;
+	unsigned long flags;
+	bool done = false;
+	spin_lock_irqsave(&cpbdev->other_state_lock, flags);
+	if(cpbdev->other_state == USB_PROCESS_STATE_IDLE){
+		cpbdev->other_state = USB_PROCESS_STATE_PAUSED;
+		done = true;
+	}
+	spin_unlock_irqrestore(&cpbdev->other_state_lock, flags);
+	return done;
+}
+
+static bool cp_lkm_usb_shuter_down_do_empty_queues(void* ctx1, void* ctx2)
+{
+	struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev*)ctx1;
+	bool done = false;
+
+	if (skb_queue_empty(&cpbdev->in_q) &&
+		skb_queue_empty(&cpbdev->out_q) &&
+		skb_queue_empty(&cpbdev->ctrlq)){
+		done = true;
+	}
+	return done;
+}
+
+static void cp_lkm_usb_shuter_down(struct cp_lkm_usb_base_dev* cpbdev)
+{
+	struct list_head *entry, *nxt;
+	struct cp_lkm_base_ep *bep;
+
+
+	//printk("%s() done\n", __FUNCTION__);
+
+	//Unlink from the pm and disable the data state machine 
+	LOG("Unlink cpdev from pm");
+	cp_lkm_do_or_die(cpbdev, NULL, cp_lkm_usb_shuter_down_do_pm_unlink, CP_LKM_TIMEOUT_MS, CP_LKM_ITER, "Failed to unlink pm from cpdev");
+
+	//disable the 'other' tasklet
+	LOG("Disable cpdev other tasklet");
+	cp_lkm_do_or_die(cpbdev, NULL, cp_lkm_usb_shuter_down_do_other_tasklet, CP_LKM_TIMEOUT_MS, CP_LKM_ITER, "Failed to shutdown cpdev other tasklet");
+	
+	//Once we get here, no xmits can happen and no recv or xmit done processing can happen, so no new kevents can be
+	//scheduled and we can stop them here.
+	//Clear all the flags before flushing the kevents so that we won't try to do anything during the kevent callback.
+	list_for_each_safe(entry, nxt, &cpbdev->in_bep_list) {
+		bep = list_entry(entry, struct cp_lkm_base_ep, list);
+		bep->err_flags = 0;
+		bep->con_flags = 0;
+	}
+	list_for_each_safe(entry, nxt, &cpbdev->out_bep_list) {
+		bep = list_entry(entry, struct cp_lkm_base_ep, list);
+		bep->err_flags = 0;
+		bep->con_flags = 0;
+	}
+
+	//This forces the kernel to run all scheduled kevents, so any of our pending ones will run. (Note: Make sure 
+	//our kevent handlers check to see if we are attached before doing anything so that we don't schedule anything new while
+	//shutting down) 
+	LOG("Cancel cpdev kevents");
+	cancel_work_sync(&cpbdev->kevent);
+
+	//Make sure all the urbs have been cancelled
+	// ensure there are no more active urbs
+	//set_current_state(TASK_UNINTERRUPTIBLE);
+	//these cause the urbs to be cancelled and the callbacks to be called. The urbs are removed from
+	//the queues in the callbacks.
+	cp_lkm_usb_unlink_urbs (cpbdev, &cpbdev->out_q, NULL);
+	cp_lkm_usb_unlink_urbs (cpbdev, &cpbdev->in_q, NULL);
+	cp_lkm_usb_unlink_urbs (cpbdev, &cpbdev->ctrlq, NULL);
+
+	LOG("Wait for all cpdev urbs to be returned");
+	cp_lkm_do_or_die(cpbdev, NULL, cp_lkm_usb_shuter_down_do_empty_queues, CP_LKM_TIMEOUT_MS, CP_LKM_ITER, "Failed to empty cpdev queues");
+
+	//shutdown timer and tasklets
+	LOG("Shutdown cpdev timers and tasklets");
+	del_timer_sync (&cpbdev->rx_delay);
+	cp_lkm_usb_stuck_check(cpbdev, CP_LKM_STUCK_DEINIT);
+
+	tasklet_kill(&cpbdev->data_process_tasklet);
+	tasklet_kill(&cpbdev->other_process_tasklet);
+
+	// All outstanding transfers are back, so now we can clean up.
+	cp_lkm_usb_clean_list(&cpbdev->data_tx_done);
+	cp_lkm_usb_clean_list(&cpbdev->data_rx_done);
+	cp_lkm_usb_clean_list(&cpbdev->other_done);
+
+	//printk("%s() done\n", __FUNCTION__);
+	usb_set_intfdata(cpbdev->intf, NULL);
+	usb_put_intf(cpbdev->intf);
+	cpbdev->intf = NULL;
+	LOG("cpdev unplug done");
+
+	return;
+
+}
+
+// Called when the USB hub detects that our device just unplugged.
+// Called in a thread context. We do the lower usb cleanup here because there
+// are some things that have to be done before exiting from disconnect. 
+// We don't clean up the upper layer stuff because the upper layer doesn't yet know
+// we are unplugged and will continue to send us data. When the upper layer gets the 
+// unplug notify, it will call cp_lkm_usb_unplug_intf. We finish cleaning up in there.
+void cp_lkm_usb_disconnect(struct usb_interface *intf)
+{
+	struct cp_lkm_usb_dev* cpdev;
+	struct cp_lkm_usb_base_dev* cpbdev;
+	//unsigned long flags;
+	int unique_id;
+
+	// We don't want this function to run at the same time as any of the calls from the modem common stack (ioctl and write)
+	// They all grab this lock for the duration of their calls. They also check the state of the device before proceeding.
+	// Once we have the lock, we know none of them are running. Any new calls will block waiting on the lock. 
+	// If we then change the state to dead we can release the lock while we do the rest of cleanup. When they get the lock
+	// they will see the state is dead and error out and return immediately. This prevents us from blocking the common modem thread.
+	spin_lock(&cp_lkm_usb_mgr.lock);
+
+	//If cpdev is not in intf, then this is the close->disconnect path, so do nothing
+	unique_id = (uintptr_t)usb_get_intfdata(intf);
+
+	//struct usb_device *udev;
+	//printk("%s() start, id: %d\n", __FUNCTION__, unique_id);
+
+	//see if device already went away, this should be impossible
+	//the unique id is always for the first instance if running clones
+	cpdev = cp_lkm_usb_find_dev(unique_id);
+	if(!cpdev) {
+		//printk("%s() no cpdev, id: %d\n", __FUNCTION__, unique_id);
+		spin_unlock(&cp_lkm_usb_mgr.lock);
+		return;
+	}
+	cpbdev = cpdev->cpbdev;
+	cpbdev->disconnect_wait = true;
+
+	// Mark the device as dead so we won't start anything new. 
+	// NOTE: make sure nothing new can be started on the USB side from this point on.
+	//  	 This includes transmits from the network. Transmits from cpusb.
+	//  	 Recv packets, halt clears, ioctls etc
+	cp_lkm_usb_mark_base_as_dead(cpbdev);
+
+	// Once the device is marked dead, we can release the lock. This is so write and ioctl from the modem stack
+	// can return quickly with errors instead of blocking while the disconnect completes. 
+	spin_unlock(&cp_lkm_usb_mgr.lock);
+
+	cp_lkm_usb_shuter_down(cpbdev);
+
+	cpbdev->disconnect_wait = false;
+	memref_deref(cpbdev);
+
+	//printk("%s() done id: %d\n", __FUNCTION__,unique_id);
+}
+
+static void cp_lkm_usb_base_dev_finalize(void *arg)
+{
+	struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev*)arg;
+	struct list_head *entry, *nxt;
+	struct cp_lkm_base_ep *bep;
+	//int unique_id = cpbdev->base_id;
+	//printk("%s()\n", __FUNCTION__);
+
+	//if was added to the list, need to remove it.
+	//if it was added to the list, we need to remove it.
+		spin_lock(&cp_lkm_usb_mgr.lock);
+		list_del(&cpbdev->list);
+		//printk("%s() free cpbdev from global list \n", __FUNCTION__);
+		spin_unlock(&cp_lkm_usb_mgr.lock);
+	}
+
+	//These should already be empty, but just in case
+	//printk("%s() clean lists\n", __FUNCTION__);
+	cp_lkm_usb_clean_list(&cpbdev->in_q);
+	cp_lkm_usb_clean_list(&cpbdev->out_q);
+	cp_lkm_usb_clean_list(&cpbdev->ctrlq);
+	cp_lkm_usb_clean_list(&cpbdev->data_tx_done);
+	cp_lkm_usb_clean_list(&cpbdev->data_rx_done);
+	cp_lkm_usb_clean_list(&cpbdev->other_done);
+
+	if(cpbdev->wrapper_ctxt) {
+		//printk("%s() free wrapper\n", __FUNCTION__);
+		cp_lkm_wrapper_instance_free(cpbdev->wrapper_ctxt);
+		cpbdev->wrapper_ctxt = NULL;
+	}
+	if(cpbdev->usb_driver) {
+		//printk("%s() free driver\n", __FUNCTION__);
+		kfree(cpbdev->usb_driver);
+		cpbdev->usb_driver = NULL;
+	}
+	if(cpbdev->usb_id_table) {
+		//printk("%s() free id table\n", __FUNCTION__);
+		kfree(cpbdev->usb_id_table);
+		cpbdev->usb_id_table = NULL;
+	}
+	if(cpbdev->udev) {
+		//printk("%s() free udev\n", __FUNCTION__);
+		usb_put_dev (cpbdev->udev);
+		cpbdev->udev = NULL;
+	}
+
+	//printk("%s() - free eps\n",__FUNCTION__);
+	list_for_each_safe(entry, nxt, &cpbdev->cpdev_list) {
+		struct cp_lkm_usb_dev* cpdev = list_entry(entry, struct cp_lkm_usb_dev, list);
+		//printk("%s() - free cpdev: %p from cpbdev: %p\n",__FUNCTION__, cpdev, cpbdev);
+		list_del(&cpdev->list);
+		memref_deref(cpdev);
+	}
+	list_for_each_safe(entry, nxt, &cpbdev->in_bep_list) {
+		bep = list_entry(entry, struct cp_lkm_base_ep, list);
+		//printk("%s() - free in bep: %p from cpbdev: %p\n",__FUNCTION__,bep, cpbdev);
+		list_del(&bep->list);
+		memref_deref(bep);
+	}
+	list_for_each_safe(entry, nxt, &cpbdev->out_bep_list) {
+		bep = list_entry(entry, struct cp_lkm_base_ep, list);
+		//printk("%s() - free out bep: %p from cpbdev: %p\n ",__FUNCTION__,bep, cpbdev);
+		list_del(&bep->list);
+		memref_deref(bep);
+	}
+	//printk("%s() done base_id: %d\n", __FUNCTION__,unique_id);
+
+}
+
+static void cp_lkm_usb_dev_finalize(void *arg)
+{
+	struct cp_lkm_usb_dev* cpdev = (struct cp_lkm_usb_dev*)arg;
+	struct list_head *entry, *nxt;
+	struct cp_lkm_ep *ep;
+
+	//printk("%s() start\n", __FUNCTION__);
+	//todo remove
+	//del_timer_sync(&cpdev->dbg_timer);
+
+	//printk("%s() - free eps\n",__FUNCTION__);
+	list_for_each_safe(entry, nxt, &cpdev->in_ep_list) {
+		ep = list_entry(entry, struct cp_lkm_ep, list_cpdev);
+		//printk("%s() - free ep: %p, num: %d from cpdev: %p\n",__FUNCTION__,ep, ep->ep_num, cpdev);
+		list_del(&ep->list_cpdev);
+		memref_deref(ep);
+	}
+	list_for_each_safe(entry, nxt, &cpdev->out_ep_list) {
+		ep = list_entry(entry, struct cp_lkm_ep, list_cpdev);
+		//printk("%s() - free ep: %p, num: %d from cpdev: %p\n",__FUNCTION__,ep, ep->ep_num, cpdev);
+		list_del(&ep->list_cpdev);
+		memref_deref(ep);
+	}
+
+	if(cpdev->edi) {
+		//printk("%s() free edi\n", __FUNCTION__);
+		cpdev->edi->usb_send_ctx = NULL;
+		cpdev->edi->usb_send = NULL;
+
+		memref_deref(cpdev->edi);
+		cpdev->edi = NULL;
+	}
+
+	//printk("%s() end \n", __FUNCTION__);
+}
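+
+/*
+ * Illustrative sketch of the memref idiom the two finalizers above rely on.
+ * Assumption inferred from usage in this file: memref_alloc_and_zero()
+ * returns a zeroed, ref-counted object and memref_deref() invokes the
+ * finalizer when the last reference drops. The example_* names are
+ * hypothetical.
+ */
+#if 0
+struct example_obj { void *priv; };
+
+static void example_finalize(void *arg)
+{
+	struct example_obj *obj = (struct example_obj *)arg;
+	kfree(obj->priv);	/* release what obj owns; obj itself is freed by memref */
+}
+
+static void example_usage(void)
+{
+	struct example_obj *obj = memref_alloc_and_zero(sizeof(*obj), example_finalize);
+	if (!obj)
+		return;
+	memref_deref(obj);	/* last deref -> example_finalize(obj) runs */
+}
+#endif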
+
+static int cp_lkm_usb_plug_intf(struct cp_lkm_usb_plug_intf* pi)
+{
+	int retval;
+	struct cp_lkm_usb_dev* cpdev = NULL;
+	struct cp_lkm_usb_base_dev* cpbdev = NULL;
+	bool need_new;
+	bool is_cloneable;
+
+	//Make sure we aren't going to overflow the skb space reserved for us to use
+	//DEBUG_ASSERT(sizeof(struct skb_data) < sizeof(((struct sk_buff*)0)->cb));
+	//DEBUG_INFO("%s(), skb_data size: %d, skb_buff cb size: %d",__FUNCTION__,sizeof(struct skb_data),sizeof(((struct sk_buff*)0)->cb));
+
+	// We need to alloc a new cpbdev on plug if:
+	// 1. The device is not cloned at this layer (thus each plug has its own cpbdev) 
+	//    Note: Some devices are cloned at other layers (cpusb_linux.c), so they can be running as clones in the system, but not at this layer.
+	//          This is why we can't just look at the clone_num to determine this.
+	// 2. It is cloneable and clone_num is 0 (only the first clone gets a new cpbdev, the rest share it)
+	is_cloneable = pi->feature_flags & CP_LKM_FEATURE_CLONE_MUXED_INTF;
+	need_new = !is_cloneable || (is_cloneable && pi->clone_num == 0);
+
+	//printk("%s() start id:%d vid/pid: 0x%x/0x%x, bus/addr: %d/%d, intf: %d, flags: 0x%x, clone: %d, mux: %d\n", __FUNCTION__, pi->unique_id, pi->vid, pi->pid, pi->bus, pi->addr, pi->intf_num, pi->feature_flags, pi->clone_num, pi->mux_id);
+
+	if (need_new) {
+		//first instance, so need a new cpbdev
+		cpbdev = memref_alloc_and_zero(sizeof(struct cp_lkm_usb_base_dev), cp_lkm_usb_base_dev_finalize);
+		if(!cpbdev) {
+			//printk("%s() failed to alloc cpbdev\n", __FUNCTION__);
+			goto init_fail;
+		}
+		//printk("%s() id: %d, alloc'd new cpbdev: %p\n", __FUNCTION__, pi->unique_id, cpbdev);
+		cpbdev->base_state = CP_LKM_USB_INIT;
+		cpbdev->vid = pi->vid;   							
+		cpbdev->pid = pi->pid;   							
+		cpbdev->intf_num = pi->intf_num;
+		cpbdev->alt_intf_num = pi->alt_intf_num;    
+		cpbdev->usb_bus = pi->bus;   				   
+		cpbdev->usb_addr = pi->addr;
+		cpbdev->feature_flags = pi->feature_flags;   				
+		cpbdev->base_id = pi->unique_id; 
+		INIT_LIST_HEAD(&cpbdev->in_bep_list);
+		INIT_LIST_HEAD(&cpbdev->out_bep_list);
+		INIT_LIST_HEAD(&cpbdev->list);
+		INIT_LIST_HEAD(&cpbdev->cpdev_list);
+		cpbdev->data_in_bep_num = pi->ep_in;
+		cpbdev->data_out_bep_num = pi->ep_out;
+
+		//alloc and register the usb driver
+		cpbdev->usb_driver = kzalloc(sizeof(struct usb_driver), GFP_KERNEL);
+		if(!cpbdev->usb_driver) {
+			//printk("%s() failed to alloc driver\n", __FUNCTION__);
+			goto init_fail;
+		}
+
+		cpbdev->usb_id_table = kzalloc(sizeof(struct usb_device_id)*2, GFP_KERNEL);
+		if(!cpbdev->usb_id_table) {
+			//printk("%s() failed to alloc table\n", __FUNCTION__);
+			goto init_fail;
+		}
+
+		cpbdev->usb_id_table[0].idVendor = cpbdev->vid;  
+		cpbdev->usb_id_table[0].idProduct = cpbdev->pid;
+		cpbdev->usb_id_table[0].match_flags = USB_DEVICE_ID_MATCH_DEVICE;
+		cpbdev->usb_id_table[0].driver_info = (unsigned long)pi->unique_id;
+
+		//create unique drvr string
+		sprintf(cpbdev->usb_drvr_name, USB_DRVR_FRMT_STR, pi->unique_id); 
+		cpbdev->usb_driver->name = cpbdev->usb_drvr_name;
+		cpbdev->usb_driver->probe = cp_lkm_usb_probe;
+		cpbdev->usb_driver->disconnect = cp_lkm_usb_disconnect;
+		cpbdev->usb_driver->id_table = cpbdev->usb_id_table;
+
+
+		skb_queue_head_init (&cpbdev->in_q);
+		skb_queue_head_init (&cpbdev->out_q);
+		skb_queue_head_init (&cpbdev->ctrlq);
+		skb_queue_head_init (&cpbdev->data_tx_done);
+		skb_queue_head_init (&cpbdev->data_rx_done);
+		skb_queue_head_init (&cpbdev->other_done);
+		cpbdev->data_q_len = 0;
+		spin_lock_init(&cpbdev->data_q_lock);
+		spin_lock_init(&cpbdev->processing_state_lock);
+		spin_lock_init(&cpbdev->other_state_lock);
+		cpbdev->processing_state = USB_PROCESS_STATE_IDLE;
+		cpbdev->other_state = USB_PROCESS_STATE_IDLE;
+		INIT_WORK(&cpbdev->kevent, cp_lkm_usb_kevent);
+
+		cpbdev->rx_delay.function = cp_lkm_usb_delay_timer; //TODO: this needs to handle the cpdev or cpbdev??
+		cpbdev->rx_delay.data = (unsigned long) cpbdev; //????? should this be cpdev??
+		init_timer (&cpbdev->rx_delay);
+
+		cpbdev->data_process_tasklet.func = cp_lkm_usb_process_data_done_tasklet; //TODO: modify to take cpbdev
+		cpbdev->data_process_tasklet.data = (unsigned long) cpbdev;
+
+		cpbdev->other_process_tasklet.func = cp_lkm_usb_process_other_done_tasklet; //TODO: modify to take cpbdev
+		cpbdev->other_process_tasklet.data = (unsigned long) cpbdev;
+
+		cpbdev->disconnect_wait = false;
+
+		spin_lock(&cp_lkm_usb_mgr.lock);
+		list_add_tail(&cpbdev->list, &cp_lkm_usb_mgr.dev_list);
+		spin_unlock(&cp_lkm_usb_mgr.lock);
+
+		// When we call register, it calls our probe function with all available matching interfaces. In probe
+		// we save the result of the probe so we can return fail here if it didn't go well
+		//printk("%s() reg drvr for vid:%x, pid:%x, addr:%d, intf:%d\n", __FUNCTION__, pi->vid,pi->pid,pi->addr,pi->intf_num);
+		retval = usb_register(cpbdev->usb_driver);
+		if(retval || cpbdev->plug_result != 0) {
+			//printk("%s() failed to register driver or probe failed retval:%d, plug_result:%d\n", __FUNCTION__, retval, cpbdev->plug_result);
+			goto init_fail;
+		}
+		cpbdev->base_state = CP_LKM_USB_CTRL;
+		DEBUG_TRACE("%s() done", __FUNCTION__);	
+	}
+	else{
+		//clone, should already have a base dev
+		cpbdev = cp_lkm_usb_find_base_dev(pi->bus, pi->addr, pi->unique_id, CP_LKM_DEV_MATCH_BUS_ADDR_ONLY);
+		if(!cpbdev) {
+			//printk("%s() failed to find cpbdev\n", __FUNCTION__);
+			goto init_fail;
+		}
+		//printk("%s() id: %d, already have cpbdev: %p\n", __FUNCTION__, pi->unique_id, cpbdev);
+	}
+
+	// make sure base dev has all the feature flags of every clone
+	cpbdev->feature_flags |= pi->feature_flags;
+
+	//printk("%s() id: %d, cpbdev: %p, alloc new cpdev\n", __FUNCTION__, pi->unique_id, cpbdev);
+	cpdev = memref_alloc_and_zero(sizeof(struct cp_lkm_usb_dev), cp_lkm_usb_dev_finalize);
+	if(!cpdev) {
+		//printk("%s() failed to alloc cpdev\n", __FUNCTION__);
+		goto init_fail;
+	}
+	//printk("%s() id: %d, cpdev: %p\n", __FUNCTION__, pi->unique_id, cpdev);
+
+	INIT_LIST_HEAD(&cpdev->in_ep_list);
+	INIT_LIST_HEAD(&cpdev->out_ep_list);
+	INIT_LIST_HEAD(&cpdev->list);
+	//add to list right away so if anything below fails, it will be cleaned up when cpbdev is cleaned up
+	list_add_tail(&cpdev->list, &cpbdev->cpdev_list);
+	cpdev->cpbdev = cpbdev;
+	cpdev->unique_id = pi->unique_id;  
+	//clone and mux are only used with muxed clone interfaces. 
+	cpdev->clone_num = (pi->feature_flags & CP_LKM_FEATURE_CLONE_MUXED_INTF) ? pi->clone_num : 0;
+	cpdev->mux_id = (pi->feature_flags & CP_LKM_FEATURE_CLONE_MUXED_INTF) ? pi->mux_id : CP_LKM_WRAPPER_DEFAULT_ID;
+	//printk("%s() unique_id: %d, clone: %d, mux_id: %d\n", __FUNCTION__, pi->unique_id, pi->clone_num, cpdev->mux_id);
+	cpdev->data_in_ep_num = pi->ep_in;
+	cpdev->data_out_ep_num = pi->ep_out;
+	//pre-create the data endpoints so they will be first in the list, since they are most often used
+	cp_lkm_usb_create_ep(cpdev, pi->ep_in);
+	cp_lkm_usb_create_ep(cpdev, pi->ep_out);
+	cpdev->edi = memref_alloc_and_zero(sizeof(struct cp_lkm_edi), NULL);
+	if(!cpdev->edi) {
+		//printk("%s() failed to alloc edi\n", __FUNCTION__);
+		goto init_fail;
+	}
+	cpdev->edi->usb_send = cp_lkm_usb_start_xmit;
+
+	//for debug, comment out before checkin
+	//cpdev->dbg_timer.function = cp_lkm_usb_dbg_timer;
+	//cpdev->dbg_timer.data = (unsigned long)cpdev;
+	//init_timer(&cpdev->dbg_timer);
+	//mod_timer(&cpdev->dbg_timer, jiffies + msecs_to_jiffies(10000));
+
+	//TODO CA: I think this shouldn't be set until open, commenting out for now to see if blows chow in plug fest
+	//cpdev->edi->usb_send_ctx = cpdev;
+
+	cpdev->state = CP_LKM_USB_CTRL;
+
+	//printk("%s() done success id: %d\n", __FUNCTION__, pi->unique_id);
+
+	return 0;
+
+init_fail:
+	if(cpbdev) {
+		//the finalizer for cpbdev does the clean up
+		memref_deref(cpbdev);
+	}
+	//returning an error to the modem stack on plug will cause it to hard reset
+	//the modem, thus causing the rest of the driver cleanup to occur
+	//printk("%s() open_intf fail\n", __FUNCTION__);
+	return -1;
+}
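+
+/*
+ * Illustrative sketch of the dynamic registration done in plug_intf above:
+ * each plugged interface gets its own heap-allocated usb_driver and a
+ * two-entry id table (real entry plus zeroed terminator) so the kernel
+ * matches exactly one vid/pid per registered driver instance. Standard USB
+ * core structs and calls; the example_* helper is hypothetical.
+ */
+#if 0
+static struct usb_driver *example_make_driver(u16 vid, u16 pid, const char *name)
+{
+	struct usb_driver *drv = kzalloc(sizeof(*drv), GFP_KERNEL);
+	struct usb_device_id *tbl = kzalloc(sizeof(*tbl) * 2, GFP_KERNEL);
+
+	if (!drv || !tbl) {
+		kfree(drv);
+		kfree(tbl);
+		return NULL;
+	}
+	tbl[0].match_flags = USB_DEVICE_ID_MATCH_DEVICE;
+	tbl[0].idVendor = vid;
+	tbl[0].idProduct = pid;
+	/* tbl[1] stays zeroed and terminates the table */
+
+	drv->name = name;
+	drv->id_table = tbl;
+	/* caller fills in probe/disconnect, then calls usb_register(drv),
+	   which invokes probe for every matching interface before returning */
+	return drv;
+}
+#endif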
+
+static int cp_lkm_usb_set_wrapper(struct cp_lkm_usb_set_wrapper* sw)
+{	//unsigned long flags;
+	struct cp_lkm_usb_dev* cpdev;
+	struct cp_lkm_usb_base_dev* cpbdev;
+	void* wrapper_info = NULL;
+	unsigned long not_copied;
+	int res = 0;
+	//printk("%s() unique_id: %d, clone: %d, mux_id: %d\n", __FUNCTION__, sw->unique_id, sw->clone_num, sw->mux_id);
+
+	spin_lock(&cp_lkm_usb_mgr.lock);
+	cpdev = cp_lkm_usb_find_dev(sw->unique_id);
+
+	if(!cpdev) {
+		spin_unlock(&cp_lkm_usb_mgr.lock);
+		//printk("%s() no cpdev found for id: %d\n", __FUNCTION__, sw->unique_id);
+		return -1;
+	}
+	cpbdev = cpdev->cpbdev;
+	if(cpbdev->base_state == CP_LKM_USB_DEAD){
+		//modem is unplugging, upper layer just doesn't know it yet, so act like ok until it finds out
+		spin_unlock(&cp_lkm_usb_mgr.lock);
+		//printk("%s() set_wrapper fail cpdev:%p, state:%d\n", __FUNCTION__, cpdev, cpdev->state);
+		return 0;
+	}
+
+// benk - what if wrapper_info_len is 0???
+	if(cpbdev->wrapper_ctxt){
+		//already have a wrapper so free it
+		cp_lkm_wrapper_instance_free(cpbdev->wrapper_ctxt);
+		cpbdev->wrapper_ctxt = NULL; //clear it so a failure below can't leave a dangling pointer
+	}
+
+	if(sw->wrapper_info_len) {
+		wrapper_info = kzalloc(sw->wrapper_info_len, GFP_KERNEL);
+		if(!wrapper_info) {
+			DEBUG_ERROR("%s() couldn't alloc wrapper info", __FUNCTION__);
+			res = -1;
+			goto set_wrapper_done;
+		}
+	}
+
+
+	//copy the wrapper info from user to kernel space
+	not_copied = copy_from_user(wrapper_info, sw->wrapper_info, sw->wrapper_info_len);
+	if (not_copied) {
+		DEBUG_ERROR("%s() couldn't copy wrapper info", __FUNCTION__);
+		res = -1;
+		goto set_wrapper_done;
+	}
+	//alloc the wrapper instance. On success it takes ownership of the wrapper_info and is responsible for freeing it
+	DEBUG_INFO("%s() wrapper: %d", __FUNCTION__, sw->wrapper);
+	cpbdev->wrapper_ctxt = cp_lkm_wrapper_instance_alloc(sw->wrapper, wrapper_info, sw->wrapper_info_len);
+	if(!cpbdev->wrapper_ctxt){
+		DEBUG_ERROR("%s() couldn't alloc wrapper", __FUNCTION__);
+		res = -1;
+		goto set_wrapper_done;
+	}
+	cpbdev->wrapper_hdr_size = cp_lkm_wrapper_hdr_size(cpbdev->wrapper_ctxt);
+	cp_lkm_wrapper_set_state(cpbdev->wrapper_ctxt, cpdev->mux_id, CP_LKM_WRAPPER_CTRL);
+
+	cpdev->clone_num = sw->clone_num;
+	cpdev->mux_id = sw->mux_id;
+
+
+set_wrapper_done:
+	if(wrapper_info) {
+		kfree(wrapper_info);
+	}
+
+	spin_unlock(&cp_lkm_usb_mgr.lock);
+	return res;
+
+}
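+
+/*
+ * Illustrative sketch of the user->kernel copy pattern in set_wrapper above:
+ * size the kernel buffer from the caller-supplied length, then treat any
+ * nonzero copy_from_user() return (bytes NOT copied) as failure. Standard
+ * kernel calls; the example_* name is hypothetical.
+ */
+#if 0
+static void *example_copy_blob(const void __user *ubuf, size_t len)
+{
+	void *kbuf;
+
+	if (!len)
+		return NULL;
+	kbuf = kzalloc(len, GFP_KERNEL);
+	if (!kbuf)
+		return NULL;
+	if (copy_from_user(kbuf, ubuf, len)) {
+		kfree(kbuf);
+		return NULL;
+	}
+	return kbuf;
+}
+#endif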
+
+static int cp_lkm_usb_set_mux_id(struct cp_lkm_usb_set_mux_id* smi)
+{	//unsigned long flags;
+	struct cp_lkm_usb_dev* cpdev;
+	//struct cp_lkm_usb_base_dev* cpbdev;
+	int res = 0;
+
+	//printk("%s()\n", __FUNCTION__);
+
+	spin_lock(&cp_lkm_usb_mgr.lock);
+	cpdev = cp_lkm_usb_find_dev(smi->unique_id);
+	if(!cpdev) {
+		spin_unlock(&cp_lkm_usb_mgr.lock);
+		//printk("%s() failed to find cpdev for id: %d\n", __FUNCTION__, smi->unique_id);
+		return -1;
+	}
+	if(cpdev->cpbdev->base_state == CP_LKM_USB_DEAD){
+		//modem is unplugging, upper layer just doesn't know it yet, so act like ok until it finds out
+		spin_unlock(&cp_lkm_usb_mgr.lock);
+		return 0;
+	}
+	cpdev->mux_id = smi->mux_id;
+	//printk("%s() unique_id: %d, mux_id: %d\n", __FUNCTION__, smi->unique_id, smi->mux_id);
+
+	spin_unlock(&cp_lkm_usb_mgr.lock);
+	return res;
+
+}
+
+static int cp_lkm_usb_open_intf(struct cp_lkm_usb_open_intf* oi)
+{
+	//unsigned long flags;
+	struct cp_lkm_usb_dev* cpdev;
+
+	//printk("%s() u-uid: %d\n", __FUNCTION__,oi->unique_id);
+
+	spin_lock(&cp_lkm_usb_mgr.lock);
+	cpdev = cp_lkm_usb_find_dev(oi->unique_id);
+
+	//if state isn't CP_LKM_USB_CTRL, then the interface either did not plug for some reason (i.e. didn't get probe from usb),
+	//or it plugged, but then unplugged before open was called.
+	if(!cpdev || cpdev->cpbdev->base_state  != CP_LKM_USB_CTRL) {
+		spin_unlock(&cp_lkm_usb_mgr.lock);
+		//printk("%s() open_intf fail cpdev:%p, state:%d\n", __FUNCTION__, cpdev, cpdev?cpdev->state:0xff);
+		return -1;
+	}
+	cpdev->state = CP_LKM_USB_ACTIVE;
+	cpdev->edi->usb_send_ctx = cpdev; //this allows the network side to call me
+	cp_lkm_wrapper_set_state(cpdev->cpbdev->wrapper_ctxt, cpdev->mux_id, CP_LKM_WRAPPER_ACTIVE);
+	spin_unlock(&cp_lkm_usb_mgr.lock);
+	//printk("%s() done\n", __FUNCTION__);
+	return 0;
+
+}
+
+static int cp_lkm_usb_close_intf(struct cp_lkm_usb_close_intf* ci)
+{
+	//unsigned long flags;
+	struct cp_lkm_usb_dev* cpdev;
+
+	//printk("%s() u-uid: %d\n", __FUNCTION__, ci->unique_id);
+
+	//down(&cp_lkm_usb_mgr.thread_sem);
+	spin_lock(&cp_lkm_usb_mgr.lock);
+	cpdev = cp_lkm_usb_find_dev(ci->unique_id);
+
+	if(!cpdev || cpdev->cpbdev->base_state  == CP_LKM_USB_DEAD) {
+		//device has already unplugged, or is half-unplugged, so don't allow this action to complete
+		spin_unlock(&cp_lkm_usb_mgr.lock);
+		//up(&cp_lkm_usb_mgr.thread_sem);
+		return 0;
+	}
+	cpdev->edi->usb_send_ctx = NULL;  //disconnect from network side so he won't send me any more data
+	cpdev->state = CP_LKM_USB_CTRL;
+	cp_lkm_wrapper_set_state(cpdev->cpbdev->wrapper_ctxt, cpdev->mux_id, CP_LKM_WRAPPER_CTRL);
+	spin_unlock(&cp_lkm_usb_mgr.lock);
+	//up(&cp_lkm_usb_mgr.thread_sem);
+	//printk("%s() done\n", __FUNCTION__);
+
+	return 0;
+}
+
+static bool cp_lkm_usb_unplug_do_disconnect_wait(void* ctx1, void* ctx2)
+{
+	struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev*)ctx1;
+	bool done = false;
+	if (cpbdev->disconnect_wait == false){
+		done = true;
+	}
+	return done;
+}
+
+/* 
+ * This function is called when the common modem stack wants to give up the interface. 
+ * There are two scenarios: 
+ *   1. Modem unplugs which leads to the following flow:
+ *  		-> cp_lkm_usb_disconnect is called by USB sublayer, it cleans up bottom half of cpdev and waits for common modem stack unplug
+ *  		-> common modem stack sees unplug event
+ *  		-> it calls this function to finish the cleanup and deregister the driver
+ *  		-> we are done
+ *  
+ *   2. Common modem stack decides to give up the interface due to one common
+ *  	modem driver relinquishing the modem and another common modem driver grabbing it.
+ *  	This leads to the following flow:
+ *  		-> Common modem stack calls this function.
+ *  		-> it calls usb_deregister(), which calls cp_lkm_usb_disconnect synchronously in this context
+ *  		-> cp_lkm_usb_disconnect shuts down and frees the usb interface
+ *  		-> After usb_deregister() exits we finish and exit.
+ *  
+ *  Notes: This means the two shutdown functions, this one and cp_lkm_usb_disconnect, can be
+ *  	   run in either order, so they must not stomp on each other. For example, since
+ *  	   cp_lkm_usb_disconnect frees the interface with the kernel, this function better
+ *  	   not do anything that requires the interface after calling usb_deregister()
+ *  
+ *  	   The modem stack is single threaded so this function can never be reentrant
+ */
+static int cp_lkm_usb_unplug_intf(struct cp_lkm_usb_unplug_intf* ui)
+{
+	//find dev in list by unique id
+	struct cp_lkm_usb_dev* cpdev;
+	struct cp_lkm_usb_base_dev* cpbdev;
+	bool shuter_down = true;
+	struct list_head *pos;
+
+	//printk("%s() start id: %d\n", __FUNCTION__, ui->unique_id);
+	spin_lock(&cp_lkm_usb_mgr.lock);
+	//The device should always exist, but if it doesn't, there is no need to blow up, so exit peacefully
+	cpdev = cp_lkm_usb_find_dev(ui->unique_id);
+	if(!cpdev) {
+		spin_unlock(&cp_lkm_usb_mgr.lock);
+		return -1;
+	}
+	cpbdev = cpdev->cpbdev;
+
+	cp_lkm_usb_mark_as_dead(cpdev);
+
+	list_for_each(pos, &cpbdev->cpdev_list){
+		struct cp_lkm_usb_dev* tmp_cpdev = list_entry(pos, struct cp_lkm_usb_dev, list);
+		if(tmp_cpdev->state != CP_LKM_USB_DEAD) {
+			//don't shut down until all clone devices have unplugged
+			shuter_down = false;
+			break;
+		}
+	}
+
+	//release the lock before calling usb_deregister because it causes disconnect to be called for case 2 in the header comments,
+	//which will try to grab the lock, so we would deadlock
+	spin_unlock(&cp_lkm_usb_mgr.lock);
+
+	if (shuter_down) {
+		LOG("Wait for cpdev to finish unplugging");
+		cp_lkm_do_or_die(cpbdev, NULL, cp_lkm_usb_unplug_do_disconnect_wait,CP_LKM_TIMEOUT_MS,CP_LKM_ITER,"cpdev failed to finish disconnecting");
+	
+		//printk("%s() usb_deregister\n",__FUNCTION__);
+		usb_deregister(cpbdev->usb_driver);
+
+		/* clean up */
+		memref_deref(cpbdev);
+
+	}
+	/* IMPORTANT: don't do anything other than deref after call to deregister*/
+
+	LOG("cpdev done unplugging");
+
+	return 0;
+}
+
+/*
+ * Handle endpoint action requests from modem stack. 
+ *  
+ * Important things to know: 
+ *  In normal mode:
+ *    1. There will be 1 cpdev per cpbdev, and 1 ep per bep.
+ *    2. Every different ep can either be listened on or recv'd on, but never both at the same time
+ *  
+ *  In clone mode:
+ *    1. There will be n cpdevs per cpbdev, and n eps per bep (depending on the number of clones).
+ *    2. Every different ep can either be listened on or recv'd on, but never both at the same time.
+ *    3. All cloned data eps can be listened on at the same time (data header allows us to mux data between all the data eps, data endpoints don't use recv).
+ *    4. With all other cloned eps of the same type (AT, CNS, QMI), only one clone can be listened on or recv'd on at a time.
+ *  	 This is because there are no headers on these channels to let us know where to mux the data to. Fortunately, the
+ *  	 modem stack enforces this, so we don't have to enforce it here, but we can use it to know how to route cloned packets
+ *  	 coming in on non-data channel endpoints 
+*/
+static int cp_lkm_usb_ep_action(struct cp_lkm_usb_ep_action* ea)
+{
+	struct cp_lkm_ep* ep;
+	struct cp_lkm_base_ep* bep = NULL;
+	struct cp_lkm_usb_dev* cpdev;
+	struct cp_lkm_usb_base_dev* cpbdev;
+	//unsigned long flags;
+	int pump_recv = 0;
+
+	//printk("%s() - action: %d, ep_num: 0x%x, id: %d\n",__FUNCTION__,ea->action, ea->ep_num, ea->unique_id);
+
+	spin_lock(&cp_lkm_usb_mgr.lock);
+	//There should always be a device, and it should always be plugged
+	cpdev = cp_lkm_usb_find_dev(ea->unique_id);
+	if(!cpdev) {
+		spin_unlock(&cp_lkm_usb_mgr.lock);
+		//printk("%s() no device found for unique id: %d\n", __FUNCTION__, ea->unique_id);
+		return -1;
+	}
+
+	cpbdev = cpdev->cpbdev;
+	if(cpbdev->base_state == CP_LKM_USB_INIT) {
+		spin_unlock(&cp_lkm_usb_mgr.lock);
+		//printk("%s() no probe yet, unique_id: %d, action: %d\n", __FUNCTION__,ea->unique_id,ea->action);
+		return -1;
+	}
+	if(cpbdev->base_state == CP_LKM_USB_DEAD) {
+		// The device can unplug down here before cpusb knows about it so it can continue to send us stuff. 
+		// The modem will unplug soon so just act like we did it and return ok. I didn't want to
+		// return an error because that might cause cpusb unnecessary heartburn.
+		spin_unlock(&cp_lkm_usb_mgr.lock);
+		//printk("%s() cpdev already dead, shouldn't be doing this: id: %d, action: %d cpbdev: %p, cpdev: %p\n", __FUNCTION__,ea->unique_id,ea->action,cpbdev,cpdev);
+		return 0;
+	}
+	DEBUG_ASSERT(cpbdev, "cpbdev is null");
+	//create the ep if it doesn't already exist
+	if(ea->action == EP_ACTION_CREATE) {
+		cp_lkm_usb_create_ep(cpdev, ea->ep_num);
+	}
+
+	if (ea->action == EP_ACTION_FLUSH_CONTROL) {
+		ep = NULL;
+	} else {
+		ep = cp_lkm_usb_get_ep(cpdev, ea->ep_num);
+		if(!ep) {
+			spin_unlock(&cp_lkm_usb_mgr.lock);
+			//printk("%s() failed to find ep: 0x%x for action: %d\n", __FUNCTION__, ea->ep_num, ea->action);
+			return -1;
+		}
+		bep = ep->bep;
+		DEBUG_ASSERT(bep,"base ep is null");
+	}
+
+
+	//if (ep && ea->action != EP_ACTION_RECV) {
+	//	printk("%s() - action: %d, ep_num: 0x%x, bep: %p, ep: %p, cpbdev: %p, cpdev: %p, id: %d\n",__FUNCTION__,ea->action, ea->ep_num, bep, ep, bep->cpbdev, ep->cpdev,ea->unique_id);
+	//}
+
+	//printk("ea->action: %d, ep_num: %d\n", ea->action, ea->ep_num);
+	switch(ea->action) {
+	case EP_ACTION_CREATE:
+		//printk("%s() - action: %d, ep_num: 0x%x, bep: %p, ep: %p, cpbdev: %p, cpdev: %p, id: %d\n",__FUNCTION__,ea->action, ea->ep_num, bep, ep, bep->cpbdev, ep->cpdev,ea->unique_id);
+		//initialize endpoint fields
+		bep->type = ea->ep_type;
+		bep->max_transfer_size = ea->max_transfer_size;
+		bep->interval = ea->interval;
+
+		DEBUG_ASSERT(cpbdev->udev,"udev is null");
+		if(bep->ep_num & USB_DIR_IN) { //in
+			if(bep->type == UE_BULK) {
+				bep->pipe = usb_rcvbulkpipe(cpbdev->udev,bep->ep_num);
+			}
+			else{ //interrupt
+				bep->pipe = usb_rcvintpipe(cpbdev->udev, bep->ep_num);
+			}   			
+		}
+		else{ //out
+			if(bep->type == UE_BULK) { 
+				bep->pipe = usb_sndbulkpipe(cpbdev->udev,bep->ep_num);
+			}
+			else{ //interrupt
+				bep->pipe = usb_sndintpipe(cpbdev->udev, bep->ep_num);
+			}
+		}
+		DEBUG_TRACE("%s() create action:%d, ep:0x%x, type:%d, pipe:0x%x", __FUNCTION__, ea->action, ea->ep_num, ea->ep_type, bep->pipe);
+		break;
+
+	case EP_ACTION_LISTEN:
+		DEBUG_TRACE("%s() listen action:%d, ep:0x%x, type:%d, pipe:0x%x", __FUNCTION__, ea->action, ea->ep_num, ea->ep_type, bep->pipe);
+		ep->con_flags |= CP_LKM_USB_LISTEN;
+		//listen on any endpoint starts listen on base
+		bep->con_flags |= CP_LKM_USB_LISTEN;
+		pump_recv = 1;
+		break;
+
+	case EP_ACTION_LISTEN_STOP:
+		{
+			bool listen_done = true;
+			struct list_head *entry, *nxt;
+			struct cp_lkm_ep *tmp_ep;
+
+			DEBUG_TRACE("%s() listen stop action:%d, ep:0x%x, type:%d, pipe:0x%x", __FUNCTION__, ea->action, ea->ep_num, ea->ep_type, bep->pipe);
+
+			// the ep is done listening
+			ep->con_flags &= ~CP_LKM_USB_LISTEN;
+
+			//now see if all eps on this bep are done listening
+			list_for_each_safe(entry, nxt, &bep->eps) {
+				tmp_ep = list_entry(entry, struct cp_lkm_ep, list_bep);
+				if(tmp_ep->con_flags & CP_LKM_USB_LISTEN) {
+					//if any of the eps on the bep still listening, then still listen on the bep
+					listen_done = false;
+					break;
+				}
+			}
+			if(listen_done) {
+				bep->con_flags &= ~CP_LKM_USB_LISTEN;
+				//If the RX_HALT bit is set then there is an error on this endpoint and the kevent will be scheduled to fix the error. As part of the fix
+				//it will unlink the urbs. Bad things can happen if we call cp_lkm_usb_unlink_urbs here at the same time the kevent handler is calling it
+				if(!test_bit (EVENT_RX_HALT, &bep->err_flags)){
+					//TODO CORY: is it ok to call unlink while holding the global lock?? Can I set a flag and run the tasklet to do the work instead??
+					cp_lkm_usb_unlink_urbs(cpbdev, &cpbdev->in_q, bep);
+				}
+			}
+		}
+		break;
+
+	case EP_ACTION_RECV:
+		DEBUG_TRACE("%s() recv action:%d, ep:0x%x, type:%d, pipe:0x%x", __FUNCTION__, ea->action, ea->ep_num, ea->ep_type, bep->pipe);
+		// can only have one pending recv on a given ep
+		ep->con_flags |= CP_LKM_USB_RECV;
+		bep->con_flags |= CP_LKM_USB_RECV;
+		pump_recv = 1;
+		break;
+
+	case EP_ACTION_FLUSH_CONTROL:
+		//printk("%s() flush control action:%d\n", __FUNCTION__, ea->action);
+		//TODO CORY: is it ok to call unlink while holding the global lock?? Can I set a flag and run the tasklet to do the work instead??
+		//We don't schedule kevents to clear endpoint halts since they are self recovering so we don't need to test the halt bits on the ctrl channel
+		cp_lkm_usb_unlink_urbs(cpbdev, &cpbdev->ctrlq, NULL);
+		break;
+
+	case EP_ACTION_SET_MAX_TX_SIZE:
+		//printk("%s() set max tx size to %d on ep: 0x%x\n",__FUNCTION__,ea->max_transfer_size, ea->ep_num);
+		bep->max_transfer_size = ea->max_transfer_size;
+		break;
+
+	default:
+		break;
+	}
+
+
+	if(pump_recv) {
+		cp_lkm_schedule_rx_restock(cpbdev, bep);
+	}
+
+	spin_unlock(&cp_lkm_usb_mgr.lock);
+
+	return 0;
+}
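+
+/*
+ * Illustrative sketch of the pipe selection in EP_ACTION_CREATE above: the
+ * direction bit of the endpoint address plus the transfer type pick one of
+ * the four standard pipe constructors. Standard kernel macros; the
+ * example_* helper is hypothetical.
+ */
+#if 0
+static unsigned int example_make_pipe(struct usb_device *udev, u8 ep_num, bool is_bulk)
+{
+	if (ep_num & USB_DIR_IN)
+		return is_bulk ? usb_rcvbulkpipe(udev, ep_num) : usb_rcvintpipe(udev, ep_num);
+	return is_bulk ? usb_sndbulkpipe(udev, ep_num) : usb_sndintpipe(udev, ep_num);
+}
+#endif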
+
+static bool cp_lkm_usb_do_pm_link(void* ctx1, void* ctx2)
+{
+	struct cp_lkm_usb_dev* cpdev = (struct cp_lkm_usb_dev*)ctx1;
+	struct cp_lkm_usb_base_dev* cpbdev = cpdev->cpbdev;
+	struct cp_lkm_usb_pm_link* upl = (struct cp_lkm_usb_pm_link*)ctx2;
+	unsigned long flags;
+	bool done = false;
+	int rc;
+
+	//printk("%s() usb id: %d, pm id: %d, link: %d\n", __FUNCTION__, upl->usb_unique_id, upl->pm_unique_id ,upl->link);
+
+	// We are getting ready to either link or unlink the usb to the protocol manager. This means we will be changing
+	// function pointers that are used by the data processing state machine and by the code that schedules the data
+	// processing machine. 
+	// 
+	// We need to shut both of those down before doing the linking. 
+	//   1: We shut the machine down by setting the state to USB_PROCESS_STATE_PAUSED. 
+	//   2: We shut down the scheduling by putting the data_q_len to CP_LKM_USB_PAUSED_CNT so the hw interrupts won't schedule a process
+	spin_lock_irqsave(&cpbdev->processing_state_lock, flags);
+	if(cpbdev->processing_state == USB_PROCESS_STATE_IDLE){
+		cpbdev->processing_state = USB_PROCESS_STATE_PAUSED;  //pauses the data processing soft irq handler
+
+		spin_lock(&cpbdev->data_q_lock);
+		cpbdev->data_q_len = CP_LKM_USB_PAUSED_CNT;   	  //stops the hw irq handlers from trying to schedule the soft irq handler
+		spin_unlock(&cpbdev->data_q_lock);
+
+		if(upl->link) {
+			cpdev->edi->usb_send_ctx = cpdev;
+		}
+
+		//release lock while calling pm since we don't know how long they may take. We have already set the processing_state to
+		//paused so the soft interrupt routines won't try to do anything so we are safe.
+		spin_unlock_irqrestore(&cpbdev->processing_state_lock, flags);
+
+		rc = cp_lkm_pm_usb_link(cpdev->edi, upl->pm_unique_id, upl->link);
+		DEBUG_ASSERT(rc == 0, "Failed to link usb and pm");
+
+		spin_lock_irqsave(&cpbdev->processing_state_lock, flags);
+		if(upl->link) {
+			if (cpdev->edi->pm_get_hdr_size && cpdev->edi->pm_recv_ctx) {
+				cpdev->edi->pm_get_hdr_size(cpdev->edi->pm_recv_ctx, cpbdev->wrapper_hdr_size, &cpbdev->pm_hdr_size, &cpbdev->pm_hdr_offset);
+			}
+		}
+		else{
+			cpdev->edi->usb_send_ctx = NULL;
+		}
+
+		cpdev->pm_id = upl->pm_unique_id;
+
+		spin_lock(&cpbdev->data_q_lock);
+		//set things back up properly before re-enabling the soft irq and hardware handlers 
+		cpbdev->data_q_len = cpbdev->data_rx_done.qlen + cpbdev->data_tx_done.qlen; //this must be set before calling schedule_data_process
+		spin_unlock(&cpbdev->data_q_lock);
+
+		cpbdev->processing_state = USB_PROCESS_STATE_IDLE;
+		done = true;
+	}
+	spin_unlock_irqrestore(&cpbdev->processing_state_lock, flags);
+
+	return done;
+}
+
+static int cp_lkm_usb_pm_link(struct cp_lkm_usb_pm_link* upl)
+{
+	struct cp_lkm_usb_dev* cpdev;
+	struct cp_lkm_usb_base_dev* cpbdev;
+
+	spin_lock(&cp_lkm_usb_mgr.lock);
+	//There should always be a device, and it should always be plugged
+	cpdev = cp_lkm_usb_find_dev(upl->usb_unique_id);
+
+	//printk("%s() cpdev: %p, u-uid: %d, pm-uid: %d, up: %d\n", __FUNCTION__, cpdev, upl->usb_unique_id, upl->pm_unique_id, upl->link);
+
+	if(!cpdev || cpdev->cpbdev->base_state == CP_LKM_USB_INIT) {
+		spin_unlock(&cp_lkm_usb_mgr.lock);
+		//printk("%s() no device or no probe yet\n", __FUNCTION__);
+		return -1;
+	}
+	cpbdev = cpdev->cpbdev;
+	// The device can unplug down here before cpusb knows about it so it can continue to send us stuff. 
+	// The modem will unplug soon so just act like we did it and return ok. I didn't want to
+	// return an error because that might cause cpusb unnecessary heartburn.
+	if(cpbdev->base_state == CP_LKM_USB_DEAD) {
+		spin_unlock(&cp_lkm_usb_mgr.lock);
+		//printk("%s() device already unplugged\n", __FUNCTION__);
+		return 0;
+	}
+
+	//printk("%s() usb id: %d, pm id: %d, link: %d\n", __FUNCTION__, upl->usb_unique_id, upl->pm_unique_id ,upl->link);
+	// See the comments in cp_lkm_usb_do_pm_link() for why the data processing machine and its
+	// scheduling must be shut down before we change the function pointers they use.
+	cp_lkm_do_or_die(cpdev, upl, cp_lkm_usb_do_pm_link, CP_LKM_TIMEOUT_MS, CP_LKM_ITER, "cpdev failed to link with pm");
+
+	//printk("%s() done\n", __FUNCTION__);
+	spin_unlock(&cp_lkm_usb_mgr.lock);
+	//force a resume
+	cp_lkm_schedule_data_process(cpbdev, false, true, false);
+	return 0;
+}
+
+static int cp_lkm_usb_is_alive_intf(struct cp_lkm_usb_is_alive_intf *alivei)
+{
+	//find dev in list by unique id
+	struct cp_lkm_usb_dev *cpdev;
+	int alive;
+
+	//printk("%s() start\n", __FUNCTION__);
+	spin_lock(&cp_lkm_usb_mgr.lock);
+	//The device should always exist, but if it doesn't, there is no need to blow up, so exit peacefully
+	cpdev = cp_lkm_usb_find_dev(alivei->unique_id);
+
+	if(!cpdev) {
+		spin_unlock(&cp_lkm_usb_mgr.lock);
+		return -1;
+	}
+
+	alive = (cpdev->state == CP_LKM_USB_DEAD) ? -1 : 0;
+	spin_unlock(&cp_lkm_usb_mgr.lock);
+
+	return alive;
+}
+
+static bool cp_lkm_usb_is_attached(struct cp_lkm_usb_dev* cpdev)
+{
+	return (cpdev->state == CP_LKM_USB_ACTIVE || cpdev->state == CP_LKM_USB_CTRL);
+}
+
+static bool cp_lkm_usb_is_base_attached(struct cp_lkm_usb_base_dev* cpbdev)
+{
+	//base has three possible states: INIT, CTRL, DEAD (it never goes to ACTIVE, only the cpdev's do that)
+	return cpbdev->base_state == CP_LKM_USB_CTRL;
+}
+
+
+// 
+// Input:
+//   if_data:       set to true if caller only wants to schedule if there is data pending
+//   is_reschedule: set to true if the caller is the scheduled handler to see if it should be rescheduled
+//   have_lock:     true if the caller already has the lock
+// 
+// returns:
+//  true if it scheduled new processing
+//  false if it didn't schedule
+// 
+// Note: returns false if processing was already scheduled
+static bool cp_lkm_schedule_data_process(struct cp_lkm_usb_base_dev* cpbdev, bool if_data, bool is_reschedule, bool have_lock)
+{
+	unsigned long flags;
+	bool res = false;
+
+	if (!have_lock) {
+		spin_lock_irqsave(&cpbdev->data_q_lock, flags);
+	}
+
+	//never schedule processing when we are paused
+	if (cpbdev->data_q_len == CP_LKM_USB_PAUSED_CNT) {
+		goto schedule_done;
+	}
+
+	if (is_reschedule) {
+		cpbdev->scheduled = false;
+	}
+
+	if (cpbdev->scheduled == true) {
+		goto schedule_done;
+	}
+
+	if (if_data) {
+		if(!cp_lkm_usb_have_data(cpbdev)){
+			goto schedule_done;
+		}
+	}
+
+	cpbdev->scheduled = true;
+	res = true;
+
+	//cpdev->dbg_total_tasklet_sched++;
+	tasklet_schedule(&cpbdev->data_process_tasklet);
+
+schedule_done:
+	if (!have_lock) {
+		spin_unlock_irqrestore(&cpbdev->data_q_lock, flags);
+	}
+	return res;
+}
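+
+/*
+ * Illustrative sketch of the scheduled-flag guard used above: one flag,
+ * protected by the same lock as the queue counters, guarantees the tasklet
+ * is scheduled at most once until it runs and clears the flag through the
+ * is_reschedule path. The example_* names are hypothetical.
+ */
+#if 0
+struct example_sched {
+	spinlock_t lock;
+	bool scheduled;
+	struct tasklet_struct tasklet;
+};
+
+static bool example_schedule(struct example_sched *es)
+{
+	unsigned long flags;
+	bool res = false;
+
+	spin_lock_irqsave(&es->lock, flags);
+	if (!es->scheduled) {
+		es->scheduled = true;	/* cleared by the tasklet via the is_reschedule path */
+		tasklet_schedule(&es->tasklet);
+		res = true;
+	}
+	spin_unlock_irqrestore(&es->lock, flags);
+	return res;
+}
+#endif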
+
+static void cp_lkm_schedule_rx_restock(struct cp_lkm_usb_base_dev* cpbdev, struct cp_lkm_base_ep* bep)
+{
+	if(bep == NULL) {
+		cp_lkm_schedule_data_process(cpbdev,false,false,false);
+		tasklet_schedule(&cpbdev->other_process_tasklet);
+	}
+	else if(bep->ep_num == cpbdev->data_in_bep_num) {
+		//printk("start data ep listen\n");
+		cp_lkm_schedule_data_process(cpbdev,false,false,false);
+	}
+	else{
+		tasklet_schedule(&cpbdev->other_process_tasklet);
+	}
+}
+
+#define DATA_SRC_TX 0
+#define DATA_SRC_RX 1
+#define DATA_SRC_OTHER 2
+static void cp_lkm_usb_done_and_defer_data(struct cp_lkm_usb_base_dev* cpbdev, struct sk_buff *skb, int src)
+{
+	unsigned long   	flags;
+
+	spin_lock_irqsave(&cpbdev->data_q_lock, flags);
+	if(src == DATA_SRC_TX) {
+		__skb_queue_tail(&cpbdev->data_tx_done, skb);
+	}
+	else{
+		__skb_queue_tail(&cpbdev->data_rx_done, skb);
+	}
+	if(cpbdev->data_q_len != CP_LKM_USB_PAUSED_CNT) {
+		cpbdev->data_q_len++;
+		cp_lkm_schedule_data_process(cpbdev,true,false,true);
+	}
+	spin_unlock_irqrestore(&cpbdev->data_q_lock, flags);
+
+}
+
+//for non data endpoint pkts
+static void cp_lkm_usb_done_and_defer_other(struct cp_lkm_usb_base_dev* cpbdev, struct sk_buff *skb)
+{
+	unsigned long   	flags;
+
+	spin_lock_irqsave(&cpbdev->other_done.lock, flags);
+	__skb_queue_tail(&cpbdev->other_done, skb);
+	//only rearm the softirq if the list was empty
+	if(cpbdev->other_done.qlen == 1) {
+		tasklet_schedule(&cpbdev->other_process_tasklet);
+	}
+	spin_unlock_irqrestore(&cpbdev->other_done.lock, flags);
+}
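+
+/*
+ * Illustrative sketch of the rearm-on-first-packet idiom above: queue under
+ * the list lock and kick the consumer only when the queue goes from empty to
+ * non-empty, so an already-scheduled tasklet isn't re-armed for every packet.
+ * The example_* name is hypothetical.
+ */
+#if 0
+static void example_defer_pkt(struct sk_buff_head *q, struct sk_buff *skb,
+                              struct tasklet_struct *t)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&q->lock, flags);
+	__skb_queue_tail(q, skb);
+	if (q->qlen == 1)	/* was empty: consumer needs a kick */
+		tasklet_schedule(t);
+	spin_unlock_irqrestore(&q->lock, flags);
+}
+#endif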
+
+static void cp_lkm_usb_process_other_done_tasklet (unsigned long param)
+{
+	struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev *)param;
+	struct sk_buff  	*skb;
+	struct skb_data 	*entry;
+	bool timed_out = false;
+	unsigned long time_limit = jiffies + 2;
+	bool can_restock = true;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cpbdev->other_state_lock, flags);
+	if(cpbdev->other_state != USB_PROCESS_STATE_IDLE){
+		spin_unlock_irqrestore(&cpbdev->other_state_lock, flags);
+		return;
+	}
+	cpbdev->other_state = USB_PROCESS_STATE_ACTIVE;
+	spin_unlock_irqrestore(&cpbdev->other_state_lock, flags);
+
+	if (timer_pending(&cpbdev->rx_delay) || !cp_lkm_usb_is_base_attached(cpbdev)) {
+		//printk("%s(), cpbdev %p delaying or no longer attached, base_state: %d\n", __FUNCTION__,cpbdev,cpbdev->base_state);
+		can_restock = false;
+	}
+	//cpdev->dbg_total_o_done++;
+
+	while(!timed_out) {
+		skb = skb_dequeue(&cpbdev->other_done);
+		if(skb == NULL) {
+			break;
+		}
+		entry = (struct skb_data *) skb->cb;
+
+		//printk("%s(), other data cpbdev: %p, bep: %p, num: 0x%x\n",__FUNCTION__,cpbdev,entry->bep,(entry->bep?entry->bep->ep_num:0));
+
+		//cp_lkm_usb_cnts(entry->state,-1);
+		switch (entry->state) {
+			case in_other_done:
+				if(entry->urb) {
+					//cp_lkm_usb_urb_cnt(-1);
+					usb_free_urb (entry->urb);
+				}
+				cp_lkm_usb_other_recv_process(cpbdev, skb);
+				break;
+			case ctrl_done:
+				if(entry->urb) {
+					//cp_lkm_usb_urb_cnt(-1);
+					usb_free_urb (entry->urb);
+				}
+				cp_lkm_usb_ctrl_process(cpbdev, skb);
+				break;
+			case out_done:
+			case in_other_cleanup:
+				if(entry->urb) {
+					//cp_lkm_usb_urb_cnt(-1);
+					usb_free_urb (entry->urb);
+				}
+				dev_kfree_skb_any(skb);
+				break;
+			case unlink_start:
+			default:
+				//printk("!!other: unknown skb state: %d\n",entry->state);
+				break;
+		}
+
+		if(time_after_eq(jiffies, time_limit)) {
+			//ran out of time, process this one and then bail
+			timed_out = true;
+		}
+	}
+
+	if(can_restock) {
+		cp_lkm_usb_rx_other_restock(cpbdev);
+	}
+
+	if(timed_out) {
+		tasklet_schedule(&cpbdev->other_process_tasklet);
+	}
+
+	spin_lock_irqsave(&cpbdev->other_state_lock, flags);
+	cpbdev->other_state = USB_PROCESS_STATE_IDLE;
+	spin_unlock_irqrestore(&cpbdev->other_state_lock, flags);
+
+	return ;
+}
+
+// Timer callback. This runs in soft interrupt context. 
+// 
+// This timer is scheduled if we ran into some unexpected USB error and want
+//   to give the USB endpoint some time before trying to reschedule recv urbs on it.
+// 
+// Note: an unlink or unplug can happen during this call on multi-core platforms with kernel
+//   preemption enabled. The call to restock can blow chow (specifically when it calls
+//   cp_lkm_schedule_data_process) if one of them sneaks in while we are still in the call,
+//   which is why we check the processing state under the lock before restocking.
+// 
+// The whole purpose of this function is to pump the system if it is otherwise idle. If 
+// it isn't idle, we can count on those processes to call cp_lkm_schedule_rx_restock when done. 
+static void cp_lkm_usb_delay_timer (unsigned long param)
+{
+	unsigned long flags;
+
+	struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev *)param;
+	spin_lock_irqsave(&cpbdev->processing_state_lock, flags);
+	if(cpbdev->processing_state == USB_PROCESS_STATE_IDLE){
+		cp_lkm_schedule_rx_restock(cpbdev,NULL);
+	}
+	spin_unlock_irqrestore(&cpbdev->processing_state_lock, flags);
+}
+
+#if 0
+static void cp_lkm_usb_dbg_memleak_timer (unsigned long param)
+{
+	printk("+=+=+=+=+=!!!!mem: %d, urb: %d, skb: data: %d, other: %d, xmit: %d, ctrl: %d, unplug:%d, stck_cnt: %d, stck_chk: %d, unlink: %d\n",g_dbg_memalloc_cnt,g_dbg_urballoc_cnt,g_dbg_data_skballoc_cnt,g_dbg_other_skballoc_cnt,g_dbg_xmit_skballoc_cnt,g_dbg_ctrl_skballoc_cnt,g_dbg_unplug_cnt,g_stuck_cnt,g_stuck_chk,g_unlink_cnt);
+	mod_timer(&dbg_memleak_timer, jiffies + msecs_to_jiffies(5000));
+}
+#endif
+
+
+/*
+ * We pause the transmit if there are too many urbs down at the usb layer. 
+ * The Broadcom processor's USB block sometimes gets stuck meaning we will never 
+ * unpause. This function is used to detect if we are paused because of a stuck and 
+ * try to recover it. 
+*/
+static void cp_lkm_usb_stuck_check(struct cp_lkm_usb_base_dev* cpbdev, int action)
+{
+	//only broadcom has the stuck problem
+	if (cp_lkm_is_broadcom == 0) {
+		//printk("Not BRCM!!!!\n");
+		return;
+	}
+
+	//TODO: it seems like this might work fine with clones. I don't think it hurts to be inited,
+	//      started or stopped multiple times??
+	//g_stuck_chk++;
+	switch(action) {
+	case CP_LKM_STUCK_INIT:
+		cpbdev->usb_pause_stuck_timer.function = cp_lkm_usb_pause_stuck_timer;
+		cpbdev->usb_pause_stuck_timer.data = (unsigned long)cpbdev;
+		init_timer(&cpbdev->usb_pause_stuck_timer);
+		break;
+	case CP_LKM_STUCK_START:
+		mod_timer(&cpbdev->usb_pause_stuck_timer, jiffies + msecs_to_jiffies(3000));
+		cpbdev->tx_proc_cnt_at_pause = cpbdev->tx_proc_cnt;
+		break;
+	case CP_LKM_STUCK_STOP:
+	case CP_LKM_STUCK_DEINIT:
+		del_timer_sync(&cpbdev->usb_pause_stuck_timer);
+		break;
+	}
+}
+
+// Broadcom has a problem in the EHCI controller where if it gets a NAK on an out packet
+// it occasionally doesn't update the status of the URB and retry it. This results in the endpoint getting stuck.
+// If we detect that it is stuck (if the tx has been paused for more than 3 seconds) then we cancel the
+// stuck urb and this gets things going again. The cancelled urb results in a dropped packet, which is undesirable
+// but preferable to being stuck.
+static void cp_lkm_usb_pause_stuck_timer (unsigned long param)
+{
+	struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev *)param;
+	struct skb_data *entry;
+	struct sk_buff *skb;
+	struct urb *urb = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cpbdev->out_q.lock, flags);
+	if (cpbdev->tx_paused) {
+		// cancel stuck urb?
+		skb = skb_peek(&cpbdev->out_q);
+		if (skb) {
+			entry = (struct skb_data *) skb->cb;
+			if (entry) {
+				if(cpbdev->tx_proc_cnt_at_pause == cpbdev->tx_proc_cnt){
+					//printk("\n!!!!!!Canceling stuck URB, cnt at stuck: %d, cnt at unstick: %d!!!!!!!!!!!!!!!!!!!!!!!!!\n", cpbdev->tx_proc_cnt_at_pause, cpbdev->tx_proc_cnt);
+					urb = entry->urb;
+					usb_get_urb(urb);
+				}
+				//else{
+					//some pkts were transmitted successfully while waiting, though not enough to unpause us.
+					//this means the tx is not stuck, so don't need to cancel anything
+					//printk("\n!!!!!!Restarting stuck URB timer, cnt at stuck: %d, cnt at unstick: %d!!!!!!!!!!!!!!!!!!!!!!!!!\n",cpbdev->tx_proc_cnt_at_pause, cpbdev->tx_proc_cnt);
+				//}
+				// restart just in case this doesn't unpause tx
+				cp_lkm_usb_stuck_check(cpbdev, CP_LKM_STUCK_START);
+				//g_stuck_cnt++;
+			}
+		}
+	}
+	spin_unlock_irqrestore(&cpbdev->out_q.lock, flags);
+	if (urb) {
+		//printk("\n!!!!!!Canceling stuck URB!!!!!!!!!!\n");
+		//cpbdev->dbg_total_stuck_cnt++;
+		usb_unlink_urb (urb);
+		usb_put_urb(urb);
+	}
+}
+
+#if 0
+static void cp_lkm_usb_dbg_timer (unsigned long param)
+{
+	struct cp_lkm_usb_dev* cpdev = (struct cp_lkm_usb_dev *)param;
+	struct cp_lkm_usb_base_dev* cpbdev = cpdev->cpbdev;
+	printk("!!!!cpdev: %p, clone: %d, id: 0x%x, q_cnt: %d, p: %d, stuck_cnt: %d, tx done: %d, ip_copies: %d!!!!!!!\n",cpdev, cpdev->clone_num,cpdev->mux_id,cpbdev->tx_usb_q_count,cpbdev->tx_paused, cpbdev->dbg_total_stuck_cnt, cpbdev->tx_proc_cnt,num_ip_copies);
+
+	//printk("!!!!Stuck urb count: %d, total_pause: %d, cpdev: %p, is_brcm: %d!!!!!!!\n",cpdev->dbg_total_stuck_cnt,cpdev->dbg_total_pause,cpdev,cp_lkm_is_broadcom);
+	//printk("!!!!!!!!!!!\n");
+	#if 0
+	int txa;
+	int rxa;
+	int drql;
+	int dtql;
+	//int ab;
+	int tx,rx;
+	int pkt_avg;
+	//int epqc, in_q;
+
+	cpdev->dbg_total_rx_qlen += cpdev->data_rx_done.qlen;
+	cpdev->dbg_total_tx_qlen += cpdev->data_tx_done.qlen;
+
+	//ab = cpdev->dbg_total_budget/(cpdev->dbg_total_d_done+1);
+	txa = cpdev->dbg_total_tx_proc/(cpdev->dbg_total_d_done+1);
+	rxa = cpdev->dbg_total_rx_proc/(cpdev->dbg_total_d_done+1);
+	drql = cpdev->dbg_total_rx_qlen/(cpdev->dbg_total_d_done+1);
+	dtql = cpdev->dbg_total_tx_qlen/(cpdev->dbg_total_d_done+1);
+	//epqc = cpdev->in_eps[CP_LKM_DATA_INDEX].q_cnt;
+	//in_q = cpdev->in_q.qlen;
+	tx = cpdev->dbg_total_tx_irq;
+	rx = cpdev->dbg_total_rx_irq;
+	pkt_avg = (tx+rx)/5; 
+	printk("tot: %d, tx: %d, rx: %d, pa: %d, dones: %d, p: %d\n", tx+rx, tx, rx, pkt_avg, cpdev->dbg_total_d_done, cpdev->dbg_total_pause);
+	printk("resch: %d, d_c: %d, sch_n: %d, sch_t: %d, sch_wq: %d, sch_sk: %d, ds: %d\n", cpdev->dbg_total_d_resched, cpdev->dbg_total_d_comp, cpdev->dbg_total_napi_sched,cpdev->dbg_total_tasklet_sched, cpdev->dbg_total_wq_sched,cpdev->dbg_total_sch_sk, cpdev->data_state);
+	printk("txa: %d, rxa: %d, to: %d, HZ:%d \n", txa , rxa, cpdev->dbg_total_timeout, HZ);
+	printk("nrm_t: %d, blk_t: %d, nrm: %d, blk: %d, ntmrs: %d \n", cpdev->dbg_total_num_normal_t,cpdev->dbg_total_num_hybrid_t,cpdev->dbg_total_num_normal,cpdev->dbg_total_num_hybrid, cpdev->dbg_total_num_d_timers);
+	printk("psd: %d, tuqc: %d, schd: %d, dql: %d, rql: %d, tql: %d, toq: %d\n",cpdev->tx_paused,cpdev->tx_usb_q_count,cpdev->scheduled,cpdev->data_q_len,cpdev->data_rx_done.qlen,cpdev->data_tx_done.qlen,cpdev->out_q.qlen);
+	printk("txirq: %d, txprc: %d\n",cpdev->dbg_total_tx_irq, cpdev->dbg_total_tx_proc);
+
+	//printk("ipqc: %d, in_q: %d\n", epqc, in_q);
+	//printk("d0: %p,d1: %p,d2: %p,d3: %p,d4: %p\n", devs[0],devs[1],devs[2],devs[3],devs[4]);
+	cpdev->dbg_total_d_done = cpdev->dbg_total_d_resched = cpdev->dbg_total_d_comp = 0;
+	cpdev->dbg_total_pause = cpdev->dbg_total_max_work = cpdev->dbg_total_budget = 0;
+	cpdev->dbg_total_tx_irq = cpdev->dbg_total_rx_irq = 0;
+	cpdev->dbg_total_tx_proc = cpdev->dbg_total_rx_proc = 0;
+	cpdev->dbg_total_rx_qlen =  cpdev->dbg_total_tx_qlen = 0;
+	cpdev->dbg_total_napi_sched=cpdev->dbg_total_tasklet_sched=cpdev->dbg_total_wq_sched=0;
+	cpdev->dbg_total_num_normal_t=cpdev->dbg_total_num_hybrid_t=cpdev->dbg_total_num_normal=cpdev->dbg_total_num_hybrid=cpdev->dbg_total_num_d_timers = 0;
+	#endif
+
+	mod_timer(&cpdev->dbg_timer, jiffies + msecs_to_jiffies(5000));
+
+}
+#endif
+
+
+//Caller must have the data_q_lock before calling
+static int cp_lkm_usb_have_data(struct cp_lkm_usb_base_dev *cpbdev)
+{
+	//return the amount of work to be done if it exceeds the threshold, else return 0
+	if(cpbdev->data_rx_done.qlen >= cpbdev->rx_schedule_threshold || cpbdev->data_tx_done.qlen >= cpbdev->tx_schedule_threshold){
+		return cpbdev->data_rx_done.qlen + cpbdev->data_tx_done.qlen;
+	}
+	return 0;
+}
+
+
+#if 1
+static int cp_lkm_usb_process_data_done(struct cp_lkm_usb_base_dev *cpbdev, int budget)
+{
+	struct sk_buff  	*skb;
+	struct skb_data 	*entry;
+	struct cp_lkm_usb_dev* cpdev __attribute__((unused));
+	unsigned long time_limit = jiffies + 3;
+	int retval;
+	int restock = 0;
+	unsigned long flags;
+	int rx_work_done = 0;
+	int tx_work_done = 0;
+	int work_done = 0;
+	int can_restock = 1;
+	int i;
+	int loop;
+	int num_proc;
+	int actual_budget;
+	int num_rx;
+	int num_tx;
+	struct sk_buff_head done_q;
+	bool paused;
+
+	skb_queue_head_init (&done_q);
+
+	//cpdev->dbg_total_d_done++;
+	//cpdev->dbg_total_budget += budget;
+	//cpdev->dbg_total_rx_qlen += cpdev->data_rx_done.qlen;
+	//cpdev->dbg_total_tx_qlen += cpdev->data_tx_done.qlen;
+
+	// if the delay timer is running, we aren't supposed to send any more recv urbs to the usb layer.
+	// if the device has detached, we need to finish processing done pkts, but don't resubmit any new urbs
+	if (timer_pending(&cpbdev->rx_delay) || !cp_lkm_usb_is_base_attached(cpbdev)) {
+		//printk("%s(), cpdev delaying or no longer attached\n", __FUNCTION__);
+		can_restock = 0;
+	}
+
+	paused = cpbdev->tx_paused;
+
+	actual_budget = CP_LKM_USB_NAPI_MAX_WORK;
+	for(loop=0;loop<CP_LKM_USB_PROCESS_DIVISOR;loop++) {
+		if(time_after_eq(jiffies, time_limit)) {
+			//ran out of time, process this one and then bail
+			work_done = budget;
+			//cpdev->dbg_total_timeout++;
+			break;
+		}
+		//keep restocking the q until we max out the budget or timeout or runout
+		if(rx_work_done >= actual_budget || (paused && tx_work_done >= actual_budget)) {
+			work_done = budget;
+			break;
+		}
+		spin_lock_irqsave(&cpbdev->data_q_lock, flags);
+		num_rx = cpbdev->data_rx_done.qlen;
+		num_tx = cpbdev->data_tx_done.qlen;
+		num_proc = max(num_rx,num_tx);
+		num_proc = min(num_proc,actual_budget/CP_LKM_USB_PROCESS_DIVISOR); //grab 1/divisor of remaining budget each time
+		// Note: A unit of work for the shim is either a lone tx, a lone rx or a combo of a rx and a tx.
+		//       Here we calculate how much work to do on this poll. If there was work left over from last time
+		//       finish processing it.
+		for(i = 0; i < num_proc; i++) {
+			skb = __skb_dequeue (&cpbdev->data_rx_done);
+			if(skb){
+				cpbdev->data_q_len--;
+				__skb_queue_tail(&done_q, skb);
+			}
+			skb = __skb_dequeue (&cpbdev->data_tx_done);
+			if(skb){
+				cpbdev->data_q_len--;
+				__skb_queue_tail(&done_q, skb);
+			}
+		}
+		spin_unlock_irqrestore(&cpbdev->data_q_lock, flags);
+
+		//nothing in the q, we are done
+		if(done_q.qlen == 0) {
+			break;
+		}
+
+		while((skb = __skb_dequeue(&done_q))){
+			entry = (struct skb_data *) skb->cb;
+			//cp_lkm_usb_cnts(entry->state,-1);
+			switch (entry->state) {
+				case in_data_done:
+					//cpdev->dbg_total_rx_proc++;
+					entry->bep->q_cnt--;
+					restock++;
+					rx_work_done++;
+					work_done++;
+					if(can_restock && restock == CP_LKM_USB_RESTOCK_MULTIPLE) {
+						restock = 0;
+
+						retval = cp_lkm_usb_submit_recv (cpbdev, entry->urb, GFP_ATOMIC, entry->bep, true);
+						if (retval < 0) {
+							//printk("%s(), can't resubmit\n", __FUNCTION__);
+							//cp_lkm_usb_urb_cnt(-1);
+							usb_free_urb (entry->urb);
+							can_restock = 0;
+						}
+					}
+					else{
+						//cp_lkm_usb_urb_cnt(-1);
+						usb_free_urb (entry->urb);
+					}
+					cp_lkm_usb_data_recv_process(cpbdev, skb);
+					break;
+				case out_done:
+					work_done++;
+					tx_work_done++;
+					//fall through on purpose
+				case in_data_cleanup:
+					if(entry->urb) {
+						//cp_lkm_usb_urb_cnt(-1);
+						usb_free_urb (entry->urb);
+					}
+					dev_kfree_skb_any(skb);
+					break;
+	   
+				case unlink_start:
+				default:
+					//printk("!!data: unknown skb state: %d\n",entry->state);
+					break;
+			}
+		}
+	}
+
+	//restock recv urbs to usb layer if we processed any
+	if(can_restock) {
+		cp_lkm_usb_rx_data_restock(cpbdev);
+	}
+
+	//see if we need to resume the tx side
+	if(tx_work_done) {
+		spin_lock_irqsave (&cpbdev->out_q.lock, flags);
+		cpbdev->tx_proc_cnt += tx_work_done;
+
+		if(tx_work_done > cpbdev->tx_usb_q_count) {
+			cpbdev->tx_usb_q_count = 0;
+		}
+		else{
+			cpbdev->tx_usb_q_count -= tx_work_done;
+		}
+		if(cpbdev->tx_usb_q_count <= cpbdev->tx_resume_threshold) {
+			if(cpbdev->tx_paused){
+				//unpause all cpdevs
+				cp_lkm_usb_dev_pause(cpbdev, false);
+				// cancel usb_pause_stuck_timer
+				cp_lkm_usb_stuck_check(cpbdev, CP_LKM_STUCK_STOP);
+			}
+
+		}
+		spin_unlock_irqrestore (&cpbdev->out_q.lock, flags);
+	}
+
+	//if(work_done > cpdev->dbg_total_max_work){
+	//	cpdev->dbg_total_max_work = work_done;
+	//}
+
+	//can't return greater than the passed in budget
+	if(work_done > budget) {
+		work_done = budget;
+	}
+
+	return work_done; 
+	//return 1;
+}
+#endif
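+
+/*
+ * Illustrative sketch of the double budget used in process_data_done above:
+ * a work budget (packet count) plus a time budget (jiffies deadline) so a
+ * flood of completions can't monopolize the softirq. Standard jiffies
+ * helpers; the example_* name is hypothetical.
+ */
+#if 0
+static int example_budgeted_drain(struct sk_buff_head *q, int budget)
+{
+	unsigned long time_limit = jiffies + 3;	/* bail after ~3 ticks */
+	int work_done = 0;
+	struct sk_buff *skb;
+
+	while (work_done < budget && !time_after_eq(jiffies, time_limit)) {
+		skb = skb_dequeue(q);
+		if (!skb)
+			break;
+		dev_kfree_skb_any(skb);	/* stand-in for the real per-skb processing */
+		work_done++;
+	}
+	return work_done;	/* caller reschedules if we ran out of budget */
+}
+#endif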
+
+static int cp_lkm_usb_common_process_data_done(struct cp_lkm_usb_base_dev* cpbdev, int budget)
+{
+	unsigned long flags;
+	int work_done = -1;
+	bool rescheduled;
+	bool ran_data_done = false;
+	if(NULL == cpbdev) {
+		//printk("%s() !!!!!!!!!!!!!!!!no ctxt\n", __FUNCTION__);
+		return work_done;
+	}
+
+	spin_lock_irqsave(&cpbdev->processing_state_lock, flags);
+	if(cpbdev->processing_state == USB_PROCESS_STATE_IDLE){
+		cpbdev->processing_state = USB_PROCESS_STATE_ACTIVE;
+		spin_unlock_irqrestore(&cpbdev->processing_state_lock, flags);
+		work_done = cp_lkm_usb_process_data_done(cpbdev, budget);
+		spin_lock_irqsave(&cpbdev->processing_state_lock, flags);
+		ran_data_done = true;
+		cpbdev->processing_state = USB_PROCESS_STATE_IDLE;
+	}
+	spin_unlock_irqrestore(&cpbdev->processing_state_lock, flags);
+	if (ran_data_done) {
+		rescheduled = cp_lkm_schedule_data_process(cpbdev,true,true,false);
+		if (rescheduled) {
+			work_done = budget;
+			//cpdev->dbg_total_d_resched++;
+		}
+		else if(work_done){
+			work_done--;
+			//cpdev->dbg_total_d_comp++;
+		}
+	}
+	else{
+		//cpdev->dbg_total_sch_sk++;
+	}
+	return work_done;
+}
+
+
+static void cp_lkm_usb_process_data_done_tasklet (unsigned long param)
+{
+	struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev *)param;
+
+	cp_lkm_usb_common_process_data_done(cpbdev, CP_LKM_PM_NAPI_WEIGHT);
+}
+
+
+static void cp_lkm_usb_rx_data_restock (struct cp_lkm_usb_base_dev* cpbdev)
+{
+	//struct cp_lkm_usb_dev* cpdev = (struct cp_lkm_usb_dev *)param;
+	//int cur_token;
+	struct urb  *urb;
+	//int     ep_index;
+	int q_len;
+	struct cp_lkm_base_ep* bep;
+	int retval;
+	int q_cnt;
+
+	// timer_pending means we had an error and are waiting for a recovery period before submitting any more rx urbs
+	if (timer_pending(&cpbdev->rx_delay)) {
+		return;
+	}
+
+	// restock the recv queues on any ep's that are listening 
+	bep = cp_lkm_usb_get_bep(cpbdev, cpbdev->data_in_bep_num);
+	if(!(bep->con_flags & CP_LKM_USB_LISTEN) && !(bep->con_flags & CP_LKM_USB_RECV)) {
+		return;
+	}
+	if(test_bit (EVENT_RX_HALT, &bep->err_flags)){
+		return;
+	}
+
+	if(bep->con_flags & CP_LKM_USB_RECV) {
+		//only post 1 for recv's
+		q_len = 1;
+	}
+	else{
+		//it's a listen
+		q_len = CP_LKM_USB_MAX_RX_QLEN;
+	}
+
+	// Try to q up to q_len recv buffs with usb. We may not be able to get to that amount if
+	// there is a problem with usb, so only try up to q_len times to insert them.
+	retval = 0;
+	q_cnt = bep->q_cnt;
+
+	while(q_cnt < q_len) {
+		urb = usb_alloc_urb (0, GFP_ATOMIC);
+		if (!urb) {
+			if (q_cnt == 0) {
+				cp_lkm_usb_defer_kevent (cpbdev, bep, EVENT_RX_MEMORY);
+			}
+			break;
+		}
+		//cp_lkm_usb_urb_cnt(1);
+		retval = cp_lkm_usb_submit_recv (cpbdev, urb, GFP_ATOMIC, bep, true);
+		if (retval < 0) {
+			//cp_lkm_usb_urb_cnt(-1);
+			usb_free_urb (urb);
+			break;
+		}
+		q_cnt++;
+	}
+}
+
+static void cp_lkm_usb_rx_other_restock (struct cp_lkm_usb_base_dev* cpbdev)
+{
+	struct urb  *urb;
+	int q_len;
+	struct cp_lkm_base_ep* bep;
+	int retval;
+	int q_cnt;
+	struct list_head *entry, *nxt;
+
+	// timer_pending means we had an error and are waiting for a recovery period before submitting any more rx urbs
+	if (timer_pending(&cpbdev->rx_delay)) {
+		return;
+	}
+
+	// restock the recv queues on any ep's that are listening 
+	list_for_each_safe(entry, nxt, &cpbdev->in_bep_list) {
+		bep = list_entry(entry, struct cp_lkm_base_ep, list);
+		if(!(bep->con_flags & CP_LKM_USB_LISTEN) && !(bep->con_flags & CP_LKM_USB_RECV)) {
+			continue;
+		}
+		if(test_bit (EVENT_RX_HALT, &bep->err_flags)){
+			continue;
+		}
+		if(bep->ep_num == cpbdev->data_in_bep_num) {
+			continue;
+		}
+
+		if(bep->con_flags & CP_LKM_USB_RECV) {
+			//only post 1 for recv's
+			q_len = 1;
+		}
+		else{
+			//it's a listen
+			q_len = CP_LKM_USB_MAX_OTHER_QLEN;
+		}
+
+		// Try to q up to q_len recv buffs with usb. We may not be able to get to that amount if
+		// there is a problem with usb, so only try up to q_len times to insert them.
+		retval = 0;
+		q_cnt = bep->q_cnt;
+
+		while(q_cnt < q_len) {
+			urb = usb_alloc_urb (0, GFP_ATOMIC);
+			if (!urb) {
+				if (q_cnt == 0) {
+					cp_lkm_usb_defer_kevent (cpbdev, bep, EVENT_RX_MEMORY);
+				}
+				break;
+			}
+			//cp_lkm_usb_urb_cnt(1);
+			retval = cp_lkm_usb_submit_recv (cpbdev, urb, GFP_ATOMIC, bep, false);
+			if (retval < 0) {
+				//cp_lkm_usb_urb_cnt(-1);
+				usb_free_urb (urb);
+				break;
+			}
+			q_cnt++;
+		}
+	}
+}
+
+//unlink all urbs with the given ep, or all if ep is NULL
+static int cp_lkm_usb_unlink_urbs (struct cp_lkm_usb_base_dev *cpbdev, struct sk_buff_head *q, struct cp_lkm_base_ep* bep)
+{
+	unsigned long   	flags;
+	struct sk_buff  	*skb;
+	int 		count = 0;
+
+	spin_lock_irqsave (&q->lock, flags);
+	while (!skb_queue_empty(q)) {
+		struct skb_data 	*entry;
+		struct urb  	*urb;
+		int 		retval;
+
+		skb_queue_walk(q, skb) {
+			entry = (struct skb_data *) skb->cb;
+			urb = entry->urb;
+			if(urb && (entry->state != unlink_start) && (entry->bep == bep || bep == NULL)) {
+				goto found;
+			}
+		}
+		break;
+found:
+		entry->state = unlink_start;
+
+		/*
+		 * Take a reference on the URB so it can't be
+		 * freed during usb_unlink_urb, which may trigger a
+		 * use-after-free problem inside usb_unlink_urb since
+		 * usb_unlink_urb is always racing with the .complete
+		 * handler (including defer_bh).
+		 */
+		usb_get_urb(urb);
+		spin_unlock_irqrestore(&q->lock, flags);
+		// during some PM-driven resume scenarios,
+		// these (async) unlinks complete immediately
+		//usb_kill_urb(urb);
+		retval = usb_unlink_urb (urb);
+		//g_unlink_cnt++;
+		if (retval != -EINPROGRESS && retval != 0){
+			//netdev_dbg(dev->net, "unlink urb err, %d\n", retval);
+		} else{
+			count++;
+		}
+		usb_put_urb(urb);
+		spin_lock_irqsave(&q->lock, flags);
+	}
+	spin_unlock_irqrestore (&q->lock, flags);
+	return count;
+}
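+
+/*
+ * Illustrative sketch of the ref-counted async unlink used above: pin the
+ * urb with usb_get_urb() so a racing completion can't free it underneath
+ * usb_unlink_urb(), then drop the reference. A return of 0 or -EINPROGRESS
+ * means the completion handler will still run (with -ECONNRESET status).
+ * Standard kernel calls; the example_* name is hypothetical.
+ */
+#if 0
+static void example_async_unlink(struct urb *urb)
+{
+	int retval;
+
+	usb_get_urb(urb);		/* hold a ref across the unlink */
+	retval = usb_unlink_urb(urb);	/* async: returns without waiting */
+	if (retval != -EINPROGRESS && retval != 0)
+		pr_debug("unlink urb err, %d\n", retval);
+	usb_put_urb(urb);
+}
+#endif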
+
+
+static void cp_lkm_usb_defer_kevent (struct cp_lkm_usb_base_dev* cpbdev, struct cp_lkm_base_ep* bep, int work)
+{
+	set_bit (work, &bep->err_flags);
+	if (!schedule_work (&cpbdev->kevent)) {
+		//deverr (dev, "kevent %d may have been dropped", work);
+	} else {
+		//devdbg (dev, "kevent %d scheduled", work);
+	}
+}
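+
+/*
+ * Illustrative sketch of the defer-to-thread idiom above: record which error
+ * needs servicing in an atomic bitmask and punt to a workqueue, because
+ * fixes like usb_clear_halt() sleep and therefore can't run in interrupt
+ * context. The example_* names are hypothetical.
+ */
+#if 0
+struct example_ep { unsigned long err_flags; };
+#define EXAMPLE_EVENT_RX_HALT 0
+
+static void example_defer_fix(struct example_ep *eep, struct work_struct *kevent)
+{
+	set_bit(EXAMPLE_EVENT_RX_HALT, &eep->err_flags);
+	schedule_work(kevent);	/* worker tests, fixes, then clears the bit */
+}
+#endif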
+
+// Workqueue callback function. This runs in thread context
+static void cp_lkm_usb_kevent (struct work_struct *work)
+{
+	struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev *)container_of(work, struct cp_lkm_usb_base_dev, kevent);
+	int status;
+	struct cp_lkm_base_ep* bep;
+	struct list_head *entry, *nxt;
+
+
+	//grab global lock while testing dev state so it can't change on us.
+	spin_lock(&cp_lkm_usb_mgr.lock);
+	if(!cp_lkm_usb_is_base_attached(cpbdev)){
+		spin_unlock(&cp_lkm_usb_mgr.lock);
+		return;
+	}
+
+	//don't want to hold global lock while doing this since don't know how long this will take, see next note
+	spin_unlock(&cp_lkm_usb_mgr.lock);
+
+
+	//NOTE: if kernel preemption is enabled and the disconnect gets called right here, bad things could happen if the cpdev->udev
+	//  	is released. Fortunately, cp_lkm_usb_disconnect() calls cancel_work_sync() before releasing it. This will either cancel this
+	//  	function if it isn't currently running, or will wait until it exits before returning if it is running. This protects us.
+	
+	list_for_each_safe(entry, nxt, &cpbdev->out_bep_list) {
+		bep = list_entry(entry, struct cp_lkm_base_ep, list);
+		/* usb_clear_halt() needs a thread context */
+		if (test_bit (EVENT_TX_HALT, &bep->err_flags)) {
+			cp_lkm_usb_unlink_urbs (cpbdev, &cpbdev->out_q, bep);
+			status = usb_clear_halt (cpbdev->udev, bep->pipe);
+			DEBUG_TRACE("%s() EVENT_TX_HALT status:%d", __FUNCTION__, status);
+			if (status < 0 && status != -EPIPE && status != -ESHUTDOWN) {
+				//if (netif_msg_tx_err (dev))
+				//  deverr (dev, "can't clear tx halt, status %d",
+				DEBUG_TRACE("%s() failed EVENT_TX_HALT status:%d", __FUNCTION__, status);
+				//  	status);
+			} else {
+				clear_bit (EVENT_TX_HALT, &bep->err_flags);
+				//if (status != -ESHUTDOWN)
+				//  netif_wake_queue (dev->net);
+			}
+		}
+	}
+
+	list_for_each_safe(entry, nxt, &cpbdev->in_bep_list) {
+		bep = list_entry(entry, struct cp_lkm_base_ep, list);
+		if (test_bit (EVENT_RX_HALT, &bep->err_flags)) {
+			cp_lkm_usb_unlink_urbs (cpbdev, &cpbdev->in_q, bep);
+			status = usb_clear_halt (cpbdev->udev, bep->pipe);
+			DEBUG_TRACE("%s() EVENT_RX_HALT status:%d", __FUNCTION__, status);
+			if (status < 0 && status != -EPIPE && status != -ESHUTDOWN) {
+				DEBUG_TRACE("%s() failed EVENT_RX_HALT status:%d", __FUNCTION__, status);
+				//if (netif_msg_rx_err (dev))
+				//  deverr (dev, "can't clear rx halt, status %d",
+				//  	status);
+			} else {
+				clear_bit (EVENT_RX_HALT, &bep->err_flags);
+				//grab global lock so link/unlink or unplug can't mess up the restock schedule pointers mid scheduling
+				spin_lock(&cp_lkm_usb_mgr.lock);
+				if (cp_lkm_usb_is_base_attached(cpbdev)){
+					cp_lkm_schedule_rx_restock(cpbdev,bep);
+				}
+				spin_unlock(&cp_lkm_usb_mgr.lock);
+
+			}
+		}
+	}
+	/* tasklet could resubmit itself forever if memory is tight */
+	list_for_each_safe(entry, nxt, &cpbdev->in_bep_list) {
+		bep = list_entry(entry, struct cp_lkm_base_ep, list);
+		if (test_bit (EVENT_RX_MEMORY, &bep->err_flags)) {
+			DEBUG_TRACE("%s() EVENT_RX_MEMORY", __FUNCTION__);
+	
+			clear_bit (EVENT_RX_MEMORY, &bep->err_flags);
+
+			//grab global lock so link/unlink or unplug can't mess up the restock schedule pointers mid scheduling
+			spin_lock(&cp_lkm_usb_mgr.lock);
+			if (cp_lkm_usb_is_base_attached(cpbdev) && bep->q_cnt == 0){
+				cp_lkm_schedule_rx_restock(cpbdev,bep);
+
+			}
+			spin_unlock(&cp_lkm_usb_mgr.lock);
+		}
+	}
+	//if (test_bit (EVENT_LINK_RESET, &cpdev->flags)) {
+	//  struct driver_info  *info = dev->driver_info;
+	//  int 		retval = 0;
+	//
+	//  clear_bit (EVENT_LINK_RESET, &dev->flags);
+	//  if(info->link_reset && (retval = info->link_reset(dev)) < 0) {
+	//  	devinfo(dev, "link reset failed (%d) usbnet usb-%s-%s, %s",
+	//  		retval,
+	//  		dev->udev->bus->bus_name, dev->udev->devpath,
+	//  		info->description);
+	//  }
+	//}
+
+	//if (dev->flags)
+	//  devdbg (dev, "kevent done, flags = 0x%lx",
+	//  	dev->flags);
+}
+
+static void cp_lkm_usb_ctrl_complete(struct urb *urb)
+{	
+	unsigned long flags;
+	struct sk_buff  	*skb = (struct sk_buff *) urb->context;
+	struct skb_data 	*entry = (struct skb_data *) skb->cb;
+	struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev *)entry->cpbdev;
+	
+	//remove skb from the list first thing so no other code context looking at the
+	//list (such as unlink_urbs) can mess with it. 
+	spin_lock_irqsave(&cpbdev->ctrlq.lock, flags);
+	__skb_unlink(skb, &cpbdev->ctrlq);
+	spin_unlock_irqrestore(&cpbdev->ctrlq.lock,flags);
+
+	skb->len = urb->actual_length;
+
+	//skip status and error checking if the device has unplugged
+	if(!cp_lkm_usb_is_base_attached(cpbdev)) {
+		urb->status = -ENODEV;
+		goto ctrl_done;
+	}
+
+	if (urb->status != 0) {
+		switch (urb->status) {
+			case -EPIPE:
+				break;
+
+				/* software-driven interface shutdown */
+			case -ECONNRESET:   	// async unlink
+			case -ESHUTDOWN:		// hardware gone
+				break;
+
+			case -ENODEV:
+				//printk("ctrl fail, no dev\n");
+				break;
+
+			case -EPROTO:
+			case -ETIME:
+			case -EILSEQ:
+				//CA: decided not to throttle on ctrl channel transfers since they are a different beast
+				//if (!timer_pending (&cpdev->rx_delay)) {
+				//    mod_timer (&cpdev->rx_delay, jiffies + THROTTLE_JIFFIES);
+					//if (netif_msg_link (dev))
+					//  devdbg (dev, "tx throttle %d",
+					//  		urb->status);
+				//}
+				//netif_stop_queue (dev->net);
+				break;
+			default:
+				//if (netif_msg_tx_err (dev))
+				//  devdbg (dev, "tx err %d", entry->urb->status);
+				break;
+		}
+	}
+
+ctrl_done:
+	urb->dev = NULL;
+	entry->state = ctrl_done;
+	entry->status = urb->status;
+	entry->urb = NULL;
+	if(urb->setup_packet) {
+		kfree(urb->setup_packet);
+	}
+	//cp_lkm_usb_urb_cnt(-1);
+	usb_free_urb (urb);
+	cp_lkm_usb_done_and_defer_other(cpbdev, skb);
+}
+
+
+static int cp_lkm_usb_start_ctrl_xmit(void *ctx, struct sk_buff *skb_in)
+{
+	struct cp_lkm_usb_dev* cpdev = (struct cp_lkm_usb_dev *)ctx;
+	struct cp_lkm_usb_base_dev* cpbdev;
+	int retval = NET_XMIT_SUCCESS;
+	struct urb *urb = NULL;
+	struct skb_data *entry;
+	unsigned long flags;
+	int pipe;
+	u8* tmp8;
+	u16* tmp16;
+	struct usb_ctrlrequest *req = NULL;
+	
+	if(NULL == cpdev || !cp_lkm_usb_is_attached(cpdev) || !cp_lkm_usb_is_base_attached(cpdev->cpbdev)) {
+		//printk("%s() no ctxt\n", __FUNCTION__);
+		goto ctrl_done;
+	}
+
+	cpbdev = cpdev->cpbdev;
+
+	DEBUG_TRACE("%s()", __FUNCTION__);
+
+	if ((urb = usb_alloc_urb(0, GFP_ATOMIC)) == NULL) {
+		retval = -ENOMEM;
+		goto ctrl_done;
+	}
+	//cp_lkm_usb_urb_cnt(1);
+
+	if ((req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC)) == NULL) {
+		//cp_lkm_usb_urb_cnt(-1);
+		usb_free_urb(urb);
+		retval = -ENOMEM;
+		goto ctrl_done;
+	}
+
+	//The upper layer driver packed the ctrl request fields at the front of the buffer (in correct le order).
+	//This layer copies them into a separate usb_ctrlrequest buffer.
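+	//Layout of skb_in->data on entry (8-byte setup header followed by any payload):
+	//  [0] bRequestType  [1] bRequest  [2-3] wValue  [4-5] wIndex  [6-7] wLength  [8..] data stage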
+	tmp8 = (u8*)skb_in->data;
+	req->bRequestType = *tmp8;
+	skb_pull(skb_in, 1);
+
+	tmp8 = (u8*)skb_in->data;
+	req->bRequest = *tmp8;
+	skb_pull(skb_in, 1);
+
+	tmp16 = (u16*)skb_in->data;
+	req->wValue = *tmp16;
+	skb_pull(skb_in, 2);
+
+	tmp16 = (u16*)skb_in->data;
+	req->wIndex = *tmp16;
+	skb_pull(skb_in, 2);
+
+	tmp16 = (u16*)skb_in->data;
+	req->wLength = *tmp16;
+	skb_pull(skb_in, 2);
+	//printk("%s() RT:%x, R:%x, V:%x, I:%x, L:%x\n", __FUNCTION__, req->bRequestType, req->bRequest, req->wValue, req->wIndex, req->wLength);
+
+	entry = (struct skb_data *) skb_in->cb;
+	entry->urb = urb;
+	entry->cpbdev = cpbdev;
+	entry->state = ctrl_start;
+	entry->status = 0;
+	entry->bep = NULL;
+	entry->unique_id = cpdev->unique_id;
+
+	if(req->bRequestType & USB_DIR_IN) {
+		DEBUG_TRACE("%s() ctrl in len: %d", __FUNCTION__,skb_in->len);
+		pipe = usb_rcvctrlpipe(cpbdev->udev, 0);
+	}
+	else{
+		DEBUG_TRACE("%s() ctrl out len: %d", __FUNCTION__,skb_in->len);
+		pipe = usb_sndctrlpipe(cpbdev->udev, 0);
+	}
+
+	usb_fill_control_urb(urb, cpbdev->udev, pipe,
+				 (void *)req, skb_in->data, skb_in->len,
+				 cp_lkm_usb_ctrl_complete, skb_in);
+
+	//cp_lkm_usb_cnts(ctrl_start,1);
+	spin_lock_irqsave (&cpbdev->ctrlq.lock, flags);
+	retval = usb_submit_urb (urb, GFP_ATOMIC);
+	switch (retval) {
+		case 0:
+			//net->trans_start = jiffies;
+			//success: queue it
+			__skb_queue_tail (&cpbdev->ctrlq, skb_in);
+			skb_in = NULL;
+			urb = NULL;
+			req = NULL;
+			break;
+		case -ENODEV:
+			break;
+		case -EPROTO:
+		case -EPIPE:
+			break;
+		default:
+			break;
+	}
+	spin_unlock_irqrestore (&cpbdev->ctrlq.lock, flags);
+
+ctrl_done:
+	if(req) {
+		kfree(req);
+	}
+	if(urb) {
+		//cp_lkm_usb_urb_cnt(-1);
+		usb_free_urb(urb);
+	}
+	if(skb_in) {
+		//cp_lkm_usb_cnts(ctrl_start,-1);
+		dev_kfree_skb_any (skb_in);
+	}
+
+	DEBUG_TRACE("%s() retval %d", __FUNCTION__, retval);
+
+	return retval;
+}
+
+
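+//HZ/8 is roughly 125 ms: how long rx/tx resubmits are delayed after controller
+//i/o faults (-EPROTO/-ETIME/-EILSEQ) before we try again.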
+#define THROTTLE_JIFFIES	(HZ/8)
+/* 
+ * This function runs in a hw interrupt context. Do not put any DEBUG_XX print messages in here. 
+*/ 
+static void cp_lkm_usb_xmit_complete (struct urb *urb)
+{
+	unsigned long flags;
+	struct sk_buff *skb = (struct sk_buff *) urb->context;
+	struct skb_data *entry = (struct skb_data *) skb->cb;
+	struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev *)entry->cpbdev;
+	struct cp_lkm_base_ep* bep = (struct cp_lkm_base_ep*)entry->bep;
+	bool is_data = false;
+	struct cp_lkm_usb_dev* cpdev;
+
+	//remove skb from the list first thing so no other code context looking at the
+	//list (such as unlink_urbs) can mess with it. 
+	spin_lock_irqsave(&cpbdev->out_q.lock,flags);
+	__skb_unlink(skb, &cpbdev->out_q);
+	spin_unlock_irqrestore(&cpbdev->out_q.lock,flags);
+
+	bep->q_cnt--;
+
+	if(bep->ep_num == cpbdev->data_out_bep_num) {
+		is_data = true;
+	}
+
+	// we saved the unique id of the cpdev that sent each tx packet, so look up that cpdev here. 
+	cpdev = cp_lkm_usb_find_dev(entry->unique_id);
+
+	//skip status and error checking if the device has unplugged
+	if(!cp_lkm_usb_is_base_attached(cpbdev)) {
+		goto xmit_done;
+	}
+
+	if (urb->status != 0) {
+		UPDATE_STATS(cpdev->edi->pm_stats64_ctx, tx_errors, 1);
+		switch (urb->status) {
+			case -EPIPE:
+				//don't have to clear halts on ctrl ep
+				if (bep->ep_num != 0) {
+					cp_lkm_usb_defer_kevent (cpbdev, bep, EVENT_TX_HALT);
+				}
+				break;
+
+				/* software-driven interface shutdown */
+			case -ECONNRESET:   	// async unlink
+			case -ESHUTDOWN:		// hardware gone
+				break;
+
+			case -ENODEV:
+				break;
+
+				// like rx, tx gets controller i/o faults during khubd delays
+				// and so it uses the same throttling mechanism.
+			case -EPROTO:
+			case -ETIME:
+			case -EILSEQ:
+				if (!timer_pending (&cpbdev->rx_delay)) {
+					mod_timer (&cpbdev->rx_delay, jiffies + THROTTLE_JIFFIES);
+					//if (netif_msg_link (dev))
+					//  devdbg (dev, "tx throttle %d",
+					//  		urb->status);
+				}
+				//netif_stop_queue (dev->net);
+				break;
+			default:
+				//if (netif_msg_tx_err (dev))
+				//  devdbg (dev, "tx err %d", entry->urb->status);
+				break;
+		}
+	}
+
+xmit_done:
+	entry->state = out_done;
+
+	if(is_data) {
+		//cpdev->dbg_total_tx_irq++;
+		cp_lkm_usb_done_and_defer_data(cpbdev, skb, DATA_SRC_TX);
+	}
+	else{
+		cp_lkm_usb_done_and_defer_other(cpbdev, skb);
+	}
+}
+
+static int cp_lkm_usb_start_xmit_common(void *ctx, struct sk_buff *skb_in, int src, struct cp_lkm_ep* ep)
+{
+	struct cp_lkm_usb_dev* cpdev = (struct cp_lkm_usb_dev *)ctx;
+	struct cp_lkm_usb_base_dev* cpbdev;
+	struct cp_lkm_base_ep* bep;
+	int length;
+	int retval = NET_XMIT_SUCCESS;
+	struct urb *urb = NULL;
+	struct skb_data *entry;
+	unsigned long flags;
+	struct sk_buff* skb_out = NULL;
+	int wres;
+
+	if(NULL == cpdev || !cp_lkm_usb_is_attached(cpdev) || !cp_lkm_usb_is_base_attached(cpdev->cpbdev)) {
+		//printk("%s() no ctxt\n", __FUNCTION__);
+		dev_kfree_skb_any(skb_in);
+		return -1;
+	}
+
+	cpbdev = cpdev->cpbdev;
+	
+	//the network layer doesn't have a pointer to the ep readily available, so it passes in NULL for ep and we
+	//fetch the well known ep for the data out ep here
+	length = 0;
+	if(src == CP_LKM_WRAPPER_SRC_DATA && ep == NULL){
+		ep = cp_lkm_usb_get_ep(cpdev,cpdev->data_out_ep_num);
+		length = skb_in->len;
+	}
+	bep = ep->bep;
+
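+	//The wrapper may produce more than one wrapped buffer per input skb. It returns
+	//CP_LKM_WRAPPER_RES_AGAIN while it has more to give us, so we loop until it says done.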
+	while(1) {
+		skb_out = NULL;
+		urb = NULL;
+		retval = NET_XMIT_SUCCESS;
+
+		//DEBUG_ERROR("%s() wrap it skb_in:%p", __FUNCTION__, skb_in);
+
+		//only use wrappers on the data endpoint
+		if(ep->ep_num == cpdev->data_out_ep_num) {
+			//DEBUG_ERROR("%s() wrap it", __FUNCTION__);
+			//spin_lock_irqsave (&cp_lkm_usb_mgr.lock, flags);
+			wres = cp_lkm_wrapper_send(cpbdev->wrapper_ctxt, src, cpdev->mux_id, skb_in, &skb_out); 
+			skb_in = NULL; //we no longer own skb so null its pointer for future call if we loop
+			//spin_unlock_irqrestore (&cp_lkm_usb_mgr.lock, flags);
+			if (wres == CP_LKM_WRAPPER_RES_ERROR) {
+				DEBUG_ERROR("%s() wrapper error wres:0x%x, skb_out:%p", __FUNCTION__, wres, skb_out);
+				UPDATE_STATS(cpdev->edi->pm_stats64_ctx, tx_dropped, 1);
+				retval = -ENOMEM;
+				goto xmit_done;
+			}
+		}
+		else{
+			//Not a data ep, send the skb and then we are done
+			skb_out = skb_in;
+			skb_in = NULL;
+			wres = CP_LKM_WRAPPER_RES_DONE;
+		}
+	
+		//If we get here, send returned either done or again. skb_out can be NULL if there is nothing to
+		//send, so check that first
+		if(NULL == skb_out) {
+//  		  DEBUG_INFO("%s() no wrapped data", __FUNCTION__);
+			goto xmit_done;
+		}
+   
+		if(cp_lkm_is_broadcom && ((uintptr_t)(skb_out->data) & 0x3)) {
+			//on broadcom, unaligned packets that are multiples of 512 plus 3, 4 or 5 bytes (515, 516, 517, 1027, 1028, 1029, etc)
+			//are corrupted for some reason, so we need to copy them into an aligned buffer
+			int r = skb_out->len & 0x000001FF; //poor man's mod
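+			//e.g. len 515 -> r = 3 and len 1028 -> r = 4: both fall in [3,5] and get copied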
+			if (r >= 3 && r <= 5) {
+				struct sk_buff* skb_new = skb_copy_expand(skb_out, 0, 0, GFP_ATOMIC);
+				if(!skb_new) {
+					retval = -ENOMEM;
+					goto xmit_done;
+				}
+				//printk("%s() unaligned: %p, aligned: %p, len: %d, r: %d\n",__FUNCTION__,skb_out->data, skb_new->data, skb_out->len, r);
+				dev_kfree_skb_any(skb_out);
+				skb_out=skb_new;
+			}
+		}
+
+		if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
+			//if (netif_msg_tx_err (dev))
+			//  devdbg (dev, "no urb");
+			DEBUG_ERROR("%s() urb alloc failed", __FUNCTION__);
+			UPDATE_STATS(cpdev->edi->pm_stats64_ctx, tx_dropped, 1);
+			retval = -ENOMEM;
+			goto xmit_done;
+		}
+		//cp_lkm_usb_urb_cnt(1);
+		entry = (struct skb_data *) skb_out->cb; 
+		entry->urb = urb;
+		entry->cpbdev = cpbdev;
+		entry->bep = bep;
+		entry->state = out_start;
+		entry->unique_id = cpdev->unique_id;
+		//cp_lkm_usb_cnts(out_start,1);
+	
+		if(bep->type == UE_BULK) {
+			usb_fill_bulk_urb (urb, cpbdev->udev, bep->pipe, skb_out->data, 
+							   skb_out->len, cp_lkm_usb_xmit_complete, skb_out);
+		}
+		else{
+			usb_fill_int_urb (urb, cpbdev->udev, bep->pipe, skb_out->data, skb_out->len, 
+							  cp_lkm_usb_xmit_complete, skb_out, bep->interval);
+		}
+
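+		//URB_ZERO_PACKET asks the host controller to terminate a transfer that is an
+		//exact multiple of the endpoint's max packet size with a zero-length packet,
+		//so the device can tell where the transfer ends.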
+		if (!(cpbdev->feature_flags & CP_LKM_FEATURE_NO_ZERO_PACKETS)) {
+			urb->transfer_flags |= URB_ZERO_PACKET;
+		}
+		
+	//    DEBUG_INFO("%s()", __FUNCTION__);
+	//    DEBUG_INFO("%s() send to ep: 0x%x type:%d, pipe:0x%x", __FUNCTION__, ep->ep_num, ep->type, ep->pipe);
+	
+		spin_lock_irqsave (&cpbdev->out_q.lock, flags);
+		retval = usb_submit_urb (urb, GFP_ATOMIC);
+		switch (retval) {
+			case 0:
+				//net->trans_start = jiffies;
+				//success: queue it
+				__skb_queue_tail (&cpbdev->out_q, skb_out);
+				bep->q_cnt++;
+				skb_out = NULL;
+				urb = NULL;
+				if(ep->ep_num == cpdev->data_out_ep_num) {
+					cpbdev->tx_usb_q_count++;
+					if(cpbdev->tx_usb_q_count >= CP_LKM_USB_TX_PAUSE_Q_PKTS){
+						if(!cpbdev->tx_paused) {
+							//pause all cpdevs
+							cp_lkm_usb_dev_pause(cpbdev, true);
+							cp_lkm_usb_stuck_check(cpbdev, CP_LKM_STUCK_START);
+						}
+					}
+				}
+				break;
+			case -EPIPE:
+				UPDATE_STATS(cpdev->edi->pm_stats64_ctx, tx_errors, 1);
+				//don't clear halts on ctrl ep
+				if(ep->ep_num != 0) {
+					cp_lkm_usb_defer_kevent(cpbdev, bep, EVENT_TX_HALT);
+				}
+				break;
+			case -ENODEV:
+				break;
+			case -EPROTO:
+			default:
+				//if (netif_msg_tx_err (dev))
+				UPDATE_STATS(cpdev->edi->pm_stats64_ctx, tx_errors, 1);
+				//  devdbg (dev, "tx: submit urb err %d", retval);
+				break;
+		}
+		spin_unlock_irqrestore (&cpbdev->out_q.lock, flags);
+
+xmit_done:
+		if (retval) {
+			DEBUG_TRACE("%s() failed to send: %d", __FUNCTION__, retval);
+			//cp_lkm_usb_cnts(out_start,-1);
+		}
+
+		//if these are non null then they weren't sent so free them
+		if (skb_out){
+			dev_kfree_skb_any (skb_out);
+		}
+		if(urb) {
+			//cp_lkm_usb_urb_cnt(-1);
+			usb_free_urb (urb);
+		}
+
+		//Bail out of while loop unless the wrapper asked to be called again
+		if(wres != CP_LKM_WRAPPER_RES_AGAIN) {
+			break; 
+		}
+
+		length = 0;
+
+	}
+	return retval;
+}
+
+static int cp_lkm_usb_start_xmit (void *ctx, struct sk_buff *skb)
+{
+	struct cp_lkm_usb_dev* cpdev = (struct cp_lkm_usb_dev *)ctx;
+	struct cp_lkm_usb_base_dev* cpbdev;
+	int res;
+
+	if(NULL == cpdev){
+		DEBUG_TRACE("%s() no ctxt", __FUNCTION__);
+		dev_kfree_skb_any(skb);
+		return -1;
+	}
+	cpbdev = cpdev->cpbdev;
+	if(cpbdev->tx_paused || CP_LKM_USB_ACTIVE != cpdev->state) {
+		DEBUG_TRACE("%s() no ctxt", __FUNCTION__);
+		dev_kfree_skb_any(skb);
+		return -1;
+	}
+	res = cp_lkm_usb_start_xmit_common(ctx, skb, CP_LKM_WRAPPER_SRC_DATA, NULL);
+	return res;
+}
+
+static int cp_lkm_usb_to_cplkm_status(int usb_status)
+{
+	int cplkm_status;
+	switch(usb_status) {
+		case 0:
+			cplkm_status = CP_LKM_STATUS_OK;
+			break;
+		default:
+			//printk("usb err: %d\n", usb_status);
+			cplkm_status = CP_LKM_STATUS_ERROR;
+			break;
+	}
+	return cplkm_status;
+}
+
+static void cp_lkm_usb_other_recv_process (struct cp_lkm_usb_base_dev* cpbdev, struct sk_buff *skb_in)
+{
+	struct skb_data *entry;
+	struct cp_lkm_msg_hdr hdr;
+	int status;
+	struct cp_lkm_base_ep* bep;
+	struct cp_lkm_usb_dev* cpdev = NULL;
+	struct list_head *tmp, *nxt;
+	struct cp_lkm_ep *ep;
+
+	if(!cp_lkm_usb_is_base_attached(cpbdev)){
+		//printk("%s(), cpbdev: %p not attached. state: %d\n",__FUNCTION__,cpbdev,cpbdev->base_state);
+		dev_kfree_skb_any (skb_in);
+		return;
+	}
+	entry = (struct skb_data *)skb_in->cb;
+	bep = entry->bep;
+
+	//Note: pkts on non-data endpoints when running with clones present a problem because there are no headers on these
+	//      pkts to tell us which clone ep to send this to. Fortunately, the modem stack serializes clone instances so
+	//      only one can be accessing the non-data endpoints at a time. In order to get any responses from the module
+	//      over their endpoint, they must be either listening or have posted a recv.  We use this fact to find the
+	//      ep we need to send the recv back on.
+	list_for_each_safe(tmp, nxt, &bep->eps) {
+		ep = list_entry(tmp, struct cp_lkm_ep, list_bep);
+		if (ep->con_flags & (CP_LKM_USB_LISTEN | CP_LKM_USB_RECV)) {
+			cpdev = ep->cpdev;
+			if (ep->con_flags & CP_LKM_USB_RECV) {
+				//can only have one recv pending on non-data endpoints for a given ep number,
+				//therefore when the clone is done, the base is done
+				ep->con_flags &= ~CP_LKM_USB_RECV;  
+				bep->con_flags &= ~CP_LKM_USB_RECV;			
+			}
+			//printk("%s(), other data cpdev: %p, ep: %p, num: 0x%x, flags: 0x%x\n",__FUNCTION__,cpdev,ep, ep->ep_num,ep->con_flags);
+			break;
+		}
+	}
+
+	if (!cpdev) {
+		//printk("%s() no cpdev unexpectedly for unique_id: %d",__FUNCTION__, entry->unique_id);
+		dev_kfree_skb_any (skb_in);
+		return;
+	}
+
+	status = cp_lkm_usb_to_cplkm_status(entry->status);
+	//printk("%s() other data uid: %d, ep_num:0x%x, status:%d, len: %d\n", __FUNCTION__, cpdev->unique_id,bep->ep_num, entry->status, skb_in->len);
+
+	memset(&hdr,0,sizeof(hdr));
+	hdr.instance_id = cpdev->unique_id;
+	hdr.cmd = CP_LKM_USB_CMD_DATA_RECV;
+	hdr.status = status;
+	hdr.len = skb_in?skb_in->len:0;
+	hdr.arg1 = bep->ep_num;
+	cp_lkm_post_message(&cp_lkm_usb_mgr.common, &hdr, skb_in); 
+
+	return;
+}
+
+
+static void cp_lkm_usb_ctrl_process (struct cp_lkm_usb_base_dev* cpbdev, struct sk_buff *skb_in)
+{
+	struct skb_data *entry;
+	struct cp_lkm_msg_hdr hdr;
+	int status;
+	struct cp_lkm_usb_dev* cpdev = NULL;
+
+	DEBUG_TRACE("%s()", __FUNCTION__);
+	if(!cp_lkm_usb_is_base_attached(cpbdev)){
+		dev_kfree_skb_any (skb_in);
+		return;
+	}
+
+	entry = (struct skb_data *)skb_in->cb;
+	cpdev = cp_lkm_usb_find_dev(entry->unique_id);
+	if (!cpdev) {
+		//printk("%s() no cpdev unexpectedly for unique_id: %d",__FUNCTION__, entry->unique_id);
+		dev_kfree_skb_any (skb_in);
+		return;
+	}
+
+	status = cp_lkm_usb_to_cplkm_status(entry->status);
+	memset(&hdr,0,sizeof(hdr));
+	hdr.instance_id = cpdev->unique_id;
+	hdr.cmd = CP_LKM_USB_CMD_CTRL_RECV;
+	hdr.status = status;
+	hdr.len = skb_in?skb_in->len:0;
+	hdr.arg1 = 0;  //ctrl channel ep is always 0
+
+	cp_lkm_post_message(&cp_lkm_usb_mgr.common, &hdr, skb_in); 
+	DEBUG_TRACE("%s() ctrl response status:%d", __FUNCTION__, entry->status);
+
+	return;
+}
+
+
+//This function runs in an interrupt context so it can't be preempted. This means cpdev can't
+//be deleted out from under us.
+static void cp_lkm_usb_data_recv_process (struct cp_lkm_usb_base_dev* cpbdev, struct sk_buff *skb_in)
+{
+	struct sk_buff *skb_out;
+	int res;
+	int dst;
+	struct skb_data *entry;
+	struct cp_lkm_usb_dev* cpdev;
+	struct cp_lkm_base_ep* bep;
+	int ep_num;
+	int mux_id;
+
+	// WARNING: The memory this pointer points to will be freed by the wrapper, so copy everything you need 
+	//  		out of it here before going into the while loop
+	entry = (struct skb_data *)skb_in->cb;
+	bep = entry->bep;
+	ep_num = bep->ep_num;
+
+	//printk("%s() cpbdev: %p, bep: %p base_state: %d\n", __FUNCTION__, cpbdev, bep, cpbdev->base_state);
+
+	if(!cp_lkm_usb_is_base_attached(cpbdev)){
+		dev_kfree_skb_any (skb_in);
+		return;
+	}
+
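+	//The wrapper may unpack several packets from a single USB transfer (aggregated modem
+	//protocols). It returns CP_LKM_WRAPPER_RES_AGAIN until the transfer is drained, so we
+	//loop until it says done.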
+	while(1) {
+		skb_out = NULL;
+
+		mux_id = 0;
+
+		res = cp_lkm_wrapper_recv(cpbdev->wrapper_ctxt, &dst, &mux_id, skb_in, &skb_out);
+
+		if (dst != CP_LKM_WRAPPER_DST_CTRL && dst != CP_LKM_WRAPPER_DST_DATA) {
+			// this is something other than ctrl or data that we don't know what to do with, so drop it.
+			goto recv_done;
+		}
+
+		cpdev = cp_lkm_usb_find_muxed_dev(cpbdev, mux_id);
+
+		skb_in = NULL;
+	
+		if (NULL == cpdev) {
+			//LOG("%s(), no cpdev found for mux_id: 0x%x, or base_id: %d", __FUNCTION__,mux_id,cpbdev->base_id);
+			DEBUG_WARN("%s(), no cpdev found for mux_id: 0x%x, or base_id: %d", __FUNCTION__,mux_id,cpbdev->base_id);
+			goto recv_done;
+		}
+
+		if(res == CP_LKM_WRAPPER_RES_ERROR) {
+			UPDATE_STATS(cpdev->edi->pm_stats64_ctx, rx_dropped, 1);
+			goto recv_done;
+		}
+
+		//printk("%s() cpdev: %p, ep_num: 0x%x, dst: %d, mux_id: %d, state: %d, res: %d\n", __FUNCTION__, cpdev, ep_num, dst, mux_id, cpdev->state, res);
+
+		//DEBUG_INFO("%s() while() - skb_out:%p, dst:%d, res:%d", __FUNCTION__, skb_out, dst, res);
+
+		//if nothing to send, see if we can bail or if need to call again
+		if(NULL == skb_out){
+			goto recv_done;
+		}
+	
+		if(dst == CP_LKM_WRAPPER_DST_CTRL) {
+			//printk("%s() ctrl pkt cpdev: %p\n", __FUNCTION__, cpdev);
+			if (skb_out->len) { // watch for 0 length short packets
+				struct cp_lkm_msg_hdr hdr;
+
+				DEBUG_TRACE("%s() recv app pkt", __FUNCTION__);
+				memset(&hdr,0,sizeof(hdr));
+				hdr.instance_id = cpdev->unique_id;
+				hdr.cmd = CP_LKM_USB_CMD_DATA_RECV;
+				hdr.status = CP_LKM_STATUS_OK;
+				hdr.len = skb_out->len;
+				hdr.arg1 = ep_num;
+
+				cp_lkm_post_message(&cp_lkm_usb_mgr.common, &hdr, skb_out); 
+				skb_out = NULL;
+			}
+		}
+		//dst == CP_LKM_WRAPPER_DST_DATA
+		else{ 
+			//printk("%s() data pkt cpdev: %p\n", __FUNCTION__, cpdev);
+			if (skb_out->len && cpdev->edi->pm_recv){   			 
+				//printk("%s() data pkt send to pm cpdev: %p, first byte: 0x%x\n", __FUNCTION__, cpdev, skb_out->data[0]);
+				cpdev->edi->pm_recv(cpdev->edi->pm_recv_ctx, skb_out);
+				skb_out = NULL;
+			}
+		}
+
+recv_done:
+		if(skb_out) {
+			dev_kfree_skb_any(skb_out);
+		}
+
+		//if wrapper didn't ask to be called back, then done
+		if(res != CP_LKM_WRAPPER_RES_AGAIN) {
+			break;
+		}
+
+	}
+
+	return;
+}
+
+/* 
+ * This function runs in a hw interrupt context. Do not put any DEBUG_XX print messages in here. 
+*/ 
+static void cp_lkm_usb_recv_complete (struct urb *urb)
+{
+	unsigned long flags;
+	struct sk_buff *skb = (struct sk_buff *) urb->context;
+	struct skb_data *entry = (struct skb_data *) skb->cb;
+	struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev *)entry->cpbdev;
+	struct cp_lkm_usb_dev* cpdev_stats_only;
+	int urb_status = urb->status;
+	struct cp_lkm_base_ep* bep = entry->bep;
+	bool is_data = false;
+	//if(urb->status) {
+	//  printk("recv_done: status: %d, len:%d\n", urb->status, urb->actual_length);
+	//}
+
+	// we don't know what cpdev recv packets are destined for when running muxed clones, so report all errors
+	// to the base device (for non cloned cases, this will always be the correct cpdev)
+	cpdev_stats_only = cp_lkm_usb_find_dev(cpbdev->base_id);
+
+	//remove skb from the list first thing so no other code context looking at the
+	//list (such as unlink_urbs) can mess with it. 
+	spin_lock_irqsave(&cpbdev->in_q.lock,flags);
+	__skb_unlink(skb, &cpbdev->in_q);
+	spin_unlock_irqrestore(&cpbdev->in_q.lock,flags);
+
+	skb_put (skb, urb->actual_length);
+	if(bep->ep_num == cpbdev->data_in_bep_num) {
+		is_data = true;
+		entry->state = in_data_done;
+		//note we don't decrement the data ep cnt until we process the pkt
+	} else{
+		bep->q_cnt--;
+		entry->state = in_other_done;
+	}
+	entry->status = urb->status;
+
+	//skip status and error checking if the device has unplugged
+	if(!cp_lkm_usb_is_base_attached(cpbdev)) {
+		entry->status = -ENODEV;
+		goto recv_done;
+	}
+
+	switch (urb_status) {
+		// success
+		case 0:
+			break;
+
+			// stalls need manual reset. this is rare ... except that
+			// when going through USB 2.0 TTs, unplug appears this way.
+			// we avoid the highspeed version of the ETIMEOUT/EILSEQ
+			// storm, recovering as needed.
+		case -EPIPE:
+			UPDATE_STATS(cpdev_stats_only->edi->pm_stats64_ctx, rx_errors, 1);
+			//don't clear halts on ctrl ep
+			if(bep->ep_num != 0) {
+				cp_lkm_usb_defer_kevent (cpbdev, bep, EVENT_RX_HALT);
+			}
+			goto block;
+
+			// software-driven interface shutdown
+		case -ECONNRESET:   	// async unlink
+		case -ESHUTDOWN:		// hardware gone
+			goto block;
+
+		case -ENODEV:
+			//printk("recv_done nodev:%d\n", ENODEV);
+			goto block;
+
+			// we get controller i/o faults during khubd disconnect() delays.
+			// throttle down resubmits, to avoid log floods; just temporarily,
+			// so we still recover when the fault isn't a khubd delay.
+		case -EPROTO:
+		case -ETIME:
+		case -EILSEQ:
+			UPDATE_STATS(cpdev_stats_only->edi->pm_stats64_ctx, rx_errors, 1);
+			if (!timer_pending (&cpbdev->rx_delay)) {
+				mod_timer (&cpbdev->rx_delay, jiffies + THROTTLE_JIFFIES);
+			}
+block:
+			if(bep->ep_num == cpbdev->data_in_bep_num) {
+				bep->q_cnt--;
+				entry->state = in_data_cleanup;
+			}
+			else{
+				entry->state = in_other_cleanup;
+			}
+
+			break;
+
+			// data overrun ... flush fifo?
+		case -EOVERFLOW:
+			UPDATE_STATS(cpdev_stats_only->edi->pm_stats64_ctx, rx_over_errors, 1);
+
+			// FALLTHROUGH
+
+		default:
+			if(bep->ep_num == cpbdev->data_in_bep_num) {
+				bep->q_cnt--;
+				entry->state = in_data_cleanup;
+			}
+			else{
+				entry->state = in_other_cleanup;
+			}
+			UPDATE_STATS(cpdev_stats_only->edi->pm_stats64_ctx, rx_errors, 1);
+			break;
+	}
+
+	// on responses to a requested recv from the app driver, we need to always return something even on error so force it here
+	if(bep->con_flags & CP_LKM_USB_RECV) {
+		if(is_data){
+			entry->state = in_data_done; //this should never happen, data endpoints always listen, they don't post recv's
+		}
+		else{
+			entry->state = in_other_done;
+		}
+	}
+
+recv_done:
+	//do not use the 'entry' struct after this call. It is part of the skb and the skb will be freed when the _bh function runs.
+	//if you need something from it save it off before calling this
+	if(is_data) {
+		//cpdev->dbg_total_rx_irq++;
+		//printk("%s(), got data on cpbdev: %p, bep: %p, id: %d\n",__FUNCTION__, cpbdev, entry->bep, cpbdev->base_id);
+		cp_lkm_usb_done_and_defer_data(cpbdev, skb, DATA_SRC_RX);
+	}
+	else{
+		//printk("%s(), got other data on cpbdev: %p, bep: %p, id: %d\n",__FUNCTION__, cpbdev, entry->bep, cpbdev->base_id);
+		cp_lkm_usb_done_and_defer_other(cpbdev, skb);
+	}
+}
+
+//static int g_num_adjusts = 0;
+//static int g_num_recv_pkts = 0;
+//static int g_num_iters = 0;
+static int cp_lkm_usb_submit_recv(struct cp_lkm_usb_base_dev* cpbdev , struct urb *urb, gfp_t flags, struct cp_lkm_base_ep* bep, bool data)
+{
+	struct sk_buff  *skb;
+	struct skb_data *entry;
+	int 			retval = 0;
+	unsigned long   lockflags;
+	size_t size;
+	int hdr_size = 0;
+	int hdr_offset = 0;
+	int pad = 0;  //some platforms require alignment override. pad takes care of that.
+
+	//g_num_recv_pkts++;
+	//g_num_iters++;
+	//if(g_num_iters > 10000){
+	//	printk("%s() num pkts: %d, num adjusts: %d\n",__FUNCTION__,g_num_recv_pkts,g_num_adjusts);
+	//	g_num_iters = 0; 
+	//}
+	size = bep->max_transfer_size;
+	if (data) {
+		hdr_size = cpbdev->pm_hdr_size;
+		hdr_offset = cpbdev->pm_hdr_offset;
+	}
+
+	if(cp_lkm_is_broadcom && (hdr_offset & 0x3)) {
+		//Jira issue FW-14929: On broadcom, we have to keep the buffers four byte aligned else the USB block 
+		//corrupts the data (no idea why).
+		//Round up the hdr_offset to nearest 4 byte boundary. This means pkts may not be aligned as expected,
+		//so the receive function will need to either realign with a copy, or send up to the stack unaligned 
+		// See cp_lkm_pm_net_recv() to see how we decided to deal with it (subject to change).
+		pad = 4 - (hdr_offset&0x3);
+		//g_num_adjusts++;
+	}
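+	//e.g. hdr_offset 6 -> pad = 4 - (6 & 3) = 2; hdr_offset+pad = 8 is a multiple of 4,
+	//so skb->data stays 4-byte aligned after the skb_reserve() below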
+
+	if ((skb = alloc_skb (size+hdr_size+pad, flags)) == NULL) {
+		//if (netif_msg_rx_err (dev))
+		//    devdbg (dev, "no rx skb");
+		cp_lkm_usb_defer_kevent (cpbdev, bep, EVENT_RX_MEMORY);
+		return -ENOMEM;
+	}
+	if (data) {
+		skb_reserve(skb, hdr_offset+pad);
+		//printk("%s(), data: %p, len: %d, whs:%d, hs:%d, ho:%d\n",__FUNCTION__,skb->data,skb->len,wrapper_hdr_size,hdr_size,hdr_offset);
+	}
+	entry = (struct skb_data *) skb->cb;
+	entry->urb = urb;
+	entry->cpbdev = cpbdev;
+	if(data) {
+		entry->state = in_data_start;
+	}
+	else{
+		entry->state = in_other_start;
+	}
+
+	entry->status = 0;
+	entry->bep = bep;
+
+	if(bep->type == UE_BULK) {
+		usb_fill_bulk_urb (urb, cpbdev->udev, bep->pipe, skb->data, size, 
+						   cp_lkm_usb_recv_complete, skb);
+	}
+	else{
+		usb_fill_int_urb (urb, cpbdev->udev, bep->pipe, skb->data, size, 
+						  cp_lkm_usb_recv_complete, skb, bep->interval);
+	}
+	//cp_lkm_usb_cnts(entry->state,1);
+	spin_lock_irqsave (&cpbdev->in_q.lock, lockflags);
+	if (cp_lkm_usb_is_base_attached(cpbdev) && !test_bit (EVENT_RX_HALT, &bep->err_flags)) {
+		DEBUG_TRACE("%s() ep:0x%x, size:%d, type:%d, pipe:0x%x",__FUNCTION__, bep->ep_num, size, bep->type, bep->pipe);
+		retval = usb_submit_urb (urb, GFP_ATOMIC);
+		switch (retval) {
+			case -EPIPE:
+				//don't clear halts on ctrl ep
+				if(bep->ep_num != 0) {
+					cp_lkm_usb_defer_kevent (cpbdev, bep, EVENT_RX_HALT);
+				}
+				break;
+			case -ENOMEM:
+				cp_lkm_usb_defer_kevent (cpbdev, bep, EVENT_RX_MEMORY);
+				break;
+			case -ENODEV:
+				//if (netif_msg_ifdown (dev))
+				//  devdbg (dev, "device gone");
+				//netif_device_detach (dev->net);
+				break;
+			case -EPROTO:
+			default:
+				//if (netif_msg_rx_err (dev))
+				//  devdbg (dev, "rx submit, %d", retval);
+				cp_lkm_schedule_rx_restock(cpbdev,bep);
+				break;
+			case 0:
+				__skb_queue_tail (&cpbdev->in_q, skb);
+				bep->q_cnt++;
+				//if(cpdev->in_q.qlen == 1 && ep->index == CP_LKM_DATA_INDEX){
+				//  printk("rx q empty\n");
+				//}
+
+		}
+	} else {
+		//if (netif_msg_ifdown (dev))
+		//  devdbg (dev, "rx: stopped");
+		retval = -ENOLINK;
+	}
+	spin_unlock_irqrestore (&cpbdev->in_q.lock, lockflags);
+	if (retval) {
+		DEBUG_TRACE("%s() FAILED ep_num:0x%x ep_type:%d, retval: %d",__FUNCTION__, bep->ep_num, bep->type, retval);
+		//cp_lkm_usb_cnts(entry->state,-1);
+		dev_kfree_skb_any (skb);
+	}
+
+	return retval;
+}
+
+
+static int cp_lkm_usb_init(void)
+{
+	DEBUG_TRACE("%s()", __FUNCTION__);
+	memset(&cp_lkm_usb_mgr, 0x00, sizeof(struct cp_lkm_usb_ctx));
+	cp_lkm_usb_mgr.common.open = cp_lkm_usb_open;
+	cp_lkm_usb_mgr.common.close = cp_lkm_usb_close;
+	cp_lkm_usb_mgr.common.handle_msg = cp_lkm_usb_handle_msg;
+	cp_lkm_usb_mgr.common.handle_ioctl = cp_lkm_usb_handle_ioctl;
+	INIT_LIST_HEAD(&cp_lkm_usb_mgr.dev_list);
+
+	cp_lkm_common_ctx_init(&cp_lkm_usb_mgr.common);
+
+	spin_lock_init(&cp_lkm_usb_mgr.lock);
+	//sema_init(&cp_lkm_usb_mgr.thread_sem, 1);
+
+	if(!strcmp(PRODUCT_PLATFORM, "brcm_arm")) {
+		LOG("cp_lkm: Broadcom platform");
+		cp_lkm_is_broadcom = 1;
+	}
+
+	LOG("cp_lkm: Product chipset %s",PRODUCT_INFO_CHIPSET);
+	LOG("cp_lkm: Product platform %s",PRODUCT_PLATFORM);
+
+	//Things work better if the napi weight here matches the global weight set in service_manager/services/firewall.py.
+	//This is true even if we don't use napi here, since ethernet on some platforms uses it.
+	if ((strcmp(PRODUCT_PLATFORM,"ramips")==0) && (strcmp(PRODUCT_INFO_CHIPSET, "3883")!=0)){
+		//all ralink (mediatek) platforms except for 3883 use the low settings
+		//use_high = false;
+		CP_LKM_PM_NAPI_WEIGHT = 32;
+	}
+	else{
+		//use_high = true;
+		CP_LKM_PM_NAPI_WEIGHT = 64;
+	}
+
+	//set up default settings for all platforms
+	CP_LKM_USB_NAPI_MAX_WORK = CP_LKM_PM_NAPI_WEIGHT;
+	CP_LKM_USB_MAX_RX_QLEN = CP_LKM_USB_NAPI_MAX_WORK;
+	CP_LKM_USB_MAX_OTHER_QLEN = 2;
+	CP_LKM_USB_TX_PAUSE_Q_PKTS = CP_LKM_USB_NAPI_MAX_WORK;   
+	CP_LKM_USB_TX_RESUME_Q_PKTS = CP_LKM_USB_TX_PAUSE_Q_PKTS/4;
+	CP_LKM_USB_TX_SCHED_CNT = 1;
+	CP_LKM_USB_RX_SCHED_CNT = 1;  
+	CP_LKM_USB_RESTOCK_MULTIPLE = 1; //restock rx as we process them
+	CP_LKM_USB_TASKLET_CNT = 10;
+	CP_LKM_USB_WORKQUEUE_CNT = 5;
+	CP_LKM_USB_PROCESS_DIVISOR = 4;
+
+	LOG("cp_lkm: Processor: %s, Max work: %d, NAPI budget: %d, QLEN: %d.",PRODUCT_INFO_CHIPSET, CP_LKM_USB_NAPI_MAX_WORK, CP_LKM_PM_NAPI_WEIGHT, CP_LKM_USB_MAX_RX_QLEN);
+
+	return 0;
+
+}
+
+static int cp_lkm_usb_cleanup(void)
+{
+	//module is unloading, clean up everything
+	// empty pending posted messages
+	cp_lkm_cleanup_msg_list(&cp_lkm_usb_mgr.common);
+	
+	cp_lkm_usb_close(&cp_lkm_usb_mgr.common);
+	return 0;
+}
+
+static int cp_lkm_usb_open(struct cp_lkm_common_ctx *ctx)
+{
+	//struct cp_lkm_usb_ctx* mgr;
+
+	DEBUG_TRACE("%s()", __FUNCTION__);
+	//mgr = (struct cp_lkm_usb_ctx*)ctx;
+
+	return 0;
+}
+
+static int cp_lkm_usb_close(struct cp_lkm_common_ctx *ctx)
+{
+	//unsigned long flags;
+	//struct cp_lkm_usb_dev* cpdev;
+	//struct cp_lkm_usb_close_intf ci;
+	//struct cp_lkm_usb_unplug_intf ui;
+	LOG("%s() called unexpectedly.", __FUNCTION__);
+
+	//NOTE: catkin 10/11/2019 - Close is only called in our system if the modem stack crashes. This means
+	//                          things are in a bad state and the router will be rebooting. We decided not 
+	//                          to clean things up here because this code got into an infinite loop in 
+	//                          certain fail situations, which prevented the router from rebooting. 
+	//                          Revisit if close ever becomes a normal event.
+
+	/*
+	while(1) {
+		spin_lock(&cp_lkm_usb_mgr.lock);
+
+		cpdev = cp_lkm_usb_get_head_dev();
+
+		spin_unlock(&cp_lkm_usb_mgr.lock);
+		if(!cpdev) {
+			return 0;
+		}
+
+		//TODO - when this closed we have a modem plugged, we will be deleting the top half of the driver while the bottom half is 
+		//  	 still plugged. Figure out how to force the driver to disconnect the modem
+		ci.unique_id = cpdev->unique_id;
+		cp_lkm_usb_close_intf(&ci);
+
+		//the unplug removes the device from the list which prevents us from infinite looping here
+		ui.unique_id = cpdev->unique_id;
+		cp_lkm_usb_unplug_intf(&ui);
+	}
+
+	cp_lkm_cleanup_msg_list(ctx);
+	*/
+	return 0;
+}
+
+static int cp_lkm_usb_handle_msg(struct cp_lkm_common_ctx *ctx, struct cp_lkm_msg_hdr *hdr, struct sk_buff *skb)
+{
+	int retval = -1;
+	struct cp_lkm_ep* ep;
+	struct cp_lkm_usb_dev* cpdev;
+	struct cp_lkm_usb_base_dev* cpbdev;
+
+	//grab lock to protect global device list before searching (don't want to search it if another thread is adding or removing a cpdev)
+	spin_lock(&cp_lkm_usb_mgr.lock);
+	cpdev = cp_lkm_usb_find_dev(hdr->instance_id);
+
+	//bail if the device is gone or not yet probed; holding the global lock keeps disconnect from deleting the cpdev while we run here
+	if(!cpdev || !cp_lkm_usb_is_attached(cpdev) || !cp_lkm_usb_is_base_attached(cpdev->cpbdev)) {
+		spin_unlock(&cp_lkm_usb_mgr.lock);
+		dev_kfree_skb_any (skb);
+		//printk("%s() no device or no probe yet\n", __FUNCTION__);
+		return 0;
+	}
+	cpbdev = cpdev->cpbdev;
+	switch(hdr->cmd) {
+		case CP_LKM_USB_CMD_DATA_SEND:
+			{
+				ep = cp_lkm_usb_get_ep(cpdev, hdr->arg1);
+				if(ep) {
+					//printk("%s(), send other data cpbdev: %p, cpdev: %p, bep: %p, ep: %p, num: 0x%x\n",__FUNCTION__,cpdev->cpbdev,cpdev,ep->bep,ep,ep->ep_num);
+					retval = cp_lkm_usb_start_xmit_common(cpdev, skb, CP_LKM_WRAPPER_SRC_CTRL, ep);
+					skb = NULL;
+				}
+				else{
+					DEBUG_TRACE("%s() Invalid EP number 0x%x", __FUNCTION__, hdr->arg1);
+					retval = -1;
+				}
+			}
+			break;
+		case CP_LKM_USB_CMD_CTRL_SEND:
+			{
+				retval = cp_lkm_usb_start_ctrl_xmit(cpdev, skb);
+				skb = NULL;
+			}
+			break;
+	}
+
+	spin_unlock(&cp_lkm_usb_mgr.lock);
+
+	if(skb) {
+		dev_kfree_skb_any (skb);
+	}
+	return retval;
+}
+
+static int cp_lkm_usb_handle_ioctl(struct cp_lkm_common_ctx *ctx, int cmd, void *k_argp)
+{
+	int retval = -1;
+	//printk("%s(), cmd:0x%x\n", __FUNCTION__, _IOC_NR(cmd));
+
+	switch(cmd) {
+	case CP_LKM_IOCTL_USB_PLUG_INTF:
+		{
+			struct cp_lkm_usb_plug_intf* pi = (struct cp_lkm_usb_plug_intf*)k_argp;
+			retval = cp_lkm_usb_plug_intf(pi);
+		}
+		break;
+	case CP_LKM_IOCTL_USB_SET_WRAPPER:
+		{
+			struct cp_lkm_usb_set_wrapper* sw = (struct cp_lkm_usb_set_wrapper*)k_argp;
+			retval = cp_lkm_usb_set_wrapper(sw);
+		}
+		break;
+	case CP_LKM_IOCTL_USB_SET_MUX_ID:
+		{
+			struct cp_lkm_usb_set_mux_id* smi = (struct cp_lkm_usb_set_mux_id*)k_argp;
+			retval = cp_lkm_usb_set_mux_id(smi);
+		}
+		break;
+	case CP_LKM_IOCTL_USB_OPEN_INTF:
+		{
+			struct cp_lkm_usb_open_intf* oi = (struct cp_lkm_usb_open_intf*)k_argp;
+			retval = cp_lkm_usb_open_intf(oi);
+		}
+		break;
+	case CP_LKM_IOCTL_USB_CLOSE_INTF:
+		{
+			struct cp_lkm_usb_close_intf* ci = (struct cp_lkm_usb_close_intf*)k_argp;
+			retval = cp_lkm_usb_close_intf(ci);
+		}
+		break;
+	case CP_LKM_IOCTL_USB_UNPLUG_INTF:
+		{
+			struct cp_lkm_usb_unplug_intf* ui = (struct cp_lkm_usb_unplug_intf*)k_argp;
+			retval = cp_lkm_usb_unplug_intf(ui);
+		}
+		break;
+	case CP_LKM_IOCTL_USB_EP_ACTION:
+		{
+			struct cp_lkm_usb_ep_action* ea = (struct cp_lkm_usb_ep_action*)k_argp;
+			retval = cp_lkm_usb_ep_action(ea);
+		}
+		break;
+	case CP_LKM_IOCTL_USB_PM_LINK:
+		{
+			struct cp_lkm_usb_pm_link *upl = (struct cp_lkm_usb_pm_link *)k_argp;
+			retval = cp_lkm_usb_pm_link(upl);
+		}
+		break;
+	case CP_LKM_IOCTL_USB_IS_ALIVE_INTF:
+		{
+			struct cp_lkm_usb_is_alive_intf* alivei = (struct cp_lkm_usb_is_alive_intf*)k_argp;
+			retval = cp_lkm_usb_is_alive_intf(alivei);
+		}
+	}
+
+	return retval;
+}
+
+
+/*******************************  kernel module PM instance functionality **********************************/
+struct cp_lkm_pm_ctx {
+	struct cp_lkm_common_ctx common;
+	struct list_head pm_list;
+	spinlock_t pm_list_lock;
+};
+
+struct cp_lkm_pm_ctx cp_lkm_pm_mgr;
+
+
+static void cp_lkm_pm_filter_empty_list(struct cp_lkm_pm_common *pm)
+{
+
+	struct cp_lkm_pm_filter *filter;
+	struct list_head *entry, *tmp;
+
+	list_for_each_safe(entry, tmp, &pm->filter_list) {
+		filter = list_entry(entry, struct cp_lkm_pm_filter, list);
+		list_del(&filter->list);
+		kfree(filter);
+	}
+}
+
+static bool cp_lkm_pm_filter_ok(struct cp_lkm_pm_common *pm, unsigned char *buf, unsigned int buf_len)
+{
+	bool allow = true; // default allow the egress packet
+
+	struct list_head *pos;
+
+	struct in_device *in_dev;
+	struct in_ifaddr *ifa;
+	struct iphdr *ipv4_hdr;
+	u32 ipv4_src_addr = 0;
+	u32 ipv4_net_addr = 0;
+	u32 ipv4_net_mask = 0;
+
+	ipv4_hdr = (struct iphdr *)buf;
+
+	// these are the include filters (white list) - exclude filters (black list) are not currently supported
+	// exclude filters may need to be processed in another loop through the filters
+	list_for_each(pos, &pm->filter_list) {
+		struct cp_lkm_pm_filter *filter = list_entry(pos, struct cp_lkm_pm_filter, list);
+		switch(filter->type) {
+		case CP_LKM_PM_FILTER_TYPE_IP_SRC_WAN_SUBNET_INCLUDE:
+			if (4 == ipv4_hdr->version) {
+				// ipv4
+				allow = false;
+				ipv4_src_addr = __be32_to_cpu(ipv4_hdr->saddr);
+				if(ipv4_src_addr == 0){
+					//DHCP rebind packets may have a src addr of 0.0.0.0 and we want to let those through.
+					allow = true;
+				}
+				else{
+					// get network device IP address and check against src packet ip address
+					rcu_read_lock();
+					in_dev = rcu_dereference(pm->net_dev->ip_ptr);
+					// in_dev has a list of IP addresses (because an interface can have multiple - check them all)
+					for (ifa = in_dev->ifa_list; ifa != NULL; ifa = ifa->ifa_next) {
+						ipv4_net_addr = __be32_to_cpu(ifa->ifa_local);
+						ipv4_net_mask = __be32_to_cpu(ifa->ifa_mask);
+						if ((ipv4_net_addr & ipv4_net_mask) == (ipv4_src_addr & ipv4_net_mask)) {
+							// allow the packet
+							allow = true;
+							break;
+						}
+					}
+					rcu_read_unlock();
+				}
+			}/* benk needs to be tested before ok to execute
+			 else if (6 == ipv4_hdr->version) {
+				struct in6_addr *addr = (struct in6_addr *)&buf[2 * sizeof(u32)];
+				if (ipv6_chk_prefix(addr, pm->net_dev)) {
+					allow = true;
+				}
+			} */
+			break;
+		case CP_LKM_PM_FILTER_TYPE_IP_SRC_SUBNET_INCLUDE:
+			if (4 == ipv4_hdr->version) {
+				// ipv4
+				allow = false;
+				ipv4_src_addr = __be32_to_cpu(ipv4_hdr->saddr);
+				if(ipv4_src_addr == 0){
+					//DHCP rebind packets may have a src addr of 0.0.0.0 and we want to let those through.
+					allow = true;
+				}
+				else if ((filter->subnet.ipv4_addr & filter->subnet.ipv4_mask) == (ipv4_src_addr & filter->subnet.ipv4_mask)) {
+					allow = true;
+				}
+			}
+			break;
+
+		default:
+			break;
+		}
+
+		if (allow) {
+			break;
+		}
+	}
+	
+	if (!allow) {
+		DEBUG_WARN("%s() dropping packet - src:0x%x\n", __FUNCTION__, ipv4_src_addr);
+	} 
+
+	return allow;
+}
+/*******************************  kernel module pm common functionality **********************************/
+int cp_lkm_common_init(struct cp_lkm_pm_common *pmc)
+{
+	// allocate stats struct
+	pmc->pcpu_stats64 = netdev_alloc_pcpu_stats(struct cp_lkm_pm_stats64);
+	if (!pmc->pcpu_stats64) {
+		return -ENOMEM;
+	}
+
+
+	pmc->pm_link_count = 0;
+	spin_lock_init(&pmc->pm_link_lock);
+	INIT_LIST_HEAD(&pmc->filter_list);
+
+	return 0;
+}
+
+void cp_lkm_common_deinit(struct cp_lkm_pm_common *pmc)
+{
+	if (!pmc->pcpu_stats64) {
+		return;
+	}
+	free_percpu(pmc->pcpu_stats64);
+	pmc->pcpu_stats64 = NULL;
+}
+// The pm_link_lock is used to coordinate activity between xmit, poll, and link/unlink
+// It is okay to poll and xmit at the same time, but we don't want to do either if we are linking or unlinking.
+// link/unlink sets the pm_link_count negative to block both poll and xmit. If pm_link_count is not negative then
+// both poll and xmit are free to grab the link at any time and at the same time.
+//retval: 
+//  0 = you have the token, proceed
+// -1 = you don't have the token, do not pass go 
+int cp_lkm_common_inc_link_lock(struct cp_lkm_pm_common* pmc)
+{
+	unsigned long flags;
+	int retval = 0;
+	spin_lock_irqsave(&pmc->pm_link_lock, flags);
+	if(pmc->pm_link_count < 0) {
+		retval = -1;
+	}
+	else{
+		pmc->pm_link_count++;
+	}
+	spin_unlock_irqrestore(&pmc->pm_link_lock, flags);
+	return retval;
+}
+
+int cp_lkm_common_dec_link_lock(struct cp_lkm_pm_common* pmc)
+{
+	unsigned long flags;
+	int retval = 0;
+	spin_lock_irqsave(&pmc->pm_link_lock, flags);
+	if(pmc->pm_link_count > 0) {
+		pmc->pm_link_count--;
+	}
+	else{
+		//should never hit this
+		retval = -1;
+	}
+	spin_unlock_irqrestore(&pmc->pm_link_lock, flags);
+	return retval;
+}
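+//Typical usage (see cp_lkm_pm_net_xmit): call cp_lkm_common_inc_link_lock() before
+//touching the edi, bail out if it returns -1, and call cp_lkm_common_dec_link_lock()
+//when done.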
+
+/*******************************  kernel module net PM functionality **********************************/
+
+// common structure for ethernet and IP protocol managers
+struct cp_lkm_pm_net {
+	struct cp_lkm_pm_common common;
+	struct ethhdr eth_hdr;
+
+};
+
+static struct rtnl_link_stats64 *cp_lkm_pm_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
+{
+	struct cp_lkm_pm_net *pm_net;
+	int i;
+	struct cp_lkm_pm_stats64 *pstats;
+
+	pm_net = netdev_priv(netdev);
+
+	for_each_possible_cpu(i) {
+		u64 rx_packets, rx_bytes, rx_errors, rx_dropped, rx_over_errors;
+		u64 tx_packets, tx_bytes, tx_errors, tx_dropped;
+		unsigned int start;
+		pstats = per_cpu_ptr(pm_net->common.pcpu_stats64, i);
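+		//u64_stats syncp retry loop: re-read the snapshot if a writer updated this
+		//cpu's counters while we were copying them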
+		do {
+			start = u64_stats_fetch_begin_irq(&pstats->syncp);
+			rx_packets = pstats->rx_packets;
+			tx_packets = pstats->tx_packets;
+			rx_bytes = pstats->rx_bytes;
+			tx_bytes = pstats->tx_bytes;
+			rx_errors = pstats->rx_errors;
+			tx_errors = pstats->tx_errors;
+			rx_dropped = pstats->rx_dropped;
+			tx_dropped = pstats->tx_dropped;
+			rx_over_errors = pstats->rx_over_errors;
+		} while (u64_stats_fetch_retry_irq(&pstats->syncp, start));
+
+		stats->rx_packets += rx_packets;
+		stats->tx_packets += tx_packets;
+		stats->rx_bytes += rx_bytes;
+		stats->tx_bytes += tx_bytes;
+		stats->rx_errors += rx_errors;
+		stats->tx_errors += tx_errors;
+		stats->rx_dropped += rx_dropped;
+		stats->tx_dropped += tx_dropped;
+		stats->rx_over_errors += rx_over_errors;
+	}
+
+	return stats;
+}
+
+static int cp_lkm_pm_net_open(struct net_device *dev)
+{
+	struct cp_lkm_pm_net *pm_net;
+
+	DEBUG_TRACE("%s()", __FUNCTION__);
+
+	pm_net = netdev_priv(dev);
+	netif_start_queue(dev);
+
+	// is this link up?
+	return 0;
+}
+
+static int cp_lkm_pm_net_close(struct net_device *dev)
+{
+	struct cp_lkm_pm_net *pm_net = netdev_priv(dev);
+	struct cp_lkm_msg_hdr hdr;
+
+	DEBUG_TRACE("%s()", __FUNCTION__);
+
+	// link change
+	netif_stop_queue(dev);
+
+	// post message to indicate link down
+	memset(&hdr,0,sizeof(hdr));
+	hdr.instance_id = pm_net->common.unique_id;
+	hdr.cmd = CP_LKM_PM_LINK_DOWN;
+	hdr.status = CP_LKM_STATUS_OK;
+	cp_lkm_post_message(&cp_lkm_pm_mgr.common, &hdr, NULL);
+	LOG("Link Down indicated - id:%d\n", hdr.instance_id);
+
+
+	return 0;
+}
+
+static int cp_lkm_pm_net_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct cp_lkm_pm_net *pm_net = netdev_priv(dev);
+	bool filter_ok = true;
+	int link_res;
+
+	//see if we can grab the link lock, if not, we are either bringing up or taking down the link between USB and PM, so not safe to proceed
+	link_res = cp_lkm_common_inc_link_lock(&pm_net->common);
+	if(link_res < 0) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
+	if (!pm_net->common.edi) {
+		// cannot do anything without edi
+		dev_kfree_skb_any(skb);
+		goto net_xmit_done;
+	}
+
+	//DEBUG_INFO("%s() - %s len:%d", __FUNCTION__, pm_net->common.net_dev->name, skb->len);
+	UPDATE_STATS(pm_net->common.edi->pm_stats64_ctx, tx_bytes, (skb->len - sizeof(struct ethhdr)));
+	UPDATE_STATS(pm_net->common.edi->pm_stats64_ctx, tx_packets, 1);
+	/* Drop packet if interface is not attached */
+	if (0 == pm_net->common.attached)
+		goto drop;
+
+	if (!pm_net->common.edi->usb_send) {
+		goto drop;
+	}
+
+	filter_ok = cp_lkm_pm_filter_ok(&pm_net->common, skb->data + sizeof(struct ethhdr), skb->len - sizeof(struct ethhdr));
+	if (!filter_ok) {
+		pm_net->common.filter_drop_cnt++;
+		DEBUG_WARN("%s() filter dropped packet cnt:%u", __FUNCTION__, pm_net->common.filter_drop_cnt);
+		goto drop;
+	}
+	
+	switch(pm_net->common.type) {
+	case CP_LKM_PM_TYPE_IP_DHCP:
+	case CP_LKM_PM_TYPE_IP_STATIC:
+		skb_pull(skb, sizeof(struct ethhdr)); // strip off the ethernet header
+		break;
+	default:
+		break;
+	}
+ 
+	// send data to USB module
+	pm_net->common.edi->usb_send(pm_net->common.edi->usb_send_ctx, skb);
+	goto net_xmit_done;
+
+drop:
+	DEBUG_INFO("%s() - dropped", __FUNCTION__);
+	UPDATE_STATS(pm_net->common.edi->pm_stats64_ctx, tx_dropped, 1);
+	dev_kfree_skb_any(skb);
+
+net_xmit_done:
+	cp_lkm_common_dec_link_lock(&pm_net->common);
+	return NETDEV_TX_OK;
+}
+
+
+#if 0
+static u8 cp_lkm_pm_test_find(u8* pkt, u32 pkt_len, u8* pattern, u32 pattern_len)
+{
+	s32 i;
+	for(i = 0; i < (pkt_len - pattern_len); i++) {
+		if (memcmp(&pkt[i],pattern,pattern_len) == 0) {
+			return 1;
+		}
+	}
+	return 0;
+}
+
+static int cp_lkm_pm_test(struct sk_buff *skb)
+{
+static u8 first_pkt = 1;
+static u8 started = 0;
+static unsigned long total_data = 0;
+static unsigned long start_time = 0;
+static unsigned long stop_time = 0;
+
+static unsigned long invalid_pkts = 0;
+static unsigned long total_pkts = 0;
+
+	int drop = 0;
+	unsigned char *ptr = skb->data;
+	u32 pkt_len = skb->len;
+	u8 prot;
+	//u8 type;
+	u16 udp_len;
+	u16 dst_port;
+
+	if (pkt_len < 20) {
+		return 0;
+	}
+	//function is set up to parse IP pkts, may be called with ether framed pkts as well.
+	//auto detect ether hdr and remove it
+	if (ptr[0] != 0x45) {
+		//ether header
+		if(ptr[14] == 0x45){
+			ptr+=14;
+			pkt_len -= 14;
+		}
+		//vlan hdr
+		else if (ptr[12] == 0x81 && ptr[18] == 0x45) {
+			ptr+=18;
+			pkt_len -=18;
+		}
+	}
+
+	if (ptr[0] != 0x45) {
+		invalid_pkts++;
+	}
+
+	//printk("0x%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x len: %d \n",ptr[0],ptr[1],ptr[2],ptr[3],ptr[4],ptr[5],ptr[6],ptr[7],ptr[8],ptr[9],ptr[10],ptr[11],ptr[12],ptr[13],ptr[14],ptr[15],pkt_len);
+	//printk("0x%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x len: %d \n",ptr[0],ptr[1],ptr[2],ptr[3],ptr[4],ptr[5],ptr[6],ptr[7],ptr[8],ptr[9],ptr[10],ptr[11],ptr[12],ptr[13],ptr[14],ptr[15],pkt_len);
+	if (pkt_len >= 28) {
+		prot = ptr[9];
+		if (prot == 0x11) {
+			ptr += 20; //skip ip header
+			pkt_len -= 20;
+			dst_port = ntohs(*((u16*)(&ptr[2])));
+			udp_len = ntohs(*((u16*)(&ptr[4])));
+			//printk("Got UDP pkt\n");
+			if (started && dst_port == 5001) {
+				drop = 1;
+				if (first_pkt == 1) {
+					first_pkt = 0;
+					total_data = 0;
+					start_time = jiffies;
+					invalid_pkts = 0;
+					total_pkts = 0;
+				}
+				total_data += (udp_len+34); //add ip and ether hdrs
+				stop_time = jiffies;
+				total_pkts++;
+			}
+			else if(dst_port == 5002) {
+				drop = 1;
+				ptr += 8; //skip udp header
+				printk("SHIM START PORT len: %d data: 0x%x, start=%x, stop=%x\n",udp_len, ptr[0], start_time, stop_time);
+				if(cp_lkm_pm_test_find(ptr, udp_len, "START", 5)){
+					printk("Got IPERF START\n");
+					first_pkt = 1;
+					started = 1;
+					cp_lkm_wrapper_start_debug();
+				}
+				else if (cp_lkm_pm_test_find(ptr, udp_len, "STOP", 4)) {
+					u32 delta_time = (stop_time - start_time)*1000/HZ;
+					u32 bits_per_sec = (total_data/delta_time)*8000; //in bytes per millisecond, need bits per second
+					delta_time -= 2; //iperf has 2 second delay waiting for an ack we won't send
+					started = 0;
+					printk("Got IPERF STOP: Total data: %u, Total pkts: %u, Total invalid: %u, Total time: %u msec, BitsPerSec: %u\n",total_data, total_pkts, invalid_pkts, delta_time,bits_per_sec);
+					cp_lkm_wrapper_stop_debug();
+				}
+			}
+		}
+	}
+	return drop;
+}
+#endif
+
+// called in soft interrupt context - otherwise some protection around pm_net is required
+//int num_ip_copies = 0;
+//int num_eth_copies = 0;
+//int num_pkts = 0;
+//int num_iters = 0;
+//int num_unaligned = 0;
+static int cp_lkm_pm_net_recv(void *ctx, struct sk_buff *skb)
+{
+	struct cp_lkm_pm_net *pm_net;
+	int err;
+	int recv_bytes;
+	struct sk_buff *skb_new;
+	int align = 0;  //set to 1 to always send 4 byte aligned IP pkts to network stack
+	int pad = 20;   //number of bytes to put on front of new skbs
+
+	//DEBUG_INFO("%s()", __FUNCTION__);
+	if(NULL == ctx) {
+		dev_kfree_skb_any(skb);
+		return 0;
+	}
+
+	//num_pkts++;
+	//num_iters++;
+	pm_net = (struct cp_lkm_pm_net *)ctx;
+
+	//printk("%s() pm_net: %p\n", __FUNCTION__, pm_net);
+
+
+	skb->dev = pm_net->common.net_dev;
+
+	switch(pm_net->common.type) {
+		case CP_LKM_PM_TYPE_ETHERNET_DHCP:
+        case CP_LKM_PM_TYPE_ETHERNET_STATIC:
+        case CP_LKM_PM_TYPE_ETHERNET_STATIC_NOARP:
+			//this strips the ether header off the packet
+			skb->protocol = eth_type_trans(skb, pm_net->common.net_dev);
+			//Need IP hdr aligned for IP stack to avoid unaligned access interrupts
+			if(align && ((uintptr_t)(skb->data) & 0x3)) {
+				//num_eth_copies++;
+				skb_new = skb_copy_expand(skb, pad, 0, GFP_ATOMIC);
+				dev_kfree_skb_any(skb);
+				skb=skb_new;
+			}
+			if (!skb) {
+				// packet dropped
+				UPDATE_STATS(pm_net->common.edi->pm_stats64_ctx, rx_dropped, 1);
+				return -ENOMEM;
+			}
+			break;
+
+		case CP_LKM_PM_TYPE_IP_DHCP:
+		case CP_LKM_PM_TYPE_IP_STATIC:
+			// Need to add ether header first for processing, then remove it. Need IP hdr aligned when done.
+			// 
+			// Note: avoid the temptation to skip adding the ether header and doing manually what the call 
+			//       to eth_type_trans() does. We did that and it bit us (see Jira issue FW-16149)
+			//       The kernel expects the ether header to be present in the skb buff even though the data ptr 
+			//       has been moved past it. Also, if the skb has been cloned, then we are dealing with an 
+			//       aggregated modem protocol (multiple pkts per skb), so we have to make a copy to guarantee
+			//       our tmp ether header isn't written into the data space of the previous pkt from the set.
+			//  	 
+			if((align && ((uintptr_t)(skb->data) & 0x3)) || (skb_headroom(skb) < ETH_HLEN) || skb_cloned(skb)){
+				//printk("copy: align: %d, head: %d, cloned: %d, len: %d\n", ((uintptr_t)(skb->data) & 0x3), skb_headroom(skb), skb_cloned(skb), skb->len);
+				//num_ip_copies++;
+				skb_new = skb_copy_expand(skb, 16+pad, 0, GFP_ATOMIC);
+				dev_kfree_skb_any(skb);
+				skb=skb_new;
+			}
+
+			if (!skb) {
+				// packet dropped
+				UPDATE_STATS(pm_net->common.edi->pm_stats64_ctx, rx_dropped, 1);
+				return -ENOMEM;
+			}
+
+			if (0x60 == (skb->data[0] & 0xF0)) {  //mask off version bits of first byte of IP packet to check for ip version
+				// set the hdr protocol type to IPV6
+				pm_net->eth_hdr.h_proto = __constant_htons(ETH_P_IPV6);
+			} else { 
+				// probably ipv4, but not explicitly checking
+				// set the hdr protocol type to IPV4
+				pm_net->eth_hdr.h_proto = __constant_htons(ETH_P_IP);
+			}
+			memcpy(skb_push(skb, sizeof(struct ethhdr)), (unsigned char *)&pm_net->eth_hdr, sizeof(struct ethhdr));
+			//this strips the ether hdr off the packet
+			skb->protocol = eth_type_trans(skb, pm_net->common.net_dev);
+			break;
+
+		default:
+			DEBUG_INFO("%s() invalid protocol type: %d", __FUNCTION__, pm_net->common.type);
+			// packet dropped
+			UPDATE_STATS(pm_net->common.edi->pm_stats64_ctx, rx_errors, 1);
+			dev_kfree_skb_any(skb);
+			return NET_RX_DROP;
+	}
+
+	recv_bytes = skb->len;
+
+	//if (cp_lkm_pm_test(skb) == 1) {
+	//  dev_kfree_skb_any(skb);
+	//  return NET_RX_SUCCESS;
+	//}
+
+	//if((int)(skb->data) & 0x3){
+		//printk("Unaligned IP pkt!!!!!!!!!!!!\n");
+		//num_unaligned++;
+	//}
+
+
+	//if(num_iters >= 10000) {
+	//  num_iters = 0;
+	//  printk("num_ip_copies: %d, num_eth_copies: %d, num_unaligned: %d, num_pkts: %d\n",num_ip_copies,num_eth_copies,num_unaligned,num_pkts);
+	//}
+
+	netif_rx(skb);
+	err = NET_RX_SUCCESS;
+
+	UPDATE_STATS(pm_net->common.edi->pm_stats64_ctx, rx_packets, 1);
+	UPDATE_STATS(pm_net->common.edi->pm_stats64_ctx, rx_bytes, recv_bytes);
+
+	return 0;
+}
+
+
+static void cp_lkm_pm_net_get_hdr_size(void *ctx, int wrapper_hdr_size, int* hdr_size, int* hdr_offset)
+{
+	struct cp_lkm_pm_net *pm_net;
+	int pad;
+	int tmp_size;
+	int pm_hdr = ETH_HLEN;
+	int pm_extra = 6;
+
+	*hdr_size = 0;
+	*hdr_offset = 0;
+
+	pm_net = (struct cp_lkm_pm_net *)ctx;
+	if(!pm_net) {
+		return;
+	}
+	//temp return here
+	//return;
+
+	//calculate how much header space there is before the IP hdr.
+	//this is needed to align the IP hdr properly for optimal performance
+	switch(pm_net->common.type) {
+		case CP_LKM_PM_TYPE_ETHERNET_DHCP:
+        case CP_LKM_PM_TYPE_ETHERNET_STATIC:
+        case CP_LKM_PM_TYPE_ETHERNET_STATIC_NOARP:
+			//pkts will need room for the wrapper header and the ether hdr.
+			//both headers will be present at the same time.
+			tmp_size = wrapper_hdr_size + pm_hdr + pm_extra;
+			pad = ((~tmp_size)+1)&0x3; //calculate padding needed for 4 byte boundary on alloc
+			*hdr_size = tmp_size + pad;
+			*hdr_offset = pad+pm_extra;
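+			//e.g. wrapper_hdr_size 16 with ETH_HLEN 14: tmp_size = 16+14+6 = 36,
+			//pad = 0, so hdr_size = 36 and hdr_offset = 6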
+			break;
+
+		case CP_LKM_PM_TYPE_IP_DHCP:
+		case CP_LKM_PM_TYPE_IP_STATIC:
+			//pkts will need room for the wrapper header or the ether hdr
+			//both headers won't be present at the same time. The wrapper is present
+			//up through the USB side of the shim. We (the pm) add a temp ether header
+			//for processing after the wrapper header is removed
+			tmp_size = max(wrapper_hdr_size, pm_hdr+pm_extra);
+			pad = ((~tmp_size)+1)&0x3; //calculate padding needed for 4 byte boundary on alloc
+			*hdr_size = tmp_size + pad;
+			*hdr_offset = *hdr_size - wrapper_hdr_size;
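+			//e.g. wrapper_hdr_size 16 with ETH_HLEN 14: tmp_size = max(16, 20) = 20,
+			//pad = 0, so hdr_size = 20 and hdr_offset = 4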
+			break;
+		default:
+			break;
+	}
+}
+
+
+static u32 cp_lkm_pm_net_get_link(struct net_device *dev)
+{
+	struct cp_lkm_pm_net *pm_net;
+
+	DEBUG_TRACE("%s()", __FUNCTION__);
+	pm_net = netdev_priv(dev);
+	if(!pm_net) {
+		return 0;
+	}
+	return pm_net->common.attached;
+}
+
+
+#ifndef KERNEL_2_6_21
+static const struct net_device_ops cp_lkm_pm_net_device_ops = {
+	.ndo_open = cp_lkm_pm_net_open,
+	.ndo_start_xmit = cp_lkm_pm_net_xmit,
+	.ndo_stop = cp_lkm_pm_net_close,
+	.ndo_get_stats64 = cp_lkm_pm_get_stats64
+};
+#endif
+
+static const struct ethtool_ops cp_lkm_pm_net_ethtool_ops = {
+	.get_link = cp_lkm_pm_net_get_link,
+};
+
+static void cp_lkm_pm_net_setup(struct net_device *net_dev)
+{
+	struct cp_lkm_pm_net *pm_net;
+
+	DEBUG_INFO("%s()", __FUNCTION__);
+	pm_net = netdev_priv(net_dev);
+	ether_setup(net_dev);
+
+#ifdef KERNEL_2_6_21
+	net_dev->open = cp_lkm_pm_net_open;
+	net_dev->hard_start_xmit = cp_lkm_pm_net_xmit;
+	net_dev->stop = cp_lkm_pm_net_close;
+#else
+	net_dev->netdev_ops = &cp_lkm_pm_net_device_ops;
+	net_dev->needed_headroom = 48; 
+	net_dev->needed_tailroom = 8;
+#endif
+
+	net_dev->ethtool_ops = &cp_lkm_pm_net_ethtool_ops;
+ 
+}
+
+static int cp_lkm_pm_net_attach(struct cp_lkm_pm_ctx *mgr, cp_lkm_pm_type_t type, int uid, char *name, unsigned char *mac)
+{
+	int err;
+	struct cp_lkm_pm_net *pm_net;
+	struct net_device *net_dev;
+	unsigned long flags;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17,0)
+	net_dev = alloc_netdev(sizeof(struct cp_lkm_pm_net), name, NET_NAME_UNKNOWN, cp_lkm_pm_net_setup);
+#else
+	net_dev = alloc_netdev(sizeof(struct cp_lkm_pm_net), name, cp_lkm_pm_net_setup);
+#endif
+	if (!net_dev) {
+		DEBUG_INFO("%s() alloc failed: %s", __FUNCTION__, name);
+		return -ENOMEM;
+	}
+
+	pm_net = netdev_priv(net_dev);
+
+	err = cp_lkm_common_init(&pm_net->common);
+	if (err) {
+		free_netdev(net_dev);
+		return err;
+	}
+
+	pm_net->common.net_dev = net_dev;
+	pm_net->common.unique_id = uid;
+	pm_net->common.type = type;
+	pm_net->common.edi = NULL;
+
+	//printk("%s(%p) pm-uid: %d, pm_net: %p\n", __FUNCTION__, mgr, uid, pm_net);
+
+	switch (type) {
+		case CP_LKM_PM_TYPE_ETHERNET_DHCP:
+		case CP_LKM_PM_TYPE_ETHERNET_STATIC:
+		case CP_LKM_PM_TYPE_ETHERNET_STATIC_NOARP:
+			if(!memcmp(mac, "\x00\x00\x00\x00\x00\x00", ETH_ALEN)) {
+				random_ether_addr(net_dev->dev_addr);
+			} else {
+				memcpy (net_dev->dev_addr, mac, ETH_ALEN);
+			}
+
+			// TODO: only set IFF_NOARP if the driver requests it
+			if (type == CP_LKM_PM_TYPE_ETHERNET_STATIC_NOARP) {
+				net_dev->flags |= IFF_NOARP;
+			}
+			break;
+		case CP_LKM_PM_TYPE_IP_DHCP:
+		case CP_LKM_PM_TYPE_IP_STATIC:
+			// random addr for DHCP functionality
+			if(!memcmp(mac, "\x00\x00\x00\x00\x00\x00", ETH_ALEN) || !memcmp(mac, "\x00\x30\x44\x00\x00\x00", ETH_ALEN)) {
+				random_ether_addr(net_dev->dev_addr);
+			} else {
+				memcpy (net_dev->dev_addr, mac, ETH_ALEN);
+			}
+
+			net_dev->flags |= IFF_NOARP; 
+			memcpy(pm_net->eth_hdr.h_dest, net_dev->dev_addr, ETH_ALEN);
+			random_ether_addr(pm_net->eth_hdr.h_source);
+			break;
+		default:
+			DEBUG_INFO("%s() invalid protocol type: %d", __FUNCTION__, type);
+			cp_lkm_common_deinit(&pm_net->common);
+			free_netdev(net_dev);
+			return -EINVAL;
+	}
+
+	DEBUG_INFO("%s register netdev", __FUNCTION__);
+	err = register_netdev(net_dev);
+	if (err < 0) {
+		DEBUG_INFO("%s netdev registration error", __FUNCTION__);
+		cp_lkm_common_deinit(&pm_net->common);
+		free_netdev(net_dev);
+		return err;
+	}
+
+	netif_device_attach(pm_net->common.net_dev);
+
+	netif_stop_queue(pm_net->common.net_dev);
+
+	pm_net->common.attached = 1;
+
+	spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
+	list_add(&pm_net->common.list, &mgr->pm_list);
+	spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
+
+	return 0;
+}
+ 
+static int cp_lkm_pm_net_detach(struct cp_lkm_pm_ctx *mgr, int uid)
+{
+
+	// find the object in the list
+	struct list_head *pos;
+	struct cp_lkm_pm_common *pm = NULL;
+	unsigned long flags;
+
+	DEBUG_TRACE("%s(%p)", __FUNCTION__, mgr);
+
+	spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
+	list_for_each(pos, &mgr->pm_list){
+		struct cp_lkm_pm_common *pm_tmp = list_entry(pos, struct cp_lkm_pm_common, list);
+		if(pm_tmp->unique_id == uid) {
+			pm = pm_tmp;
+			break;
+		}
+	}
+
+	if (!pm) {
+		// already detached
+		spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
+		DEBUG_INFO("%s() already detached", __FUNCTION__);
+		return 0;
+	}
+
+	// remove the object
+	list_del(&pm->list);
+	spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
+
+	if (pm->attached) {
+		DEBUG_INFO("%s() detaching", __FUNCTION__);
+		netif_device_detach(pm->net_dev);
+		pm->attached = 0;
+	}
+
+	unregister_netdev(pm->net_dev);
+
+	// clean the filter list
+	cp_lkm_pm_filter_empty_list(pm);
+
+	cp_lkm_common_deinit(pm);
+	free_netdev(pm->net_dev); // this also frees the pm since it was allocated as part of the net_dev
+
+	return 0;
+}
+
+static int cp_lkm_pm_net_activate(struct cp_lkm_pm_ctx *mgr, int uid, bool activate)
+{
+	// find the object in the list
+	struct list_head *pos;
+	struct cp_lkm_pm_common *pm = NULL;
+	unsigned long flags;
+	//printk("%s(%p) activate: %d\n", __FUNCTION__, mgr, activate);
+
+	spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
+	list_for_each(pos, &mgr->pm_list){
+		struct cp_lkm_pm_common *pm_tmp = list_entry(pos, struct cp_lkm_pm_common, list);
+		if(pm_tmp->unique_id == uid) {
+			pm = pm_tmp;
+			break;
+		}
+	}
+
+	spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
+
+	if (!pm) {
+		// couldn't find object - already unplugged
+		DEBUG_INFO("%s() already unplugged", __FUNCTION__);
+		return 0;
+	}
+	
+	if (activate) {
+		//netif_start_queue(pm->net_dev);
+		if (pm->edi) {
+			pm->edi->pm_recv_ctx = pm;
+		}
+		netif_wake_queue(pm->net_dev);
+	} else {
+		netif_stop_queue(pm->net_dev);
+		if (pm->edi) {
+			pm->edi->pm_recv_ctx = NULL;
+			//printk("pm_recv_ctx null\n");
+		}
+
+		// remove the filters - will be added back in before activate
+		cp_lkm_pm_filter_empty_list(pm);
+	}
+
+	return 0;
+}
+
+int cp_lkm_pm_net_pause(void *ctx)
+{
+	struct cp_lkm_pm_common* pm = (struct cp_lkm_pm_common *)ctx;
+	if(!ctx) {
+		return 0;
+	}
+	netif_stop_queue(pm->net_dev);
+	return 0;
+
+}
+int cp_lkm_pm_net_resume(void *ctx)
+{
+	struct cp_lkm_pm_common* pm = (struct cp_lkm_pm_common *)ctx;
+	if(!ctx) {
+		return 0;
+	}
+	//netif_start_queue(pm->net_dev);
+	netif_wake_queue(pm->net_dev);
+	return 0;
+}
+
+
+/*******************************  kernel module PPP/tty PM functionality **********************************/
+struct cp_lkm_pm_ppp {
+	struct cp_lkm_pm_common common;
+	u8 *no_carrier_ptr;
+	bool in_frame;
+
+	struct tty_struct *tty; // pointer to the tty for this device
+	int minor;
+	int open_count;
+};
+
+#define CP_TTY_MINORS 10
+#define CP_TTY_DEVICE_NAME "ttyCP"
+
+#define PPP_MGR_NO_CARRIER "NO CARRIER"
+#define PPP_FLAG 0x7E
+
+static struct cp_lkm_pm_ppp *cp_lkm_pm_ppp_table[CP_TTY_MINORS];
+static struct tty_driver *cp_lkm_pm_tty_driver = NULL;
+static struct tty_port cp_lkm_pm_tty_port[CP_TTY_MINORS];
+
+static void cp_lkm_pm_ppp_finalize(void *arg)
+{
+	struct cp_lkm_pm_ppp *pm_ppp = (struct cp_lkm_pm_ppp *)arg;
+	tty_unregister_device(cp_lkm_pm_tty_driver, pm_ppp->minor);
+	cp_lkm_pm_ppp_table[pm_ppp->minor] = NULL;
+	if (pm_ppp->common.edi) {
+		pm_ppp->common.edi = NULL;
+	}
+	// clean the filter list
+	cp_lkm_pm_filter_empty_list(&pm_ppp->common);
+}
+
+static int cp_lkm_pm_ppp_attach(struct cp_lkm_pm_ctx *mgr, cp_lkm_pm_type_t type, int uid, char *name)
+{
+	int minor;
+	int err;
+	unsigned long flags;
+	struct cp_lkm_pm_ppp *pm_ppp;
+
+	DEBUG_INFO("%s(%p)", __FUNCTION__, mgr);
+
+	//printk("%s() uid: %d, type: %d\n", __FUNCTION__, uid, type);
+
+	// find an empty minor device slot and register
+	for (minor = 0; minor < CP_TTY_MINORS && cp_lkm_pm_ppp_table[minor]; minor++);
+
+	if (minor == CP_TTY_MINORS) {
+		DEBUG_WARN("%s(%p) - out of devices", __FUNCTION__, mgr);
+		return -ENODEV;
+	}
+ 
+	if (!(pm_ppp = memref_alloc_and_zero(sizeof(struct cp_lkm_pm_ppp), cp_lkm_pm_ppp_finalize))) {
+		DEBUG_WARN("%s(%p) - no memory", __FUNCTION__, mgr);
+		return -ENOMEM;
+	}
+ 
+	err = cp_lkm_common_init(&pm_ppp->common);
+	if (err) {
+		return -ENOMEM;
+	}
+	pm_ppp->common.type = type;
+	pm_ppp->common.unique_id = uid;
+
+	pm_ppp->no_carrier_ptr = (u8 *)PPP_MGR_NO_CARRIER;
+
+	pm_ppp->minor = minor;
+
+	cp_lkm_pm_ppp_table[minor] = pm_ppp;
+	sprintf(name, "%s%d", CP_TTY_DEVICE_NAME, minor);
+
+	//printk("%s(%p) attached\n", __FUNCTION__, &pm_ppp->common);
+	pm_ppp->common.attached = 1;
+	pm_ppp->open_count = 0;
+
+	spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
+	list_add(&pm_ppp->common.list, &mgr->pm_list);
+	spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
+
+	tty_port_register_device(&cp_lkm_pm_tty_port[minor], cp_lkm_pm_tty_driver, minor, NULL);
+
+	return 0;
+}
+ 
+static int cp_lkm_pm_ppp_detach(struct cp_lkm_pm_ctx *mgr, int uid)
+{
+
+	// find the object in the list
+	struct list_head *pos;
+	struct cp_lkm_pm_common *pm = NULL;
+	struct cp_lkm_pm_ppp *pm_ppp;
+	unsigned long flags;
+
+	DEBUG_INFO("%s(%p)", __FUNCTION__, mgr);
+	//printk("%s() uid: %d\n", __FUNCTION__, uid);
+
+	spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
+	list_for_each(pos, &mgr->pm_list){
+		struct cp_lkm_pm_common *pm_tmp = list_entry(pos, struct cp_lkm_pm_common, list);
+		if(pm_tmp->unique_id == uid) {
+			pm = pm_tmp;
+			break;
+		}
+	}
+
+	if (!pm) {
+		// already detached
+		spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
+		DEBUG_INFO("%s() already detached", __FUNCTION__);
+		return 0;
+	}
+
+	// remove the object
+	list_del(&pm->list);
+
+	pm_ppp = (struct cp_lkm_pm_ppp *)pm;
+
+	//printk("%s() !attached\n", __FUNCTION__);
+	pm->attached = 0;
+
+	spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
+
+	// clean the filter list
+	cp_lkm_pm_filter_empty_list(pm);
+
+	cp_lkm_common_deinit(pm);
+
+	memref_deref(pm_ppp);
+
+	return 0;
+}
+
+static int cp_lkm_pm_ppp_activate(struct cp_lkm_pm_ctx *mgr, int uid, bool activate)
+{
+	// find the object in the list
+	struct list_head *pos;
+	struct cp_lkm_pm_common *pm = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
+	list_for_each(pos, &mgr->pm_list){
+		struct cp_lkm_pm_common *pm_tmp = list_entry(pos, struct cp_lkm_pm_common, list);
+		if(pm_tmp->unique_id == uid) {
+			pm = pm_tmp;
+			break;
+		}
+	}
+
+	spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
+
+	if (!pm) {
+		// already detached
+		DEBUG_INFO("%s() already detached", __FUNCTION__);
+		return 0;
+	}
+	//printk("%s(%p) activate: %d, attached: %d\n", __FUNCTION__, pm, activate, pm->attached);
+
+	if (activate) {
+		if (pm->edi) {
+			pm->edi->pm_recv_ctx = pm;
+		}
+	} else {
+		if (pm->edi) {
+			pm->edi->pm_recv_ctx = NULL;
+			//printk("pm_recv_ctx null\n");
+		}
+		// clean the filter list
+		cp_lkm_pm_filter_empty_list(pm);
+	}
+
+	return 0;
+}
+
+
+static int cp_lkm_pm_tty_open(struct tty_struct * tty, struct file * filp)
+{
+	struct cp_lkm_pm_ppp *pm_ppp;
+	int index;
+	unsigned long flags;
+
+	DEBUG_INFO("%s()", __FUNCTION__);
+
+	index = tty->index;
+
+	// get the pm_ppp associated with this tty pointer 
+	spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
+	pm_ppp = cp_lkm_pm_ppp_table[index];
+	if (!pm_ppp /*|| tty->driver_data */|| !pm_ppp->common.attached) {
+		spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
+		return -EINVAL;
+	}
+
+	if (pm_ppp->open_count++) {
+		spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
+		return 0;
+	}
+
+	memref_ref(pm_ppp);
+	spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
+
+	// save our structure within the tty structure 
+	tty->driver_data = pm_ppp;
+	pm_ppp->tty = tty; 
+ 
+	// XXX 3.10 hack
+	//tty->low_latency = 0;
+
+	return 0;
+}
+
+static void cp_lkm_pm_tty_close(struct tty_struct * tty, struct file * filp)
+{
+	struct cp_lkm_pm_ppp *pm_ppp;
+	unsigned long flags;
+
+	DEBUG_INFO("%s()", __FUNCTION__);
+	
+	pm_ppp = tty->driver_data;
+	if(!pm_ppp) {
+		return;
+	}
+
+	spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
+	if (--pm_ppp->open_count) {
+		spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
+		return;
+	}
+	tty->driver_data = NULL;
+	pm_ppp->tty = NULL;
+	spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
+	memref_deref(pm_ppp);
+}
+static bool cp_lkm_pm_ppp_check_match(struct cp_lkm_pm_ppp *pm_ppp, u8 ch)
+{
+	if (*(pm_ppp->no_carrier_ptr) == ch) {
+		// character match - advance to next character
+		pm_ppp->no_carrier_ptr++;
+		if (! *(pm_ppp->no_carrier_ptr)) {
+			// end of no carrier string - found oob no carrier
+			return true;
+		}
+		return false;
+	}
+	// characters don't match
+	if (pm_ppp->no_carrier_ptr != (u8 *)PPP_MGR_NO_CARRIER) {
+		// characters don't match - start over
+		pm_ppp->no_carrier_ptr = (u8 *)PPP_MGR_NO_CARRIER;
+		// check not matching character against first character of no carrier - 1 level of recursion
+		return cp_lkm_pm_ppp_check_match(pm_ppp, ch);
+	} 
+
+	return false;
+}
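+
+/*
+ * Illustrative trace of the matcher above on the input bytes "NO NO CARRIER":
+ * 'N', 'O' and ' ' each advance no_carrier_ptr; the second 'N' then mismatches
+ * the expected 'C', so the pointer resets to the start of "NO CARRIER" and the
+ * one-level recursion re-checks 'N' against the first character. The remaining
+ * bytes match through to the terminating NUL and the function returns true.
+ */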
+
+static bool cp_lkm_pm_ppp_is_no_carrier(struct cp_lkm_pm_ppp *pm_ppp, struct sk_buff *skb)
+{
+	// search thru skb for data between frame markers for NO CARRIER
+	bool no_carrier = false;
+	unsigned int len = skb->len;
+	u8 *pos = skb->data;
+
+	DEBUG_TRACE("%s()", __FUNCTION__);
+
+	while (len--) {
+		if (PPP_FLAG == (*pos)) {
+			pm_ppp->in_frame = !pm_ppp->in_frame;
+		} else if (!pm_ppp->in_frame) {
+			// look for match
+			no_carrier = cp_lkm_pm_ppp_check_match(pm_ppp, *pos);
+			if (no_carrier) {
+				DEBUG_INFO("%s() found no carrier", __FUNCTION__);
+				return true;
+			}
+		} else {
+			pm_ppp->no_carrier_ptr = (u8 *)PPP_MGR_NO_CARRIER;
+		}
+
+		pos++;
+	}
+
+	return false;
+}
+
+static void cp_lkm_pm_ppp_get_hdr_size(void *ctx, int wrapper_hdr_size, int* hdr_size, int* hdr_offset)
+{
+	*hdr_size = 0;
+	*hdr_offset = 0;
+}
+
+// called in soft interrupt context
+static int cp_lkm_pm_ppp_recv(void *ctx, struct sk_buff *skb)
+{
+#ifdef KERNEL_2_6_21
+	int size;
+#endif
+	struct cp_lkm_pm_ppp *pm_ppp;
+	bool oob_no_carrier;
+
+	if(NULL == ctx || !skb->len) {
+		DEBUG_INFO("%s() - null ctx - dropped", __FUNCTION__);
+		goto done;
+	}
+
+	pm_ppp = (struct cp_lkm_pm_ppp *)ctx;
+
+	if (!pm_ppp) {
+		DEBUG_INFO("%s() - NULL pm_ppp - dropped", __FUNCTION__);
+		goto done;
+	}
+
+	// check for OOB NO CARRIER - signal up through file descriptor
+	oob_no_carrier = cp_lkm_pm_ppp_is_no_carrier(pm_ppp, skb);
+	if (oob_no_carrier) {
+		struct cp_lkm_msg_hdr hdr;
+
+		DEBUG_INFO("%s() - posting no carrier", __FUNCTION__);
+		memset(&hdr,0,sizeof(hdr));
+		hdr.instance_id = pm_ppp->common.unique_id;
+		hdr.cmd = CP_LKM_PM_LINK_DOWN;
+		hdr.status = CP_LKM_STATUS_OK;
+		hdr.len = 0;
+
+		LOG("Received NO CARRIER\n");
+		DEBUG_INFO("%s() - posting link down", __FUNCTION__);
+		cp_lkm_post_message(&cp_lkm_pm_mgr.common, &hdr, NULL);
+
+		goto done;
+	}
+
+	if (!pm_ppp->tty || !pm_ppp->tty->driver_data) {
+		DEBUG_INFO("%s() - not setup - dropped", __FUNCTION__);
+		goto done;
+	}
+
+#ifdef KERNEL_2_6_21
+	size = tty_buffer_request_room(pm_ppp->tty, skb->len);
+	if(size < skb->len) {
+		// dropped data - or we need to queue for later
+		DEBUG_WARN("%s() - dropping network data", __FUNCTION__);
+		goto done;
+	}
+#endif
+
+	tty_insert_flip_string(pm_ppp->tty->port, skb->data, skb->len);
+	tty_flip_buffer_push(pm_ppp->tty->port);
+
+done:
+	dev_kfree_skb_any(skb);
+	return 0;
+}
+
+// this can be called from interrupt thread or normal kernel thread
+static int cp_lkm_pm_tty_write(struct tty_struct * tty, const unsigned char *buf, int count)
+{
+	struct cp_lkm_pm_ppp *pm_ppp;
+	struct sk_buff *skb;
+	int link_res;
+	int retval = count;
+
+	if (!count) {
+		//printk("%s() !count \n", __FUNCTION__);
+		return 0;
+	}
+
+	pm_ppp = (struct cp_lkm_pm_ppp *)tty->driver_data;
+
+	if (!pm_ppp) {
+		//printk("%s() !pm_ppp \n", __FUNCTION__);
+		return -EINVAL;
+	}
+
+	//printk("%s(%p) id:%d, attached: %d\n", __FUNCTION__, &pm_ppp->common, pm_ppp->common.unique_id, pm_ppp->common.attached);
+
+	//see if we can grab the link lock, if not, we are either bringing up or taking down the link between USB and PM, so not safe to proceed
+	link_res = cp_lkm_common_inc_link_lock(&pm_ppp->common);
+	if(link_res < 0) {
+		//printk("%s() !link \n", __FUNCTION__);
+		return 0;
+	}
+
+	/* Drop packet if interface is not attached */
+	if (!pm_ppp->common.attached){
+		retval = 0;
+		//printk("%s() !attached: %d \n", __FUNCTION__, pm_ppp->common.attached);
+		goto drop;
+	}
+
+	if (!(pm_ppp->common.edi) || !(pm_ppp->common.edi->usb_send) || !(pm_ppp->common.edi->usb_send_ctx)) {
+		retval = 0;
+		//printk("%s() !edi \n", __FUNCTION__);
+		goto drop;
+	}
+
+	//benk check for enabled filter - send in buffer pointer to ip header
+
+	// alloc skb to send
+	if ((skb = alloc_skb (count, GFP_ATOMIC)) == NULL) {
+		retval = -ENOMEM;
+		goto pm_tty_write_done;
+	}
+	
+	memcpy(skb_put(skb, count), buf, count); // skb_put() advances len and the tail pointer for us
+
+	// send data to USB module
+	pm_ppp->common.edi->usb_send(pm_ppp->common.edi->usb_send_ctx, skb);
+	retval = count;
+	goto pm_tty_write_done;
+
+drop:
+pm_tty_write_done:
+	cp_lkm_common_dec_link_lock(&pm_ppp->common);
+	//printk("%s() done\n", __FUNCTION__);
+
+	return retval;
+}
+
+static int cp_lkm_pm_tty_write_room(struct tty_struct *tty)
+{
+	struct cp_lkm_pm_ppp *pm_ppp;
+
+	DEBUG_INFO("%s()", __FUNCTION__);
+
+	pm_ppp = (struct cp_lkm_pm_ppp *)tty->driver_data;
+
+	if (!pm_ppp) {
+		return -EINVAL;
+	}
+
+	return 2048;
+}
+
+static int cp_lkm_pm_tty_chars_in_buffer(struct tty_struct *tty)
+{
+	struct cp_lkm_pm_ppp *pm_ppp;
+
+	DEBUG_INFO("%s()", __FUNCTION__);
+
+	pm_ppp = (struct cp_lkm_pm_ppp *)tty->driver_data;
+
+	if (!pm_ppp) {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void cp_lkm_pm_tty_set_termios(struct tty_struct *tty, struct ktermios * old)
+{
+	DEBUG_INFO("%s()", __FUNCTION__);
+
+}
+
+#ifdef KERNEL_2_6_21
+static int cp_lkm_pm_tty_ioctl(struct tty_struct *tty, struct file * file, unsigned int cmd, unsigned long arg)
+#else
+static int cp_lkm_pm_tty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
+#endif
+{
+	struct cp_lkm_pm_ppp *pm_ppp;
+
+	DEBUG_TRACE("%s(%x)", __FUNCTION__, cmd);
+
+	pm_ppp = (struct cp_lkm_pm_ppp *)tty->driver_data;
+
+	if (!pm_ppp) {
+		return -EINVAL;
+	}
+
+	return -ENOIOCTLCMD;
+}
+
+static struct tty_operations cp_lkm_pm_tty_ops = {
+	.open = cp_lkm_pm_tty_open,
+	.close = cp_lkm_pm_tty_close,
+	.write = cp_lkm_pm_tty_write,
+	.write_room = cp_lkm_pm_tty_write_room,
+	.chars_in_buffer = cp_lkm_pm_tty_chars_in_buffer,
+	.set_termios = cp_lkm_pm_tty_set_termios,
+	.ioctl = cp_lkm_pm_tty_ioctl
+
+/* 
+.throttle = 	acm_tty_throttle,
+.unthrottle =   	acm_tty_unthrottle,
+*/  
+};
+
+static int cp_lkm_pm_tty_init(void)
+{
+	int retval;
+	int i;
+
+	for(i = 0; i < CP_TTY_MINORS; i++) {
+		tty_port_init(&cp_lkm_pm_tty_port[i]);
+	}
+
+	cp_lkm_pm_tty_driver = alloc_tty_driver(CP_TTY_MINORS);
+	if (!cp_lkm_pm_tty_driver) {
+		return -ENOMEM;
+	}
+
+	// initialize the tty driver
+	cp_lkm_pm_tty_driver->owner = THIS_MODULE;
+	cp_lkm_pm_tty_driver->driver_name = "cptty";
+	cp_lkm_pm_tty_driver->name = CP_TTY_DEVICE_NAME;
+	cp_lkm_pm_tty_driver->major = 0; // dynamically assign major number
+	cp_lkm_pm_tty_driver->minor_start = 0;
+	cp_lkm_pm_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+	cp_lkm_pm_tty_driver->subtype = SERIAL_TYPE_NORMAL;
+	cp_lkm_pm_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+	cp_lkm_pm_tty_driver->init_termios = tty_std_termios;
+	tty_set_operations(cp_lkm_pm_tty_driver, &cp_lkm_pm_tty_ops);
+
+	retval = tty_register_driver(cp_lkm_pm_tty_driver);
+	if (retval) {
+		DEBUG_ERROR("%s() failed to register cp tty driver", __FUNCTION__);
+		put_tty_driver(cp_lkm_pm_tty_driver);
+		for(i = 0; i < CP_TTY_MINORS; i++) {
+			tty_port_destroy(&cp_lkm_pm_tty_port[i]);
+		}
+	}
+	return retval;
+
+}
+
+static void cp_lkm_pm_tty_cleanup(void)
+{
+	int i;
+	if (cp_lkm_pm_tty_driver) {
+		tty_unregister_driver(cp_lkm_pm_tty_driver);
+		put_tty_driver(cp_lkm_pm_tty_driver);
+		for(i = 0; i < CP_TTY_MINORS; i++) {
+			tty_port_destroy(&cp_lkm_pm_tty_port[i]);
+		}
+		cp_lkm_pm_tty_driver = NULL;
+	}
+}
+
+/*******************************  kernel module PM mgr functionality **********************************/
+
+
+static int cp_lkm_pm_open(struct cp_lkm_common_ctx *ctx);
+static int cp_lkm_pm_close(struct cp_lkm_common_ctx *ctx);
+static int cp_lkm_pm_handle_msg(struct cp_lkm_common_ctx *ctx, struct cp_lkm_msg_hdr *hdr, struct sk_buff *skb);
+static int cp_lkm_pm_handle_ioctl(struct cp_lkm_common_ctx *ctx, int cmd, void *k_argp);
+
+
+static int cp_lkm_pm_init(void)
+{
+	DEBUG_INFO("%s()", __FUNCTION__);
+
+	memset(&cp_lkm_pm_mgr, 0x00, sizeof(struct cp_lkm_pm_ctx));
+	cp_lkm_pm_mgr.common.open = cp_lkm_pm_open;
+	cp_lkm_pm_mgr.common.close = cp_lkm_pm_close;
+	cp_lkm_pm_mgr.common.handle_msg = cp_lkm_pm_handle_msg;
+	cp_lkm_pm_mgr.common.handle_ioctl = cp_lkm_pm_handle_ioctl;
+	INIT_LIST_HEAD(&cp_lkm_pm_mgr.pm_list);
+	spin_lock_init(&cp_lkm_pm_mgr.pm_list_lock);
+
+	cp_lkm_common_ctx_init(&cp_lkm_pm_mgr.common);
+
+	return 0;
+}
+
+static int cp_lkm_pm_cleanup(void)
+{
+	struct cp_lkm_pm_common *pmi;
+	struct list_head *entry, *tmp;
+	unsigned long flags;
+
+	DEBUG_INFO("%s()", __FUNCTION__);
+
+	// clean up msg list
+	cp_lkm_cleanup_msg_list(&cp_lkm_pm_mgr.common);
+	
+	// cleanup any PM in list
+	spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
+
+	list_for_each_safe(entry, tmp, &cp_lkm_pm_mgr.pm_list) {
+		pmi = list_entry(entry, struct cp_lkm_pm_common, list);
+		if (pmi->edi) {
+			pmi->edi->pm_recv_ctx = NULL;
+			//printk("pm_recv_ctx null\n");
+			pmi->edi->pm_stats64_ctx = NULL;
+			pmi->edi = NULL;
+		}
+		list_del(&pmi->list);
+		// clean the filter list
+		cp_lkm_pm_filter_empty_list(pmi);
+
+		spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
+		if (pmi->net_dev) {
+			// network device
+			cp_lkm_common_deinit(pmi);
+			unregister_netdev(pmi->net_dev);
+			free_netdev(pmi->net_dev); // this also frees the pmi since it was allocated as part of the net_dev
+		} else {
+			// tty device
+			memref_deref(pmi);
+		}
+
+		spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
+	}
+	spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
+
+	return 0;
+}
+
+static int cp_lkm_pm_open(struct cp_lkm_common_ctx *ctx)
+{
+//  struct cp_lkm_pm_ctx *pm_mgr;
+
+	DEBUG_INFO("%s(%p)", __FUNCTION__, ctx);
+
+//  pm_mgr = (struct cp_lkm_pm_ctx *)ctx;
+
+	return 0;
+}
+
+static int cp_lkm_pm_close(struct cp_lkm_common_ctx *ctx)
+{
+	//struct cp_lkm_pm_ctx *pm_mgr = (struct cp_lkm_pm_ctx *)ctx;
+	//struct cp_lkm_pm_common *pm_tmp = NULL;
+	//struct list_head *entry, *tmp;
+	//unsigned long flags;
+
+	LOG("%s() called unexpectedly.", __FUNCTION__);
+
+	//NOTE: catkin 10/11/2019 - Close is only called in our system if the modem stack crashes. This means
+	//                          things are in a bad state and the router will be rebooting. We decided not 
+	//                          to clean things up here because close code on usb side got into an infinite loop
+	//                          and prevented the router from rebooting. Revisit if close ever becomes a normal event.
+
+	/*
+	spin_lock_irqsave(&pm_mgr->pm_list_lock, flags);
+
+	list_for_each_safe(entry, tmp, &pm_mgr->pm_list) {
+		pm_tmp = list_entry(entry, struct cp_lkm_pm_common, list);
+		spin_unlock_irqrestore(&pm_mgr->pm_list_lock, flags);
+
+		// call detach to clean up network interface
+		if (CP_LKM_PM_TYPE_PPP_CLIENT == pm_tmp->type || CP_LKM_PM_TYPE_PPP_SERVER == pm_tmp->type) {
+			cp_lkm_pm_ppp_detach(pm_mgr, pm_tmp->unique_id);
+		} else {
+			cp_lkm_pm_net_detach(pm_mgr, pm_tmp->unique_id);
+		}
+	}
+
+	spin_unlock_irqrestore(&pm_mgr->pm_list_lock, flags);
+
+	cp_lkm_cleanup_msg_list(ctx);
+	*/
+	return 0;
+}
+
+static int cp_lkm_pm_handle_msg(struct cp_lkm_common_ctx *ctx, struct cp_lkm_msg_hdr *hdr, struct sk_buff *skb)
+{
+	struct cp_lkm_pm_ctx *pm_mgr;
+
+	//printk("%s(%p)\n", __FUNCTION__, ctx);
+
+	pm_mgr = (struct cp_lkm_pm_ctx *)ctx;
+
+
+	// TODO: how to write back a response with a common function?
+	if (skb) {
+		dev_kfree_skb_any(skb); // free with the skb helpers, not kfree()
+	}
+
+	return 0;
+}
+
+static int cp_lkm_pm_add_filter(struct cp_lkm_pm_ctx *mgr, int uid, struct cp_lkm_pm_filter *filter)
+{
+	// find the object in the list
+	struct list_head *pos;
+	struct cp_lkm_pm_common *pm = NULL;
+	unsigned long flags;
+	struct cp_lkm_pm_filter *new_filter;
+
+	DEBUG_TRACE("%s(%p)", __FUNCTION__, mgr);
+
+	spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
+	list_for_each(pos, &mgr->pm_list){
+		struct cp_lkm_pm_common *pm_tmp = list_entry(pos, struct cp_lkm_pm_common, list);
+		if(pm_tmp->unique_id == uid) {
+			pm = pm_tmp;
+			break;
+		}
+	}
+
+	spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
+
+	if (!pm) {
+		DEBUG_WARN("%s() pm not attached", __FUNCTION__);
+		return -ENODEV;
+	}
+
+	new_filter = kmalloc(sizeof(struct cp_lkm_pm_filter), GFP_ATOMIC);
+	if (!new_filter) {
+		DEBUG_WARN("%s() - failed to alloc filter\n", __FUNCTION__);
+		return -1;
+	}
+
+	memcpy(new_filter, filter, sizeof(struct cp_lkm_pm_filter));
+	INIT_LIST_HEAD(&new_filter->list);
+
+	list_add_tail(&new_filter->list, &pm->filter_list);
+
+	return 0;
+}
+
+static int cp_lkm_pm_handle_ioctl(struct cp_lkm_common_ctx *ctx, int cmd, void *k_argp)
+{
+	struct cp_lkm_pm_ctx *pm_mgr;
+	int result = 0;
+	struct cp_lkm_pm_attach_ioctl *attach_params;
+	struct cp_lkm_pm_detach_ioctl *detach_params;
+	struct cp_lkm_pm_activate_deactivate_ioctl *activate_params;
+	struct cp_lkm_pm_add_filter_ioctl *filter_params;
+
+	char name[CP_LKM_MAX_IF_NAME];
+	unsigned long not_copied;
+
+	//printk("%s(%p) cmd:%d\n", __FUNCTION__, ctx, _IOC_NR(cmd));
+
+	pm_mgr = (struct cp_lkm_pm_ctx *)ctx;
+
+	switch (cmd) {
+		case CP_LKM_IOCTL_PM_ATTACH:
+			attach_params = (struct cp_lkm_pm_attach_ioctl *)k_argp;
+			not_copied = copy_from_user(name, attach_params->name, CP_LKM_MAX_IF_NAME);
+			if (not_copied) {
+				return -EFAULT; // a failed user copy is a fault, not an allocation failure
+			}
+			DEBUG_INFO("%s(%s) attach", __FUNCTION__, name);
+			switch(attach_params->type) {
+				case CP_LKM_PM_TYPE_PPP_CLIENT:
+				case CP_LKM_PM_TYPE_PPP_SERVER:
+					result = cp_lkm_pm_ppp_attach(pm_mgr, attach_params->type, attach_params->uid, name);
+					if (!result) {
+						not_copied = copy_to_user(attach_params->name, name, CP_LKM_MAX_IF_NAME);
+						if (not_copied) {
+							return -EFAULT;
+						}
+					}
+					break;
+				case CP_LKM_PM_TYPE_ETHERNET_DHCP:
+				case CP_LKM_PM_TYPE_ETHERNET_STATIC:
+				case CP_LKM_PM_TYPE_ETHERNET_STATIC_NOARP:
+				case CP_LKM_PM_TYPE_IP_STATIC:
+				case CP_LKM_PM_TYPE_IP_DHCP:
+					result = cp_lkm_pm_net_attach(pm_mgr, attach_params->type, attach_params->uid, name, attach_params->mac);
+					break;
+				default:
+					result = -ENOTSUPP;
+					break;
+			}
+			break;
+		case CP_LKM_IOCTL_PM_DETACH:
+			detach_params = (struct cp_lkm_pm_detach_ioctl *)k_argp;
+			DEBUG_INFO("%s() detach uid:%d", __FUNCTION__, detach_params->uid);
+			switch(detach_params->type) {
+				case CP_LKM_PM_TYPE_PPP_CLIENT:
+				case CP_LKM_PM_TYPE_PPP_SERVER:
+					result = cp_lkm_pm_ppp_detach(pm_mgr, detach_params->uid);
+					break;
+				case CP_LKM_PM_TYPE_ETHERNET_DHCP:
+				case CP_LKM_PM_TYPE_ETHERNET_STATIC:
+				case CP_LKM_PM_TYPE_ETHERNET_STATIC_NOARP:
+				case CP_LKM_PM_TYPE_IP_STATIC:
+				case CP_LKM_PM_TYPE_IP_DHCP:
+					result = cp_lkm_pm_net_detach(pm_mgr, detach_params->uid);
+					break;
+				default:
+					result = -ENOTSUPP;
+					break;
+			}
+			break;
+		case CP_LKM_IOCTL_PM_ACTIVATE:
+			activate_params = (struct cp_lkm_pm_activate_deactivate_ioctl *)k_argp;
+			switch(activate_params->type) {
+				case CP_LKM_PM_TYPE_PPP_CLIENT:
+				case CP_LKM_PM_TYPE_PPP_SERVER:
+					result = cp_lkm_pm_ppp_activate(pm_mgr, activate_params->uid, true);
+					break;
+				case CP_LKM_PM_TYPE_ETHERNET_DHCP:
+				case CP_LKM_PM_TYPE_ETHERNET_STATIC:
+				case CP_LKM_PM_TYPE_ETHERNET_STATIC_NOARP:
+				case CP_LKM_PM_TYPE_IP_STATIC:
+				case CP_LKM_PM_TYPE_IP_DHCP:
+					result = cp_lkm_pm_net_activate(pm_mgr, activate_params->uid, true);
+					break;
+				default:
+					result = -ENOTSUPP;
+					break;
+			}
+			break;
+		case CP_LKM_IOCTL_PM_DEACTIVATE:
+			activate_params = (struct cp_lkm_pm_activate_deactivate_ioctl *)k_argp;
+			switch(activate_params->type) {
+				case CP_LKM_PM_TYPE_PPP_CLIENT:
+				case CP_LKM_PM_TYPE_PPP_SERVER:
+					result = cp_lkm_pm_ppp_activate(pm_mgr, activate_params->uid, false);
+					break;
+				case CP_LKM_PM_TYPE_ETHERNET_DHCP:
+				case CP_LKM_PM_TYPE_ETHERNET_STATIC:
+				case CP_LKM_PM_TYPE_ETHERNET_STATIC_NOARP:
+				case CP_LKM_PM_TYPE_IP_STATIC:
+				case CP_LKM_PM_TYPE_IP_DHCP:
+					result = cp_lkm_pm_net_activate(pm_mgr, activate_params->uid, false);
+					break;
+				default:
+					result = -ENOTSUPP;
+					break;
+			}
+			break;
+		case CP_LKM_IOCTL_PM_ADD_FILTER:
+			filter_params = (struct cp_lkm_pm_add_filter_ioctl *)k_argp;
+			result = cp_lkm_pm_add_filter(pm_mgr, filter_params->uid, &filter_params->filter);
+			break;
+		default: 
+			break;
+	}
+
+	return result;
+}
+
+static bool cp_lkm_pm_usb_do_link_lock(void* ctx1, void* ctx2)
+{
+	struct cp_lkm_pm_common *pm = (struct cp_lkm_pm_common*)ctx1;
+	bool done = false;
+	unsigned long flags;
+	// grab the lock and set the link_count. The link_count is used to keep send and poll from
+	// being called over to the USB layer while we are mucking with the send and poll pointers
+	spin_lock_irqsave(&pm->pm_link_lock, flags);
+	if(pm->pm_link_count <= 0) {
+		pm->pm_link_count = -1;
+		done = true;
+	}
+	spin_unlock_irqrestore(&pm->pm_link_lock, flags);
+
+	return done;
+}
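+
+/*
+ * A sketch of the pm_link_count convention implied by this function and by
+ * cp_lkm_pm_tty_write() above:
+ *   > 0  -- that many callers are currently inside the usb send/poll paths
+ *   == 0 -- idle; an exclusive holder may take the lock
+ *   -1   -- held exclusively while the edi function pointers are swapped
+ * cp_lkm_common_inc_link_lock()/cp_lkm_common_dec_link_lock() take and release
+ * the shared side; cp_lkm_pm_usb_do_link_lock() takes the exclusive side.
+ */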
+
+// This function changes the shared edi pointers. 
+// !!!It is the only function in the pm that is permitted to change edi function pointers!!!
+// Other functions can change the ctxt pointers
+static int cp_lkm_pm_usb_link(struct cp_lkm_edi *edi, int pm_unique_id, int link)
+{
+	struct list_head *pos;
+	struct cp_lkm_pm_common *pm = NULL;
+	unsigned long flags;
+	struct cp_lkm_edi *tmp_edi;
+
+	spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
+	list_for_each(pos, &cp_lkm_pm_mgr.pm_list){
+		struct cp_lkm_pm_common *pm_tmp = list_entry(pos, struct cp_lkm_pm_common, list);
+		if(pm_tmp->unique_id == pm_unique_id) {
+			pm = pm_tmp;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
+
+	if (!pm) {
+		// couldn't find object
+		//printk("%s() unable to find protocol manager with id:%d\n", __FUNCTION__, pm_unique_id);
+		return -EINVAL;
+	}
+
+	//printk("%s() pm_net: %p\n", __FUNCTION__, pm);
+
+	// grab the lock and set the link_count. The link_count is used to keep send and poll from
+	// being called over to the USB layer while we are mucking with the send and poll pointers
+	cp_lkm_do_or_die(pm, NULL, cp_lkm_pm_usb_do_link_lock, CP_LKM_TIMEOUT_MS, CP_LKM_ITER, "Failed to grab cp pm lock");
+
+	//printk("%s() pm: %p, attached: %d, pm_type: %d\n", __FUNCTION__, pm, pm->attached,pm->type);
+	tmp_edi = pm->edi;
+	pm->edi = NULL;
+	if (link) {
+		if (tmp_edi) {
+			// already linked - unlink from previous edi
+			// just a precaution, should never happen
+			tmp_edi->pm_recv = NULL;
+			tmp_edi->pm_recv_ctx = NULL;
+			tmp_edi->pm_get_hdr_size = NULL;
+
+			//printk("pm_recv_ctx null\n");
+			tmp_edi->pm_send_pause = NULL;
+			tmp_edi->pm_send_resume = NULL;
+
+			tmp_edi->pm_stats64_ctx = NULL;
+
+			//pm->edi = NULL;
+		}
+
+		tmp_edi = edi;
+		tmp_edi->pm_recv_ctx = pm;
+
+		switch(pm->type) {
+		case CP_LKM_PM_TYPE_PPP_CLIENT:
+		case CP_LKM_PM_TYPE_PPP_SERVER:
+			tmp_edi->pm_recv = cp_lkm_pm_ppp_recv;
+			tmp_edi->pm_get_hdr_size = cp_lkm_pm_ppp_get_hdr_size;
+			tmp_edi->pm_stats64_ctx = NULL;
+			break;
+		default:
+			tmp_edi->pm_recv = cp_lkm_pm_net_recv;
+			tmp_edi->pm_get_hdr_size = cp_lkm_pm_net_get_hdr_size;
+			tmp_edi->pm_send_pause = cp_lkm_pm_net_pause;
+			tmp_edi->pm_send_resume = cp_lkm_pm_net_resume;
+			tmp_edi->pm_stats64_ctx = pm;
+			break;
+		}
+
+		pm->edi = tmp_edi;
+
+		// release the link_count on link so things can start flowing.
+		// don't release it on unlink since we don't want things to flow when unlinked
+		spin_lock_irqsave(&pm->pm_link_lock, flags);
+		pm->pm_link_count = 0;
+		spin_unlock_irqrestore(&pm->pm_link_lock, flags);
+
+	} else {
+		if (tmp_edi) {
+			tmp_edi->pm_recv = NULL;
+			tmp_edi->pm_recv_ctx = NULL;
+			tmp_edi->pm_get_hdr_size = NULL;
+
+			//printk("pm_recv_ctx null\n");
+			tmp_edi->pm_send_pause = NULL;
+			tmp_edi->pm_send_resume = NULL;
+			tmp_edi->pm_stats64_ctx = NULL;
+
+			//pm->edi = NULL;
+		}
+	}
+
+	return 0;
+
+}
+
+/******************** common user/kernel communication functions **************/
+
+static void cp_lkm_common_ctx_init(struct cp_lkm_common_ctx *common)
+{
+	DEBUG_WARN("%s()", __FUNCTION__);
+
+	INIT_LIST_HEAD(&common->read_list);
+	spin_lock_init(&common->read_list_lock);
+
+	init_waitqueue_head(&common->inq);
+	common->open_cnt = 0;
+	common->reading_data = false;
+	common->write_skb = NULL;
+}
+
+static void cp_lkm_cleanup_msg_list(struct cp_lkm_common_ctx *common)
+{
+	struct cp_lkm_read_msg *msg;
+	unsigned long flags;
+	struct list_head *entry, *tmp;
+
+	spin_lock_irqsave(&common->read_list_lock, flags);
+
+	list_for_each_safe(entry, tmp, &common->read_list) {
+		msg = list_entry(entry, struct cp_lkm_read_msg, list);
+		list_del(&msg->list);
+		dev_kfree_skb_any(msg->skb);
+		kfree(msg);
+	}
+	spin_unlock_irqrestore(&common->read_list_lock, flags);
+}
+
+// this may be called from soft interrupt context or normal kernel thread context
+static int cp_lkm_post_message(struct cp_lkm_common_ctx *mgr, struct cp_lkm_msg_hdr* hdr, struct sk_buff *skb)
+{
+
+	struct cp_lkm_read_msg *msg;
+	unsigned long flags;
+
+	msg = kmalloc(sizeof(struct cp_lkm_read_msg), GFP_ATOMIC);
+	if (!msg) {
+		if (skb) {
+			dev_kfree_skb_any(skb);
+		}
+		return -ENOMEM;
+	}
+
+	msg->skb = skb;
+	memcpy(&msg->hdr, hdr, sizeof(struct cp_lkm_msg_hdr));
+
+	spin_lock_irqsave(&mgr->read_list_lock, flags);
+	list_add_tail(&msg->list, &mgr->read_list);
+	spin_unlock_irqrestore(&mgr->read_list_lock, flags);
+
+	mgr->q_waiting = false;
+
+	// signal poll
+	wake_up_interruptible(&mgr->inq);
+
+	return 0;
+}
+
+int cp_lkm_open(struct inode *inode, struct file *filp)
+{
+
+	int result = 0;
+	struct cp_lkm_common_ctx *common;
+
+	DEBUG_TRACE("%s()", __FUNCTION__);
+
+	if (!try_module_get(THIS_MODULE)) {
+		return -ENODEV;
+	}
+
+	// set private data
+	if (iminor(inode) == CP_LKM_USB_MGR_MINOR) {
+		filp->private_data = &cp_lkm_usb_mgr;
+		common = &cp_lkm_usb_mgr.common;
+		DEBUG_INFO("%s() open usb manager", __FUNCTION__);
+	} else if (iminor(inode) == CP_LKM_PM_MGR_MINOR) {
+		filp->private_data = &cp_lkm_pm_mgr;
+		common = &cp_lkm_pm_mgr.common;
+		DEBUG_INFO("%s() open pm manager", __FUNCTION__);
+	} else {
+		module_put(THIS_MODULE); // drop the reference taken above before erroring out
+		return -ENOENT;
+	}
+
+	if (common->open_cnt) {
+		module_put(THIS_MODULE);
+		return -EBUSY;
+	}
+
+	common->open_cnt++;
+
+	if (common->open) {
+		result = common->open(common);
+	}
+
+	return result;
+}
+
+int cp_lkm_release(struct inode *inode, struct file *filp)
+{
+
+	int result = 0;
+	struct cp_lkm_common_ctx *common;
+	common = (struct cp_lkm_common_ctx *)filp->private_data;
+
+	DEBUG_TRACE("%s() release", __FUNCTION__);
+
+	if (0 == common->open_cnt) {
+		return 0;
+	}
+
+	if (common->close) {
+		result = common->close(common);
+	}
+
+	module_put(THIS_MODULE);
+
+	common->open_cnt--;
+
+	return result;
+}
+
+// first read is the header
+// second read is the data.  If no data, then no second read
+// if error in either stage, negative value is returned and next read will be for header
+// messages are not removed until successfully read header and data (if any)
+ssize_t cp_lkm_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
+{
+
+	struct cp_lkm_common_ctx *common;
+	ssize_t result;
+	struct cp_lkm_read_msg *msg;
+	unsigned long flags;
+	unsigned long not_copied;
+
+//  DEBUG_INFO("%s() reading %d bytes", __FUNCTION__, count);
+	common = (struct cp_lkm_common_ctx *)filp->private_data;
+
+	spin_lock_irqsave(&common->read_list_lock, flags);
+	if (list_empty(&common->read_list)) {
+		spin_unlock_irqrestore(&common->read_list_lock, flags);
+		return -EAGAIN;
+	}
+	msg = list_first_entry(&common->read_list, struct cp_lkm_read_msg, list);
+	spin_unlock_irqrestore(&common->read_list_lock, flags);
+
+	if (!common->reading_data) { // header mode
+		// read header
+		if (sizeof(struct cp_lkm_msg_hdr) != count) {
+			return -EINVAL;
+		}
+
+		not_copied = copy_to_user(buf, &msg->hdr, sizeof(struct cp_lkm_msg_hdr));
+		if (not_copied) {
+			return -EFAULT;
+		}
+
+		if (!msg->hdr.len) {
+			result = count;
+			goto read_free;
+		}
+
+		// switch to data mode
+		common->reading_data = !common->reading_data;
+		return count;
+	}
+
+	// switch to header mode
+	common->reading_data = !common->reading_data;
+
+	// data mode - handle the data transfer
+	if (msg->hdr.len != count) {
+		return -EINVAL;
+	}
+	
+	not_copied = copy_to_user(buf, msg->skb->data, msg->hdr.len);
+
+	if (not_copied) {
+		return -EFAULT;
+	}
+
+	result = count;
+
+read_free:
+	spin_lock_irqsave(&common->read_list_lock, flags);
+	list_del(&msg->list);
+	spin_unlock_irqrestore(&common->read_list_lock, flags);
+
+	if (msg->skb) {
+		dev_kfree_skb_any(msg->skb);
+	}
+	kfree(msg);
+
+	return result;
+}
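+
+/*
+ * Minimal user-space read sketch for the two-phase protocol above (the fd and
+ * buffer names are illustrative only; poll() on the fd indicates when a
+ * message is queued):
+ *
+ *   struct cp_lkm_msg_hdr hdr;
+ *   char data[2048];
+ *   if (read(fd, &hdr, sizeof(hdr)) == sizeof(hdr) && hdr.len > 0) {
+ *       read(fd, data, hdr.len); // second read must request exactly hdr.len bytes
+ *   }
+ */
+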
+// the user must write the header first
+// then the user must write the data equivalent to the hdr.len
+// on error, a negative value is returned and the entire message is lost
+// on error, the next write must be header
+ssize_t cp_lkm_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos)
+{
+	struct cp_lkm_common_ctx *common;
+	unsigned long not_copied;
+	int result;
+	struct sk_buff *skb = NULL;
+	struct cp_lkm_msg_hdr hdr;
+	struct cp_lkm_msg_hdr *hdrp;
+
+//  DEBUG_INFO("%s() writing %d bytes", __FUNCTION__, count);
+
+	common = (struct cp_lkm_common_ctx *)filp->private_data;
+
+	if (!common->write_skb) {
+		// handle the header
+		if (count != sizeof(struct cp_lkm_msg_hdr)) {
+			return -EINVAL;
+		}
+		not_copied = copy_from_user(&hdr, buf, count);
+		if (not_copied) {
+			return -EFAULT;
+		}
+
+		if ((skb = alloc_skb (count + hdr.len, GFP_KERNEL)) == NULL) {
+			return -ENOMEM;
+		}
+
+		memcpy(skb->data, &hdr, count);
+
+		// setup skb pointers - skb->data points to message data with header immediately before skb->data
+		skb->len = hdr.len;
+		skb->data += sizeof(struct cp_lkm_msg_hdr);
+		skb_set_tail_pointer(skb, hdr.len);
+
+		if (!hdr.len) {
+			goto send_msg;
+		}
+
+		// save until we get the data
+		common->write_skb = skb;
+
+		return count;
+	}
+
+	// handle the data
+	skb = common->write_skb;
+	common->write_skb = NULL;
+
+	hdrp = (struct cp_lkm_msg_hdr *)(skb->data) - 1;
+	if (count != hdrp->len) {
+		dev_kfree_skb_any(skb);
+		return -EINVAL;
+	}
+
+	not_copied = copy_from_user(skb->data, buf, count);
+	if (not_copied) {
+		dev_kfree_skb_any(skb);
+		return -EFAULT;
+	}
+
+
+send_msg:
+	if (common->handle_msg) {
+		result = common->handle_msg(common, (struct cp_lkm_msg_hdr *)(skb->data) - 1, skb);
+		if (result) {
+			return result;
+		}
+	}
+
+	return count;
+}
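+
+/*
+ * Matching user-space write sketch (illustrative): the header is written
+ * first, then exactly hdr.len bytes of payload.
+ *
+ *   struct cp_lkm_msg_hdr hdr = { .instance_id = id, .cmd = cmd, .len = len };
+ *   write(fd, &hdr, sizeof(hdr));
+ *   if (len) {
+ *       write(fd, payload, len);
+ *   }
+ */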
+
+unsigned int cp_lkm_poll(struct file *filp, struct poll_table_struct *wait)
+{
+	unsigned long flags;
+	unsigned int mask = 0;
+	struct cp_lkm_common_ctx *common;
+
+	common = (struct cp_lkm_common_ctx *)filp->private_data;
+
+	poll_wait(filp, &common->inq, wait);
+
+	spin_lock_irqsave(&common->read_list_lock, flags);
+
+	if (!list_empty(&common->read_list)) {
+		mask = POLLIN | POLLRDNORM; // readable
+	}
+
+	spin_unlock_irqrestore(&common->read_list_lock, flags);
+
+	return mask;
+}
+
+#ifdef KERNEL_2_6_21
+int cp_lkm_ioctl (struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
+#else
+long cp_lkm_ioctl (struct file *filp, unsigned int cmd, unsigned long arg)
+#endif
+{
+	int result = -EINVAL;
+
+	void __user *uargp = (void __user *)arg;
+	void *kargp = NULL;
+	struct cp_lkm_common_ctx *common = (struct cp_lkm_common_ctx *)filp->private_data;
+
+	DEBUG_TRACE("%s(%p) - cmd:%d", __FUNCTION__, filp, _IOC_NR(cmd));
+
+	switch(cmd) {
+	case CP_LKM_IOCTL_SET_LOG_LEVEL:
+		cp_lkm_log_level = (uintptr_t)uargp;
+		LOG("Setting debug log level:%d", cp_lkm_log_level);
+		cp_lkm_wrapper_set_log_level(cp_lkm_log_level);
+		return 0;
+	default:
+		if (_IOC_SIZE(cmd)) {
+			kargp = kmalloc(_IOC_SIZE(cmd), GFP_ATOMIC);
+			if (!kargp) {
+				result = -ENOMEM;
+				goto done;
+			}
+			if (copy_from_user(kargp, uargp, _IOC_SIZE(cmd))) {
+				result = -EFAULT;
+				goto done;
+			}
+		}
+	}
+
+	if (common->handle_ioctl) {
+		result = common->handle_ioctl(common, cmd, kargp);
+	}
+
+
+	if (_IOC_DIR(cmd) & _IOC_READ) {
+		if (copy_to_user(uargp, kargp, _IOC_SIZE(cmd))) {
+			result = -EFAULT;
+			goto done;
+		}
+	}
+
+done:
+	if (kargp) {
+		kfree(kargp);
+	}
+
+	return result;
+}
+
+
+static int __init cp_lkm_start(void)
+{
+	int err;
+
+	//printk("%s() Initializing module...\n", __FUNCTION__);
+
+	// initialize global structures
+
+	err = cp_lkm_pm_tty_init();
+	if (err) {
+		return err;
+	}
+
+	cp_lkm_usb_init();
+	
+	cp_lkm_pm_init();
+
+	// register the char device with a dynamically assigned major number
+	if ((major = register_chrdev(0, "cp_lkm", &cp_lkm_fops)) < 0) {
+		DEBUG_INFO("%s() failed dynamic registration", __FUNCTION__);
+		cp_lkm_pm_tty_cleanup();
+		return major;
+	}
+
+	cp_lkm_class = class_create(THIS_MODULE, "cp_lkm");
+	if (IS_ERR(cp_lkm_class)) {
+		DEBUG_INFO("%s() failed class create", __FUNCTION__);
+		unregister_chrdev(major, "cp_lkm");
+		cp_lkm_pm_tty_cleanup();
+		return -ENODEV;
+	}
+#ifdef KERNEL_2_6_21
+	cp_lkm_dev[0] = device_create(cp_lkm_class, NULL, MKDEV(major, CP_LKM_USB_MGR_MINOR), "cp_lkm_usb");
+#else
+	cp_lkm_dev[0] = device_create(cp_lkm_class, NULL, MKDEV(major, CP_LKM_USB_MGR_MINOR), NULL, "cp_lkm_usb");
+#endif
+	if (IS_ERR(cp_lkm_dev[0])){
+		DEBUG_INFO("%s() failed device create: i", __FUNCTION__);
+		// clean up previous devices
+		class_destroy(cp_lkm_class);
+		unregister_chrdev(major, "cp_lkm");
+		cp_lkm_pm_tty_cleanup();
+		return -ENODEV;
+	}
+
+#ifdef KERNEL_2_6_21
+	cp_lkm_dev[1] = device_create(cp_lkm_class, NULL, MKDEV(major, CP_LKM_PM_MGR_MINOR), "cp_lkm_pm");
+#else
+	cp_lkm_dev[1] = device_create(cp_lkm_class, NULL, MKDEV(major, CP_LKM_PM_MGR_MINOR), NULL, "cp_lkm_pm");
+#endif
+	if (IS_ERR(cp_lkm_dev[1])){
+		DEBUG_INFO("%s() failed device create: i", __FUNCTION__);
+		// clean up previous devices
+		device_destroy(cp_lkm_class,  MKDEV(major, 0));
+		class_destroy(cp_lkm_class);
+		unregister_chrdev(major, "cp_lkm");
+		cp_lkm_pm_tty_cleanup();
+		return -ENODEV;
+	}
+
+	LOG("cp_lkm: Inserting kernel module"); 
+
+	return 0;
+}
+
+static void __exit cp_lkm_end(void)
+{
+	int i;
+
+	cp_lkm_pm_cleanup();
+	cp_lkm_usb_cleanup();
+
+	for (i = 0; i < 2; i++) {
+		device_destroy(cp_lkm_class,  MKDEV(major, i));
+	}
+	class_destroy(cp_lkm_class);
+	unregister_chrdev(major, "cp_lkm");
+
+	cp_lkm_pm_tty_cleanup();
+
+	LOG("cp_lkm: Removing kernel module");
+}
+
+module_init(cp_lkm_start);
+module_exit(cp_lkm_end);
+MODULE_LICENSE("GPL");
+
+
diff --git a/cpmodem_shim/cpmodem_test.c b/cpmodem_shim/cpmodem_test.c
new file mode 100755
index 0000000..f7f784d
--- /dev/null
+++ b/cpmodem_shim/cpmodem_test.c
@@ -0,0 +1,78 @@
+#include <stdio.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+int main()
+{
+	int fdusb, fdpm;
+	int i;
+
+	if ( (fdusb = open("/dev/cp_lkm_usb", O_RDWR)) < 0 ) {
+		printf("%s() open failed\n", __FUNCTION__);
+		return fdusb;
+	}
+
+	if ( (fdpm = open("/dev/cp_lkm_pm", O_RDWR)) < 0 ) {
+		printf("%s() open failed\n", __FUNCTION__);
+		close(fdusb);
+		return fdpm;
+	}
+
+	printf("testing write fd:%d\n", fdusb);
+	char buffer[52];
+	for (i=0; i < 52; i++) {
+		if (i < 26) {
+			buffer[i] = 'A' + i;
+		} else {
+			buffer[i] = 'a' + (i - 26); // second half: 'a'..'z' (was 'a' + i, which runs past 'z')
+		}
+	}
+	int nwrite = write(fdusb, buffer, 52);
+	if (nwrite != 52) {
+		printf("%s() - write error -> %d of %d written\n", __FUNCTION__, nwrite, 52);
+	}
+
+	printf("testing write fd:%d\n", fdpm);
+	char bufferpm[26];
+	for (i=0; i < 26; i++) {
+		bufferpm[i] = 'a' + i;
+	}
+	nwrite = write(fdpm, bufferpm, 26);
+	if (nwrite != 26) {
+		printf("%s() - write error -> %d of %d written\n", __FUNCTION__, nwrite, 26);
+	}
+
+	char read_buffer[52];
+	int nread = read(fdusb, read_buffer, 52);
+	if (nread != 52) {
+		printf("%s() - read error -> %d of %d read\n", __FUNCTION__, nread, 52);
+	}
+	if (memcmp(buffer, read_buffer, 52)) {
+		printf("%s() - read/write memcmp failed\n", __FUNCTION__);
+	} else {
+		printf("%s() - read/write memcmp succeeded\n", __FUNCTION__);
+	}
+
+	nread = read(fdpm, read_buffer, 26);
+	if (nread != 26) {
+		printf("%s() - read error -> %d of %d read\n", __FUNCTION__, nread, 26);
+	}
+	if (memcmp(bufferpm, read_buffer, 26)) {
+		printf("%s() - read/write memcmp failed %s\n", __FUNCTION__, read_buffer);
+	} else {
+		printf("%s() - read/write memcmp succeeded\n", __FUNCTION__);
+	}
+
+
+	printf("%s() - testing ioctl\n", __FUNCTION__);
+
+	int ioctl_resl = ioctl(fdusb, 322, 0xdeadbeef);
+	printf("%s() - ioctl resl:%d\n", __FUNCTION__, ioctl_resl);
+
+	close(fdusb);
+	close(fdpm);
+
+	return 0;
+}
diff --git a/cpmodem_shim/include/cpmodem_shim.h b/cpmodem_shim/include/cpmodem_shim.h
new file mode 100644
index 0000000..bf12cec
--- /dev/null
+++ b/cpmodem_shim/include/cpmodem_shim.h
@@ -0,0 +1,280 @@
+/*
+ * FILE NAME cpmodem_shim.h
+ *
+ * BRIEF MODULE DESCRIPTION
+ *  Frankendriver - USB to ethernet, ip or PPP controlled via a block driver.
+ *
+ *  Author: CradlePoint Technology, Inc.  <source@cradlepoint.com>
+ *          Ben Kendall <benk@cradlepoint.com>
+ *          Cory Atkin <catkin@cradlepoint.com>
+ *
+ * Copyright 2012, CradlePoint Technology, Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2
+ *  as published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to:
+ *  Free Software Foundation
+ *  51 Franklin Street, Fifth Floor
+ *  Boston, MA  02111-1301  USA
+ */
+
+#ifndef __PKG_CPLKM_H__
+#define __PKG_CPLKM_H__
+
+#include <linux/if_ether.h>
+#include <linux/types.h> 
+
+#ifndef __KERNEL__
+#define list_head list
+#endif
+
+// OR'd mask bits for setting the log mask
+#define LOG_DEBUG_LEVEL_ERROR	(1 << 0)
+#define LOG_DEBUG_LEVEL_WARN	(1 << 1)
+#define LOG_DEBUG_LEVEL_INFO	(1 << 2)
+#define LOG_DEBUG_LEVEL_TRACE	(1 << 3)
+#define LOG_DEBUG_LEVEL_PRINTF	(1 << 4)
+
+// LOG_DEBUG_LEVEL_ASSERT is always on 
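+// Example (sketch): the mask value is passed as the ioctl argument itself on
+// either cp_lkm device node, e.g.
+//   ioctl(fd, CP_LKM_IOCTL_SET_LOG_LEVEL, LOG_DEBUG_LEVEL_ERROR | LOG_DEBUG_LEVEL_WARN);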
+
+//state of the usb device as a whole
+typedef enum{
+    CP_LKM_USB_INIT =    0,  //ioctl created the object, but we haven't gotten the probe yet from USB
+    CP_LKM_USB_CTRL =    1,  //got probe from usb, can now pass ctrl pkts between USB device and the app space driver
+    CP_LKM_USB_ACTIVE =  2,  //ioctl told us the data connection is now active
+    CP_LKM_USB_DEAD =    3,  //Got disconnect from usb, can't use this bad boy anymore (waiting for ioctl to clean us up)
+    CP_LKM_USB_INVALID = 0xff
+}cp_lkm_usb_state_t;
+
+//state of the usb device data processing state machine
+typedef enum{
+	USB_PROCESS_STATE_IDLE = 0,  //no softirq processing going on
+	USB_PROCESS_STATE_ACTIVE,    //softirq machine processing 
+	USB_PROCESS_STATE_PAUSED     //machine paused (link, unlink, unplug)
+}cp_lkm_usb_process_state_t;
+
+//These must match the cpusb_wrapper_t enum in pkgCPUSB.h
+typedef enum
+{
+    CP_LKM_WRAPPER_NONE = 0,
+    CP_LKM_WRAPPER_ASIX,
+    CP_LKM_WRAPPER_ASIX_88179,
+    CP_LKM_WRAPPER_LG,
+    CP_LKM_WRAPPER_DIRECT_IP,
+    CP_LKM_WRAPPER_MSRNDIS,
+    CP_LKM_WRAPPER_PEGASUS,
+    CP_LKM_WRAPPER_NCM,
+    CP_LKM_WRAPPER_QMAP,          // new Qualcomm muxing and aggregation protocol
+    CP_LKM_WRAPPER_NUM_WRAPPERS //NOTE: this one needs to always be last
+}cp_lkm_wrapper_t;
+
+#define CP_LKM_IOCTL_MAGIC 0xC1	// pick something not in /usr/src/linux/Documentation/ioctl/ioctl-number.txt
+#define CP_LKM_IOCTL_USB_PLUG_INTF 		_IOW(CP_LKM_IOCTL_MAGIC, 0, struct cp_lkm_usb_plug_intf)
+#define CP_LKM_IOCTL_USB_OPEN_INTF		_IOW(CP_LKM_IOCTL_MAGIC, 1, struct cp_lkm_usb_open_intf)
+#define CP_LKM_IOCTL_USB_CLOSE_INTF		_IOW(CP_LKM_IOCTL_MAGIC, 2, struct cp_lkm_usb_close_intf)
+#define CP_LKM_IOCTL_USB_UNPLUG_INTF	_IOW(CP_LKM_IOCTL_MAGIC, 3, struct cp_lkm_usb_unplug_intf)
+#define CP_LKM_IOCTL_USB_EP_ACTION  	_IOW(CP_LKM_IOCTL_MAGIC, 4, struct cp_lkm_usb_ep_action)
+#define CP_LKM_IOCTL_USB_PM_LINK		_IOW(CP_LKM_IOCTL_MAGIC, 5, struct cp_lkm_usb_pm_link)
+#define CP_LKM_IOCTL_USB_SET_WRAPPER	_IOW(CP_LKM_IOCTL_MAGIC, 6, struct cp_lkm_usb_set_wrapper)
+#define CP_LKM_IOCTL_USB_SET_MUX_ID		_IOW(CP_LKM_IOCTL_MAGIC, 7, struct cp_lkm_usb_set_mux_id)
+#define CP_LKM_IOCTL_USB_IS_ALIVE_INTF	_IOW(CP_LKM_IOCTL_MAGIC, 8, struct cp_lkm_usb_is_alive_intf)
+
+#define CP_LKM_IOCTL_PM_ATTACH			_IOWR(CP_LKM_IOCTL_MAGIC, 10, struct cp_lkm_pm_attach_ioctl)
+#define CP_LKM_IOCTL_PM_DETACH			_IOW(CP_LKM_IOCTL_MAGIC, 11, struct cp_lkm_pm_detach_ioctl)
+#define CP_LKM_IOCTL_PM_ACTIVATE		_IOW(CP_LKM_IOCTL_MAGIC, 12, struct cp_lkm_pm_activate_deactivate_ioctl)
+#define CP_LKM_IOCTL_PM_DEACTIVATE		_IOW(CP_LKM_IOCTL_MAGIC, 13, struct cp_lkm_pm_activate_deactivate_ioctl)
+
+#define CP_LKM_IOCTL_PM_ADD_FILTER		_IOW(CP_LKM_IOCTL_MAGIC, 14, struct cp_lkm_pm_add_filter_ioctl)
+
+#define CP_LKM_IOCTL_SET_LOG_LEVEL 		_IOW(CP_LKM_IOCTL_MAGIC, 20, int)
+
+// protocol manager types - keep in sync with protocol_manager_type_t in pkgProtocolManager.h
+typedef enum {
+	CP_LKM_PM_TYPE_PPP_CLIENT,  // default for modems
+	CP_LKM_PM_TYPE_PPP_SERVER,
+	CP_LKM_PM_TYPE_IP_STATIC,
+	CP_LKM_PM_TYPE_IP_DHCP,
+	CP_LKM_PM_TYPE_ETHERNET_STATIC,
+	CP_LKM_PM_TYPE_ETHERNET_STATIC_NOARP,
+	CP_LKM_PM_TYPE_ETHERNET_DHCP,
+	CP_LKM_PM_TYPE_PPPOE_STATIC,
+	CP_LKM_PM_TYPE_PPPOE_DHCP,
+	CP_LKM_PM_TYPE_PPTP_STATIC,
+	CP_LKM_PM_TYPE_PPTP_DHCP,
+	CP_LKM_PM_TYPE_L2TP_STATIC,
+	CP_LKM_PM_TYPE_L2TP_DHCP,
+	CP_LKM_PM_TYPE_TUN_TAP_STATIC,
+	CP_LKM_PM_TYPE_TUN_TAP_DHCP,
+	CP_LKM_PM_TYPE_MAX
+} cp_lkm_pm_type_t;
+
+typedef enum
+{
+    CP_LKM_USB_CMD_DATA_SEND = 0,  //send a usb data pkt to shim
+    CP_LKM_USB_CMD_DATA_RECV,      //recv a usb data pkt from shim
+    CP_LKM_USB_CMD_CTRL_SEND,      //send a usb ctrl pkt to shim
+    CP_LKM_USB_CMD_CTRL_RECV,       //recv a usb ctrl pkt from the shim
+}cp_lkm_usb_cmd_t;
+
+#define CP_LKM_MAX_IF_NAME 16 // really 15 + NULL
+struct cp_lkm_pm_attach_ioctl {
+	unsigned int uid;
+	cp_lkm_pm_type_t type;
+	char *name;
+	unsigned char mac[ETH_ALEN];
+};
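+
+// Illustrative attach call from user space (a sketch -- the fd, uid, type and
+// interface name are examples only). For the PPP types the shim writes the
+// allocated tty name back through the name pointer:
+//   struct cp_lkm_pm_attach_ioctl att = { 0 };
+//   char ifname[CP_LKM_MAX_IF_NAME] = "wwan0";
+//   att.uid  = 1;
+//   att.type = CP_LKM_PM_TYPE_IP_DHCP;
+//   att.name = ifname;
+//   ioctl(pm_fd, CP_LKM_IOCTL_PM_ATTACH, &att);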
+
+struct cp_lkm_pm_detach_ioctl {
+	unsigned int uid;
+	cp_lkm_pm_type_t type;
+};
+
+struct cp_lkm_pm_activate_deactivate_ioctl {
+	unsigned int uid;
+	cp_lkm_pm_type_t type;
+};
+
+// data structures to implement shim traffic filter
+typedef enum {
+	CP_LKM_PM_FILTER_TYPE_NONE = 0,
+	CP_LKM_PM_FILTER_TYPE_IP_SRC_WAN_SUBNET_INCLUDE, // allow egress packets src'd from the wan subnet
+	CP_LKM_PM_FILTER_TYPE_IP_SRC_SUBNET_INCLUDE, // allow egress packets src'd from the specified subnet
+	CP_LKM_PM_FILTER_TYPE_MAX
+} cp_lkm_pm_filter_type_t;
+
+
+struct ip_src_subnet_include {
+	unsigned int ipv4_addr;
+	unsigned int ipv4_mask;
+};
+
+struct cp_lkm_pm_filter {
+	cp_lkm_pm_filter_type_t type;
+	union {
+		// filter parameter structures based on type
+		struct ip_src_subnet_include subnet;
+	};
+	struct list_head list;
+};
+
+struct cp_lkm_pm_add_filter_ioctl {
+	unsigned int uid;
+	struct cp_lkm_pm_filter filter;
+};
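+
+// Example filter (a sketch): allow only egress packets sourced from
+// 192.168.10.0/24. Network byte order for addr/mask is an assumption here --
+// use whatever order the app-side driver expects.
+//   struct cp_lkm_pm_add_filter_ioctl f = { 0 };
+//   f.uid = 1;
+//   f.filter.type = CP_LKM_PM_FILTER_TYPE_IP_SRC_SUBNET_INCLUDE;
+//   f.filter.subnet.ipv4_addr = htonl(0xC0A80A00); // 192.168.10.0
+//   f.filter.subnet.ipv4_mask = htonl(0xFFFFFF00); // /24
+//   ioctl(pm_fd, CP_LKM_IOCTL_PM_ADD_FILTER, &f);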
+
+// CP_LKM_PM commands
+enum {
+	CP_LKM_PM_LINK_DOWN = 0
+};
+
+enum
+{
+    CP_LKM_STATUS_OK = 0,
+    CP_LKM_STATUS_TIMEOUT,
+    CP_LKM_STATUS_ERROR
+};
+
+struct cp_lkm_msg_hdr {
+	int instance_id;
+	int cmd;
+	int arg1;   // cmd specific args
+	int arg2;   // cmd specific args
+	int status;
+	int len; // not including this structure
+};
+
+//struct defs for each command
+struct cp_lkm_usb_plug_intf
+{
+    int bus;
+    int addr;
+    int vid;
+    int pid;
+    int ep_in;         //data bulk in ep number
+    int ep_out;        //data bulk out ep number
+    int intf_num;
+    int alt_intf_num;
+    int unique_id;
+    int max_transfer_size;
+	int feature_flags;
+	int clone_num;  
+	int mux_id;  
+};
+
+#define CP_LKM_FEATURE_NO_ZERO_PACKETS (1 << 0)
+#define CP_LKM_FEATURE_CLONE_MUXED_INTF (1 << 1)
+
+struct cp_lkm_usb_open_intf
+{
+    int unique_id;
+};
+
+struct cp_lkm_usb_close_intf
+{
+    int unique_id;
+};
+
+struct cp_lkm_usb_unplug_intf
+{
+    int unique_id;
+};
+
+struct cp_lkm_usb_is_alive_intf
+{
+    int unique_id;
+};
+
+typedef enum
+{
+    EP_ACTION_NONE = 0,
+    EP_ACTION_CREATE,
+    EP_ACTION_LISTEN,
+    EP_ACTION_LISTEN_STOP,
+    EP_ACTION_RECV,
+	EP_ACTION_FLUSH_CONTROL,
+	EP_ACTION_SET_MAX_TX_SIZE
+}cp_lkm_usb_ep_action_t;
+
+struct cp_lkm_usb_ep_action
+{
+    int unique_id;
+    cp_lkm_usb_ep_action_t action;
+    int ep_num; 
+    int ep_type;             //UE_BULK or UE_INTERRUPT
+    int interval;            //interrupt end points
+    int max_transfer_size;
+};
+
+struct cp_lkm_usb_pm_link
+{
+	int usb_unique_id; 	// usb device to link or unlink
+	int pm_unique_id; 	// pm to link or unlink
+	int link; 			// link or unlink the usb device and pm
+};
+
+struct cp_lkm_usb_set_wrapper
+{
+	int unique_id; 	// usb device to link or unlink
+	cp_lkm_wrapper_t wrapper;
+	void* wrapper_info;             //pointer to a chunk of something, must be copied out by shim
+	int wrapper_info_len;
+	int clone_num;
+	int mux_id;
+};
+
+struct cp_lkm_usb_set_mux_id
+{
+	int unique_id; 	
+	int clone_num;
+	int mux_id;
+};
+
+#endif
diff --git a/cpmodem_shim/include/cpmodem_wrapper.h b/cpmodem_shim/include/cpmodem_wrapper.h
new file mode 100644
index 0000000..e3d6fb2
--- /dev/null
+++ b/cpmodem_shim/include/cpmodem_wrapper.h
@@ -0,0 +1,134 @@
+/*
+ * FILE NAME cpmodem_wrapper.h
+ *
+ * BRIEF MODULE DESCRIPTION
+ *  Header file indicating API requirements for interfacing to custom wrapper module
+ *
+ *  Author: CradlePoint Technology, Inc.  <source@cradlepoint.com>
+ *          Ben Kendall <benk@cradlepoint.com>
+ *          Cory Atkin <catkin@cradlepoint.com>
+ *
+ * Copyright 2012-2021, CradlePoint Technology, Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2
+ *  as published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to:
+ *  Free Software Foundation
+ *  51 Franklin Street, Fifth Floor
+ *  Boston, MA  02111-1301  USA
+ */
+
+// OR'd mask bits for setting the log mask - these must match cpmodem_shim.h
+#define LOG_DEBUG_LEVEL_ERROR	(1 << 0)
+#define LOG_DEBUG_LEVEL_WARN	(1 << 1)
+#define LOG_DEBUG_LEVEL_INFO	(1 << 2)
+#define LOG_DEBUG_LEVEL_TRACE	(1 << 3)
+#define LOG_DEBUG_LEVEL_PRINTF	(1 << 4)
+// LOG_DEBUG_LEVEL_ASSERT is always on 
+
+
+//Traffic control for wrappers
+#define CP_LKM_WRAPPER_SRC_DATA        0x01   //data coming from the network
+#define CP_LKM_WRAPPER_SRC_CTRL        0x02   //data coming from the cpcommon stack
+#define CP_LKM_WRAPPER_DST_DATA        0x03   //data going to network stack
+#define CP_LKM_WRAPPER_DST_CTRL        0x04   //data going to cpcommon stack
+#define CP_LKM_WRAPPER_DST_FLOW_PAUSE  0x05   //pause tx traffic (i.e. SRC_DATA)
+#define CP_LKM_WRAPPER_DST_FLOW_RESUME 0x06   //resume tx traffic
+#define CP_LKM_WRAPPER_DST_UNKNOWN     0x07   //don't know what this is, drop it
+
+//results for wrappers
+#define CP_LKM_WRAPPER_RES_DONE   0x01   //Wrapper is done with success
+#define CP_LKM_WRAPPER_RES_AGAIN  0x02   //Wrapper needs to be called again to continue processing
+#define CP_LKM_WRAPPER_RES_ERROR  0x03   //Wrapper is done with an error
+
+//state of the usb device as a whole
+typedef enum{
+   CP_LKM_WRAPPER_INIT =    0,  //ioctl created the object, but we haven't gotten the probe yet from USB
+   CP_LKM_WRAPPER_CTRL =    1,  //got probe from usb, can now pass ctrl pkts between USB device and the app space driver
+   CP_LKM_WRAPPER_ACTIVE =  2,  //ioctl told us the data connection is now active
+   CP_LKM_WRAPPER_INVALID = 0xff
+} cp_lkm_wrapper_state_t;
+
+//These must match the cp_lkm_wrapper_t enum in cpmodem_shim.h
+typedef enum
+{
+   CP_LKM_WRAPPER_TYPE_NONE = 0,
+   CP_LKM_WRAPPER_TYPE_ASIX,
+   CP_LKM_WRAPPER_TYPE_ASIX_88179,
+   CP_LKM_WRAPPER_TYPE_LG,
+   CP_LKM_WRAPPER_TYPE_DIRECT_IP,
+   CP_LKM_WRAPPER_TYPE_MSRNDIS,
+   CP_LKM_WRAPPER_TYPE_PEGASUS,
+   CP_LKM_WRAPPER_TYPE_NCM,
+   CP_LKM_WRAPPER_TYPE_QMAP,          //New Qualcomm muxing and aggregation protocol
+   CP_LKM_WRAPPER_TYPE_NUM_WRAPPERS //NOTE: this one needs to always be last
+} cp_lkm_wrapper_type_t;
+
+
+/* 
+   Alloc an opaque wrapper context
+   wrapper_info is wrapper specific data sent down from the app space driver. cp_lkm retains
+   ownership of the memory and frees it when the device unplugs, so the wrapper must copy any data it needs to hang onto.
+*/
+void* cp_lkm_wrapper_instance_alloc(cp_lkm_wrapper_type_t wrapper, void* wrapper_info, int len);
+void cp_lkm_wrapper_instance_free(void* ctxt);
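+
+/*
+ * Illustrative lifecycle sketch (assumptions: an NCM wrapper and the default
+ * interface id; error handling elided):
+ *
+ *   void* ctxt = cp_lkm_wrapper_instance_alloc(CP_LKM_WRAPPER_TYPE_NCM,
+ *                                              wrapper_info, info_len);
+ *   // wrapper copied what it needed; cp_lkm still owns wrapper_info
+ *   cp_lkm_wrapper_set_state(ctxt, CP_LKM_WRAPPER_DEFAULT_ID,
+ *                            CP_LKM_WRAPPER_CTRL);
+ *   ...
+ *   cp_lkm_wrapper_instance_free(ctxt);
+ */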
+
+/*
+ * Apply the wrapper to a send pkt before sending to USB 
+ * ctxt    : wrapper ctxt from cp_lkm_wrapper_instance_alloc() 
+ * src     : CP_LKM_WRAPPER_SRC_DATA or CP_LKM_WRAPPER_SRC_CTRL 
+ * skb_in  : buffer with send data. This function takes ownership of skb_in and is responsible for freeing it when done. 
+ * skb_out : pointer to skb to send to usb. If NULL, nothing to send, else send it. The calling function takes ownership 
+ *           of skb_out and is responsible for freeing it.
+ * wrapper_state: CP_LKM_WRAPPER_ACTIVE if data connection is active, CP_LKM_WRAPPER_CTRL if no data connection. Some wrappers may 
+ *            need to wrap differently depending on the state 
+ *  
+ * return value: CP_LKM_WRAPPER_RES_DONE  - Done processing skb_in. If skb_out is non-NULL, send it to usb.
+ *               CP_LKM_WRAPPER_RES_AGAIN - Still processing skb_in. If skb_out is non-NULL, send it to usb, then call send again with
+ *                                          skb_in set to NULL (since send took ownership on the last call).
+ *               CP_LKM_WRAPPER_RES_ERROR - There was an error with the buffer; update error pkt counts if needed.
+ *                                          (send has taken ownership and will free the skb.)
+ *  
+ */
+int cp_lkm_wrapper_send(void* ctxt, int src, int mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out);
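+
+/*
+ * Illustrative send loop (a sketch, not shim code): keep calling while the
+ * wrapper returns CP_LKM_WRAPPER_RES_AGAIN. skb_in is handed over on the
+ * first call only, since send takes ownership of it. queue_to_usb() is a
+ * hypothetical helper standing in for the caller's usb submit path.
+ *
+ *   struct sk_buff* out;
+ *   int res;
+ *   do {
+ *       out = NULL;
+ *       res = cp_lkm_wrapper_send(ctxt, CP_LKM_WRAPPER_SRC_DATA, mux_id,
+ *                                 skb_in, &out);
+ *       skb_in = NULL;         // wrapper owns it after the first call
+ *       if (out)
+ *           queue_to_usb(out); // caller now owns out
+ *   } while (res == CP_LKM_WRAPPER_RES_AGAIN);
+ */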
+
+/*
+ * Process the recv pkt before sending up to the app driver (ctrl) or to the network stack (data)  
+ * ctxt    : wrapper ctxt from cp_lkm_wrapper_instance_alloc() 
+ * skb_in  : buffer with recv data. This function takes ownership of skb_in and is responsible for freeing it when done. 
+ * skb_out : pointer to skb to send to ctrl or data. If NULL, nothing to send. The calling function takes ownership 
+ *           of skb_out and is responsible for freeing it.
+ * wrapper_state: CP_LKM_WRAPPER_ACTIVE if data connection is active, CP_LKM_WRAPPER_CTRL if no data connection. Some wrappers may 
+ *            need to wrap differently depending on the state
+ *  
+ * return value: The return value has the result and the destination OR'd into it.
+ *       result:
+ *               CP_LKM_WRAPPER_RES_DONE  - Done processing skb_in. If skb_out is non-NULL, send it to the destination.
+ *               CP_LKM_WRAPPER_RES_AGAIN - Still processing skb_in. If skb_out is non-NULL, send it to the destination, then call recv again with
+ *                                          skb_in set to NULL (since recv took ownership on the last call) to get the next result and dest.
+ *               CP_LKM_WRAPPER_RES_ERROR - There was an error with the buffer; update error pkt counts if needed.
+ *                                          (recv has taken ownership and will free the skb.)
+ *        dest:  CP_LKM_WRAPPER_DST_DATA - send to the network protocol stack
+ *               CP_LKM_WRAPPER_DST_CTRL - send to the app space driver
+ */
+int cp_lkm_wrapper_recv(void* ctxt, int* dst, int* mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out);
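+
+/*
+ * Illustrative recv/demux loop (a sketch). Assumptions: the result is tested
+ * against the CP_LKM_WRAPPER_RES_* values and the destination arrives via
+ * *dst, per the signature above; hand_to_network() and hand_to_app_driver()
+ * are hypothetical helpers.
+ *
+ *   struct sk_buff* out;
+ *   int res, dst, mux;
+ *   do {
+ *       out = NULL;
+ *       res = cp_lkm_wrapper_recv(ctxt, &dst, &mux, skb_in, &out);
+ *       skb_in = NULL;                  // wrapper owns it after the first call
+ *       if (out) {
+ *           if (dst == CP_LKM_WRAPPER_DST_DATA)
+ *               hand_to_network(out, mux);
+ *           else if (dst == CP_LKM_WRAPPER_DST_CTRL)
+ *               hand_to_app_driver(out);
+ *           else
+ *               dev_kfree_skb_any(out); // e.g. CP_LKM_WRAPPER_DST_UNKNOWN
+ *       }
+ *   } while (res == CP_LKM_WRAPPER_RES_AGAIN);
+ */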
+
+/*
+ * Let the wrapper know the state of the interface with unique identifier id.
+ */
+#define CP_LKM_WRAPPER_DEFAULT_ID 0
+void cp_lkm_wrapper_set_state(void* ctxt, int id, cp_lkm_wrapper_state_t wrapper_state);
+
+void cp_lkm_wrapper_set_log_level(int level);
+int cp_lkm_wrapper_hdr_size(void* ctxt);
+
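+
+/*
+ * Illustrative use of cp_lkm_wrapper_hdr_size() (a sketch): reserve enough
+ * headroom when allocating a tx skb so the wrapper can prepend its header
+ * without reallocating.
+ *
+ *   int hdr = cp_lkm_wrapper_hdr_size(ctxt);
+ *   struct sk_buff* skb = alloc_skb(hdr + payload_len, GFP_ATOMIC);
+ *   if (skb)
+ *       skb_reserve(skb, hdr);
+ */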