cpmodem: add missing dependencies
Change-Id: Ieed637bd424a601c37edc52ef1e47acff8d2e8c0
diff --git a/cpmodem_shim/Kbuild b/cpmodem_shim/Kbuild
index 880321d..8a2e07a 100644
--- a/cpmodem_shim/Kbuild
+++ b/cpmodem_shim/Kbuild
@@ -1,2 +1,3 @@
obj-m += cpmodem_shim.o
+cpmodem_shim-y := cpmodem_shim_main.o cpmodem_wrapper.o
ccflags-y += $(EXTRA_CFLAGS)
diff --git a/cpmodem_shim/cpmodem_shim.c b/cpmodem_shim/cpmodem_shim_main.c
similarity index 100%
rename from cpmodem_shim/cpmodem_shim.c
rename to cpmodem_shim/cpmodem_shim_main.c
diff --git a/cpmodem_shim/cpmodem_wrapper.c b/cpmodem_shim/cpmodem_wrapper.c
new file mode 100644
index 0000000..d93f24c
--- /dev/null
+++ b/cpmodem_shim/cpmodem_wrapper.c
@@ -0,0 +1,1971 @@
+/*
+ * FILE NAME cpmodem_wrapper.c
+ *
+ * BRIEF MODULE DESCRIPTION
+ * Custom USB modem wrapper module
+ *
+ * Author: CradlePoint Technology, Inc. <source@cradlepoint.com>
+ * Ben Kendall <benk@cradlepoint.com>
+ * Cory Atkin <catkin@cradlepoint.com>
+ *
+ * Copyright 2012-2023, CradlePoint Technology, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to:
+ * Free Software Foundation
+ * 51 Franklin Street, Fifth Floor
+ * Boston, MA 02111-1301 USA
+ */
+
+
+// Necessary includes for this device driver
+#include <linux/module.h> // Needed by all modules
+#include <linux/kernel.h> // Needed for KERN_xxxx
+#include <linux/init.h> // Needed for the macros
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <cpmodem_wrapper.h>
+
+
+#define RUNTIME_DEBUG_TRACE (1 << 0)
+#define RUNTIME_DEBUG_INFO (1 << 1)
+#define RUNTIME_DEBUG_WARN (1 << 2)
+#define RUNTIME_DEBUG_ERROR (1 << 3)
+
+//#undef RUNTIME_DEBUG
+//#define RUNTIME_DEBUG ( RUNTIME_DEBUG_TRACE | RUNTIME_DEBUG_INFO | RUNTIME_DEBUG_WARN | RUNTIME_DEBUG_ERROR )
+
+static int cp_lkm_wrapper_log_level = 0;
+
+#ifdef RUNTIME_DEBUG
+static const char *cp_lkm_wrapper_runtime_debug_level_str[] = {
+ "ASSERT",
+ "TRACE",
+ "INFO",
+ "WARN",
+ "ERROR",
+};
+#else
+static const char *cp_lkm_wrapper_debug_log_level_str[] = {
+ "ASSERT",
+ "ERROR",
+ "WARN",
+ "INFO",
+ "TRACE",
+ "PRINTF"
+};
+#endif
+
+static int cp_out_get_level_index(int level)
+{
+ int level_index = 0;
+ while (level) {
+ level = level >> 1;
+ level_index++;
+ }
+ return level_index;
+}
+
+static void cp_out(int level, const char * file, int line, const char *fmt, ...)
+{
+ int file_str_len = 0;
+ char *file_pos = (char *)file;
+ char *fmt1;
+ va_list arg;
+ int level_index = 0;
+ const char *level_str = NULL;
+
+ if (level) { // level of 0 is ASSERT and log - always output
+ level_index = cp_out_get_level_index(level);
+
+#ifdef RUNTIME_DEBUG
+ if (!(RUNTIME_DEBUG & level)) {
+ return;
+ }
+ level_str = cp_lkm_wrapper_runtime_debug_level_str[level_index];
+#else
+ if (!(cp_lkm_wrapper_log_level & level)) {
+ return;
+ }
+ level_str = cp_lkm_wrapper_debug_log_level_str[level_index];
+#endif
+ }
+
+ va_start(arg, fmt);
+
+ if (file) {
+ char *pos = (char *)file;
+ while ((pos = strchr(pos, '/'))) {
+ pos++;
+ file_pos = pos;
+ }
+
+ file_str_len = strlen(file_pos);
+ }
+
+ fmt1 = kmalloc(strlen(fmt) + file_str_len + 12 + 6, GFP_ATOMIC); // +6 for debug type indication
+ if (!fmt1) {
+ return;
+ }
+ if (level_str) {
+ if (file) {
+ sprintf(fmt1, "%6s %s(%4d):%s\n", level_str, file_pos, line, fmt);
+ } else {
+ sprintf(fmt1, "%6s %s\n", level_str, fmt);
+ }
+ } else {
+ if (file) {
+ sprintf(fmt1, "%s(%4d):%s\n", file_pos, line, fmt);
+ } else {
+ sprintf(fmt1, "%s\n", fmt);
+ }
+ }
+ vprintk(fmt1, arg);
+ kfree(fmt1);
+ va_end(arg);
+}
+
+#ifdef RUNTIME_DEBUG
+// assert is always defined if RUNTIME_DEBUG is defined
+#define DEBUG_ASSERT(a, args...) \
+ if (!(a)) { \
+ cp_out(0, __FILE__, __LINE__, args); \
+ dump_stack(); \
+ while(1) { }; \
+ }
+#define DEBUG_TRACE(args...) cp_out(RUNTIME_DEBUG_TRACE, __FILE__, __LINE__, args)
+#define DEBUG_INFO(args...) cp_out(RUNTIME_DEBUG_INFO, __FILE__, __LINE__, args)
+#define DEBUG_WARN(args...) cp_out(RUNTIME_DEBUG_WARN, __FILE__, __LINE__, args)
+#define DEBUG_ERROR(args...) cp_out(RUNTIME_DEBUG_ERROR, __FILE__, __LINE__, args)
+
+#else
+#define DEBUG_ASSERT(a, args...)
+#define DEBUG_TRACE(args...) cp_out(LOG_DEBUG_LEVEL_TRACE, __FILE__, __LINE__, args)
+
+#define DEBUG_INFO(args...) cp_out(LOG_DEBUG_LEVEL_INFO, __FILE__, __LINE__, args)
+
+#define DEBUG_WARN(args...) cp_out(LOG_DEBUG_LEVEL_WARN, __FILE__, __LINE__, args)
+
+#define DEBUG_ERROR(args...) cp_out(LOG_DEBUG_LEVEL_ERROR, __FILE__, __LINE__, args)
+
+#define DEBUG_PRINTF(args...) cp_out(LOG_DEBUG_LEVEL_PRINTF, __FILE__, __LINE__, args)
+
+#endif
+
+#define LOG(args...) cp_out(0, NULL, 0, args)
+
+
+void cp_lkm_wrapper_set_log_level(int level)
+{
+ DEBUG_TRACE("%s(%d)", __FUNCTION__, level);
+
+ cp_lkm_wrapper_log_level = level;
+}
+
+/******************************* usb wrapper module functionality **********************************/
+
+typedef int (*cp_lkm_wrapper_send_op)(void* ctxt, int src, int mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out);
+typedef int (*cp_lkm_wrapper_recv_op)(void* ctxt, int* dst, int* mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out);
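+//Contract shared by the wrapper ops below (summarized from the individual implementations):
+//  CP_LKM_WRAPPER_RES_DONE:  *skb_out (if set) is ready and nothing is left over from this transfer.
+//  CP_LKM_WRAPPER_RES_AGAIN: *skb_out is ready but more packets remain; the remainder is queued on the
+//                            context and the caller re-invokes recv with skb_in == NULL to drain it.
+//  CP_LKM_WRAPPER_RES_ERROR: the input skb has been freed and *skb_out is NULL.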
+
+static int cp_lkm_generic_wrapper_send(void* ctxt, int src, int mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out);
+static int cp_lkm_generic_wrapper_recv(void* ctxt, int* dst, int* mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out);
+static int cp_lkm_asix_wrapper_send(void* ctxt, int src, int mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out);
+static int cp_lkm_asix_wrapper_recv(void* ctxt, int* dst, int* mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out);
+static int cp_lkm_asix88179_wrapper_send(void* ctxt, int src, int mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out);
+static int cp_lkm_asix88179_wrapper_recv(void* ctxt, int* dst, int* mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out);
+static int cp_lkm_dip_wrapper_send(void* ctxt, int src, int mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out);
+static int cp_lkm_dip_wrapper_recv(void* ctxt, int* dst, int* mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out);
+static int cp_lkm_ncm_wrapper_send(void* ctxt, int src, int mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out);
+static int cp_lkm_ncm_wrapper_recv(void* ctxt, int* dst, int* mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out);
+static int cp_lkm_msrndis_wrapper_send(void* ctxt, int src, int mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out);
+static int cp_lkm_msrndis_wrapper_recv(void* ctxt, int* dst, int* mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out);
+static int cp_lkm_pegasus_wrapper_send(void* ctxt, int src, int mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out);
+static int cp_lkm_pegasus_wrapper_recv(void* ctxt, int* dst, int* mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out);
+static int cp_lkm_qmap_wrapper_send(void* ctxt, int src, int mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out);
+static int cp_lkm_qmap_wrapper_recv(void* ctxt, int* dst, int* mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out);
+
+
+static void* cp_lkm_msrndis_wrapper_alloc(cp_lkm_wrapper_type_t wrapper, void* wrapper_info, int len);
+static void* cp_lkm_ncm_wrapper_alloc(cp_lkm_wrapper_type_t wrapper, void* wrapper_info, int len);
+static void* cp_lkm_asix88179_wrapper_alloc(cp_lkm_wrapper_type_t wrapper, void* wrapper_info, int len);
+static void* cp_lkm_qmap_wrapper_alloc(cp_lkm_wrapper_type_t wrapper, void* wrapper_info, int len);
+
+#define CP_LKM_WRAPPER_STATE_INIT 0
+#define CP_LKM_WRAPPER_STATE_SPLIT 1
+
+#define WRAPPER_WRITE_U8(ptr,val) (*((u8*)(ptr)) = val)
+#define WRAPPER_WRITE_U16(ptr,val) (*((u16*)(ptr)) = val)
+#define WRAPPER_WRITE_U32(ptr,val) (*((u32*)(ptr)) = val)
+
+#define WRAPPER_READ_U8(ptr) (*((u8*)(ptr)))
+#define WRAPPER_READ_U16(ptr) (*((u16*)(ptr)))
+#define WRAPPER_READ_U32(ptr) (*((u32*)(ptr)))
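+//These helpers read/write header fields directly in skb data; callers pair them with the
+//cpu_to_le/be and le/be_to_cpu conversions, so the values stored in the buffer are always wire order.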
+
+struct cp_lkm_wrapper_state_map{
+ int id;
+ cp_lkm_wrapper_state_t wrapper_state;
+};
+#define MAX_STATE_MAPS 16
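+//Each wrapper instance keeps a small id -> state table so individual ids (e.g. QMAP mux channels, or
+//PPP modems that run AT commands over the data endpoints while connecting) can be marked CTRL or DATA.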
+
+struct cp_lkm_wrapper_context
+{
+ cp_lkm_wrapper_type_t wrapper;
+ int send_state; //generic send state that can be used by all wrappers
+ int recv_state; //generic recv state that can be used by all wrappers
+ cp_lkm_wrapper_send_op send;
+ cp_lkm_wrapper_recv_op recv;
+ int hdr_size;
+ spinlock_t lock;
+ struct cp_lkm_wrapper_state_map state_maps[MAX_STATE_MAPS];
+ int num_state_maps;
+ struct sk_buff_head skb_ctrl_recv_list;
+ struct sk_buff_head skb_data_recv_list;
+ struct sk_buff_head skb_ctrl_send_list;
+ struct sk_buff_head skb_data_send_list;
+};
+
+static void cp_lkm_wrapper_common_init(struct cp_lkm_wrapper_context* cpwc)
+{
+ cpwc->recv_state = CP_LKM_WRAPPER_STATE_INIT;
+ cpwc->send_state = CP_LKM_WRAPPER_STATE_INIT;
+ spin_lock_init(&cpwc->lock);
+ skb_queue_head_init(&cpwc->skb_ctrl_recv_list);
+ skb_queue_head_init(&cpwc->skb_ctrl_send_list);
+ skb_queue_head_init(&cpwc->skb_data_recv_list);
+ skb_queue_head_init(&cpwc->skb_data_send_list);
+}
+
+static void cp_lkm_wrapper_clean_list(struct sk_buff_head* list)
+{
+ struct sk_buff *skb;
+ while((skb = skb_dequeue(list)) != NULL){
+ DEBUG_INFO("%s() found a straggler", __FUNCTION__);
+
+ dev_kfree_skb_any(skb);
+ }
+}
+
+static void cp_lkm_wrapper_common_cleanup(struct cp_lkm_wrapper_context* cpwc)
+{
+ cp_lkm_wrapper_clean_list(&cpwc->skb_ctrl_recv_list);
+ cp_lkm_wrapper_clean_list(&cpwc->skb_ctrl_send_list);
+ cp_lkm_wrapper_clean_list(&cpwc->skb_data_recv_list);
+ cp_lkm_wrapper_clean_list(&cpwc->skb_data_send_list);
+}
+
+static struct sk_buff* cp_lkm_wrapper_skb_make_space(struct sk_buff* skb_in, int headspace, int tailspace)
+{
+ int headroom;
+ int tailroom;
+ int space = headspace + tailspace;
+
+ if(skb_in == NULL) {
+ DEBUG_ERROR("%s() NULL skb_in, shouldn't happen", __FUNCTION__);
+ return NULL;
+ }
+ headroom = skb_headroom(skb_in);
+ tailroom = skb_tailroom(skb_in);
+
+ if ((!skb_cloned(skb_in)) && ((headroom + tailroom) >= space)) {
+ if (headroom < headspace || tailroom < tailspace) {
+ //printk("%s() move it\n", __FUNCTION__);
+ skb_in->data = memmove(skb_in->head + headspace, skb_in->data, skb_in->len);
+ skb_set_tail_pointer(skb_in, skb_in->len);
+ }
+ } else {
+ struct sk_buff *skb2;
+ //printk("%s() copy it\n", __FUNCTION__);
+ skb2 = skb_copy_expand(skb_in, headspace, tailspace, GFP_ATOMIC);
+ dev_kfree_skb_any(skb_in);
+ skb_in = skb2;
+ }
+ return skb_in;
+}
+
+// generic helper function for getting the state for id from the ctxt
+static cp_lkm_wrapper_state_t cp_lkm_generic_wrapper_get_state(void* ctxt, int id)
+{
+ struct cp_lkm_wrapper_context* cpwc = (struct cp_lkm_wrapper_context*)ctxt;
+ int i;
+ cp_lkm_wrapper_state_t wrapper_state = CP_LKM_WRAPPER_INVALID;
+ for (i = 0; i < cpwc->num_state_maps; i++) {
+ if (cpwc->state_maps[i].id == id) {
+ wrapper_state = cpwc->state_maps[i].wrapper_state;
+ break;
+ }
+ }
+ //printk("%s() id: %d, state: %d\n",__FUNCTION__,id,wrapper_state);
+ return wrapper_state;
+}
+
+static int cp_lkm_generic_wrapper_send(void* ctxt, int src, int mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out)
+{
+ DEBUG_TRACE("%s()", __FUNCTION__);
+ *skb_out = skb_in;
+ return CP_LKM_WRAPPER_RES_DONE;
+}
+
+static int cp_lkm_generic_wrapper_recv(void* ctxt, int* dst, int* mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out)
+{
+ int result = CP_LKM_WRAPPER_RES_DONE;
+ cp_lkm_wrapper_state_t wrapper_state = cp_lkm_generic_wrapper_get_state(ctxt, CP_LKM_WRAPPER_DEFAULT_ID);
+
+ //printk("%s() state: %d\n", __FUNCTION__, wrapper_state);
+ *skb_out = skb_in;
+
+ if(wrapper_state == CP_LKM_WRAPPER_CTRL) {
+ //PPP modems will often use the data endpoints for AT while connecting and then PPP data once connected.
+ //That's why we need to check the state here
+ DEBUG_TRACE("%s() ctrl pkt", __FUNCTION__);
+ *dst = CP_LKM_WRAPPER_DST_CTRL;
+ }
+ else{
+ *dst = CP_LKM_WRAPPER_DST_DATA;
+ }
+ return result;
+}
+
+#define ASIX_ENABLE_PADDING 0xffff0000
+#define ASIX_HDR_MASK 0x0000ffff
+#define ASIX_16BIT_EVEN_MASK 0xfffe
+
+//============================================== wrapper specific functions
+static int cp_lkm_asix_wrapper_send(void* ctxt, int src, int mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out)
+{
+ int pad_len;
+ u32 pkt_len;
+ u32 padding = ASIX_ENABLE_PADDING;
+
+ *skb_out = NULL;
+
+ if(!skb_in) {
+ DEBUG_ERROR("%s() NULL skb_in, shouldn't happen", __FUNCTION__);
+ return CP_LKM_WRAPPER_RES_ERROR;
+ }
+
+ //pad with an extra u32 if the wrapped packet would otherwise end exactly on a 512 byte boundary
+ pad_len = ((skb_in->len + sizeof(u32)) % 512) ? 0 : sizeof(u32);
+ //DEBUG_INFO("%s() wrapping", __FUNCTION__);
+
+ skb_in = cp_lkm_wrapper_skb_make_space(skb_in, sizeof(u32), pad_len);
+ if (!skb_in){
+ DEBUG_INFO("%s() couldn't expand", __FUNCTION__);
+ *skb_out = NULL;
+ return CP_LKM_WRAPPER_RES_ERROR;
+ }
+
+ //generate the len for the header
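+ //low 16 bits are the packet length, high 16 bits are its ones-complement
+ //(e.g. a 0x0040 byte packet yields 0xFFBF0040 before the little-endian swap)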
+ pkt_len = ((skb_in->len ^ ASIX_HDR_MASK) << 16) + skb_in->len;
+ skb_push(skb_in, sizeof(u32));
+ cpu_to_le32s(&pkt_len);
+ memcpy(skb_in->data, &pkt_len, sizeof(u32));
+
+ if (pad_len) {
+ cpu_to_le32s(&padding);
+ memcpy(skb_tail_pointer(skb_in), &padding, sizeof(u32));
+ skb_put(skb_in, sizeof(u32));
+ }
+ //DEBUG_INFO("%s() wrapped", __FUNCTION__);
+ *skb_out = skb_in;
+ return CP_LKM_WRAPPER_RES_DONE;
+}
+
+static int cp_lkm_asix_wrapper_recv(void *ctxt, int *dst, int *mux_id, struct sk_buff *skb_in, struct sk_buff **skb_out)
+{
+ u8 *head;
+ u32 hdr;
+ char *pkt;
+ struct sk_buff *pkt_skb;
+ u16 size;
+ struct cp_lkm_wrapper_context *cpwc = (struct cp_lkm_wrapper_context *)ctxt;
+ int result = CP_LKM_WRAPPER_RES_DONE;
+ cp_lkm_wrapper_state_t wrapper_state = cp_lkm_generic_wrapper_get_state(ctxt, CP_LKM_WRAPPER_DEFAULT_ID);
+
+ *skb_out = NULL;
+
+ //skb_in is NULL when we returned 'again' previously and so the caller is recalling us. This means there should be
+ //a queue'd skb for us to process.
+ if(!skb_in) {
+ DEBUG_TRACE("%s() had a pending", __FUNCTION__);
+ skb_in = skb_dequeue(&cpwc->skb_data_recv_list);
+ }
+ if(!skb_in) {
+ //nothing more to do
+ DEBUG_TRACE("%s() done", __FUNCTION__);
+ goto asix_recv_done;
+ }
+ if(skb_in->len < sizeof(u32)){
+ DEBUG_ERROR("%s() not enough data", __FUNCTION__);
+ result = CP_LKM_WRAPPER_RES_ERROR;
+ goto asix_recv_done;
+ }
+
+ //read the hdr off the front
+ head = (u8 *) skb_in->data;
+ memcpy(&hdr, head, sizeof(u32));
+ le32_to_cpus(&hdr);
+ pkt = head + sizeof(u32);
+ skb_pull(skb_in, sizeof(u32));
+
+ //the complement sizes don't match, what to do? just keep going
+ if ((short)(hdr & ASIX_HDR_MASK) !=
+ ~((short)((hdr & ~(ASIX_HDR_MASK)) >> 16))) {
+ DEBUG_INFO("%s(), bad length", __FUNCTION__);
+ }
+ // get the packet length
+ size = (u16) (hdr & ASIX_HDR_MASK);
+
+ //if exact fit, send it
+ if ((skb_in->len) - ((size + 1) & ASIX_16BIT_EVEN_MASK) == 0){
+ DEBUG_TRACE("%s(), exact fit", __FUNCTION__);
+ *skb_out = skb_in;
+ skb_in = NULL; //so we don't free it below
+ goto asix_recv_done;
+ }
+
+ if (size > ETH_FRAME_LEN || size > skb_in->len) {
+ //deverr(dev,"asix_rx_fixup() Bad RX Length %d", size);
+ DEBUG_ERROR("%s() too big or buff too small", __FUNCTION__);
+
+ result = CP_LKM_WRAPPER_RES_ERROR;
+ goto asix_recv_done;
+ }
+
+ //multiple pkts in this one. Have to copy them
+ pkt_skb = skb_clone(skb_in, GFP_ATOMIC);
+ if (!pkt_skb) {
+ result = CP_LKM_WRAPPER_RES_ERROR;
+ goto asix_recv_done;
+ }
+ pkt_skb->len = size;
+ pkt_skb->data = pkt;
+ skb_set_tail_pointer(pkt_skb, size);
+ *skb_out = pkt_skb;
+
+ //This skb has multiple pkts. We just cloned the first pkt into pkt_skb above. Move past that data and if there
+ //is any more data left, enqueue it and return 'again' so we can process it.
+ skb_pull(skb_in, (size + 1) & ASIX_16BIT_EVEN_MASK);
+
+ //if what remains isn't even a hdr's worth of data, something went wrong; otherwise requeue it and tell the caller to come again
+ if (skb_in->len <= sizeof(u32)){
+ DEBUG_ERROR("%s() overflowed", __FUNCTION__);
+ result = CP_LKM_WRAPPER_RES_ERROR;
+ goto asix_recv_done;
+ }
+
+ DEBUG_TRACE("%s() more to do", __FUNCTION__);
+ skb_queue_tail(&cpwc->skb_data_recv_list, skb_in);
+ skb_in = NULL;
+ result = CP_LKM_WRAPPER_RES_AGAIN;
+
+asix_recv_done:
+ if(skb_in) {
+ dev_kfree_skb_any(skb_in);
+ }
+ //if error, clear the out skb if any
+ if(result == CP_LKM_WRAPPER_RES_ERROR) {
+ if(*skb_out) {
+ dev_kfree_skb_any(*skb_out);
+ *skb_out = NULL;
+ }
+ }
+ DEBUG_TRACE("%s() done result: 0x%x skb_out:%p", __FUNCTION__, result, *skb_out);
+ if(wrapper_state == CP_LKM_WRAPPER_CTRL) {
+ DEBUG_TRACE("%s() ctrl pkt", __FUNCTION__);
+ *dst = CP_LKM_WRAPPER_DST_CTRL;
+ }
+ else{
+ *dst = CP_LKM_WRAPPER_DST_DATA;
+ }
+
+ return result;
+}
+
+// asix88179 defines
+#define RX_HDR_CRC_ERR (1U << 31) // should this be 29?
+#define RX_HDR_DROP_ERR (1U << 30) // should this be 31?
+#define RX_HDR_L3CSUM_ERR 2
+#define RX_HDR_L4CSUM_ERR 1
+#define RX_HDR_L4_TYPE_UDP 4
+#define RX_HDR_L4_TYPE_TCP 16
+#define RX_HDR_L4_TYPE_MASK 0x1c
+
+struct cp_lkm_asix88179_wrapper_context {
+ struct cp_lkm_wrapper_context common;
+ u32 max_transfer_len;
+ u32 *pkt_hdr;
+ int pkt_cnt;
+};
+
+#define ASIX_88179_ENABLE_PADDING 0x80008000
+#define ASIX_88179_13BIT_MASK 0x1fff
+#define ASIX_88179_8BIT_BOUNDARY_MASK 0xFFF8
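+//88179 tx framing: two little-endian u32 headers are prepended to each packet. The first carries the
+//packet length, the second the TSO mss, with ASIX_88179_ENABLE_PADDING set when the payload plus the
+//8 header bytes would otherwise land exactly on a max-transfer boundary.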
+
+static int cp_lkm_asix88179_wrapper_send(void *ctxt, int src, int mux_id, struct sk_buff *skb_in, struct sk_buff **skb_out)
+{
+ struct sk_buff *skb2;
+ struct cp_lkm_asix88179_wrapper_context *asix88179_wc = (struct cp_lkm_asix88179_wrapper_context *)ctxt;
+ u32 hdr1;
+ u32 hdr2;
+ int frame_size = asix88179_wc->max_transfer_len;
+ u32 mss;
+ *skb_out = NULL;
+
+ mss = skb_shinfo(skb_in)->gso_size;
+
+ hdr1 = skb_in->len;
+ hdr2 = mss;
+ if (((skb_in->len + 8) % frame_size) == 0) {
+ hdr2 |= ASIX_88179_ENABLE_PADDING; // enable padding
+ }
+
+ // make space for both headers
+ skb2 = cp_lkm_wrapper_skb_make_space(skb_in, sizeof(u32) * 2, 0);
+ if (!skb2) {
+ // skb_in is already freed in cp_lkm_wrapper_skb_make_space
+ printk("%s() - could not make space\n", __FUNCTION__);
+ return CP_LKM_WRAPPER_RES_ERROR;
+ }
+ skb_in = skb2;
+
+ cpu_to_le32s(&hdr2);
+ skb_push(skb_in, sizeof(u32));
+ skb_copy_to_linear_data(skb_in, &hdr2, sizeof(u32));
+
+ cpu_to_le32s(&hdr1);
+ skb_push(skb_in, sizeof(u32));
+ skb_copy_to_linear_data(skb_in, &hdr1, sizeof(u32));
+
+ *skb_out = skb_in;
+ return CP_LKM_WRAPPER_RES_DONE;
+}
+
+static void cp_lkm_asix88179_check_csum(struct sk_buff *skb, u32 *pkt_hdr)
+{
+ u32 err_ind = *pkt_hdr;
+ bool hdr_err = (err_ind & RX_HDR_L3CSUM_ERR) || (err_ind & RX_HDR_L4CSUM_ERR);
+ bool csum_valid = ((err_ind & RX_HDR_L4_TYPE_MASK) == RX_HDR_L4_TYPE_TCP) || ((err_ind & RX_HDR_L4_TYPE_MASK) == RX_HDR_L4_TYPE_UDP);
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ if (!hdr_err && csum_valid) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+}
+
+static unsigned long total_pkt_cnt = 0;
+static unsigned long total_pkt_processed = 0;
+
+static int cp_lkm_asix88179_wrapper_recv(void *ctxt, int *dst, int *mux_id, struct sk_buff *skb_in, struct sk_buff **skb_out)
+{
+ u32 hdr = 0;
+ u16 hdr_off;
+ struct cp_lkm_asix88179_wrapper_context* cp_88179wc = (struct cp_lkm_asix88179_wrapper_context*)ctxt;
+ struct cp_lkm_wrapper_context *cpwc = (struct cp_lkm_wrapper_context *)ctxt;
+ int result = CP_LKM_WRAPPER_RES_DONE;
+ struct sk_buff *pkt_skb;
+ u16 pkt_len;
+ bool crc_runt;
+ unsigned int end_len;
+
+ cp_lkm_wrapper_state_t wrapper_state = cp_lkm_generic_wrapper_get_state(ctxt, CP_LKM_WRAPPER_DEFAULT_ID);
+
+ *skb_out = NULL;
+
+ DEBUG_TRACE("%s()", __FUNCTION__);
+
+ //skb_in is NULL when we returned 'again' previously and so the caller is recalling us. This means there should be
+ //a queue'd skb for us to process.
+ if(!skb_in) {
+ DEBUG_TRACE("%s() had a pending", __FUNCTION__);
+ skb_in = skb_dequeue(&cpwc->skb_data_recv_list);
+ } else {
+ DEBUG_TRACE("%s() 1st pkt of queue, skb_in->len=%x", __FUNCTION__, skb_in->len);
+ skb_trim(skb_in, skb_in->len - 4);
+ memcpy(&hdr, skb_tail_pointer(skb_in), sizeof(u32));
+ le32_to_cpus(&hdr);
+
+ cp_88179wc->pkt_cnt = (u16)hdr;
+ total_pkt_cnt += cp_88179wc->pkt_cnt;
+ hdr_off = (u16)(hdr >> 16);
+ cp_88179wc->pkt_hdr = (u32 *)(skb_in->data + hdr_off);
+ le32_to_cpus(cp_88179wc->pkt_hdr);
+ }
+ if(!skb_in) {
+ //nothing more to do
+ DEBUG_TRACE("%s() done", __FUNCTION__);
+ goto asix_recv_done;
+ }
+ if(skb_in->len < sizeof(u32)){
+ DEBUG_ERROR("%s() not enough data", __FUNCTION__);
+ result = CP_LKM_WRAPPER_RES_ERROR;
+ goto asix_recv_done;
+ }
+
+ while (cp_88179wc->pkt_cnt--) {
+
+ pkt_len = (*cp_88179wc->pkt_hdr >> 16) & ASIX_88179_13BIT_MASK;
+ end_len = (pkt_len + 7) & ASIX_88179_8BIT_BOUNDARY_MASK;
+
+ DEBUG_TRACE("%s() rx_hdr = %x, pkt_cnt=%x, pkt_hdr=%x, pkt_len=%x", __FUNCTION__, hdr, cp_88179wc->pkt_cnt, cp_88179wc->pkt_hdr, pkt_len);
+ // Check CRC or runt packet
+ crc_runt = (*cp_88179wc->pkt_hdr & RX_HDR_CRC_ERR) || (*cp_88179wc->pkt_hdr & RX_HDR_DROP_ERR);
+ if (crc_runt) {
+ skb_pull(skb_in, end_len);
+ cp_88179wc->pkt_hdr++;
+ le32_to_cpus(cp_88179wc->pkt_hdr);
+
+ DEBUG_TRACE("%s() crc error or runt", __FUNCTION__);
+ continue;
+ }
+
+ total_pkt_processed++;
+
+ //multiple packets in this one. Have to copy them
+ pkt_skb = skb_clone(skb_in, GFP_ATOMIC);
+ if (!pkt_skb) {
+ result = CP_LKM_WRAPPER_RES_ERROR;
+ goto asix_recv_done;
+ }
+
+ pkt_skb->data = skb_in->data + 2;
+ pkt_skb->len = pkt_len;
+ pkt_skb->truesize = pkt_len + sizeof(struct sk_buff);
+ skb_set_tail_pointer(pkt_skb, pkt_len);
+ cp_lkm_asix88179_check_csum(pkt_skb, cp_88179wc->pkt_hdr);
+ *skb_out = pkt_skb;
+
+ if (cp_88179wc->pkt_cnt != 0) {
+ //This skb has multiple pkts. We just cloned the first pkt into pkt_skb above. Move past that data and if there
+ //is any more data left, enqueue it and return 'again' so we can process it.
+ skb_pull(skb_in, end_len);
+ cp_88179wc->pkt_hdr++;
+ le32_to_cpus(cp_88179wc->pkt_hdr);
+
+ DEBUG_TRACE("%s() more to do", __FUNCTION__);
+ skb_queue_tail(&cpwc->skb_data_recv_list, skb_in);
+ skb_in = NULL;
+ result = CP_LKM_WRAPPER_RES_AGAIN;
+ }
+ break;
+ }
+
+asix_recv_done:
+ if(skb_in) {
+ dev_kfree_skb_any(skb_in);
+ }
+ //if error, clear the out skb if any
+ if(result == CP_LKM_WRAPPER_RES_ERROR) {
+ if(*skb_out) {
+ dev_kfree_skb_any(*skb_out);
+ *skb_out = NULL;
+ }
+ }
+ DEBUG_TRACE("%s() done result: 0x%x skb_out:%p", __FUNCTION__, result, *skb_out);
+ if(wrapper_state == CP_LKM_WRAPPER_CTRL) {
+ DEBUG_TRACE("%s() ctrl pkt", __FUNCTION__);
+ *dst = CP_LKM_WRAPPER_DST_CTRL;
+ } else{
+ *dst = CP_LKM_WRAPPER_DST_DATA;
+ }
+
+ return result;
+}
+
+static void* cp_lkm_asix88179_wrapper_alloc(cp_lkm_wrapper_type_t wrapper, void* wrapper_info, int len)
+{
+ struct cp_lkm_asix88179_wrapper_context* asix88179_wc;
+ struct cp_lkm_wrapper_context* wc;
+
+ asix88179_wc = kzalloc(sizeof(struct cp_lkm_asix88179_wrapper_context), GFP_KERNEL);
+ if(!asix88179_wc) {
+ return NULL;
+ }
+
+ if(wrapper_info) {
+ asix88179_wc->max_transfer_len = *((u32*)(wrapper_info));
+ DEBUG_INFO("%s(), max transfer:%d", __FUNCTION__, asix88179_wc->max_transfer_len);
+ } else {
+ DEBUG_ERROR("%s(),no max transfer set", __FUNCTION__);
+ }
+
+ wc = (struct cp_lkm_wrapper_context*)asix88179_wc;
+ cp_lkm_wrapper_common_init(wc);
+ wc->wrapper = wrapper;
+ wc->send = cp_lkm_asix88179_wrapper_send;
+ wc->recv = cp_lkm_asix88179_wrapper_recv;
+
+ return asix88179_wc;
+
+}
+
+// ===== pegasus wrapper
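+//Pegasus framing is minimal: each packet sent to the device is prefixed with a 2 byte little-endian
+//length, and each received packet leads with the same 2 byte length, which is stripped before handing it up.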
+static int cp_lkm_pegasus_wrapper_send(void* ctxt, int src, int mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out)
+{
+ int padlen = 0;
+ u32 packet_len;
+ u32 hdrlen = 2;
+
+ *skb_out = NULL;
+
+ if(skb_in == NULL) {
+ DEBUG_ERROR("%s() NULL skb_in, shouldn't happen", __FUNCTION__);
+ return CP_LKM_WRAPPER_RES_ERROR;
+ }
+ //DEBUG_INFO("%s() wrapping", __FUNCTION__);
+
+ skb_in = cp_lkm_wrapper_skb_make_space(skb_in, hdrlen, padlen);
+ if (!skb_in){
+ DEBUG_ERROR("%s() couldn't expand", __FUNCTION__);
+ *skb_out = NULL;
+ return CP_LKM_WRAPPER_RES_ERROR;
+ }
+
+ //prepend the 2 byte little-endian length header
+ packet_len = skb_in->len;
+ skb_push(skb_in, sizeof(u16));
+ WRAPPER_WRITE_U16(skb_in->data, cpu_to_le16(packet_len));
+
+ //DEBUG_INFO("%s() wrapped", __FUNCTION__);
+ *skb_out = skb_in;
+ return CP_LKM_WRAPPER_RES_DONE;
+}
+
+static int cp_lkm_pegasus_wrapper_recv(void* ctxt, int* dst, int* mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out)
+{
+ u32 hdr_size;
+ u32 pkt_size;
+
+ //DEBUG_INFO("%s() unwrap it", __FUNCTION__);
+
+ *skb_out = NULL;
+ *dst = CP_LKM_WRAPPER_DST_DATA;
+ hdr_size = 2;
+
+ if(skb_in == NULL) {
+ //nothing more to do
+ DEBUG_TRACE("%s() done", __FUNCTION__);
+ return CP_LKM_WRAPPER_RES_DONE;
+ }
+
+ // If don't have enough for the headers, it is an error
+ if(skb_in->len < hdr_size) {
+ dev_kfree_skb_any(skb_in);
+ return CP_LKM_WRAPPER_RES_ERROR;
+ }
+ //read the pkt size and make sure we have enough data
+ pkt_size = le16_to_cpu(WRAPPER_READ_U16(skb_in->data));
+ if(pkt_size > skb_in->len){
+ DEBUG_ERROR("%s() bad data pkt pkt_size:%d, data size: %d", __FUNCTION__, pkt_size, skb_in->len);
+ dev_kfree_skb_any(skb_in);
+ return CP_LKM_WRAPPER_RES_ERROR;
+ }
+ //remove the pegasus length hdr
+ skb_pull(skb_in, hdr_size);
+ *skb_out = skb_in;
+ DEBUG_TRACE("%s() data pkt", __FUNCTION__);
+
+ return CP_LKM_WRAPPER_RES_DONE;
+}
+
+//===================direct ip wrapper
+#define SIERRA_DIRECTIP_UPLINK_DATA_INDICATION_MSGID 0x3F
+#define SIERRA_DIRECTIP_UPLINK_DATA_INDICATION_EXTENDED_MSGID 0x0002
+#define SIERRA_DIRECTIP_UPLINK_DATA_INDICATION_MSG_SPECIFIC_ID 0x00
+#define SIERRA_DIRECTIP_HDR_SIZE 6
+#define SIERRA_DIRECTIP_ETHER_SIZE 14
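+//Uplink header built by the send path below (big-endian fields): a 6 byte HIP hdr followed by a 14 byte
+//fake ethernet hdr:
+// u16 length (payload + 14 byte ethernet hdr; the HIP hdr itself is not counted)
+// u8  msg indication id (0x3F)
+// u8  msg specific id (0x00)
+// u16 extended msg id (0x0002)
+// 12 zeroed ethernet address bytes
+// u16 ethernet protocol (0x0800)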
+static int cp_lkm_dip_wrapper_send(void* ctxt, int src, int mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out)
+{
+ u32 packet_len;
+ u32 hdr_len;
+ cp_lkm_wrapper_state_t wrapper_state = cp_lkm_generic_wrapper_get_state(ctxt, CP_LKM_WRAPPER_DEFAULT_ID);
+
+ *skb_out = NULL;
+ //DEBUG_INFO("%s() wrap it", __FUNCTION__);
+
+ if(skb_in == NULL) {
+ DEBUG_ERROR("%s() NULL skb_in, shouldn't happen", __FUNCTION__);
+ return CP_LKM_WRAPPER_RES_ERROR;
+ }
+
+ //in ctrl mode, we don't put a wrapper on (only after data comes up)
+ if(wrapper_state == CP_LKM_WRAPPER_CTRL) {
+ *skb_out = skb_in;
+ DEBUG_TRACE("%s() ctrl pkt", __FUNCTION__);
+ return CP_LKM_WRAPPER_RES_DONE;
+ }
+ //DEBUG_INFO("%s() wrapping", __FUNCTION__);
+
+ // Add header:
+ // HIP header: 6 bytes
+ // Fake ethernet hdr: 14 bytes
+ hdr_len = SIERRA_DIRECTIP_HDR_SIZE + SIERRA_DIRECTIP_ETHER_SIZE;
+ skb_in = cp_lkm_wrapper_skb_make_space(skb_in, hdr_len, 0);
+ if (!skb_in){
+ DEBUG_ERROR("%s() couldn't expand", __FUNCTION__);
+ return CP_LKM_WRAPPER_RES_ERROR;
+ }
+
+ packet_len = skb_in->len;
+ packet_len += SIERRA_DIRECTIP_ETHER_SIZE; //add bytes for the ethernet hdr (the dip hdr isn't counted in the len)
+
+ //ethernet protocol
+ skb_push(skb_in, sizeof(u16));
+ WRAPPER_WRITE_U16(skb_in->data, cpu_to_be16(0x0800));
+
+ //bogus ethernet addrs (modem side doesn't care)
+ skb_push(skb_in, 12);
+ memset(skb_in->data, 0, 12);
+
+ //extended msg id
+ skb_push(skb_in, sizeof(u16));
+ WRAPPER_WRITE_U16(skb_in->data, cpu_to_be16(SIERRA_DIRECTIP_UPLINK_DATA_INDICATION_EXTENDED_MSGID));
+
+ //msg specific id
+ skb_push(skb_in, 1);
+ WRAPPER_WRITE_U8(skb_in->data, SIERRA_DIRECTIP_UPLINK_DATA_INDICATION_MSG_SPECIFIC_ID);
+
+ //msg indication id
+ skb_push(skb_in, 1);
+ WRAPPER_WRITE_U8(skb_in->data, SIERRA_DIRECTIP_UPLINK_DATA_INDICATION_MSGID);
+
+ //len
+ skb_push(skb_in, sizeof(u16));
+ WRAPPER_WRITE_U16(skb_in->data, cpu_to_be16(packet_len));
+
+ //DEBUG_INFO("%s() data pkt", __FUNCTION__);
+ *skb_out = skb_in;
+ return CP_LKM_WRAPPER_RES_DONE;
+}
+
+static int cp_lkm_dip_wrapper_recv(void* ctxt, int* dst, int* mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out)
+{
+ u32 hdr_size;
+ u32 pkt_size;
+ cp_lkm_wrapper_state_t wrapper_state = cp_lkm_generic_wrapper_get_state(ctxt, CP_LKM_WRAPPER_DEFAULT_ID);
+
+ //DEBUG_INFO("%s() unwrap it", __FUNCTION__);
+
+ *skb_out = NULL;
+ *dst = CP_LKM_WRAPPER_DST_DATA;
+ hdr_size = SIERRA_DIRECTIP_HDR_SIZE + SIERRA_DIRECTIP_ETHER_SIZE;
+
+ if(skb_in == NULL) {
+ //nothing more to do
+ DEBUG_TRACE("%s() done", __FUNCTION__);
+ return CP_LKM_WRAPPER_RES_DONE;
+ }
+
+ //There are no headers on the pkts when in ctrl mode. Only in data mode
+ if(wrapper_state == CP_LKM_WRAPPER_CTRL) {
+ DEBUG_TRACE("%s() ctrl pkt", __FUNCTION__);
+ *skb_out = skb_in;
+ *dst = CP_LKM_WRAPPER_DST_CTRL;
+ return CP_LKM_WRAPPER_RES_DONE;
+ }
+
+ //from here down, they are data packets
+
+ // If don't have enough for the headers, it is an error
+ if(skb_in->len < hdr_size) {
+ dev_kfree_skb_any(skb_in);
+ return CP_LKM_WRAPPER_RES_ERROR;
+ }
+ //read the pkt size and make sure have enough data. the pkt size
+ //doesn't include the dip header so add it in for comparison
+ pkt_size = be16_to_cpu(WRAPPER_READ_U16(skb_in->data));
+ if((pkt_size+SIERRA_DIRECTIP_HDR_SIZE) > skb_in->len){
+ dev_kfree_skb_any(skb_in);
+ return CP_LKM_WRAPPER_RES_ERROR;
+ }
+ //remove the dip and ethernet hdrs
+ skb_pull(skb_in, hdr_size);
+ *skb_out = skb_in;
+ DEBUG_TRACE("%s() data pkt", __FUNCTION__);
+
+ return CP_LKM_WRAPPER_RES_DONE;
+}
+
+//===================== msrndis wrapper
+#define MSRNDIS_REMOTE_NDIS_PACKET_MSG 0x00000001 // data packet
+
+struct cp_lkm_msrndis_wrapper_context{
+ struct cp_lkm_wrapper_context common;
+ u32 max_transfer_len;
+};
+
+// data pkt header
+struct msrndis_data_hdr { // data packet message header (msrndis_hdr precedes this header) (payload immediately follows)
+ u32 data_offset;
+ u32 data_length;
+ u32 OOB_data_offset;
+ u32 OOB_data_length;
+ u32 num_OOB_data_elements;
+ u32 per_packet_info_offset;
+ u32 per_packet_info_length;
+ u32 reserved[2];
+}__attribute__((packed));
+
+struct msrndis_hdr { // general msrndis header at beginning of all messages
+ u32 message_type;
+ u32 message_length;
+} __attribute__((packed));
+
+static int cp_lkm_msrndis_wrapper_send(void* ctxt, int src, int mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out)
+{
+ u32 data_hdr_len;
+ u32 msg_hdr_len;
+ struct msrndis_data_hdr hdr;
+ u32 packet_len;
+
+ *skb_out = NULL;
+ DEBUG_TRACE("%s() wrap it", __FUNCTION__);
+
+ if(skb_in == NULL) {
+ DEBUG_ERROR("%s() NULL skb_in, shouldn't happen", __FUNCTION__);
+ return CP_LKM_WRAPPER_RES_ERROR;
+ }
+
+ // This bad boy has pkt data plus a data hdr plus a msg header (it was created by microsoft after all)
+ packet_len = skb_in->len;
+ data_hdr_len = sizeof(struct msrndis_data_hdr);
+ msg_hdr_len = sizeof(struct msrndis_hdr);
+
+ //need to add space for both headers
+ skb_in = cp_lkm_wrapper_skb_make_space(skb_in, data_hdr_len + msg_hdr_len, 0);
+ if (!skb_in){
+ DEBUG_ERROR("%s() couldn't expand", __FUNCTION__);
+ *skb_out = NULL;
+ return CP_LKM_WRAPPER_RES_ERROR;
+ }
+
+ //create the data hdr
+ memset(&hdr, 0x00, data_hdr_len);
+ hdr.data_offset = cpu_to_le32(data_hdr_len); //data starts after the data hdr
+ hdr.data_length = cpu_to_le32(packet_len); //the data length counts only the payload, not the header
+ skb_push(skb_in, data_hdr_len);
+ memcpy(skb_in->data, &hdr, data_hdr_len);
+
+ //Create the msg hdr, the length includes the msg header size as well
+ packet_len = skb_in->len + msg_hdr_len;
+
+ skb_push(skb_in, sizeof(u32));
+ WRAPPER_WRITE_U32(skb_in->data, cpu_to_le32(packet_len));
+
+ skb_push(skb_in, sizeof(u32));
+ WRAPPER_WRITE_U32(skb_in->data, cpu_to_le32(MSRNDIS_REMOTE_NDIS_PACKET_MSG));
+
+ DEBUG_TRACE("%s() data pkt", __FUNCTION__);
+ *skb_out = skb_in;
+ return CP_LKM_WRAPPER_RES_DONE;
+}
+
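+//RNDIS receive notes: a single USB transfer can hold one complete message, several messages back to back,
+//or only part of a message. A transfer that exactly fills max_transfer_len means the message continues in
+//the next transfer, so recv_state is set to SPLIT and the partial bytes are parked on skb_data_recv_list
+//until the remainder arrives. Anything that is not a REMOTE_NDIS_PACKET_MSG is passed up whole (headers
+//included) as a control message.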
+static int cp_lkm_msrndis_wrapper_recv(void* ctxt, int* dst, int* mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out)
+{
+ u32 data_hdr_len = sizeof(struct msrndis_data_hdr);
+ u32 msg_hdr_len = sizeof(struct msrndis_hdr);
+ struct msrndis_data_hdr hdr;
+ u32 adv = 0;
+ u32 out_len;
+ u32 pkt_len;
+ u32 pkt_type;
+ struct sk_buff *skb_working = NULL;
+
+ struct cp_lkm_wrapper_context* cpwc = (struct cp_lkm_wrapper_context*)ctxt;
+ struct cp_lkm_msrndis_wrapper_context* msrndis_wc = (struct cp_lkm_msrndis_wrapper_context*)ctxt;
+
+// DEBUG_INFO("%s() unwrap it", __FUNCTION__);
+ *skb_out = NULL;
+ *dst = CP_LKM_WRAPPER_DST_DATA;
+
+ if (skb_in) {
+ cpwc->recv_state = CP_LKM_WRAPPER_STATE_INIT;
+ DEBUG_TRACE("%s() done", __FUNCTION__);
+ if (0 == skb_in->len) {
+ dev_kfree_skb_any(skb_in);
+ skb_in = NULL;
+ } else if (msrndis_wc->max_transfer_len == skb_in->len) {
+ DEBUG_INFO("%s() - max transfer - setting split", __FUNCTION__);
+ cpwc->recv_state = CP_LKM_WRAPPER_STATE_SPLIT;
+ }
+ }
+
+ skb_working = skb_dequeue(&cpwc->skb_data_recv_list);
+
+ if (!skb_working) {
+ skb_working = skb_in;
+ } else if (skb_in) {
+ // append data to skb_working
+ skb_working = cp_lkm_wrapper_skb_make_space(skb_working, 0, skb_in->len);
+ if(!skb_working) {
+ DEBUG_WARN("%s() failed to make space", __FUNCTION__);
+ dev_kfree_skb_any(skb_in);
+ return CP_LKM_WRAPPER_RES_ERROR;
+ }
+ memcpy(skb_tail_pointer(skb_working), skb_in->data, skb_in->len);
+ skb_put(skb_working, skb_in->len);
+ dev_kfree_skb_any(skb_in);
+ }
+
+ if (!skb_working) {
+ return CP_LKM_WRAPPER_RES_DONE;
+ }
+
+ if(skb_working->len < msg_hdr_len) {
+ if (CP_LKM_WRAPPER_STATE_SPLIT != cpwc->recv_state) {
+ DEBUG_INFO("%s() - flushing remaining byte count:%d", __FUNCTION__, skb_working->len);
+ dev_kfree_skb_any(skb_working);
+ return CP_LKM_WRAPPER_RES_ERROR;
+ }
+
+ // expecting a split packet
+ skb_queue_tail(&cpwc->skb_data_recv_list, skb_working);
+
+ return CP_LKM_WRAPPER_RES_DONE;
+ }
+
+ pkt_type = le32_to_cpu(WRAPPER_READ_U32(skb_working->data));
+ skb_pull(skb_working, 4);
+
+ pkt_len = le32_to_cpu(WRAPPER_READ_U32(skb_working->data));
+ skb_pull(skb_working, 4);
+
+ // try to determine if this packet len is reasonable
+ if (pkt_len > (4 * 1024)) {
+ // probably bad packet length - drop the packets
+ DEBUG_WARN("%s() - bad packet len:%x", __FUNCTION__, pkt_len);
+ DEBUG_WARN("%s() - flushing remaining byte count:%d", __FUNCTION__, skb_working->len);
+
+ dev_kfree_skb_any(skb_working);
+// DEBUG_ASSERT(0, "bad packet len:%d", pkt_len);
+
+ return CP_LKM_WRAPPER_RES_ERROR;
+ }
+
+ if (skb_working->len < data_hdr_len) {
+ if (CP_LKM_WRAPPER_STATE_SPLIT != cpwc->recv_state) {
+ DEBUG_INFO("%s() - flushing remaining byte count:%d", __FUNCTION__, skb_working->len);
+ dev_kfree_skb_any(skb_working);
+ return CP_LKM_WRAPPER_RES_ERROR;
+ }
+
+ // expecting a split packet
+ skb_push(skb_working, msg_hdr_len);
+ skb_queue_tail(&cpwc->skb_data_recv_list, skb_working);
+
+ return CP_LKM_WRAPPER_RES_DONE;
+ }
+ memcpy(&hdr, skb_working->data, data_hdr_len);
+ hdr.data_offset = le32_to_cpu(hdr.data_offset);
+ hdr.data_length = le32_to_cpu(hdr.data_length);
+ skb_pull(skb_working, data_hdr_len);
+
+ //account for any gaps between the end of the hdr and the start of data
+ if(hdr.data_offset > data_hdr_len) {
+ adv = hdr.data_offset - data_hdr_len;
+ if(skb_working->len < adv) {
+ if (CP_LKM_WRAPPER_STATE_SPLIT != cpwc->recv_state) {
+ DEBUG_INFO("%s() - flushing remaining byte count:%d", __FUNCTION__, skb_working->len);
+ dev_kfree_skb_any(skb_working);
+ return CP_LKM_WRAPPER_RES_ERROR;
+ }
+
+ // expecting a split packet
+ skb_push(skb_working, msg_hdr_len + data_hdr_len);
+ skb_queue_tail(&cpwc->skb_data_recv_list, skb_working);
+ return CP_LKM_WRAPPER_RES_DONE;
+ }
+
+ skb_pull(skb_working, adv);
+ }
+
+ if(skb_working->len < hdr.data_length) {
+ if (CP_LKM_WRAPPER_STATE_SPLIT != cpwc->recv_state) {
+ DEBUG_INFO("%s() - flushing remaining byte count:%d", __FUNCTION__, skb_working->len);
+ dev_kfree_skb_any(skb_working);
+ return CP_LKM_WRAPPER_RES_ERROR;
+ }
+ DEBUG_TRACE("%s() data pkt", __FUNCTION__);
+
+ // expecting a split packet
+ skb_push(skb_working, msg_hdr_len + data_hdr_len + adv);
+ skb_queue_tail(&cpwc->skb_data_recv_list, skb_working);
+ return CP_LKM_WRAPPER_RES_DONE;
+ }
+
+ out_len = hdr.data_length;
+
+ if (MSRNDIS_REMOTE_NDIS_PACKET_MSG != pkt_type) {
+ out_len = msg_hdr_len + data_hdr_len + adv + hdr.data_length;
+ skb_push(skb_working, msg_hdr_len + data_hdr_len + adv);
+
+ }
+
+ *skb_out = skb_clone(skb_working, GFP_ATOMIC);
+ if (!(*skb_out)) {
+ DEBUG_WARN("%s() - couldn't clone skb", __FUNCTION__);
+ dev_kfree_skb_any(skb_working);
+ return CP_LKM_WRAPPER_RES_ERROR;
+ }
+ skb_set_tail_pointer(*skb_out, out_len);
+ (*skb_out)->len = out_len;
+
+ skb_pull(skb_working, out_len);
+
+ if (skb_working->len) {
+ DEBUG_INFO("%s() complete pkt with remaining data: %d", __FUNCTION__, skb_working->len);
+ skb_queue_tail(&cpwc->skb_data_recv_list, skb_working);
+ *dst = (MSRNDIS_REMOTE_NDIS_PACKET_MSG == pkt_type) ? CP_LKM_WRAPPER_DST_DATA : CP_LKM_WRAPPER_DST_CTRL;
+ return CP_LKM_WRAPPER_RES_AGAIN;
+ }
+
+ dev_kfree_skb_any(skb_working);
+ *dst = (MSRNDIS_REMOTE_NDIS_PACKET_MSG == pkt_type) ? CP_LKM_WRAPPER_DST_DATA : CP_LKM_WRAPPER_DST_CTRL;
+ return CP_LKM_WRAPPER_RES_DONE;
+
+}
+
+static void* cp_lkm_msrndis_wrapper_alloc(cp_lkm_wrapper_type_t wrapper, void* wrapper_info, int len)
+{
+ struct cp_lkm_msrndis_wrapper_context* msrndis_wc;
+ struct cp_lkm_wrapper_context* wc;
+
+ msrndis_wc = kzalloc(sizeof(struct cp_lkm_msrndis_wrapper_context), GFP_KERNEL);
+ if(!msrndis_wc) {
+ return NULL;
+ }
+
+ if(wrapper_info) {
+ msrndis_wc->max_transfer_len = *((u32*)(wrapper_info));
+ DEBUG_INFO("%s(), max transfer:%d", __FUNCTION__, msrndis_wc->max_transfer_len);
+ }
+ else{
+ DEBUG_ERROR("%s(),no max transfer set", __FUNCTION__);
+ }
+
+ wc = (struct cp_lkm_wrapper_context*)msrndis_wc;
+ cp_lkm_wrapper_common_init(wc);
+ wc->wrapper = wrapper;
+ wc->send = cp_lkm_msrndis_wrapper_send;
+ wc->recv = cp_lkm_msrndis_wrapper_recv;
+
+ return msrndis_wc;
+
+}
+
+
+
+//============== NCM wrapper
+//There are 2 modes of operation for an NCM device, 16 bit and 32 bit. 16 bit mode allows transfer blocks up to 64K in length,
+//while 32 bit mode allows blocks up to 4G in length. We use 16 bit mode, which is set during plug.
+
+#define NTB_HEADER_SIGNATURE 0x484D434E //"NCMH" 16 bit transfer blocks signature.
+#define NDP_SIGNATURE_NO_CRC 0x304D434E //"NCM0"
+
+///
+/// THIS STRUCTURE MUST BE THE SAME AS THE ONE IN ncm_modem.h
+///
+struct ncm_ntb_parameters{
+ u16 wLength;
+ u16 bmNtbFormatsSupported;
+ u32 dwNtbInMaxSize;
+ u16 wNdpInDivisor;
+ u16 wNdpInPayloadRemainder;
+ u16 wNdpInAlignment;
+ u16 reserved;
+ u32 dwNtbOutMaxSize;
+ u16 wNdpOutDivisor;
+ u16 wNdpOutPayloadRemainder;
+ u16 wNdpOutAlignment;
+ u16 wNtbOutMaxDatagrams;
+};
+
+//NCM Transfer Header (NTH)
+struct ncm_transfer_header {
+ u32 signature;
+ u16 header_length;
+ u16 sequence_number;
+ u16 ntb_length;//length of entire block
+ u16 ndp_index; //Offset in block in NDP
+}__attribute__ ((packed));
+
+struct ncm_datagram_info {
+ u16 index;
+ u16 length;
+}__attribute__ ((packed));
+
+//NCM Datagram Pointers (NDP)
+struct ncm_datagram_pointers {
+ u32 signature;
+ u16 length; // Size of this NDP. Must be multiple of 4, and at least 0x10
+ u16 next_ndp_index; //offset of next ndp in NTB.
+ struct ncm_datagram_info datagram_info[2]; //Setting to one datagram for now. It's 2 due to the NULL tail item required in the list.
+}__attribute__ ((packed));
+
+struct cp_lkm_ncm_wrapper_context{
+ struct cp_lkm_wrapper_context common;
+ u32 nth_seq_num;
+ u32 datagram_offset;
+ struct ncm_ntb_parameters ntb_parms; // NTB parameters supplied via wrapper_info at alloc time (includes the max transfer sizes)
+};
+
+static int cp_lkm_ncm_wrapper_send(void* ctxt, int src, int mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out)
+{
+ u32 ndp_padding, datagram_padding, align_factor, total_size, header_size;
+ u32 payload;
+ void *ptr = NULL;
+ struct cp_lkm_ncm_wrapper_context* ncmwc = (struct cp_lkm_ncm_wrapper_context*)ctxt;
+
+ *skb_out = NULL;
+
+ //should never see this in here
+ if(skb_in == NULL) {
+ return CP_LKM_WRAPPER_RES_DONE;
+ }
+
+ payload = skb_in->len;
+
+ header_size = sizeof(struct ncm_transfer_header);
+
+ //Need to align the NDP by the align value. offset%align = 0. Add align value -1 and mask off by its inverse to get the aligned offset.
+ //Then subtract current header size to get the padding.
+ align_factor = ncmwc->ntb_parms.wNdpInAlignment - 1;
+ ndp_padding = ((header_size + align_factor) & ~align_factor) - header_size;
+ header_size += ndp_padding;
+
+ header_size += sizeof(struct ncm_datagram_pointers);
+
+ //Need to align the datagram start by the divisor value + the remainder value. offset%divisor = 0. Add divisor value -1 and mask off by its inverse to get the aligned offset.
+ //Then subtract current header size to get the alignment padding and add the remainder to get the total padding.
+ align_factor = ncmwc->ntb_parms.wNdpInDivisor - 1;
+ datagram_padding = (((header_size + align_factor) & ~align_factor) - header_size) + ncmwc->ntb_parms.wNdpInPayloadRemainder;
+
+ header_size += datagram_padding;
+
+ total_size = header_size + payload;
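+ //For example, with (hypothetical) modem parameters wNdpInAlignment=4, wNdpInDivisor=4 and
+ //wNdpInPayloadRemainder=0: the 12 byte NTH is already aligned (ndp_padding=0), the 16 byte NDP ends
+ //at offset 28 which is already divisor-aligned (datagram_padding=0), so header_size=28 and the
+ //datagram starts at offset 28.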
+
+ //Need to account for our max transfer size allowed by modem. Current modem is 400K, should never hit this.
+ if (ncmwc->ntb_parms.dwNtbInMaxSize < total_size) {
+ dev_kfree_skb_any(skb_in);
+ return CP_LKM_WRAPPER_RES_ERROR;
+ }
+
+ //add space for the header to skb_in
+ skb_in = cp_lkm_wrapper_skb_make_space(skb_in, header_size, 0);
+ if(!skb_in) {
+ DEBUG_WARN("%s() couldn't make space", __FUNCTION__);
+ return CP_LKM_WRAPPER_RES_ERROR;
+ }
+
+ //write NCM Pkt hdr
+ ptr = (void *)skb_push(skb_in, header_size);
+ memset(ptr, 0, header_size);
+
+ WRAPPER_WRITE_U32(ptr, cpu_to_le32(NTB_HEADER_SIGNATURE));
+ ptr +=4;
+
+ //Write the header size
+ WRAPPER_WRITE_U16(ptr, cpu_to_le16(sizeof(struct ncm_transfer_header)));
+ ptr +=4; //2 for the length field plus 2 to skip the optional sequence number
+
+ //Total NTB size
+ WRAPPER_WRITE_U16(ptr, cpu_to_le16(skb_in->len));
+ ptr += 2;
+
+ //Index of first ndp
+ WRAPPER_WRITE_U16(ptr, cpu_to_le16(sizeof(struct ncm_transfer_header) + ndp_padding));
+ ptr += (2 + ndp_padding);
+
+ //Write the ndp
+ WRAPPER_WRITE_U32(ptr, cpu_to_le32(NDP_SIGNATURE_NO_CRC));
+ ptr +=4;
+
+ //Write the ndp size
+ WRAPPER_WRITE_U16(ptr, cpu_to_le16(sizeof(struct ncm_datagram_pointers)));
+ ptr +=4; //Moving past 2 reserved as well
+
+ //Write the datagram index. The datagram starts right after the headers
+ WRAPPER_WRITE_U16(ptr, cpu_to_le16(header_size));
+ ptr +=2;
+
+ //Write the datagram length.
+ WRAPPER_WRITE_U16(ptr, cpu_to_le16(payload));
+
+ //tail entry 0'd in memset.
+
+ *skb_out = skb_in;
+ return CP_LKM_WRAPPER_RES_DONE;
+}
+
+/*
+ * -------------------------------------
+ * | Signature | NCM Transfer Block
+ * -------------------------------------
+ * | Header Length |
+ * -------------------------------------
+ * | Sequence Number |
+ * -------------------------------------
+ * | Total Packet Length |
+ * -------------------------------------
+ * | NDP Index |
+ * -------------------------------------
+ *
+ *
+ * -------------------------------------
+ * | Signature | NCM Datagram Pointers
+ * -------------------------------------
+ * | Header Length |
+ * -------------------------------------
+ * | Index to next NDP |
+ * -------------------------------------
+ * | Datagram[0] index |
+ * -------------------------------------
+ * | Datagram[0] length |
+ * -------------------------------------
+ * | Datagram[1] index |
+ * -------------------------------------
+ * | Datagram[1] length |
+ * -------------------------------------
+ * .
+ * .
+ * .
+ * -------------------------------------
+ * | Datagram[n] index |
+ * -------------------------------------
+ * | Datagram[n] length |
+ * -------------------------------------
+ * | 0 | Termination of header
+ * -------------------------------------
+ * | 0 | Termination of header
+ * ------------------------------------
+ *
+ * Ethernet packets....
+ *
+ *
+ * This function processes the NCM Transfer Block. It can consist
+ * of multiple Ethernet pkts. We specified the max size we could
+ * handle during plug. Only a single SKB should ever be sent.
+*/
+static int cp_lkm_ncm_wrapper_recv(void* ctxt, int* dst, int* mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out)
+{
+ u32 tmp_val;
+ u16 nth_len, ndp_len, datagram_index, datagram_len;
+ struct sk_buff *ncm_skb_out;
+ unsigned char *ptr = NULL, *tmp_ptr = NULL;
+ struct cp_lkm_wrapper_context* cpwc = (struct cp_lkm_wrapper_context*)ctxt;
+ struct cp_lkm_ncm_wrapper_context* ncmwc = (struct cp_lkm_ncm_wrapper_context*)cpwc;
+ cp_lkm_wrapper_state_t wrapper_state = cp_lkm_generic_wrapper_get_state(ctxt, CP_LKM_WRAPPER_DEFAULT_ID);
+
+ *skb_out = NULL;
+ *dst = CP_LKM_WRAPPER_DST_DATA;
+
+ //skb_in is NULL when the caller is recalling us to finish processing the skb.
+ if(NULL != skb_in) {
+ //print_hex_dump(KERN_INFO, "SKB_IN:", DUMP_PREFIX_ADDRESS, 16, 1, skb_in->data, 64, false);
+
+ ptr = (void *)skb_in->data;
+ //There are no headers on the pkts when in ctrl mode. Only in data mode. Shouldn't see control on data eps
+ if(wrapper_state == CP_LKM_WRAPPER_CTRL) {
+ *skb_out = skb_in;
+ *dst = CP_LKM_WRAPPER_DST_CTRL;
+ return CP_LKM_WRAPPER_RES_DONE;
+ }
+
+ // Not enough data for the headers, it is an error.
+ if(skb_in->len < sizeof(struct ncm_transfer_header) + sizeof(struct ncm_datagram_pointers)) {
+ //DEBUG_ERROR("%s() NCM ERROR: NCM packet size error, len: %d", __FUNCTION__,skb_in->len);
+ goto error;
+ }
+
+ //get the signature.
+ tmp_val = le32_to_cpu(WRAPPER_READ_U32(ptr));
+ ptr +=4;
+ if (tmp_val != NTB_HEADER_SIGNATURE) {
+ DEBUG_ERROR("%s() NCM ERROR: Invalid NCM Signature: 0x%lX", __FUNCTION__, tmp_val);
+ goto error;
+ }
+
+ //Check the header length
+ nth_len = le16_to_cpu(WRAPPER_READ_U16(ptr));
+ ptr +=2;
+ if (nth_len != sizeof(struct ncm_transfer_header)) {
+ DEBUG_ERROR("%s() NCM ERROR: Invalid NTH Size: %d", __FUNCTION__, nth_len);
+ goto error;
+ }
+
+ ncmwc->nth_seq_num = le16_to_cpu(WRAPPER_READ_U16(ptr));
+ ptr +=2;
+
+ //Get the total packet length
+ tmp_val = le16_to_cpu(WRAPPER_READ_U16(ptr));
+ ptr +=2;
+ if (tmp_val != skb_in->len || tmp_val > ncmwc->ntb_parms.dwNtbOutMaxSize) {
+ DEBUG_ERROR("%s() NCM ERROR: Invalid length: 0x%lX, skb_in->len: 0x%lX, dwNtbOutMaxSize: 0x%lX", __FUNCTION__, tmp_val, skb_in->len, ncmwc->ntb_parms.dwNtbOutMaxSize);
+ goto error;
+ }
+
+ //Get NDP index
+ tmp_val = le16_to_cpu(WRAPPER_READ_U16(ptr));
+ //Validate against spec. Table 3-2
+ if (((tmp_val % 4) != 0) || (tmp_val < nth_len)) {
+ DEBUG_ERROR("%s() NCM ERROR: Invalid NDP index: 0x%lX", __FUNCTION__, tmp_val);
+ goto error;
+ }
+
+ //Move pointer to ndp offset
+ ptr = ((void *)skb_in->data) + tmp_val;
+
+ //get the signature.
+ tmp_val = le32_to_cpu(WRAPPER_READ_U32(ptr));
+ ptr +=4;
+ //We specified no CRC during plug
+ if (tmp_val != NDP_SIGNATURE_NO_CRC) {
+ DEBUG_ERROR("%s() NCM ERROR: Invalid NDP Signature: 0x%lX", __FUNCTION__, tmp_val);
+ goto error;
+ }
+
+ //Check the header length
+ ndp_len = le16_to_cpu(WRAPPER_READ_U16(ptr));
+ ptr +=2;
+ //Need to subtract the size of ncm_datagram_info from the size of ncm_datagram_pointers to account for an empty NTB.
+ if ((ndp_len < sizeof(struct ncm_datagram_pointers)-sizeof(struct ncm_datagram_info))|| (ndp_len % 4 != 0)) {
+ DEBUG_ERROR("%s() NCM ERROR: Invalid NDP Size: %ld", __FUNCTION__, ndp_len);
+ goto error;
+ }
+
+ //Move past 2 bytes reserved.
+ ptr += 2;
+
+ //Validate datagram pointers. There must be a terminator entry or the
+ //entire packet is to be refused. Section 3.7
+ tmp_ptr = ptr;
+ ndp_len -= 8; //Subtract the NDP header to get the length of the datagram pointer entries in bytes.
+ while (0 < ndp_len) {
+ datagram_index = le16_to_cpu(WRAPPER_READ_U16(tmp_ptr));
+ tmp_ptr +=2;
+
+ datagram_len = le16_to_cpu(WRAPPER_READ_U16(tmp_ptr));
+ tmp_ptr +=2;
+
+ //Need to check for early 0's.
+ if (0 == datagram_index && 0 == datagram_len) {
+ break;
+ }
+
+ ndp_len -= sizeof(struct ncm_datagram_info);
+ }
+
+ //We should be at the terminator value.
+ if (datagram_index != 0 && datagram_len != 0) {
+ goto error;
+ }
+
+ datagram_index = le16_to_cpu(WRAPPER_READ_U16(ptr));
+ ptr +=2;
+
+ datagram_len = le16_to_cpu(WRAPPER_READ_U16(ptr));
+ ptr +=2;
+
+ } else {
+
+ //We'd better have an offset
+ if (0 == ncmwc->datagram_offset) {
+ goto error;
+ }
+
+ skb_in = skb_dequeue(&cpwc->skb_data_recv_list);
+ //We'd better have a queue'd skb for us to process.
+ if (NULL == skb_in) {
+ goto error;
+ }
+
+ ptr = skb_in->data + ncmwc->datagram_offset;
+ //print_hex_dump(KERN_INFO, "Data Gram PTRs:", DUMP_PREFIX_ADDRESS, 16, 1, ptr, 64, false);
+
+ //read the next datagram info
+ datagram_index = le16_to_cpu(WRAPPER_READ_U16(ptr));
+ ptr +=2;
+ datagram_len = le16_to_cpu(WRAPPER_READ_U16(ptr));
+ ptr +=2;
+
+ //DEBUG_TRACE("%s() dp_index: 0x%lX", __FUNCTION__, datagram_index);
+ //DEBUG_TRACE("%s() datagram_len: 0x%lX", __FUNCTION__, datagram_len);
+ }
+
+ //Save offset to next datagram pointer
+ ncmwc->datagram_offset = ptr - skb_in->data;
+
+ //Handle NULL datagram pointer entries. Section 3.7. Terminator would be both having value of 0,
+ //Spec says ignore anything after either of them is NULL
+ if (0 == datagram_index || 0 == datagram_len) {
+ if(skb_in) {
+ dev_kfree_skb_any(skb_in);
+ }
+ ncmwc->datagram_offset = 0;
+ return CP_LKM_WRAPPER_RES_DONE;
+ }
+
+ //copy out the data packet
+ ncm_skb_out = skb_clone(skb_in, GFP_ATOMIC);
+ if (!ncm_skb_out) {
+ DEBUG_ERROR("%s() Failed to clone skb_in", __FUNCTION__);
+ goto error;
+ }
+ ncm_skb_out->len = datagram_len;
+ ncm_skb_out->data += datagram_index;
+
+ skb_set_tail_pointer(ncm_skb_out, ncm_skb_out->len);
+ *skb_out = ncm_skb_out;
+
+ //print_hex_dump(KERN_INFO, "skb_out:", DUMP_PREFIX_ADDRESS, 0, 1, ncm_skb_out->data, 64, false);
+
+ //Check next datagram pointer for terminator
+ datagram_index = le16_to_cpu(WRAPPER_READ_U16(ptr));
+ ptr +=2;
+
+ datagram_len = le16_to_cpu(WRAPPER_READ_U16(ptr));
+
+ if (0 == datagram_index || 0 == datagram_len) {
+ if(skb_in) {
+ dev_kfree_skb_any(skb_in);
+ }
+ ncmwc->datagram_offset = 0;
+ return CP_LKM_WRAPPER_RES_DONE;
+ }
+
+ //Not done, so queue up for next call. We need to come back to process the terminator packet.
+ skb_queue_tail(&cpwc->skb_data_recv_list, skb_in);
+ return CP_LKM_WRAPPER_RES_AGAIN;
+
+error:
+ if(skb_in) {
+ dev_kfree_skb_any(skb_in);
+ }
+
+ ncmwc->datagram_offset = 0;
+ return CP_LKM_WRAPPER_RES_ERROR;
+
+}
+
+static void* cp_lkm_ncm_wrapper_alloc(cp_lkm_wrapper_type_t wrapper, void* wrapper_info, int len)
+{
+
+ struct cp_lkm_ncm_wrapper_context* ncmwc;
+ struct cp_lkm_wrapper_context* wc;
+
+ DEBUG_TRACE("%s() ", __FUNCTION__);
+ ncmwc = kzalloc(sizeof(struct cp_lkm_ncm_wrapper_context), GFP_KERNEL);
+ if(!ncmwc) {
+ return NULL;
+ }
+ if(wrapper_info) {
+ memcpy(&ncmwc->ntb_parms,(struct ncm_ntb_parameters*)(wrapper_info), sizeof(struct ncm_ntb_parameters));
+ }
+ else{
+ DEBUG_ERROR("%s(),no ncm ntb parameters", __FUNCTION__);
+ return NULL;
+ }
+ wc = (struct cp_lkm_wrapper_context*)ncmwc;
+ cp_lkm_wrapper_common_init(wc);
+ wc->wrapper = wrapper;
+ wc->send = cp_lkm_ncm_wrapper_send;
+ wc->recv = cp_lkm_ncm_wrapper_recv;
+
+ ncmwc->datagram_offset = 0;
+ return ncmwc;
+
+}
+
+
+
+// ===== QMAP wrapper ================================================================
+
+/*
+ * qmap mux header:
+ *
+ * |-----------------------|-----------------------|-----------------------|-----------------------|
+ * Octet: | 0 | 1 | 2 | 3 |
+ * |-----------------------|-----------------------|-----------------------|-----------------------|
+ * Bit : |00|01|02|03|04|05|06|07|08|09|10|11|12|13|14|15|16|17|18|19|20|21|22|23|24|25|26|27|28|29|30|31|
+ * |-----------------------|-----------------------|-----------------------|-----------------------|
+ * Field: |C |R | Pad Bytes | Mux ID | Payload Len With Padding |
+ * |-----------------------|-----------------------|-----------------------|-----------------------|
+ *
+ * C : QMAP control or data packet.
+ * 1 - QMAP control command
+ * 0 - Data packet
+ * R : Reserved
+ * PAD : Number of bytes of padding added to achieve 4 byte alignment. The pad bytes themselves may or may not be zero.
+ * This is only needed if aggregating packets and need next packet to be 4 byte aligned
+ * Payload Len: Total payload length in bytes including padding (not including header)
+ *
+ * Notes: QMAP can aggregate IP packets, each with its own QMAP header in a single USB transfer.
+ * QMAP adds an empty header at the end, with mux id 0, and len 0.
+ *
+*/
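+//Worked example (illustrative values): a 1499 byte IP data packet on mux id 1 needs 1 pad byte to reach
+//4 byte alignment, so the header is C=0, pad=1, mux_id=1, payload_len=1500 -> 0x01 0x01 0x05 0xDC on the wire.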
+
+struct qmap_hdr{
+ u8 pad_bytes;
+ u8 mux_id;
+ u16 payload_len;
+} __attribute__((packed));
+
+
+#define CP_LKM_QMAP_DATA 0
+#define CP_LKM_QMAP_CTRL 1
+
+static void* cp_lkm_qmap_wrapper_alloc(cp_lkm_wrapper_type_t wrapper, void* wrapper_info, int len)
+{
+ struct cp_lkm_wrapper_context* cpwc;
+ cpwc = kzalloc(sizeof(struct cp_lkm_wrapper_context), GFP_KERNEL);
+ if(!cpwc) {
+ return cpwc;
+ }
+ cp_lkm_wrapper_common_init(cpwc);
+ cpwc->wrapper = wrapper;
+ cpwc->hdr_size = sizeof(struct qmap_hdr);
+ cpwc->send = cp_lkm_qmap_wrapper_send;
+ cpwc->recv = cp_lkm_qmap_wrapper_recv;
+ return cpwc;
+}
+
+/*
+ We only send one QMAP IP packet at a time.
+ While the spec is not clear on this (at least to me), it appears we are always supposed to add
+ a single empty QMAP header at the end.
+ For us this will look like this:
+ QMAP Header
+ IP pkt
+ padding
+ Empty QMAP Header (all values 0)
+
+*/
+static int cp_lkm_qmap_wrapper_send(void* ctxt, int src, int mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out)
+{
+ struct qmap_hdr* qmh;
+ int in_len;
+ int hdr_size;
+ int pad = 0;
+ int result = CP_LKM_WRAPPER_RES_DONE;
+
+ // don't currently care about the wrapper_state, but this is how we would get it if we did
+ //cp_lkm_wrapper_state_t wrapper_state = cp_lkm_generic_wrapper_get_state(ctxt, mux_id);
+
+ *skb_out = NULL;
+ hdr_size = sizeof(struct qmap_hdr);
+ in_len = skb_in->len;
+ if(in_len & 3) {
+ pad = 4 - (in_len & 3);
+ }
+
+ //printk("%s() src: %d, len: %d, mux_id: %d, pad: %d\n",__FUNCTION__,src,in_len,mux_id,pad);
+
+ //add space for the header at the start, plus pad at the end
+ skb_in = cp_lkm_wrapper_skb_make_space(skb_in, hdr_size, pad);
+ if(!skb_in) {
+ DEBUG_WARN("%s() couldn't make space", __FUNCTION__);
+ return CP_LKM_WRAPPER_RES_ERROR;
+ }
+ skb_push(skb_in, sizeof(struct qmap_hdr));
+
+ //add the header at the front
+ qmh = (struct qmap_hdr*)skb_in->data;
+ qmh->pad_bytes = CP_LKM_QMAP_DATA + pad;
+ qmh->mux_id = mux_id;
+ qmh->payload_len = cpu_to_be16(in_len+pad);
+
+ // CA: determined the empty header is not necessary, but not sure about pad so keeping it.
+ // add pad (if needed) and empty header at the end.
+ //memset(skb_tail_pointer(skb_in), 0, sizeof(struct qmap_hdr)+pad);
+ //skb_put(skb_in, sizeof(struct qmap_hdr)+pad);
+ memset(skb_tail_pointer(skb_in), 0, pad);
+ skb_put(skb_in, pad);
+
+ *skb_out = skb_in;
+ return result;
+
+}
+
+static int cp_lkm_qmap_wrapper_recv(void* ctxt, int* dst, int* mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out)
+{
+ int c, pad, len;
+ struct cp_lkm_wrapper_context* cpwc = (struct cp_lkm_wrapper_context*)ctxt;
+ struct qmap_hdr qmh;
+ int hdr_size;
+ struct sk_buff* tmp_skb;
+ int result = CP_LKM_WRAPPER_RES_DONE;
+ //cp_lkm_wrapper_state_t wrapper_state;
+
+ hdr_size = sizeof(struct qmap_hdr);
+
+ *skb_out = NULL;
+ *dst = CP_LKM_WRAPPER_DST_DATA;
+
+ //skb_in is NULL when we returned 'again' previously and so the caller is recalling us. This means there should be
+ //a queue'd skb for us to process.
+ if(skb_in == NULL) {
+ //printk("%s() had a pending\n", __FUNCTION__);
+ skb_in = skb_dequeue(&cpwc->skb_data_recv_list);
+ }
+ if(skb_in == NULL) {
+ //nothing more to do
+ //printk("%s() done\n", __FUNCTION__);
+ goto qmap_recv_done;
+ }
+ if(skb_in->len < hdr_size){
+ //printk("%s() not enough data, len: %d, hdr_size: %d\n", __FUNCTION__, skb_in->len, hdr_size);
+ result = CP_LKM_WRAPPER_RES_ERROR;
+ goto qmap_recv_done;
+ }
+
+ //read header
+ memcpy(&qmh, skb_in->data, sizeof(struct qmap_hdr));
+ qmh.payload_len = be16_to_cpu(qmh.payload_len);
+
+ c = qmh.pad_bytes & 0x8;
+ pad = qmh.pad_bytes & 0x7;
+ *mux_id = qmh.mux_id;
+ len = qmh.payload_len; //payload plus pad (doesn't include hdr)
+ skb_pull(skb_in, hdr_size);
+
+ //printk("%s() c: 0x%x, pad: %d, mux_id: 0x%x, pkt len: %d, skb len: %d\n", __FUNCTION__, c,pad,qmh.mux_id,len,skb_in->len);
+
+ // don't currently care about the usb state for processing, but if we did this is how we would get it
+ //wrapper_state = cp_lkm_generic_wrapper_get_state(ctxt, *mux_id);
+
+ if(skb_in->len < len){
+ //printk("%s() not enough data, pkt len: %d, skb len: %d\n", __FUNCTION__, len, skb_in->len);
+ result = CP_LKM_WRAPPER_RES_ERROR;
+ goto qmap_recv_done;
+ }
+
+ //printk("%s() pkt len: %d, skb len: %d\n", __FUNCTION__, len, skb_in->len);
+
+ if(skb_in->len == (len + sizeof(struct qmap_hdr))){
+ //this is an exact fit plus an empty hdr at the end.
+ //Some modems add it, some don't. Dump the empty if present.
+ skb_set_tail_pointer(skb_in, len);
+ skb_in->len -= sizeof(struct qmap_hdr);
+ }
+
+ //if exact fit, send it
+ if (skb_in->len == len){
+ //printk("%s(), exact fit\n", __FUNCTION__);
+ skb_set_tail_pointer(skb_in, skb_in->len-pad); //dump the padding if any
+ *skb_out = skb_in;
+ skb_in = NULL; //so we don't free it below
+ if (c == CP_LKM_QMAP_CTRL) {
+ //TODO: decode ctrl packets to find pauses and resumes if we decide to support that
+ // when not using flow control, what do I do here?
+ *dst = CP_LKM_WRAPPER_DST_UNKNOWN;
+ }
+ else if (len == 0) {
+ //this is the 0 len header at the end. Tell the outside world to dump it.
+ *dst = CP_LKM_WRAPPER_DST_UNKNOWN;
+ }
+ goto qmap_recv_done;
+ }
+
+ //multiple packets in this one. Have to copy them
+ tmp_skb = skb_clone(skb_in, GFP_ATOMIC);
+ if (!tmp_skb) {
+ //printk("%s() couldn't clone skb\n", __FUNCTION__);
+ result = CP_LKM_WRAPPER_RES_ERROR;
+ goto qmap_recv_done;
+ }
+ tmp_skb->len = len-pad;
+ skb_set_tail_pointer(tmp_skb, len-pad);
+ *skb_out = tmp_skb;
+
+ //This skb has multiple pkts. We just cloned the first pkt into tmp_skb above. Move past that data and if there
+ //is any more data left, enqueue it and return 'again' so we can process it.
+ skb_pull(skb_in, len);
+
+ //More data after this one, queue and tell caller to come again sometime
+ //printk("%s() %d more to do\n", __FUNCTION__, skb_in->len);
+ skb_queue_tail(&cpwc->skb_data_recv_list, skb_in);
+ skb_in = NULL;
+ result = CP_LKM_WRAPPER_RES_AGAIN;
+
+ if (c == CP_LKM_QMAP_CTRL) {
+ //TODO: decode ctrl packets to find pauses and resumes if we decide to support that
+ // when not using flow control, what do I do here?
+ *dst = CP_LKM_WRAPPER_DST_UNKNOWN;
+ }
+
+qmap_recv_done:
+ if(skb_in) {
+ dev_kfree_skb_any(skb_in);
+ }
+ //if error, clear the out skb if any
+ if(result == CP_LKM_WRAPPER_RES_ERROR) {
+ if(*skb_out) {
+ dev_kfree_skb_any(*skb_out);
+ *skb_out = NULL;
+ }
+ }
+ //printk("%s() done result: %d, dst: %d, mux_id: %d\n", __FUNCTION__, result, *dst, *mux_id);
+ return result;
+}
+
+
+//================================ API
+//If any of the wrappers have wrapper_info passed in they need to save it in their structures since
+//it is freed after this function returns
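+//
+//Illustrative call sequence from the shim side (sketch only; exact state values come from cpmodem_wrapper.h):
+//  void* ctxt = cp_lkm_wrapper_instance_alloc(CP_LKM_WRAPPER_TYPE_QMAP, info, info_len);
+//  cp_lkm_wrapper_set_state(ctxt, mux_id, wrapper_state);          //e.g. CP_LKM_WRAPPER_CTRL while connecting
+//  res = cp_lkm_wrapper_send(ctxt, src, mux_id, skb, &skb_out);    //wrap an outgoing packet
+//  res = cp_lkm_wrapper_recv(ctxt, &dst, &mux_id, skb, &skb_out);  //unwrap a received transfer; loop while RES_AGAIN (skb_in = NULL on repeats)
+//  cp_lkm_wrapper_instance_free(ctxt);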
+void *cp_lkm_wrapper_instance_alloc(cp_lkm_wrapper_type_t wrapper, void* wrapper_info, int len)
+{
+ struct cp_lkm_wrapper_context* cpwc = NULL;
+
+ DEBUG_TRACE("%s() wrapper:%d", __FUNCTION__, wrapper);
+ switch (wrapper) {
+ case CP_LKM_WRAPPER_TYPE_ASIX:
+ cpwc = kzalloc(sizeof(struct cp_lkm_wrapper_context), GFP_KERNEL);
+ if(!cpwc) {
+ goto wrap_alloc_done;
+ }
+ cp_lkm_wrapper_common_init(cpwc);
+ cpwc->wrapper = wrapper;
+ cpwc->hdr_size = 4; //4 byte asix hdr
+ cpwc->send = cp_lkm_asix_wrapper_send;
+ cpwc->recv = cp_lkm_asix_wrapper_recv;
+ break;
+
+ case CP_LKM_WRAPPER_TYPE_ASIX_88179:
+ cpwc = cp_lkm_asix88179_wrapper_alloc(wrapper, wrapper_info, len);
+ break;
+
+ case CP_LKM_WRAPPER_TYPE_LG:
+ // not supported
+ break;
+
+ case CP_LKM_WRAPPER_TYPE_DIRECT_IP:
+ cpwc = kzalloc(sizeof(struct cp_lkm_wrapper_context), GFP_KERNEL);
+ if(!cpwc) {
+ goto wrap_alloc_done;
+ }
+ cp_lkm_wrapper_common_init(cpwc);
+ cpwc->wrapper = wrapper;
+ cpwc->send = cp_lkm_dip_wrapper_send;
+ cpwc->recv = cp_lkm_dip_wrapper_recv;
+ cpwc->hdr_size = 6; //6 byte dip hdr
+ break;
+
+ case CP_LKM_WRAPPER_TYPE_MSRNDIS:
+ cpwc = cp_lkm_msrndis_wrapper_alloc(wrapper, wrapper_info, len);
+ break;
+
+ case CP_LKM_WRAPPER_TYPE_PEGASUS:
+ cpwc = kzalloc(sizeof(struct cp_lkm_wrapper_context), GFP_KERNEL);
+ if(!cpwc) {
+ goto wrap_alloc_done;
+ }
+ cp_lkm_wrapper_common_init(cpwc);
+ cpwc->wrapper = wrapper;
+ cpwc->send = cp_lkm_pegasus_wrapper_send;
+ cpwc->recv = cp_lkm_pegasus_wrapper_recv;
+ cpwc->hdr_size = 2; //2 byte pegasus hdr
+ break;
+
+ case CP_LKM_WRAPPER_TYPE_NCM:
+ cpwc = cp_lkm_ncm_wrapper_alloc(wrapper, wrapper_info, len);
+ break;
+
+ case CP_LKM_WRAPPER_TYPE_QMAP:
+ cpwc = cp_lkm_qmap_wrapper_alloc(wrapper, wrapper_info, len);
+ break;
+
+ default:
+ cpwc = kzalloc(sizeof(struct cp_lkm_wrapper_context), GFP_KERNEL);
+ if(!cpwc) {
+ goto wrap_alloc_done;
+ }
+ cp_lkm_wrapper_common_init(cpwc);
+ cpwc->wrapper = wrapper;
+ cpwc->send = cp_lkm_generic_wrapper_send;
+ cpwc->recv = cp_lkm_generic_wrapper_recv;
+ break;
+ }
+
+wrap_alloc_done:
+ return cpwc;
+}
+
+void cp_lkm_wrapper_instance_free(void* ctxt)
+{
+ struct cp_lkm_wrapper_context* cpwc = (struct cp_lkm_wrapper_context*)ctxt;
+
+ DEBUG_TRACE("%s()", __FUNCTION__);
+
+ switch (cpwc->wrapper) {
+ case CP_LKM_WRAPPER_TYPE_LG:
+ // not supported
+ break;
+ default:
+ cp_lkm_wrapper_common_cleanup(cpwc);
+ kfree(ctxt);
+ break;
+ }
+}
+
+int cp_lkm_wrapper_hdr_size(void* ctxt)
+{
+ struct cp_lkm_wrapper_context* cpwc = (struct cp_lkm_wrapper_context*)ctxt;
+ return cpwc->hdr_size;
+}
+
+void cp_lkm_wrapper_set_state(void* ctxt, int id, cp_lkm_wrapper_state_t wrapper_state)
+{
+ struct cp_lkm_wrapper_context* cpwc = (struct cp_lkm_wrapper_context*)ctxt;
+ int i;
+
+ for (i = 0; i < cpwc->num_state_maps; i++) {
+ if (cpwc->state_maps[i].id == id) {
+ cpwc->state_maps[i].wrapper_state = wrapper_state;
+ return;
+ }
+ }
+ //if we get here, this is a new id
+ if (cpwc->num_state_maps < MAX_STATE_MAPS) {
+ cpwc->state_maps[cpwc->num_state_maps].wrapper_state = wrapper_state;
+ cpwc->state_maps[cpwc->num_state_maps].id = id;
+ cpwc->num_state_maps++;
+ }
+ else{
+ //DEBUG_ASSERT(cpwc->num_state_maps < MAX_STATE_MAPS, "Too many wrapper ids");
+ printk("%s() too many state maps, id: %d, state: %d\n",__FUNCTION__, id, wrapper_state);
+ }
+}
+
+int cp_lkm_wrapper_send(void* ctxt, int src, int mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out)
+{
+ struct cp_lkm_wrapper_context* cpwc = (struct cp_lkm_wrapper_context*)ctxt;
+ int res;
+ unsigned long flags;
+
+// DEBUG_ERROR("%s() ctxt:%p", __FUNCTION__, ctxt);
+ spin_lock_irqsave(&cpwc->lock, flags);
+ res = cpwc->send(ctxt, src, mux_id, skb_in, skb_out);
+ spin_unlock_irqrestore(&cpwc->lock, flags);
+ return res;
+}
+
+int cp_lkm_wrapper_recv(void* ctxt, int* dst, int* mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out)
+{
+ struct cp_lkm_wrapper_context* cpwc = (struct cp_lkm_wrapper_context*)ctxt;
+ int res;
+ unsigned long flags;
+
+ //DEBUG_ERROR("%s() ctxt:%p", __FUNCTION__, ctxt);
+ *mux_id = 0; //default this since a lot of wrappers don't set it
+ spin_lock_irqsave(&cpwc->lock, flags);
+ res = cpwc->recv(ctxt, dst, mux_id, skb_in, skb_out);
+ spin_unlock_irqrestore(&cpwc->lock, flags);
+ return res;
+}
+