1/*
2 * FILE NAME cpmodem_wrapper.c
3 *
4 * BRIEF MODULE DESCRIPTION
5 * Custom USB modem wrapper module
6 *
7 * Author: CradlePoint Technology, Inc. <source@cradlepoint.com>
8 * Ben Kendall <benk@cradlepoint.com>
9 * Cory Atkin <catkin@cradlepoint.com>
10 *
11 * Copyright 2012-2023, CradlePoint Technology, Inc.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2
15 * as published by the Free Software Foundation.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to:
24 * Free Software Foundation
25 * 51 Franklin Street, Fifth Floor
26 * Boston, MA 02111-1301 USA
27 */
28
29
30// Necessary includes for this device driver
31#include <linux/module.h> // Needed by all modules
32#include <linux/kernel.h> // Needed for KERN_xxxx
33#include <linux/init.h> // Needed for the macros
34#include <linux/types.h>
35#include <linux/skbuff.h>
36#include <linux/list.h>
37#include <linux/netdevice.h>
38#include <cpmodem_wrapper.h>
39
40
41#define RUNTIME_DEBUG_TRACE (1 << 0)
42#define RUNTIME_DEBUG_INFO (1 << 1)
43#define RUNTIME_DEBUG_WARN (1 << 2)
44#define RUNTIME_DEBUG_ERROR (1 << 3)
45
46//#undef RUNTIME_DEBUG
47//#define RUNTIME_DEBUG ( RUNTIME_DEBUG_TRACE | RUNTIME_DEBUG_INFO | RUNTIME_DEBUG_WARN | RUNTIME_DEBUG_ERROR )
48
49static int cp_lkm_wrapper_log_level = 0;
50
51#ifdef RUNTIME_DEBUG
52static const char *cp_lkm_wrapper_runtime_debug_level_str[] = {
53 "ASSERT",
54 "TRACE",
55 "INFO",
56 "WARN",
57 "ERROR",
58};
59#else
60static const char *cp_lkm_wrapper_debug_log_level_str[] = {
61 "ASSERT",
62 "ERROR",
63 "WARN",
64 "INFO",
65 "TRACE",
66 "PRINTF"
67};
68#endif
69
70static int cp_out_get_level_index(int level)
71{
72 int level_index = 0;
73 while (level) {
74 level = level >> 1;
75 level_index++;
76 }
77 return level_index;
78}
79
80static void cp_out(int level, const char * file, int line, const char *fmt, ...)
81{
82 int file_str_len = 0;
83 char *file_pos = (char *)file;
84 char *fmt1;
85 va_list arg;
86 int level_index = 0;
87 const char *level_str = NULL;
88
 89 if (level) { // a level of 0 means ASSERT or LOG - always output those
90 level_index = cp_out_get_level_index(level);
91
92#ifdef RUNTIME_DEBUG
93 if (!(RUNTIME_DEBUG & level)) {
94 return;
95 }
96 level_str = cp_lkm_wrapper_runtime_debug_level_str[level_index];
97#else
98 if (!(cp_lkm_wrapper_log_level & level)) {
99 return;
100 }
101 level_str = cp_lkm_wrapper_debug_log_level_str[level_index];
102#endif
103 }
104
105 va_start(arg, fmt);
106
107 if (file) {
108 char *pos = (char *)file;
109 while ((pos = strchr(pos, '/'))) {
110 pos++;
111 file_pos = pos;
112 }
113
114 file_str_len = strlen(file_pos);
115 }
116
117 fmt1 = kmalloc(strlen(fmt) + file_str_len + 12 + 6, GFP_ATOMIC); // +6 for debug type indication
118 if (!fmt1) {
119 return;
120 }
121 if (level_str) {
122 if (file) {
123 sprintf(fmt1, "%6s %s(%4d):%s\n", level_str, file_pos, line, fmt);
124 } else {
125 sprintf(fmt1, "%6s %s\n", level_str, fmt);
126 }
127 } else {
128 if (file) {
129 sprintf(fmt1, "%s(%4d):%s\n", file_pos, line, fmt);
130 } else {
131 sprintf(fmt1, "%s\n", fmt);
132 }
133 }
134 vprintk(fmt1, arg);
135 kfree(fmt1);
136 va_end(arg);
137}
138
139#ifdef RUNTIME_DEBUG
140// assert is always defined if RUNTIME_DEBUG is defined
141#define DEBUG_ASSERT(a, args...) \
142 if (!(a)) { \
143 cp_out(0, __FILE__, __LINE__, args); \
144 dump_stack(); \
145 while(1) { }; \
146 }
147#define DEBUG_TRACE(args...) cp_out(RUNTIME_DEBUG_TRACE, __FILE__, __LINE__, args)
148#define DEBUG_INFO(args...) cp_out(RUNTIME_DEBUG_INFO, __FILE__, __LINE__, args)
149#define DEBUG_WARN(args...) cp_out(RUNTIME_DEBUG_WARN, __FILE__, __LINE__, args)
150#define DEBUG_ERROR(args...) cp_out(RUNTIME_DEBUG_ERROR, __FILE__, __LINE__, args)
151
152#else
153#define DEBUG_ASSERT(a, args...)
154#define DEBUG_TRACE(args...) cp_out(LOG_DEBUG_LEVEL_TRACE, __FILE__, __LINE__, args)
155
156#define DEBUG_INFO(args...) cp_out(LOG_DEBUG_LEVEL_INFO, __FILE__, __LINE__, args)
157
158#define DEBUG_WARN(args...) cp_out(LOG_DEBUG_LEVEL_WARN, __FILE__, __LINE__, args)
159
160#define DEBUG_ERROR(args...) cp_out(LOG_DEBUG_LEVEL_ERROR, __FILE__, __LINE__, args)
161
162#define DEBUG_PRINTF(args...) cp_out(LOG_DEBUG_LEVEL_PRINTF, __FILE__, __LINE__, args)
163
164#endif
165
166#define LOG(args...) cp_out(0, NULL, 0, args)
167
168
169void cp_lkm_wrapper_set_log_level(int level)
170{
171 DEBUG_TRACE("%s(%d)", __FUNCTION__, level);
172
173 cp_lkm_wrapper_log_level = level;
174}
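
/*
 * Illustrative sketch (not compiled into the module): the log level is treated as a
 * bitmask, so a caller that wants warnings and errors would OR the individual bits
 * together. This assumes the LOG_DEBUG_LEVEL_* values from cpmodem_wrapper.h are
 * single bits, which is how cp_out() tests them above.
 */
#if 0
static void cp_lkm_wrapper_example_enable_warn_and_error(void)
{
 // each DEBUG_* macro tests its own bit in cp_lkm_wrapper_log_level,
 // so unrelated levels (TRACE/INFO) stay silent
 cp_lkm_wrapper_set_log_level(LOG_DEBUG_LEVEL_WARN | LOG_DEBUG_LEVEL_ERROR);
}
#endif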
175
176/******************************* usb wrapper module functionality **********************************/
177
178typedef int (*cp_lkm_wrapper_send_op)(void* ctxt, int src, int mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out);
179typedef int (*cp_lkm_wrapper_recv_op)(void* ctxt, int* dst, int* mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out);
180
181static int cp_lkm_generic_wrapper_send(void* ctxt, int src, int mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out);
182static int cp_lkm_generic_wrapper_recv(void* ctxt, int* dst, int* mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out);
183static int cp_lkm_asix_wrapper_send(void* ctxt, int src, int mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out);
184static int cp_lkm_asix_wrapper_recv(void* ctxt, int* dst, int* mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out);
185static int cp_lkm_asix88179_wrapper_send(void* ctxt, int src, int mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out);
186static int cp_lkm_asix88179_wrapper_recv(void* ctxt, int* dst, int* mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out);
187static int cp_lkm_dip_wrapper_send(void* ctxt, int src, int mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out);
188static int cp_lkm_dip_wrapper_recv(void* ctxt, int* dst, int* mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out);
189static int cp_lkm_ncm_wrapper_send(void* ctxt, int src, int mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out);
190static int cp_lkm_ncm_wrapper_recv(void* ctxt, int* dst, int* mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out);
191static int cp_lkm_msrndis_wrapper_send(void* ctxt, int src, int mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out);
192static int cp_lkm_msrndis_wrapper_recv(void* ctxt, int* dst, int* mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out);
193static int cp_lkm_pegasus_wrapper_send(void* ctxt, int src, int mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out);
194static int cp_lkm_pegasus_wrapper_recv(void* ctxt, int* dst, int* mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out);
195static int cp_lkm_qmap_wrapper_send(void* ctxt, int src, int mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out);
196static int cp_lkm_qmap_wrapper_recv(void* ctxt, int* dst, int* mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out);
197
198
199static void* cp_lkm_msrndis_wrapper_alloc(cp_lkm_wrapper_type_t wrapper, void* wrapper_info, int len);
200static void* cp_lkm_ncm_wrapper_alloc(cp_lkm_wrapper_type_t wrapper, void* wrapper_info, int len);
201static void* cp_lkm_asix88179_wrapper_alloc(cp_lkm_wrapper_type_t wrapper, void* wrapper_info, int len);
202static void* cp_lkm_qmap_wrapper_alloc(cp_lkm_wrapper_type_t wrapper, void* wrapper_info, int len);
203
204#define CP_LKM_WRAPPER_STATE_INIT 0
205#define CP_LKM_WRAPPER_STATE_SPLIT 1
206
207#define WRAPPER_WRITE_U8(ptr,val) (*((u8*)(ptr)) = val)
208#define WRAPPER_WRITE_U16(ptr,val) (*((u16*)(ptr)) = val)
209#define WRAPPER_WRITE_U32(ptr,val) (*((u32*)(ptr)) = val)
210
211#define WRAPPER_READ_U8(ptr) (*((u8*)(ptr)))
212#define WRAPPER_READ_U16(ptr) (*((u16*)(ptr)))
213#define WRAPPER_READ_U32(ptr) (*((u32*)(ptr)))
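
/*
 * Illustrative sketch (not compiled into the module): the WRAPPER_WRITE/READ macros
 * are plain pointer casts, so the endianness of a multi-byte field is decided by the
 * caller. The wrappers below pair them with cpu_to_le16()/cpu_to_be16() depending on
 * what the device expects.
 */
#if 0
static void cp_lkm_wrapper_example_write_len(u8* hdr, u16 len)
{
 WRAPPER_WRITE_U16(hdr, cpu_to_le16(len)); //little-endian length (e.g. pegasus)
 WRAPPER_WRITE_U16(hdr + 2, cpu_to_be16(len)); //big-endian length (e.g. DirectIP, QMAP)
}
#endif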
214
215struct cp_lkm_wrapper_state_map{
216 int id;
217 cp_lkm_wrapper_state_t wrapper_state;
218};
219#define MAX_STATE_MAPS 16
220
221struct cp_lkm_wrapper_context
222{
223 cp_lkm_wrapper_type_t wrapper;
224 int send_state; //generic send state that can be used by all wrappers
225 int recv_state; //generic recv state that can be used by all wrappers
226 cp_lkm_wrapper_send_op send;
227 cp_lkm_wrapper_recv_op recv;
228 int hdr_size;
229 spinlock_t lock;
230 struct cp_lkm_wrapper_state_map state_maps[MAX_STATE_MAPS];
231 int num_state_maps;
232 struct sk_buff_head skb_ctrl_recv_list;
233 struct sk_buff_head skb_data_recv_list;
234 struct sk_buff_head skb_ctrl_send_list;
235 struct sk_buff_head skb_data_send_list;
236};
237
238static void cp_lkm_wrapper_common_init(struct cp_lkm_wrapper_context* cpwc)
239{
240 cpwc->recv_state = CP_LKM_WRAPPER_STATE_INIT;
241 cpwc->send_state = CP_LKM_WRAPPER_STATE_INIT;
242 spin_lock_init(&cpwc->lock);
243 skb_queue_head_init(&cpwc->skb_ctrl_recv_list);
244 skb_queue_head_init(&cpwc->skb_ctrl_send_list);
245 skb_queue_head_init(&cpwc->skb_data_recv_list);
246 skb_queue_head_init(&cpwc->skb_data_send_list);
247}
248
249static void cp_lkm_wrapper_clean_list(struct sk_buff_head* list)
250{
251 struct sk_buff *skb;
252 while((skb = skb_dequeue(list)) != NULL){
253 DEBUG_INFO("%s() found a straggler", __FUNCTION__);
254
255 dev_kfree_skb_any(skb);
256 }
257}
258
259static void cp_lkm_wrapper_common_cleanup(struct cp_lkm_wrapper_context* cpwc)
260{
261 cp_lkm_wrapper_clean_list(&cpwc->skb_ctrl_recv_list);
262 cp_lkm_wrapper_clean_list(&cpwc->skb_ctrl_send_list);
263 cp_lkm_wrapper_clean_list(&cpwc->skb_data_recv_list);
264 cp_lkm_wrapper_clean_list(&cpwc->skb_data_send_list);
265}
266
267static struct sk_buff* cp_lkm_wrapper_skb_make_space(struct sk_buff* skb_in, int headspace, int tailspace)
268{
 269 int headroom, tailroom;
 270 int space = headspace + tailspace;
 271 if(skb_in == NULL) {
 272 DEBUG_ERROR("%s() NULL skb_in, shouldn't happen", __FUNCTION__);
 273 return NULL;
 274 }
 275 headroom = skb_headroom(skb_in); //only touch the skb after the NULL check
 276 tailroom = skb_tailroom(skb_in);
277 if ((!skb_cloned(skb_in)) && ((headroom + tailroom) >= space)) {
278 if (headroom < headspace || tailroom < tailspace) {
279 //printk("%s() move it\n", __FUNCTION__);
280 skb_in->data = memmove(skb_in->head + headspace, skb_in->data, skb_in->len);
281 skb_set_tail_pointer(skb_in, skb_in->len);
282 }
283 } else {
284 struct sk_buff *skb2;
285 //printk("%s() copy it\n", __FUNCTION__);
286 skb2 = skb_copy_expand(skb_in, headspace, tailspace, GFP_ATOMIC);
287 dev_kfree_skb_any(skb_in);
288 skb_in = skb2;
289 }
290 return skb_in;
291}
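
/*
 * Illustrative sketch (not compiled into the module): the typical calling pattern for
 * cp_lkm_wrapper_skb_make_space() as used by the send wrappers below - reserve the
 * headroom/tailroom first, then skb_push() the header and fill it in. The 4 byte
 * header size here is just an example value.
 */
#if 0
static struct sk_buff* cp_lkm_wrapper_example_prepend_hdr(struct sk_buff* skb, u32 hdr)
{
 skb = cp_lkm_wrapper_skb_make_space(skb, sizeof(u32), 0); //may memmove in place or copy/expand
 if (!skb) {
 return NULL; //on the copy path the original skb was already freed by make_space
 }
 skb_push(skb, sizeof(u32));
 memcpy(skb->data, &hdr, sizeof(u32));
 return skb;
}
#endif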
292
293// generic helper function for getting the state for id from the ctxt
294static cp_lkm_wrapper_state_t cp_lkm_generic_wrapper_get_state(void* ctxt, int id)
295{
296 struct cp_lkm_wrapper_context* cpwc = (struct cp_lkm_wrapper_context*)ctxt;
297 int i;
298 cp_lkm_wrapper_state_t wrapper_state = CP_LKM_WRAPPER_INVALID;
299 for (i = 0; i < cpwc->num_state_maps; i++) {
300 if (cpwc->state_maps[i].id == id) {
301 wrapper_state = cpwc->state_maps[i].wrapper_state;
302 break;
303 }
304 }
305 //printk("%s() id: %d, state: %d\n",__FUNCTION__,id,wrapper_state);
306 return wrapper_state;
307}
308
309static int cp_lkm_generic_wrapper_send(void* ctxt, int src, int mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out)
310{
311 DEBUG_TRACE("%s()", __FUNCTION__);
312 *skb_out = skb_in;
313 return CP_LKM_WRAPPER_RES_DONE;
314}
315
316static int cp_lkm_generic_wrapper_recv(void* ctxt, int* dst, int* mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out)
317{
318 int result = CP_LKM_WRAPPER_RES_DONE;
319 cp_lkm_wrapper_state_t wrapper_state = cp_lkm_generic_wrapper_get_state(ctxt, CP_LKM_WRAPPER_DEFAULT_ID);
320
321 //printk("%s() state: %d\n", __FUNCTION__, wrapper_state);
322 *skb_out = skb_in;
323
324 if(wrapper_state == CP_LKM_WRAPPER_CTRL) {
325 //PPP modems will often use the data endpoints for AT while connecting and then PPP data once connected.
326 //That's why we need to check the state here
327 DEBUG_TRACE("%s() ctrl pkt", __FUNCTION__);
328 *dst = CP_LKM_WRAPPER_DST_CTRL;
329 }
330 else{
331 *dst = CP_LKM_WRAPPER_DST_DATA;
332 }
333 return result;
334}
335
336#define ASIX_ENABLE_PADDING 0xffff0000
337#define ASIX_HDR_MASK 0x0000ffff
338#define ASIX_16BIT_EVEN_MASK 0xfffe
339
340//============================================== wrapper specific functions
341static int cp_lkm_asix_wrapper_send(void* ctxt, int src, int mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out)
342{
343 int pad_len;
344 u32 pkt_len;
345 u32 padding = ASIX_ENABLE_PADDING;
346
 347 *skb_out = NULL;
 348
 349 if(!skb_in) {
 350 DEBUG_ERROR("%s() NULL skb_in, shouldn't happen", __FUNCTION__);
 351 return CP_LKM_WRAPPER_RES_ERROR;
 352 }
 353 //only pad when adding the 4 byte header would land exactly on a 512 byte boundary (checked after the NULL check)
 354 pad_len = ((skb_in->len + sizeof(u32)) % 512) ? 0 : sizeof(u32);
355 //DEBUG_INFO("%s() wrapping", __FUNCTION__);
356
357 skb_in = cp_lkm_wrapper_skb_make_space(skb_in, sizeof(u32), pad_len);
358 if (!skb_in){
359 DEBUG_INFO("%s() couldn't expand", __FUNCTION__);
360 *skb_out = NULL;
361 return CP_LKM_WRAPPER_RES_ERROR;
362 }
363
 364 //generate the header: 16 bit length in the low word, its ones complement in the high word
365 pkt_len = ((skb_in->len ^ ASIX_HDR_MASK) << 16) + skb_in->len;
366 skb_push(skb_in, sizeof(u32));
367 cpu_to_le32s(&pkt_len);
368 memcpy(skb_in->data, &pkt_len, sizeof(u32));
369
370 if (pad_len) {
371 cpu_to_le32s(&padding);
372 memcpy(skb_tail_pointer(skb_in), &padding, sizeof(u32));
373 skb_put(skb_in, sizeof(u32));
374 }
375 //DEBUG_INFO("%s() wrapped", __FUNCTION__);
376 *skb_out = skb_in;
377 return CP_LKM_WRAPPER_RES_DONE;
378}
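
/*
 * Illustrative sketch (not compiled into the module): worked example of the ASIX
 * 32 bit header built above. The low 16 bits carry the frame length and the high
 * 16 bits carry its ones complement, so a 60 byte frame (0x003C) produces the
 * header value 0xFFC3003C (before the cpu_to_le32s() byte swap).
 */
#if 0
static u32 cp_lkm_asix_example_hdr(u16 frame_len)
{
 //same math as cp_lkm_asix_wrapper_send(): complement in the top word, length in the bottom
 return (((u32)frame_len ^ ASIX_HDR_MASK) << 16) + frame_len;
}
#endif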
379
380static int cp_lkm_asix_wrapper_recv(void *ctxt, int *dst, int *mux_id, struct sk_buff *skb_in, struct sk_buff **skb_out)
381{
382 u8 *head;
383 u32 hdr;
384 char *pkt;
385 struct sk_buff *pkt_skb;
386 u16 size;
387 struct cp_lkm_wrapper_context *cpwc = (struct cp_lkm_wrapper_context *)ctxt;
388 int result = CP_LKM_WRAPPER_RES_DONE;
389 cp_lkm_wrapper_state_t wrapper_state = cp_lkm_generic_wrapper_get_state(ctxt, CP_LKM_WRAPPER_DEFAULT_ID);
390
391 *skb_out = NULL;
392
393 //skb_in is NULL when we returned 'again' previously and so the caller is recalling us. This means there should be
394 //a queue'd skb for us to process.
395 if(!skb_in) {
396 DEBUG_TRACE("%s() had a pending", __FUNCTION__);
397 skb_in = skb_dequeue(&cpwc->skb_data_recv_list);
398 }
399 if(!skb_in) {
400 //nothing more to do
401 DEBUG_TRACE("%s() done", __FUNCTION__);
402 goto asix_recv_done;
403 }
404 if(skb_in->len < sizeof(u32)){
405 DEBUG_ERROR("%s() not enough data", __FUNCTION__);
406 result = CP_LKM_WRAPPER_RES_ERROR;
407 goto asix_recv_done;
408 }
409
410 //read the hdr off the front
411 head = (u8 *) skb_in->data;
412 memcpy(&hdr, head, sizeof(u32));
413 le32_to_cpus(&hdr);
414 pkt = head + sizeof(u32);
415 skb_pull(skb_in, sizeof(u32));
416
 417 //the length and its complement don't agree; nothing sensible to do here, so log it and keep going
418 if ((short)(hdr & ASIX_HDR_MASK) !=
419 ~((short)((hdr & ~(ASIX_HDR_MASK)) >> 16))) {
420 DEBUG_INFO("%s(), bad length", __FUNCTION__);
421 }
422 // get the packet length
423 size = (u16) (hdr & ASIX_HDR_MASK);
424
425 //if exact fit, send it
426 if ((skb_in->len) - ((size + 1) & ASIX_16BIT_EVEN_MASK) == 0){
427 DEBUG_TRACE("%s(), exact fit", __FUNCTION__);
428 *skb_out = skb_in;
429 skb_in = NULL; //so we don't free it below
430 goto asix_recv_done;
431 }
432
433 if (size > ETH_FRAME_LEN || size > skb_in->len) {
434 //deverr(dev,"asix_rx_fixup() Bad RX Length %d", size);
435 DEBUG_ERROR("%s() too big or buff too small", __FUNCTION__);
436
437 result = CP_LKM_WRAPPER_RES_ERROR;
438 goto asix_recv_done;
439 }
440
441 //multiple pkts in this one. Have to copy them
442 pkt_skb = skb_clone(skb_in, GFP_ATOMIC);
443 if (!pkt_skb) {
444 result = CP_LKM_WRAPPER_RES_ERROR;
445 goto asix_recv_done;
446 }
447 pkt_skb->len = size;
448 pkt_skb->data = pkt;
449 skb_set_tail_pointer(pkt_skb, size);
450 *skb_out = pkt_skb;
451
452 //This skb has multiple pkts. We just cloned the first pkt into pkt_skb above. Move past that data and if there
453 //is any more data left, enqueue it and return 'again' so we can process it.
454 skb_pull(skb_in, (size + 1) & ASIX_16BIT_EVEN_MASK);
455
 456 //if what's left isn't even a header's worth, treat it as an error; otherwise requeue it and tell the caller to come again
457 if (skb_in->len <= sizeof(u32)){
458 DEBUG_ERROR("%s() overflowed", __FUNCTION__);
459 result = CP_LKM_WRAPPER_RES_ERROR;
460 goto asix_recv_done;
461 }
462
463 DEBUG_TRACE("%s() more to do", __FUNCTION__);
464 skb_queue_tail(&cpwc->skb_data_recv_list, skb_in);
465 skb_in = NULL;
466 result = CP_LKM_WRAPPER_RES_AGAIN;
467
468asix_recv_done:
469 if(skb_in) {
470 dev_kfree_skb_any(skb_in);
471 }
472 //if error, clear the out skb if any
473 if(result == CP_LKM_WRAPPER_RES_ERROR) {
474 if(*skb_out) {
475 dev_kfree_skb_any(*skb_out);
476 *skb_out = NULL;
477 }
478 }
479 DEBUG_TRACE("%s() done result: 0x%x skb_out:%p", __FUNCTION__, result, *skb_out);
480 if(wrapper_state == CP_LKM_WRAPPER_CTRL) {
481 DEBUG_TRACE("%s() ctrl pkt", __FUNCTION__);
482 *dst = CP_LKM_WRAPPER_DST_CTRL;
483 }
484 else{
485 *dst = CP_LKM_WRAPPER_DST_DATA;
486 }
487
488 return result;
489}
490
491// asix88179 defines
 492#define RX_HDR_CRC_ERR (1U << 31) // should this be 29?
 493#define RX_HDR_DROP_ERR (1U << 30) // should this be 31?
494#define RX_HDR_L3CSUM_ERR 2
495#define RX_HDR_L4CSUM_ERR 1
496#define RX_HDR_L4_TYPE_UDP 4
497#define RX_HDR_L4_TYPE_TCP 16
498#define RX_HDR_L4_TYPE_MASK 0x1c
499
500struct cp_lkm_asix88179_wrapper_context {
501 struct cp_lkm_wrapper_context common;
502 u32 max_transfer_len;
503 u32 *pkt_hdr;
504 int pkt_cnt;
505};
506
507#define ASIX_88179_ENABLE_PADDING 0x80008000
508#define ASIX_88179_13BIT_MASK 0x1fff
509#define ASIX_88179_8BIT_BOUNDARY_MASK 0xFFF8
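
/*
 * Illustrative sketch (not compiled into the module): how the 88179 receive metadata
 * is laid out, as parsed by cp_lkm_asix88179_wrapper_recv() below. The last 4 bytes
 * of the USB transfer hold the packet count (low 16 bits) and the offset of the
 * per-packet header array (high 16 bits); each per-packet 32 bit header carries
 * error/checksum bits in the low word and a 13 bit length in bits 16..28. Each
 * packet is padded out to an 8 byte boundary.
 */
#if 0
static void cp_lkm_asix88179_example_parse(u32 rx_trailer, u32 pkt_hdr)
{
 u16 pkt_cnt = (u16)rx_trailer; //number of packets in this transfer
 u16 hdr_off = (u16)(rx_trailer >> 16); //where the per-packet header array starts
 u16 pkt_len = (pkt_hdr >> 16) & ASIX_88179_13BIT_MASK; //length of this packet
 unsigned int padded = (pkt_len + 7) & ASIX_88179_8BIT_BOUNDARY_MASK; //bytes to skip to the next packet
 bool bad = (pkt_hdr & RX_HDR_CRC_ERR) || (pkt_hdr & RX_HDR_DROP_ERR); //drop the packet if set

 (void)pkt_cnt; (void)hdr_off; (void)padded; (void)bad;
}
#endif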
510
511static int cp_lkm_asix88179_wrapper_send(void *ctxt, int src, int mux_id, struct sk_buff *skb_in, struct sk_buff **skb_out)
512{
513 struct sk_buff *skb2;
514 struct cp_lkm_asix88179_wrapper_context *asix88179_wc = (struct cp_lkm_asix88179_wrapper_context *)ctxt;
515 u32 hdr1;
516 u32 hdr2;
517 int frame_size = asix88179_wc->max_transfer_len;
518 u32 mss;
519 *skb_out = NULL;
520
521 mss = skb_shinfo(skb_in)->gso_size;
522
523 hdr1 = skb_in->len;
524 hdr2 = mss;
525 if (((skb_in->len + 8) % frame_size) == 0) {
526 hdr2 |= ASIX_88179_ENABLE_PADDING; // enable padding
527 }
528
529 // make space for both headers
530 skb2 = cp_lkm_wrapper_skb_make_space(skb_in, sizeof(u32) * 2, 0);
531 if (!skb2) {
532 // skb_in is already freed in cp_lkm_wrapper_skb_make_space
533 printk("%s() - could not make space\n", __FUNCTION__);
534 return CP_LKM_WRAPPER_RES_ERROR;
535 }
536 skb_in = skb2;
537
538 cpu_to_le32s(&hdr2);
539 skb_push(skb_in, sizeof(u32));
540 skb_copy_to_linear_data(skb_in, &hdr2, sizeof(u32));
541
542 cpu_to_le32s(&hdr1);
543 skb_push(skb_in, sizeof(u32));
544 skb_copy_to_linear_data(skb_in, &hdr1, sizeof(u32));
545
546 *skb_out = skb_in;
547 return CP_LKM_WRAPPER_RES_DONE;
548}
549
550static void cp_lkm_asix88179_check_csum(struct sk_buff *skb, u32 *pkt_hdr)
551{
552 u32 err_ind = *pkt_hdr;
553 bool hdr_err = (err_ind & RX_HDR_L3CSUM_ERR) || (err_ind & RX_HDR_L4CSUM_ERR);
554 bool csum_valid = ((err_ind & RX_HDR_L4_TYPE_MASK) == RX_HDR_L4_TYPE_TCP) || ((err_ind & RX_HDR_L4_TYPE_MASK) == RX_HDR_L4_TYPE_UDP);
555
556 skb->ip_summed = CHECKSUM_NONE;
557
558 if (!hdr_err && csum_valid) {
559 skb->ip_summed = CHECKSUM_UNNECESSARY;
560 }
561}
562
563static unsigned long total_pkt_cnt = 0;
564static unsigned long total_pkt_processed = 0;
565
566static int cp_lkm_asix88179_wrapper_recv(void *ctxt, int *dst, int *mux_id, struct sk_buff *skb_in, struct sk_buff **skb_out)
567{
568 u32 hdr;
569 u16 hdr_off;
570 struct cp_lkm_asix88179_wrapper_context* cp_88179wc = (struct cp_lkm_asix88179_wrapper_context*)ctxt;
571 struct cp_lkm_wrapper_context *cpwc = (struct cp_lkm_wrapper_context *)ctxt;
572 int result = CP_LKM_WRAPPER_RES_DONE;
573 struct sk_buff *pkt_skb;
574 u16 pkt_len;
575 bool crc_runt;
576 unsigned int end_len;
577
578 cp_lkm_wrapper_state_t wrapper_state = cp_lkm_generic_wrapper_get_state(ctxt, CP_LKM_WRAPPER_DEFAULT_ID);
579
580 *skb_out = NULL;
581
582 DEBUG_TRACE("%s()", __FUNCTION__);
583
584 //skb_in is NULL when we returned 'again' previously and so the caller is recalling us. This means there should be
585 //a queue'd skb for us to process.
586 if(!skb_in) {
587 DEBUG_TRACE("%s() had a pending", __FUNCTION__);
588 skb_in = skb_dequeue(&cpwc->skb_data_recv_list);
589 } else {
590 DEBUG_TRACE("%s() 1st pkt of queue, skb_in->len=%x", __FUNCTION__, skb_in->len);
591 skb_trim(skb_in, skb_in->len - 4);
592 memcpy(&hdr, skb_tail_pointer(skb_in), sizeof(u32));
593 le32_to_cpus(&hdr);
594
595 cp_88179wc->pkt_cnt = (u16)hdr;
596 total_pkt_cnt += cp_88179wc->pkt_cnt;
597 hdr_off = (u16)(hdr >> 16);
598 cp_88179wc->pkt_hdr = (u32 *)(skb_in->data + hdr_off);
599 le32_to_cpus(cp_88179wc->pkt_hdr);
600 }
601 if(!skb_in) {
602 //nothing more to do
603 DEBUG_TRACE("%s() done", __FUNCTION__);
604 goto asix_recv_done;
605 }
606 if(skb_in->len < sizeof(u32)){
607 DEBUG_ERROR("%s() not enough data", __FUNCTION__);
608 result = CP_LKM_WRAPPER_RES_ERROR;
609 goto asix_recv_done;
610 }
611
612 while (cp_88179wc->pkt_cnt--) {
613
614 pkt_len = (*cp_88179wc->pkt_hdr >> 16) & ASIX_88179_13BIT_MASK;
615 end_len = (pkt_len + 7) & ASIX_88179_8BIT_BOUNDARY_MASK;
616
617 DEBUG_TRACE("%s() rx_hdr = %x, pkt_cnt=%x, pkt_hdr=%x, pkt_len=%x", __FUNCTION__, hdr, cp_88179wc->pkt_cnt, cp_88179wc->pkt_hdr, pkt_len);
618 // Check CRC or runt packet
619 crc_runt = (*cp_88179wc->pkt_hdr & RX_HDR_CRC_ERR) || (*cp_88179wc->pkt_hdr & RX_HDR_DROP_ERR);
620 if (crc_runt) {
621 skb_pull(skb_in, end_len);
622 cp_88179wc->pkt_hdr++;
623 le32_to_cpus(cp_88179wc->pkt_hdr);
624
625 DEBUG_TRACE("%s() crc error or runt", __FUNCTION__);
626 continue;
627 }
628
629 total_pkt_processed++;
630
631 //multiple packets in this one. Have to copy them
632 pkt_skb = skb_clone(skb_in, GFP_ATOMIC);
633 if (!pkt_skb) {
634 result = CP_LKM_WRAPPER_RES_ERROR;
635 goto asix_recv_done;
636 }
637
638 pkt_skb->data = skb_in->data + 2;
639 pkt_skb->len = pkt_len;
640 pkt_skb->truesize = pkt_len + sizeof(struct sk_buff);
641 skb_set_tail_pointer(pkt_skb, pkt_len);
642 cp_lkm_asix88179_check_csum(pkt_skb, cp_88179wc->pkt_hdr);
643 *skb_out = pkt_skb;
644
645 if (cp_88179wc->pkt_cnt != 0) {
646 //This skb has multiple pkts. We just cloned the first pkt into pkt_skb above. Move past that data and if there
647 //is any more data left, enqueue it and return 'again' so we can process it.
648 skb_pull(skb_in, end_len);
649 cp_88179wc->pkt_hdr++;
650 le32_to_cpus(cp_88179wc->pkt_hdr);
651
652 DEBUG_TRACE("%s() more to do", __FUNCTION__);
653 skb_queue_tail(&cpwc->skb_data_recv_list, skb_in);
654 skb_in = NULL;
655 result = CP_LKM_WRAPPER_RES_AGAIN;
656 }
657 break;
658 }
659
660asix_recv_done:
661 if(skb_in) {
662 dev_kfree_skb_any(skb_in);
663 }
664 //if error, clear the out skb if any
665 if(result == CP_LKM_WRAPPER_RES_ERROR) {
666 if(*skb_out) {
667 dev_kfree_skb_any(*skb_out);
668 *skb_out = NULL;
669 }
670 }
671 DEBUG_TRACE("%s() done result: 0x%x skb_out:%p", __FUNCTION__, result, *skb_out);
672 if(wrapper_state == CP_LKM_WRAPPER_CTRL) {
673 DEBUG_TRACE("%s() ctrl pkt", __FUNCTION__);
674 *dst = CP_LKM_WRAPPER_DST_CTRL;
675 } else{
676 *dst = CP_LKM_WRAPPER_DST_DATA;
677 }
678
679 return result;
680}
681
682static void* cp_lkm_asix88179_wrapper_alloc(cp_lkm_wrapper_type_t wrapper, void* wrapper_info, int len)
683{
684 struct cp_lkm_asix88179_wrapper_context* asix88179_wc;
685 struct cp_lkm_wrapper_context* wc;
686
687 asix88179_wc = kzalloc(sizeof(struct cp_lkm_asix88179_wrapper_context), GFP_KERNEL);
688 if(!asix88179_wc) {
689 return NULL;
690 }
691
692 if(wrapper_info) {
693 asix88179_wc->max_transfer_len = *((u32*)(wrapper_info));
694 DEBUG_INFO("%s(), max transfer:%d", __FUNCTION__, asix88179_wc->max_transfer_len);
695 } else {
696 DEBUG_ERROR("%s(),no max transfer set", __FUNCTION__);
697 }
698
699 wc = (struct cp_lkm_wrapper_context*)asix88179_wc;
700 cp_lkm_wrapper_common_init(wc);
701 wc->wrapper = wrapper;
702 wc->send = cp_lkm_asix88179_wrapper_send;
703 wc->recv = cp_lkm_asix88179_wrapper_recv;
704
705 return asix88179_wc;
706
707}
708
709// ===== pegasus wrapper
710static int cp_lkm_pegasus_wrapper_send(void* ctxt, int src, int mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out)
711{
712 int padlen = 0;
713 u32 packet_len;
714 u32 hdrlen = 2;
715
716 *skb_out = NULL;
717
718 if(skb_in == NULL) {
719 DEBUG_ERROR("%s() NULL skb_in, shouldn't happen", __FUNCTION__);
720 return CP_LKM_WRAPPER_RES_ERROR;
721 }
722 //DEBUG_INFO("%s() wrapping", __FUNCTION__);
723
724 skb_in = cp_lkm_wrapper_skb_make_space(skb_in, hdrlen, padlen);
725 if (!skb_in){
726 DEBUG_ERROR("%s() couldn't expand", __FUNCTION__);
727 *skb_out = NULL;
728 return CP_LKM_WRAPPER_RES_ERROR;
729 }
730
 731 //prepend the 2 byte little-endian length header
732 packet_len = skb_in->len;
733 skb_push(skb_in, sizeof(u16));
734 WRAPPER_WRITE_U16(skb_in->data, cpu_to_le16(packet_len));
735
736 //DEBUG_INFO("%s() wrapped", __FUNCTION__);
737 *skb_out = skb_in;
738 return CP_LKM_WRAPPER_RES_DONE;
739}
740
741static int cp_lkm_pegasus_wrapper_recv(void* ctxt, int* dst, int* mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out)
742{
743 u32 hdr_size;
744 u32 pkt_size;
745
746 //DEBUG_INFO("%s() unwrap it", __FUNCTION__);
747
748 *skb_out = NULL;
749 *dst = CP_LKM_WRAPPER_DST_DATA;
750 hdr_size = 2;
751
752 if(skb_in == NULL) {
753 //nothing more to do
754 DEBUG_TRACE("%s() done", __FUNCTION__);
755 return CP_LKM_WRAPPER_RES_DONE;
756 }
757
 758 // If we don't have enough for the header, it is an error
759 if(skb_in->len < hdr_size) {
760 dev_kfree_skb_any(skb_in);
761 return CP_LKM_WRAPPER_RES_ERROR;
762 }
 763 //read the pkt size from the 2 byte header and make sure we
 764 //actually have that much data
765 pkt_size = le16_to_cpu(WRAPPER_READ_U16(skb_in->data));
766 if(pkt_size > skb_in->len){
767 DEBUG_ERROR("%s() bad data pkt pkt_size:%d, data size: %d", __FUNCTION__, pkt_size, skb_in->len);
768 dev_kfree_skb_any(skb_in);
769 return CP_LKM_WRAPPER_RES_ERROR;
770 }
 771 //remove the 2 byte length hdr
772 skb_pull(skb_in, hdr_size);
773 *skb_out = skb_in;
774 DEBUG_TRACE("%s() data pkt", __FUNCTION__);
775
776 return CP_LKM_WRAPPER_RES_DONE;
777}
778
779//===================direct ip wrapper
780#define SIERRA_DIRECTIP_UPLINK_DATA_INDICATION_MSGID 0x3F
781#define SIERRA_DIRECTIP_UPLINK_DATA_INDICATION_EXTENDED_MSGID 0x0002
782#define SIERRA_DIRECTIP_UPLINK_DATA_INDICATION_MSG_SPECIFIC_ID 0x00
783#define SIERRA_DIRECTIP_HDR_SIZE 6
784#define SIERRA_DIRECTIP_ETHER_SIZE 14
785static int cp_lkm_dip_wrapper_send(void* ctxt, int src, int mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out)
786{
787 u32 packet_len;
788 u32 hdr_len;
789 cp_lkm_wrapper_state_t wrapper_state = cp_lkm_generic_wrapper_get_state(ctxt, CP_LKM_WRAPPER_DEFAULT_ID);
790
791 *skb_out = NULL;
792 //DEBUG_INFO("%s() wrap it", __FUNCTION__);
793
794 if(skb_in == NULL) {
795 DEBUG_ERROR("%s() NULL skb_in, shouldn't happen", __FUNCTION__);
796 return CP_LKM_WRAPPER_RES_ERROR;
797 }
798
799 //in ctrl mode, we don't put a wrapper on (only after data comes up)
800 if(wrapper_state == CP_LKM_WRAPPER_CTRL) {
801 *skb_out = skb_in;
802 DEBUG_TRACE("%s() ctrl pkt", __FUNCTION__);
803 return CP_LKM_WRAPPER_RES_DONE;
804 }
805 //DEBUG_INFO("%s() wrapping", __FUNCTION__);
806
807 // Add header:
808 // HIP header: 6 bytes
809 // Fake ethernet hdr: 14 bytes
810 hdr_len = SIERRA_DIRECTIP_HDR_SIZE + SIERRA_DIRECTIP_ETHER_SIZE;
811 skb_in = cp_lkm_wrapper_skb_make_space(skb_in, hdr_len, 0);
812 if (!skb_in){
813 DEBUG_ERROR("%s() couldn't expand", __FUNCTION__);
814 return CP_LKM_WRAPPER_RES_ERROR;
815 }
816
817 packet_len = skb_in->len;
818 packet_len += SIERRA_DIRECTIP_ETHER_SIZE; //add bytes for the ethernet hdr (the dip hdr isn't counted in the len)
819
820 //ethernet protocol
821 skb_push(skb_in, sizeof(u16));
822 WRAPPER_WRITE_U16(skb_in->data, cpu_to_be16(0x0800));
823
824 //bogus ethernet addrs (modem side doesn't care)
825 skb_push(skb_in, 12);
826 memset(skb_in->data, 0, 12);
827
828 //extended msg id
829 skb_push(skb_in, sizeof(u16));
830 WRAPPER_WRITE_U16(skb_in->data, cpu_to_be16(SIERRA_DIRECTIP_UPLINK_DATA_INDICATION_EXTENDED_MSGID));
831
832 //msg specific id
833 skb_push(skb_in, 1);
834 WRAPPER_WRITE_U8(skb_in->data, SIERRA_DIRECTIP_UPLINK_DATA_INDICATION_MSG_SPECIFIC_ID);
835
836 //msg indication id
837 skb_push(skb_in, 1);
838 WRAPPER_WRITE_U8(skb_in->data, SIERRA_DIRECTIP_UPLINK_DATA_INDICATION_MSGID);
839
840 //len
841 skb_push(skb_in, sizeof(u16));
842 WRAPPER_WRITE_U16(skb_in->data, cpu_to_be16(packet_len));
843
844 //DEBUG_INFO("%s() data pkt", __FUNCTION__);
845 *skb_out = skb_in;
846 return CP_LKM_WRAPPER_RES_DONE;
847}
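
/*
 * Illustrative sketch (not compiled into the module): the 20 bytes that
 * cp_lkm_dip_wrapper_send() pushes one field at a time, shown here as a packed
 * struct for readability. The length field counts the payload plus the 14 byte fake
 * ethernet header but not the 6 byte HIP header itself, and the multi-byte fields
 * are big-endian on the wire.
 */
#if 0
struct cp_lkm_dip_example_uplink_hdr {
 __be16 length; //payload len + 14 byte fake ethernet hdr
 u8 msg_id; //SIERRA_DIRECTIP_UPLINK_DATA_INDICATION_MSGID (0x3F)
 u8 msg_specific_id; //SIERRA_DIRECTIP_UPLINK_DATA_INDICATION_MSG_SPECIFIC_ID (0x00)
 __be16 ext_msg_id; //SIERRA_DIRECTIP_UPLINK_DATA_INDICATION_EXTENDED_MSGID (0x0002)
 u8 eth_addrs[12]; //bogus src/dst MACs, zeroed - the modem ignores them
 __be16 eth_proto; //0x0800 (IPv4)
} __attribute__((packed));
#endif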
848
849static int cp_lkm_dip_wrapper_recv(void* ctxt, int* dst, int* mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out)
850{
851 u32 hdr_size;
852 u32 pkt_size;
853 cp_lkm_wrapper_state_t wrapper_state = cp_lkm_generic_wrapper_get_state(ctxt, CP_LKM_WRAPPER_DEFAULT_ID);
854
855 //DEBUG_INFO("%s() unwrap it", __FUNCTION__);
856
857 *skb_out = NULL;
858 *dst = CP_LKM_WRAPPER_DST_DATA;
859 hdr_size = SIERRA_DIRECTIP_HDR_SIZE + SIERRA_DIRECTIP_ETHER_SIZE;
860
861 if(skb_in == NULL) {
862 //nothing more to do
863 DEBUG_TRACE("%s() done", __FUNCTION__);
864 return CP_LKM_WRAPPER_RES_DONE;
865 }
866
867 //There are no headers on the pkts when in ctrl mode. Only in data mode
868 if(wrapper_state == CP_LKM_WRAPPER_CTRL) {
869 DEBUG_TRACE("%s() ctrl pkt", __FUNCTION__);
870 *skb_out = skb_in;
871 *dst = CP_LKM_WRAPPER_DST_CTRL;
872 return CP_LKM_WRAPPER_RES_DONE;
873 }
874
875 //from here down, they are data packets
876
 877 // If we don't have enough for the headers, it is an error
878 if(skb_in->len < hdr_size) {
879 dev_kfree_skb_any(skb_in);
880 return CP_LKM_WRAPPER_RES_ERROR;
881 }
882 //read the pkt size and make sure have enough data. the pkt size
883 //doesn't include the dip header so add it in for comparison
884 pkt_size = be16_to_cpu(WRAPPER_READ_U16(skb_in->data));
885 if((pkt_size+SIERRA_DIRECTIP_HDR_SIZE) > skb_in->len){
886 dev_kfree_skb_any(skb_in);
887 return CP_LKM_WRAPPER_RES_ERROR;
888 }
889 //remove the dip and ethernet hdrs
890 skb_pull(skb_in, hdr_size);
891 *skb_out = skb_in;
892 DEBUG_TRACE("%s() data pkt", __FUNCTION__);
893
894 return CP_LKM_WRAPPER_RES_DONE;
895}
896
897//===================== msrndis wrapper
898#define MSRNDIS_REMOTE_NDIS_PACKET_MSG 0x00000001 // data packet
899
900struct cp_lkm_msrndis_wrapper_context{
901 struct cp_lkm_wrapper_context common;
902 u32 max_transfer_len;
903};
904
905// data pkt header
 906struct msrndis_data_hdr { // data packet message header (msrndis_hdr precedes this header) (payload immediately follows)
907 u32 data_offset;
908 u32 data_length;
909 u32 OOB_data_offset;
910 u32 OOB_data_length;
911 u32 num_OOB_data_elements;
912 u32 per_packet_info_offset;
913 u32 per_packet_info_length;
914 u32 reserved[2];
915}__attribute__((packed));
916
917struct msrndis_hdr { // general msrndis header at beginning of all messages
918 u32 message_type;
919 u32 message_length;
920} __attribute__((packed));
921
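
/*
 * Illustrative sketch (not compiled into the module): the length bookkeeping for a
 * REMOTE_NDIS_PACKET_MSG as built by cp_lkm_msrndis_wrapper_send() below.
 * data_offset is measured from the start of the data header, data_length covers
 * only the payload, and message_length covers everything including both headers.
 */
#if 0
static void cp_lkm_msrndis_example_lengths(u32 payload_len)
{
 u32 data_offset = sizeof(struct msrndis_data_hdr); //payload starts right after the data hdr
 u32 message_length = sizeof(struct msrndis_hdr) +
 sizeof(struct msrndis_data_hdr) + payload_len; //whole message, headers included

 (void)data_offset; (void)message_length;
}
#endif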
922static int cp_lkm_msrndis_wrapper_send(void* ctxt, int src, int mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out)
923{
924 u32 data_hdr_len;
925 u32 msg_hdr_len;
926 struct msrndis_data_hdr hdr;
927 u32 packet_len;
928
929 *skb_out = NULL;
930 DEBUG_TRACE("%s() wrap it", __FUNCTION__);
931
932 if(skb_in == NULL) {
933 DEBUG_ERROR("%s() NULL skb_in, shouldn't happen", __FUNCTION__);
934 return CP_LKM_WRAPPER_RES_ERROR;
935 }
936
937 // This bad boy has pkt data plus a data hdr plus a msg header (it was created by microsoft after all)
938 packet_len = skb_in->len;
939 data_hdr_len = sizeof(struct msrndis_data_hdr);
940 msg_hdr_len = sizeof(struct msrndis_hdr);
941
942 //need to add space for both headers
943 skb_in = cp_lkm_wrapper_skb_make_space(skb_in, data_hdr_len + msg_hdr_len, 0);
944 if (!skb_in){
945 DEBUG_ERROR("%s() couldn't expand", __FUNCTION__);
946 *skb_out = NULL;
947 return CP_LKM_WRAPPER_RES_ERROR;
948 }
949
950 //create the data hdr
951 memset(&hdr, 0x00, data_hdr_len);
952 hdr.data_offset = cpu_to_le32(data_hdr_len); //data starts after the data hdr
 953 hdr.data_length = cpu_to_le32(packet_len); //the data hdr length field covers only the payload, not the header itself
954 skb_push(skb_in, data_hdr_len);
955 memcpy(skb_in->data, &hdr, data_hdr_len);
956
957 //Create the msg hdr, the length includes the msg header size as well
958 packet_len = skb_in->len + msg_hdr_len;
959
960 skb_push(skb_in, sizeof(u32));
961 WRAPPER_WRITE_U32(skb_in->data, cpu_to_le32(packet_len));
962
963 skb_push(skb_in, sizeof(u32));
964 WRAPPER_WRITE_U32(skb_in->data, cpu_to_le32(MSRNDIS_REMOTE_NDIS_PACKET_MSG));
965
966 DEBUG_TRACE("%s() data pkt", __FUNCTION__);
967 *skb_out = skb_in;
968 return CP_LKM_WRAPPER_RES_DONE;
969}
970
971static int cp_lkm_msrndis_wrapper_recv(void* ctxt, int* dst, int* mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out)
972{
973 u32 data_hdr_len = sizeof(struct msrndis_data_hdr);
974 u32 msg_hdr_len = sizeof(struct msrndis_hdr);
975 struct msrndis_data_hdr hdr;
976 u32 adv = 0;
977 u32 out_len;
978 u32 pkt_len;
979 u32 pkt_type;
980 struct sk_buff *skb_working = NULL;
981
982 struct cp_lkm_wrapper_context* cpwc = (struct cp_lkm_wrapper_context*)ctxt;
983 struct cp_lkm_msrndis_wrapper_context* msrndis_wc = (struct cp_lkm_msrndis_wrapper_context*)ctxt;
984
985// DEBUG_INFO("%s() unwrap it", __FUNCTION__);
986 *skb_out = NULL;
987 *dst = CP_LKM_WRAPPER_DST_DATA;
988
989 if (skb_in) {
990 cpwc->recv_state = CP_LKM_WRAPPER_STATE_INIT;
991 DEBUG_TRACE("%s() done", __FUNCTION__);
992 if (0 == skb_in->len) {
993 dev_kfree_skb_any(skb_in);
994 skb_in = NULL;
995 } else if (msrndis_wc->max_transfer_len == skb_in->len) {
996 DEBUG_INFO("%s() - max transfer - setting split", __FUNCTION__);
997 cpwc->recv_state = CP_LKM_WRAPPER_STATE_SPLIT;
998 }
999 }
1000
1001 skb_working = skb_dequeue(&cpwc->skb_data_recv_list);
1002
1003 if (!skb_working) {
1004 skb_working = skb_in;
1005 } else if (skb_in) {
1006 // append data to skb_working
1007 skb_working = cp_lkm_wrapper_skb_make_space(skb_working, 0, skb_in->len);
1008 if(!skb_working) {
1009 DEBUG_WARN("%s() failed to make space", __FUNCTION__);
1010 return CP_LKM_WRAPPER_RES_ERROR;
1011 }
1012 memcpy(skb_tail_pointer(skb_working), skb_in->data, skb_in->len);
1013 skb_put(skb_working, skb_in->len);
1014 dev_kfree_skb_any(skb_in);
1015 }
1016
1017 if (!skb_working) {
1018 return CP_LKM_WRAPPER_RES_DONE;
1019 }
1020
1021 if(skb_working->len < msg_hdr_len) {
1022 if (CP_LKM_WRAPPER_STATE_SPLIT != cpwc->recv_state) {
1023 DEBUG_INFO("%s() - flushing remaining byte count:%d", __FUNCTION__, skb_working->len);
1024 dev_kfree_skb_any(skb_working);
1025 return CP_LKM_WRAPPER_RES_ERROR;
1026 }
1027
1028 // expecting a split packet
1029 skb_queue_tail(&cpwc->skb_data_recv_list, skb_working);
1030
1031 return CP_LKM_WRAPPER_RES_DONE;
1032 }
1033
1034 pkt_type = le32_to_cpu(WRAPPER_READ_U32(skb_working->data));
1035 skb_pull(skb_working, 4);
1036
1037 pkt_len = le32_to_cpu(WRAPPER_READ_U32(skb_working->data));
1038 skb_pull(skb_working, 4);
1039
1040 // try to determine if this packet len is reasonable
 1041 if (pkt_len > (4 * 1024)) { //pkt_len is unsigned, so only an upper bound check is meaningful
1042 // probably bad packet length - drop the packets
1043 DEBUG_WARN("%s() - bad packet len:%x", __FUNCTION__, pkt_len);
1044 DEBUG_WARN("%s() - flushing remaining byte count:%d", __FUNCTION__, skb_working->len);
1045
1046 dev_kfree_skb_any(skb_working);
1047// DEBUG_ASSERT(0, "bad packet len:%d", pkt_len);
1048
1049 return CP_LKM_WRAPPER_RES_ERROR;
1050 }
1051
1052 if (skb_working->len < data_hdr_len) {
1053 if (CP_LKM_WRAPPER_STATE_SPLIT != cpwc->recv_state) {
1054 DEBUG_INFO("%s() - flushing remaining byte count:%d", __FUNCTION__, skb_working->len);
1055 dev_kfree_skb_any(skb_working);
1056 return CP_LKM_WRAPPER_RES_ERROR;
1057 }
1058
1059 // expecting a split packet
1060 skb_push(skb_working, msg_hdr_len);
1061 skb_queue_tail(&cpwc->skb_data_recv_list, skb_working);
1062
1063 return CP_LKM_WRAPPER_RES_DONE;
1064 }
1065 memcpy(&hdr, skb_working->data, data_hdr_len);
1066 hdr.data_offset = le32_to_cpu(hdr.data_offset);
1067 hdr.data_length = le32_to_cpu(hdr.data_length);
1068 skb_pull(skb_working, data_hdr_len);
1069
1070 //account for any gaps between the end of the hdr and the start of data
1071 if(hdr.data_offset > data_hdr_len) {
1072 adv = hdr.data_offset - data_hdr_len;
1073 if(skb_working->len < adv) {
1074 if (CP_LKM_WRAPPER_STATE_SPLIT != cpwc->recv_state) {
1075 DEBUG_INFO("%s() - flushing remaining byte count:%d", __FUNCTION__, skb_working->len);
1076 dev_kfree_skb_any(skb_working);
1077 return CP_LKM_WRAPPER_RES_ERROR;
1078 }
1079
1080 // expecting a split packet
1081 skb_push(skb_working, msg_hdr_len + data_hdr_len);
1082 skb_queue_tail(&cpwc->skb_data_recv_list, skb_working);
1083 return CP_LKM_WRAPPER_RES_DONE;
1084 }
1085
1086 skb_pull(skb_working, adv);
1087 }
1088
1089 if(skb_working->len < hdr.data_length) {
1090 if (CP_LKM_WRAPPER_STATE_SPLIT != cpwc->recv_state) {
1091 DEBUG_INFO("%s() - flushing remaining byte count:%d", __FUNCTION__, skb_working->len);
1092 dev_kfree_skb_any(skb_working);
1093 return CP_LKM_WRAPPER_RES_ERROR;
1094 }
1095 DEBUG_TRACE("%s() data pkt", __FUNCTION__);
1096
1097 // expecting a split packet
1098 skb_push(skb_working, msg_hdr_len + data_hdr_len + adv);
1099 skb_queue_tail(&cpwc->skb_data_recv_list, skb_working);
1100 return CP_LKM_WRAPPER_RES_DONE;
1101 }
1102
1103 out_len = hdr.data_length;
1104
1105 if (MSRNDIS_REMOTE_NDIS_PACKET_MSG != pkt_type) {
1106 out_len = msg_hdr_len + data_hdr_len + adv + hdr.data_length;
1107 skb_push(skb_working, msg_hdr_len + data_hdr_len + adv);
1108
1109 }
1110
1111 *skb_out = skb_clone(skb_working, GFP_ATOMIC);
1112 if (!(*skb_out)) {
1113 DEBUG_WARN("%s() - couldn't clone skb", __FUNCTION__);
1114 dev_kfree_skb_any(skb_working);
1115 return CP_LKM_WRAPPER_RES_ERROR;
1116 }
1117 skb_set_tail_pointer(*skb_out, out_len);
1118 (*skb_out)->len = out_len;
1119
1120 skb_pull(skb_working, out_len);
1121
1122 if (skb_working->len) {
1123 DEBUG_INFO("%s() complete pkt with remaining data: %d", __FUNCTION__, skb_working->len);
1124 skb_queue_tail(&cpwc->skb_data_recv_list, skb_working);
1125 *dst = (MSRNDIS_REMOTE_NDIS_PACKET_MSG == pkt_type) ? CP_LKM_WRAPPER_DST_DATA : CP_LKM_WRAPPER_DST_CTRL;
1126 return CP_LKM_WRAPPER_RES_AGAIN;
1127 }
1128
1129 dev_kfree_skb_any(skb_working);
1130 *dst = (MSRNDIS_REMOTE_NDIS_PACKET_MSG == pkt_type) ? CP_LKM_WRAPPER_DST_DATA : CP_LKM_WRAPPER_DST_CTRL;
1131 return CP_LKM_WRAPPER_RES_DONE;
1132
1133}
1134
1135static void* cp_lkm_msrndis_wrapper_alloc(cp_lkm_wrapper_type_t wrapper, void* wrapper_info, int len)
1136{
1137 struct cp_lkm_msrndis_wrapper_context* msrndis_wc;
1138 struct cp_lkm_wrapper_context* wc;
1139
1140 msrndis_wc = kzalloc(sizeof(struct cp_lkm_msrndis_wrapper_context), GFP_KERNEL);
1141 if(!msrndis_wc) {
1142 return NULL;
1143 }
1144
1145 if(wrapper_info) {
1146 msrndis_wc->max_transfer_len = *((u32*)(wrapper_info));
1147 DEBUG_INFO("%s(), max transfer:%d", __FUNCTION__, msrndis_wc->max_transfer_len);
1148 }
1149 else{
1150 DEBUG_ERROR("%s(),no max transfer set", __FUNCTION__);
1151 }
1152
1153 wc = (struct cp_lkm_wrapper_context*)msrndis_wc;
1154 cp_lkm_wrapper_common_init(wc);
1155 wc->wrapper = wrapper;
1156 wc->send = cp_lkm_msrndis_wrapper_send;
1157 wc->recv = cp_lkm_msrndis_wrapper_recv;
1158
1159 return msrndis_wc;
1160
1161}
1162
1163
1164
1165//============== NCM wrapper
 1166//There are 2 modes of operation for an NCM device, 16 bit and 32 bit. 16 bit mode allows for transfer blocks up to 64K in length,
 1167//while 32 bit mode allows for 4G length blocks. We will be using the 16 bit mode, which is set in plug.
1168
1169#define NTB_HEADER_SIGNATURE 0x484D434E //"NCMH" 16 bit transfer blocks signature.
1170#define NDP_SIGNATURE_NO_CRC 0x304D434E //"NCM0"
1171
1172///
1173/// THIS STRUCTURE MUST BE THE SAME AS THE ONE IN ncm_modem.h
1174///
1175struct ncm_ntb_parameters{
1176 u16 wLength;
1177 u16 bmNtbFormatsSupported;
1178 u32 dwNtbInMaxSize;
1179 u16 wNdpInDivisor;
1180 u16 wNdpInPayloadRemainder;
1181 u16 wNdpInAlignment;
1182 u16 reserved;
1183 u32 dwNtbOutMaxSize;
1184 u16 wNdpOutDivisor;
1185 u16 wNdpOutPayloadRemainder;
1186 u16 wNdpOutAlignment;
1187 u16 wNtbOutMaxDatagrams;
1188};
1189
1190//NCM Transfer Header (NTH)
1191struct ncm_transfer_header {
1192 u32 signature;
1193 u16 header_length;
1194 u16 sequence_number;
1195 u16 ntb_length;//length of entire block
1196 u16 ndp_index; //Offset in block in NDP
1197}__attribute__ ((packed));
1198
1199struct ncm_datagram_info {
1200 u16 index;
1201 u16 length;
1202}__attribute__ ((packed));
1203
1204//NCM Datagram Pointers (NDP)
1205struct ncm_datagram_pointers {
1206 u32 signature;
1207 u16 length; // Size of this NDP. Must be multiple of 4, and at least 0x10
1208 u16 next_ndp_index; //offset of next ndp in NTB.
 1209 struct ncm_datagram_info datagram_info[2]; //Setting to one datagram for now. It's 2 due to the NULL tail item required in the list.
1210}__attribute__ ((packed));
1211
1212struct cp_lkm_ncm_wrapper_context{
1213 struct cp_lkm_wrapper_context common;
1214 u32 nth_seq_num;
1215 u32 datagram_offset;
 1216 struct ncm_ntb_parameters ntb_parms; // NTB parameters (sizes, alignment, divisor) passed in at alloc time
1217};
1218
1219static int cp_lkm_ncm_wrapper_send(void* ctxt, int src, int mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out)
1220{
1221 u32 ndp_padding, datagram_padding, align_factor, total_size, header_size;
1222 u32 payload = skb_in->len;
1223 void *ptr = NULL;
1224 struct cp_lkm_ncm_wrapper_context* ncmwc = (struct cp_lkm_ncm_wrapper_context*)ctxt;
1225
1226 *skb_out = NULL;
1227
1228 //should never see this in here
1229 if(skb_in == NULL) {
1230 return CP_LKM_WRAPPER_RES_DONE;
1231 }
1232
1233 header_size = sizeof(struct ncm_transfer_header);
1234
 1235 //Need to align the NDP by the align value so that offset%align = 0. Add the align value -1 and mask off by its inverse to get the aligned offset.
 1236 //Then subtract the current header size to get the padding. (A worked example of this math is sketched after this function.)
1237 align_factor = ncmwc->ntb_parms.wNdpInAlignment - 1;
1238 ndp_padding = ((header_size + align_factor) & ~align_factor) - header_size;
1239 header_size += ndp_padding;
1240
1241 header_size += sizeof(struct ncm_datagram_pointers);
1242
 1243
 1244
 1245 //Need to align the datagram payload by the divisor value + the remainder value. offset%divisor = 0. Add the divisor value -1 and mask off by its inverse to get the aligned offset.
 1246 //Then subtract the current header size to get the alignment padding and add the remainder to get the total padding.
1247 align_factor = ncmwc->ntb_parms.wNdpInDivisor - 1;
1248 datagram_padding = (((header_size + align_factor) & ~align_factor) - header_size) + ncmwc->ntb_parms.wNdpInPayloadRemainder;
1249
1250 header_size += datagram_padding;
1251
1252 total_size = header_size + payload;
1253
1254 //Need to account for our max transfer size allowed by modem. Current modem is 400K, should never hit this.
1255 if (ncmwc->ntb_parms.dwNtbInMaxSize < total_size) {
1256 dev_kfree_skb_any(skb_in);
1257 return CP_LKM_WRAPPER_RES_ERROR;
1258 }
1259
1260 //add space for the header to skb_in
1261 skb_in = cp_lkm_wrapper_skb_make_space(skb_in, header_size, 0);
1262 if(!skb_in) {
1263 DEBUG_WARN("%s() couldn't make space", __FUNCTION__);
1264 return CP_LKM_WRAPPER_RES_ERROR;
1265 }
1266
1267 //write NCM Pkt hdr
1268 ptr = (void *)skb_push(skb_in, header_size);
1269 memset(ptr, 0, header_size);
1270
1271 WRAPPER_WRITE_U32(ptr, cpu_to_le32(NTB_HEADER_SIGNATURE));
1272 ptr +=4;
1273
1274 //Write the header size
1275 WRAPPER_WRITE_U16(ptr, cpu_to_le16(sizeof(struct ncm_transfer_header)));
1276 ptr +=4; //Moving 2 to skip using optional sequence number
1277
1278 //Total NTB size
1279 WRAPPER_WRITE_U16(ptr, cpu_to_le16(skb_in->len));
1280 ptr += 2;
1281
1282 //Index of first ndp
1283 WRAPPER_WRITE_U16(ptr, cpu_to_le16(sizeof(struct ncm_transfer_header) + ndp_padding));
1284 ptr += (2 + ndp_padding);
1285
1286 //Write the ndp
1287 WRAPPER_WRITE_U32(ptr, cpu_to_le32(NDP_SIGNATURE_NO_CRC));
1288 ptr +=4;
1289
1290 //Write the ndp size
1291 WRAPPER_WRITE_U16(ptr, cpu_to_le16(sizeof(struct ncm_datagram_pointers)));
1292 ptr +=4; //Moving past 2 reserved as well
1293
 1294 //Write the datagram index. The datagram itself starts right after the headers (at header_size)
1295 WRAPPER_WRITE_U16(ptr, cpu_to_le16(header_size));
1296 ptr +=2;
1297
1298 //Write the datagram length.
1299 WRAPPER_WRITE_U16(ptr, cpu_to_le16(payload));
1300
1301 //tail entry 0'd in memset.
1302
1303 *skb_out = skb_in;
1304 return CP_LKM_WRAPPER_RES_DONE;
1305}
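
/*
 * Illustrative sketch (not compiled into the module): the padding math above, worked
 * through with assumed (hypothetical) NTB parameters of wNdpInAlignment=4,
 * wNdpInDivisor=4 and wNdpInPayloadRemainder=0. With those values the 12 byte NTH is
 * already 4 byte aligned, so the NDP follows immediately and the single datagram
 * starts right after the 16 byte NDP, giving a 28 byte header block.
 */
#if 0
static u32 cp_lkm_ncm_example_header_size(u16 align, u16 divisor, u16 remainder)
{
 u32 hdr = sizeof(struct ncm_transfer_header); //12 bytes
 u32 mask = align - 1;

 hdr = (hdr + mask) & ~mask; //pad so the NDP lands on the alignment boundary
 hdr += sizeof(struct ncm_datagram_pointers); //16 bytes (one datagram entry + terminator)

 mask = divisor - 1;
 hdr = ((hdr + mask) & ~mask) + remainder; //pad so the datagram obeys divisor/remainder

 return hdr; //12 + 16 = 28 for the assumed 4/4/0 values
}
#endif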
1306
1307/*
1308 * -------------------------------------
1309 * | Signature | NCM Transfer Block
1310 * -------------------------------------
1311 * | Header Length |
1312 * -------------------------------------
1313 * | Sequence Number |
1314 * -------------------------------------
1315 * | Total Packet Length |
1316 * -------------------------------------
1317 * | NDP Index |
1318 * -------------------------------------
1319 *
1320 *
1321 * -------------------------------------
1322 * | Signature | NCM Datagram Pointers
1323 * -------------------------------------
1324 * | Header Length |
1325 * -------------------------------------
1326 * | Index to next NDP |
1327 * -------------------------------------
1328 * | Datagram[0] index |
1329 * -------------------------------------
1330 * | Datagram[0] length |
1331 * -------------------------------------
1332 * | Datagram[1] index |
1333 * -------------------------------------
1334 * | Datagram[1] length |
1335 * -------------------------------------
1336 * .
1337 * .
1338 * .
1339 * -------------------------------------
1340 * | Datagram[n] index |
1341 * -------------------------------------
1342 * | Datagram[n] length |
1343 * -------------------------------------
1344 * | 0 | Termination of header
1345 * -------------------------------------
1346 * | 0 | Termination of header
1347 * ------------------------------------
1348 *
1349 * Ethernet packets....
1350 *
1351 *
1352 * This function processes the NCM Transfer Block. It can consist
1353 * of multiple Ethernet pkts. We specified the max size we could
 1354 * handle during plug. Only a single SKB should ever be sent.
1355*/
1356static int cp_lkm_ncm_wrapper_recv(void* ctxt, int* dst, int* mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out)
1357{
1358 u32 tmp_val;
1359 u16 nth_len, ndp_len, datagram_index, datagram_len;
1360 struct sk_buff *ncm_skb_out;
1361 unsigned char *ptr = NULL, *tmp_ptr = NULL;
1362 struct cp_lkm_wrapper_context* cpwc = (struct cp_lkm_wrapper_context*)ctxt;
1363 struct cp_lkm_ncm_wrapper_context* ncmwc = (struct cp_lkm_ncm_wrapper_context*)cpwc;
1364 cp_lkm_wrapper_state_t wrapper_state = cp_lkm_generic_wrapper_get_state(ctxt, CP_LKM_WRAPPER_DEFAULT_ID);
1365
1366 *skb_out = NULL;
1367 *dst = CP_LKM_WRAPPER_DST_DATA;
1368
1369 //skb_in is NULL when the caller is recalling us to finish processing the skb.
1370 if(NULL != skb_in) {
1371 //print_hex_dump(KERN_INFO, "SKB_IN:", DUMP_PREFIX_ADDRESS, 16, 1, skb_in->data, 64, false);
1372
1373 ptr = (void *)skb_in->data;
1374 //There are no headers on the pkts when in ctrl mode. Only in data mode. Shouldn't see control on data eps
1375 if(wrapper_state == CP_LKM_WRAPPER_CTRL) {
1376 *skb_out = skb_in;
1377 *dst = CP_LKM_WRAPPER_DST_CTRL;
1378 return CP_LKM_WRAPPER_RES_DONE;
1379 }
1380
1381 // Not enough data for the headers, it is an error.
1382 if(skb_in->len < sizeof(struct ncm_transfer_header) + sizeof(struct ncm_datagram_pointers)) {
1383 //DEBUG_ERROR("%s() NCM ERROR: NCM packet size error, len: %d", __FUNCTION__,skb_in->len);
1384 goto error;
1385 }
1386
1387 //get the signature.
1388 tmp_val = le32_to_cpu(WRAPPER_READ_U32(ptr));
1389 ptr +=4;
1390 if (tmp_val != NTB_HEADER_SIGNATURE) {
 1391 DEBUG_ERROR("%s() NCM ERROR: Invalid NCM Signature: 0x%X", __FUNCTION__, tmp_val);
1392 goto error;
1393 }
1394
1395 //Check the header length
1396 nth_len = le16_to_cpu(WRAPPER_READ_U16(ptr));
1397 ptr +=2;
1398 if (nth_len != sizeof(struct ncm_transfer_header)) {
1399 DEBUG_ERROR("%s() NCM ERROR: Invalid NTH Size: %d", __FUNCTION__, nth_len);
1400 goto error;
1401 }
1402
1403 ncmwc->nth_seq_num = le16_to_cpu(WRAPPER_READ_U16(ptr));
1404 ptr +=2;
1405
1406 //Get the total packet length
1407 tmp_val = le16_to_cpu(WRAPPER_READ_U16(ptr));
1408 ptr +=2;
1409 if (tmp_val != skb_in->len || tmp_val > ncmwc->ntb_parms.dwNtbOutMaxSize) {
 1410 DEBUG_ERROR("%s() NCM ERROR: Invalid length: 0x%X, skb_in->len: 0x%X, dwNtbOutMaxSize: 0x%X", __FUNCTION__, tmp_val, skb_in->len, ncmwc->ntb_parms.dwNtbOutMaxSize);
1411 goto error;
1412 }
1413
1414 //Get NDP index
1415 tmp_val = le16_to_cpu(WRAPPER_READ_U16(ptr));
1416 //Validate against spec. Table 3-2
1417 if (((tmp_val % 4) != 0) && (tmp_val < nth_len)) {
 1418 DEBUG_ERROR("%s() NCM ERROR: Invalid NDP index: 0x%X", __FUNCTION__, tmp_val);
1419 goto error;
1420 }
1421
1422 //Move pointer to ndp offset
1423 ptr = ((void *)skb_in->data) + tmp_val;
1424
1425 //get the signature.
1426 tmp_val = le32_to_cpu(WRAPPER_READ_U32(ptr));
1427 ptr +=4;
1428 //We specified no CRC during plug
1429 if (tmp_val != NDP_SIGNATURE_NO_CRC) {
 1430 DEBUG_ERROR("%s() NCM ERROR: Invalid NDP Signature: 0x%X", __FUNCTION__, tmp_val);
1431 goto error;
1432 }
1433
1434 //Check the header length
1435 ndp_len = le16_to_cpu(WRAPPER_READ_U16(ptr));
1436 ptr +=2;
 1437 //Need to subtract size of ncm_datagram_info from size of ncm_datagram_pointers to account for an empty NTB.
1438 if ((ndp_len < sizeof(struct ncm_datagram_pointers)-sizeof(struct ncm_datagram_info))|| (ndp_len % 4 != 0)) {
 1439 DEBUG_ERROR("%s() NCM ERROR: Invalid NDP Size: %d", __FUNCTION__, ndp_len);
1440 goto error;
1441 }
1442
1443 //Move past 2 bytes reserved.
1444 ptr += 2;
1445
1446 //Validate datagram pointers. There must be a terminator entry or the
1447 //entire packet is to be refused. Section 3.7
1448 tmp_ptr = ptr;
 1449 ndp_len -= 8; //Subtract the header to get the length of the datagram pointers in bytes.
1450 while (0 < ndp_len) {
1451 datagram_index = le16_to_cpu(WRAPPER_READ_U16(tmp_ptr));
1452 tmp_ptr +=2;
1453
1454 datagram_len = le16_to_cpu(WRAPPER_READ_U16(tmp_ptr));
1455 tmp_ptr +=2;
1456
1457 //Need to check for early 0's.
1458 if (0 == datagram_index && 0 == datagram_len) {
1459 break;
1460 }
1461
1462 ndp_len -= sizeof(struct ncm_datagram_info);
1463 }
1464
1465 //We should be at the terminator value.
1466 if (datagram_index != 0 && datagram_len != 0) {
1467 goto error;
1468 }
1469
1470 datagram_index = le16_to_cpu(WRAPPER_READ_U16(ptr));
1471 ptr +=2;
1472
1473 datagram_len = le16_to_cpu(WRAPPER_READ_U16(ptr));
1474 ptr +=2;
1475
1476 } else {
1477
1478 //We'd better have an offset
1479 if (0 == ncmwc->datagram_offset) {
1480 goto error;
1481 }
1482
1483 skb_in = skb_dequeue(&cpwc->skb_data_recv_list);
1484 //We'd better have a queue'd skb for us to process.
1485 if (NULL == skb_in) {
1486 goto error;
1487 }
1488
1489 ptr = skb_in->data + ncmwc->datagram_offset;
1490 //print_hex_dump(KERN_INFO, "Data Gram PTRs:", DUMP_PREFIX_ADDRESS, 16, 1, ptr, 64, false);
1491
1492 //read the next datagram info
1493 datagram_index = le16_to_cpu(WRAPPER_READ_U16(ptr));
1494 ptr +=2;
1495 datagram_len = le16_to_cpu(WRAPPER_READ_U16(ptr));
1496 ptr +=2;
1497
1498 //DEBUG_TRACE("%s() dp_index: 0x%lX", __FUNCTION__, datagram_index);
1499 //DEBUG_TRACE("%s() datagram_len: 0x%lX", __FUNCTION__, datagram_len);
1500 }
1501
1502 //Save offset to next datagram pointer
1503 ncmwc->datagram_offset = ptr - skb_in->data;
1504
 1505 //Handle NULL datagram pointer entries (Section 3.7). The terminator has both fields set to 0,
 1506 //and the spec says to ignore everything after either field goes to 0.
1507 if (0 == datagram_index || 0 == datagram_len) {
1508 if(skb_in) {
1509 dev_kfree_skb_any(skb_in);
1510 }
1511 ncmwc->datagram_offset = 0;
1512 return CP_LKM_WRAPPER_RES_DONE;
1513 }
1514
1515 //copy out the data packet
1516 ncm_skb_out = skb_clone(skb_in, GFP_ATOMIC);
1517 if (!ncm_skb_out) {
1518 DEBUG_ERROR("%s() Failed to clone skb_in", __FUNCTION__);
1519 goto error;
1520 }
1521 ncm_skb_out->len = datagram_len;
1522 ncm_skb_out->data += datagram_index;
1523
1524 skb_set_tail_pointer(ncm_skb_out, ncm_skb_out->len);
1525 *skb_out = ncm_skb_out;
1526
1527 //print_hex_dump(KERN_INFO, "skb_out:", DUMP_PREFIX_ADDRESS, 0, 1, ncm_skb_out->data, 64, false);
1528
1529 //Check next datagram pointer for terminator
1530 datagram_index = le16_to_cpu(WRAPPER_READ_U16(ptr));
1531 ptr +=2;
1532
1533 datagram_len = le16_to_cpu(WRAPPER_READ_U16(ptr));
1534
1535 if (0 == datagram_index || 0 == datagram_len) {
1536 if(skb_in) {
1537 dev_kfree_skb_any(skb_in);
1538 }
1539 ncmwc->datagram_offset = 0;
1540 return CP_LKM_WRAPPER_RES_DONE;
1541 }
1542
 1543 //Not done, so queue up for the next call. We need to come back to process the remaining datagram(s).
1544 skb_queue_tail(&cpwc->skb_data_recv_list, skb_in);
1545 return CP_LKM_WRAPPER_RES_AGAIN;
1546
1547error:
1548 if(skb_in) {
1549 dev_kfree_skb_any(skb_in);
1550 }
1551
1552 ncmwc->datagram_offset = 0;
1553 return CP_LKM_WRAPPER_RES_ERROR;
1554
1555}
1556
1557static void* cp_lkm_ncm_wrapper_alloc(cp_lkm_wrapper_type_t wrapper, void* wrapper_info, int len)
1558{
1559
1560 struct cp_lkm_ncm_wrapper_context* ncmwc;
1561 struct cp_lkm_wrapper_context* wc;
1562
1563 DEBUG_TRACE("%s() ", __FUNCTION__);
1564 ncmwc = kzalloc(sizeof(struct cp_lkm_ncm_wrapper_context), GFP_KERNEL);
1565 if(!ncmwc) {
1566 return NULL;
1567 }
1568 if(wrapper_info) {
1569 memcpy(&ncmwc->ntb_parms,(struct ncm_ntb_parameters*)(wrapper_info), sizeof(struct ncm_ntb_parameters));
1570 }
1571 else{
1572 DEBUG_ERROR("%s(),no ncm ntb parameters", __FUNCTION__);
1573 return NULL;
1574 }
1575 wc = (struct cp_lkm_wrapper_context*)ncmwc;
1576 cp_lkm_wrapper_common_init(wc);
1577 wc->wrapper = wrapper;
1578 wc->send = cp_lkm_ncm_wrapper_send;
1579 wc->recv = cp_lkm_ncm_wrapper_recv;
1580
1581 ncmwc->datagram_offset = 0;
1582 return ncmwc;
1583
1584}
1585
1586
1587
1588// ===== QMAP wrapper ================================================================
1589
1590/*
1591 * qmap mux header:
1592 *
1593 * |-----------------------|-----------------------|-----------------------|-----------------------|
1594 * Octet: | 0 | 1 | 2 | 3 |
1595 * |-----------------------|-----------------------|-----------------------|-----------------------|
1596 * Bit : |00|01|02|03|04|05|06|07|08|09|10|11|12|13|14|15|16|17|18|19|20|21|22|23|24|25|26|27|28|29|30|31|
1597 * |-----------------------|-----------------------|-----------------------|-----------------------|
1598 * Field: |C |R | Pad Bytes | Mux ID | Payload Len With Padding |
1599 * |-----------------------|-----------------------|-----------------------|-----------------------|
1600 *
1601 * C : QMAP control or data packet.
1602 * 1 - QMAP control command
1603 * 0 - Data packet
1604 * R : Reserved
1605 * PAD : Number of bytes padded to achieve 4 byte alignment. Padded bytes can be 0 or not.
1606 * This is only needed if aggregating packets and need next packet to be 4 byte aligned
1607 * Payload Len: Total payload length in bytes including padding (not including header)
1608 *
1609 * Notes: QMAP can aggregate IP packets, each with its own QMAP header in a single USB transfer.
1610 * QMAP adds an empty header at the end, with mux id 0, and len 0.
1611 *
1612*/
1613
1614struct qmap_hdr{
1615 u8 pad_bytes;
1616 u8 mux_id;
1617 u16 payload_len;
1618} __attribute__((packed));
1619
1620
1621#define CP_LKM_QMAP_DATA 0
1622#define CP_LKM_QMAP_CTRL 1
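
/*
 * Illustrative sketch (not compiled into the module): worked example of the QMAP
 * header fields for a data packet, matching the diagram above. A 61 byte IP packet
 * on mux_id 1 needs 3 pad bytes to reach 4 byte alignment, so the header carries
 * pad=3, mux_id=1 and a big-endian payload length of 64 (payload plus pad, header
 * not included).
 */
#if 0
static void cp_lkm_qmap_example_hdr(struct qmap_hdr* qmh, u8 mux, u16 ip_len)
{
 u8 pad = (ip_len & 3) ? (4 - (ip_len & 3)) : 0;

 qmh->pad_bytes = pad; //C bit clear (data packet), pad count in the low bits
 qmh->mux_id = mux;
 qmh->payload_len = cpu_to_be16(ip_len + pad); //e.g. 61 -> 64
}
#endif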
1623
1624static void* cp_lkm_qmap_wrapper_alloc(cp_lkm_wrapper_type_t wrapper, void* wrapper_info, int len)
1625{
1626 struct cp_lkm_wrapper_context* cpwc;
1627 cpwc = kzalloc(sizeof(struct cp_lkm_wrapper_context), GFP_KERNEL);
1628 if(!cpwc) {
1629 return cpwc;
1630 }
1631 cp_lkm_wrapper_common_init(cpwc);
1632 cpwc->wrapper = wrapper;
1633 cpwc->hdr_size = sizeof(struct qmap_hdr);
1634 cpwc->send = cp_lkm_qmap_wrapper_send;
1635 cpwc->recv = cp_lkm_qmap_wrapper_recv;
1636 return cpwc;
1637}
1638
1639/*
1640 We only send one QMAP IP packet at a time.
1641 While the spec is not clear on this (at least to me), it appears we are always supposed to add
 1642 a single empty QMAP header at the end.
1643 For us this will look like this:
1644 QMAP Header
1645 IP pkt
1646 padding
1647 Empty QMAP Header (all values 0)
1648
1649*/
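/*
 * Editor's note - worked example of the padding math used by
 * cp_lkm_qmap_wrapper_send() below (packet sizes are hypothetical):
 *
 *   in_len = 1499: in_len & 3 = 3, pad = 4 - 3 = 1
 *                  payload_len = cpu_to_be16(1500), frame = 4 + 1499 + 1 = 1504 bytes
 *   in_len = 1500: in_len & 3 = 0, pad = 0
 *                  payload_len = cpu_to_be16(1500), frame = 4 + 1500 = 1504 bytes
 */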
1650static int cp_lkm_qmap_wrapper_send(void* ctxt, int src, int mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out)
1651{
1652 struct qmap_hdr* qmh;
1653 int in_len;
1654 int hdr_size;
1655 int pad = 0;
1656 int result = CP_LKM_WRAPPER_RES_DONE;
1657
1658 // don't currently care about the wrapper_state, but this is how we would get it if we did
1659 //cp_lkm_wrapper_state_t wrapper_state = cp_lkm_generic_wrapper_get_state(ctxt, mux_id);
1660
1661 hdr_size = sizeof(struct qmap_hdr);
1662 in_len = skb_in->len;
1663 if(in_len & 3) {
1664 pad = 4 - (in_len & 3);
1665 }
1666
1667 //printk("%s() src: %d, len: %d, mux_id: %d, pad: %d\n",__FUNCTION__,src,in_len,mux_id,pad);
1668
1669 //add space for the initial header at the start, plus pad and ending header at the end
1670 skb_in = cp_lkm_wrapper_skb_make_space(skb_in, hdr_size, pad);
1671 if(!skb_in) {
1672 DEBUG_WARN("%s() couldn't make space", __FUNCTION__);
1673 return CP_LKM_WRAPPER_RES_ERROR;
1674 }
1675 skb_push(skb_in, sizeof(struct qmap_hdr));
1676
1677 //add the header at the front
1678 qmh = (struct qmap_hdr*)skb_in->data;
1679 qmh->pad_bytes = CP_LKM_QMAP_DATA + pad; //data packet (C bit clear), pad count in the low bits
1680 qmh->mux_id = mux_id;
1681 qmh->payload_len = cpu_to_be16(in_len+pad); //payload plus pad, big endian on the wire
1682
1683 // CA: determined the empty header is not necessary, but not sure about pad so keeping it.
1684 // add pad (if needed) at the end. The commented-out lines below also appended the empty trailing header.
1685 //memset(skb_tail_pointer(skb_in), 0, sizeof(struct qmap_hdr)+pad);
1686 //skb_put(skb_in, sizeof(struct qmap_hdr)+pad);
1687 memset(skb_tail_pointer(skb_in), 0, pad);
1688 skb_put(skb_in, pad);
1689
1690 *skb_out = skb_in;
1691 return result;
1692
1693}
1694
1695static int cp_lkm_qmap_wrapper_recv(void* ctxt, int* dst, int* mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out)
1696{
1697 int c, pad, len;
1698 struct cp_lkm_wrapper_context* cpwc = (struct cp_lkm_wrapper_context*)ctxt;
1699 struct qmap_hdr qmh;
1700 int hdr_size;
1701 struct sk_buff* tmp_skb;
1702 int result = CP_LKM_WRAPPER_RES_DONE;
1703 //cp_lkm_wrapper_state_t wrapper_state;
1704
1705 hdr_size = sizeof(struct qmap_hdr);
1706
1707 *skb_out = NULL;
1708 *dst = CP_LKM_WRAPPER_DST_DATA;
1709
1710 //skb_in is NULL when we returned 'again' previously and the caller is recalling us. This means there should be
1711 //a queued skb for us to process (see the caller loop sketch after this function).
1712 if(skb_in == NULL) {
1713 //printk("%s() had a pending\n", __FUNCTION__);
1714 skb_in = skb_dequeue(&cpwc->skb_data_recv_list);
1715 }
1716 if(skb_in == NULL) {
1717 //nothing more to do
1718 //printk("%s() done\n", __FUNCTION__);
1719 goto qmap_recv_done;
1720 }
1721 if(skb_in->len < hdr_size){
1722 //printk("%s() not enough data, len: %d, hdr_size: %d\n", __FUNCTION__, skb_in->len, hdr_size);
1723 result = CP_LKM_WRAPPER_RES_ERROR;
1724 goto qmap_recv_done;
1725 }
1726
1727 //read header
1728 memcpy(&qmh, skb_in->data, sizeof(struct qmap_hdr));
1729 qmh.payload_len = be16_to_cpu(qmh.payload_len);
1730
1731 c = qmh.pad_bytes & 0x8;
1732 pad = qmh.pad_bytes & 0x7;
1733 *mux_id = qmh.mux_id;
1734 len = qmh.payload_len; //payload plus pad (doesn't include hdr)
1735 skb_pull(skb_in, hdr_size);
1736
1737 //printk("%s() c: 0x%x, pad: %d, mux_id: 0x%x, pkt len: %d, skb len: %d\n", __FUNCTION__, c,pad,qmh.mux_id,len,skb_in->len);
1738
1739 // don't currently care about the usb state for processing, but if we did this is how we would get it
1740 //wrapper_state = cp_lkm_generic_wrapper_get_state(ctxt, *mux_id);
1741
1742 if(skb_in->len < len){
1743 //printk("%s() not enough data, pkt len: %d, skb len: %d\n", __FUNCTION__, len, skb_in->len);
1744 result = CP_LKM_WRAPPER_RES_ERROR;
1745 goto qmap_recv_done;
1746 }
1747
1748 //printk("%s() pkt len: %d, skb len: %d\n", __FUNCTION__, len, skb_in->len);
1749
1750 if(skb_in->len == (len + sizeof(struct qmap_hdr))){
1751 //this is an exact fit plus an empty hdr at the end.
1752 //Some modems add it, some don't. Dump the empty if present.
1753 skb_set_tail_pointer(skb_in, len);
1754 skb_in->len -= sizeof(struct qmap_hdr);
1755 }
1756
1757 //if exact fit, send it
1758 if (skb_in->len == len){
1759 //printk("%s(), exact fit\n", __FUNCTION__);
1760 skb_trim(skb_in, skb_in->len - pad); //dump the padding if any (adjusts both len and tail)
1761 *skb_out = skb_in;
1762 skb_in = NULL; //so we don't free it below
1763 if (c == CP_LKM_QMAP_CTRL) {
1764 //TODO: decode ctrl packets to find pauses and resumes if we decide to support that
1765 // when not using flow control, what do I do here?
1766 *dst = CP_LKM_WRAPPER_DST_UNKNOWN;
1767 }
1768 else if (len == 0) {
1769 //this is the 0 len header at the end. Tell the outside world to dump it.
1770 *dst = CP_LKM_WRAPPER_DST_UNKNOWN;
1771 }
1772 goto qmap_recv_done;
1773 }
1774
1775 //multiple packets in this one. Clone off the first one and queue the rest.
1776 tmp_skb = skb_clone(skb_in, GFP_ATOMIC);
1777 if (!tmp_skb) {
1778 //printk("%s() couldn't clone skb\n", __FUNCTION__);
1779 result = CP_LKM_WRAPPER_RES_ERROR;
1780 goto qmap_recv_done;
1781 }
1782 tmp_skb->len = len-pad;
1783 skb_set_tail_pointer(tmp_skb, len-pad);
1784 *skb_out = tmp_skb;
1785
1786 //This skb has multiple pkts. We just cloned the first pkt into tmp_skb above. Move past that data and if there
1787 //is any more data left, enqueue it and return 'again' so we can process it.
1788 skb_pull(skb_in, len);
1789
1790 //More data after this one, queue and tell caller to come again sometime
1791 //printk("%s() %d more to do\n", __FUNCTION__, skb_in->len);
1792 skb_queue_tail(&cpwc->skb_data_recv_list, skb_in);
1793 skb_in = NULL;
1794 result = CP_LKM_WRAPPER_RES_AGAIN;
1795
1796 if (c == CP_LKM_QMAP_CTRL) {
1797 //TODO: decode ctrl packets to find pauses and resumes if we decide to support that
1798 // when not using flow control, what do I do here?
1799 *dst = CP_LKM_WRAPPER_DST_UNKNOWN;
1800 }
1801
1802qmap_recv_done:
1803 if(skb_in) {
1804 dev_kfree_skb_any(skb_in);
1805 }
1806 //if error, clear the out skb if any
1807 if(result == CP_LKM_WRAPPER_RES_ERROR) {
1808 if(*skb_out) {
1809 dev_kfree_skb_any(*skb_out);
1810 *skb_out = NULL;
1811 }
1812 }
1813 //printk("%s() done result: %d, dst: %d, mux_id: %d\n", __FUNCTION__, result, *dst, *mux_id);
1814 return result;
1815}
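/*
 * Editor's note - illustrative sketch only, never compiled into the driver.
 * It shows the calling convention the recv path above assumes: hand in the
 * USB skb on the first call, then keep calling with a NULL skb_in while the
 * wrapper returns CP_LKM_WRAPPER_RES_AGAIN so the queued datagrams get
 * drained. The public cp_lkm_wrapper_recv() entry point is defined later in
 * this file; the caller function and deliver_to_network_stack() helper are
 * made up for the example.
 */
#if 0
static void cp_lkm_qmap_recv_example(void* wrapper_ctxt, struct sk_buff* usb_skb)
{
    struct sk_buff* ip_skb;
    int dst, mux_id, res;

    res = cp_lkm_wrapper_recv(wrapper_ctxt, &dst, &mux_id, usb_skb, &ip_skb);
    while (1) {
        if (res == CP_LKM_WRAPPER_RES_ERROR) {
            break; /* the wrapper already freed its skbs on the error path */
        }
        if (ip_skb) {
            if (dst == CP_LKM_WRAPPER_DST_DATA) {
                deliver_to_network_stack(ip_skb, mux_id); /* hypothetical helper */
            } else {
                dev_kfree_skb_any(ip_skb); /* ctrl pkt or empty trailer - dump it */
            }
        }
        if (res != CP_LKM_WRAPPER_RES_AGAIN) {
            break;
        }
        /* more datagrams are queued inside the wrapper - recall with NULL skb_in */
        res = cp_lkm_wrapper_recv(wrapper_ctxt, &dst, &mux_id, NULL, &ip_skb);
    }
}
#endif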
1816
1817
1818//================================ API
1819 //If a wrapper is passed wrapper_info, it must copy what it needs into its own context structure,
1820 //since the caller frees wrapper_info after this function returns (see the usage sketch after the alloc function below).
1821void *cp_lkm_wrapper_instance_alloc(cp_lkm_wrapper_type_t wrapper, void* wrapper_info, int len)
1822{
1823 struct cp_lkm_wrapper_context* cpwc = NULL;
1824
1825 DEBUG_TRACE("%s() wrapper:%d", __FUNCTION__, wrapper);
1826 switch (wrapper) {
1827 case CP_LKM_WRAPPER_TYPE_ASIX:
1828 cpwc = kzalloc(sizeof(struct cp_lkm_wrapper_context), GFP_KERNEL);
1829 if(!cpwc) {
1830 goto wrap_alloc_done;
1831 }
1832 cp_lkm_wrapper_common_init(cpwc);
1833 cpwc->wrapper = wrapper;
1834 cpwc->hdr_size = 4; //4 byte asix hdr
1835 cpwc->send = cp_lkm_asix_wrapper_send;
1836 cpwc->recv = cp_lkm_asix_wrapper_recv;
1837 break;
1838
1839 case CP_LKM_WRAPPER_TYPE_ASIX_88179:
1840 cpwc = cp_lkm_asix88179_wrapper_alloc(wrapper, wrapper_info, len);
1841 break;
1842
1843 case CP_LKM_WRAPPER_TYPE_LG:
1844 // not supported
1845 break;
1846
1847 case CP_LKM_WRAPPER_TYPE_DIRECT_IP:
1848 cpwc = kzalloc(sizeof(struct cp_lkm_wrapper_context), GFP_KERNEL);
1849 if(!cpwc) {
1850 goto wrap_alloc_done;
1851 }
1852 cp_lkm_wrapper_common_init(cpwc);
1853 cpwc->wrapper = wrapper;
1854 cpwc->send = cp_lkm_dip_wrapper_send;
1855 cpwc->recv = cp_lkm_dip_wrapper_recv;
1856 cpwc->hdr_size = 6; //6 byte dip hdr
1857 break;
1858
1859 case CP_LKM_WRAPPER_TYPE_MSRNDIS:
1860 cpwc = cp_lkm_msrndis_wrapper_alloc(wrapper, wrapper_info, len);
1861 break;
1862
1863 case CP_LKM_WRAPPER_TYPE_PEGASUS:
1864 cpwc = kzalloc(sizeof(struct cp_lkm_wrapper_context), GFP_KERNEL);
1865 if(!cpwc) {
1866 goto wrap_alloc_done;
1867 }
1868 cp_lkm_wrapper_common_init(cpwc);
1869 cpwc->wrapper = wrapper;
1870 cpwc->send = cp_lkm_pegasus_wrapper_send;
1871 cpwc->recv = cp_lkm_pegasus_wrapper_recv;
1872 cpwc->hdr_size = 2; //2 byte pegasus hdr
1873 break;
1874
1875 case CP_LKM_WRAPPER_TYPE_NCM:
1876 cpwc = cp_lkm_ncm_wrapper_alloc(wrapper, wrapper_info, len);
1877 break;
1878
1879 case CP_LKM_WRAPPER_TYPE_QMAP:
1880 cpwc = cp_lkm_qmap_wrapper_alloc(wrapper, wrapper_info, len);
1881 break;
1882
1883 default:
1884 cpwc = kzalloc(sizeof(struct cp_lkm_wrapper_context), GFP_KERNEL);
1885 if(!cpwc) {
1886 goto wrap_alloc_done;
1887 }
1888 cp_lkm_wrapper_common_init(cpwc);
1889 cpwc->wrapper = wrapper;
1890 cpwc->send = cp_lkm_generic_wrapper_send;
1891 cpwc->recv = cp_lkm_generic_wrapper_recv;
1892 break;
1893 }
1894
1895wrap_alloc_done:
1896 return cpwc;
1897}
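/*
 * Editor's note - illustrative sketch only, never compiled into the driver.
 * It shows the wrapper_info lifetime rule described above: the NCM wrapper
 * copies the ntb parameters during alloc, so the caller may free or reuse
 * wrapper_info as soon as this function returns. The caller function and
 * its argument are made up for the example.
 */
#if 0
static void* cp_lkm_wrapper_alloc_example(struct ncm_ntb_parameters* ntb_parms)
{
    void* ctxt;
    int hdr_size;

    ctxt = cp_lkm_wrapper_instance_alloc(CP_LKM_WRAPPER_TYPE_NCM, ntb_parms, sizeof(*ntb_parms));
    /* ntb_parms may be freed by the caller now - the wrapper copied what it needed */
    if (!ctxt) {
        return NULL;
    }
    hdr_size = cp_lkm_wrapper_hdr_size(ctxt); /* per-wrapper header bytes (asix, dip, qmap, ...) */
    (void)hdr_size;
    return ctxt;
    /* and eventually: cp_lkm_wrapper_instance_free(ctxt); */
}
#endif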
1898
1899void cp_lkm_wrapper_instance_free(void* ctxt)
1900{
1901 struct cp_lkm_wrapper_context* cpwc = (struct cp_lkm_wrapper_context*)ctxt;
1902
1903 DEBUG_TRACE("%s()", __FUNCTION__);
1904
1905 switch (cpwc->wrapper) {
1906 case CP_LKM_WRAPPER_TYPE_LG:
1907 // not supported
1908 break;
1909 default:
1910 cp_lkm_wrapper_common_cleanup(cpwc);
1911 kfree(ctxt);
1912 break;
1913 }
1914}
1915
1916int cp_lkm_wrapper_hdr_size(void* ctxt)
1917{
1918 struct cp_lkm_wrapper_context* cpwc = (struct cp_lkm_wrapper_context*)ctxt;
1919 return cpwc->hdr_size;
1920}
1921
1922void cp_lkm_wrapper_set_state(void* ctxt, int id, cp_lkm_wrapper_state_t wrapper_state)
1923{
1924 struct cp_lkm_wrapper_context* cpwc = (struct cp_lkm_wrapper_context*)ctxt;
1925 int i;
1926
1927 for (i = 0; i < cpwc->num_state_maps; i++) {
1928 if (cpwc->state_maps[i].id == id) {
1929 cpwc->state_maps[i].wrapper_state = wrapper_state;
1930 return;
1931 }
1932 }
1933 //if we get here, this is a new id
1934 if (cpwc->num_state_maps < MAX_STATE_MAPS) {
1935 cpwc->state_maps[cpwc->num_state_maps].wrapper_state = wrapper_state;
1936 cpwc->state_maps[cpwc->num_state_maps].id = id;
1937 cpwc->num_state_maps++;
1938 }
1939 else{
1940 //DEBUG_ASSERT(cpwc->num_state_maps < MAX_STATE_MAPS, "Too many wrapper ids");
1941 printk("%s() too many state maps, id: %d, state: %d\n",__FUNCTION__, id, wrapper_state);
1942 }
1943}
1944
1945int cp_lkm_wrapper_send(void* ctxt, int src, int mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out)
1946{
1947 struct cp_lkm_wrapper_context* cpwc = (struct cp_lkm_wrapper_context*)ctxt;
1948 int res;
1949 unsigned long flags;
1950
1951// DEBUG_ERROR("%s() ctxt:%p", __FUNCTION__, ctxt);
1952 spin_lock_irqsave(&cpwc->lock, flags);
1953 res = cpwc->send(ctxt, src, mux_id, skb_in, skb_out);
1954 spin_unlock_irqrestore(&cpwc->lock, flags);
1955 return res;
1956}
1957
1958int cp_lkm_wrapper_recv(void* ctxt, int* dst, int* mux_id, struct sk_buff* skb_in, struct sk_buff** skb_out)
1959{
1960 struct cp_lkm_wrapper_context* cpwc = (struct cp_lkm_wrapper_context*)ctxt;
1961 int res;
1962 unsigned long flags;
1963
1964 //DEBUG_ERROR("%s() ctxt:%p", __FUNCTION__, ctxt);
1965 *mux_id = 0; //default this since a lot of wrappers don't set it
1966 spin_lock_irqsave(&cpwc->lock, flags);
1967 res = cpwc->recv(ctxt, dst, mux_id, skb_in, skb_out);
1968 spin_unlock_irqrestore(&cpwc->lock, flags);
1969 return res;
1970}
1971