blob: 0ad30616c6383384000c18d023bca07cd8f35e16 [file] [log] [blame]
Kyle Swenson74ad7532023-02-16 11:05:29 -07001/*
2 * FILE NAME cpmodem_shim.c
3 *
4 * BRIEF MODULE DESCRIPTION
5 * Frankendriver - USB to ethernet, ip or PPP controlled via a block driver.
6 *
7 * Author: CradlePoint Technology, Inc. <source@cradlepoint.com>
8 * Ben Kendall <benk@cradlepoint.com>
9 * Cory Atkin <catkin@cradlepoint.com>
10 *
11 * Copyright 2012, CradlePoint Technology, Inc.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2
15 * as published by the Free Software Foundation.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to:
24 * Free Software Foundation
25 * 51 Franklin Street, Fifth Floor
26 * Boston, MA 02111-1301 USA
27 */
28
29
30// Necessary includes for device drivers
31#include <linux/module.h> // Needed by all modules
32#include <linux/kernel.h> // Needed for KERN_xxxx
33#include <linux/init.h> // Needed for the macros
34#include <linux/cdev.h>
35#include <linux/slab.h> // kmalloc()
36#include <linux/fs.h> // everything...
37#include <linux/poll.h>
38#include <linux/errno.h> // error codes
39#include <linux/types.h> // size_t
40#include <linux/proc_fs.h>
41#include <linux/fcntl.h>
42#include <linux/skbuff.h>
43#include <linux/list.h>
44#include <linux/if_ether.h>
45#include <linux/if_arp.h>
46#include <linux/ethtool.h>
47#include <linux/netdevice.h>
48#include <linux/etherdevice.h>
49#include <linux/inetdevice.h>
50#include <linux/ip.h>
51#include <net/addrconf.h>
52#include <linux/tty.h>
53#include <linux/tty_flip.h>
54#include <linux/spinlock.h>
55#include <linux/ktime.h>
56/* #include <asm/system.h> // cli(), *_flags */
57#include <asm/uaccess.h> // copy_from/to_user
58#include <linux/usb.h>
59#include <linux/version.h> // LINUX_VERSION_CODE
60#include <cpmodem_shim.h>
61#include <cpmodem_wrapper.h>
62
63
64//#define KERNEL_2_6_21 // comment this out for 3.0.29 kernel
65/*********************************************** logging and debug ************************************************/
66
67#define RUNTIME_DEBUG_TRACE (1 << 0)
68#define RUNTIME_DEBUG_INFO (1 << 1)
69#define RUNTIME_DEBUG_WARN (1 << 2)
70#define RUNTIME_DEBUG_ERROR (1 << 3)
71#define RUNTIME_LOG 0
72#define RUNTIME_ASSERT -1
73
74//#undef RUNTIME_DEBUG
75//#define RUNTIME_DEBUG ( /*RUNTIME_DEBUG_TRACE |*/ RUNTIME_DEBUG_INFO | RUNTIME_DEBUG_WARN | RUNTIME_DEBUG_ERROR )
76
77
78static int cp_lkm_log_level = 0;
79
80#ifdef RUNTIME_DEBUG
81static const char *cp_lkm_shim_runtime_debug_level_str[] = {
82 "ASSERT",
83 "TRACE",
84 "INFO",
85 "WARN",
86 "ERROR",
87};
88#else
89static const char *cp_lkm_shim_debug_log_level_str[] = {
90 "ASSERT",
91 "ERROR",
92 "WARN",
93 "INFO",
94 "TRACE",
95 "PRINTF"
96};
97#endif
98
99static int cp_out_get_level_index(int level)
100{
101 int level_index = 0;
102 while (level) {
103 level = level >> 1;
104 level_index++;
105 }
106 return level_index;
107}
108
109static void cp_out(int level, const char * file, int line, const char *fmt, ...)
110{
111 int file_str_len = 0;
112 char *file_pos = (char *)file;
113 char *fmt1;
114 va_list arg;
115 int level_index = 0;
116 const char *level_str = NULL;
117 const char *kernel_lvl_str = NULL;
118
119 if (level>0) { // level of 0 is LOG and -1 is ASSERT - always output
120 level_index = cp_out_get_level_index(level);
121
122#ifdef RUNTIME_DEBUG
123 if (!(RUNTIME_DEBUG & level)) {
124 return;
125 }
126 level_str = cp_lkm_shim_runtime_debug_level_str[level_index];
127#else
128 if (!(cp_lkm_log_level & level)) {
129 return;
130 }
131 level_str = cp_lkm_shim_debug_log_level_str[level_index];
132#endif
133 }
134
135
136 switch(level) {
137 case RUNTIME_DEBUG_TRACE:
138 kernel_lvl_str = KERN_INFO;
139 break;
140 case RUNTIME_DEBUG_INFO:
141 kernel_lvl_str = KERN_INFO;
142 break;
143 case RUNTIME_DEBUG_WARN:
144 kernel_lvl_str = KERN_WARNING;
145 break;
146 case RUNTIME_DEBUG_ERROR:
147 kernel_lvl_str = KERN_ERR;
148 break;
149 case RUNTIME_LOG:
150 kernel_lvl_str = KERN_INFO;
151 break;
152 case RUNTIME_ASSERT:
153 kernel_lvl_str = KERN_ERR;
154 break;
155 default:
156 kernel_lvl_str = KERN_INFO;
157 break;
158 }
159
160
161 va_start(arg, fmt);
162
163 if (file) {
164 char *pos = (char *)file;
165 while ((pos = strchr(pos, '/'))) {
166 pos++;
167 file_pos = pos;
168 }
169
170 file_str_len = strlen(file_pos);
171 }
172
173 fmt1 = kmalloc(strlen(fmt) + file_str_len + 12 + 6 + 2, GFP_ATOMIC); // +6 for debug type indication, +2 for linux syslog level
174 if (!fmt1) {
175 return;
176 }
177 if (level_str) {
178 if (file) {
179 sprintf(fmt1, "%s%6s %s(%4d):%s\n", kernel_lvl_str, level_str, file_pos, line, fmt);
180 } else {
181 sprintf(fmt1, "%s%6s %s\n", kernel_lvl_str, level_str, fmt);
182 }
183 } else {
184 if (file) {
185 sprintf(fmt1, "%s%s(%4d):%s\n", kernel_lvl_str, file_pos, line, fmt);
186 } else {
187 sprintf(fmt1, "%s%s\n", kernel_lvl_str, fmt);
188 }
189 }
190 vprintk(fmt1, arg);
191 kfree(fmt1);
192 va_end(arg);
193}
194
195#ifdef RUNTIME_DEBUG
196// assert is always defined if RUNTIME_DEBUG is defined
197// bad idea to kill things in kernel, so we just print the assert msg and keep going
198#define DEBUG_ASSERT(a, args...) \
199 if (!(a)) { \
200 printk(KERN_ERR "\n!!! CPMODEM_SHIM ASSERT !!!\n"); \
201 cp_out(RUNTIME_ASSERT, __FILE__, __LINE__, args); \
202 dump_stack(); \
203 }
204#define DEBUG_TRACE(args...) cp_out(RUNTIME_DEBUG_TRACE, __FILE__, __LINE__, args)
205#define DEBUG_INFO(args...) cp_out(RUNTIME_DEBUG_INFO, __FILE__, __LINE__, args)
206#define DEBUG_WARN(args...) cp_out(RUNTIME_DEBUG_WARN, __FILE__, __LINE__, args)
207#define DEBUG_ERROR(args...) cp_out(RUNTIME_DEBUG_ERROR, __FILE__, __LINE__, args)
208#else
209#define DEBUG_ASSERT(a, args...)
210#define DEBUG_TRACE(args...) cp_out(LOG_DEBUG_LEVEL_TRACE, __FILE__, __LINE__, args)
211
212#define DEBUG_INFO(args...) cp_out(LOG_DEBUG_LEVEL_INFO, __FILE__, __LINE__, args)
213
214#define DEBUG_WARN(args...) cp_out(LOG_DEBUG_LEVEL_WARN, __FILE__, __LINE__, args)
215
216#define DEBUG_ERROR(args...) cp_out(LOG_DEBUG_LEVEL_ERROR, __FILE__, __LINE__, args)
217
218#define DEBUG_PRINTF(args...) cp_out(LOG_DEBUG_LEVEL_PRINTF, __FILE__, __LINE__, args)
219
220#endif
221
222#define LOG(args...) cp_out(RUNTIME_LOG, NULL, 0, args)
223
224/*********************************************** general definitions and helper functions *************************/
225
226// Buffer to store data
227struct cp_lkm_read_msg {
228 struct cp_lkm_msg_hdr hdr;
229 struct sk_buff *skb;
230 struct list_head list;
231};
232
233struct cp_lkm_common_ctx {
234 u8 open_cnt;
235
236 // read operation members
237 wait_queue_head_t inq;
238 struct list_head read_list;
239 spinlock_t read_list_lock;
240 bool reading_data;
241 bool q_waiting;
242 // write operation members
243 struct sk_buff *write_skb;
244
245 int (*open)(struct cp_lkm_common_ctx *ctx); // called at open
246 int (*close)(struct cp_lkm_common_ctx *ctx); // called at close
247 int (*handle_msg)(struct cp_lkm_common_ctx *ctx, struct cp_lkm_msg_hdr *hdr, struct sk_buff *skb); // called at write
248 int (*handle_ioctl)(struct cp_lkm_common_ctx *ctx, int cmd, void *k_argp); // called at ioctl
249};
250
251
252int cp_lkm_open(struct inode *inode, struct file *filp);
253int cp_lkm_release(struct inode *inode, struct file *filp);
254ssize_t cp_lkm_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos);
255ssize_t cp_lkm_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos);
256#ifdef KERNEL_2_6_21
257int cp_lkm_ioctl (struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg);
258#else
259long cp_lkm_ioctl (struct file *filp, unsigned int cmd, unsigned long arg);
260#endif
261unsigned int cp_lkm_poll(struct file *filp, struct poll_table_struct *);
262
263static void cp_lkm_common_ctx_init(struct cp_lkm_common_ctx *common);
264static void cp_lkm_cleanup_msg_list(struct cp_lkm_common_ctx *common);
265static int cp_lkm_post_message(struct cp_lkm_common_ctx *mgr, struct cp_lkm_msg_hdr* hdr, struct sk_buff *skb);
266
267/* Structure that declares the usual file
268 access functions */
269struct file_operations cp_lkm_fops = {
270 .owner = THIS_MODULE,
271 .read = cp_lkm_read,
272 .write = cp_lkm_write,
273#ifdef KERNEL_2_6_21
274 .ioctl = cp_lkm_ioctl,
275#else
276 .unlocked_ioctl = cp_lkm_ioctl,
277#endif
278 .open = cp_lkm_open,
279 .poll = cp_lkm_poll,
280 .release = cp_lkm_release
281};
282
283static int major;
284static struct device *cp_lkm_dev[2];
285static struct class *cp_lkm_class;
286
287#define CP_LKM_USB_MGR_MINOR 0
288#define CP_LKM_PM_MGR_MINOR 1
289#define CP_LKM_ITER 3000 //CP_LIM_ITER * CP_LKM_TIMEOUT_MS = 30000 or 30 seconds
290#define CP_LKM_TIMEOUT_MS 10
291
292typedef int (*cp_lkm_data_transfer_t)(void *ctx, struct sk_buff *skb);
293typedef void (*cp_lkm_data_hdr_size_t)(void *ctx, int wrapper_hdr_size, int *hdr_size, int* hdr_offset);
294typedef int (*cp_lkm_poll_t)(void *ctx, int budget);
295typedef void (*cp_lkm_schedule_t)(void *ctx);
296typedef void (*cp_lkm_complete_t)(void *ctx);
297typedef int (*cp_lkm_msg_t)(void *ctx);
298struct cp_lkm_edi {
299 //values provided by usb side, called by pm side
300 cp_lkm_data_transfer_t usb_send;
301 void *usb_send_ctx;
302
303 //value provided by pm side, called by usb side
304 cp_lkm_msg_t pm_send_pause; //called by usb to pause the network q
305 cp_lkm_msg_t pm_send_resume; //called by usb to resume the network q
306 cp_lkm_data_transfer_t pm_recv;
307 cp_lkm_data_hdr_size_t pm_get_hdr_size; //ask pm how much space it needs for headers
308 void *pm_recv_ctx;
309
310 void *pm_stats64_ctx;
311};
312
313static int cp_lkm_pm_usb_link(struct cp_lkm_edi *edi, int pm_unique_id, int link);
314
315struct cp_lkm_pm_stats64 {
316 u64 rx_packets;
317 u64 tx_packets;
318 u64 rx_bytes;
319 u64 tx_bytes;
320 u64 rx_errors;
321 u64 tx_errors;
322 u64 rx_dropped;
323 u64 tx_dropped;
324
325 u64 rx_over_errors;
326
327 struct u64_stats_sync syncp;
328};
329
330struct cp_lkm_pm_common {
331 int unique_id;
332 u32 attached;
333 cp_lkm_pm_type_t type;
334 struct net_device *net_dev;
335 struct cp_lkm_edi *edi;
336 struct list_head filter_list;
337 u32 filter_drop_cnt;
338
339 // keep these in pm context so dual sim hidden unplug/plug do not affect the stats
340 struct cp_lkm_pm_stats64 *pcpu_stats64;
341
342 int pm_link_count; //token used to prevent xmit and poll from being called if we are linking or unlinking, -1 = unlinking so block xmit and poll,
343 spinlock_t pm_link_lock; //lock to protect getting and releasing the pm_link_count token
344
345 struct list_head list;
346};
347
348//static void cp_lkm_pm_update_stats64(struct cp_lkm_pm_stats64 *stats, u64 *field, u64 incr);
349#define UPDATE_STATS(stats_ctx, field, incr) if (stats_ctx) { \
350 struct cp_lkm_pm_stats64 *stats = this_cpu_ptr(((struct cp_lkm_pm_common *)stats_ctx)->pcpu_stats64); \
351 if (stats) { \
352 u64_stats_update_begin(&stats->syncp); \
353 stats->field += incr; \
354 u64_stats_update_end(&stats->syncp); \
355 } \
356 }
357
358//Keep these commented out for release
359//static int dbg_memleak_timer_started = 0;
360//static struct timer_list dbg_memleak_timer;
361//static spinlock_t dbg_state_lock;
362//static int dbg_state_init = 0;
363//static int g_dbg_memalloc_cnt = 0;
364//static int g_stuck_cnt = 0;
365//static int g_stuck_chk = 0;
366//static int g_unlink_cnt = 0;
367
368typedef size_t ref_t;
369typedef void (*memref_final_method_t)(void *buf);
370struct memref {
371 memref_final_method_t mfree;
372 atomic_t refs;
373};
374
375
376void *memref_alloc(size_t size, memref_final_method_t mfree)
377{
378 struct memref *ptr;
379
380 ptr = (struct memref *)kmalloc(sizeof(struct memref) + size, GFP_ATOMIC);
381 if (!ptr) {
382 return NULL;
383 }
384 //g_dbg_memalloc_cnt++;
385 ptr->mfree = mfree;
386 atomic_set(&ptr->refs, 1);
387
388 return (ptr + 1);
389}
390
391void *memref_alloc_and_zero(size_t size, memref_final_method_t mfree)
392{
393 void *ptr;
394
395 ptr = memref_alloc(size, mfree);
396 if (!ptr) {
397 return NULL;
398 }
399
400 memset(ptr, 0x00, size);
401
402 return ptr;
403}
404
405static void *memref_ref(void *buf)
406{
407 struct memref *mb;
408
409 if (!buf) {
410 return NULL;
411 }
412
413 mb = (struct memref *)(buf) - 1;
414
415// if (0 == atomic_read(&mb->refs)) {
416// DEBUG_INFO("%s() !refs", __FUNCTION__);
417// return NULL;
418// }
419
420 atomic_inc(&mb->refs);
421
422 return buf;
423}
424
425#if 0
426static ref_t memref_cnt(void *buf)
427{
428 struct memref *mb;
429
430 if (!buf) {
431 return 0;
432 }
433
434 mb = (struct memref *)(buf) - 1;
435 return atomic_read(&mb->refs);
436}
437#endif
438
439static ref_t memref_deref(void *buf)
440{
441 struct memref *mb;
442
443 if (!buf) {
444 return 0;
445 }
446
447 mb = (struct memref *)(buf) - 1;
448
449// if (0 == atomic_read(&mb->refs)) {
450// DEBUG_INFO("%s() !refs", __FUNCTION__);
451// return NULL;
452// }
453
454 if (atomic_dec_and_test(&mb->refs)) {
455 //g_dbg_memalloc_cnt--;
456 if (mb->mfree) {
457 mb->mfree(buf);
458 }
459 kfree(mb);
460 return 0;
461 }
462
463 return atomic_read(&mb->refs);
464}
465
466/*
467 * Generic function to repeatedly call a function until it either succeeds or the delay and iters
468 * have been exhausted. Optionally it can throw a kernel panic on failure.
469 *
470 * ctxt - the ctxt to pass into do_fun
471 * do_fun - the function to call until it returns success
472 * delay_ms - the amount of time to delay between calls to do_fun on failure
473 * iter - the number of times to call do_fun
474 * die_str - if should panic on failure, then pass in the die_str to display
475 *
476 * if die_str provided, this function will not exit on failure.
477 * else it will exit with the result of the call to do_fun
478 * Note: total wait time is delay_ms * iter
479*/
480typedef bool (*do_function_t)(void* ctx1, void* ctx2);
481bool cp_lkm_do_or_die(void* ctx1, void*ctx2, do_function_t do_fun, u32 delay_ms, u32 iter, const char* die_str)
482{
483 bool done = false;
484 //set_current_state(TASK_UNINTERRUPTIBLE);
485 while (!done && iter) {
486 iter--;
487 done = do_fun(ctx1,ctx2);
488 if (!done) {
489 msleep(delay_ms);
490 //schedule_timeout(msecs_to_jiffies(delay_ms));
491 //set_current_state(TASK_UNINTERRUPTIBLE);
492 }
493 }
494 if(!done && die_str) {
495 panic(die_str);
496 //BUG_ON()
497 }
498 //set_current_state(TASK_RUNNING);
499 return done;
500}
501
502/******************************* kernel module USB/Wrapper functionality *********************************
503 *
504 * The shim has multiple entry points. It can be pumped by hw interrupts, software interrupts, or threads.
505 * The trick to getting the shim to work properly is knowing from which contexts the different functions can be called
506 * and what you can do in that context.
507 *
508 * The biggest concern is to make sure we aren't nulling out a function or instance pointer in one context while another
509 * context is using it. Pointers are changed when linking or unlinking to the protocol manager or when the device unplugs.
510 * For link/unlink or unplug, we need to make sure all other processing has been blocked or stopped. We use a combination of
511 * tokens and spinlocks to achieve this.
512 *
513 * Another complexity is dealing with multi-core processors such as we have in some routers. With multi-core you can have
514 * a hw interrupt, software interrupt or thread running on one core and a hw interrupt, soft interrupt, or thread running on
515 * another at the same time. In addition, the same soft interrupt code can run on both cores at the same time.
516 * With single core, the hw int would block the thread. The shim was orginally designed with a single-core system, so a lot of work
517 * has been put into verifying multi-core works.
518 *
519 * Single core: We can be pumped by:
520 * Hardware interrupt - all interrupts disabled, can't be preempted
521 * Software interrupt - hw interrupts not disabled, can be preempted by hw interrupt
522 * Thread or other process - can be preempted by hw or sw interrupt.
523 *
524 * Multi core: all bets are off. Everything can run at the same time so you have to be very careful with locks and tokens to not corrupt
525 * variables and to not run funtions reentrantly.
526 *
527 * Here are the specific contexts (threads, processes)that pump us:
528 * 1. USB on a hardware interrupt context. This happens on tx and rx done (all interrupts disabled, schedule callbacks and get out fast)
529 * 2. USB on the hub thread. This happens on unplug (can sleep or pause, but be careful because it stops all USB system hub processing)
530 * 3. Kernel workqueue thread (our own callback, can sleep or pause, but be careful, it stops all the kernel workqueue processing)
531 * 4. tasklet or timer soft interrupt context (our own callbacks on sw interrupt, hw interrupts enabled, can't sleep or do pause)
532 * 5. ioctl or device write on a kernel thread (this is cpusb in app space talking to us, runs on a thread, can be prempted in multi-core)
533 * 6. network (send from network side, runs as a software interrupt)
534 *
535 * Which functions are called in which contexts and what they do:
536 * #1 - cp_lkm_usb_xmit_complete - called by usb layer when transmit is done in hw interrupt context
537 * throw transfer in done q, on success, schedule tasklet or NAPI poll (#4) by calling
538 * cp_lkm_usb_done_and_defer_data() for data packets or cp_lkm_usb_done_and_defer_other() for non-data pkts.
539 * On error schedule kevent (#3) by calling cp_lkm_usb_defer_kevent()
540 * cp_lkm_usb_recv_complete - called by usb layer when recv is done in hw interrupt context
541 * throw transfer in done q, schedule tasklet or NAPI poll (#4), on error schedule kevent (#3)
542 *
543 * #2 - cp_lkm_usb_probe - called when the usb hub layer detects a plug, called on hub thread context
544 * cp_lkm_usb_disconnect - called when the usb hub layer detects an unplug, called on hub thread context
545 * schedule mgr_kevent to clean up
546 *
547 * #3 - cp_lkm_usb_kevent - scheduled by tx and rx complete (#1) on USB halt errors or out of memory failure. Is a workqueue thread
548 * clears the halts, sees if memory available. On success, schedules the tasklet or NAPI poll(#4)
549 *
550 * #4 - cp_lkm_usb_process_data_done_tasklet - Scheduled by rx or tx complete (#1). Runs in soft int context. This function is used when we
551 * are using a non-NAPI compliant protocol manager (i.e. PPP). It processes recv'd pkts and sends
552 * them onto the protocol manager, frees all sent skb's and restock more recv urbs to the USB layer.
553 * cp_lkm_usb_process_other_done_tasklet -Same as first one except is it scheduled anytime we recv a pkt that needs to go to the common
554 * modem stack instead of to the network stack (ctrl, status or diagnostics pkt)
555 *
556 * #5 - cp_lkm_usb_handle_ioctl - ioctl mux function called by the kernel when the app ioctl is called
557 * calls the appropriate mux function
558 * cp_lkm_usb_plug_intf - called by ioctl mux to register a device. Register a usb driver to catch
559 * the plug event from the usb stack
560 * cp_lkm_usb_open_intf - called by ioctl mux indicate the data channel is active. This causes us to
561 * mux all data packets to the network stack instead of up to cpusb in app space
562 * cp_lkm_usb_close_intf - called by ioctl mux to indicate the data connection has gone down.
563 * This causes us to mux all packets up to cpusb in app space instead of to network
564 *
565 * cp_lkm_usb_unplug_intf - called by ioctl mux. Releases the interface, deregisters the usb driver, cleans up memory
566 * cp_lkm_usb_handle_msg - called by the device driver write function. This is how cpusb sends us usb packets that
567 * we need to send to usb
568 * #6 - cp_lkm_usb_start_xmit - called by the network interface
569 * sends a transmit to the usb layer
570*/
571
572
573struct cp_lkm_usb_dev;
574struct cp_lkm_usb_base_dev;
575
576
577/* we record the state for each of our queued skbs */
578enum skb_state {
579 illegal = 0,
580 out_start, // start a data or other transmit
581 out_done, // data or other transmit done
582 in_data_start, // start a recv (either data or other)
583 in_data_done, // recv data done
584 in_data_cleanup,
585 in_other_start,
586 in_other_done, // recv other done
587 in_other_cleanup,
588 ctrl_start, // start a usb ctrl transfer
589 ctrl_done, // usb ctrl transfer finished
590 unlink_start // telling usb to give our urb back
591};
592
593#define EVENT_TX_HALT 0
594#define EVENT_RX_HALT 1
595#define EVENT_RX_MEMORY 2
596#define EVENT_STS_SPLIT 3
597#define EVENT_LINK_RESET 4
598
599//These are standard USB defines
600#define UE_BULK 0x02
601#define UE_INTERRUPT 0x03
602
603#define MAX_INTF_EPS 10
604
605#define CP_LKM_USB_RECV 0x01
606#define CP_LKM_USB_LISTEN 0x02
607
608struct cp_lkm_base_ep
609{
610 struct list_head list; // for inserting in the cpbdev list of base endpoints
611 struct list_head eps; // list of cloned endpoints based off this one
612 struct cp_lkm_usb_base_dev* cpbdev; // pointer back to the cpdev this endpoint belongs to
613 int ep_num; // endpoint number
614 unsigned long err_flags; // errors on the ep (halt, no mem)
615 int con_flags; //connection flags (recv, listen)
616 int q_cnt; //number of urbs down at the lower layer
617 int type; //ep type (interrupt, bulk etc)
618 int max_transfer_size;
619 int pipe;
620 int interval; // interval for interrupt end points
621};
622
623struct cp_lkm_ep
624{
625 struct list_head list_bep; // for being inserted into the bep's list of eps
626 struct list_head list_cpdev; // for being inserted into the cpdev's list of eps
627 struct cp_lkm_base_ep* bep; // pointer to this ep's base endpoint
628 struct cp_lkm_usb_dev* cpdev; // pointer back to the cpdev this endpoint belongs to
629 int con_flags; //connection flags (recv, listen)
630 int ep_num; // duplicated from base endpoint for convenience
631};
632
633/* This struct gets stored in skb->cb which is currently a 48 byte buffer
634 The size of this struct needs to not ever be bigger than 48
635*/
636struct skb_data {
637 //if pointers and ints are 64 bits (8 bytes) then this is 48 bytes currently and
638 //no other variables can be added
639 struct urb *urb;
640 struct cp_lkm_usb_base_dev *cpbdev;
641 struct cp_lkm_base_ep* bep;
642 enum skb_state state;
643 int status;
644 int unique_id; //id of cpdev that sent the tx pkt
645};
646
647#define MAX_USB_DRVR_NAME 10
648#define USB_DRVR_FRMT_STR "cpusb%d"
649
650struct cp_lkm_usb_base_dev
651{
652 struct list_head list; //for inserting in global dev list
653 struct list_head cpdev_list; //list of cpdevs cloned from this base dev
654 struct list_head in_bep_list; // list of base in endpoints
655 struct list_head out_bep_list; // list of base out endpoints
656 int data_in_bep_num; //data in ep number
657 int data_out_bep_num; //data out ep number
658
659 struct usb_driver* usb_driver;
660 struct usb_device_id* usb_id_table;
661 int vid;
662 int pid;
663 int intf_num;
664 int alt_intf_num;
665 int usb_bus;
666 int usb_addr;
667 int feature_flags;
668 int base_id; //unique id of the first clone to plug
669 cp_lkm_usb_state_t base_state;
670
671 struct sk_buff_head in_q; //recv skb's are stored here while down at usb waiting to be filled with recv data
672 struct sk_buff_head out_q; //send skb's are stored here while down at usb waiting to be transmitted
673 struct sk_buff_head ctrlq; //ctrl skb's are stored here while down at usb waiting to be filled or transmitted
674 struct sk_buff_head data_tx_done; //tx skb's are stored here while waiting to be freed
675 struct sk_buff_head data_rx_done; //recv and ctrl skb's are stored here while waiting to have recv data processed
676 struct sk_buff_head other_done; //sent skb's are stored here while waiting to be freed
677
678 u32 data_q_len; // holds count of data pkts (both rx and tx) needing to be processed
679 spinlock_t data_q_lock; // lock to keep data_q_len sync'd
680 spinlock_t processing_state_lock;
681 cp_lkm_usb_process_state_t processing_state;
682 spinlock_t other_state_lock;
683 cp_lkm_usb_process_state_t other_state;
684 bool scheduled; //tasklet scheduled to process the pending
685
686 struct tasklet_struct other_process_tasklet;
687 struct tasklet_struct data_process_tasklet;
688
689 int rx_schedule_threshold;
690 int tx_schedule_threshold;
691 int tx_resume_threshold;
692
693 struct work_struct kevent;
694 char usb_drvr_name[MAX_USB_DRVR_NAME];
695 void* wrapper_ctxt;
696 int wrapper_hdr_size;
697 int pm_hdr_size;
698 int pm_hdr_offset;
699
700 struct usb_interface* intf;
701 struct usb_device *udev;
702
703 int plug_result;
704 bool disconnect_wait;
705
706 struct timer_list rx_delay;
707
708 int tx_usb_q_count;
709 bool tx_paused;
710
711 struct timer_list usb_pause_stuck_timer;
712 int tx_proc_cnt; //how many data tx pkts have we successfully sent
713 int tx_proc_cnt_at_pause; //how many data tx pkts we had sent when we paused
714
715 #if 0
716 //debug stuff, comment out
717 //unsigned int dbg_total_stuck_cnt;
718 //unsigned int dbg_total_tx_at_stuck_cnt;
719 //unsigned int dbg_total_tx_proc;
720 #endif
721};
722
723struct cp_lkm_usb_dev
724{
725 //init at open
726 struct cp_lkm_usb_base_dev* cpbdev;
727 int unique_id;
728 int pm_id;
729 int clone_num;
730 int mux_id;
731
732 cp_lkm_usb_state_t state;
733 struct list_head list; //for inserting in base dev list
734
735 struct cp_lkm_edi* edi;
736
737 struct list_head in_ep_list; //list of in endpoints on the dev
738 struct list_head out_ep_list; //list of out endpoints on the dev
739 int data_in_ep_num; //data in ep number
740 int data_out_ep_num; //data out ep number
741
742 //for debug
743 #if 0
744 struct timer_list dbg_timer;
745 unsigned int dbg_total_rx_irq;
746 unsigned int dbg_total_tx_irq;
747 unsigned int dbg_total_rx_proc;
748 unsigned int dbg_total_d_done;
749 unsigned int dbg_total_o_done;
750 unsigned int dbg_total_pause;
751 unsigned int dbg_total_resume;
752 unsigned int dbg_total_max_work;
753 unsigned int dbg_total_timeout;
754 unsigned int dbg_total_budget;
755 unsigned int dbg_total_o_tasklet;
756 unsigned int dbg_total_d_resched;
757 unsigned int dbg_total_wq_sched;
758 unsigned int dbg_total_napi_sched;
759 unsigned int dbg_total_tasklet_sched;
760 unsigned int dbg_total_d_comp;
761 //unsigned int dbg_total_ic;
762 //unsigned int dbg_total_tc;
763 unsigned int dbg_total_rx_qlen;
764 unsigned int dbg_total_tx_qlen;
765 unsigned int dbg_total_num_hybrid_t;
766 unsigned int dbg_total_num_normal_t;
767 unsigned int dbg_total_num_hybrid;
768 unsigned int dbg_total_num_normal;
769 unsigned int dbg_total_num_d_timers;
770 unsigned int dbg_total_sch_sk;
771 #endif
772};
773
774struct cp_lkm_usb_ctx
775{
776 struct cp_lkm_common_ctx common;
777 struct list_head dev_list;
778 spinlock_t lock; //used to protect access to dev_list from different instances. Also used to coordinate thread accesses from usb and cpmodem layers.
779 //when one thread grabs the lock, no other threads can run (soft and hw IRQs can still run). The usb hub unplug handler runs on a thread.
780 //this means if one thread grabs the lock it can be guaranteed the modem can unplug while it is doing its thing.
781};
782
783//static void cp_lkm_usb_dbg_memleak_timer (unsigned long param);
784//static void cp_lkm_usb_dbg_timer (unsigned long param);
785
786enum {
787 CP_LKM_STUCK_INIT = 0,
788 CP_LKM_STUCK_START,
789 CP_LKM_STUCK_STOP,
790 CP_LKM_STUCK_DEINIT
791};
792static void cp_lkm_usb_stuck_check(struct cp_lkm_usb_base_dev* cpbdev, int action);
793static void cp_lkm_usb_pause_stuck_timer(unsigned long param);
794
795static void cp_lkm_usb_delay_timer (unsigned long param);
796static void cp_lkm_usb_kevent (struct work_struct *work);
797static int cp_lkm_usb_open(struct cp_lkm_common_ctx *ctx);
798static int cp_lkm_usb_close(struct cp_lkm_common_ctx *ctx);
799static int cp_lkm_usb_handle_ioctl(struct cp_lkm_common_ctx *ctx, int cmd, void *k_argp);
800static int cp_lkm_usb_handle_msg(struct cp_lkm_common_ctx *ctx, struct cp_lkm_msg_hdr *hdr, struct sk_buff *skb);
801
802static int cp_lkm_usb_start_xmit (void *ctx, struct sk_buff *skb);
803static int cp_lkm_usb_start_xmit_common(void *ctx, struct sk_buff *skb, int src, struct cp_lkm_ep* ep);
804static void cp_lkm_usb_xmit_complete (struct urb *urb);
805static int cp_lkm_usb_submit_recv (struct cp_lkm_usb_base_dev* cpbdev, struct urb *urb, gfp_t flags, struct cp_lkm_base_ep* bep, bool data);
806static void cp_lkm_usb_recv_complete (struct urb *urb);
807
808static void cp_lkm_usb_other_recv_process (struct cp_lkm_usb_base_dev* cpbdev, struct sk_buff *skb_in);
809static void cp_lkm_usb_data_recv_process (struct cp_lkm_usb_base_dev* cpbdev, struct sk_buff *skb);
810static void cp_lkm_usb_ctrl_process (struct cp_lkm_usb_base_dev* cpbdev, struct sk_buff *skb_in);
811
812static int cp_lkm_usb_close_intf(struct cp_lkm_usb_close_intf* ci);
813static int cp_lkm_usb_unlink_urbs (struct cp_lkm_usb_base_dev *cpbdev, struct sk_buff_head *q, struct cp_lkm_base_ep* bep);
814
815static void cp_lkm_usb_process_other_done_tasklet (unsigned long param);
816static void cp_lkm_usb_process_data_done_tasklet (unsigned long param);
817static void cp_lkm_usb_rx_data_restock (struct cp_lkm_usb_base_dev* cpdev);
818static void cp_lkm_usb_rx_other_restock (struct cp_lkm_usb_base_dev* cpbdev);
819static void cp_lkm_usb_defer_kevent (struct cp_lkm_usb_base_dev* cpbdev, struct cp_lkm_base_ep* bep, int work);
820static bool cp_lkm_schedule_data_process(struct cp_lkm_usb_base_dev* cpbdev, bool if_data, bool is_resume, bool have_lock);
821
822static void cp_lkm_schedule_rx_restock(struct cp_lkm_usb_base_dev* cpbdev, struct cp_lkm_base_ep* bep);
823static int cp_lkm_usb_start_ctrl_xmit(void *ctx, struct sk_buff *skb_in);
824static int cp_lkm_usb_have_data(struct cp_lkm_usb_base_dev *cpbdev);
825
826static struct cp_lkm_usb_ctx cp_lkm_usb_mgr;
827
828// Knobs we can tweak on a processor by processor basis to maximize performance
829// Dummy values filled in here so we don't get warning on using unitialized variables
830static int CP_LKM_PM_NAPI_WEIGHT = 0; //budget we register with NAPI (max number of pkts it thinks we will process).
831static int CP_LKM_USB_NAPI_MAX_WORK = 0; //actual number of pkts we will process (we're not entirely honest with NAPI)
832static int CP_LKM_USB_MAX_RX_QLEN = 0; //max number of rx data URBs we allow to flow in the shim (we alloc these)
833static int CP_LKM_USB_MAX_OTHER_QLEN = 0; //max number of rx urbs on non-data endpoints
834static int CP_LKM_USB_TX_PAUSE_Q_PKTS = 0; //max number of tx data URBs we allow to flow in the shim (alloc'd by network stack, we control this by pausing)
835static int CP_LKM_USB_TX_RESUME_Q_PKTS = 0; //un-pause network at this number
836//static int CP_LKM_USB_TX_RESUME_Q_PKTS_HYBRID = 0; //un-pause network at this number when in hybrid mode with pkt counting
837static int CP_LKM_USB_TX_SCHED_CNT = 0; //How many done tx's we allow to accumulate before scheduling cleanup in normal mode
838//static int CP_LKM_USB_TX_SCHED_CNT_HYBRID = 0; //How many done tx's we allow to accumulate before scheduling cleanup in hybrid mode with pkt counting
839static int CP_LKM_USB_RX_SCHED_CNT = 0; //How many done rx's we allow to accumulate before scheduling processing in normal mode
840//static int CP_LKM_USB_RX_SCHED_CNT_HYBRID = 0; //How many done rx's we allow to accumulate before scheduling processing in hybrid mode with pkt counting
841static int CP_LKM_USB_RESTOCK_MULTIPLE = 0; //How many rx URBs we should restock as we process them (0 means don't restock as we go, 1 means every one, 2 means 1 out of every 2 etc)
842//static int CP_LKM_USB_DATA_MAX_PPS = 0; //Packets per second that will cause us to transition from normal to hybrid mode when using pkt counting
843//static int CP_LKM_USB_DATA_MIN_PPS = 0; //packets per second that will cause us to transition from hybrid back to normal when using pkt counting
844static int CP_LKM_USB_TASKLET_CNT = 0; //in hybrid mode, schedule tasklet on cnts 0 to this number
845static int CP_LKM_USB_WORKQUEUE_CNT = 0; //in hybrid mode, schedule workqueue on cnts CP_LKM_USB_TASKLET_CNT to this number, then start cnt over
846static int CP_LKM_USB_PROCESS_DIVISOR = 0; //times to loop through the process loop, doing pkts/divisor pkts each time. Set to 1 to only process what was there when entering
847//broadcom EHCI controller has issues we need to work around
848static int cp_lkm_is_broadcom = 0;
849
850#define CP_LKM_USB_PAUSED_CNT 5000
851
852//TODO remove
853#if 0
854static int g_dbg_data_skballoc_cnt = 0;
855static int g_dbg_other_skballoc_cnt = 0;
856static int g_dbg_ctrl_skballoc_cnt = 0;
857static int g_dbg_xmit_skballoc_cnt = 0;
858static int g_dbg_urballoc_cnt = 0;
859static int g_dbg_unplug_cnt = 0;
860static void cp_lkm_usb_urb_cnt(int inc)
861{
862 unsigned long flags;
863 spin_lock_irqsave(&dbg_state_lock, flags);
864 g_dbg_urballoc_cnt += inc;
865 spin_unlock_irqrestore(&dbg_state_lock, flags); //release lock so interrupts can resume firing
866}
867static void cp_lkm_usb_cnts(int state, int inc)
868{
869 #if 1
870 unsigned long flags;
871 spin_lock_irqsave(&dbg_state_lock, flags);
872
873 switch (state) {
874 case in_other_start:
875 case in_other_done:
876 case in_other_cleanup:
877 g_dbg_other_skballoc_cnt+=inc;
878 break;
879 case ctrl_start:
880 case ctrl_done:
881 g_dbg_ctrl_skballoc_cnt+=inc;
882 break;
883 case out_start:
884 case out_done:
885 g_dbg_xmit_skballoc_cnt+=inc;
886 break;
887 case in_data_start:
888 case in_data_done:
889 case in_data_cleanup:
890 g_dbg_data_skballoc_cnt+=inc;
891 break;
892 case unlink_start:
893 g_dbg_unplug_cnt+=inc;
894 break;
895 default:
896 printk("!!clean: unknown skb state: %d\n",state);
897 break;
898 }
899 spin_unlock_irqrestore(&dbg_state_lock, flags);
900 #endif
901}
902#endif
903
904static struct cp_lkm_usb_dev* cp_lkm_usb_find_muxed_dev(struct cp_lkm_usb_base_dev* cpbdev, int mux_id)
905{
906 struct list_head *pos;
907 list_for_each(pos, &cpbdev->cpdev_list){
908 struct cp_lkm_usb_dev* cpdev = list_entry(pos, struct cp_lkm_usb_dev, list);
909 //printk("%s() cpdev: %p, cpdev->mux_id: %d\n", __FUNCTION__, cpdev, cpdev->mux_id);
910 if(cpdev->mux_id == mux_id) {
911 return cpdev;
912 }
913 }
914 return NULL;
915}
916
917static struct cp_lkm_usb_dev* cp_lkm_usb_find_dev(int uniqueid)
918{
919 struct list_head *bpos;
920 struct list_head *pos;
921 list_for_each(bpos, &cp_lkm_usb_mgr.dev_list){
922 struct cp_lkm_usb_base_dev* cpbdev = list_entry(bpos, struct cp_lkm_usb_base_dev, list);
923 list_for_each(pos, &cpbdev->cpdev_list){
924 struct cp_lkm_usb_dev* cpdev = list_entry(pos, struct cp_lkm_usb_dev, list);
925 if(cpdev->unique_id == uniqueid) {
926 return cpdev;
927 }
928 }
929 }
930 return NULL;
931}
932
933#define CP_LKM_DEV_MATCH_ALL 1
934#define CP_LKM_DEV_MATCH_BUS_ADDR_ONLY 2
935
936// Find base device from its bus, addr and unique id
937static struct cp_lkm_usb_base_dev* cp_lkm_usb_find_base_dev(int bus, int addr, int unique_id, int match)
938{
939 struct list_head *pos;
940 struct list_head *bpos;
941 list_for_each(bpos, &cp_lkm_usb_mgr.dev_list){
942 struct cp_lkm_usb_base_dev* cpbdev = list_entry(bpos, struct cp_lkm_usb_base_dev, list);
943 if(cpbdev->usb_bus == bus && cpbdev->usb_addr == addr) {
944 if (match == CP_LKM_DEV_MATCH_BUS_ADDR_ONLY) {
945 return cpbdev;
946 }
947 if (cpbdev->base_id == unique_id) {
948 //matches the base_id so don't need to look further
949 return cpbdev;
950 }
951 //look to see if matches the unique_id of one of the cpdevs (only hit this case when running clones)
952 list_for_each(pos, &cpbdev->cpdev_list){
953 struct cp_lkm_usb_dev* cpdev = list_entry(pos, struct cp_lkm_usb_dev, list);
954 if (cpdev->unique_id == unique_id) {
955 return cpbdev;
956 }
957 }
958 }
959 }
960 return NULL;
961}
962
963/*
964static struct cp_lkm_usb_dev* cp_lkm_usb_get_head_dev(void)
965{
966 struct list_head *bpos;
967 struct list_head *pos;
968 list_for_each(bpos, &cp_lkm_usb_mgr.dev_list){
969 struct cp_lkm_usb_base_dev* cpbdev = list_entry(bpos, struct cp_lkm_usb_base_dev, list);
970 list_for_each(pos, &cpbdev->cpdev_list){
971 struct cp_lkm_usb_dev* cpdev = list_entry(pos, struct cp_lkm_usb_dev, list);
972 return cpdev;
973 }
974 }
975 return NULL;
976}
977*/
978
979// pause or unpause all cpdevs associated with this cpbdev
980static void cp_lkm_usb_dev_pause(struct cp_lkm_usb_base_dev* cpbdev, bool pause)
981{
982 struct list_head *pos;
983
984 list_for_each(pos, &cpbdev->cpdev_list){
985 struct cp_lkm_usb_dev* cpdev = list_entry(pos, struct cp_lkm_usb_dev, list);
986 if (pause) {
987 if(cpdev->edi->pm_send_pause) {
988 cpdev->edi->pm_send_pause(cpdev->edi->pm_recv_ctx);
989 //cpdev->dbg_total_pause++;
990 }
991 }
992 else{
993 if (cpdev->edi->pm_send_resume) {
994 //cpdev->dbg_total_resume++;
995 cpdev->edi->pm_send_resume(cpdev->edi->pm_recv_ctx);
996 }
997 }
998 }
999 cpbdev->tx_paused = pause;
1000}
1001
1002static void cp_lkm_usb_clean_list(struct sk_buff_head* list)
1003{
1004 struct sk_buff *skb;
1005 struct skb_data *entry;
1006
1007 while((skb = skb_dequeue(list)) != NULL){
1008 DEBUG_TRACE("%s() found a straggler", __FUNCTION__);
1009 entry = (struct skb_data *) skb->cb;
1010 if(entry->urb) {
1011 //cp_lkm_usb_urb_cnt(-1);
1012 usb_free_urb (entry->urb);
1013 }
1014 //cp_lkm_usb_cnts(entry->state, -1);
1015 dev_kfree_skb_any(skb);
1016 }
1017}
1018
1019static void cp_lkm_usb_mark_as_dead(struct cp_lkm_usb_dev* cpdev)
1020{
1021 cpdev->edi->usb_send_ctx = NULL;
1022 if(cpdev->state != CP_LKM_USB_DEAD) {
1023 LOG("Device with id:%d unplugged", cpdev->unique_id);
1024 }
1025 cpdev->state = CP_LKM_USB_DEAD;
1026}
1027
1028static void cp_lkm_usb_mark_base_as_dead(struct cp_lkm_usb_base_dev* cpbdev)
1029{
1030 cpbdev->base_state = CP_LKM_USB_DEAD;
1031}
1032
1033static struct cp_lkm_base_ep* cp_lkm_usb_get_bep(struct cp_lkm_usb_base_dev* cpbdev, int ep_num)
1034{
1035 struct cp_lkm_base_ep* bep = NULL;
1036 struct list_head *entry, *nxt, *head;
1037
1038 if(USB_DIR_IN & ep_num) {
1039 //printk("%s() search IN list for ep_num: 0x%x\n", __FUNCTION__, ep_num);
1040 head = &cpbdev->in_bep_list;
1041 }
1042 else{
1043 //printk("%s() search OUT list for ep_num: 0x%x\n", __FUNCTION__, ep_num);
1044 head = &cpbdev->out_bep_list;
1045 }
1046
1047 list_for_each_safe(entry, nxt, head) {
1048 bep = list_entry(entry, struct cp_lkm_base_ep, list);
1049 if (bep->ep_num == ep_num) {
1050 //printk("%s() found ep_num: %d\n", __FUNCTION__, ep_num);
1051 return bep;
1052 }
1053 }
1054 //printk("%s() didn't find ep_num: %d\n", __FUNCTION__,ep_num);
1055
1056 return NULL;
1057}
1058
1059static struct cp_lkm_ep* cp_lkm_usb_get_ep(struct cp_lkm_usb_dev* cpdev, int ep_num)
1060{
1061 struct cp_lkm_ep* ep = NULL;
1062 struct list_head *entry, *nxt, *head;
1063
1064 if(USB_DIR_IN & ep_num) {
1065 //printk("%s() search IN list for ep_num: 0x%x\n", __FUNCTION__, ep_num);
1066 head = &cpdev->in_ep_list;
1067 }
1068 else{
1069 //printk("%s() search OUT list for ep_num: 0x%x\n", __FUNCTION__, ep_num);
1070 head = &cpdev->out_ep_list;
1071 }
1072
1073 list_for_each_safe(entry, nxt, head) {
1074 ep = list_entry(entry, struct cp_lkm_ep, list_cpdev);
1075 if (ep->ep_num == ep_num) {
1076 //printk("%s() found ep_num: %d\n", __FUNCTION__, ep_num);
1077 return ep;
1078 }
1079 }
1080 //printk("%s() didn't find ep_num: %d\n", __FUNCTION__,ep_num);
1081
1082 return NULL;
1083}
1084
1085static void cp_lkm_usb_bep_finalize(void *arg)
1086{
1087 struct cp_lkm_base_ep* bep = (struct cp_lkm_base_ep*)arg;
1088 struct list_head *entry, *nxt;
1089 struct cp_lkm_ep *ep;
1090
1091 //printk("%s() start\n", __FUNCTION__);
1092 //todo remove
1093 //del_timer_sync(&cpdev->dbg_timer);
1094
1095 //printk("%s() - free eps\n",__FUNCTION__);
1096 list_for_each_safe(entry, nxt, &bep->eps) {
1097 ep = list_entry(entry, struct cp_lkm_ep, list_bep);
1098 //printk("%s() - free ep: %p from bep: %p\n",__FUNCTION__,ep,bep);
1099 list_del(&ep->list_bep);
1100 memref_deref(ep);
1101 }
1102
1103}
1104
1105static void cp_lkm_usb_ep_finalize(void *arg)
1106{
1107 //struct cp_lkm_ep* ep = (struct cp_lkm_ep*)arg;
1108 //printk("%s() - free ep: %p, ep_num: 0x%x\n",__FUNCTION__,arg ,ep->ep_num);
1109}
1110
1111static struct cp_lkm_ep* cp_lkm_usb_create_ep(struct cp_lkm_usb_dev* cpdev, int ep_num)
1112{
1113 struct cp_lkm_ep* ep;
1114 struct cp_lkm_base_ep* bep;
1115 struct cp_lkm_usb_base_dev* cpbdev;
1116
1117 DEBUG_ASSERT(cpdev, "cpdev is null");
1118 cpbdev = cpdev->cpbdev;
1119 DEBUG_ASSERT(cpbdev, "cpbdev is null");
1120
1121 //see if already exists first
1122 ep = cp_lkm_usb_get_ep(cpdev, ep_num);
1123 if(ep) {
1124 DEBUG_TRACE("%s() ep: %p already exists", __FUNCTION__, ep);
1125 //printk("%s() ep: 0x%x already exists\n", __FUNCTION__, ep_num);
1126 return ep;
1127 }
1128 //printk("%s() - create new ep, cpdev: %p, ep_num: 0x%x\n",__FUNCTION__,cpdev, ep_num);
1129
1130 //Need to create new ep and possibly a new bep. We will alloc and init everything first and
1131 //then if that all works, we will put everything in its proper place (in lists and stuff)
1132 ep = memref_alloc_and_zero(sizeof(struct cp_lkm_ep), cp_lkm_usb_ep_finalize);
1133 if(!ep) {
1134 DEBUG_ERROR("%s() failed to alloc new ep", __FUNCTION__);
1135 return NULL;
1136 }
1137 INIT_LIST_HEAD(&ep->list_bep);
1138 INIT_LIST_HEAD(&ep->list_cpdev);
1139 ep->ep_num = ep_num;
1140
1141 //may need to create a new base ep if this is the first time we've seen this endpoint number and direction
1142 //this is always the case for non-cloned interfaces
1143 bep = cp_lkm_usb_get_bep(cpbdev, ep_num);
1144 if (!bep) {
1145 bep = memref_alloc_and_zero(sizeof(struct cp_lkm_base_ep), cp_lkm_usb_bep_finalize);
1146 if(!bep) {
1147 DEBUG_ERROR("%s() failed to alloc new ep", __FUNCTION__);
1148 memref_deref(ep);
1149 return NULL;
1150 }
1151 //printk("%s() - create new bep: %p, cpbdev: %p, ep_num: 0x%x\n",__FUNCTION__,bep, cpbdev, ep_num);
1152 bep->ep_num = ep_num;
1153 bep->cpbdev = cpbdev;
1154 INIT_LIST_HEAD(&bep->list);
1155 INIT_LIST_HEAD(&bep->eps);
1156 if(USB_DIR_IN & ep_num) {
1157 list_add_tail(&bep->list, &cpbdev->in_bep_list);
1158 }
1159 else{
1160 list_add_tail(&bep->list, &cpbdev->out_bep_list);
1161 }
1162 }
1163
1164 //if we get here, everything alloc'd ok, so can insert in lists and stuf
1165
1166 // Each ep will have two memrefs, one from the alloc which is for entry in the cpdev list
1167 // and another for entry into the bep list. This way the ep won't be freed until it is removed
1168 // from both lists at unplug time
1169 ep->cpdev = cpdev;
1170 ep->bep = bep;
1171 if(USB_DIR_IN & ep_num) {
1172 list_add_tail(&ep->list_cpdev, &cpdev->in_ep_list);
1173 }
1174 else{
1175 list_add_tail(&ep->list_cpdev, &cpdev->out_ep_list);
1176 }
1177 memref_ref(ep);
1178 list_add_tail(&ep->list_bep, &bep->eps);
1179 return ep;
1180
1181}
1182
1183// cp_lkm_usb_plug_intf is called by cpusb via the ioctl. It registers a driver for the interface.
1184// This function is then called by the lower usb layer so we can claim that interface.
1185int cp_lkm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
1186{
1187 struct cp_lkm_usb_base_dev* cpbdev;
1188 struct usb_device* udev;
1189 struct usb_host_interface* interface;
1190 int unique_id;
1191 //unsigned long flags;
1192 int rc;
1193 uintptr_t tmp_uid;
1194
1195 usb_get_intf(intf);
1196
1197 //printk("%s()\n",__FUNCTION__);
1198
1199 udev = interface_to_usbdev (intf);
1200 interface = intf->cur_altsetting;
1201
1202 unique_id = (int)id->driver_info;
1203 tmp_uid = unique_id;
1204 spin_lock(&cp_lkm_usb_mgr.lock);
1205
1206 // Error scenario to watch for here:
1207 // 1. Device unplugs and replugs before the upper app detects the unplug and calls our unplug_intf. In
1208 // this case this driver is still registered and will get the new probe (we don't want this, we want the app driver
1209 // to get the plug and claim the device orginally). When disconnect happens we set the state to DEAD. If we get
1210 // a probe on a dead device, don't take it.
1211 cpbdev = cp_lkm_usb_find_base_dev(udev->bus->busnum, udev->devnum, unique_id, CP_LKM_DEV_MATCH_ALL);
1212 if(!cpbdev || cpbdev->base_state == CP_LKM_USB_DEAD) {
1213 spin_unlock(&cp_lkm_usb_mgr.lock);
1214
1215 DEBUG_TRACE("%s() no cpdev or already dead", __FUNCTION__);
1216 return -ENXIO;
1217 }
1218
1219 //make sure it is for our device (match the usb addresses)
1220 //printk("%s() id: %d ouraddr:%d, probeaddr:%d, ourintf:%d, probeintf:%d!\n", __FUNCTION__, unique_id,
1221 // cpbdev->usb_addr,udev->devnum,cpbdev->intf_num,interface->desc.bInterfaceNumber);
1222 if(cpbdev->usb_bus != udev->bus->busnum || cpbdev->usb_addr != udev->devnum || cpbdev->intf_num != interface->desc.bInterfaceNumber) {
1223 spin_unlock(&cp_lkm_usb_mgr.lock);
1224
1225 DEBUG_TRACE("%s() reject ourbus: %d, probebus: %d, ouraddr:%d, probeaddr:%d, ourintf:%d, probeintf:%d!", __FUNCTION__,
1226 cpbdev->usb_bus, udev->bus->busnum, cpbdev->usb_addr,udev->devnum,cpbdev->intf_num,interface->desc.bInterfaceNumber);
1227 return -ENXIO;
1228 }
1229 cpbdev->intf = intf;
1230 cpbdev->udev = udev;
1231
1232 spin_unlock(&cp_lkm_usb_mgr.lock);
1233
1234 if(cpbdev->alt_intf_num) {
1235 rc = usb_set_interface(udev, cpbdev->intf_num, cpbdev->alt_intf_num);
1236 if(rc) {
1237 DEBUG_ERROR("%s() set intf failed :%d", __FUNCTION__,rc);
1238 cpbdev->plug_result = -1; //only set this on failure, not reject
1239 return -1;
1240 }
1241 }
1242
1243 spin_lock(&cp_lkm_usb_mgr.lock);
1244 cpbdev->base_state = CP_LKM_USB_CTRL;
1245
1246 usb_set_intfdata(intf, (void*)tmp_uid);
1247 usb_get_dev (udev);
1248 memref_ref(cpbdev);
1249 spin_unlock(&cp_lkm_usb_mgr.lock);
1250
1251 cp_lkm_usb_stuck_check(cpbdev, CP_LKM_STUCK_INIT);
1252
1253 //throughput control stuff
1254 cpbdev->rx_schedule_threshold = CP_LKM_USB_RX_SCHED_CNT;
1255 cpbdev->tx_schedule_threshold = CP_LKM_USB_TX_SCHED_CNT;
1256 cpbdev->tx_resume_threshold = CP_LKM_USB_TX_RESUME_Q_PKTS;
1257
1258
1259 //todo remove
1260 //if (!dbg_memleak_timer_started) {
1261 // dbg_memleak_timer_started = 1;
1262 // dbg_memleak_timer.function = cp_lkm_usb_dbg_memleak_timer;
1263 // dbg_memleak_timer.data = 0;
1264
1265 // init_timer(&dbg_memleak_timer);
1266 // mod_timer(&dbg_memleak_timer, jiffies + msecs_to_jiffies(20000));
1267 //}
1268 //if (dbg_state_init == 0) {
1269 // spin_lock_init(&dbg_state_lock);
1270 // dbg_state_init = 1;
1271 //}
1272
1273
1274
1275 DEBUG_TRACE("%s() probe done", __FUNCTION__);
1276 return 0;
1277}
1278
1279static bool cp_lkm_usb_shuter_down_do_pm_unlink(void* ctx1, void* ctx2)
1280{
1281 struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev*)ctx1;
1282 struct cp_lkm_usb_dev* cpdev;
1283 struct list_head *pos;
1284 unsigned long flags;
1285 //Unlink from the pm and disable the data state machine
1286 bool done = false;
1287 spin_lock_irqsave(&cpbdev->processing_state_lock, flags);
1288 if(cpbdev->processing_state == USB_PROCESS_STATE_IDLE){
1289 cpbdev->processing_state = USB_PROCESS_STATE_PAUSED; //data soft interrupt handlers now won't run
1290
1291 spin_lock(&cpbdev->data_q_lock);
1292 cpbdev->data_q_len = CP_LKM_USB_PAUSED_CNT;
1293 spin_unlock(&cpbdev->data_q_lock); //usb hw interrupts now won't schedule soft interrupt handlers
1294
1295 spin_unlock_irqrestore(&cpbdev->processing_state_lock, flags); //release lock so interrupts can resume firing
1296 //unlink the pm side for all cpdevs associated with this cpbdev. Once this returns we are guaranteed not to get any new xmit skb's from the pm
1297 list_for_each(pos, &cpbdev->cpdev_list){
1298 cpdev = list_entry(pos, struct cp_lkm_usb_dev, list);
1299 LOG("Unlink cpdev: %p from pm", cpdev);
1300 cp_lkm_pm_usb_link(cpdev->edi, cpdev->pm_id, 0);
1301 cpdev->edi->usb_send_ctx = NULL;
1302 }
1303
1304 spin_lock_irqsave(&cpbdev->processing_state_lock, flags);
1305 done = true;
1306 }
1307 spin_unlock_irqrestore(&cpbdev->processing_state_lock, flags);
1308 return done;
1309}
1310
1311static bool cp_lkm_usb_shuter_down_do_other_tasklet(void* ctx1, void* ctx2)
1312{
1313 struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev*)ctx1;
1314 unsigned long flags;
1315 bool done = false;
1316 spin_lock_irqsave(&cpbdev->other_state_lock, flags);
1317 if(cpbdev->other_state == USB_PROCESS_STATE_IDLE){
1318 cpbdev->other_state = USB_PROCESS_STATE_PAUSED;
1319 done = true;
1320 }
1321 spin_unlock_irqrestore(&cpbdev->other_state_lock, flags);
1322 return done;
1323}
1324
1325static bool cp_lkm_usb_shuter_down_do_empty_queues(void* ctx1, void* ctx2)
1326{
1327 struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev*)ctx1;
1328 bool done = false;
1329
1330 if (skb_queue_empty(&cpbdev->in_q) &&
1331 skb_queue_empty(&cpbdev->out_q) &&
1332 skb_queue_empty(&cpbdev->ctrlq)){
1333 done = true;
1334 }
1335 return done;
1336}
1337
1338static void cp_lkm_usb_shuter_down(struct cp_lkm_usb_base_dev* cpbdev)
1339{
1340 struct list_head *entry, *nxt;
1341 struct cp_lkm_base_ep *bep;
1342
1343
1344 //printk("%s() done\n", __FUNCTION__);
1345
1346 //Unlink from the pm and disable the data state machine
1347 LOG("Unlink cpdev from pm");
1348 cp_lkm_do_or_die(cpbdev, NULL, cp_lkm_usb_shuter_down_do_pm_unlink, CP_LKM_TIMEOUT_MS, CP_LKM_ITER, "Failed to unlink pm from cpdev");
1349
1350 //disable the 'other' tasklet
1351 LOG("Disable cpdev other tasklet");
1352 cp_lkm_do_or_die(cpbdev, NULL, cp_lkm_usb_shuter_down_do_other_tasklet, CP_LKM_TIMEOUT_MS, CP_LKM_ITER, "Failed to shutdown cpdev other tasklet");
1353
1354 //Once we get here no xmits can happen or any recv or xmit done processing can happen so no new kevents can be scheduled
1355 //so we can stop them here
1356 //clear all the flags before flushing the kevents so that we won't try to do anything during the kevent callback
1357 list_for_each_safe(entry, nxt, &cpbdev->in_bep_list) {
1358 bep = list_entry(entry, struct cp_lkm_base_ep, list);
1359 bep->err_flags = 0;
1360 bep->con_flags = 0;
1361 }
1362 list_for_each_safe(entry, nxt, &cpbdev->out_bep_list) {
1363 bep = list_entry(entry, struct cp_lkm_base_ep, list);
1364 bep->err_flags = 0;
1365 bep->con_flags = 0;
1366 }
1367
1368 //This forces the kernel to run all scheduled kevents, so any of our pending ones will run. (Note: Make sure
1369 //our kevent handlers check to see if we are attached before doing anything so that we don't schedule anything new while
1370 //shutting down)
1371 LOG("Cancel cpdev kevents");
1372 cancel_work_sync(&cpbdev->kevent);
1373
1374 //Make sure all the urbs have been cancelled
1375 // ensure there are no more active urbs
1376 //set_current_state(TASK_UNINTERRUPTIBLE);
1377 //these cause the urbs to be cancelled and the callbacks to be called. The urbs are removed from
1378 //the queues in the callbacks.
1379 cp_lkm_usb_unlink_urbs (cpbdev, &cpbdev->out_q, NULL);
1380 cp_lkm_usb_unlink_urbs (cpbdev, &cpbdev->in_q, NULL);
1381 cp_lkm_usb_unlink_urbs (cpbdev, &cpbdev->ctrlq, NULL);
1382
1383 LOG("Wait for all cpdev urbs to be returned");
1384 cp_lkm_do_or_die(cpbdev, NULL, cp_lkm_usb_shuter_down_do_empty_queues, CP_LKM_TIMEOUT_MS, CP_LKM_ITER, "Failed to empty cpdev queues");
1385
1386 //shutdown timer and tasklets
1387 LOG("Shutdown cpdev timers and tasklets");
1388 del_timer_sync (&cpbdev->rx_delay);
1389 cp_lkm_usb_stuck_check(cpbdev, CP_LKM_STUCK_DEINIT);
1390
1391 tasklet_kill(&cpbdev->data_process_tasklet);
1392 tasklet_kill(&cpbdev->other_process_tasklet);
1393
1394 // All outstanding transfers are back, so now we can clean up.
1395 cp_lkm_usb_clean_list(&cpbdev->data_tx_done);
1396 cp_lkm_usb_clean_list(&cpbdev->data_rx_done);
1397 cp_lkm_usb_clean_list(&cpbdev->other_done);
1398
1399 //printk("%s() done\n", __FUNCTION__);
1400 usb_set_intfdata(cpbdev->intf, NULL);
1401 usb_put_intf(cpbdev->intf);
1402 cpbdev->intf = NULL;
1403 LOG("cpdev unplug done");
1404
1405 return;
1406
1407}
1408
1409// Called when the USB hub detects that our device just unplugged.
1410// Called in a thread context. We do the lower usb cleanup here because there
1411// are some things that have to be done before exiting from disconnect.
1412// We don't clean up the upper layer stuff because the upper layer doesn't yet know
1413// we are unplugged and will continue to send us data. When the upper layer gets the
1414// unplug notify, it will call cp_lkm_usb_unplug_intf. We finish cleaning up in there.
1415void cp_lkm_usb_disconnect(struct usb_interface *intf)
1416{
1417 struct cp_lkm_usb_dev* cpdev;
1418 struct cp_lkm_usb_base_dev* cpbdev;
1419 //unsigned long flags;
1420 int unique_id;
1421
1422 // We don't want this function to run at the same time as any of the calls from the modem common stack (ioctl and write)
1423 // They all grab this lock for the duration of their calls. They also check the state of the device before proceeding.
1424 // Once we have the lock, we know none of them are running. Any new calls will block waiting on the lock.
1425 // If we then change the state to dead we can release the lock while we do the rest of cleanup. When they get the lock
1426 // they will see the state is dead and error out and return immediately. This prevents us from blocking the common modem thread.
1427 spin_lock(&cp_lkm_usb_mgr.lock);
1428
1429 //If cpdev is not in intf, then this is the close->disconnect path, so do nothing
1430 unique_id = (uintptr_t)usb_get_intfdata(intf);
1431
1432 //struct usb_device *udev;
1433 //printk("%s() start, id: %d\n", __FUNCTION__, unique_id);
1434
1435 //see if device already went away, this should be impossible
1436 //the unique id is always for the first instance if running clones
1437 cpdev = cp_lkm_usb_find_dev(unique_id);
1438 if(!cpdev) {
1439 //printk("%s() no cpdev, id: %d\n", __FUNCTION__, unique_id);
1440 spin_unlock(&cp_lkm_usb_mgr.lock);
1441 return;
1442 }
1443 cpbdev = cpdev->cpbdev;
1444 cpbdev->disconnect_wait = true;
1445
1446 // Mark the device as dead so we won't start anything new.
1447 // NOTE: make sure nothing new can be started on the USB side from this point on.
1448 // This includes transmits from the network. Transmits from cpusb.
1449 // Recv packets, halt clears, ioctls etc
1450 cp_lkm_usb_mark_base_as_dead(cpbdev);
1451
1452 // Once device is marked dead, we can release the semaphore. This is so write and ioctl from the modem stack
1453 // can return quickly with errors instead of blocking while the disconnect completes.
1454 spin_unlock(&cp_lkm_usb_mgr.lock);
1455
1456 cp_lkm_usb_shuter_down(cpbdev);
1457
1458 cpbdev->disconnect_wait = false;
1459 memref_deref(cpbdev);
1460
1461 //printk("%s() done id: %d\n", __FUNCTION__,unique_id);
1462}
1463
1464static void cp_lkm_usb_base_dev_finalize(void *arg)
1465{
1466 struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev*)arg;
1467 struct list_head *entry, *nxt;
1468 struct cp_lkm_base_ep *bep;
1469 //int unique_id = cpbdev->base_id;
1470 //printk("%s()\n", __FUNCTION__);
1471
1472 //if was added to the list, need to remove it.
1473 if(cpbdev->list.next != &cpbdev->list) {
1474 spin_lock(&cp_lkm_usb_mgr.lock);
1475 list_del(&cpbdev->list);
1476 //printk("%s() free cpbdev from global list \n", __FUNCTION__);
1477 spin_unlock(&cp_lkm_usb_mgr.lock);
1478 }
1479
1480 //These should already be empty, but just in case
1481 //printk("%s() clean lists\n", __FUNCTION__);
1482 cp_lkm_usb_clean_list(&cpbdev->in_q);
1483 cp_lkm_usb_clean_list(&cpbdev->out_q);
1484 cp_lkm_usb_clean_list(&cpbdev->ctrlq);
1485 cp_lkm_usb_clean_list(&cpbdev->data_tx_done);
1486 cp_lkm_usb_clean_list(&cpbdev->data_rx_done);
1487 cp_lkm_usb_clean_list(&cpbdev->other_done);
1488
1489 if(cpbdev->wrapper_ctxt) {
1490 //printk("%s() free wrapper\n", __FUNCTION__);
1491 cp_lkm_wrapper_instance_free(cpbdev->wrapper_ctxt);
1492 cpbdev->wrapper_ctxt = NULL;
1493 }
1494 if(cpbdev->usb_driver) {
1495 //printk("%s() free driver\n", __FUNCTION__);
1496 kfree(cpbdev->usb_driver);
1497 cpbdev->usb_driver = NULL;
1498 }
1499 if(cpbdev->usb_id_table) {
1500 //printk("%s() free id table\n", __FUNCTION__);
1501 kfree(cpbdev->usb_id_table);
1502 cpbdev->usb_id_table = NULL;
1503 }
1504 if(cpbdev->udev) {
1505 //printk("%s() free udev\n", __FUNCTION__);
1506 usb_put_dev (cpbdev->udev);
1507 cpbdev->udev = NULL;
1508 }
1509
1510 //printk("%s() - free eps\n",__FUNCTION__);
1511 list_for_each_safe(entry, nxt, &cpbdev->cpdev_list) {
1512 struct cp_lkm_usb_dev* cpdev = list_entry(entry, struct cp_lkm_usb_dev, list);
1513 //printk("%s() - free cpdev: %p from cpbdev: %p\n",__FUNCTION__, cpdev, cpbdev);
1514 list_del(&cpdev->list);
1515 memref_deref(cpdev);
1516 }
1517 list_for_each_safe(entry, nxt, &cpbdev->in_bep_list) {
1518 bep = list_entry(entry, struct cp_lkm_base_ep, list);
1519 //printk("%s() - free in bep: %p from cpbdev: %p\n",__FUNCTION__,bep, cpbdev);
1520 list_del(&bep->list);
1521 memref_deref(bep);
1522 }
1523 list_for_each_safe(entry, nxt, &cpbdev->out_bep_list) {
1524 bep = list_entry(entry, struct cp_lkm_base_ep, list);
1525 //printk("%s() - free out bep: %p from cpbdev: %p\n ",__FUNCTION__,bep, cpbdev);
1526 list_del(&bep->list);
1527 memref_deref(bep);
1528 }
1529 //printk("%s() done base_id: %d\n", __FUNCTION__,unique_id);
1530
1531}
1532
1533static void cp_lkm_usb_dev_finalize(void *arg)
1534{
1535 struct cp_lkm_usb_dev* cpdev = (struct cp_lkm_usb_dev*)arg;
1536 struct list_head *entry, *nxt;
1537 struct cp_lkm_ep *ep;
1538
1539 //printk("%s() start\n", __FUNCTION__);
1540 //todo remove
1541 //del_timer_sync(&cpdev->dbg_timer);
1542
1543 //printk("%s() - free eps\n",__FUNCTION__);
1544 list_for_each_safe(entry, nxt, &cpdev->in_ep_list) {
1545 ep = list_entry(entry, struct cp_lkm_ep, list_cpdev);
1546 //printk("%s() - free ep: %p, num: %d from cpdev: %p\n",__FUNCTION__,ep, ep->ep_num, cpdev);
1547 list_del(&ep->list_cpdev);
1548 memref_deref(ep);
1549 }
1550 list_for_each_safe(entry, nxt, &cpdev->out_ep_list) {
1551 ep = list_entry(entry, struct cp_lkm_ep, list_cpdev);
1552 //printk("%s() - free ep: %p, num: %d from cpdev: %p\n",__FUNCTION__,ep, ep->ep_num, cpdev);
1553 list_del(&ep->list_cpdev);
1554 memref_deref(ep);
1555 }
1556
1557 if(cpdev->edi) {
1558 //printk("%s() free edi\n", __FUNCTION__);
1559 cpdev->edi->usb_send_ctx = NULL;
1560 cpdev->edi->usb_send = NULL;
1561
1562 memref_deref(cpdev->edi);
1563 cpdev->edi = NULL;
1564 }
1565
1566 //printk("%s() end \n", __FUNCTION__);
1567}
1568
1569static int cp_lkm_usb_plug_intf(struct cp_lkm_usb_plug_intf* pi)
1570{
1571 int retval;
1572 struct cp_lkm_usb_dev* cpdev = NULL;
1573 struct cp_lkm_usb_base_dev* cpbdev = NULL;
1574 bool need_new;
1575 bool is_cloneable;
1576
1577 //Make sure we aren't going to overflow the skb space reserved for us to use
1578 //DEBUG_ASSERT(sizeof(struct skb_data) < sizeof(((struct sk_buff*)0)->cb));
1579 //DEBUG_INFO("%s(), skb_data size: %d, skb_buff cb size: %d",__FUNCTION__,sizeof(struct skb_data),sizeof(((struct sk_buff*)0)->cb));
1580
1581 // We need to alloc a new cpbdev on plug if:
1582 // 1. The device is not cloned at this layer (thus each plug has its own cpbdev)
1583 // Note: Some devices are cloned at other layers (cpusb_linux.c), so they can be running as clones in the system, but not at this layer.
1584 // This is why we can't just look at the clone_num to determine.
1585 // 2. It is cloneable and clone_num is 0 (only the first clone gets a new cpbdev, the rest share it)
1586 is_cloneable = pi->feature_flags & CP_LKM_FEATURE_CLONE_MUXED_INTF;
1587 need_new = !is_cloneable || (is_cloneable && pi->clone_num == 0);
1588
1589 //printk("%s() start id:%d vid/pid: 0x%x/0x%x, bus/addr: %d/%d, intf: %d, flags: 0x%x, clone: %d, mux: %d\n", __FUNCTION__, pi->unique_id, pi->vid, pi->pid, pi->bus, pi->addr, pi->intf_num, pi->feature_flags, pi->clone_num, pi->mux_id);
1590
1591 if (need_new) {
1592 //first instance, so need a new cpbdev
1593 cpbdev = memref_alloc_and_zero(sizeof(struct cp_lkm_usb_base_dev), cp_lkm_usb_base_dev_finalize);
1594 if(!cpbdev) {
1595 //printk("%s() failed to alloc cpbdev\n", __FUNCTION__);
1596 goto init_fail;
1597 }
1598 //printk("%s() id: %d, alloc'd new cpbdev: %p\n", __FUNCTION__, pi->unique_id, cpbdev);
1599 cpbdev->base_state = CP_LKM_USB_INIT;
1600 cpbdev->vid = pi->vid;
1601 cpbdev->pid = pi->pid;
1602 cpbdev->intf_num = pi->intf_num;
1603 cpbdev->alt_intf_num = pi->alt_intf_num;
1604 cpbdev->usb_bus = pi->bus;
1605 cpbdev->usb_addr = pi->addr;
1606 cpbdev->feature_flags = pi->feature_flags;
1607 cpbdev->base_id = pi->unique_id;
1608 INIT_LIST_HEAD(&cpbdev->in_bep_list);
1609 INIT_LIST_HEAD(&cpbdev->out_bep_list);
1610 INIT_LIST_HEAD(&cpbdev->list);
1611 INIT_LIST_HEAD(&cpbdev->cpdev_list);
1612 cpbdev->data_in_bep_num = pi->ep_in;
1613 cpbdev->data_out_bep_num = pi->ep_out;
1614
1615 //alloc and register the usb driver
1616 cpbdev->usb_driver = kzalloc(sizeof(struct usb_driver), GFP_KERNEL);
1617 if(!cpbdev->usb_driver) {
1618 //printk("%s() failed to alloc driver\n", __FUNCTION__);
1619 goto init_fail;
1620 }
1621
1622 cpbdev->usb_id_table = kzalloc(sizeof(struct usb_device_id)*2, GFP_KERNEL);
1623 if(!cpbdev->usb_id_table) {
1624 //printk("%s() failed to alloc table\n", __FUNCTION__);
1625 goto init_fail;
1626 }
1627
1628 cpbdev->usb_id_table[0].idVendor = cpbdev->vid;
1629 cpbdev->usb_id_table[0].idProduct = cpbdev->pid;
1630 cpbdev->usb_id_table[0].match_flags = USB_DEVICE_ID_MATCH_DEVICE;
1631 cpbdev->usb_id_table[0].driver_info = (unsigned long)pi->unique_id;
1632
1633 //create unique drvr string
1634 sprintf(cpbdev->usb_drvr_name, USB_DRVR_FRMT_STR, pi->unique_id);
1635 cpbdev->usb_driver->name = cpbdev->usb_drvr_name;
1636 cpbdev->usb_driver->probe = cp_lkm_usb_probe;
1637 cpbdev->usb_driver->disconnect = cp_lkm_usb_disconnect;
1638 cpbdev->usb_driver->id_table = cpbdev->usb_id_table;
1639
1640
1641 skb_queue_head_init (&cpbdev->in_q);
1642 skb_queue_head_init (&cpbdev->out_q);
1643 skb_queue_head_init (&cpbdev->ctrlq);
1644 skb_queue_head_init (&cpbdev->data_tx_done);
1645 skb_queue_head_init (&cpbdev->data_rx_done);
1646 skb_queue_head_init (&cpbdev->other_done);
1647 cpbdev->data_q_len = 0;
1648 spin_lock_init(&cpbdev->data_q_lock);
1649 spin_lock_init(&cpbdev->processing_state_lock);
1650 spin_lock_init(&cpbdev->other_state_lock);
1651 cpbdev->processing_state = USB_PROCESS_STATE_IDLE;
1652 cpbdev->other_state = USB_PROCESS_STATE_IDLE;
1653 INIT_WORK(&cpbdev->kevent, cp_lkm_usb_kevent);
1654
1655 cpbdev->rx_delay.function = cp_lkm_usb_delay_timer; //TODO: this needs to handle the cpdev or cpbdev??
1656 cpbdev->rx_delay.data = (unsigned long) cpbdev; //????? should this be cpdev??
1657 init_timer (&cpbdev->rx_delay);
1658
1659 cpbdev->data_process_tasklet.func = cp_lkm_usb_process_data_done_tasklet; //TODO: modify to take cpbdev
1660 cpbdev->data_process_tasklet.data = (unsigned long) cpbdev;
1661
1662 cpbdev->other_process_tasklet.func = cp_lkm_usb_process_other_done_tasklet; //TODO: modify to take cpbdev
1663 cpbdev->other_process_tasklet.data = (unsigned long) cpbdev;
1664
1665 cpbdev->disconnect_wait = false;
1666
1667 spin_lock(&cp_lkm_usb_mgr.lock);
1668 list_add_tail(&cpbdev->list, &cp_lkm_usb_mgr.dev_list);
1669 spin_unlock(&cp_lkm_usb_mgr.lock);
1670
1671 // When we call register, it calls our probe function with all available matching interfaces. In probe
1672 // we save the result of the probe so we can return fail here if it didn't go well
1673 //printk("%s() reg drvr for vid:%x, pid:%x, addr:%d, intf:%d\n", __FUNCTION__, pi->vid,pi->pid,pi->addr,pi->intf_num);
1674 retval = usb_register(cpbdev->usb_driver);
1675 if(retval || cpbdev->plug_result != 0) {
1676 //printk("%s() failed to register driver or probe failed retval:%d, plug_result:%d\n", __FUNCTION__, retval, cpbdev->plug_result);
1677 goto init_fail;
1678 }
1679 cpbdev->base_state = CP_LKM_USB_CTRL;
1680 DEBUG_TRACE("%s() done", __FUNCTION__);
1681 }
1682 else{
1683 //clone, should already have a base dev
1684 cpbdev = cp_lkm_usb_find_base_dev(pi->bus, pi->addr, pi->unique_id, CP_LKM_DEV_MATCH_BUS_ADDR_ONLY);
1685 if(!cpbdev) {
1686 //printk("%s() failed to find cpbdev\n", __FUNCTION__);
1687 goto init_fail;
1688 }
1689 //printk("%s() id: %d, already have cpbdev: %p\n", __FUNCTION__, pi->unique_id, cpbdev);
1690 }
1691
1692 // make sure base dev has all the feature flags of every clone
1693 cpbdev->feature_flags |= pi->feature_flags;
1694
1695 //printk("%s() id: %d, cpbdev: %p, alloc new cpdev\n", __FUNCTION__, pi->unique_id, cpbdev);
1696 cpdev = memref_alloc_and_zero(sizeof(struct cp_lkm_usb_dev), cp_lkm_usb_dev_finalize);
1697 if(!cpdev) {
1698 //printk("%s() failed to alloc cpdev\n", __FUNCTION__);
1699 goto init_fail;
1700 }
1701 //printk("%s() id: %d, cpdev: %p\n", __FUNCTION__, pi->unique_id, cpdev);
1702
1703 INIT_LIST_HEAD(&cpdev->in_ep_list);
1704 INIT_LIST_HEAD(&cpdev->out_ep_list);
1705 INIT_LIST_HEAD(&cpdev->list);
1706 //add to list right away so if anything below fails, it will be cleaned up when cpbdev is cleaned up
1707 list_add_tail(&cpdev->list, &cpbdev->cpdev_list);
1708 cpdev->cpbdev = cpbdev;
1709 cpdev->unique_id = pi->unique_id;
1710 //clone and mux are only used with muxed clone interfaces.
1711 cpdev->clone_num = (pi->feature_flags & CP_LKM_FEATURE_CLONE_MUXED_INTF) ? pi->clone_num : 0;
1712 cpdev->mux_id = (pi->feature_flags & CP_LKM_FEATURE_CLONE_MUXED_INTF) ? pi->mux_id : CP_LKM_WRAPPER_DEFAULT_ID;
1713 //printk("%s() unique_id: %d, clone: %d, mux_id: %d\n", __FUNCTION__, pi->unique_id, pi->clone_num, cpdev->mux_id);
1714 cpdev->data_in_ep_num = pi->ep_in;
1715 cpdev->data_out_ep_num = pi->ep_out;
1716 //pre-create the data endpoints so they will be first in the list, since they are most often used
1717 cp_lkm_usb_create_ep(cpdev, pi->ep_in);
1718 cp_lkm_usb_create_ep(cpdev, pi->ep_out);
1719 cpdev->edi = memref_alloc_and_zero(sizeof(struct cp_lkm_edi), NULL);
1720 if(!cpdev->edi) {
1721 //printk("%s() failed to alloc edi\n", __FUNCTION__);
1722 goto init_fail;
1723 }
1724 cpdev->edi->usb_send = cp_lkm_usb_start_xmit;
1725
1726 //for debug, comment out before checkin
1727 //cpdev->dbg_timer.function = cp_lkm_usb_dbg_timer;
1728 //cpdev->dbg_timer.data = (unsigned long)cpdev;
1729 //init_timer(&cpdev->dbg_timer);
1730 //mod_timer(&cpdev->dbg_timer, jiffies + msecs_to_jiffies(10000));
1731
1732 //TODO CA: I think this shouldn't be set until open, commenting out for now to see if blows chow in plug fest
1733 //cpdev->edi->usb_send_ctx = cpdev;
1734
1735 cpdev->state = CP_LKM_USB_CTRL;
1736
1737 //printk("%s() done success id: %d\n", __FUNCTION__, pi->unique_id);
1738
1739 return 0;
1740
1741init_fail:
1742 if(cpbdev) {
1743 //the finalizer for cpbdev does the clean up
1744 memref_deref(cpbdev);
1745 }
1746 //returning an error to the modem stack on plug will cause it to hard reset
1747 //the modem, thus causing the rest of the driver cleanup to occur
1748 //printk("%s() open_intf fail\n", __FUNCTION__);
1749 return -1;
1750}
1751
1752static int cp_lkm_usb_set_wrapper(struct cp_lkm_usb_set_wrapper* sw)
1753{ //unsigned long flags;
1754 struct cp_lkm_usb_dev* cpdev;
1755 struct cp_lkm_usb_base_dev* cpbdev;
1756 void* wrapper_info = NULL;
1757 unsigned long not_copied;
1758 int res = 0;
1759 //printk("%s() unique_id: %d, clone: %d, mux_id: %d\n", __FUNCTION__, sw->unique_id, sw->clone_num, sw->mux_id);
1760
1761 spin_lock(&cp_lkm_usb_mgr.lock);
1762 cpdev = cp_lkm_usb_find_dev(sw->unique_id);
1763
1764 if(!cpdev) {
1765 spin_unlock(&cp_lkm_usb_mgr.lock);
1766 //printk("%s() no cpdev found for id: %d\n", __FUNCTION__, sw->unique_id);
1767 return -1;
1768 }
1769 cpbdev = cpdev->cpbdev;
1770 if(cpbdev->base_state == CP_LKM_USB_DEAD){
1771 //modem is unplugging, upper layer just doesn't know it yet, so act like ok until it finds out
1772 spin_unlock(&cp_lkm_usb_mgr.lock);
1773 //printk("%s() set_wrapper fail cpdev:%p, state:%d\n", __FUNCTION__, cpdev, cpdev->state);
1774 return 0;
1775 }
1776
1777// benk - what if wrapper_info_len is 0???
1778 if(cpbdev->wrapper_ctxt){
1779 //already have a wrapper so free it
1780 cp_lkm_wrapper_instance_free(cpbdev->wrapper_ctxt);
1781 }
1782
1783 if(sw->wrapper_info_len) {
1784 wrapper_info = kzalloc(sw->wrapper_info_len, GFP_KERNEL);
1785 if(!wrapper_info) {
1786 DEBUG_ERROR("%s() couldn't alloc wrapper info", __FUNCTION__);
1787 res = -1;
1788 goto set_wrapper_done;
1789 }
1790 }
1791
1792
1793 //copy the wrapper info from user to kernel space
1794 not_copied = copy_from_user(wrapper_info, sw->wrapper_info, sw->wrapper_info_len);
1795 if (not_copied) {
1796 DEBUG_ERROR("%s() couldn't copy wrapper info", __FUNCTION__);
1797 res = -1;
1798 goto set_wrapper_done;
1799 }
1800 //alloc the wrapper instance. On success it takes ownership of the wrapper_info and is responsible for freeing it
1801 DEBUG_INFO("%s() wrapper: %d", __FUNCTION__, sw->wrapper);
1802 cpbdev->wrapper_ctxt = cp_lkm_wrapper_instance_alloc(sw->wrapper, wrapper_info, sw->wrapper_info_len);
1803 if(!cpbdev->wrapper_ctxt){
1804 DEBUG_ERROR("%s() couldn't alloc wrapper", __FUNCTION__);
1805 res = -1;
1806 goto set_wrapper_done;
1807 }
1808 cpbdev->wrapper_hdr_size = cp_lkm_wrapper_hdr_size(cpbdev->wrapper_ctxt);
1809 cp_lkm_wrapper_set_state(cpbdev->wrapper_ctxt, cpdev->mux_id, CP_LKM_WRAPPER_CTRL);
1810
1811 cpdev->clone_num = sw->clone_num;
1812 cpdev->mux_id = sw->mux_id;
1813
1814
1815set_wrapper_done:
1816 if(wrapper_info) {
1817 kfree(wrapper_info);
1818 }
1819
1820 spin_unlock(&cp_lkm_usb_mgr.lock);
1821 return res;
1822
1823}
1824
1825static int cp_lkm_usb_set_mux_id(struct cp_lkm_usb_set_mux_id* smi)
1826{ //unsigned long flags;
1827 struct cp_lkm_usb_dev* cpdev;
1828 //struct cp_lkm_usb_base_dev* cpbdev;
1829 int res = 0;
1830
1831 //printk("%s()\n", __FUNCTION__);
1832
1833 spin_lock(&cp_lkm_usb_mgr.lock);
1834 cpdev = cp_lkm_usb_find_dev(smi->unique_id);
1835 if(!cpdev) {
1836 spin_unlock(&cp_lkm_usb_mgr.lock);
1837 //printk("%s() failed to find cpdev for id: %d\n", __FUNCTION__, smi->unique_id);
1838 return -1;
1839 }
1840 if(cpdev->cpbdev->base_state == CP_LKM_USB_DEAD){
1841 //modem is unplugging, upper layer just doesn't know it yet, so act like ok until it finds out
1842 spin_unlock(&cp_lkm_usb_mgr.lock);
1843 return 0;
1844 }
1845 cpdev->mux_id = smi->mux_id;
1846 //printk("%s() unique_id: %d, mux_id: %d\n", __FUNCTION__, smi->unique_id, smi->mux_id);
1847
1848 spin_unlock(&cp_lkm_usb_mgr.lock);
1849 return res;
1850
1851}
1852
1853static int cp_lkm_usb_open_intf(struct cp_lkm_usb_open_intf* oi)
1854{
1855 //unsigned long flags;
1856 struct cp_lkm_usb_dev* cpdev;
1857
1858 //printk("%s() u-uid: %d\n", __FUNCTION__,oi->unique_id);
1859
1860 spin_lock(&cp_lkm_usb_mgr.lock);
1861 cpdev = cp_lkm_usb_find_dev(oi->unique_id);
1862
1863 //if state isn't CP_LKM_USB_CTRL, then the interface either did not plug for some reason (i.e. didn't get probe from usb),
1864 //or it plugged, but then unplugged before open was called.
1865 if(!cpdev || cpdev->cpbdev->base_state != CP_LKM_USB_CTRL) {
1866 spin_unlock(&cp_lkm_usb_mgr.lock);
1867 //printk("%s() open_intf fail cpdev:%p, state:%d\n", __FUNCTION__, cpdev, cpdev?cpdev->state:0xff);
1868 return -1;
1869 }
1870 cpdev->state = CP_LKM_USB_ACTIVE;
1871 cpdev->edi->usb_send_ctx = cpdev; //this allows the network side to call me
1872 cp_lkm_wrapper_set_state(cpdev->cpbdev->wrapper_ctxt, cpdev->mux_id, CP_LKM_WRAPPER_ACTIVE);
1873 spin_unlock(&cp_lkm_usb_mgr.lock);
1874 //printk("%s() done\n", __FUNCTION__);
1875 return 0;
1876
1877}
1878
1879static int cp_lkm_usb_close_intf(struct cp_lkm_usb_close_intf* ci)
1880{
1881 //unsigned long flags;
1882 struct cp_lkm_usb_dev* cpdev;
1883
1884 //printk("%s() u-uid: %d\n", __FUNCTION__, ci->unique_id);
1885
1886 //down(&cp_lkm_usb_mgr.thread_sem);
1887 spin_lock(&cp_lkm_usb_mgr.lock);
1888 cpdev = cp_lkm_usb_find_dev(ci->unique_id);
1889
1890 if(!cpdev || cpdev->cpbdev->base_state == CP_LKM_USB_DEAD) {
1891 //device has already unplugged, or is half-unplugged, so don't allow this action to complete
1892 spin_unlock(&cp_lkm_usb_mgr.lock);
1893 //up(&cp_lkm_usb_mgr.thread_sem);
1894 return 0;
1895 }
1896 cpdev->edi->usb_send_ctx = NULL; //disconnect from network side so he won't send me any more data
1897 cpdev->state = CP_LKM_USB_CTRL;
1898 cp_lkm_wrapper_set_state(cpdev->cpbdev->wrapper_ctxt, cpdev->mux_id, CP_LKM_WRAPPER_CTRL);
1899 spin_unlock(&cp_lkm_usb_mgr.lock);
1900 //up(&cp_lkm_usb_mgr.thread_sem);
1901 //printk("%s() done\n", __FUNCTION__);
1902
1903 return 0;
1904}
1905
1906static bool cp_lkm_usb_unplug_do_disconnect_wait(void* ctx1, void* ctx2)
1907{
1908 struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev*)ctx1;
1909 bool done = false;
1910 if (cpbdev->disconnect_wait == false){
1911 done = true;
1912 }
1913 return done;
1914}
1915
1916/*
1917 * This function is called when the common modem stack wants to give up the interface.
1918 * There are two scenarios:
1919 * 1. Modem unplugs which leads to the following flow:
1920 * -> cp_lkm_usb_disconnect is called by USB sublayer, it cleans up bottom half of cpdev and waits for common modem stack unplug
1921 * -> common modem stack sees unplug event
1922 * -> it calls this function to finish the cleanup and deregister the driver
1923 * -> we are done
1924 *
1925 * 2. Common modem stack decides to give up the interface due to one common
1926 * modem driver relinquishing the modem and another common modem driver grabbing it.
1927 * This leads to the following flow:
1928 * -> Common modem stack calls this function.
1929 * -> it calls usb_deregister() which will call cp_lkm_usb_disconnect in context
1930 * -> cp_lkm_usb_disconnect shuts down and frees the usb interface
1931 * -> After usb_deregister() exits we finish and exit.
1932 *
1933 * Notes: This means the two shutdown functions, this one and cp_lkm_usb_disconnect can be
1934 * run in any order, so they must not stomp on each other. For example since
1935 * cp_lkm_usb_disconnect frees the interface with the kernel, this function better
1936 * not do anything that requires the interface after calling usb_deregister()
1937 *
1938 * The modem stack is single threaded so this function can never be reentrant
1939 */
1940static int cp_lkm_usb_unplug_intf(struct cp_lkm_usb_unplug_intf* ui)
1941{
1942 //find dev in list by unique id
1943 struct cp_lkm_usb_dev* cpdev;
1944 struct cp_lkm_usb_base_dev* cpbdev;
1945 bool shuter_down = true;
1946 struct list_head *pos;
1947
1948 //printk("%s() start id: %d\n", __FUNCTION__, ui->unique_id);
1949 spin_lock(&cp_lkm_usb_mgr.lock);
1950 //The device should always exist, but if it doesn't, there is no need to blow up, so exit peacefully
1951 cpdev = cp_lkm_usb_find_dev(ui->unique_id);
1952 if(!cpdev) {
1953 spin_unlock(&cp_lkm_usb_mgr.lock);
1954 return -1;
1955 }
1956 cpbdev = cpdev->cpbdev;
1957
1958 cp_lkm_usb_mark_as_dead(cpdev);
1959
1960 list_for_each(pos, &cpbdev->cpdev_list){
1961 struct cp_lkm_usb_dev* tmp_cpdev = list_entry(pos, struct cp_lkm_usb_dev, list);
1962 if(tmp_cpdev->state != CP_LKM_USB_DEAD) {
1963 //don't shut down until all clone devices have unplugged
1964 shuter_down = false;
1965 break;
1966 }
1967 }
1968
1969 //free semaphore before calling usb_deregister because it causes disconnect to be called for case 2 in the header comments
1970 //which will try and grab the semaphore, so we would be deadlocked
1971 spin_unlock(&cp_lkm_usb_mgr.lock);
1972
1973 if (shuter_down) {
1974 LOG("Wait for cpdev to finish unplugging");
1975 cp_lkm_do_or_die(cpbdev, NULL, cp_lkm_usb_unplug_do_disconnect_wait,CP_LKM_TIMEOUT_MS,CP_LKM_ITER,"cpdev failed to finish disconnecting");
1976
1977 //printk("%s() usb_deregister\n",__FUNCTION__);
1978 usb_deregister(cpbdev->usb_driver);
1979
1980 /* clean up */
1981 memref_deref(cpbdev);
1982
1983 }
1984 /* IMPORTANT: don't do anything other than deref after call to deregister*/
1985
1986 LOG("cpdev done unplugging");
1987
1988 return 0;
1989}
1990
1991/*
1992 * Handle endpoint action requests from modem stack.
1993 *
1994 * Important things to know:
1995 * In normal mode:
1996 * 1. There will be 1 cpdev per cpbdev, and 1 ep per bep.
1997 * 2. Every different ep can either be listened on or recv'd on, but never both at the same time
1998 *
1999 * In clone mode:
2000 * 1. There will be n cpdevs per cpbdev, and n eps ber bep (depending on number of clones).
2001 * 2. Every different ep can either be listened on or recv'd on, but never both at the same time.
2002 * 3. All cloned data eps can be listened on at the same time (data header allows us to mux data between all the data eps, data endpoints don't use recv).
2003 * 4. With all other cloned eps of the same type (AT, CNS, QMI), only one clone can be listened on or recv'd on at a time.
2004 * This is because there are not headers on these channels to let us know where to mux the data to. Fortunately, the
2005 * modem stack enforces this, so we don't have to enforce it here, but we can use it to know how to route cloned packets
2006 * coming in on non-data channel endpoints
2007*/
2008static int cp_lkm_usb_ep_action(struct cp_lkm_usb_ep_action* ea)
2009{
2010 struct cp_lkm_ep* ep;
2011 struct cp_lkm_base_ep* bep = NULL;
2012 struct cp_lkm_usb_dev* cpdev;
2013 struct cp_lkm_usb_base_dev* cpbdev;
2014 //unsigned long flags;
2015 int pump_recv = 0;
2016
2017 //printk("%s() - action: %d, ep_num: 0x%x, id: %d\n",__FUNCTION__,ea->action, ea->ep_num, ea->unique_id);
2018
2019 spin_lock(&cp_lkm_usb_mgr.lock);
2020 //There should always be a device, and it should always be plugged
2021 cpdev = cp_lkm_usb_find_dev(ea->unique_id);
2022 if(!cpdev) {
2023 spin_unlock(&cp_lkm_usb_mgr.lock);
2024 //printk("%s() no device found for unique id: %d\n", __FUNCTION__, ea->unique_id);
2025 return -1;
2026 }
2027
2028 cpbdev = cpdev->cpbdev;
2029 if(cpbdev->base_state == CP_LKM_USB_INIT) {
2030 spin_unlock(&cp_lkm_usb_mgr.lock);
2031 //printk("%s() no probe yet, unique_id: %d, action: %d\n", __FUNCTION__,ea->unique_id,ea->action);
2032 return -1;
2033 }
2034 if(cpbdev->base_state == CP_LKM_USB_DEAD) {
2035 // The device can unplug down here before cpusb knows about it so it can continue to send us stuff.
2036 // The modem will unplug soon so just act like we did it and return ok. I didn't want to
2037 // return an error because that might cause cpusb unnecessary heartburn.
2038 spin_unlock(&cp_lkm_usb_mgr.lock);
2039 //printk("%s() cpdev already dead, shouldn't be doing this: id: %d, action: %d cpbdev: %p, cpdev: %p\n", __FUNCTION__,ea->unique_id,ea->action,cpbdev,cpdev);
2040 return 0;
2041 }
2042 DEBUG_ASSERT(cpbdev, "cpbdev is null");
2043 //create the ep if it doesn't already exist
2044 if(ea->action == EP_ACTION_CREATE) {
2045 cp_lkm_usb_create_ep(cpdev, ea->ep_num);
2046 }
2047
2048 if (ea->action == EP_ACTION_FLUSH_CONTROL) {
2049 ep = NULL;
2050 } else {
2051 ep = cp_lkm_usb_get_ep(cpdev, ea->ep_num);
2052 if(!ep) {
2053 spin_unlock(&cp_lkm_usb_mgr.lock);
2054 //printk("%s() failed to find ep: 0x%x for action: %d\n", __FUNCTION__, ea->ep_num, ea->action);
2055 return -1;
2056 }
2057 bep = ep->bep;
2058 DEBUG_ASSERT(bep,"base ep is null");
2059 }
2060
2061
2062 //if (ep && ea->action != EP_ACTION_RECV) {
2063 // printk("%s() - action: %d, ep_num: 0x%x, bep: %p, ep: %p, cpbdev: %p, cpdev: %p, id: %d\n",__FUNCTION__,ea->action, ea->ep_num, bep, ep, bep->cpbdev, ep->cpdev,ea->unique_id);
2064 //}
2065
2066 //printk("ea->action: %d, ep_num: %d\n", ea->action, ea->ep_num);
2067 switch(ea->action) {
2068 case EP_ACTION_CREATE:
2069 //printk("%s() - action: %d, ep_num: 0x%x, bep: %p, ep: %p, cpbdev: %p, cpdev: %p, id: %d\n",__FUNCTION__,ea->action, ea->ep_num, bep, ep, bep->cpbdev, ep->cpdev,ea->unique_id);
2070 //initialize endpoint fields
2071 bep->type = ea->ep_type;
2072 bep->max_transfer_size = ea->max_transfer_size;
2073 bep->interval = ea->interval;
2074
2075 DEBUG_ASSERT(cpbdev->udev,"udev is null");
2076 if(bep->ep_num & USB_DIR_IN) { //in
2077 if(bep->type == UE_BULK) {
2078 bep->pipe = usb_rcvbulkpipe(cpbdev->udev,bep->ep_num);
2079 }
2080 else{ //interrupt
2081 bep->pipe = usb_rcvintpipe(cpbdev->udev, bep->ep_num);
2082 }
2083 }
2084 else{ //out
2085 if(bep->type == UE_BULK) {
2086 bep->pipe = usb_sndbulkpipe(cpbdev->udev,bep->ep_num);
2087 }
2088 else{ //interrupt
2089 bep->pipe = usb_sndintpipe(cpbdev->udev, bep->ep_num);
2090 }
2091 }
2092 DEBUG_TRACE("%s() create action:%d, ep:0x%x, type:%d, pipe:0x%x", __FUNCTION__, ea->action, ea->ep_num, ea->ep_type, bep->pipe);
2093 break;
2094
2095 case EP_ACTION_LISTEN:
2096 DEBUG_TRACE("%s() listen action:%d, ep:0x%x, type:%d, pipe:0x%x", __FUNCTION__, ea->action, ea->ep_num, ea->ep_type, bep->pipe);
2097 ep->con_flags |= CP_LKM_USB_LISTEN;
2098 //listen on any endpoint starts listen on base
2099 bep->con_flags |= CP_LKM_USB_LISTEN;
2100 pump_recv = 1;
2101 break;
2102
2103 case EP_ACTION_LISTEN_STOP:
2104 {
2105 bool listen_done = true;
2106 struct list_head *entry, *nxt;
2107 struct cp_lkm_ep *tmp_ep;
2108
2109 DEBUG_TRACE("%s() listen stop action:%d, ep:0x%x, type:%d, pipe:0x%x", __FUNCTION__, ea->action, ea->ep_num, ea->ep_type, bep->pipe);
2110
2111 // the ep is done listening
2112 ep->con_flags &= ~CP_LKM_USB_LISTEN;
2113
2114 //now see if all eps on this bep are done listening
2115 list_for_each_safe(entry, nxt, &bep->eps) {
2116 tmp_ep = list_entry(entry, struct cp_lkm_ep, list_bep);
2117 if(tmp_ep->con_flags & CP_LKM_USB_LISTEN) {
2118 //if any of the eps on the bep still listening, then still listen on the bep
2119 listen_done = false;
2120 break;
2121 }
2122 }
2123 if(listen_done) {
2124 bep->con_flags &= ~CP_LKM_USB_LISTEN;
2125 //If RX_HALT bit set then there is an error on this endpoint and the kevent will be scheduled to fix the error. As part of the fix
2126 //he will unlink the urbs. Bad things can happen if we call cp_lkm_usb_unlink_urbs here at same time the kevent handler is calling it
2127 if(!test_bit (EVENT_RX_HALT, &bep->err_flags)){
2128 //TODO CORY: is it ok to call unlink while holding the global lock?? Can I set a flag and run the tasklet to do the work instead??
2129 cp_lkm_usb_unlink_urbs(cpbdev, &cpbdev->in_q, bep);
2130 }
2131 }
2132 }
2133 break;
2134
2135 case EP_ACTION_RECV:
2136 DEBUG_TRACE("%s() recv action:%d, ep:0x%x, type:%d, pipe:0x%x", __FUNCTION__, ea->action, ea->ep_num, ea->ep_type, bep->pipe);
2137 // can only have one pending recv on a given ep
2138 ep->con_flags |= CP_LKM_USB_RECV;
2139 bep->con_flags |= CP_LKM_USB_RECV;
2140 pump_recv = 1;
2141 break;
2142
2143 case EP_ACTION_FLUSH_CONTROL:
2144 //printk("%s() flush control action:%d\n", __FUNCTION__, ea->action);
2145 //TODO CORY: is it ok to call unlink while holding the global lock?? Can I set a flag and run the tasklet to do the work instead??
2146 //We don't schedule kevents to clear endpoint halts since they are self recovering so we don't need to test the halt bits on the ctrl channel
2147 cp_lkm_usb_unlink_urbs(cpbdev, &cpbdev->ctrlq, NULL);
2148 break;
2149
2150 case EP_ACTION_SET_MAX_TX_SIZE:
2151 //printk("%s() set max tx size to %d on ep: 0x%x\n",__FUNCTION__,ea->max_transfer_size, ea->ep_num);
2152 bep->max_transfer_size = ea->max_transfer_size;
2153 break;
2154
2155 default:
2156 break;
2157 }
2158
2159
2160 if(pump_recv) {
2161 cp_lkm_schedule_rx_restock(cpbdev, bep);
2162 }
2163
2164 spin_unlock(&cp_lkm_usb_mgr.lock);
2165
2166 return 0;
2167}
2168
2169static bool cp_lkm_usb_do_pm_link(void* ctx1, void* ctx2)
2170{
2171 struct cp_lkm_usb_dev* cpdev = (struct cp_lkm_usb_dev*)ctx1;
2172 struct cp_lkm_usb_base_dev* cpbdev = cpdev->cpbdev;
2173 struct cp_lkm_usb_pm_link* upl = (struct cp_lkm_usb_pm_link*)ctx2;
2174 unsigned long flags;
2175 bool done = false;
2176 int rc;
2177
2178 //printk("%s() usb id: %d, pm id: %d, link: %d\n", __FUNCTION__, upl->usb_unique_id, upl->pm_unique_id ,upl->link);
2179
2180 // We are getting ready to either link or unlink the usb to the protocol manager. This means we will be changing
2181 // function pointers that are used by the data processing state machine and by the code that schedules the data
2182 // processing machine.
2183 //
2184 // We need to shut both of those down before doing the linking.
2185 // 1: We shut the machine down by setting the state to USB_PROCESS_STATE_PAUSED.
2186 // 2: We shut down the scheduling by putting the data_q_len to CP_LKM_USB_PAUSED_CNT so the hw interrupts won't schedule a process
2187 spin_lock_irqsave(&cpbdev->processing_state_lock, flags);
2188 if(cpbdev->processing_state == USB_PROCESS_STATE_IDLE){
2189 cpbdev->processing_state = USB_PROCESS_STATE_PAUSED; //pauses the data processing soft irq handler
2190
2191 spin_lock(&cpbdev->data_q_lock);
2192 cpbdev->data_q_len = CP_LKM_USB_PAUSED_CNT; //stops the hw irq handlers from trying to schedule the soft irq handler
2193 spin_unlock(&cpbdev->data_q_lock);
2194
2195 if(upl->link) {
2196 cpdev->edi->usb_send_ctx = cpdev;
2197 }
2198
2199 //release lock while calling pm since we don't know how long they may take. We have already set the processing_state to
2200 //paused so the soft interrupt routines won't try to do anything so we are safe.
2201 spin_unlock_irqrestore(&cpbdev->processing_state_lock, flags);
2202
2203 rc = cp_lkm_pm_usb_link(cpdev->edi, upl->pm_unique_id, upl->link);
2204 DEBUG_ASSERT(rc == 0, "Failed to link usb and pm");
2205
2206 spin_lock_irqsave(&cpbdev->processing_state_lock, flags);
2207 if(upl->link) {
2208 if (cpdev->edi->pm_get_hdr_size && cpdev->edi->pm_recv_ctx) {
2209 cpdev->edi->pm_get_hdr_size(cpdev->edi->pm_recv_ctx, cpbdev->wrapper_hdr_size, &cpbdev->pm_hdr_size, &cpbdev->pm_hdr_offset);
2210 }
2211 }
2212 else{
2213 cpdev->edi->usb_send_ctx = NULL;
2214 }
2215
2216 cpdev->pm_id = upl->pm_unique_id;
2217
2218 spin_lock(&cpbdev->data_q_lock);
2219 //set things back up properly before re-enabling the soft irq and hardware handlers
2220 cpbdev->data_q_len = cpbdev->data_rx_done.qlen + cpbdev->data_tx_done.qlen; //this must be set before calling schedule_data_process
2221 spin_unlock(&cpbdev->data_q_lock);
2222
2223 cpbdev->processing_state = USB_PROCESS_STATE_IDLE;
2224 done = true;
2225 }
2226 spin_unlock_irqrestore(&cpbdev->processing_state_lock, flags);
2227
2228 return done;
2229}
2230
2231static int cp_lkm_usb_pm_link(struct cp_lkm_usb_pm_link* upl)
2232{
2233 struct cp_lkm_usb_dev* cpdev;
2234 struct cp_lkm_usb_base_dev* cpbdev;
2235
2236 spin_lock(&cp_lkm_usb_mgr.lock);
2237 //There should always be a device, and it should always be plugged
2238 cpdev = cp_lkm_usb_find_dev(upl->usb_unique_id);
2239
2240 //printk("%s() cpdev: %p, u-uid: %d, pm-uid: %d, up: %d\n", __FUNCTION__, cpdev, upl->usb_unique_id, upl->pm_unique_id, upl->link);
2241
2242 if(!cpdev || cpdev->cpbdev->base_state == CP_LKM_USB_INIT) {
2243 spin_unlock(&cp_lkm_usb_mgr.lock);
2244 //printk("%s() no device or no probe yet\n", __FUNCTION__);
2245 return -1;
2246 }
2247 cpbdev = cpdev->cpbdev;
2248 // The device can unplug down here before cpusb knows about it so it can continue to send us stuff.
2249 // The modem will unplug soon so just act like we did it and return ok. I didn't want to
2250 // return an error because that might cause cpusb unnecessary heartburn.
2251 if(cpbdev->base_state == CP_LKM_USB_DEAD) {
2252 spin_unlock(&cp_lkm_usb_mgr.lock);
2253 //printk("%s() device already unplugged\n", __FUNCTION__);
2254 return 0;
2255 }
2256
2257 //printk("%s() usb id: %d, pm id: %d, link: %d\n", __FUNCTION__, upl->usb_unique_id, upl->pm_unique_id ,upl->link);
2258 // We are getting ready to either link or unlink the usb to the protocol manager. This means we will be changing
2259 // function pointers that are used by the data processing state machine and by the code that schedules the data
2260 // processing machine.
2261 //
2262 // We need to shut both of those down before doing the linking.
2263 // 1: We shut the machine down by setting the state to USB_processing_state_PAUSED.
2264 // 2: We shut down the scheduling by putting the data_q_len to CP_LKM_USB_PAUSED_CNT so the hw interrupts won't schedule a process
2265 cp_lkm_do_or_die(cpdev, upl, cp_lkm_usb_do_pm_link, CP_LKM_TIMEOUT_MS, CP_LKM_ITER, "cpdev failed to link with pm");
2266
2267 //printk("%s() done\n", __FUNCTION__);
2268 spin_unlock(&cp_lkm_usb_mgr.lock);
2269 //force a resume
2270 cp_lkm_schedule_data_process(cpbdev, false, true, false);
2271 return 0;
2272}
2273
2274static int cp_lkm_usb_is_alive_intf(struct cp_lkm_usb_is_alive_intf *alivei)
2275{
2276 //find dev in list by unique id
2277 struct cp_lkm_usb_dev *cpdev;
2278 int alive;
2279
2280 //printk("%s() start\n", __FUNCTION__);
2281 spin_lock(&cp_lkm_usb_mgr.lock);
2282 //The device should always exist, but if it doesn't, there is no need to blow up, so exit peacefully
2283 cpdev = cp_lkm_usb_find_dev(alivei->unique_id);
2284
2285 if(!cpdev) {
2286 spin_unlock(&cp_lkm_usb_mgr.lock);
2287 return -1;
2288 }
2289
2290 alive = (cpdev->state == CP_LKM_USB_DEAD) ? -1 : 0;
2291 //free semaphore before calling usb_deregister because it causes disconnect to be called for case 2 in the header comments
2292 //which will try and grab the semaphore, so we would be deadlocked
2293 spin_unlock(&cp_lkm_usb_mgr.lock);
2294
2295 return alive;
2296}
2297static bool cp_lkm_usb_is_attached(struct cp_lkm_usb_dev* cpdev)
2298{
2299 return (cpdev->state == CP_LKM_USB_ACTIVE || cpdev->state == CP_LKM_USB_CTRL);
2300}
2301
2302static bool cp_lkm_usb_is_base_attached(struct cp_lkm_usb_base_dev* cpbdev)
2303{
2304 //base has three possible states: INIT, CTRL, DEAD (it never goes to ACTIVE, only the cpdev's do that)
2305 return cpbdev->base_state == CP_LKM_USB_CTRL;
2306}
2307
2308
2309//
2310// Input:
2311// if_data: set to true if caller only wants to schedule if there is data pending
2312// is_reschedule: set to true if the caller is the scheduled handler to see if it should be rescheduled
2313// have_lock: true if the caller already has the lock
2314//
2315// returns:
2316// true if scheduled new processing
2317// false if didn't schedule.
2318//
2319// Note: returns false if it was currently scheduled
2320static bool cp_lkm_schedule_data_process(struct cp_lkm_usb_base_dev* cpbdev, bool if_data, bool is_reschedule, bool have_lock)
2321{
2322 unsigned long flags;
2323 bool res = false;
2324
2325 if (!have_lock) {
2326 spin_lock_irqsave(&cpbdev->data_q_lock, flags);
2327 }
2328
2329 //never schedule processing when we are paused
2330 if (cpbdev->data_q_len == CP_LKM_USB_PAUSED_CNT) {
2331 goto schedule_done;
2332 }
2333
2334 if (is_reschedule) {
2335 cpbdev->scheduled = false;
2336 }
2337
2338 if (cpbdev->scheduled == true) {
2339 goto schedule_done;
2340 }
2341
2342 if (if_data) {
2343 if(!cp_lkm_usb_have_data(cpbdev)){
2344 goto schedule_done;
2345 }
2346 }
2347
2348 cpbdev->scheduled = true;
2349 res = true;
2350
2351 //cpdev->dbg_total_tasklet_sched++;
2352 tasklet_schedule(&cpbdev->data_process_tasklet);
2353
2354schedule_done:
2355 if (!have_lock) {
2356 spin_unlock_irqrestore(&cpbdev->data_q_lock, flags);
2357 }
2358 return res;
2359}
2360
2361static void cp_lkm_schedule_rx_restock(struct cp_lkm_usb_base_dev* cpbdev, struct cp_lkm_base_ep* bep)
2362{
2363 if(bep == NULL) {
2364 cp_lkm_schedule_data_process(cpbdev,false,false,false);
2365 tasklet_schedule(&cpbdev->other_process_tasklet);
2366 }
2367 else if(bep->ep_num == cpbdev->data_in_bep_num) {
2368 //printk("start data ep listen\n");
2369 cp_lkm_schedule_data_process(cpbdev,false,false,false);
2370 }
2371 else{
2372 tasklet_schedule(&cpbdev->other_process_tasklet);
2373 }
2374}
2375
2376#define DATA_SRC_TX 0
2377#define DATA_SRC_RX 1
2378#define DATA_SRC_OTHER 2
2379static void cp_lkm_usb_done_and_defer_data(struct cp_lkm_usb_base_dev* cpbdev, struct sk_buff *skb, int src)
2380{
2381 unsigned long flags;
2382
2383 spin_lock_irqsave(&cpbdev->data_q_lock, flags);
2384 if(src == DATA_SRC_TX) {
2385 __skb_queue_tail(&cpbdev->data_tx_done, skb);
2386 }
2387 else{
2388 __skb_queue_tail(&cpbdev->data_rx_done, skb);
2389 }
2390 if(cpbdev->data_q_len != CP_LKM_USB_PAUSED_CNT) {
2391 cpbdev->data_q_len++;
2392 cp_lkm_schedule_data_process(cpbdev,true,false,true);
2393 }
2394 spin_unlock_irqrestore(&cpbdev->data_q_lock, flags);
2395
2396}
2397
2398//for non data endpoint pkts
2399static void cp_lkm_usb_done_and_defer_other(struct cp_lkm_usb_base_dev* cpbdev, struct sk_buff *skb)
2400{
2401 unsigned long flags;
2402
2403 spin_lock_irqsave(&cpbdev->other_done.lock, flags);
2404 __skb_queue_tail(&cpbdev->other_done, skb);
2405 //only rearm the softirq if the list was empty
2406 if(cpbdev->other_done.qlen == 1) {
2407 tasklet_schedule(&cpbdev->other_process_tasklet);
2408 }
2409 spin_unlock_irqrestore(&cpbdev->other_done.lock, flags);
2410}
2411
2412static void cp_lkm_usb_process_other_done_tasklet (unsigned long param)
2413{
2414 struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev *)param;
2415 struct sk_buff *skb;
2416 struct skb_data *entry;
2417 bool timed_out = false;
2418 unsigned long time_limit = jiffies + 2;
2419 bool can_restock = true;
2420 unsigned long flags;
2421
2422 spin_lock_irqsave(&cpbdev->other_state_lock, flags);
2423 if(cpbdev->other_state != USB_PROCESS_STATE_IDLE){
2424 spin_unlock_irqrestore(&cpbdev->other_state_lock, flags);
2425 return;
2426 }
2427 cpbdev->other_state = USB_PROCESS_STATE_ACTIVE;
2428 spin_unlock_irqrestore(&cpbdev->other_state_lock, flags);
2429
2430 if (timer_pending(&cpbdev->rx_delay) || !cp_lkm_usb_is_base_attached(cpbdev)) {
2431 //printk("%s(), cpbdev %p delaying or no longer attached, base_state: %d\n", __FUNCTION__,cpbdev,cpbdev->base_state);
2432 can_restock = false;
2433 }
2434 //cpdev->dbg_total_o_done++;
2435
2436 while(!timed_out) {
2437 skb = skb_dequeue(&cpbdev->other_done);
2438 if(skb == NULL) {
2439 break;
2440 }
2441 entry = (struct skb_data *) skb->cb;
2442
2443 //printk("%s(), other data cpbdev: %p, bep: %p, num: 0x%x\n",__FUNCTION__,cpbdev,entry->bep,(entry->bep?entry->bep->ep_num:0));
2444
2445 //cp_lkm_usb_cnts(entry->state,-1);
2446 switch (entry->state) {
2447 case in_other_done:
2448 if(entry->urb) {
2449 //cp_lkm_usb_urb_cnt(-1);
2450 usb_free_urb (entry->urb);
2451 }
2452 cp_lkm_usb_other_recv_process(cpbdev, skb);
2453 break;
2454 case ctrl_done:
2455 if(entry->urb) {
2456 //cp_lkm_usb_urb_cnt(-1);
2457 usb_free_urb (entry->urb);
2458 }
2459 cp_lkm_usb_ctrl_process(cpbdev, skb);
2460 break;
2461 case out_done:
2462 case in_other_cleanup:
2463 if(entry->urb) {
2464 //cp_lkm_usb_urb_cnt(-1);
2465 usb_free_urb (entry->urb);
2466 }
2467 dev_kfree_skb_any(skb);
2468 break;
2469 case unlink_start:
2470 default:
2471 //printk("!!other: unknown skb state: %d\n",entry->state);
2472 break;
2473 }
2474
2475 if(time_after_eq(jiffies, time_limit)) {
2476 //ran out of time, process this one and then bail
2477 timed_out = true;
2478 }
2479 }
2480
2481 if(can_restock) {
2482 cp_lkm_usb_rx_other_restock(cpbdev);
2483 }
2484
2485 if(timed_out) {
2486 tasklet_schedule(&cpbdev->other_process_tasklet);
2487 }
2488
2489 spin_lock_irqsave(&cpbdev->other_state_lock, flags);
2490 cpbdev->other_state = USB_PROCESS_STATE_IDLE;
2491 spin_unlock_irqrestore(&cpbdev->other_state_lock, flags);
2492
2493 return ;
2494}
2495
2496// Timer callback. This runs in soft interrupt context.
2497//
2498// The call to restock can blow chow (actually when it calls cp_lkm_schedule_data_process)
2499// if an unlink or unplug happens while we are still in the call.
2500//
2501// Unlink or plug can happen during this call on multi core platforms with kernel preemption enabled.
2502// This timer is scheduled if we ran into some unexpected USB error and want
2503// to give the USB endpoint some time before trying to reschedule recv urbs on it.
2504//
2505// The whole purpose of this function is to pump the system if it is otherwise idle. If
2506// it isn't idle, we can count on those processes to call cp_lkm_schedule_rx_restock when done.
2507static void cp_lkm_usb_delay_timer (unsigned long param)
2508{
2509 unsigned long flags;
2510
2511 struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev *)param;
2512 spin_lock_irqsave(&cpbdev->processing_state_lock, flags);
2513 if(cpbdev->processing_state == USB_PROCESS_STATE_IDLE){
2514 cp_lkm_schedule_rx_restock(cpbdev,NULL);
2515 }
2516 spin_unlock_irqrestore(&cpbdev->processing_state_lock, flags);
2517}
2518
2519#if 0
2520static void cp_lkm_usb_dbg_memleak_timer (unsigned long param)
2521{
2522 printk("+=+=+=+=+=!!!!mem: %d, urb: %d, skb: data: %d, other: %d, xmit: %d, ctrl: %d, unplug:%d, stck_cnt: %d, stck_chk: %d, unlink: %d\n",g_dbg_memalloc_cnt,g_dbg_urballoc_cnt,g_dbg_data_skballoc_cnt,g_dbg_other_skballoc_cnt,g_dbg_xmit_skballoc_cnt,g_dbg_ctrl_skballoc_cnt,g_dbg_unplug_cnt,g_stuck_cnt,g_stuck_chk,g_unlink_cnt);
2523 mod_timer(&dbg_memleak_timer, jiffies + msecs_to_jiffies(5000));
2524}
2525#endif
2526
2527
2528/*
2529 * We pause the transmit if there are too many urbs down at the usb layer.
2530 * The Broadcom processor's USB block sometimes gets stuck meaning we will never
2531 * unpause. This function is used to detect if we are paused because of a stuck and
2532 * try to recover it.
2533*/
2534static void cp_lkm_usb_stuck_check(struct cp_lkm_usb_base_dev* cpbdev, int action)
2535{
2536 //only broadcom has the stuck problem
2537 if (cp_lkm_is_broadcom == 0) {
2538 //printk("Not BRCM!!!!\n");
2539 return;
2540 }
2541
2542 //TODO: it seems like this might work fine with clones. I don't think it hurts to be inited,
2543 // started or stopped multiple times??
2544 //g_stuck_chk++;
2545 switch(action) {
2546 case CP_LKM_STUCK_INIT:
2547 cpbdev->usb_pause_stuck_timer.function = cp_lkm_usb_pause_stuck_timer;
2548 cpbdev->usb_pause_stuck_timer.data = (unsigned long)cpbdev;
2549 init_timer(&cpbdev->usb_pause_stuck_timer);
2550 break;
2551 case CP_LKM_STUCK_START:
2552 mod_timer(&cpbdev->usb_pause_stuck_timer, jiffies + msecs_to_jiffies(3000));
2553 cpbdev->tx_proc_cnt_at_pause = cpbdev->tx_proc_cnt;
2554 break;
2555 case CP_LKM_STUCK_STOP:
2556 case CP_LKM_STUCK_DEINIT:
2557 del_timer_sync(&cpbdev->usb_pause_stuck_timer);
2558 break;
2559 }
2560}
2561
2562// Broadcom has a problem in the EHCI controller where if it gets a NAK on an out packet
2563// it occassionally doesn't update the status of the URB and retry it. This results in the endpoint getting stuck.
2564// If we detect that it is stuck (if the tx has been paused for more than 3 seconds) then we cancel the
2565// struck urb and this gets things going again. The cancelled urb results in a dropped packet which is undesirable,
2566// but preferrable to being stuck.
2567static void cp_lkm_usb_pause_stuck_timer (unsigned long param)
2568{
2569 struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev *)param;
2570 struct skb_data *entry;
2571 struct sk_buff *skb;
2572 struct urb *urb = NULL;
2573 unsigned long flags;
2574
2575 spin_lock_irqsave(&cpbdev->out_q.lock, flags);
2576 if (cpbdev->tx_paused) {
2577 // cancel stuck urb?
2578 skb = skb_peek(&cpbdev->out_q);
2579 if (skb) {
2580 entry = (struct skb_data *) skb->cb;
2581 if (entry) {
2582 if(cpbdev->tx_proc_cnt_at_pause == cpbdev->tx_proc_cnt){
2583 //printk("\n!!!!!!Canceling stuck URB, cnt at stuck: %d, cnt at unstick: %d!!!!!!!!!!!!!!!!!!!!!!!!!\n", cpbdev->tx_proc_cnt_at_pause, cpbdev->tx_proc_cnt);
2584 urb = entry->urb;
2585 usb_get_urb(urb);
2586 }
2587 //else{
2588 //some pkts were transmitted successfully while waiting, though not enough to unpause us.
2589 //this means the tx is not stuck, so don't need to cancel anything
2590 //printk("\n!!!!!!Restarting stuck URB timer, cnt at stuck: %d, cnt at unstick: %d!!!!!!!!!!!!!!!!!!!!!!!!!\n",cpbdev->tx_proc_cnt_at_pause, cpbdev->tx_proc_cnt);
2591 //}
2592 // restart just in case this doesn't unpause tx
2593 cp_lkm_usb_stuck_check(cpbdev, CP_LKM_STUCK_START);
2594 //g_stuck_cnt++;
2595 }
2596 }
2597 }
2598 spin_unlock_irqrestore(&cpbdev->out_q.lock, flags);
2599 if (urb) {
2600 //printk("\n!!!!!!Canceling stuck URB!!!!!!!!!!\n");
2601 //cpbdev->dbg_total_stuck_cnt++;
2602 usb_unlink_urb (urb);
2603 usb_put_urb(urb);
2604 }
2605}
2606
2607#if 0
2608static void cp_lkm_usb_dbg_timer (unsigned long param)
2609{
2610 struct cp_lkm_usb_dev* cpdev = (struct cp_lkm_usb_dev *)param;
2611 struct cp_lkm_usb_base_dev* cpbdev = cpdev->cpbdev;
2612 printk("!!!!cpdev: %p, clone: %d, id: 0x%x, q_cnt: %d, p: %d, stuck_cnt: %d, tx done: %d, ip_copies: %d!!!!!!!\n",cpdev, cpdev->clone_num,cpdev->mux_id,cpbdev->tx_usb_q_count,cpbdev->tx_paused, cpbdev->dbg_total_stuck_cnt, cpbdev->tx_proc_cnt,num_ip_copies);
2613
2614 //printk("!!!!Stuck urb count: %d, total_pause: %d, cpdev: %p, is_brcm: %d!!!!!!!\n",cpdev->dbg_total_stuck_cnt,cpdev->dbg_total_pause,cpdev,cp_lkm_is_broadcom);
2615 //printk("!!!!!!!!!!!\n");
2616 #if 0
2617 int txa;
2618 int rxa;
2619 int drql;
2620 int dtql;
2621 //int ab;
2622 int tx,rx;
2623 int pkt_avg;
2624 //int epqc, in_q;
2625
2626 cpdev->dbg_total_rx_qlen += cpdev->data_rx_done.qlen;
2627 cpdev->dbg_total_tx_qlen += cpdev->data_tx_done.qlen;
2628
2629 //ab = cpdev->dbg_total_budget/(cpdev->dbg_total_d_done+1);
2630 txa = cpdev->dbg_total_tx_proc/(cpdev->dbg_total_d_done+1);
2631 rxa = cpdev->dbg_total_rx_proc/(cpdev->dbg_total_d_done+1);
2632 drql = cpdev->dbg_total_rx_qlen/(cpdev->dbg_total_d_done+1);
2633 dtql = cpdev->dbg_total_tx_qlen/(cpdev->dbg_total_d_done+1);
2634 //epqc = cpdev->in_eps[CP_LKM_DATA_INDEX].q_cnt;
2635 //in_q = cpdev->in_q.qlen;
2636 tx = cpdev->dbg_total_tx_irq;
2637 rx = cpdev->dbg_total_rx_irq;
2638 pkt_avg = (tx+rx)/5;
2639 printk("tot: %d, tx: %d, rx: %d, pa: %d, dones: %d, p: %d\n", tx+rx, tx, rx, pkt_avg, cpdev->dbg_total_d_done, cpdev->dbg_total_pause);
2640 printk("resch: %d, d_c: %d, sch_n: %d, sch_t: %d, sch_wq: %d, sch_sk: %d, ds: %d\n", cpdev->dbg_total_d_resched, cpdev->dbg_total_d_comp, cpdev->dbg_total_napi_sched,cpdev->dbg_total_tasklet_sched, cpdev->dbg_total_wq_sched,cpdev->dbg_total_sch_sk, cpdev->data_state);
2641 printk("txa: %d, rxa: %d, to: %d, HZ:%d \n", txa , rxa, cpdev->dbg_total_timeout, HZ);
2642 printk("nrm_t: %d, blk_t: %d, nrm: %d, blk: %d, ntmrs: %d \n", cpdev->dbg_total_num_normal_t,cpdev->dbg_total_num_hybrid_t,cpdev->dbg_total_num_normal,cpdev->dbg_total_num_hybrid, cpdev->dbg_total_num_d_timers);
2643 printk("psd: %d, tuqc: %d, schd: %d, dql: %d, rql: %d, tql: %d, toq: %d\n",cpdev->tx_paused,cpdev->tx_usb_q_count,cpdev->scheduled,cpdev->data_q_len,cpdev->data_rx_done.qlen,cpdev->data_tx_done.qlen,cpdev->out_q.qlen);
2644 printk("txirq: %d, txprc: %d\n",cpdev->dbg_total_tx_irq, cpdev->dbg_total_tx_proc);
2645
2646 //printk("ipqc: %d, in_q: %d\n", epqc, in_q);
2647 //printk("d0: %p,d1: %p,d2: %p,d3: %p,d4: %p\n", devs[0],devs[1],devs[2],devs[3],devs[4]);
2648 cpdev->dbg_total_d_done = cpdev->dbg_total_d_resched = cpdev->dbg_total_d_comp = 0;
2649 cpdev->dbg_total_pause = cpdev->dbg_total_max_work = cpdev->dbg_total_budget = 0;
2650 cpdev->dbg_total_tx_irq = cpdev->dbg_total_rx_irq = 0;
2651 cpdev->dbg_total_tx_proc = cpdev->dbg_total_rx_proc = 0;
2652 cpdev->dbg_total_rx_qlen = cpdev->dbg_total_tx_qlen = 0;
2653 cpdev->dbg_total_napi_sched=cpdev->dbg_total_tasklet_sched=cpdev->dbg_total_wq_sched=0;
2654 cpdev->dbg_total_num_normal_t=cpdev->dbg_total_num_hybrid_t=cpdev->dbg_total_num_normal=cpdev->dbg_total_num_hybrid=cpdev->dbg_total_num_d_timers = 0;
2655 #endif
2656
2657 mod_timer(&cpdev->dbg_timer, jiffies + msecs_to_jiffies(5000));
2658
2659}
2660#endif
2661
2662
2663//Caller must have the data_q_lock before calling
2664static int cp_lkm_usb_have_data(struct cp_lkm_usb_base_dev *cpbdev)
2665{
2666 //return the amount of work to be done if it exceeds the threshold, else return 0
2667 if(cpbdev->data_rx_done.qlen >= cpbdev->rx_schedule_threshold || cpbdev->data_tx_done.qlen >= cpbdev->tx_schedule_threshold){
2668 return cpbdev->data_rx_done.qlen + cpbdev->data_tx_done.qlen;
2669 }
2670 return 0;
2671}
2672
2673
2674#if 1
2675static int cp_lkm_usb_process_data_done(struct cp_lkm_usb_base_dev *cpbdev, int budget)
2676{
2677 struct sk_buff *skb;
2678 struct skb_data *entry;
2679 struct cp_lkm_usb_dev* cpdev __attribute__((unused));
2680 unsigned long time_limit = jiffies + 3;
2681 int retval;
2682 int restock = 0;
2683 unsigned long flags;
2684 int rx_work_done = 0;
2685 int tx_work_done = 0;
2686 int work_done = 0;
2687 int can_restock = 1;
2688 int i;
2689 int loop;
2690 int num_proc;
2691 int actual_budget;
2692 int num_rx;
2693 int num_tx;
2694 struct sk_buff_head done_q;
2695 bool paused;
2696
2697 skb_queue_head_init (&done_q);
2698
2699 //cpdev->dbg_total_d_done++;
2700 //cpdev->dbg_total_budget += budget;
2701 //cpdev->dbg_total_rx_qlen += cpdev->data_rx_done.qlen;
2702 //cpdev->dbg_total_tx_qlen += cpdev->data_tx_done.qlen;
2703
2704 // if the delay timer is running, we aren't supposed to send any more recv urbs to the usb layer.
2705 // if the device has detached, we need to finish processing done pkts, but don't resubmit any new urbs
2706 if (timer_pending(&cpbdev->rx_delay) || !cp_lkm_usb_is_base_attached(cpbdev)) {
2707 //printk("%s(), cpdev delaying or no longer attached\n", __FUNCTION__);
2708 can_restock = 0;
2709 }
2710
2711 paused = cpbdev->tx_paused;
2712
2713 actual_budget = CP_LKM_USB_NAPI_MAX_WORK;
2714 for(loop=0;loop<CP_LKM_USB_PROCESS_DIVISOR;loop++) {
2715 if(time_after_eq(jiffies, time_limit)) {
2716 //ran out of time, process this one and then bail
2717 work_done = budget;
2718 //cpdev->dbg_total_timeout++;
2719 break;
2720 }
2721 //keep restocking the q until we max out the budget or timeout or runout
2722 if(rx_work_done >= actual_budget || (paused && tx_work_done >= actual_budget)) {
2723 work_done = budget;
2724 break;
2725 }
2726 spin_lock_irqsave(&cpbdev->data_q_lock, flags);
2727 num_rx = cpbdev->data_rx_done.qlen;
2728 num_tx = cpbdev->data_tx_done.qlen;
2729 num_proc = max(num_rx,num_tx);
2730 num_proc = min(num_proc,actual_budget/CP_LKM_USB_PROCESS_DIVISOR); //grab 1/divisor of remaining budget each time
2731 // Note: A unit of work for the shim is either a lone tx, a lone rx or a combo of a rx and a tx.
2732 // Here we calculate how much work to do on this poll. If there was work left over from last time
2733 // finish processing it.
2734 for(i = 0; i < num_proc; i++) {
2735 skb = __skb_dequeue (&cpbdev->data_rx_done);
2736 if(skb){
2737 cpbdev->data_q_len--;
2738 __skb_queue_tail(&done_q, skb);
2739 }
2740 skb = __skb_dequeue (&cpbdev->data_tx_done);
2741 if(skb){
2742 cpbdev->data_q_len--;
2743 __skb_queue_tail(&done_q, skb);
2744 }
2745 }
2746 spin_unlock_irqrestore(&cpbdev->data_q_lock, flags);
2747
2748 //nothing in the q, we are done
2749 if(done_q.qlen == 0) {
2750 break;
2751 }
2752
2753 while((skb = __skb_dequeue(&done_q))){
2754 entry = (struct skb_data *) skb->cb;
2755 //cp_lkm_usb_cnts(entry->state,-1);
2756 switch (entry->state) {
2757 case in_data_done:
2758 //cpdev->dbg_total_rx_proc++;
2759 entry->bep->q_cnt--;
2760 restock++;
2761 rx_work_done++;
2762 work_done++;
2763 if(can_restock && restock == CP_LKM_USB_RESTOCK_MULTIPLE) {
2764 restock = 0;
2765
2766 retval = cp_lkm_usb_submit_recv (cpbdev, entry->urb, GFP_ATOMIC, entry->bep, true);
2767 if (retval < 0) {
2768 //printk("%s(), can't resubmit\n", __FUNCTION__);
2769 //cp_lkm_usb_urb_cnt(-1);
2770 usb_free_urb (entry->urb);
2771 can_restock = 0;
2772 }
2773 }
2774 else{
2775 //cp_lkm_usb_urb_cnt(-1);
2776 usb_free_urb (entry->urb);
2777 }
2778 cp_lkm_usb_data_recv_process(cpbdev, skb);
2779 break;
2780 case out_done:
2781 work_done++;
2782 tx_work_done++;
2783 //fall through on purpose
2784 case in_data_cleanup:
2785 if(entry->urb) {
2786 //cp_lkm_usb_urb_cnt(-1);
2787 usb_free_urb (entry->urb);
2788 }
2789 dev_kfree_skb_any(skb);
2790 break;
2791
2792 case unlink_start:
2793 default:
2794 //printk("!!data: unknown skb state: %d\n",entry->state);
2795 break;
2796 }
2797 }
2798 }
2799
2800 //restock recv urbs to usb layer if we processed any
2801 if(can_restock) {
2802 cp_lkm_usb_rx_data_restock(cpbdev);
2803 }
2804
2805 //see if we need to resume the tx side
2806 if(tx_work_done) {
2807 spin_lock_irqsave (&cpbdev->out_q.lock, flags);
2808 cpbdev->tx_proc_cnt += tx_work_done;
2809
2810 if(tx_work_done > cpbdev->tx_usb_q_count) {
2811 cpbdev->tx_usb_q_count = 0;
2812 }
2813 else{
2814 cpbdev->tx_usb_q_count -= tx_work_done;
2815 }
2816 if(cpbdev->tx_usb_q_count <= cpbdev->tx_resume_threshold) {
2817 if(cpbdev->tx_paused){
2818 //unpause all cpdevs
2819 cp_lkm_usb_dev_pause(cpbdev, false);
2820 // cancel usb_pause_stuck_timer
2821 cp_lkm_usb_stuck_check(cpbdev, CP_LKM_STUCK_STOP);
2822 }
2823
2824 }
2825 spin_unlock_irqrestore (&cpbdev->out_q.lock, flags);
2826 }
2827
2828 //if(work_done > cpdev->dbg_total_max_work){
2829 // cpdev->dbg_total_max_work = work_done;
2830 //}
2831
2832 //can't return greater than the passed in budget
2833 if(work_done > budget) {
2834 work_done = budget;
2835 }
2836
2837 return work_done;
2838 //return 1;
2839}
2840#endif
2841
2842static int cp_lkm_usb_common_process_data_done(struct cp_lkm_usb_base_dev* cpbdev, int budget)
2843{
2844 unsigned long flags;
2845 int work_done = -1;
2846 bool rescheduled;
2847 bool ran_data_done = false;
2848 if(NULL == cpbdev) {
2849 //printk("%s() !!!!!!!!!!!!!!!!no ctxt\n", __FUNCTION__);
2850 return work_done;
2851 }
2852
2853 spin_lock_irqsave(&cpbdev->processing_state_lock, flags);
2854 if(cpbdev->processing_state == USB_PROCESS_STATE_IDLE){
2855 cpbdev->processing_state = USB_PROCESS_STATE_ACTIVE;
2856 spin_unlock_irqrestore(&cpbdev->processing_state_lock, flags);
2857 work_done = cp_lkm_usb_process_data_done(cpbdev, budget);
2858 spin_lock_irqsave(&cpbdev->processing_state_lock, flags);
2859 ran_data_done = true;
2860 cpbdev->processing_state = USB_PROCESS_STATE_IDLE;
2861 }
2862 spin_unlock_irqrestore(&cpbdev->processing_state_lock, flags);
2863 if (ran_data_done) {
2864 rescheduled = cp_lkm_schedule_data_process(cpbdev,true,true,false);
2865 if (rescheduled) {
2866 work_done = budget;
2867 //cpdev->dbg_total_d_resched++;
2868 }
2869 else if(work_done){
2870 work_done--;
2871 //cpdev->dbg_total_d_comp++;
2872 }
2873 }
2874 else{
2875 //cpdev->dbg_total_sch_sk++;
2876 }
2877 return work_done;
2878}
2879
2880
2881static void cp_lkm_usb_process_data_done_tasklet (unsigned long param)
2882{
2883 struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev *)param;
2884
2885 cp_lkm_usb_common_process_data_done(cpbdev, CP_LKM_PM_NAPI_WEIGHT);
2886}
2887
2888
2889static void cp_lkm_usb_rx_data_restock (struct cp_lkm_usb_base_dev* cpbdev)
2890{
2891 //struct cp_lkm_usb_dev* cpdev = (struct cp_lkm_usb_dev *)param;
2892 //int cur_token;
2893 struct urb *urb;
2894 //int ep_index;
2895 int q_len;
2896 struct cp_lkm_base_ep* bep;
2897 int retval;
2898 int q_cnt;
2899
2900 // timer_pending means we had an error and are waiting for a recovery period before submitting any more rx urbs
2901 if (timer_pending(&cpbdev->rx_delay)) {
2902 return;
2903 }
2904
2905 // restock the recv queues on any ep's that are listening
2906 bep = cp_lkm_usb_get_bep(cpbdev, cpbdev->data_in_bep_num);
2907 if(!(bep->con_flags & CP_LKM_USB_LISTEN) && !(bep->con_flags & CP_LKM_USB_RECV)) {
2908 return;
2909 }
2910 if(test_bit (EVENT_RX_HALT, &bep->err_flags)){
2911 return;
2912 }
2913
2914 if(bep->con_flags & CP_LKM_USB_RECV) {
2915 //only post 1 for recv's
2916 q_len = 1;
2917 }
2918 else{
2919 //its a listen
2920 q_len = CP_LKM_USB_MAX_RX_QLEN;
2921 }
2922
2923 // Try to q up to q_len recv buffs with usb. We may not be able to get to that amount if
2924 // there is a problem with usb, so only try up to q_len times to insert them.
2925 retval = 0;
2926 q_cnt = bep->q_cnt;
2927
2928 while(q_cnt < q_len) {
2929 urb = usb_alloc_urb (0, GFP_ATOMIC);
2930 if (!urb) {
2931 if (q_cnt == 0) {
2932 cp_lkm_usb_defer_kevent (cpbdev, bep, EVENT_RX_MEMORY);
2933 }
2934 break;
2935 }
2936 //cp_lkm_usb_urb_cnt(1);
2937 retval = cp_lkm_usb_submit_recv (cpbdev, urb, GFP_ATOMIC, bep, true);
2938 if (retval < 0) {
2939 //cp_lkm_usb_urb_cnt(-1);
2940 usb_free_urb (urb);
2941 break;
2942 }
2943 q_cnt++;
2944 }
2945}
2946
2947static void cp_lkm_usb_rx_other_restock (struct cp_lkm_usb_base_dev* cpbdev)
2948{
2949 struct urb *urb;
2950 int q_len;
2951 struct cp_lkm_base_ep* bep;
2952 int retval;
2953 int q_cnt;
2954 struct list_head *entry, *nxt;
2955
2956 // timer_pending means we had an error and are waiting for a recovery period before submitting any more rx urbs
2957 if (timer_pending(&cpbdev->rx_delay)) {
2958 return;
2959 }
2960
2961 // restock the recv queues on any ep's that are listening
2962 list_for_each_safe(entry, nxt, &cpbdev->in_bep_list) {
2963 bep = list_entry(entry, struct cp_lkm_base_ep, list);
2964 if(!(bep->con_flags & CP_LKM_USB_LISTEN) && !(bep->con_flags & CP_LKM_USB_RECV)) {
2965 continue;
2966 }
2967 if(test_bit (EVENT_RX_HALT, &bep->err_flags)){
2968 continue;
2969 }
2970 if(bep->ep_num == cpbdev->data_in_bep_num) {
2971 continue;
2972 }
2973
2974 if(bep->con_flags & CP_LKM_USB_RECV) {
2975 //only post 1 for recv's
2976 q_len = 1;
2977 }
2978 else{
2979 //its a listen
2980 q_len = CP_LKM_USB_MAX_OTHER_QLEN;
2981 }
2982
2983 // Try to q up to q_len recv buffs with usb. We may not be able to get to that amount if
2984 // there is a problem with usb, so only try up to q_len times to insert them.
2985 retval = 0;
2986 q_cnt = bep->q_cnt;
2987
2988 while(q_cnt < q_len) {
2989 urb = usb_alloc_urb (0, GFP_ATOMIC);
2990 if (!urb) {
2991 if (q_cnt == 0) {
2992 cp_lkm_usb_defer_kevent (cpbdev, bep, EVENT_RX_MEMORY);
2993 }
2994 break;
2995 }
2996 //cp_lkm_usb_urb_cnt(1);
2997 retval = cp_lkm_usb_submit_recv (cpbdev, urb, GFP_ATOMIC, bep, false);
2998 if (retval < 0) {
2999 //cp_lkm_usb_urb_cnt(-1);
3000 usb_free_urb (urb);
3001 break;
3002 }
3003 q_cnt++;
3004 }
3005 }
3006}
3007
3008//unlink all urbs with the given ep, or all if ep is NULL
3009static int cp_lkm_usb_unlink_urbs (struct cp_lkm_usb_base_dev *cpbdev, struct sk_buff_head *q, struct cp_lkm_base_ep* bep)
3010{
3011 unsigned long flags;
3012 struct sk_buff *skb;
3013 int count = 0;
3014
3015 spin_lock_irqsave (&q->lock, flags);
3016 while (!skb_queue_empty(q)) {
3017 struct skb_data *entry;
3018 struct urb *urb;
3019 int retval;
3020
3021 skb_queue_walk(q, skb) {
3022 entry = (struct skb_data *) skb->cb;
3023 urb = entry->urb;
3024 if(urb && (entry->state != unlink_start) && (entry->bep == bep || bep == NULL)) {
3025 goto found;
3026 }
3027 }
3028 break;
3029found:
3030 entry->state = unlink_start;
3031
3032 /*
3033 * Get reference count of the URB to avoid it to be
3034 * freed during usb_unlink_urb, which may trigger
3035 * use-after-free problem inside usb_unlink_urb since
3036 * usb_unlink_urb is always racing with .complete
3037 * handler(include defer_bh).
3038 */
3039 usb_get_urb(urb);
3040 spin_unlock_irqrestore(&q->lock, flags);
3041 // during some PM-driven resume scenarios,
3042 // these (async) unlinks complete immediately
3043 //usb_kill_urb(urb);
3044 retval = usb_unlink_urb (urb);
3045 //g_unlink_cnt++;
3046 if (retval != -EINPROGRESS && retval != 0){
3047 //netdev_dbg(dev->net, "unlink urb err, %d\n", retval);
3048 } else{
3049 count++;
3050 }
3051 usb_put_urb(urb);
3052 spin_lock_irqsave(&q->lock, flags);
3053 }
3054 spin_unlock_irqrestore (&q->lock, flags);
3055 return count;
3056}
3057
3058
3059static void cp_lkm_usb_defer_kevent (struct cp_lkm_usb_base_dev* cpbdev, struct cp_lkm_base_ep* bep, int work)
3060{
3061 set_bit (work, &bep->err_flags);
3062 if (!schedule_work (&cpbdev->kevent)) {
3063 //deverr (dev, "kevent %d may have been dropped", work);
3064 } else {
3065 //devdbg (dev, "kevent %d scheduled", work);
3066 }
3067}
3068
3069// Workqueue callback function. This runs in thread context
3070static void cp_lkm_usb_kevent (struct work_struct *work)
3071{
3072 struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev *)container_of(work, struct cp_lkm_usb_base_dev, kevent);
3073 int status;
3074 struct cp_lkm_base_ep* bep;
3075 struct list_head *entry, *nxt;
3076
3077
3078 //grab global lock while testing dev state so it can't change on us.
3079 spin_lock(&cp_lkm_usb_mgr.lock);
3080 if(!cp_lkm_usb_is_base_attached(cpbdev)){
3081 spin_unlock(&cp_lkm_usb_mgr.lock);
3082 return;
3083 }
3084
3085 //don't want to hold global lock while doing this since don't know how long this will take, see next note
3086 spin_unlock(&cp_lkm_usb_mgr.lock);
3087
3088
3089 //NOTE: if kernel preemption is enabled and the disconnect gets called right here, bad things could happen if the cpdev->udev
3090 // is released. Fortunately, cp_lkm_usb_disconnect() calls cancel_work_sync() before releasing it. This will either cancel this
3091 // function if it isn't currently running, or will wait until it exits before returning if it is running. This protects us.
3092
3093 list_for_each_safe(entry, nxt, &cpbdev->out_bep_list) {
3094 bep = list_entry(entry, struct cp_lkm_base_ep, list);
3095 /* usb_clear_halt() needs a thread context */
3096 if (test_bit (EVENT_TX_HALT, &bep->err_flags)) {
3097 cp_lkm_usb_unlink_urbs (cpbdev, &cpbdev->out_q, bep);
3098 status = usb_clear_halt (cpbdev->udev, bep->pipe);
3099 DEBUG_TRACE("%s() EVENT_TX_HALT status:%d", __FUNCTION__, status);
3100 if (status < 0 && status != -EPIPE && status != -ESHUTDOWN) {
3101 //if (netif_msg_tx_err (dev))
3102 // deverr (dev, "can't clear tx halt, status %d",
3103 DEBUG_TRACE("%s() failed EVENT_TX_HALT status:%d", __FUNCTION__, status);
3104 // status);
3105 } else {
3106 clear_bit (EVENT_TX_HALT, &bep->err_flags);
3107 //if (status != -ESHUTDOWN)
3108 // netif_wake_queue (dev->net);
3109 }
3110 }
3111 }
3112
3113 list_for_each_safe(entry, nxt, &cpbdev->in_bep_list) {
3114 bep = list_entry(entry, struct cp_lkm_base_ep, list);
3115 if (test_bit (EVENT_RX_HALT, &bep->err_flags)) {
3116 cp_lkm_usb_unlink_urbs (cpbdev, &cpbdev->in_q, bep);
3117 status = usb_clear_halt (cpbdev->udev, bep->pipe);
3118 DEBUG_TRACE("%s() EVENT_RX_HALT status:%d", __FUNCTION__, status);
3119 if (status < 0 && status != -EPIPE && status != -ESHUTDOWN) {
3120 DEBUG_TRACE("%s() failed EVENT_RX_HALT status:%d", __FUNCTION__, status);
3121 //if (netif_msg_rx_err (dev))
3122 // deverr (dev, "can't clear rx halt, status %d",
3123 // status);
3124 } else {
3125 clear_bit (EVENT_RX_HALT, &bep->err_flags);
3126 //grab global lock so link/unlink or unplug can't mess up the restock shedule pointers mid scheduling
3127 spin_lock(&cp_lkm_usb_mgr.lock);
3128 if (cp_lkm_usb_is_base_attached(cpbdev)){
3129 cp_lkm_schedule_rx_restock(cpbdev,bep);
3130 }
3131 spin_unlock(&cp_lkm_usb_mgr.lock);
3132
3133 }
3134 }
3135 }
3136 /* tasklet could resubmit itself forever if memory is tight */
3137 list_for_each_safe(entry, nxt, &cpbdev->in_bep_list) {
3138 bep = list_entry(entry, struct cp_lkm_base_ep, list);
3139 if (test_bit (EVENT_RX_MEMORY, &bep->err_flags)) {
3140 DEBUG_TRACE("%s() EVENT_RX_MEMORY", __FUNCTION__);
3141
3142 clear_bit (EVENT_RX_MEMORY, &bep->err_flags);
3143
3144 //grab global lock so link/unlink or unplug can't mess up the restock shedule pointers mid scheduling
3145 spin_lock(&cp_lkm_usb_mgr.lock);
3146 if (cp_lkm_usb_is_base_attached(cpbdev) && bep->q_cnt == 0){
3147 cp_lkm_schedule_rx_restock(cpbdev,bep);
3148
3149 }
3150 spin_unlock(&cp_lkm_usb_mgr.lock);
3151 }
3152 }
3153 //if (test_bit (EVENT_LINK_RESET, &cpdev->flags)) {
3154 // struct driver_info *info = dev->driver_info;
3155 // int retval = 0;
3156 //
3157 // clear_bit (EVENT_LINK_RESET, &dev->flags);
3158 // if(info->link_reset && (retval = info->link_reset(dev)) < 0) {
3159 // devinfo(dev, "link reset failed (%d) usbnet usb-%s-%s, %s",
3160 // retval,
3161 // dev->udev->bus->bus_name, dev->udev->devpath,
3162 // info->description);
3163 // }
3164 //}
3165
3166 //if (dev->flags)
3167 // devdbg (dev, "kevent done, flags = 0x%lx",
3168 // dev->flags);
3169}
3170
3171static void cp_lkm_usb_ctrl_complete(struct urb *urb)
3172{
3173 unsigned long flags;
3174 struct sk_buff *skb = (struct sk_buff *) urb->context;
3175 struct skb_data *entry = (struct skb_data *) skb->cb;
3176 struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev *)entry->cpbdev;
3177
3178 //remove skb from the list first thing so no other code conext looking at the
3179 //list (such as unlink_urbs) can mess with it.
3180 spin_lock_irqsave(&cpbdev->ctrlq.lock, flags);
3181 __skb_unlink(skb, &cpbdev->ctrlq);
3182 spin_unlock_irqrestore(&cpbdev->ctrlq.lock,flags);
3183
3184 skb->len = urb->actual_length;
3185
3186 //skip status and error checking if the device has unplugged
3187 if(!cp_lkm_usb_is_base_attached(cpbdev)) {
3188 urb->status = -ENODEV;
3189 goto ctrl_done;
3190 }
3191
3192 if (urb->status != 0) {
3193 switch (urb->status) {
3194 case -EPIPE:
3195 break;
3196
3197 /* software-driven interface shutdown */
3198 case -ECONNRESET: // async unlink
3199 case -ESHUTDOWN: // hardware gone
3200 break;
3201
3202 case -ENODEV:
3203 //printk("ctrl fail, no dev\n");
3204 break;
3205
3206 case -EPROTO:
3207 case -ETIME:
3208 case -EILSEQ:
3209 //CA: decided not to throttle on ctrl channel transfers since they are a different beast
3210 //if (!timer_pending (&cpdev->rx_delay)) {
3211 // mod_timer (&cpdev->rx_delay, jiffies + THROTTLE_JIFFIES);
3212 //if (netif_msg_link (dev))
3213 // devdbg (dev, "tx throttle %d",
3214 // urb->status);
3215 //}
3216 //netif_stop_queue (dev->net);
3217 break;
3218 default:
3219 //if (netif_msg_tx_err (dev))
3220 // devdbg (dev, "tx err %d", entry->urb->status);
3221 break;
3222 }
3223 }
3224
3225ctrl_done:
3226 urb->dev = NULL;
3227 entry->state = ctrl_done;
3228 entry->status = urb->status;
3229 entry->urb = NULL;
3230 if(urb->setup_packet) {
3231 kfree(urb->setup_packet);
3232 }
3233 //cp_lkm_usb_urb_cnt(-1);
3234 usb_free_urb (urb);
3235 cp_lkm_usb_done_and_defer_other(cpbdev, skb);
3236}
3237
3238
3239static int cp_lkm_usb_start_ctrl_xmit(void *ctx, struct sk_buff *skb_in)
3240{
3241 struct cp_lkm_usb_dev* cpdev = (struct cp_lkm_usb_dev *)ctx;
3242 struct cp_lkm_usb_base_dev* cpbdev;
3243 int retval = NET_XMIT_SUCCESS;
3244 struct urb *urb = NULL;
3245 struct skb_data *entry;
3246 unsigned long flags;
3247 int pipe;
3248 u8* tmp8;
3249 u16* tmp16;
3250 struct usb_ctrlrequest *req = NULL;
3251
3252 if(NULL == cpdev || !cp_lkm_usb_is_attached(cpdev) || !cp_lkm_usb_is_base_attached(cpdev->cpbdev)) {
3253 //printk("%s() no ctxt\n", __FUNCTION__);
3254 goto ctrl_done;
3255 }
3256
3257 cpbdev = cpdev->cpbdev;
3258
3259 DEBUG_TRACE("%s()", __FUNCTION__);
3260
3261 if ((urb = usb_alloc_urb(0, GFP_ATOMIC)) == NULL) {
3262 retval = -ENOMEM;
3263 goto ctrl_done;
3264 }
3265 //cp_lkm_usb_urb_cnt(1);
3266
3267 if ((req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC)) == NULL) {
3268 //cp_lkm_usb_urb_cnt(-1);
3269 usb_free_urb(urb);
3270 retval = -ENOMEM;
3271 goto ctrl_done;
3272 }
3273
3274 //The upper layer driver put all the ctrl stuff at the end of the buffer (in correct le order)
3275 //This layer puts it in a separate buffer
3276 tmp8 = (u8*)skb_in->data;
3277 req->bRequestType = *tmp8;
3278 skb_pull(skb_in, 1);
3279
3280 tmp8 = (u8*)skb_in->data;
3281 req->bRequest = *tmp8;
3282 skb_pull(skb_in, 1);
3283
3284 tmp16 = (u16*)skb_in->data;
3285 req->wValue = *tmp16;
3286 skb_pull(skb_in, 2);
3287
3288 tmp16 = (u16*)skb_in->data;
3289 req->wIndex = *tmp16;
3290 skb_pull(skb_in, 2);
3291
3292 tmp16 = (u16*)skb_in->data;
3293 req->wLength = *tmp16;
3294 skb_pull(skb_in, 2);
3295 //printk("%s() RT:%x, R:%x, V:%x, I:%x, L:%x\n", __FUNCTION__, req->bRequestType, req->bRequest, req->wValue, req->wIndex, req->wLength);
3296
3297 entry = (struct skb_data *) skb_in->cb;
3298 entry->urb = urb;
3299 entry->cpbdev = cpbdev;
3300 entry->state = ctrl_start;
3301 entry->status = 0;
3302 entry->bep = NULL;
3303 entry->unique_id = cpdev->unique_id;
3304
3305 if(req->bRequestType & USB_DIR_IN) {
3306 DEBUG_TRACE("%s() ctrl in len: %d", __FUNCTION__,skb_in->len);
3307 pipe = usb_rcvctrlpipe(cpbdev->udev, 0);
3308 }
3309 else{
3310 DEBUG_TRACE("%s() ctrl out len: %d", __FUNCTION__,skb_in->len);
3311 pipe = usb_sndctrlpipe(cpbdev->udev, 0);
3312 }
3313
3314 usb_fill_control_urb(urb, cpbdev->udev, pipe,
3315 (void *)req, skb_in->data, skb_in->len,
3316 cp_lkm_usb_ctrl_complete, skb_in);
3317
3318 //cp_lkm_usb_cnts(ctrl_start,1);
3319 spin_lock_irqsave (&cpbdev->ctrlq.lock, flags);
3320 retval = usb_submit_urb (urb, GFP_ATOMIC);
3321 switch (retval) {
3322 case 0:
3323 //net->trans_start = jiffies;
3324 //success: queue it
3325 __skb_queue_tail (&cpbdev->ctrlq, skb_in);
3326 skb_in = NULL;
3327 urb = NULL;
3328 req = NULL;
3329 break;
3330 case -ENODEV:
3331 break;
3332 case -EPROTO:
3333 case -EPIPE:
3334 break;
3335 default:
3336 break;
3337 }
3338 spin_unlock_irqrestore (&cpbdev->ctrlq.lock, flags);
3339
3340ctrl_done:
3341 if(req) {
3342 kfree(req);
3343 }
3344 if(urb) {
3345 //cp_lkm_usb_urb_cnt(-1);
3346 usb_free_urb(urb);
3347 }
3348 if(skb_in) {
3349 //cp_lkm_usb_cnts(ctrl_start,-1);
3350 dev_kfree_skb_any (skb_in);
3351 }
3352
3353 DEBUG_TRACE("%s() retval %d", __FUNCTION__, retval);
3354
3355 return retval;
3356}
3357
3358
3359#define THROTTLE_JIFFIES (HZ/8)
3360/*
3361 * This function runs in a hw interrupt context. Do not put any DEBUG_XX print messages in here.
3362*/
3363static void cp_lkm_usb_xmit_complete (struct urb *urb)
3364{
3365 unsigned long flags;
3366 struct sk_buff *skb = (struct sk_buff *) urb->context;
3367 struct skb_data *entry = (struct skb_data *) skb->cb;
3368 struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev *)entry->cpbdev;
3369 struct cp_lkm_base_ep* bep = (struct cp_lkm_base_ep*)entry->bep;
3370 bool is_data = false;
3371 struct cp_lkm_usb_dev* cpdev;
3372
3373 //remove skb from the list first thing so no other code context looking at the
3374 //list (such as unlink_urbs) can mess with it.
3375 spin_lock_irqsave(&cpbdev->out_q.lock,flags);
3376 __skb_unlink(skb, &cpbdev->out_q);
3377 spin_unlock_irqrestore(&cpbdev->out_q.lock,flags);
3378
3379 bep->q_cnt--;
3380
3381 if(bep->ep_num == cpbdev->data_out_bep_num) {
3382 is_data = true;
3383 }
3384
3385 // we save mux id of the cpdev that sent each tx pckt.
3386 cpdev = cp_lkm_usb_find_dev(entry->unique_id);
3387
3388 //skip status and error checking if the device has unplugged
3389 if(!cp_lkm_usb_is_base_attached(cpbdev)) {
3390 goto xmit_done;
3391 }
3392
3393 if (urb->status != 0) {
3394 UPDATE_STATS(cpdev->edi->pm_stats64_ctx, tx_errors, 1);
3395 switch (urb->status) {
3396 case -EPIPE:
3397 //don't have to clear halts on ctrl ep
3398 if (bep->ep_num != 0) {
3399 cp_lkm_usb_defer_kevent (cpbdev, bep, EVENT_TX_HALT);
3400 }
3401 break;
3402
3403 /* software-driven interface shutdown */
3404 case -ECONNRESET: // async unlink
3405 case -ESHUTDOWN: // hardware gone
3406 break;
3407
3408 case -ENODEV:
3409 break;
3410
3411 // like rx, tx gets controller i/o faults during khubd delays
3412 // and so it uses the same throttling mechanism.
3413 case -EPROTO:
3414 case -ETIME:
3415 case -EILSEQ:
3416 if (!timer_pending (&cpbdev->rx_delay)) {
3417 mod_timer (&cpbdev->rx_delay, jiffies + THROTTLE_JIFFIES);
3418 //if (netif_msg_link (dev))
3419 // devdbg (dev, "tx throttle %d",
3420 // urb->status);
3421 }
3422 //netif_stop_queue (dev->net);
3423 break;
3424 default:
3425 //if (netif_msg_tx_err (dev))
3426 // devdbg (dev, "tx err %d", entry->urb->status);
3427 break;
3428 }
3429 }
3430
3431xmit_done:
3432 entry->state = out_done;
3433
3434 if(is_data) {
3435 //cpdev->dbg_total_tx_irq++;
3436 cp_lkm_usb_done_and_defer_data(cpbdev, skb, DATA_SRC_TX);
3437 }
3438 else{
3439 cp_lkm_usb_done_and_defer_other(cpbdev, skb);
3440 }
3441}
3442
3443static int cp_lkm_usb_start_xmit_common(void *ctx, struct sk_buff *skb_in, int src, struct cp_lkm_ep* ep)
3444{
3445 struct cp_lkm_usb_dev* cpdev = (struct cp_lkm_usb_dev *)ctx;
3446 struct cp_lkm_usb_base_dev* cpbdev;
3447 struct cp_lkm_base_ep* bep;
3448 int length;
3449 int retval = NET_XMIT_SUCCESS;
3450 struct urb *urb = NULL;
3451 struct skb_data *entry;
3452 unsigned long flags;
3453 struct sk_buff* skb_out = NULL;
3454 int wres;
3455
3456 if(NULL == cpdev || !cp_lkm_usb_is_attached(cpdev) || !cp_lkm_usb_is_base_attached(cpdev->cpbdev)) {
3457 //printk("%s() no ctxt\n", __FUNCTION__);
3458 dev_kfree_skb_any(skb_in);
3459 return -1;
3460 }
3461
3462 cpbdev = cpdev->cpbdev;
3463
3464 //the network doesn't have a pointer to the ep readily available so he passes in NULL for ep so we can
3465 //fetch the well known ep for the data out ep
3466 length = 0;
3467 if(src == CP_LKM_WRAPPER_SRC_DATA && ep == NULL){
3468 ep = cp_lkm_usb_get_ep(cpdev,cpdev->data_out_ep_num);
3469 length = skb_in->len;
3470 }
3471 bep = ep->bep;
3472
3473 while(1) {
3474 skb_out = NULL;
3475 urb = NULL;
3476 retval = NET_XMIT_SUCCESS;
3477
3478 //DEBUG_ERROR("%s() wrap it skb_in:%p", __FUNCTION__, skb_in);
3479
3480 //only use wrappers on the data endpoint
3481 if(ep->ep_num == cpdev->data_out_ep_num) {
3482 //DEBUG_ERROR("%s() wrap it", __FUNCTION__);
3483 //spin_lock_irqsave (&cp_lkm_usb_mgr.lock, flags);
3484 wres = cp_lkm_wrapper_send(cpbdev->wrapper_ctxt, src, cpdev->mux_id, skb_in, &skb_out);
3485 skb_in = NULL; //we no longer own skb so null its pointer for future call if we loop
3486 //spin_unlock_irqrestore (&cp_lkm_usb_mgr.lock, flags);
3487 if (wres == CP_LKM_WRAPPER_RES_ERROR) {
3488 DEBUG_ERROR("%s() wrapper error wres:0x%x, skb_out:%p", __FUNCTION__, wres, skb_out);
3489 UPDATE_STATS(cpdev->edi->pm_stats64_ctx, tx_dropped, 1);
3490 retval = -ENOMEM;
3491 goto xmit_done;
3492 }
3493 }
3494 else{
3495 //Not a data ep, send the skb and then we are done
3496 skb_out = skb_in;
3497 skb_in = NULL;
3498 wres = CP_LKM_WRAPPER_RES_DONE;
3499 }
3500
3501 //If we get here, send returned either done or again. skb_out can be NULL if there is nothing to
3502 //send, so check that first
3503 if(NULL == skb_out) {
3504// DEBUG_INFO("%s() no wrapped data", __FUNCTION__);
3505 goto xmit_done;
3506 }
3507
3508 if(cp_lkm_is_broadcom && ((uintptr_t)(skb_out->data) & 0x3)) {
3509 //broadcom unaligned packets that are multiples of 512 plus 3,4 or 5 bytes (515,516,517,1027,1028,1029,etc)
3510 //are corrupted for some reason, so need to copy into an aligned buffer
3511 int r = skb_out->len & 0x000001FF; //poor man's mod
3512 if (r >= 3 && r <= 5) {
3513 struct sk_buff* skb_new = skb_copy_expand(skb_out, 0, 0, GFP_ATOMIC);
3514 if(!skb_new) {
3515 retval = -ENOMEM;
3516 goto xmit_done;
3517 }
3518 //printk("%s() unaligned: %p, aligned: %p, len: %d, r: %d\n",__FUNCTION__,skb_out->data, skb_new->data, skb_out->len, r);
3519 dev_kfree_skb_any(skb_out);
3520 skb_out=skb_new;
3521 }
3522 }
3523
3524 if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
3525 //if (netif_msg_tx_err (dev))
3526 // devdbg (dev, "no urb");
3527 DEBUG_ERROR("%s() urb alloc failed", __FUNCTION__);
3528 UPDATE_STATS(cpdev->edi->pm_stats64_ctx, tx_dropped, 1);
3529 retval = -ENOMEM;
3530 goto xmit_done;
3531 }
3532 //cp_lkm_usb_urb_cnt(1);
3533 entry = (struct skb_data *) skb_out->cb;
3534 entry->urb = urb;
3535 entry->cpbdev = cpbdev;
3536 entry->bep = bep;
3537 entry->state = out_start;
3538 entry->unique_id = cpdev->unique_id;
3539 //cp_lkm_usb_cnts(out_start,1);
3540
3541 if(bep->type == UE_BULK) {
3542 usb_fill_bulk_urb (urb, cpbdev->udev, bep->pipe, skb_out->data,
3543 skb_out->len, cp_lkm_usb_xmit_complete, skb_out);
3544 }
3545 else{
3546 usb_fill_int_urb (urb, cpbdev->udev, bep->pipe, skb_out->data, skb_out->len,
3547 cp_lkm_usb_xmit_complete, skb_out, bep->interval);
3548 }
3549
3550 if (!(cpbdev->feature_flags & CP_LKM_FEATURE_NO_ZERO_PACKETS)) {
3551 urb->transfer_flags |= URB_ZERO_PACKET;
3552 }
3553
3554 // DEBUG_INFO("%s()", __FUNCTION__);
3555 // DEBUG_INFO("%s() send to ep: 0x%x type:%d, pipe:0x%x", __FUNCTION__, ep->ep_num, ep->type, ep->pipe);
3556
3557 spin_lock_irqsave (&cpbdev->out_q.lock, flags);
3558 retval = usb_submit_urb (urb, GFP_ATOMIC);
3559 switch (retval) {
3560 case 0:
3561 //net->trans_start = jiffies;
3562 //success: queue it
3563 __skb_queue_tail (&cpbdev->out_q, skb_out);
3564 bep->q_cnt++;
3565 skb_out = NULL;
3566 urb = NULL;
3567 if(ep->ep_num == cpdev->data_out_ep_num) {
3568 cpbdev->tx_usb_q_count++;
3569 if(cpbdev->tx_usb_q_count >= CP_LKM_USB_TX_PAUSE_Q_PKTS){
3570 if(!cpbdev->tx_paused) {
3571 //pause all cpdevs
3572 cp_lkm_usb_dev_pause(cpbdev, true);
3573 cp_lkm_usb_stuck_check(cpbdev, CP_LKM_STUCK_START);
3574 }
3575 }
3576 }
3577 break;
3578 case -EPIPE:
3579 UPDATE_STATS(cpdev->edi->pm_stats64_ctx, tx_errors, 1);
3580 //don't clear halts on ctrl ep
3581 if(ep->ep_num != 0) {
3582 cp_lkm_usb_defer_kevent(cpbdev, bep, EVENT_TX_HALT);
3583 }
3584 break;
3585 case -ENODEV:
3586 break;
3587 case -EPROTO:
3588 default:
3589 //if (netif_msg_tx_err (dev))
3590 UPDATE_STATS(cpdev->edi->pm_stats64_ctx, tx_errors, 1);
3591 // devdbg (dev, "tx: submit urb err %d", retval);
3592 break;
3593 }
3594 spin_unlock_irqrestore (&cpbdev->out_q.lock, flags);
3595
3596xmit_done:
3597 if (retval) {
3598 DEBUG_TRACE("%s() failed to send: %d", __FUNCTION__, retval);
3599 //cp_lkm_usb_cnts(out_start,-1);
3600 }
3601
3602 //if these are non null then they weren't sent so free them
3603 if (skb_out){
3604 dev_kfree_skb_any (skb_out);
3605 }
3606 if(urb) {
3607 //cp_lkm_usb_urb_cnt(-1);
3608 usb_free_urb (urb);
3609 }
3610
3611 //Bail out of while loop unless the wrapper asked to be called again
3612 if(wres != CP_LKM_WRAPPER_RES_AGAIN) {
3613 break;
3614 }
3615
3616 length = 0;
3617
3618 }
3619 return retval;
3620}
3621
3622static int cp_lkm_usb_start_xmit (void *ctx, struct sk_buff *skb)
3623{
3624 struct cp_lkm_usb_dev* cpdev = (struct cp_lkm_usb_dev *)ctx;
3625 struct cp_lkm_usb_base_dev* cpbdev;
3626 int res;
3627
3628 if(NULL == cpdev){
3629 DEBUG_TRACE("%s() no ctxt", __FUNCTION__);
3630 dev_kfree_skb_any(skb);
3631 return -1;
3632 }
3633 cpbdev = cpdev->cpbdev;
3634 if(cpbdev->tx_paused || CP_LKM_USB_ACTIVE != cpdev->state) {
3635 DEBUG_TRACE("%s() no ctxt", __FUNCTION__);
3636 dev_kfree_skb_any(skb);
3637 return -1;
3638 }
3639 res = cp_lkm_usb_start_xmit_common(ctx, skb, CP_LKM_WRAPPER_SRC_DATA, NULL);
3640 return res;
3641}
3642
3643static int cp_lkm_usb_to_cplkm_status(int usb_status)
3644{
3645 int cplkm_status;
3646 switch(usb_status) {
3647 case 0:
3648 cplkm_status = CP_LKM_STATUS_OK;
3649 break;
3650 default:
3651 //printk("usb err: %d\n", usb_status);
3652 cplkm_status = CP_LKM_STATUS_ERROR;
3653 break;
3654 }
3655 return cplkm_status;
3656}
3657
3658static void cp_lkm_usb_other_recv_process (struct cp_lkm_usb_base_dev* cpbdev, struct sk_buff *skb_in)
3659{
3660 struct skb_data *entry;
3661 struct cp_lkm_msg_hdr hdr;
3662 int status;
3663 struct cp_lkm_base_ep* bep;
3664 struct cp_lkm_usb_dev* cpdev = NULL;
3665 struct list_head *tmp, *nxt;
3666 struct cp_lkm_ep *ep;
3667
3668 if(!cp_lkm_usb_is_base_attached(cpbdev)){
3669 //printk("%s(), cpbdev: %p not attached. state: %d\n",__FUNCTION__,cpbdev,cpbdev->base_state);
3670 dev_kfree_skb_any (skb_in);
3671 return;
3672 }
3673 entry = (struct skb_data *)skb_in->cb;
3674 bep = entry->bep;
3675
3676 //Note: pkts on non-data endpoints when running with clones present a problem because there are no headers on these
3677 // pkts to tell us which clone ep to send this to. Fortunately, the modem stack serializes clone instances so
3678 // only one can be accessing the non-data endpoints at a time. In order to get any responses from the module
3679 // over their endpoint, they must be either listening or have posted a recv. We use this fact to find the
3680 // ep we need to send the recv back on.
3681 list_for_each_safe(tmp, nxt, &bep->eps) {
3682 ep = list_entry(tmp, struct cp_lkm_ep, list_bep);
3683 if (ep->con_flags & (CP_LKM_USB_LISTEN | CP_LKM_USB_RECV)) {
3684 cpdev = ep->cpdev;
3685 if (ep->con_flags & CP_LKM_USB_RECV) {
3686 //can only have one recv pending on non-data endpoints for a given ep number.
3687 //therefor when the clone is done, the base is done
3688 ep->con_flags &= ~CP_LKM_USB_RECV;
3689 bep->con_flags &= ~CP_LKM_USB_RECV;
3690 }
3691 //printk("%s(), other data cpdev: %p, ep: %p, num: 0x%x, flags: 0x%x\n",__FUNCTION__,cpdev,ep, ep->ep_num,ep->con_flags);
3692 break;
3693 }
3694 }
3695
3696 if (!cpdev) {
3697 //printk("%s() no cpdev unexpectedly for unique_id: %d",__FUNCTION__, entry->unique_id);
3698 dev_kfree_skb_any (skb_in);
3699 return;
3700 }
3701
3702 status = cp_lkm_usb_to_cplkm_status(entry->status);
3703 //printk("%s() other data uid: %d, ep_num:0x%x, status:%d, len: %d\n", __FUNCTION__, cpdev->unique_id,bep->ep_num, entry->status, skb_in->len);
3704
3705 memset(&hdr,0,sizeof(hdr));
3706 hdr.instance_id = cpdev->unique_id;
3707 hdr.cmd = CP_LKM_USB_CMD_DATA_RECV;
3708 hdr.status = status;
3709 hdr.len = skb_in?skb_in->len:0;
3710 hdr.arg1 = bep->ep_num;
3711 cp_lkm_post_message(&cp_lkm_usb_mgr.common, &hdr, skb_in);
3712
3713 return;
3714}
3715
3716
3717static void cp_lkm_usb_ctrl_process (struct cp_lkm_usb_base_dev* cpbdev, struct sk_buff *skb_in)
3718{
3719 struct skb_data *entry;
3720 struct cp_lkm_msg_hdr hdr;
3721 int status;
3722 static struct cp_lkm_usb_dev* cpdev = NULL;
3723
3724 DEBUG_TRACE("%s()", __FUNCTION__);
3725 if(!cp_lkm_usb_is_base_attached(cpbdev)){
3726 dev_kfree_skb_any (skb_in);
3727 return;
3728 }
3729
3730 entry = (struct skb_data *)skb_in->cb;
3731 cpdev = cp_lkm_usb_find_dev(entry->unique_id);
3732 if (!cpdev) {
3733 //printk("%s() no cpdev unexpectedly for unique_id: %d",__FUNCTION__, entry->unique_id);
3734 dev_kfree_skb_any (skb_in);
3735 return;
3736 }
3737
3738 status = cp_lkm_usb_to_cplkm_status(entry->status);
3739 memset(&hdr,0,sizeof(hdr));
3740 hdr.instance_id = cpdev->unique_id;
3741 hdr.cmd = CP_LKM_USB_CMD_CTRL_RECV;
3742 hdr.status = status;
3743 hdr.len = skb_in?skb_in->len:0;
3744 hdr.arg1 = 0; //ctrl channel ep is always 0
3745
3746 cp_lkm_post_message(&cp_lkm_usb_mgr.common, &hdr, skb_in);
3747 DEBUG_TRACE("%s() ctrl response status:%d", __FUNCTION__, entry->status);
3748
3749 return;
3750}
3751
3752
3753//This function runs in an interrupt context so it can't be preempted. This means cpdev can't
3754//be deleted out from under
3755static void cp_lkm_usb_data_recv_process (struct cp_lkm_usb_base_dev* cpbdev, struct sk_buff *skb_in)
3756{
3757 struct sk_buff *skb_out;
3758 int res;
3759 int dst;
3760 struct skb_data *entry;
3761 struct cp_lkm_usb_dev* cpdev;
3762 struct cp_lkm_base_ep* bep;
3763 int ep_num;
3764 int mux_id;
3765
3766 // WARNING: The memory this pointer points to will be freed by the wrapper, so copy everything you need
3767 // out of it here before going into the while loop
3768 entry = (struct skb_data *)skb_in->cb;
3769 bep = entry->bep;
3770 ep_num = bep->ep_num;
3771
3772 //printk("%s() cpbdev: %p, bep: %p base_state: %d\n", __FUNCTION__, cpbdev, bep, cpbdev->base_state);
3773
3774 if(!cp_lkm_usb_is_base_attached(cpbdev)){
3775 dev_kfree_skb_any (skb_in);
3776 return;
3777 }
3778
3779 while(1) {
3780 skb_out = NULL;
3781
3782 mux_id = 0;
3783
3784 res = cp_lkm_wrapper_recv(cpbdev->wrapper_ctxt, &dst, &mux_id, skb_in, &skb_out);
3785
3786 if (dst != CP_LKM_WRAPPER_DST_CTRL && dst != CP_LKM_WRAPPER_DST_DATA) {
3787 // this is something other than data that we don't know what to do with, so drop it.
3788 goto recv_done;
3789 }
3790
3791 cpdev = cp_lkm_usb_find_muxed_dev(cpbdev, mux_id);
3792
3793 skb_in = NULL;
3794
3795 if (NULL == cpdev) {
3796 //LOG("%s(), no cpdev found for mux_id: 0x%x, or base_id: %d", __FUNCTION__,mux_id,cpbdev->base_id);
3797 DEBUG_WARN("%s(), no cpdev found for mux_id: 0x%x, or base_id: %d", __FUNCTION__,mux_id,cpbdev->base_id);
3798 goto recv_done;
3799 }
3800
3801 if(res == CP_LKM_WRAPPER_RES_ERROR) {
3802 UPDATE_STATS(cpdev->edi->pm_stats64_ctx, rx_dropped, 1);
3803 goto recv_done;
3804 }
3805
3806 //printk("%s() cpdev: %p, ep_num: 0x%x, dst: %d, mux_id: %d, state: %d, res: %d\n", __FUNCTION__, cpdev, ep_num, dst, mux_id, cpdev->state, res);
3807
3808 //DEBUG_INFO("%s() while() - skb_out:%p, dst:%d, res:%d", __FUNCTION__, skb_out, dst, res);
3809
3810 //if nothing to send, see if we can bail or if need to call again
3811 if(NULL == skb_out){
3812 goto recv_done;
3813 }
3814
3815 if(dst == CP_LKM_WRAPPER_DST_CTRL) {
3816 //printk("%s() ctrl pkt cpdev: %p\n", __FUNCTION__, cpdev);
3817 if (skb_out->len) { // watch for 0 length short packets
3818 struct cp_lkm_msg_hdr hdr;
3819
3820 DEBUG_TRACE("%s() recv app pkt", __FUNCTION__);
3821 memset(&hdr,0,sizeof(hdr));
3822 hdr.instance_id = cpdev->unique_id;
3823 hdr.cmd = CP_LKM_USB_CMD_DATA_RECV;
3824 hdr.status = CP_LKM_STATUS_OK;
3825 hdr.len = skb_out->len;
3826 hdr.arg1 = ep_num;
3827
3828 cp_lkm_post_message(&cp_lkm_usb_mgr.common, &hdr, skb_out);
3829 skb_out = NULL;
3830 }
3831 }
3832 //dst == CP_LKM_WRAPPER_DST_DATA
3833 else{
3834 //printk("%s() data pkt cpdev: %p\n", __FUNCTION__, cpdev);
3835 if (skb_out->len && cpdev->edi->pm_recv){
3836 //printk("%s() data pkt send to pm cpdev: %p, first byte: 0x%x\n", __FUNCTION__, cpdev, skb_out->data[0]);
3837 cpdev->edi->pm_recv(cpdev->edi->pm_recv_ctx, skb_out);
3838 skb_out = NULL;
3839 }
3840 }
3841
3842recv_done:
3843 if(skb_out) {
3844 dev_kfree_skb_any(skb_out);
3845 }
3846
3847 //if wrapper didn't ask to be called back, then done
3848 if(res != CP_LKM_WRAPPER_RES_AGAIN) {
3849 break;
3850 }
3851
3852 }
3853
3854 return;
3855}
3856
3857/*
3858 * This function runs in a hw interrupt context. Do not put any DEBUG_XX print messages in here.
3859*/
3860static void cp_lkm_usb_recv_complete (struct urb *urb)
3861{
3862 unsigned long flags;
3863 struct sk_buff *skb = (struct sk_buff *) urb->context;
3864 struct skb_data *entry = (struct skb_data *) skb->cb;
3865 struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev *)entry->cpbdev;
3866 struct cp_lkm_usb_dev* cpdev_stats_only;
3867 int urb_status = urb->status;
3868 struct cp_lkm_base_ep* bep = entry->bep;
3869 bool is_data = false;
3870 //if(urb->status) {
3871 // printk("recv_done: status: %d, len:%d\n", urb->status, urb->actual_length);
3872 //}
3873
3874 // we don't know what cpdev recv packets are destined for when running muxed clones, so report all errors
3875 // to the base device (for non cloned cases, this will always be the correct cpdev)
3876 cpdev_stats_only = cp_lkm_usb_find_dev(cpbdev->base_id);
3877
3878 //remove skb from the list first thing so no other code conext looking at the
3879 //list (such as unlink_urbs) can mess with it.
3880 spin_lock_irqsave(&cpbdev->in_q.lock,flags);
3881 __skb_unlink(skb, &cpbdev->in_q);
3882 spin_unlock_irqrestore(&cpbdev->in_q.lock,flags);
3883
3884 skb_put (skb, urb->actual_length);
3885 if(bep->ep_num == cpbdev->data_in_bep_num) {
3886 is_data = true;
3887 entry->state = in_data_done;
3888 //note we don't decrement the data ep cnt until we process the pkt
3889 } else{
3890 bep->q_cnt--;
3891 entry->state = in_other_done;
3892 }
3893 entry->status = urb->status;
3894
3895 //skip status and error checking if the device has unplugged
3896 if(!cp_lkm_usb_is_base_attached(cpbdev)) {
3897 entry->status = -ENODEV;
3898 goto recv_done;
3899 }
3900
3901 switch (urb_status) {
3902 // success
3903 case 0:
3904 break;
3905
3906 // stalls need manual reset. this is rare ... except that
3907 // when going through USB 2.0 TTs, unplug appears this way.
3908 // we avoid the highspeed version of the ETIMEOUT/EILSEQ
3909 // storm, recovering as needed.
3910 case -EPIPE:
3911 UPDATE_STATS(cpdev_stats_only->edi->pm_stats64_ctx, rx_errors, 1);
3912 //don't clear halts on ctrl ep
3913 if(bep->ep_num != 0) {
3914 cp_lkm_usb_defer_kevent (cpbdev, bep, EVENT_RX_HALT);
3915 }
3916 goto block;
3917
3918 // software-driven interface shutdown
3919 case -ECONNRESET: // async unlink
3920 case -ESHUTDOWN: // hardware gone
3921 goto block;
3922
3923 case -ENODEV:
3924 //printk("recv_done nodev:%d\n", ENODEV);
3925 goto block;
3926
3927 // we get controller i/o faults during khubd disconnect() delays.
3928 // throttle down resubmits, to avoid log floods; just temporarily,
3929 // so we still recover when the fault isn't a khubd delay.
3930 case -EPROTO:
3931 case -ETIME:
3932 case -EILSEQ:
3933 UPDATE_STATS(cpdev_stats_only->edi->pm_stats64_ctx, rx_errors, 1);
3934 if (!timer_pending (&cpbdev->rx_delay)) {
3935 mod_timer (&cpbdev->rx_delay, jiffies + THROTTLE_JIFFIES);
3936 }
3937block:
3938 if(bep->ep_num == cpbdev->data_in_bep_num) {
3939 bep->q_cnt--;
3940 entry->state = in_data_cleanup;
3941 }
3942 else{
3943 entry->state = in_other_cleanup;
3944 }
3945
3946 break;
3947
3948 // data overrun ... flush fifo?
3949 case -EOVERFLOW:
3950 UPDATE_STATS(cpdev_stats_only->edi->pm_stats64_ctx, rx_over_errors, 1);
3951
3952 // FALLTHROUGH
3953
3954 default:
3955 if(bep->ep_num == cpbdev->data_in_bep_num) {
3956 bep->q_cnt--;
3957 entry->state = in_data_cleanup;
3958 }
3959 else{
3960 entry->state = in_other_cleanup;
3961 }
3962 UPDATE_STATS(cpdev_stats_only->edi->pm_stats64_ctx, rx_errors, 1);
3963 break;
3964 }
3965
3966 // on responses to a requested recv from the app driver, we need to always return something even on error so force it here
3967 if(bep->con_flags & CP_LKM_USB_RECV) {
3968 if(is_data){
3969 entry->state = in_data_done; //this should never happen, data endpoints always listen, they don't post recv's
3970 }
3971 else{
3972 entry->state = in_other_done;
3973 }
3974 }
3975
3976recv_done:
3977 //do not use the 'entry' struct after this call. It is part of the skb and the skb will be freed when the _bh function runs.
3978 //if you need something from it save it off before calling this
3979 if(is_data) {
3980 //cpdev->dbg_total_rx_irq++;
3981 //printk("%s(), got data on cpbdev: %p, bep: %p, id: %d\n",__FUNCTION__, cpbdev, entry->bep, cpbdev->base_id);
3982 cp_lkm_usb_done_and_defer_data(cpbdev, skb, DATA_SRC_RX);
3983 }
3984 else{
3985 //printk("%s(), got other data on cpbdev: %p, bep: %p, id: %d\n",__FUNCTION__, cpbdev, entry->bep, cpbdev->base_id);
3986 cp_lkm_usb_done_and_defer_other(cpbdev, skb);
3987 }
3988}
3989
3990//static int g_num_adjusts = 0;
3991//static int g_num_recv_pkts = 0;
3992//static int g_num_iters = 0;
3993static int cp_lkm_usb_submit_recv(struct cp_lkm_usb_base_dev* cpbdev , struct urb *urb, gfp_t flags, struct cp_lkm_base_ep* bep, bool data)
3994{
3995 struct sk_buff *skb;
3996 struct skb_data *entry;
3997 int retval = 0;
3998 unsigned long lockflags;
3999 size_t size;
4000 int hdr_size = 0;
4001 int hdr_offset = 0;
4002 int pad = 0; //some platforms require alignment override. pad takes care of that.
4003
4004 //g_num_recv_pkts++;
4005 //g_num_iters++;
4006 //if(g_num_iters > 10000){
4007 // printk("%s() num pkts: %d, num adjusts: %d\n",__FUNCTION__,g_num_recv_pkts,g_num_adjusts);
4008 // g_num_iters = 0;
4009 //}
4010 size = bep->max_transfer_size;
4011 if (data) {
4012 hdr_size = cpbdev->pm_hdr_size;
4013 hdr_offset = cpbdev->pm_hdr_offset;
4014 }
4015
4016 if(cp_lkm_is_broadcom && (hdr_offset & 0x3)) {
4017 //Jira issue FW-14929: On broadcom, we have to keep the buffers four byte aligned else the USB block
4018 //corrupts the data (no idea why).
4019 //Round up the hdr_offset to nearest 4 byte boundary. This means pkts may not be aligned as expected,
4020 //so recieve function will need to either realign with a copy, or send up to the stack unaligned
4021 // See cp_lkm_pm_net_recv() to see how we decided to deal with it (subject to change).
4022 pad = 4 - (hdr_offset&0x3);
4023 //g_num_adjusts++;
4024 }
4025
4026 if ((skb = alloc_skb (size+hdr_size+pad, flags)) == NULL) {
4027 //if (netif_msg_rx_err (dev))
4028 // devdbg (dev, "no rx skb");
4029 cp_lkm_usb_defer_kevent (cpbdev, bep, EVENT_RX_MEMORY);
4030 return -ENOMEM;
4031 }
4032 if (data) {
4033 skb_reserve(skb, hdr_offset+pad);
4034 //printk("%s(), data: %p, len: %d, whs:%d, hs:%d, ho:%d\n",__FUNCTION__,skb->data,skb->len,wrapper_hdr_size,hdr_size,hdr_offset);
4035 }
4036 entry = (struct skb_data *) skb->cb;
4037 entry->urb = urb;
4038 entry->cpbdev = cpbdev;
4039 if(data) {
4040 entry->state = in_data_start;
4041 }
4042 else{
4043 entry->state = in_other_start;
4044 }
4045
4046 entry->status = 0;
4047 entry->bep = bep;
4048
4049 if(bep->type == UE_BULK) {
4050 usb_fill_bulk_urb (urb, cpbdev->udev, bep->pipe, skb->data, size,
4051 cp_lkm_usb_recv_complete, skb);
4052 }
4053 else{
4054 usb_fill_int_urb (urb, cpbdev->udev, bep->pipe, skb->data, size,
4055 cp_lkm_usb_recv_complete, skb, bep->interval);
4056 }
4057 //cp_lkm_usb_cnts(entry->state,1);
4058 spin_lock_irqsave (&cpbdev->in_q.lock, lockflags);
4059 if (cp_lkm_usb_is_base_attached(cpbdev) && !test_bit (EVENT_RX_HALT, &bep->err_flags)) {
4060 DEBUG_TRACE("%s() ep:0x%x, size:%d, type:%d, pipe:0x%x",__FUNCTION__, bep->ep_num, size, bep->type, bep->pipe);
4061 retval = usb_submit_urb (urb, GFP_ATOMIC);
4062 switch (retval) {
4063 case -EPIPE:
4064 //don't clear halts on ctrl ep
4065 if(bep->ep_num != 0) {
4066 cp_lkm_usb_defer_kevent (cpbdev, bep, EVENT_RX_HALT);
4067 }
4068 break;
4069 case -ENOMEM:
4070 cp_lkm_usb_defer_kevent (cpbdev, bep, EVENT_RX_MEMORY);
4071 break;
4072 case -ENODEV:
4073 //if (netif_msg_ifdown (dev))
4074 // devdbg (dev, "device gone");
4075 //netif_device_detach (dev->net);
4076 break;
4077 case -EPROTO:
4078 default:
4079 //if (netif_msg_rx_err (dev))
4080 // devdbg (dev, "rx submit, %d", retval);
4081 cp_lkm_schedule_rx_restock(cpbdev,bep);
4082 break;
4083 case 0:
4084 __skb_queue_tail (&cpbdev->in_q, skb);
4085 bep->q_cnt++;
4086 //if(cpdev->in_q.qlen == 1 && ep->index == CP_LKM_DATA_INDEX){
4087 // printk("rx q empty\n");
4088 //}
4089
4090 }
4091 } else {
4092 //if (netif_msg_ifdown (dev))
4093 // devdbg (dev, "rx: stopped");
4094 retval = -ENOLINK;
4095 }
4096 spin_unlock_irqrestore (&cpbdev->in_q.lock, lockflags);
4097 if (retval) {
4098 DEBUG_TRACE("%s() FAILED ep_num:0x%x ep_type:%d, retval: %d",__FUNCTION__, bep->ep_num, bep->type, retval);
4099 //cp_lkm_usb_cnts(entry->state,-1);
4100 dev_kfree_skb_any (skb);
4101 }
4102
4103 return retval;
4104}
4105
4106
4107static int cp_lkm_usb_init(void)
4108{
4109 DEBUG_TRACE("%s()", __FUNCTION__);
4110 memset(&cp_lkm_usb_mgr, 0x00, sizeof(struct cp_lkm_usb_ctx));
4111 cp_lkm_usb_mgr.common.open = cp_lkm_usb_open;
4112 cp_lkm_usb_mgr.common.close = cp_lkm_usb_close;
4113 cp_lkm_usb_mgr.common.handle_msg = cp_lkm_usb_handle_msg;
4114 cp_lkm_usb_mgr.common.handle_ioctl = cp_lkm_usb_handle_ioctl;
4115 INIT_LIST_HEAD(&cp_lkm_usb_mgr.dev_list);
4116
4117 cp_lkm_common_ctx_init(&cp_lkm_usb_mgr.common);
4118
4119 spin_lock_init(&cp_lkm_usb_mgr.lock);
4120 //sema_init(&cp_lkm_usb_mgr.thread_sem, 1);
4121
4122 if(!strcmp(PRODUCT_PLATFORM, "brcm_arm")) {
4123 LOG("cp_lkm: Broadcom platform");
4124 cp_lkm_is_broadcom = 1;
4125 }
4126
4127 LOG("cp_lkm: Product chipset %s",PRODUCT_INFO_CHIPSET);
4128 LOG("cp_lkm: Product platform %s",PRODUCT_PLATFORM);
4129
4130 //Things work better if the napi weight here matchs the global weight set in service_manager/services/firewall.py
4131 //This is even true if we don't use napi here since ethernet on some platforms use it
4132 if ((strcmp(PRODUCT_PLATFORM,"ramips")==0) && (strcmp(PRODUCT_INFO_CHIPSET, "3883")!=0)){
4133 //all ralink (mediatek) platforms except for 3883 use the low settings
4134 //use_high = false;
4135 CP_LKM_PM_NAPI_WEIGHT = 32;
4136 }
4137 else{
4138 //use_high = true;
4139 CP_LKM_PM_NAPI_WEIGHT = 64;
4140 }
4141
4142 //set up default settings for all platforms
4143 CP_LKM_USB_NAPI_MAX_WORK = CP_LKM_PM_NAPI_WEIGHT;
4144 CP_LKM_USB_MAX_RX_QLEN = CP_LKM_USB_NAPI_MAX_WORK;
4145 CP_LKM_USB_MAX_OTHER_QLEN = 2;
4146 CP_LKM_USB_TX_PAUSE_Q_PKTS = CP_LKM_USB_NAPI_MAX_WORK;
4147 CP_LKM_USB_TX_RESUME_Q_PKTS = CP_LKM_USB_TX_PAUSE_Q_PKTS/4;
4148 CP_LKM_USB_TX_SCHED_CNT = 1;
4149 CP_LKM_USB_RX_SCHED_CNT = 1;
4150 CP_LKM_USB_RESTOCK_MULTIPLE = 1; //restock rx as we process them
4151 CP_LKM_USB_TASKLET_CNT = 10;
4152 CP_LKM_USB_WORKQUEUE_CNT = 5;
4153 CP_LKM_USB_PROCESS_DIVISOR = 4;
4154
4155 LOG("cp_lkm: Processor: %s, Max work: %d, NAPI budget: %d, QLEN: %d.",PRODUCT_INFO_CHIPSET, CP_LKM_USB_NAPI_MAX_WORK, CP_LKM_PM_NAPI_WEIGHT, CP_LKM_USB_MAX_RX_QLEN);
4156
4157 return 0;
4158
4159}
4160
4161static int cp_lkm_usb_cleanup(void)
4162{
4163 //module is unloading, clean up everything
4164 // empty pending posted messages
4165 cp_lkm_cleanup_msg_list(&cp_lkm_usb_mgr.common);
4166
4167 cp_lkm_usb_close(&cp_lkm_usb_mgr.common);
4168 return 0;
4169}
4170
4171static int cp_lkm_usb_open(struct cp_lkm_common_ctx *ctx)
4172{
4173 //struct cp_lkm_usb_ctx* mgr;
4174
4175 DEBUG_TRACE("%s()", __FUNCTION__);
4176 //mgr = (struct cp_lkm_usb_ctx*)ctx;
4177
4178 return 0;
4179}
4180
4181static int cp_lkm_usb_close(struct cp_lkm_common_ctx *ctx)
4182{
4183 //unsigned long flags;
4184 //struct cp_lkm_usb_dev* cpdev;
4185 //struct cp_lkm_usb_close_intf ci;
4186 //struct cp_lkm_usb_unplug_intf ui;
4187 LOG("%s() called unexpectedly.", __FUNCTION__);
4188
4189 //NOTE: catkin 10/11/2019 - Close is only called in our system if the modem stack crashes. This means
4190 // things are in a bad state and the router will be rebooting. We decided not
4191 // to clean things up here because this code got into an infinite loop in
4192 // certain fail situations, which prevented the router from rebooting.
4193 // Revisit if close ever becomes a normal event.
4194
4195 /*
4196 while(1) {
4197 spin_lock(&cp_lkm_usb_mgr.lock);
4198
4199 cpdev = cp_lkm_usb_get_head_dev();
4200
4201 spin_unlock(&cp_lkm_usb_mgr.lock);
4202 if(!cpdev) {
4203 return 0;
4204 }
4205
4206 //TODO - when this closed we have a modem plugged, we will be deleting the top half of the driver while the bottom half is
4207 // still plugged. Figure out how to force the driver to disconnect the modem
4208 ci.unique_id = cpdev->unique_id;
4209 cp_lkm_usb_close_intf(&ci);
4210
4211 //the unplug removes the device from the list which prevents us from infinite looping here
4212 ui.unique_id = cpdev->unique_id;
4213 cp_lkm_usb_unplug_intf(&ui);
4214 }
4215
4216 cp_lkm_cleanup_msg_list(ctx);
4217 */
4218 return 0;
4219}
4220
4221static int cp_lkm_usb_handle_msg(struct cp_lkm_common_ctx *ctx, struct cp_lkm_msg_hdr *hdr, struct sk_buff *skb)
4222{
4223 int retval = -1;
4224 struct cp_lkm_ep* ep;
4225 struct cp_lkm_usb_dev* cpdev;
4226 struct cp_lkm_usb_base_dev* cpbdev;
4227
4228 //grab lock to protect global device list before searching (don't want to search it if another thread is adding or removing a cpdev)
4229 spin_lock(&cp_lkm_usb_mgr.lock);
4230 cpdev = cp_lkm_usb_find_dev(hdr->instance_id);
4231
4232 //grab thread semaphore so disconnect can't run and delete the cpdev while we are running here
4233 if(!cpdev || !cp_lkm_usb_is_attached(cpdev) || !cp_lkm_usb_is_base_attached(cpdev->cpbdev)) {
4234 spin_unlock(&cp_lkm_usb_mgr.lock);
4235 dev_kfree_skb_any (skb);
4236 //printk("%s() no device or no probe yet\n", __FUNCTION__);
4237 return 0;
4238 }
4239 cpbdev = cpdev->cpbdev;
4240 switch(hdr->cmd) {
4241 case CP_LKM_USB_CMD_DATA_SEND:
4242 {
4243 ep = cp_lkm_usb_get_ep(cpdev, hdr->arg1);
4244 if(ep) {
4245 //printk("%s(), send other data cpbdev: %p, cpdev: %p, bep: %p, ep: %p, num: 0x%x\n",__FUNCTION__,cpdev->cpbdev,cpdev,ep->bep,ep,ep->ep_num);
4246 retval = cp_lkm_usb_start_xmit_common(cpdev, skb, CP_LKM_WRAPPER_SRC_CTRL, ep);
4247 skb = NULL;
4248 }
4249 else{
4250 DEBUG_TRACE("%s() Invalid EP number 0x%x", __FUNCTION__, hdr->arg1);
4251 retval = -1;
4252 }
4253 }
4254 break;
4255 case CP_LKM_USB_CMD_CTRL_SEND:
4256 {
4257 retval = cp_lkm_usb_start_ctrl_xmit(cpdev, skb);
4258 skb = NULL;
4259 }
4260 break;
4261 }
4262
4263 spin_unlock(&cp_lkm_usb_mgr.lock);
4264
4265 if(skb) {
4266 dev_kfree_skb_any (skb);
4267 }
4268 return retval;
4269}
4270
4271static int cp_lkm_usb_handle_ioctl(struct cp_lkm_common_ctx *ctx, int cmd, void *k_argp)
4272{
4273 int retval = -1;
4274 //printk("%s(), cmd:0x%x\n", __FUNCTION__, _IOC_NR(cmd));
4275
4276 switch(cmd) {
4277 case CP_LKM_IOCTL_USB_PLUG_INTF:
4278 {
4279 struct cp_lkm_usb_plug_intf* pi = (struct cp_lkm_usb_plug_intf*)k_argp;
4280 retval = cp_lkm_usb_plug_intf(pi);
4281 }
4282 break;
4283 case CP_LKM_IOCTL_USB_SET_WRAPPER:
4284 {
4285 struct cp_lkm_usb_set_wrapper* sw = (struct cp_lkm_usb_set_wrapper*)k_argp;
4286 retval = cp_lkm_usb_set_wrapper(sw);
4287 }
4288 break;
4289 case CP_LKM_IOCTL_USB_SET_MUX_ID:
4290 {
4291 struct cp_lkm_usb_set_mux_id* smi = (struct cp_lkm_usb_set_mux_id*)k_argp;
4292 retval = cp_lkm_usb_set_mux_id(smi);
4293 }
4294 break;
4295 case CP_LKM_IOCTL_USB_OPEN_INTF:
4296 {
4297 struct cp_lkm_usb_open_intf* oi = (struct cp_lkm_usb_open_intf*)k_argp;
4298 retval = cp_lkm_usb_open_intf(oi);
4299 }
4300 break;
4301 case CP_LKM_IOCTL_USB_CLOSE_INTF:
4302 {
4303 struct cp_lkm_usb_close_intf* ci = (struct cp_lkm_usb_close_intf*)k_argp;
4304 retval = cp_lkm_usb_close_intf(ci);
4305 }
4306 break;
4307 case CP_LKM_IOCTL_USB_UNPLUG_INTF:
4308 {
4309 struct cp_lkm_usb_unplug_intf* ui = (struct cp_lkm_usb_unplug_intf*)k_argp;
4310 retval = cp_lkm_usb_unplug_intf(ui);
4311 }
4312 break;
4313 case CP_LKM_IOCTL_USB_EP_ACTION:
4314 {
4315 struct cp_lkm_usb_ep_action* ea = (struct cp_lkm_usb_ep_action*)k_argp;
4316 retval = cp_lkm_usb_ep_action(ea);
4317 }
4318 break;
4319 case CP_LKM_IOCTL_USB_PM_LINK:
4320 {
4321 struct cp_lkm_usb_pm_link *upl = (struct cp_lkm_usb_pm_link *)k_argp;
4322 retval = cp_lkm_usb_pm_link(upl);
4323 }
4324 break;
4325 case CP_LKM_IOCTL_USB_IS_ALIVE_INTF:
4326 {
4327 struct cp_lkm_usb_is_alive_intf* alivei = (struct cp_lkm_usb_is_alive_intf*)k_argp;
4328 retval = cp_lkm_usb_is_alive_intf(alivei);
4329 }
4330 }
4331
4332 return retval;
4333}
4334
4335
4336/******************************* kernel module PM instance functionality **********************************/
4337struct cp_lkm_pm_ctx {
4338 struct cp_lkm_common_ctx common;
4339 struct list_head pm_list;
4340 spinlock_t pm_list_lock;
4341};
4342
4343struct cp_lkm_pm_ctx cp_lkm_pm_mgr;
4344
4345
4346static void cp_lkm_pm_filter_empty_list(struct cp_lkm_pm_common *pm)
4347{
4348
4349 struct cp_lkm_pm_filter *filter;
4350 struct list_head *entry, *tmp;
4351
4352 list_for_each_safe(entry, tmp, &pm->filter_list) {
4353 filter = list_entry(entry, struct cp_lkm_pm_filter, list);
4354 list_del(&filter->list);
4355 kfree(filter);
4356 }
4357}
4358
4359static bool cp_lkm_pm_filter_ok(struct cp_lkm_pm_common *pm, unsigned char *buf, unsigned int buf_len)
4360{
4361 bool allow = true; // default allow the egress packet
4362
4363 struct list_head *pos;
4364
4365 struct in_device *in_dev;
4366 struct in_ifaddr *ifa;
4367 struct iphdr *ipv4_hdr;
4368 u32 ipv4_src_addr = 0;
4369 u32 ipv4_net_addr = 0;
4370 u32 ipv4_net_mask = 0;
4371
4372 ipv4_hdr = (struct iphdr *)buf;
4373
4374 // these are the include filters (white list) - exclude filters (black list) are not currently supported
4375 // exclude filters may need to be processed in another loop through the filters
4376 list_for_each(pos, &pm->filter_list) {
4377 struct cp_lkm_pm_filter *filter = list_entry(pos, struct cp_lkm_pm_filter, list);
4378 switch(filter->type) {
4379 case CP_LKM_PM_FILTER_TYPE_IP_SRC_WAN_SUBNET_INCLUDE:
4380 if (4 == ipv4_hdr->version) {
4381 // ipv4
4382 allow = false;
4383 ipv4_src_addr = __be32_to_cpu(ipv4_hdr->saddr);
4384 if(ipv4_src_addr == 0){
4385 //DHCP rebind packets may have a src addr of 0.0.0.0 and we want to let those through.
4386 allow = true;
4387 }
4388 else{
4389 // get network device IP address and check against src packet ip address
4390 rcu_read_lock();
4391 in_dev = rcu_dereference(pm->net_dev->ip_ptr);
4392 // in_dev has a list of IP addresses (because an interface can have multiple - check them all)
4393 for (ifa = in_dev->ifa_list; ifa != NULL; ifa = ifa->ifa_next) {
4394 ipv4_net_addr = __be32_to_cpu(ifa->ifa_local);
4395 ipv4_net_mask = __be32_to_cpu(ifa->ifa_mask);
4396 if ((ipv4_net_addr & ipv4_net_mask) == (ipv4_src_addr & ipv4_net_mask)) {
4397 // allow the packet
4398 allow = true;
4399 break;
4400 }
4401 }
4402 rcu_read_unlock();
4403 }
4404 }/* benk needs to be tested before ok to execute
4405 else if (6 == ipv4_hdr->version) {
4406 struct in6_addr *addr = (struct in6_addr *)&buf[2 * sizeof(u32)];
4407 if (ipv6_chk_prefix(addr, pm->net_dev)) {
4408 allow = true;
4409 }
4410 } */
4411 break;
4412 case CP_LKM_PM_FILTER_TYPE_IP_SRC_SUBNET_INCLUDE:
4413 if (4 == ipv4_hdr->version) {
4414 // ipv4
4415 allow = false;
4416 ipv4_src_addr = __be32_to_cpu(ipv4_hdr->saddr);
4417 if(ipv4_src_addr == 0){
4418 //DHCP rebind packets may have a src addr of 0.0.0.0 and we want to let those through.
4419 allow = true;
4420 }
4421 else if ((filter->subnet.ipv4_addr & filter->subnet.ipv4_mask) == (ipv4_src_addr & filter->subnet.ipv4_mask)) {
4422 allow = true;
4423 }
4424 }
4425
4426 default:
4427 break;
4428 }
4429
4430 if (allow) {
4431 break;
4432 }
4433 }
4434
4435 if (!allow) {
4436 DEBUG_WARN("%s() dropping packet - src:0x%x\n", __FUNCTION__, ipv4_src_addr);
4437 }
4438
4439 return allow;
4440}
4441/******************************* kernel module pm common functionality **********************************/
4442int cp_lkm_common_init(struct cp_lkm_pm_common *pmc)
4443{
4444 // allocate stats struct
4445 pmc->pcpu_stats64 = netdev_alloc_pcpu_stats(struct cp_lkm_pm_stats64);
4446 if (!pmc->pcpu_stats64) {
4447 return -ENOMEM;
4448 }
4449
4450
4451 pmc->pm_link_count = 0;
4452 spin_lock_init(&pmc->pm_link_lock);
4453 INIT_LIST_HEAD(&pmc->filter_list);
4454
4455 return 0;
4456}
4457
4458void cp_lkm_common_deinit(struct cp_lkm_pm_common *pmc)
4459{
4460 if (!pmc->pcpu_stats64) {
4461 return;
4462 }
4463 free_percpu(pmc->pcpu_stats64);
4464 pmc->pcpu_stats64 = NULL;
4465}
4466// The pm_link_lock is used to coordinate activity between xmit, poll, and link/unlink
4467// It is okay to poll and xmit at the same time, but we don't want to do either if we are linking or unlinking.
4468// link/unlink sets the pm_link_count negative to block both poll and xmit. If pm_link_count is not negative then
4469// both poll and xmit are free to grab the link at any time and at the same time.
4470//retval:
4471// 0 = you have the token, proceed
4472// -1 = you don't have the token, do not pass go
4473int cp_lkm_common_inc_link_lock(struct cp_lkm_pm_common* pmc)
4474{
4475 unsigned long flags;
4476 int retval = 0;
4477 spin_lock_irqsave(&pmc->pm_link_lock, flags);
4478 if(pmc->pm_link_count < 0) {
4479 retval = -1;
4480 }
4481 else{
4482 pmc->pm_link_count++;
4483 }
4484 spin_unlock_irqrestore(&pmc->pm_link_lock, flags);
4485 return retval;
4486}
4487
4488int cp_lkm_common_dec_link_lock(struct cp_lkm_pm_common* pmc)
4489{
4490 unsigned long flags;
4491 int retval = 0;
4492 spin_lock_irqsave(&pmc->pm_link_lock, flags);
4493 if(pmc->pm_link_count > 0) {
4494 pmc->pm_link_count--;
4495 }
4496 else{
4497 //should never hit this
4498 retval = -1;
4499 }
4500 spin_unlock_irqrestore(&pmc->pm_link_lock, flags);
4501 return retval;
4502}
4503
4504/******************************* kernel module net PM functionality **********************************/
4505
4506// common structure for ethernet and IP protocol managers
4507struct cp_lkm_pm_net {
4508 struct cp_lkm_pm_common common;
4509 struct ethhdr eth_hdr;
4510
4511};
4512
4513static struct rtnl_link_stats64 *cp_lkm_pm_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
4514{
4515 struct cp_lkm_pm_net *pm_net;
4516 int i;
4517 struct cp_lkm_pm_stats64 *pstats;
4518
4519 pm_net = netdev_priv(netdev);
4520
4521 for_each_possible_cpu(i) {
4522 u64 rx_packets, rx_bytes, rx_errors, rx_dropped, rx_over_errors;
4523 u64 tx_packets, tx_bytes, tx_errors, tx_dropped;
4524 unsigned int start;
4525 pstats = per_cpu_ptr(pm_net->common.pcpu_stats64, i);
4526 do {
4527 start = u64_stats_fetch_begin_irq(&pstats->syncp);
4528 rx_packets = pstats->rx_packets;
4529 tx_packets = pstats->tx_packets;
4530 rx_bytes = pstats->rx_bytes;
4531 tx_bytes = pstats->tx_bytes;
4532 rx_errors = pstats->rx_errors;
4533 tx_errors = pstats->tx_errors;
4534 rx_dropped = pstats->rx_dropped;
4535 tx_dropped = pstats->tx_dropped;
4536 rx_over_errors = pstats->rx_over_errors;
4537 } while (u64_stats_fetch_retry_irq(&pstats->syncp, start));
4538
4539 stats->rx_packets += rx_packets;
4540 stats->tx_packets += tx_packets;
4541 stats->rx_bytes += rx_bytes;
4542 stats->tx_bytes += tx_bytes;
4543 stats->rx_errors += rx_errors;
4544 stats->tx_errors += tx_errors;
4545 stats->rx_dropped += rx_dropped;
4546 stats->tx_dropped += tx_dropped;
4547 stats->rx_over_errors += rx_over_errors;
4548 }
4549
4550 return stats;
4551}
4552
4553static int cp_lkm_pm_net_open(struct net_device *dev)
4554{
4555 struct cp_lkm_pm_net *pm_net;
4556
4557 DEBUG_TRACE("%s()", __FUNCTION__);
4558
4559 pm_net = netdev_priv(dev);
4560 netif_start_queue(dev);
4561
4562 // is this link up?
4563 return 0;
4564}
4565
4566static int cp_lkm_pm_net_close(struct net_device *dev)
4567{
4568 struct cp_lkm_pm_net *pm_net = netdev_priv(dev);
4569 struct cp_lkm_msg_hdr hdr;
4570
4571 DEBUG_TRACE("%s()", __FUNCTION__);
4572
4573 // link change
4574 netif_stop_queue(dev);
4575
4576 // post message to indicate link down
4577 memset(&hdr,0,sizeof(hdr));
4578 hdr.instance_id = pm_net->common.unique_id;
4579 hdr.cmd = CP_LKM_PM_LINK_DOWN;
4580 hdr.status = CP_LKM_STATUS_OK;
4581 cp_lkm_post_message(&cp_lkm_pm_mgr.common, &hdr, NULL);
4582 LOG("Link Down indicated - id:%d\n", hdr.instance_id);
4583
4584
4585 return 0;
4586}
4587
4588static int cp_lkm_pm_net_xmit(struct sk_buff *skb, struct net_device *dev)
4589{
4590 struct cp_lkm_pm_net *pm_net = netdev_priv(dev);
4591 bool filter_ok = true;
4592 int link_res;
4593
4594 //see if we can grab the link lock, if not, we are either bringing up or taking down the link between USB and PM, so not safe to proceed
4595 link_res = cp_lkm_common_inc_link_lock(&pm_net->common);
4596 if(link_res < 0) {
4597 dev_kfree_skb_any(skb);
4598 return NETDEV_TX_OK;
4599 }
4600
4601 if (!pm_net->common.edi) {
4602 // cannot do anything without edi
4603 dev_kfree_skb_any(skb);
4604 goto net_xmit_done;
4605 }
4606
4607 //DEBUG_INFO("%s() - %s len:%d", __FUNCTION__, pm_net->common.net_dev->name, skb->len);
4608 UPDATE_STATS(pm_net->common.edi->pm_stats64_ctx, tx_bytes, (skb->len - sizeof(struct ethhdr)));
4609 UPDATE_STATS(pm_net->common.edi->pm_stats64_ctx, tx_packets, 1);
4610 /* Drop packet if interface is not attached */
4611 if (0 == pm_net->common.attached)
4612 goto drop;
4613
4614 if (!pm_net->common.edi->usb_send) {
4615 goto drop;
4616 }
4617
4618 filter_ok = cp_lkm_pm_filter_ok(&pm_net->common, skb->data + sizeof(struct ethhdr), skb->len - sizeof(struct ethhdr));
4619 if (!filter_ok) {
4620 pm_net->common.filter_drop_cnt++;
4621 DEBUG_WARN("%s() filter dropped packet cnt:%u", __FUNCTION__, pm_net->common.filter_drop_cnt);
4622 goto drop;
4623 }
4624
4625 switch(pm_net->common.type) {
4626 case CP_LKM_PM_TYPE_IP_DHCP:
4627 case CP_LKM_PM_TYPE_IP_STATIC:
4628 skb_pull(skb, sizeof(struct ethhdr)); // strip off the ethernet header
4629 break;
4630 default:
4631 break;
4632 }
4633
4634 // send data to USB module
4635 pm_net->common.edi->usb_send(pm_net->common.edi->usb_send_ctx, skb);
4636 goto net_xmit_done;
4637
4638drop:
4639 DEBUG_INFO("%s() - dropped", __FUNCTION__);
4640 UPDATE_STATS(pm_net->common.edi->pm_stats64_ctx, tx_dropped, 1);
4641 dev_kfree_skb_any(skb);
4642
4643net_xmit_done:
4644 cp_lkm_common_dec_link_lock(&pm_net->common);
4645 return NETDEV_TX_OK;
4646}
4647
4648
4649#if 0
4650static u8 cp_lkm_pm_test_find(u8* pkt, u32 pkt_len, u8* pattern, u32 pattern_len)
4651{
4652 s32 i;
4653 for(i = 0; i < (pkt_len - pattern_len); i++) {
4654 if (memcmp(&pkt[i],pattern,pattern_len) == 0) {
4655 return 1;
4656 }
4657 }
4658 return 0;
4659}
4660
4661static int cp_lkm_pm_test(struct sk_buff *skb)
4662{
4663static u8 first_pkt = 1;
4664static u8 started = 0;
4665static unsigned long total_data = 0;
4666static unsigned long start_time = 0;
4667static unsigned long stop_time = 0;
4668
4669static unsigned long invalid_pkts = 0;
4670static unsigned long total_pkts = 0;
4671
4672 int drop = 0;
4673 unsigned char *ptr = skb->data;
4674 u32 pkt_len = skb->len;
4675 u8 prot;
4676 //u8 type;
4677 u16 udp_len;
4678 u16 dst_port;
4679
4680 if (pkt_len < 20) {
4681 return 0;
4682 }
4683 //function is set up to parse IP pkts, may be called with ether framed pkts as well.
4684 //auto detect ether hdr and remove it
4685 if (ptr[0] != 0x45) {
4686 //ether header
4687 if(ptr[14] == 0x45){
4688 ptr+=14;
4689 pkt_len -= 14;
4690 }
4691 //vlan hdr
4692 else if (ptr[12] == 0x81 && ptr[18] == 0x45) {
4693 ptr+=18;
4694 pkt_len -=18;
4695 }
4696 }
4697
4698 if (ptr[0] != 0x45) {
4699 invalid_pkts++;
4700 }
4701
4702 //printk("0x%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x len: %d \n",ptr[0],ptr[1],ptr[2],ptr[3],ptr[4],ptr[5],ptr[6],ptr[7],ptr[8],ptr[9],ptr[10],ptr[11],ptr[12],ptr[13],ptr[14],ptr[15],pkt_len);
4703 //printk("0x%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x len: %d \n",ptr[0],ptr[1],ptr[2],ptr[3],ptr[4],ptr[5],ptr[6],ptr[7],ptr[8],ptr[9],ptr[10],ptr[11],ptr[12],ptr[13],ptr[14],ptr[15],pkt_len);
4704 if (pkt_len >= 28) {
4705 prot = ptr[9];
4706 if (prot == 0x11) {
4707 ptr += 20; //skip ip header
4708 pkt_len -= 20;
4709 dst_port = ntohs(*((u16*)(&ptr[2])));
4710 udp_len = ntohs(*((u16*)(&ptr[4])));
4711 //printk("Got UDP pkt\n");
4712 if (started && dst_port == 5001) {
4713 drop = 1;
4714 if (first_pkt == 1) {
4715 first_pkt = 0;
4716 total_data = 0;
4717 start_time = jiffies;
4718 invalid_pkts = 0;
4719 total_pkts = 0;
4720 }
4721 total_data += (udp_len+34); //add ip and ether hdrs
4722 stop_time = jiffies;
4723 total_pkts++;
4724 }
4725 else if(dst_port == 5002) {
4726 drop = 1;
4727 ptr += 8; //skip udp header
4728 printk("SHIM START PORT len: %d data: 0x%x, start=%x, stop=%x\n",udp_len, ptr[0], start_time, stop_time);
4729 if(cp_lkm_pm_test_find(ptr, udp_len, "START", 5)){
4730 printk("Got IPERF START\n");
4731 first_pkt = 1;
4732 started = 1;
4733 cp_lkm_wrapper_start_debug();
4734 }
4735 else if (cp_lkm_pm_test_find(ptr, udp_len, "STOP", 4)) {
4736 u32 delta_time = (stop_time - start_time)*1000/HZ;
4737 u32 bits_per_sec = (total_data/delta_time)*8000; //in bytes per milisecond, need bits per second
4738 delta_time -= 2; //iperf has 2 second delay waiting for an ack we won't send
4739 started = 0;
4740 printk("Got IPERF STOP: Total data: %u, Total pkts: %u, Total invalid: %u, Total time: %u msec, BitsPerSec: %u\n",total_data, total_pkts, invalid_pkts, delta_time,bits_per_sec);
4741 cp_lkm_wrapper_stop_debug();
4742 }
4743 }
4744 }
4745 }
4746 return drop;
4747}
4748#endif
4749
4750// called in soft interrupt context - otherwise some protection around pm_net is required
4751//int num_ip_copies = 0;
4752//int num_eth_copies = 0;
4753//int num_pkts = 0;
4754//int num_iters = 0;
4755//int num_unaligned = 0;
4756static int cp_lkm_pm_net_recv(void *ctx, struct sk_buff *skb)
4757{
4758 struct cp_lkm_pm_net *pm_net;
4759 int err;
4760 int recv_bytes;
4761 struct sk_buff *skb_new;
4762 int align = 0; //set to 1 to always send 4 byte aligned IP pkts to network stack
4763 int pad = 20; //number of bytes to put on front of new skbs
4764
4765 //DEBUG_INFO("%s()", __FUNCTION__);
4766 if(NULL == ctx) {
4767 dev_kfree_skb_any(skb);
4768 return 0;
4769 }
4770
4771 //num_pkts++;
4772 //num_iters++;
4773 pm_net = (struct cp_lkm_pm_net *)ctx;
4774
4775 //printk("%s() pm_net: %p\n", __FUNCTION__, pm_net);
4776
4777
4778 skb->dev = pm_net->common.net_dev;
4779
4780 switch(pm_net->common.type) {
4781 case CP_LKM_PM_TYPE_ETHERNET_DHCP:
4782 case CP_LKM_PM_TYPE_ETHERNET_STATIC:
4783 case CP_LKM_PM_TYPE_ETHERNET_STATIC_NOARP:
4784 //this strips the ether header off the packet
4785 skb->protocol = eth_type_trans(skb, pm_net->common.net_dev);
4786 //Need IP hdr aligned for IP stack to avoid unaligned access interrupts
4787 if(align && ((uintptr_t)(skb->data) & 0x3)) {
4788 //num_eth_copies++;
4789 skb_new = skb_copy_expand(skb, pad, 0, GFP_ATOMIC);
4790 dev_kfree_skb_any(skb);
4791 skb=skb_new;
4792 }
4793 if (!skb) {
4794 // packet dropped
4795 UPDATE_STATS(pm_net->common.edi->pm_stats64_ctx, rx_dropped, 1);
4796 return -ENOMEM;
4797 }
4798 break;
4799
4800 case CP_LKM_PM_TYPE_IP_DHCP:
4801 case CP_LKM_PM_TYPE_IP_STATIC:
4802 // Need to add ether header first for processing, then remove it. Need IP hdr aligned when done.
4803 //
4804 // Note: avoid the temptation to skip adding the ether header and doing manually what the call
4805 // to eth_type_trans() does. We did that and it bit us (see Jira issue FW-16149)
4806 // The kernel expects the ether header to be present in the skb buff even though the data ptr
4807 // has been moved past it. Also, if the skb has been cloned, then we are dealing with an
4808 // aggregated modem protocol (multiple pkts per skb), so we have to make a copy to guarantee
4809 // our tmp ether header isn't written into the data space of the previous pkt from the set.
4810 //
4811 if((align && ((uintptr_t)(skb->data) & 0x3)) || (skb_headroom(skb) < ETH_HLEN) || skb_cloned(skb)){
4812 //printk("copy: align: %d, head: %d, cloned: %d, len: %d\n", ((uintptr_t)(skb->data) & 0x3), skb_headroom(skb), skb_cloned(skb), skb->len);
4813 //num_ip_copies++;
4814 skb_new = skb_copy_expand(skb, 16+pad, 0, GFP_ATOMIC);
4815 dev_kfree_skb_any(skb);
4816 skb=skb_new;
4817 }
4818
4819 if (!skb) {
4820 // packet dropped
4821 UPDATE_STATS(pm_net->common.edi->pm_stats64_ctx, rx_dropped, 1);
4822 return -ENOMEM;
4823 }
4824
4825 if (0x60 == (skb->data[0] & 0xF0)) { //mask off version bits of first byte of IP packet to check for ip version
4826 // set the hdr protocol type to IPV6
4827 pm_net->eth_hdr.h_proto = __constant_htons(ETH_P_IPV6);
4828 } else {
4829 // probably ipv4, but not explicitly checking
4830 // set the hdr protocol type to IPV4
4831 pm_net->eth_hdr.h_proto = __constant_htons(ETH_P_IP);
4832 }
4833 memcpy(skb_push(skb, sizeof(struct ethhdr)), (unsigned char *)&pm_net->eth_hdr, sizeof(struct ethhdr));
4834 //this strips the ether hdr off the packet
4835 skb->protocol = eth_type_trans(skb, pm_net->common.net_dev);
4836 break;
4837
4838 default:
4839 DEBUG_INFO("%s() invalid protocol type: %d", __FUNCTION__, pm_net->common.type);
4840 // packet dropped
4841 UPDATE_STATS(pm_net->common.edi->pm_stats64_ctx, rx_errors, 1);
4842 kfree(skb);
4843 return NET_RX_DROP;
4844 }
4845
4846 recv_bytes = skb->len;
4847
4848 //if (cp_lkm_pm_test(skb) == 1) {
4849 // dev_kfree_skb_any(skb);
4850 // return NET_RX_SUCCESS;
4851 //}
4852
4853 //if((int)(skb->data) & 0x3){
4854 //printk("Unaligned IP pkt!!!!!!!!!!!!\n");
4855 //num_unaligned++;
4856 //}
4857
4858
4859 //if(num_iters >= 10000) {
4860 // num_iters = 0;
4861 // printk("num_ip_copies: %d, num_eth_copies: %d, num_unaligned: %d, num_pkts: %d\n",num_ip_copies,num_eth_copies,num_unaligned,num_pkts);
4862 //}
4863
4864 netif_rx(skb);
4865 err = NET_RX_SUCCESS;
4866
4867 UPDATE_STATS(pm_net->common.edi->pm_stats64_ctx, rx_packets, 1);
4868 UPDATE_STATS(pm_net->common.edi->pm_stats64_ctx, rx_bytes, recv_bytes);
4869
4870 return 0;
4871}
4872
4873
4874static void cp_lkm_pm_net_get_hdr_size(void *ctx, int wrapper_hdr_size, int* hdr_size, int* hdr_offset)
4875{
4876 struct cp_lkm_pm_net *pm_net;
4877 int pad;
4878 int tmp_size;
4879 int pm_hdr = ETH_HLEN;
4880 int pm_extra = 6;
4881
4882 *hdr_size = 0;
4883 *hdr_offset = 0;
4884
4885 pm_net = (struct cp_lkm_pm_net *)ctx;
4886 if(!pm_net) {
4887 return;
4888 }
4889 //temp return here
4890 //return;
4891
4892 //calculate how much header space there is before the IP hdr.
4893 //this is needed to align the IP hdr properly for optimal performance
4894 switch(pm_net->common.type) {
4895 case CP_LKM_PM_TYPE_ETHERNET_DHCP:
4896 case CP_LKM_PM_TYPE_ETHERNET_STATIC:
4897 case CP_LKM_PM_TYPE_ETHERNET_STATIC_NOARP:
4898 //pkts will need room for the wrapper header and the ether hdr.
4899 //both headers will be present at the same time.
4900 tmp_size = wrapper_hdr_size + pm_hdr + pm_extra;
4901 pad = ((~tmp_size)+1)&0x3; //calculate padding needed for 4 byte boundary on alloc
4902 *hdr_size = tmp_size + pad;
4903 *hdr_offset = pad+pm_extra;
4904 break;
4905
4906 case CP_LKM_PM_TYPE_IP_DHCP:
4907 case CP_LKM_PM_TYPE_IP_STATIC:
4908 //pkts will need room for the wrapper header or the ether hdr
4909 //both headers won't be present at the same time. The wrapper is present
4910 //up through the USB side of the shim. We (the pm) add a temp ether header
4911 //for processing after the wrapper header is removed
4912 tmp_size = max(wrapper_hdr_size, pm_hdr+pm_extra);
4913 pad = ((~tmp_size)+1)&0x3; //calculate padding needed for 4 byte boundary on alloc
4914 *hdr_size = tmp_size + pad;
4915 *hdr_offset = *hdr_size - wrapper_hdr_size;
4916 break;
4917 default:
4918 break;
4919 }
4920}
4921
4922
4923static u32 cp_lkm_pm_net_get_link(struct net_device *dev)
4924{
4925 struct cp_lkm_pm_net *pm_net;
4926
4927 DEBUG_TRACE("%s()", __FUNCTION__);
4928 pm_net = netdev_priv(dev);
4929 if(!pm_net) {
4930 return 0;
4931 }
4932 return pm_net->common.attached;
4933}
4934
4935
4936#ifndef KERNEL_2_6_21
4937static const struct net_device_ops cp_lkm_pm_net_device_ops = {
4938 .ndo_open = cp_lkm_pm_net_open,
4939 .ndo_start_xmit = cp_lkm_pm_net_xmit,
4940 .ndo_stop = cp_lkm_pm_net_close,
4941 .ndo_get_stats64 = cp_lkm_pm_get_stats64
4942};
4943#endif
4944
4945static const struct ethtool_ops cp_lkm_pm_net_ethtool_ops = {
4946 .get_link = cp_lkm_pm_net_get_link,
4947};
4948
4949static void cp_lkm_pm_net_setup(struct net_device *net_dev)
4950{
4951 struct cp_lkm_pm_net *pm_net;
4952
4953 DEBUG_INFO("%s()", __FUNCTION__);
4954 pm_net = netdev_priv(net_dev);
4955 ether_setup(net_dev);
4956
4957#ifdef KERNEL_2_6_21
4958 net_dev->open = cp_lkm_pm_net_open;
4959 net_dev->hard_start_xmit = cp_lkm_pm_net_xmit;
4960 net_dev->stop = cp_lkm_pm_net_close;
4961#else
4962 net_dev->netdev_ops = &cp_lkm_pm_net_device_ops;
4963 net_dev->needed_headroom = 48;
4964 net_dev->needed_tailroom = 8;
4965#endif
4966
4967 net_dev->ethtool_ops = &cp_lkm_pm_net_ethtool_ops;
4968
4969}
4970
4971static int cp_lkm_pm_net_attach(struct cp_lkm_pm_ctx *mgr, cp_lkm_pm_type_t type, int uid, char *name, unsigned char *mac)
4972{
4973 int err;
4974 struct cp_lkm_pm_net *pm_net;
4975 struct net_device *net_dev;
4976 unsigned long flags;
4977#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17,0)
4978 net_dev = alloc_netdev(sizeof(struct cp_lkm_pm_net), name, NET_NAME_UNKNOWN, cp_lkm_pm_net_setup);
4979#else
4980 net_dev = alloc_netdev(sizeof(struct cp_lkm_pm_net), name, cp_lkm_pm_net_setup);
4981#endif
4982 if (!net_dev) {
4983 DEBUG_INFO("%s() alloc failed: %s", __FUNCTION__, name);
4984 return -ENOMEM;
4985 }
4986
4987 pm_net= netdev_priv(net_dev);
4988
4989 err = cp_lkm_common_init(&pm_net->common);
4990 if (err) {
4991 free_netdev(net_dev);
4992 return err;
4993 }
4994
4995 pm_net->common.net_dev = net_dev;
4996 pm_net->common.unique_id = uid;
4997 pm_net->common.type = type;
4998 pm_net->common.edi = NULL;
4999
5000 //printk("%s(%p) pm-uid: %d, pm_net: %p\n", __FUNCTION__, mgr, uid, pm_net);
5001
5002 switch (type) {
5003 case CP_LKM_PM_TYPE_ETHERNET_DHCP:
5004 case CP_LKM_PM_TYPE_ETHERNET_STATIC:
5005 case CP_LKM_PM_TYPE_ETHERNET_STATIC_NOARP:
5006 if(!memcmp(mac, "\x00\x00\x00\x00\x00\x00", ETH_ALEN)) {
5007 random_ether_addr(net_dev->dev_addr);
5008 } else {
5009 memcpy (net_dev->dev_addr, mac, ETH_ALEN);
5010 }
5011
5012 /////////////////////////Need to only do if driver says so.
5013 if (type == CP_LKM_PM_TYPE_ETHERNET_STATIC_NOARP) {
5014 net_dev->flags |= IFF_NOARP;
5015 }
5016 break;
5017 case CP_LKM_PM_TYPE_IP_DHCP:
5018 case CP_LKM_PM_TYPE_IP_STATIC:
5019 // random addr for DHCP functionality
5020 if(!memcmp(mac, "\x00\x00\x00\x00\x00\x00", ETH_ALEN) || !memcmp(mac, "\x00\x30\x44\x00\x00\x00", ETH_ALEN)) {
5021 random_ether_addr(net_dev->dev_addr);
5022 } else {
5023 memcpy (net_dev->dev_addr, mac, ETH_ALEN);
5024 }
5025
5026 net_dev->flags |= IFF_NOARP;
5027 memcpy(pm_net->eth_hdr.h_dest, net_dev->dev_addr, ETH_ALEN);
5028 random_ether_addr(pm_net->eth_hdr.h_source);
5029 break;
5030 default:
5031 DEBUG_INFO("%s() invalid protocol type: %d", __FUNCTION__, type);
5032 cp_lkm_common_deinit(&pm_net->common);
5033 free_netdev(net_dev);
5034 return -EINVAL;
5035 }
5036
5037 DEBUG_INFO("%s register netdev", __FUNCTION__);
5038 err = register_netdev(net_dev);
5039 if (err < 0) {
5040 DEBUG_INFO("%s netdev registration error", __FUNCTION__);
5041 cp_lkm_common_deinit(&pm_net->common);
5042 free_netdev(net_dev);
5043 return err;
5044 }
5045
5046 netif_device_attach(pm_net->common.net_dev);
5047
5048 netif_stop_queue(pm_net->common.net_dev);
5049
5050 pm_net->common.attached = 1;
5051
5052 spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
5053 list_add(&pm_net->common.list, &mgr->pm_list);
5054 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5055
5056 return 0;
5057}
5058
5059static int cp_lkm_pm_net_detach(struct cp_lkm_pm_ctx *mgr, int uid)
5060{
5061
5062 // find the object in the list
5063 struct list_head *pos;
5064 struct cp_lkm_pm_common *pm = NULL;
5065 unsigned long flags;
5066
5067 DEBUG_TRACE("%s(%p)", __FUNCTION__, mgr);
5068
5069 spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
5070 list_for_each(pos, &mgr->pm_list){
5071 struct cp_lkm_pm_common *pm_tmp = list_entry(pos, struct cp_lkm_pm_common, list);
5072 if(pm_tmp->unique_id == uid) {
5073 pm = pm_tmp;
5074 break;
5075 }
5076 }
5077
5078 if (!pm) {
5079 // already detached
5080 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5081 DEBUG_INFO("%s() already detached", __FUNCTION__);
5082 return 0;
5083 }
5084
5085 // remove the object
5086 list_del(&pm->list);
5087 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5088
5089 if (pm->attached) {
5090 DEBUG_INFO("%s() detaching", __FUNCTION__);
5091 netif_device_detach(pm->net_dev);
5092 pm->attached = 0;
5093 }
5094
5095 unregister_netdev(pm->net_dev);
5096
5097 // clean the filter list
5098 cp_lkm_pm_filter_empty_list(pm);
5099
5100 cp_lkm_common_deinit(pm);
5101 free_netdev(pm->net_dev); // this also frees the pm since it was allocated as part of the net_dev
5102
5103 return 0;
5104}
5105
5106static int cp_lkm_pm_net_activate(struct cp_lkm_pm_ctx *mgr, int uid, bool activate)
5107{
5108 // find the object in the list
5109 struct list_head *pos;
5110 struct cp_lkm_pm_common *pm = NULL;
5111 unsigned long flags;
5112 //printk("%s(%p) activate: %d\n", __FUNCTION__, mgr, activate);
5113
5114 spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
5115 list_for_each(pos, &mgr->pm_list){
5116 struct cp_lkm_pm_common *pm_tmp = list_entry(pos, struct cp_lkm_pm_common, list);
5117 if(pm_tmp->unique_id == uid) {
5118 pm = pm_tmp;
5119 break;
5120 }
5121 }
5122
5123 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5124
5125 if (!pm) {
5126 // couldn't find object - already unplugged
5127 DEBUG_INFO("%s() already unplugged", __FUNCTION__);
5128 return 0;
5129 }
5130
5131 if (activate) {
5132 //netif_start_queue(pm->net_dev);
5133 if (pm->edi) {
5134 pm->edi->pm_recv_ctx = pm;
5135 }
5136 netif_wake_queue(pm->net_dev);
5137 } else {
5138 netif_stop_queue(pm->net_dev);
5139 if (pm->edi) {
5140 pm->edi->pm_recv_ctx = NULL;
5141 //printk("pm_recv_ctx null\n");
5142 }
5143
5144 // remove the filters - will be added back in before activate
5145 cp_lkm_pm_filter_empty_list(pm);
5146 }
5147
5148 return 0;
5149}
5150
5151int cp_lkm_pm_net_pause(void *ctx)
5152{
5153 struct cp_lkm_pm_common* pm = (struct cp_lkm_pm_common *)ctx;
5154 if(!ctx) {
5155 return 0;
5156 }
5157 netif_stop_queue(pm->net_dev);
5158 return 0;
5159
5160}
5161int cp_lkm_pm_net_resume(void *ctx)
5162{
5163 struct cp_lkm_pm_common* pm = (struct cp_lkm_pm_common *)ctx;
5164 if(!ctx) {
5165 return 0;
5166 }
5167 //netif_start_queue(pm->net_dev);
5168 netif_wake_queue(pm->net_dev);
5169 return 0;
5170}
5171
5172
5173/******************************* kernel module PPP/tty PM functionality **********************************/
5174struct cp_lkm_pm_ppp {
5175 struct cp_lkm_pm_common common;
5176 u8 *no_carrier_ptr;
5177 bool in_frame;
5178
5179 struct tty_struct *tty; // pointer to the tty for this device
5180 int minor;
5181 int open_count;
5182};
5183
5184#define CP_TTY_MINORS 10
5185#define CP_TTY_DEVICE_NAME "ttyCP"
5186
5187#define PPP_MGR_NO_CARRIER "NO CARRIER"
5188#define PPP_FLAG 0x7E
5189
5190static struct cp_lkm_pm_ppp *cp_lkm_pm_ppp_table[CP_TTY_MINORS];
5191static struct tty_driver *cp_lkm_pm_tty_driver = NULL;
5192static struct tty_port cp_lkm_pm_tty_port[CP_TTY_MINORS];
5193
5194static void cp_lkm_pm_ppp_finalize(void *arg)
5195{
5196 struct cp_lkm_pm_ppp *pm_ppp = (struct cp_lkm_pm_ppp *)arg;
5197 tty_unregister_device(cp_lkm_pm_tty_driver, pm_ppp->minor);
5198 cp_lkm_pm_ppp_table[pm_ppp->minor] = NULL;
5199 if (pm_ppp->common.edi) {
5200 pm_ppp->common.edi = NULL;
5201 }
5202 // clean the filter list
5203 cp_lkm_pm_filter_empty_list(&pm_ppp->common);
5204}
5205
5206static int cp_lkm_pm_ppp_attach(struct cp_lkm_pm_ctx *mgr, cp_lkm_pm_type_t type, int uid, char *name)
5207{
5208 int minor;
5209 int err;
5210 unsigned long flags;
5211 struct cp_lkm_pm_ppp *pm_ppp;
5212
5213 DEBUG_INFO("%s(%p)", __FUNCTION__, mgr);
5214
5215 //printk("%s() uid: %d, type: %d\n", __FUNCTION__, uid, type);
5216
5217 // find an empty minor device slot and register
5218 for (minor = 0; minor < CP_TTY_MINORS && cp_lkm_pm_ppp_table[minor]; minor++);
5219
5220 if (minor == CP_TTY_MINORS) {
5221 DEBUG_WARN("%s(%p) - out of devices", __FUNCTION__, mgr);
5222 return -ENODEV;
5223 }
5224
5225 if (!(pm_ppp = memref_alloc_and_zero(sizeof(struct cp_lkm_pm_ppp), cp_lkm_pm_ppp_finalize))) {
5226 DEBUG_WARN("%s(%p) - no memory", __FUNCTION__, mgr);
5227 return -ENOMEM;
5228 }
5229
5230 err = cp_lkm_common_init(&pm_ppp->common);
5231 if (err) {
5232 return -ENOMEM;
5233 }
5234 pm_ppp->common.type = type;
5235 pm_ppp->common.unique_id = uid;
5236
5237 pm_ppp->no_carrier_ptr = PPP_MGR_NO_CARRIER;
5238
5239 pm_ppp->minor = minor;
5240
5241 cp_lkm_pm_ppp_table[minor] = pm_ppp;
5242 sprintf(name, "%s%d", CP_TTY_DEVICE_NAME, minor);
5243
5244 //printk("%s(%p) attached\n", __FUNCTION__, &pm_ppp->common);
5245 pm_ppp->common.attached = 1;
5246 pm_ppp->open_count = 0;
5247
5248 spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
5249 list_add(&pm_ppp->common.list, &mgr->pm_list);
5250 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5251
5252 tty_port_register_device(&cp_lkm_pm_tty_port[minor], cp_lkm_pm_tty_driver, minor, NULL);
5253
5254 return 0;
5255}
5256
5257static int cp_lkm_pm_ppp_detach(struct cp_lkm_pm_ctx *mgr, int uid)
5258{
5259
5260 // find the object in the list
5261 struct list_head *pos;
5262 struct cp_lkm_pm_common *pm = NULL;
5263 struct cp_lkm_pm_ppp *pm_ppp;
5264 unsigned long flags;
5265
5266 DEBUG_INFO("%s(%p)", __FUNCTION__, mgr);
5267 //printk("%s() uid: %d\n", __FUNCTION__, uid);
5268
5269 spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
5270 list_for_each(pos, &mgr->pm_list){
5271 struct cp_lkm_pm_common *pm_tmp = list_entry(pos, struct cp_lkm_pm_common, list);
5272 if(pm_tmp->unique_id == uid) {
5273 pm = pm_tmp;
5274 break;
5275 }
5276 }
5277
5278 if (!pm) {
5279 // already detached
5280 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5281 DEBUG_INFO("%s() already detached", __FUNCTION__);
5282 return 0;
5283 }
5284
5285 // remove the object
5286 list_del(&pm->list);
5287
5288 pm_ppp = (struct cp_lkm_pm_ppp *)pm;
5289
5290 //printk("%s() !attached\n", __FUNCTION__);
5291 pm->attached = 0;
5292
5293 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5294
5295 // clean the filter list
5296 cp_lkm_pm_filter_empty_list(pm);
5297
5298 cp_lkm_common_deinit(pm);
5299
5300 memref_deref(pm_ppp);
5301
5302 return 0;
5303}
5304
5305static int cp_lkm_pm_ppp_activate(struct cp_lkm_pm_ctx *mgr, int uid, bool activate)
5306{
5307 // find the object in the list
5308 struct list_head *pos;
5309 struct cp_lkm_pm_common *pm = NULL;
5310 unsigned long flags;
5311
5312 spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
5313 list_for_each(pos, &mgr->pm_list){
5314 struct cp_lkm_pm_common *pm_tmp = list_entry(pos, struct cp_lkm_pm_common, list);
5315 if(pm_tmp->unique_id == uid) {
5316 pm = pm_tmp;
5317 break;
5318 }
5319 }
5320
5321 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5322
5323 if (!pm) {
5324 // already detached
5325 DEBUG_INFO("%s() already detached", __FUNCTION__);
5326 return 0;
5327 }
5328 //printk("%s(%p) activate: %d, attached: %d\n", __FUNCTION__, pm, activate, pm->attached);
5329
5330 if (activate) {
5331 if (pm->edi) {
5332 pm->edi->pm_recv_ctx = pm;
5333 }
5334 } else {
5335 if (pm->edi) {
5336 pm->edi->pm_recv_ctx = NULL;
5337 //printk("pm_recv_ctx null\n");
5338 }
5339 // clean the filter list
5340 cp_lkm_pm_filter_empty_list(pm);
5341 }
5342
5343 return 0;
5344}
5345
5346
5347static int cp_lkm_pm_tty_open(struct tty_struct * tty, struct file * filp)
5348{
5349 struct cp_lkm_pm_ppp *pm_ppp;
5350 int index;
5351 unsigned long flags;
5352
5353 DEBUG_INFO("%s()", __FUNCTION__);
5354
5355 index = tty->index;
5356
5357 // get the pm_ppp associated with this tty pointer
5358 spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
5359 pm_ppp = cp_lkm_pm_ppp_table[index];
5360 if (!pm_ppp /*|| tty->driver_data */|| !pm_ppp->common.attached) {
5361 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5362 return -EINVAL;
5363 }
5364
5365 if (pm_ppp->open_count++) {
5366 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5367 return 0;
5368 }
5369
5370 memref_ref(pm_ppp);
5371 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5372
5373 // save our structure within the tty structure
5374 tty->driver_data = pm_ppp;
5375 pm_ppp->tty = tty;
5376
5377 // XXX 3.10 hack
5378 //tty->low_latency = 0;
5379
5380 return 0;
5381}
5382
5383static void cp_lkm_pm_tty_close(struct tty_struct * tty, struct file * filp)
5384{
5385 struct cp_lkm_pm_ppp *pm_ppp;
5386 unsigned long flags;
5387
5388 DEBUG_INFO("%s()", __FUNCTION__);
5389
5390 pm_ppp = tty->driver_data;
5391 if(!pm_ppp) {
5392 return;
5393 }
5394
5395 spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
5396 if (--pm_ppp->open_count) {
5397 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5398 return;
5399 }
5400 tty->driver_data = NULL;
5401 pm_ppp->tty = NULL;
5402 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5403 memref_deref(pm_ppp);
5404}
5405static bool cp_lkm_pm_ppp_check_match(struct cp_lkm_pm_ppp *pm_ppp, u8 ch)
5406{
5407 if (*(pm_ppp->no_carrier_ptr) == ch) {
5408 // character match - advance to next character
5409 pm_ppp->no_carrier_ptr++;
5410 if (! *(pm_ppp->no_carrier_ptr)) {
5411 // end of no carrier string - found oob no carrier
5412 return true;
5413 }
5414 return false;
5415 }
5416 // characters don't match
5417 if (pm_ppp->no_carrier_ptr != (u8 *)PPP_MGR_NO_CARRIER) {
5418 // characters don't match - start over
5419 pm_ppp->no_carrier_ptr = (u8 *)PPP_MGR_NO_CARRIER;
5420 // check not matching character against first character of no carrier - 1 level of recursion
5421 return cp_lkm_pm_ppp_check_match(pm_ppp, ch);
5422 }
5423
5424 return false;
5425}
5426
5427static bool cp_lkm_pm_ppp_is_no_carrier(struct cp_lkm_pm_ppp *pm_ppp, struct sk_buff *skb)
5428{
5429 // search thru skb for data between frame markers for NO CARRIER
5430 bool no_carrier = false;
5431 unsigned int len = skb->len;
5432 u8 *pos = skb->data;
5433
5434 DEBUG_TRACE("%s()", __FUNCTION__);
5435
5436 while (len--) {
5437 if (PPP_FLAG == (*pos)) {
5438 pm_ppp->in_frame = !pm_ppp->in_frame;
5439 } else if (!pm_ppp->in_frame) {
5440 // look for match
5441 no_carrier = cp_lkm_pm_ppp_check_match(pm_ppp, *pos);
5442 if (no_carrier) {
5443 DEBUG_INFO("%s() found no carrier", __FUNCTION__);
5444 return true;
5445 }
5446 } else {
5447 pm_ppp->no_carrier_ptr = PPP_MGR_NO_CARRIER;
5448 }
5449
5450 pos++;
5451 }
5452
5453 return false;
5454}
5455
5456static void cp_lkm_pm_ppp_get_hdr_size(void *ctx, int wrapper_hdr_size, int* hdr_size, int* hdr_offset)
5457{
5458 *hdr_size = 0;
5459 *hdr_offset = 0;
5460}
5461
5462// called in soft interrupt context
5463static int cp_lkm_pm_ppp_recv(void *ctx, struct sk_buff *skb)
5464{
5465#ifdef KERNEL_2_6_21
5466 int size;
5467#endif
5468 struct cp_lkm_pm_ppp *pm_ppp;
5469 bool oob_no_carrier;
5470
5471 if(NULL == ctx || !skb->len) {
5472 DEBUG_INFO("%s() - null ctx - dropped", __FUNCTION__);
5473 goto done;
5474 }
5475
5476 pm_ppp = (struct cp_lkm_pm_ppp *)ctx;
5477
5478 if (!pm_ppp) {
5479 DEBUG_INFO("%s() - NULL pm_ppp - dropped", __FUNCTION__);
5480 goto done;
5481 }
5482
5483 // check for OOB NO CARRIER - signal up through file descriptor
5484 oob_no_carrier = cp_lkm_pm_ppp_is_no_carrier(pm_ppp, skb);
5485 if (oob_no_carrier) {
5486 struct cp_lkm_msg_hdr hdr;
5487
5488 DEBUG_INFO("%s() - posting no carrier", __FUNCTION__);
5489 memset(&hdr,0,sizeof(hdr));
5490 hdr.instance_id = pm_ppp->common.unique_id;
5491 hdr.cmd = CP_LKM_PM_LINK_DOWN;
5492 hdr.status = CP_LKM_STATUS_OK;
5493 hdr.len = 0;
5494
5495 LOG("Received NO CARRIER\n");
5496 DEBUG_INFO("%s() - posting link down", __FUNCTION__);
5497 cp_lkm_post_message(&cp_lkm_pm_mgr.common, &hdr, NULL);
5498
5499 goto done;
5500 }
5501
5502 if (!pm_ppp->tty || !pm_ppp->tty->driver_data) {
5503 DEBUG_INFO("%s() - not setup - dropped", __FUNCTION__);
5504 goto done;
5505 }
5506
5507#ifdef KERNEL_2_6_21
5508 size = tty_buffer_request_room(pm_ppp->tty, skb->len);
5509 if(size < skb->len) {
5510 // dropped data - or we need to queue for later
5511 DEBUG_WARN("%s() - dropping network data", __FUNCTION__);
5512 goto done;
5513 }
5514#endif
5515
5516 tty_insert_flip_string(pm_ppp->tty->port, skb->data, skb->len);
5517 tty_flip_buffer_push(pm_ppp->tty->port);
5518
5519done:
5520 dev_kfree_skb_any(skb);
5521 return 0;
5522}
5523
5524// this can be called from interrupt thread or normal kernel thread
5525static int cp_lkm_pm_tty_write(struct tty_struct * tty, const unsigned char *buf, int count)
5526{
5527 struct cp_lkm_pm_ppp *pm_ppp;
5528 struct sk_buff *skb;
5529 int link_res;
5530 int retval = count;
5531
5532 if (!count) {
5533 //printk("%s() !count \n", __FUNCTION__);
5534 return 0;
5535 }
5536
5537 pm_ppp = (struct cp_lkm_pm_ppp *)tty->driver_data;
5538
5539 if (!pm_ppp) {
5540 //printk("%s() !pm_ppp \n", __FUNCTION__);
5541 return -EINVAL;
5542 }
5543
5544 //printk("%s(%p) id:%d, attached: %d\n", __FUNCTION__, &pm_ppp->common, pm_ppp->common.unique_id, pm_ppp->common.attached);
5545
5546 //see if we can grab the link lock, if not, we are either bringing up or taking down the link between USB and PM, so not safe to proceed
5547 link_res = cp_lkm_common_inc_link_lock(&pm_ppp->common);
5548 if(link_res < 0) {
5549 //printk("%s() !link \n", __FUNCTION__);
5550 return 0;
5551 }
5552
5553 /* Drop packet if interface is not attached */
5554 if (!pm_ppp->common.attached){
5555 retval = 0;
5556 //printk("%s() !attached: %d \n", __FUNCTION__, pm_ppp->common.attached);
5557 goto drop;
5558 }
5559
5560 if (!(pm_ppp->common.edi) || !(pm_ppp->common.edi->usb_send) || !(pm_ppp->common.edi->usb_send_ctx)) {
5561 retval = 0;
5562 //printk("%s() !edi \n", __FUNCTION__);
5563 goto drop;
5564 }
5565
5566 //benk check for enabled filter - send in buffer pointer to ip header
5567
5568 // alloc skb to send
5569 if ((skb = alloc_skb (count, GFP_ATOMIC)) == NULL) {
5570 retval = -ENOMEM;
5571 goto pm_tty_write_done;
5572 }
5573
5574 memcpy(skb->data, buf, count);
5575 skb->len = count;
5576 skb_set_tail_pointer(skb, skb->len);
5577
5578 // send data to USB module
5579 pm_ppp->common.edi->usb_send(pm_ppp->common.edi->usb_send_ctx, skb);
5580 retval = count;
5581 goto pm_tty_write_done;
5582
5583drop:
5584pm_tty_write_done:
5585 cp_lkm_common_dec_link_lock(&pm_ppp->common);
5586 //printk("%s() done\n", __FUNCTION__);
5587
5588 return retval;
5589}
5590
5591static int cp_lkm_pm_tty_write_room(struct tty_struct *tty)
5592{
5593 struct cp_lkm_pm_ppp *pm_ppp;
5594
5595 DEBUG_INFO("%s()", __FUNCTION__);
5596
5597 pm_ppp = (struct cp_lkm_pm_ppp *)tty->driver_data;
5598
5599 if (!pm_ppp) {
5600 return -EINVAL;
5601 }
5602
5603 return 2048;
5604}
5605
5606static int cp_lkm_pm_tty_chars_in_buffer(struct tty_struct *tty)
5607{
5608 struct cp_lkm_pm_ppp *pm_ppp;
5609
5610 DEBUG_INFO("%s()", __FUNCTION__);
5611
5612 pm_ppp = (struct cp_lkm_pm_ppp *)tty->driver_data;
5613
5614 if (!pm_ppp) {
5615 return -EINVAL;
5616 }
5617
5618 return 0;
5619}
5620
5621static void cp_lkm_pm_tty_set_termios(struct tty_struct *tty, struct ktermios * old)
5622{
5623 DEBUG_INFO("%s()", __FUNCTION__);
5624
5625}
5626
5627#ifdef KERNEL_2_6_21
5628static int cp_lkm_pm_tty_ioctl(struct tty_struct *tty, struct file * file, unsigned int cmd, unsigned long arg)
5629#else
5630static int cp_lkm_pm_tty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
5631#endif
5632{
5633 struct cp_lkm_pm_ppp *pm_ppp;
5634
5635 DEBUG_TRACE("%s(%x)", __FUNCTION__, cmd);
5636
5637 pm_ppp = (struct cp_lkm_pm_ppp *)tty->driver_data;
5638
5639 if (!pm_ppp) {
5640 return -EINVAL;
5641 }
5642
5643 return -ENOIOCTLCMD;
5644}
5645
5646static struct tty_operations cp_lkm_pm_tty_ops = {
5647.open = cp_lkm_pm_tty_open,
5648.close = cp_lkm_pm_tty_close,
5649.write = cp_lkm_pm_tty_write,
5650.write_room = cp_lkm_pm_tty_write_room,
5651.chars_in_buffer = cp_lkm_pm_tty_chars_in_buffer,
5652.set_termios = cp_lkm_pm_tty_set_termios,
5653.ioctl = cp_lkm_pm_tty_ioctl
5654
5655/*
5656.throttle = acm_tty_throttle,
5657.unthrottle = acm_tty_unthrottle,
5658*/
5659};
5660
5661static int cp_lkm_pm_tty_init(void)
5662{
5663 int retval;
5664 int i;
5665
5666 for(i = 0; i < CP_TTY_MINORS; i++) {
5667 tty_port_init(&cp_lkm_pm_tty_port[i]);
5668 }
5669
5670 cp_lkm_pm_tty_driver = alloc_tty_driver(CP_TTY_MINORS);
5671 if (!cp_lkm_pm_tty_driver) {
5672 return -ENOMEM;
5673 }
5674
5675 // initialize the tty driver
5676 cp_lkm_pm_tty_driver->owner = THIS_MODULE;
5677 cp_lkm_pm_tty_driver->driver_name = "cptty";
5678 cp_lkm_pm_tty_driver->name = CP_TTY_DEVICE_NAME;
5679 cp_lkm_pm_tty_driver->major = 0; // dynamically assign major number
5680 cp_lkm_pm_tty_driver->minor_start = 0,
5681 cp_lkm_pm_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
5682 cp_lkm_pm_tty_driver->subtype = SERIAL_TYPE_NORMAL;
5683 cp_lkm_pm_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
5684 cp_lkm_pm_tty_driver->init_termios = tty_std_termios;
5685 tty_set_operations(cp_lkm_pm_tty_driver, &cp_lkm_pm_tty_ops);
5686
5687 retval = tty_register_driver(cp_lkm_pm_tty_driver);
5688 if (retval) {
5689 DEBUG_ERROR("%s() failed to register cp tty driver", __FUNCTION__);
5690 put_tty_driver(cp_lkm_pm_tty_driver);
5691 for(i = 0; i < CP_TTY_MINORS; i++) {
5692 tty_port_destroy(&cp_lkm_pm_tty_port[i]);
5693 }
5694 }
5695 return retval;
5696
5697}
5698
5699static void cp_lkm_pm_tty_cleanup(void)
5700{
5701 int i;
5702 if (cp_lkm_pm_tty_driver) {
5703 tty_unregister_driver(cp_lkm_pm_tty_driver);
5704 put_tty_driver(cp_lkm_pm_tty_driver);
5705 for(i = 0; i < CP_TTY_MINORS; i++) {
5706 tty_port_destroy(&cp_lkm_pm_tty_port[i]);
5707 }
5708 cp_lkm_pm_tty_driver = NULL;
5709 }
5710}
5711
5712/******************************* kernel module PM mgr functionality **********************************/
5713
5714
5715static int cp_lkm_pm_open(struct cp_lkm_common_ctx *ctx);
5716static int cp_lkm_pm_close(struct cp_lkm_common_ctx *ctx);
5717static int cp_lkm_pm_handle_msg(struct cp_lkm_common_ctx *ctx, struct cp_lkm_msg_hdr *hdr, struct sk_buff *skb);
5718static int cp_lkm_pm_handle_ioctl(struct cp_lkm_common_ctx *ctx, int cmd, void *k_argp);
5719
5720
5721static int cp_lkm_pm_init(void)
5722{
5723 DEBUG_INFO("%s()", __FUNCTION__);
5724
5725 memset(&cp_lkm_pm_mgr, 0x00, sizeof(struct cp_lkm_pm_ctx));
5726 cp_lkm_pm_mgr.common.open = cp_lkm_pm_open;
5727 cp_lkm_pm_mgr.common.close = cp_lkm_pm_close;
5728 cp_lkm_pm_mgr.common.handle_msg = cp_lkm_pm_handle_msg;
5729 cp_lkm_pm_mgr.common.handle_ioctl = cp_lkm_pm_handle_ioctl;
5730 INIT_LIST_HEAD(&cp_lkm_pm_mgr.pm_list);
5731 spin_lock_init(&cp_lkm_pm_mgr.pm_list_lock);
5732
5733 cp_lkm_common_ctx_init(&cp_lkm_pm_mgr.common);
5734
5735 return 0;
5736}
5737
5738static int cp_lkm_pm_cleanup(void)
5739{
5740 struct cp_lkm_pm_common *pmi;
5741 struct list_head *entry, *tmp;
5742 unsigned long flags;
5743
5744 DEBUG_INFO("%s()", __FUNCTION__);
5745
5746 // clean up msg list
5747 cp_lkm_cleanup_msg_list(&cp_lkm_pm_mgr.common);
5748
5749 // cleanup any PM in list
5750 spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
5751
5752 list_for_each_safe(entry, tmp, &cp_lkm_pm_mgr.pm_list) {
5753 pmi = list_entry(entry, struct cp_lkm_pm_common, list);
5754 if (pmi->edi) {
5755 pmi->edi->pm_recv_ctx = NULL;
5756 //printk("pm_recv_ctx null\n");
5757 pmi->edi->pm_stats64_ctx = NULL;
5758 pmi->edi = NULL;
5759 }
5760 list_del(&pmi->list);
5761 // clean the filter list
5762 cp_lkm_pm_filter_empty_list(pmi);
5763
5764 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5765 if (pmi->net_dev) {
5766 // network device
5767 cp_lkm_common_deinit(pmi);
5768 unregister_netdev(pmi->net_dev);
5769 free_netdev(pmi->net_dev); // this also frees the pmi since it was allocated as part of the net_dev
5770 } else {
5771 // tty device
5772 memref_deref(pmi);
5773 }
5774
5775 spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
5776 }
5777 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5778
5779 return 0;
5780}
5781
5782static int cp_lkm_pm_open(struct cp_lkm_common_ctx *ctx)
5783{
5784// struct cp_lkm_pm_ctx *pm_mgr;
5785
5786 DEBUG_INFO("%s(%p)", __FUNCTION__, ctx);
5787
5788// pm_mgr = (struct cp_lkm_pm_ctx *)ctx;
5789
5790 return 0;
5791}
5792
5793static int cp_lkm_pm_close(struct cp_lkm_common_ctx *ctx)
5794{
5795 //struct cp_lkm_pm_ctx *pm_mgr = (struct cp_lkm_pm_ctx *)ctx;
5796 //struct cp_lkm_pm_common *pm_tmp = NULL;
5797 //struct list_head *entry, *tmp;
5798 //unsigned long flags;
5799
5800 LOG("%s() called unexpectedly.", __FUNCTION__);
5801
5802 //NOTE: catkin 10/11/2019 - Close is only called in our system if the modem stack crashes. This means
5803 // things are in a bad state and the router will be rebooting. We decided not
5804 // to clean things up here because close code on usb side got into an infinite loop
5805 // and prevented the router from rebooting. Revisit if close ever becomes a normal event.
5806
5807 /*
5808 spin_lock_irqsave(&pm_mgr->pm_list_lock, flags);
5809
5810 list_for_each_safe(entry, tmp, &pm_mgr->pm_list) {
5811 pm_tmp = list_entry(entry, struct cp_lkm_pm_common, list);
5812 spin_unlock_irqrestore(&pm_mgr->pm_list_lock, flags);
5813
5814 // call detach to clean up network interface
5815 if (CP_LKM_PM_TYPE_PPP_CLIENT == pm_tmp->type || CP_LKM_PM_TYPE_PPP_SERVER == pm_tmp->type) {
5816 cp_lkm_pm_ppp_detach(pm_mgr, pm_tmp->unique_id);
5817 } else {
5818 cp_lkm_pm_net_detach(pm_mgr, pm_tmp->unique_id);
5819 }
5820 }
5821
5822 spin_unlock_irqrestore(&pm_mgr->pm_list_lock, flags);
5823
5824 cp_lkm_cleanup_msg_list(ctx);
5825 */
5826 return 0;
5827}
5828
5829static int cp_lkm_pm_handle_msg(struct cp_lkm_common_ctx *ctx, struct cp_lkm_msg_hdr *hdr, struct sk_buff *skb)
5830{
5831 struct cp_lkm_pm_ctx *pm_mgr;
5832
5833 //printk("%s(%p)\n", __FUNCTION__, ctx);
5834
5835 pm_mgr = (struct cp_lkm_pm_ctx *)ctx;
5836
5837
5838 // how to write back response with common function?
5839 if (skb) {
5840 kfree(skb);
5841 }
5842
5843 return 0;
5844}
5845
5846static int cp_lkm_pm_add_filter(struct cp_lkm_pm_ctx *mgr, int uid, struct cp_lkm_pm_filter *filter)
5847{
5848 // find the object in the list
5849 struct list_head *pos;
5850 struct cp_lkm_pm_common *pm = NULL;
5851 unsigned long flags;
5852 struct cp_lkm_pm_filter *new_filter;
5853
5854 DEBUG_TRACE("%s(%p)", __FUNCTION__, mgr);
5855
5856 spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
5857 list_for_each(pos, &mgr->pm_list){
5858 struct cp_lkm_pm_common *pm_tmp = list_entry(pos, struct cp_lkm_pm_common, list);
5859 if(pm_tmp->unique_id == uid) {
5860 pm = pm_tmp;
5861 break;
5862 }
5863 }
5864
5865 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5866
5867 if (!pm) {
5868 DEBUG_WARN("%s() pm not attached", __FUNCTION__);
5869 return -ENODEV;
5870 }
5871
5872 new_filter = kmalloc(sizeof(struct cp_lkm_pm_filter), GFP_ATOMIC);
5873 if (!new_filter) {
5874 DEBUG_WARN("%s() - failed to alloc filter\n", __FUNCTION__);
5875 return -1;
5876 }
5877
5878 memcpy(new_filter, filter, sizeof(struct cp_lkm_pm_filter));
5879 INIT_LIST_HEAD(&new_filter->list);
5880
5881 list_add_tail(&new_filter->list, &pm->filter_list);
5882
5883 return 0;
5884}
5885
5886static int cp_lkm_pm_handle_ioctl(struct cp_lkm_common_ctx *ctx, int cmd, void *k_argp)
5887{
5888 struct cp_lkm_pm_ctx *pm_mgr;
5889 int result = 0;
5890 struct cp_lkm_pm_attach_ioctl *attach_params;
5891 struct cp_lkm_pm_detach_ioctl *detach_params;
5892 struct cp_lkm_pm_activate_deactivate_ioctl *activate_params;
5893 struct cp_lkm_pm_add_filter_ioctl *filter_params;
5894
5895 char name[CP_LKM_MAX_IF_NAME];
5896 unsigned long not_copied;
5897
5898 //printk("%s(%p) cmd:%d\n", __FUNCTION__, ctx, _IOC_NR(cmd));
5899
5900 pm_mgr = (struct cp_lkm_pm_ctx *)ctx;
5901
5902 switch (cmd) {
5903 case CP_LKM_IOCTL_PM_ATTACH:
5904 attach_params = (struct cp_lkm_pm_attach_ioctl *)k_argp;
5905 not_copied = copy_from_user(name, attach_params->name, CP_LKM_MAX_IF_NAME);
5906 if (not_copied) {
5907 return -ENOMEM;
5908 }
5909 DEBUG_INFO("%s(%s) attach", __FUNCTION__, name);
5910 switch(attach_params->type) {
5911 case CP_LKM_PM_TYPE_PPP_CLIENT:
5912 case CP_LKM_PM_TYPE_PPP_SERVER:
5913 result = cp_lkm_pm_ppp_attach(pm_mgr, attach_params->type, attach_params->uid, name);
5914 if (!result) {
5915 not_copied = copy_to_user(attach_params->name, name, CP_LKM_MAX_IF_NAME);
5916 if (not_copied) {
5917 return -ENOMEM;
5918 }
5919 }
5920 break;
5921 case CP_LKM_PM_TYPE_ETHERNET_DHCP:
5922 case CP_LKM_PM_TYPE_ETHERNET_STATIC:
5923 case CP_LKM_PM_TYPE_ETHERNET_STATIC_NOARP:
5924 case CP_LKM_PM_TYPE_IP_STATIC:
5925 case CP_LKM_PM_TYPE_IP_DHCP:
5926 result = cp_lkm_pm_net_attach(pm_mgr, attach_params->type, attach_params->uid, name, attach_params->mac);
5927 break;
5928 default:
5929 result = -ENOTSUPP;
5930 break;
5931 }
5932 break;
5933 case CP_LKM_IOCTL_PM_DETACH:
5934 detach_params = (struct cp_lkm_pm_detach_ioctl *)k_argp;
5935 DEBUG_INFO("%s() detach uid:%d", __FUNCTION__, detach_params->uid);
5936 switch(detach_params->type) {
5937 case CP_LKM_PM_TYPE_PPP_CLIENT:
5938 case CP_LKM_PM_TYPE_PPP_SERVER:
5939 result = cp_lkm_pm_ppp_detach(pm_mgr, detach_params->uid);
5940 break;
5941 case CP_LKM_PM_TYPE_ETHERNET_DHCP:
5942 case CP_LKM_PM_TYPE_ETHERNET_STATIC:
5943 case CP_LKM_PM_TYPE_ETHERNET_STATIC_NOARP:
5944 case CP_LKM_PM_TYPE_IP_STATIC:
5945 case CP_LKM_PM_TYPE_IP_DHCP:
5946 result = cp_lkm_pm_net_detach(pm_mgr, detach_params->uid);
5947 break;
5948 default:
5949 result = -ENOTSUPP;
5950 break;
5951 }
5952 break;
5953 case CP_LKM_IOCTL_PM_ACTIVATE:
5954 activate_params = (struct cp_lkm_pm_activate_deactivate_ioctl *)k_argp;
5955 switch(activate_params->type) {
5956 case CP_LKM_PM_TYPE_PPP_CLIENT:
5957 case CP_LKM_PM_TYPE_PPP_SERVER:
5958 result = cp_lkm_pm_ppp_activate(pm_mgr, activate_params->uid, true);
5959 break;
5960 case CP_LKM_PM_TYPE_ETHERNET_DHCP:
5961 case CP_LKM_PM_TYPE_ETHERNET_STATIC:
5962 case CP_LKM_PM_TYPE_ETHERNET_STATIC_NOARP:
5963 case CP_LKM_PM_TYPE_IP_STATIC:
5964 case CP_LKM_PM_TYPE_IP_DHCP:
5965 result = cp_lkm_pm_net_activate(pm_mgr, activate_params->uid, true);
5966 break;
5967 default:
5968 result = -ENOTSUPP;
5969 break;
5970 }
5971 break;
5972 case CP_LKM_IOCTL_PM_DEACTIVATE:
5973 activate_params = (struct cp_lkm_pm_activate_deactivate_ioctl *)k_argp;
5974 switch(activate_params->type) {
5975 case CP_LKM_PM_TYPE_PPP_CLIENT:
5976 case CP_LKM_PM_TYPE_PPP_SERVER:
5977 result = cp_lkm_pm_ppp_activate(pm_mgr, activate_params->uid, false);
5978 break;
5979 case CP_LKM_PM_TYPE_ETHERNET_DHCP:
5980 case CP_LKM_PM_TYPE_ETHERNET_STATIC:
5981 case CP_LKM_PM_TYPE_ETHERNET_STATIC_NOARP:
5982 case CP_LKM_PM_TYPE_IP_STATIC:
5983 case CP_LKM_PM_TYPE_IP_DHCP:
5984 result = cp_lkm_pm_net_activate(pm_mgr, activate_params->uid, false);
5985 break;
5986 default:
5987 result = -ENOTSUPP;
5988 break;
5989 }
5990 break;
5991 case CP_LKM_IOCTL_PM_ADD_FILTER:
5992 filter_params = (struct cp_lkm_pm_add_filter_ioctl *)k_argp;
5993 result = cp_lkm_pm_add_filter(pm_mgr, filter_params->uid, &filter_params->filter);
5994 break;
5995 default:
5996 break;
5997 }
5998
5999 return result;
6000}
6001
6002static bool cp_lkm_pm_usb_do_link_lock(void* ctx1, void* ctx2)
6003{
6004 struct cp_lkm_pm_common *pm = (struct cp_lkm_pm_common*)ctx1;
6005 bool done = false;
6006 unsigned long flags;
6007 // grab the lock and set the link_count. The link_count is used to keep send and poll from
6008 // being called over to the USB layer while we are mucking with the send and poll pointers
6009 spin_lock_irqsave(&pm->pm_link_lock, flags);
6010 if(pm->pm_link_count <= 0) {
6011 pm->pm_link_count = -1;
6012 done = true;
6013 }
6014 spin_unlock_irqrestore(&pm->pm_link_lock, flags);
6015
6016 return done;
6017}
6018
6019// This function changes the shared edi pointers.
6020// !!!It is the only function in the pm that is permitted to change edi function pointers!!!
6021// Other functions can change the ctxt pointers
6022static int cp_lkm_pm_usb_link(struct cp_lkm_edi *edi, int pm_unique_id, int link)
6023{
6024 struct list_head *pos;
6025 struct cp_lkm_pm_common *pm = NULL;
6026 unsigned long flags;
6027 struct cp_lkm_edi *tmp_edi;
6028
6029 spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
6030 list_for_each(pos, &cp_lkm_pm_mgr.pm_list){
6031 struct cp_lkm_pm_common *pm_tmp = list_entry(pos, struct cp_lkm_pm_common, list);
6032 if(pm_tmp->unique_id == pm_unique_id) {
6033 pm = pm_tmp;
6034 break;
6035 }
6036 }
6037 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
6038
6039 if (!pm) {
6040 // couldn't find object
6041 //printk("%s() unable to find protocol manager with id:%d\n", __FUNCTION__, pm_unique_id);
6042 return -EINVAL;
6043 }
6044
6045 //printk("%s() pm_net: %p\n", __FUNCTION__, pm);
6046
6047 // grab the lock and set the link_count. The link_count is used to keep send and poll from
6048 // being called over to the USB layer while we are mucking with the send and poll pointers
6049 cp_lkm_do_or_die(pm, NULL, cp_lkm_pm_usb_do_link_lock, CP_LKM_TIMEOUT_MS, CP_LKM_ITER, "Failed to grab cp pm lock");
6050
6051 //printk("%s() pm: %p, attached: %d, pm_type: %d\n", __FUNCTION__, pm, pm->attached,pm->type);
6052 tmp_edi = pm->edi;
6053 pm->edi = NULL;
6054 if (link) {
6055 if (tmp_edi) {
6056 // already linked - unlink from previous edi
6057 // just a precaution, should never happen
6058 tmp_edi->pm_recv = NULL;
6059 tmp_edi->pm_recv_ctx = NULL;
6060 tmp_edi->pm_get_hdr_size = NULL;
6061
6062 //printk("pm_recv_ctx null\n");
6063 tmp_edi->pm_send_pause = NULL;
6064 tmp_edi->pm_send_resume = NULL;
6065
6066 tmp_edi->pm_stats64_ctx = NULL;
6067
6068 //pm->edi = NULL;
6069 }
6070
6071 tmp_edi = edi;
6072 tmp_edi->pm_recv_ctx = pm;
6073
6074 switch(pm->type) {
6075 case CP_LKM_PM_TYPE_PPP_CLIENT:
6076 case CP_LKM_PM_TYPE_PPP_SERVER:
6077 tmp_edi->pm_recv = cp_lkm_pm_ppp_recv;
6078 tmp_edi->pm_get_hdr_size = cp_lkm_pm_ppp_get_hdr_size;
6079 tmp_edi->pm_stats64_ctx = NULL;
6080 break;
6081 default:
6082 tmp_edi->pm_recv = cp_lkm_pm_net_recv;
6083 tmp_edi->pm_get_hdr_size = cp_lkm_pm_net_get_hdr_size;
6084 tmp_edi->pm_send_pause = cp_lkm_pm_net_pause;
6085 tmp_edi->pm_send_resume = cp_lkm_pm_net_resume;
6086 tmp_edi->pm_stats64_ctx = pm;
6087 break;
6088 }
6089
6090 pm->edi = tmp_edi;
6091
6092 // release the link_count on link so things can start flowing.
6093 // don't release it on unlink since we don't want things to flow when unlinked
6094 spin_lock_irqsave(&pm->pm_link_lock, flags);
6095 pm->pm_link_count = 0;
6096 spin_unlock_irqrestore(&pm->pm_link_lock, flags);
6097
6098 } else {
6099 if (tmp_edi) {
6100 tmp_edi->pm_recv = NULL;
6101 tmp_edi->pm_recv_ctx = NULL;
6102 tmp_edi->pm_get_hdr_size = NULL;
6103
6104 //printk("pm_recv_ctx null\n");
6105 tmp_edi->pm_send_pause = NULL;
6106 tmp_edi->pm_send_resume = NULL;
6107 tmp_edi->pm_stats64_ctx = NULL;
6108
6109 //pm->edi = NULL;
6110 }
6111 }
6112
6113 return 0;
6114
6115}
6116
6117/******************** common user/kernel communication functions **************/
6118
6119static void cp_lkm_common_ctx_init(struct cp_lkm_common_ctx *common)
6120{
6121 DEBUG_WARN("%s()", __FUNCTION__);
6122
6123 INIT_LIST_HEAD(&common->read_list);
6124 spin_lock_init(&common->read_list_lock);
6125
6126 init_waitqueue_head(&common->inq);
6127 common->open_cnt = 0;
6128 common->reading_data = false;
6129 common->write_skb = NULL;
6130}
6131
6132static void cp_lkm_cleanup_msg_list(struct cp_lkm_common_ctx *common)
6133{
6134 struct cp_lkm_read_msg *msg;
6135 unsigned long flags;
6136 struct list_head *entry, *tmp;
6137
6138 spin_lock_irqsave(&common->read_list_lock, flags);
6139
6140 list_for_each_safe(entry, tmp, &common->read_list) {
6141 msg = list_entry(entry, struct cp_lkm_read_msg, list);
6142 list_del(&msg->list);
6143 dev_kfree_skb_any(msg->skb);
6144 kfree(msg);
6145 }
6146 spin_unlock_irqrestore(&common->read_list_lock, flags);
6147}
6148
6149// this may be called from soft interrupt context or normal kernel thread context
6150static int cp_lkm_post_message(struct cp_lkm_common_ctx *mgr, struct cp_lkm_msg_hdr* hdr, struct sk_buff *skb)
6151{
6152
6153 struct cp_lkm_read_msg *msg;
6154 unsigned long flags;
6155
6156 msg = kmalloc(sizeof(struct cp_lkm_read_msg), GFP_ATOMIC);
6157 if (!msg) {
6158 if (skb) {
6159 dev_kfree_skb_any(skb);
6160 }
6161 return -ENOMEM;
6162 }
6163
6164 msg->skb = skb;
6165 memcpy(&msg->hdr, hdr, sizeof(struct cp_lkm_msg_hdr));
6166
6167 spin_lock_irqsave(&mgr->read_list_lock, flags);
6168 list_add_tail(&msg->list, &mgr->read_list);
6169 spin_unlock_irqrestore(&mgr->read_list_lock, flags);
6170
6171 mgr->q_waiting = false;
6172
6173 // signal poll
6174 wake_up_interruptible(&mgr->inq);
6175
6176 return 0;
6177}
6178
6179int cp_lkm_open(struct inode *inode, struct file *filp)
6180{
6181
6182 int result = 0;
6183 struct cp_lkm_common_ctx *common;
6184
6185 DEBUG_TRACE("%s()", __FUNCTION__);
6186
6187 try_module_get(THIS_MODULE);
6188
6189 // set private data
6190 if (iminor(inode) == CP_LKM_USB_MGR_MINOR) {
6191 filp->private_data = &cp_lkm_usb_mgr;
6192 common = &cp_lkm_usb_mgr.common;
6193 DEBUG_INFO("%s() open usb manager", __FUNCTION__);
6194 } else if (iminor(inode) == CP_LKM_PM_MGR_MINOR) {
6195 filp->private_data = &cp_lkm_pm_mgr;
6196 common = &cp_lkm_pm_mgr.common;
6197 DEBUG_INFO("%s() open pm manager", __FUNCTION__);
6198 } else {
6199 return -ENOENT;
6200 }
6201
6202 if (common->open_cnt) {
6203 return -EBUSY;
6204 }
6205
6206 common->open_cnt++;
6207
6208 if (common->open) {
6209 result = common->open(common);
6210 }
6211
6212 return result;
6213}
6214
6215int cp_lkm_release(struct inode *inode, struct file *filp)
6216{
6217
6218 int result = 0;
6219 struct cp_lkm_common_ctx *common;
6220 common = (struct cp_lkm_common_ctx *)filp->private_data;
6221
6222 DEBUG_TRACE("%s() release", __FUNCTION__);
6223
6224 if (0 == common->open_cnt) {
6225 return 0;
6226 }
6227
6228 if (common->close) {
6229 result = common->close(common);
6230 }
6231
6232 module_put(THIS_MODULE);
6233
6234 common->open_cnt--;
6235
6236 return result;
6237}
6238
6239// first read is the header
6240// second read is the data. If no data, then no second read
6241// if error in either stage, negative value is returned and next read will be for header
6242// messages are not removed until successfully read header and data (if any)
6243ssize_t cp_lkm_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
6244{
6245
6246 struct cp_lkm_common_ctx *common;
6247 ssize_t result;
6248 struct cp_lkm_read_msg *msg;
6249 unsigned long flags;
6250 unsigned long not_copied;
6251
6252// DEBUG_INFO("%s() reading %d bytes", __FUNCTION__, count);
6253 common = (struct cp_lkm_common_ctx *)filp->private_data;
6254
6255 spin_lock_irqsave(&common->read_list_lock, flags);
6256 if (list_empty(&common->read_list)) {
6257 spin_unlock_irqrestore(&common->read_list_lock, flags);
6258 return -EAGAIN;
6259 }
6260 msg = list_first_entry(&common->read_list, struct cp_lkm_read_msg, list);
6261 spin_unlock_irqrestore(&common->read_list_lock, flags);
6262
6263 if (!common->reading_data) { // header mode
6264 // read header
6265 if (sizeof(struct cp_lkm_msg_hdr) != count) {
6266 return -EINVAL;
6267 }
6268
6269 not_copied = copy_to_user(buf, &msg->hdr, sizeof(struct cp_lkm_msg_hdr));
6270 if (not_copied) {
6271 return -ENOMEM;
6272 }
6273
6274 if (!msg->hdr.len) {
6275 result = count;
6276 goto read_free;
6277 }
6278
6279 // switch to data mode
6280 common->reading_data = !common->reading_data;
6281 return count;
6282 }
6283
6284 // switch to header mode
6285 common->reading_data = !common->reading_data;
6286
6287 // data mode - handle the data transfer
6288 if (msg->hdr.len != count) {
6289 return -EINVAL;
6290 }
6291
6292 not_copied = copy_to_user(buf, msg->skb->data, msg->hdr.len);
6293
6294 if (not_copied) {
6295 return -ENOMEM;
6296 }
6297
6298 result = count;
6299
6300read_free:
6301 spin_lock_irqsave(&common->read_list_lock, flags);
6302 list_del(&msg->list);
6303 spin_unlock_irqrestore(&common->read_list_lock, flags);
6304
6305 if (msg->skb) {
6306 dev_kfree_skb_any(msg->skb);
6307 }
6308 kfree(msg);
6309
6310 return result;
6311}
6312// the user must write the header first
6313// then the user must write the data equivalent to the hdr.len
6314// on error, a negative value is returned and the entire message is lost
6315// on error, the next write must be header
6316ssize_t cp_lkm_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos)
6317{
6318 struct cp_lkm_common_ctx *common;
6319 unsigned long not_copied;
6320 int result;
6321 struct sk_buff *skb = NULL;
6322 struct cp_lkm_msg_hdr hdr;
6323 struct cp_lkm_msg_hdr *hdrp;
6324
6325// DEBUG_INFO("%s() writing %d bytes", __FUNCTION__, count);
6326
6327 common = (struct cp_lkm_common_ctx *)filp->private_data;
6328
6329 if (!common->write_skb) {
6330 // handle the header
6331 if (count != sizeof(struct cp_lkm_msg_hdr)) {
6332 return -EINVAL;
6333 }
6334 not_copied = copy_from_user(&hdr, buf, count);
6335 if (not_copied) {
6336 return -ENOMEM;
6337 }
6338
6339 if ((skb = alloc_skb (count + hdr.len, GFP_KERNEL)) == NULL) {
6340 return -ENOMEM;
6341 }
6342
6343 memcpy(skb->data, &hdr, count);
6344
6345 // setup skb pointers - skb->data points to message data with header immediately before skb->data
6346 skb->len = hdr.len;
6347 skb->data += sizeof(struct cp_lkm_msg_hdr);
6348 skb_set_tail_pointer(skb, hdr.len);
6349
6350 if (!hdr.len) {
6351 goto send_msg;
6352 }
6353
6354 // save until we get the data
6355 common->write_skb = skb;
6356
6357 return count;
6358 }
6359
6360 // handle the data
6361 skb = common->write_skb;
6362 common->write_skb = NULL;
6363
6364 hdrp = (struct cp_lkm_msg_hdr *)(skb->data) - 1;
6365 if (count != hdrp->len) {
6366 dev_kfree_skb_any(skb);
6367 return -EINVAL;
6368 }
6369
6370 not_copied = copy_from_user(skb->data, buf, count);
6371 if (not_copied) {
6372 dev_kfree_skb_any(skb);
6373 return -ENOMEM;
6374 }
6375
6376
6377send_msg:
6378 if (common->handle_msg) {
6379 result = common->handle_msg(common, (struct cp_lkm_msg_hdr *)(skb->data) - 1, skb);
6380 if (result) {
6381 return result;
6382 }
6383 }
6384
6385 return count;
6386}
6387
6388unsigned int cp_lkm_poll(struct file *filp, struct poll_table_struct *wait)
6389{
6390 unsigned long flags;
6391 unsigned int mask = 0;
6392 struct cp_lkm_common_ctx *common;
6393
6394 common = (struct cp_lkm_common_ctx *)filp->private_data;
6395
6396 poll_wait(filp, &common->inq, wait);
6397
6398 spin_lock_irqsave(&common->read_list_lock, flags);
6399
6400 if (!list_empty(&common->read_list)) {
6401 mask = POLLIN | POLLRDNORM; // readable
6402 }
6403
6404 spin_unlock_irqrestore(&common->read_list_lock, flags);
6405
6406 return mask;
6407}
6408
6409#ifdef KERNEL_2_6_21
6410int cp_lkm_ioctl (struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
6411#else
6412long cp_lkm_ioctl (struct file *filp, unsigned int cmd, unsigned long arg)
6413#endif
6414{
6415 int result = -EINVAL;
6416
6417 void __user *uargp = (void __user *)arg;
6418 void *kargp = NULL;
6419 struct cp_lkm_common_ctx *common = (struct cp_lkm_common_ctx *)filp->private_data;
6420
6421 DEBUG_TRACE("%s(%p) - cmd:%d", __FUNCTION__, filp, _IOC_NR(cmd));
6422
6423 switch(cmd) {
6424 case CP_LKM_IOCTL_SET_LOG_LEVEL:
6425 cp_lkm_log_level = (uintptr_t)uargp;
6426 LOG("Setting debug log level:%d", cp_lkm_log_level);
6427 cp_lkm_wrapper_set_log_level(cp_lkm_log_level);
6428 return 0;
6429 default:
6430 if (_IOC_SIZE(cmd)) {
6431 kargp = kmalloc(_IOC_SIZE(cmd), GFP_ATOMIC);
6432 if (!kargp) {
6433 result = -ENOMEM;
6434 goto done;
6435 }
6436 if (copy_from_user(kargp, uargp, _IOC_SIZE(cmd))) {
6437 result = -EFAULT;
6438 goto done;
6439 }
6440 }
6441 }
6442
6443 if (common->handle_ioctl) {
6444 result = common->handle_ioctl(common, cmd, kargp);
6445 }
6446
6447
6448 if (_IOC_DIR(cmd) & _IOC_READ) {
6449 if (copy_to_user(uargp, kargp, _IOC_SIZE(cmd))) {
6450 result = -EFAULT;
6451 goto done;
6452 }
6453 }
6454
6455done:
6456 if (kargp) {
6457 kfree(kargp);
6458 }
6459
6460 return result;
6461}
6462
6463
6464static int __init cp_lkm_start(void)
6465{
6466 int err;
6467
6468 //printk("%s() Initializing module...\n", __FUNCTION__);
6469
6470 // initialize global structures
6471
6472 err = cp_lkm_pm_tty_init();
6473 if (err) {
6474 return err;
6475 }
6476
6477 cp_lkm_usb_init();
6478
6479 cp_lkm_pm_init();
6480
6481 // Allocating memory for the buffer
6482 if ((major = register_chrdev(0, "cp_lkm", &cp_lkm_fops)) < 0) {
6483 DEBUG_INFO("%s() failed dynamic registration", __FUNCTION__);
6484 cp_lkm_pm_tty_cleanup();
6485 return major;
6486 }
6487
6488 cp_lkm_class = class_create(THIS_MODULE, "cp_lkm");
6489 if (IS_ERR(cp_lkm_class)) {
6490 DEBUG_INFO("%s() failed class create", __FUNCTION__);
6491 unregister_chrdev(major, "cp_lkm");
6492 cp_lkm_pm_tty_cleanup();
6493 return -ENODEV;
6494 }
6495#ifdef KERNEL_2_6_21
6496 cp_lkm_dev[0] = device_create(cp_lkm_class, NULL, MKDEV(major, CP_LKM_USB_MGR_MINOR), "cp_lkm_usb");
6497#else
6498 cp_lkm_dev[0] = device_create(cp_lkm_class, NULL, MKDEV(major, CP_LKM_USB_MGR_MINOR), NULL, "cp_lkm_usb");
6499#endif
6500 if (IS_ERR(cp_lkm_dev[0])){
6501 DEBUG_INFO("%s() failed device create: i", __FUNCTION__);
6502 // clean up previous devices
6503 class_destroy(cp_lkm_class);
6504 unregister_chrdev(major, "cp_lkm");
6505 cp_lkm_pm_tty_cleanup();
6506 return -ENODEV;
6507 }
6508
6509#ifdef KERNEL_2_6_21
6510 cp_lkm_dev[1] = device_create(cp_lkm_class, NULL, MKDEV(major, CP_LKM_PM_MGR_MINOR), "cp_lkm_pm");
6511#else
6512 cp_lkm_dev[1] = device_create(cp_lkm_class, NULL, MKDEV(major, CP_LKM_PM_MGR_MINOR), NULL, "cp_lkm_pm");
6513#endif
6514 if (IS_ERR(cp_lkm_dev[1])){
6515 DEBUG_INFO("%s() failed device create: i", __FUNCTION__);
6516 // clean up previous devices
6517 device_destroy(cp_lkm_class, MKDEV(major, 0));
6518 class_destroy(cp_lkm_class);
6519 unregister_chrdev(major, "cp_lkm");
6520 cp_lkm_pm_tty_cleanup();
6521 return -ENODEV;
6522 }
6523
6524 LOG("cp_lkm: Inserting kernel module");
6525
6526 return 0;
6527}
6528
6529static void __exit cp_lkm_end(void)
6530{
6531 int i;
6532
6533 //TODO remove
6534 //del_timer_sync (&dbg_memleak_timer);
6535
6536
6537 cp_lkm_pm_cleanup();
6538 cp_lkm_usb_cleanup();
6539
6540 for (i = 0; i < 2; i++) {
6541 device_destroy(cp_lkm_class, MKDEV(major, i));
6542 }
6543 class_destroy(cp_lkm_class);
6544 unregister_chrdev(major, "cp_lkm");
6545
6546 cp_lkm_pm_tty_cleanup();
6547
6548 LOG("cp_lkm: Removing kernel module");
6549}
6550
6551module_init(cp_lkm_start);
6552module_exit(cp_lkm_end);
6553MODULE_LICENSE("GPL");
6554
6555