/*
 * FILE NAME cpmodem_shim.c
 *
 * BRIEF MODULE DESCRIPTION
 *  Frankendriver - USB to ethernet, ip or PPP controlled via a block driver.
 *
 * Author: CradlePoint Technology, Inc. <source@cradlepoint.com>
 *         Ben Kendall <benk@cradlepoint.com>
 *         Cory Atkin <catkin@cradlepoint.com>
 *
 * Copyright 2012, CradlePoint Technology, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to:
 *     Free Software Foundation
 *     51 Franklin Street, Fifth Floor
 *     Boston, MA 02111-1301 USA
 */


// Necessary includes for device drivers
#include <linux/module.h>   // Needed by all modules
#include <linux/kernel.h>   // Needed for KERN_xxxx
#include <linux/init.h>     // Needed for the macros
#include <linux/cdev.h>
#include <linux/slab.h>     // kmalloc()
#include <linux/fs.h>       // everything...
#include <linux/poll.h>
#include <linux/errno.h>    // error codes
#include <linux/types.h>    // size_t
#include <linux/proc_fs.h>
#include <linux/fcntl.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <net/addrconf.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/spinlock.h>
#include <linux/ktime.h>
/* #include <asm/system.h> // cli(), *_flags */
#include <asm/uaccess.h>    // copy_from/to_user
#include <linux/usb.h>
#include <linux/version.h>  // LINUX_VERSION_CODE
#include <cpmodem_shim.h>
#include <cpmodem_wrapper.h>


//#define KERNEL_2_6_21 // comment this out for 3.0.29 kernel
/*********************************************** logging and debug ************************************************/

#define RUNTIME_DEBUG_TRACE (1 << 0)
#define RUNTIME_DEBUG_INFO  (1 << 1)
#define RUNTIME_DEBUG_WARN  (1 << 2)
#define RUNTIME_DEBUG_ERROR (1 << 3)
#define RUNTIME_LOG    0
#define RUNTIME_ASSERT -1

//#undef RUNTIME_DEBUG
//#define RUNTIME_DEBUG ( /*RUNTIME_DEBUG_TRACE |*/ RUNTIME_DEBUG_INFO | RUNTIME_DEBUG_WARN | RUNTIME_DEBUG_ERROR )


static int cp_lkm_log_level = 0;

#ifdef RUNTIME_DEBUG
static const char *cp_lkm_shim_runtime_debug_level_str[] = {
    "ASSERT",
    "TRACE",
    "INFO",
    "WARN",
    "ERROR",
};
#else
static const char *cp_lkm_shim_debug_log_level_str[] = {
    "ASSERT",
    "ERROR",
    "WARN",
    "INFO",
    "TRACE",
    "PRINTF"
};
#endif

static int cp_out_get_level_index(int level)
{
    int level_index = 0;
    while (level) {
        level = level >> 1;
        level_index++;
    }
    return level_index;
}
108
109static void cp_out(int level, const char * file, int line, const char *fmt, ...)
110{
111 int file_str_len = 0;
112 char *file_pos = (char *)file;
113 char *fmt1;
114 va_list arg;
115 int level_index = 0;
116 const char *level_str = NULL;
117 const char *kernel_lvl_str = NULL;
118
119 if (level>0) { // level of 0 is LOG and -1 is ASSERT - always output
120 level_index = cp_out_get_level_index(level);
121
122#ifdef RUNTIME_DEBUG
123 if (!(RUNTIME_DEBUG & level)) {
124 return;
125 }
126 level_str = cp_lkm_shim_runtime_debug_level_str[level_index];
127#else
128 if (!(cp_lkm_log_level & level)) {
129 return;
130 }
131 level_str = cp_lkm_shim_debug_log_level_str[level_index];
132#endif
133 }
134
135
136 switch(level) {
137 case RUNTIME_DEBUG_TRACE:
138 kernel_lvl_str = KERN_INFO;
139 break;
140 case RUNTIME_DEBUG_INFO:
141 kernel_lvl_str = KERN_INFO;
142 break;
143 case RUNTIME_DEBUG_WARN:
144 kernel_lvl_str = KERN_WARNING;
145 break;
146 case RUNTIME_DEBUG_ERROR:
147 kernel_lvl_str = KERN_ERR;
148 break;
149 case RUNTIME_LOG:
150 kernel_lvl_str = KERN_INFO;
151 break;
152 case RUNTIME_ASSERT:
153 kernel_lvl_str = KERN_ERR;
154 break;
155 default:
156 kernel_lvl_str = KERN_INFO;
157 break;
158 }
159
160
161 va_start(arg, fmt);
162
163 if (file) {
164 char *pos = (char *)file;
165 while ((pos = strchr(pos, '/'))) {
166 pos++;
167 file_pos = pos;
168 }
169
170 file_str_len = strlen(file_pos);
171 }
172
173 fmt1 = kmalloc(strlen(fmt) + file_str_len + 12 + 6 + 2, GFP_ATOMIC); // +6 for debug type indication, +2 for linux syslog level
174 if (!fmt1) {
175 return;
176 }
177 if (level_str) {
178 if (file) {
179 sprintf(fmt1, "%s%6s %s(%4d):%s\n", kernel_lvl_str, level_str, file_pos, line, fmt);
180 } else {
181 sprintf(fmt1, "%s%6s %s\n", kernel_lvl_str, level_str, fmt);
182 }
183 } else {
184 if (file) {
185 sprintf(fmt1, "%s%s(%4d):%s\n", kernel_lvl_str, file_pos, line, fmt);
186 } else {
187 sprintf(fmt1, "%s%s\n", kernel_lvl_str, fmt);
188 }
189 }
190 vprintk(fmt1, arg);
191 kfree(fmt1);
192 va_end(arg);
193}

#ifdef RUNTIME_DEBUG
// assert is always defined if RUNTIME_DEBUG is defined
// bad idea to kill things in kernel, so we just print the assert msg and keep going
#define DEBUG_ASSERT(a, args...) \
    do { \
        if (!(a)) { \
            printk(KERN_ERR "\n!!! CPMODEM_SHIM ASSERT !!!\n"); \
            cp_out(RUNTIME_ASSERT, __FILE__, __LINE__, args); \
            dump_stack(); \
        } \
    } while (0)
#define DEBUG_TRACE(args...) cp_out(RUNTIME_DEBUG_TRACE, __FILE__, __LINE__, args)
#define DEBUG_INFO(args...) cp_out(RUNTIME_DEBUG_INFO, __FILE__, __LINE__, args)
#define DEBUG_WARN(args...) cp_out(RUNTIME_DEBUG_WARN, __FILE__, __LINE__, args)
#define DEBUG_ERROR(args...) cp_out(RUNTIME_DEBUG_ERROR, __FILE__, __LINE__, args)
#else
#define DEBUG_ASSERT(a, args...)
#define DEBUG_TRACE(args...) cp_out(LOG_DEBUG_LEVEL_TRACE, __FILE__, __LINE__, args)
#define DEBUG_INFO(args...) cp_out(LOG_DEBUG_LEVEL_INFO, __FILE__, __LINE__, args)
#define DEBUG_WARN(args...) cp_out(LOG_DEBUG_LEVEL_WARN, __FILE__, __LINE__, args)
#define DEBUG_ERROR(args...) cp_out(LOG_DEBUG_LEVEL_ERROR, __FILE__, __LINE__, args)
#define DEBUG_PRINTF(args...) cp_out(LOG_DEBUG_LEVEL_PRINTF, __FILE__, __LINE__, args)
#endif

#define LOG(args...) cp_out(RUNTIME_LOG, NULL, 0, args)

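/*
 * Illustrative usage of the logging helpers above (a sketch, not code the
 * driver executes; the LOG_DEBUG_LEVEL_* masks come from cpmodem_shim.h):
 *
 *   cp_lkm_log_level = LOG_DEBUG_LEVEL_ERROR | LOG_DEBUG_LEVEL_WARN;
 *   DEBUG_WARN("%s() ep 0x%x halted", __FUNCTION__, ep_num);
 *   LOG("Device with id:%d unplugged", unique_id); // LOG is always output
 */
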
/*********************************************** general definitions and helper functions *************************/

// Buffer to store data
struct cp_lkm_read_msg {
    struct cp_lkm_msg_hdr hdr;
    struct sk_buff *skb;
    struct list_head list;
};

struct cp_lkm_common_ctx {
    u8 open_cnt;

    // read operation members
    wait_queue_head_t inq;
    struct list_head read_list;
    spinlock_t read_list_lock;
    bool reading_data;
    bool q_waiting;
    // write operation members
    struct sk_buff *write_skb;

    int (*open)(struct cp_lkm_common_ctx *ctx);  // called at open
    int (*close)(struct cp_lkm_common_ctx *ctx); // called at close
    int (*handle_msg)(struct cp_lkm_common_ctx *ctx, struct cp_lkm_msg_hdr *hdr, struct sk_buff *skb); // called at write
    int (*handle_ioctl)(struct cp_lkm_common_ctx *ctx, int cmd, void *k_argp); // called at ioctl
};


int cp_lkm_open(struct inode *inode, struct file *filp);
int cp_lkm_release(struct inode *inode, struct file *filp);
ssize_t cp_lkm_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos);
ssize_t cp_lkm_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos);
#ifdef KERNEL_2_6_21
int cp_lkm_ioctl (struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg);
#else
long cp_lkm_ioctl (struct file *filp, unsigned int cmd, unsigned long arg);
#endif
unsigned int cp_lkm_poll(struct file *filp, struct poll_table_struct *);

static void cp_lkm_common_ctx_init(struct cp_lkm_common_ctx *common);
static void cp_lkm_cleanup_msg_list(struct cp_lkm_common_ctx *common);
static int cp_lkm_post_message(struct cp_lkm_common_ctx *mgr, struct cp_lkm_msg_hdr* hdr, struct sk_buff *skb);

/* Structure that declares the usual file
   access functions */
struct file_operations cp_lkm_fops = {
    .owner = THIS_MODULE,
    .read = cp_lkm_read,
    .write = cp_lkm_write,
#ifdef KERNEL_2_6_21
    .ioctl = cp_lkm_ioctl,
#else
    .unlocked_ioctl = cp_lkm_ioctl,
#endif
    .open = cp_lkm_open,
    .poll = cp_lkm_poll,
    .release = cp_lkm_release
};

static int major;
static struct device *cp_lkm_dev[2];
static struct class *cp_lkm_class;

#define CP_LKM_USB_MGR_MINOR 0
#define CP_LKM_PM_MGR_MINOR  1
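
/*
 * For reference, a minimal sketch of how this fops/major/class triple is
 * typically wired up at module init (illustrative only -- the module's real
 * init code lives elsewhere in this file, the class_create() signature
 * varies by kernel version, and the device names here are made up):
 *
 *   major = register_chrdev(0, "cpmodem_shim", &cp_lkm_fops);
 *   cp_lkm_class = class_create(THIS_MODULE, "cpmodem_shim");
 *   cp_lkm_dev[0] = device_create(cp_lkm_class, NULL,
 *                                 MKDEV(major, CP_LKM_USB_MGR_MINOR), NULL, "cpusbmgr");
 *   cp_lkm_dev[1] = device_create(cp_lkm_class, NULL,
 *                                 MKDEV(major, CP_LKM_PM_MGR_MINOR), NULL, "cppmmgr");
 */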
#define CP_LKM_ITER 3000 //CP_LKM_ITER * CP_LKM_TIMEOUT_MS = 30000 ms, or 30 seconds
#define CP_LKM_TIMEOUT_MS 10

typedef int (*cp_lkm_data_transfer_t)(void *ctx, struct sk_buff *skb);
typedef void (*cp_lkm_data_hdr_size_t)(void *ctx, int wrapper_hdr_size, int *hdr_size, int* hdr_offset);
typedef int (*cp_lkm_poll_t)(void *ctx, int budget);
typedef void (*cp_lkm_schedule_t)(void *ctx);
typedef void (*cp_lkm_complete_t)(void *ctx);
typedef int (*cp_lkm_msg_t)(void *ctx);
struct cp_lkm_edi {
    //values provided by usb side, called by pm side
    cp_lkm_data_transfer_t usb_send;
    void *usb_send_ctx;

    //values provided by pm side, called by usb side
    cp_lkm_msg_t pm_send_pause;  //called by usb to pause the network q
    cp_lkm_msg_t pm_send_resume; //called by usb to resume the network q
    cp_lkm_data_transfer_t pm_recv;
    cp_lkm_data_hdr_size_t pm_get_hdr_size; //ask pm how much space it needs for headers
    void *pm_recv_ctx;

    void *pm_stats64_ctx;
};

static int cp_lkm_pm_usb_link(struct cp_lkm_edi *edi, int pm_unique_id, int link);

struct cp_lkm_pm_stats64 {
    u64 rx_packets;
    u64 tx_packets;
    u64 rx_bytes;
    u64 tx_bytes;
    u64 rx_errors;
    u64 tx_errors;
    u64 rx_dropped;
    u64 tx_dropped;

    u64 rx_over_errors;

    struct u64_stats_sync syncp;
};

struct cp_lkm_pm_common {
    int unique_id;
    u32 attached;
    cp_lkm_pm_type_t type;
    struct net_device *net_dev;
    struct cp_lkm_edi *edi;
    struct list_head filter_list;
    u32 filter_drop_cnt;

    // keep these in pm context so dual sim hidden unplug/plug do not affect the stats
    struct cp_lkm_pm_stats64 *pcpu_stats64;

    int pm_link_count;       //token used to prevent xmit and poll from being called while we are linking or unlinking; -1 = unlinking, so block xmit and poll
    spinlock_t pm_link_lock; //lock to protect getting and releasing the pm_link_count token

    struct list_head list;
};
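
/*
 * The pm_link_count token described above is used in a pattern like this
 * (an illustrative sketch of the convention, not a copy of the real helpers,
 * which live in the pm section of this file):
 *
 *   spin_lock(&pm->pm_link_lock);
 *   if (pm->pm_link_count < 0) {    // link/unlink in progress
 *       spin_unlock(&pm->pm_link_lock);
 *       return NETDEV_TX_BUSY;      // block xmit/poll
 *   }
 *   pm->pm_link_count++;            // take a token
 *   spin_unlock(&pm->pm_link_lock);
 *   // ... do the xmit or poll work ...
 *   spin_lock(&pm->pm_link_lock);
 *   pm->pm_link_count--;            // release the token
 *   spin_unlock(&pm->pm_link_lock);
 */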

//static void cp_lkm_pm_update_stats64(struct cp_lkm_pm_stats64 *stats, u64 *field, u64 incr);
#define UPDATE_STATS(stats_ctx, field, incr) do { \
        if (stats_ctx) { \
            struct cp_lkm_pm_stats64 *stats = this_cpu_ptr(((struct cp_lkm_pm_common *)stats_ctx)->pcpu_stats64); \
            if (stats) { \
                u64_stats_update_begin(&stats->syncp); \
                stats->field += incr; \
                u64_stats_update_end(&stats->syncp); \
            } \
        } \
    } while (0)
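
/*
 * Example (sketch): bumping the per-cpu rx counters from a receive path,
 * where the stats context is the pm_stats64_ctx pointer carried in
 * struct cp_lkm_edi:
 *
 *   UPDATE_STATS(cpdev->edi->pm_stats64_ctx, rx_packets, 1);
 *   UPDATE_STATS(cpdev->edi->pm_stats64_ctx, rx_bytes, skb->len);
 */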

//Keep these commented out for release
//static int dbg_memleak_timer_started = 0;
//static struct timer_list dbg_memleak_timer;
//static spinlock_t dbg_state_lock;
//static int dbg_state_init = 0;
//static int g_dbg_memalloc_cnt = 0;
//static int g_stuck_cnt = 0;
//static int g_stuck_chk = 0;
//static int g_unlink_cnt = 0;

typedef size_t ref_t;
typedef void (*memref_final_method_t)(void *buf);
struct memref {
    memref_final_method_t mfree;
    atomic_t refs;
};


void *memref_alloc(size_t size, memref_final_method_t mfree)
{
    struct memref *ptr;

    ptr = (struct memref *)kmalloc(sizeof(struct memref) + size, GFP_ATOMIC);
    if (!ptr) {
        return NULL;
    }
    //g_dbg_memalloc_cnt++;
    ptr->mfree = mfree;
    atomic_set(&ptr->refs, 1);

    return (ptr + 1);
}

void *memref_alloc_and_zero(size_t size, memref_final_method_t mfree)
{
    void *ptr;

    ptr = memref_alloc(size, mfree);
    if (!ptr) {
        return NULL;
    }

    memset(ptr, 0x00, size);

    return ptr;
}

static void *memref_ref(void *buf)
{
    struct memref *mb;

    if (!buf) {
        return NULL;
    }

    mb = (struct memref *)(buf) - 1;

//    if (0 == atomic_read(&mb->refs)) {
//        DEBUG_INFO("%s() !refs", __FUNCTION__);
//        return NULL;
//    }

    atomic_inc(&mb->refs);

    return buf;
}

#if 0
static ref_t memref_cnt(void *buf)
{
    struct memref *mb;

    if (!buf) {
        return 0;
    }

    mb = (struct memref *)(buf) - 1;
    return atomic_read(&mb->refs);
}
#endif

static ref_t memref_deref(void *buf)
{
    struct memref *mb;

    if (!buf) {
        return 0;
    }

    mb = (struct memref *)(buf) - 1;

//    if (0 == atomic_read(&mb->refs)) {
//        DEBUG_INFO("%s() !refs", __FUNCTION__);
//        return NULL;
//    }

    if (atomic_dec_and_test(&mb->refs)) {
        //g_dbg_memalloc_cnt--;
        if (mb->mfree) {
            mb->mfree(buf);
        }
        kfree(mb);
        return 0;
    }

    return atomic_read(&mb->refs);
}
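
/*
 * Typical memref life cycle (illustrative sketch; 'foo' and its finalizer
 * are made-up names). The object is freed only after every holder drops
 * its reference:
 *
 *   struct foo *f = memref_alloc_and_zero(sizeof(struct foo), foo_finalize);
 *   memref_ref(f);    // second holder (e.g. entry in a second list)
 *   ...
 *   memref_deref(f);  // first holder done, refs 2 -> 1
 *   memref_deref(f);  // refs 1 -> 0: foo_finalize(f) runs, header kfree'd
 */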

/*
 * Generic function to repeatedly call a function until it either succeeds or the delay and iters
 * have been exhausted. Optionally it can throw a kernel panic on failure.
 *
 * ctxt     - the ctxt to pass into do_fun
 * do_fun   - the function to call until it returns success
 * delay_ms - the amount of time to delay between calls to do_fun on failure
 * iter     - the number of times to call do_fun
 * die_str  - if we should panic on failure, then pass in the die_str to display
 *
 * If die_str is provided, this function will not return on failure (it panics).
 * Else it will return the result of the final call to do_fun.
 * Note: total wait time is delay_ms * iter
*/
typedef bool (*do_function_t)(void* ctx1, void* ctx2);
bool cp_lkm_do_or_die(void* ctx1, void* ctx2, do_function_t do_fun, u32 delay_ms, u32 iter, const char* die_str)
{
    bool done = false;
    //set_current_state(TASK_UNINTERRUPTIBLE);
    while (!done && iter) {
        iter--;
        done = do_fun(ctx1, ctx2);
        if (!done) {
            msleep(delay_ms);
            //schedule_timeout(msecs_to_jiffies(delay_ms));
            //set_current_state(TASK_UNINTERRUPTIBLE);
        }
    }
    if(!done && die_str) {
        panic(die_str);
        //BUG_ON()
    }
    //set_current_state(TASK_RUNNING);
    return done;
}
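
/*
 * Example taken from the unplug path below: poll up to CP_LKM_ITER times,
 * sleeping CP_LKM_TIMEOUT_MS between tries (30 seconds total), and panic
 * if the queues never drain:
 *
 *   cp_lkm_do_or_die(cpbdev, NULL, cp_lkm_usb_shuter_down_do_empty_queues,
 *                    CP_LKM_TIMEOUT_MS, CP_LKM_ITER,
 *                    "Failed to empty cpdev queues");
 */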

/******************************* kernel module USB/Wrapper functionality *********************************
 *
 * The shim has multiple entry points. It can be pumped by hw interrupts, software interrupts, or threads.
 * The trick to getting the shim to work properly is knowing from which contexts the different functions can be called
 * and what you can do in that context.
 *
 * The biggest concern is to make sure we aren't nulling out a function or instance pointer in one context while another
 * context is using it. Pointers are changed when linking or unlinking to the protocol manager or when the device unplugs.
 * For link/unlink or unplug, we need to make sure all other processing has been blocked or stopped. We use a combination of
 * tokens and spinlocks to achieve this.
 *
 * Another complexity is dealing with multi-core processors such as we have in some routers. With multi-core you can have
 * a hw interrupt, software interrupt or thread running on one core and a hw interrupt, soft interrupt, or thread running on
 * another at the same time. In addition, the same soft interrupt code can run on both cores at the same time.
 * With single core, the hw int would block the thread. The shim was originally designed for a single-core system, so a lot of work
 * has been put into verifying that multi-core works.
 *
 * Single core: We can be pumped by:
 *   Hardware interrupt - all interrupts disabled, can't be preempted
 *   Software interrupt - hw interrupts not disabled, can be preempted by hw interrupt
 *   Thread or other process - can be preempted by hw or sw interrupt.
 *
 * Multi core: all bets are off. Everything can run at the same time, so you have to be very careful with locks and tokens to not corrupt
 * variables and to not run functions reentrantly.
 *
 * Here are the specific contexts (threads, processes) that pump us:
 * 1. USB on a hardware interrupt context. This happens on tx and rx done (all interrupts disabled, schedule callbacks and get out fast)
 * 2. USB on the hub thread. This happens on unplug (can sleep or pause, but be careful because it stops all USB system hub processing)
 * 3. Kernel workqueue thread (our own callback, can sleep or pause, but be careful, it stops all the kernel workqueue processing)
 * 4. tasklet or timer soft interrupt context (our own callbacks on sw interrupt, hw interrupts enabled, can't sleep or pause)
 * 5. ioctl or device write on a kernel thread (this is cpusb in app space talking to us, runs on a thread, can be preempted in multi-core)
 * 6. network (send from network side, runs as a software interrupt)
 *
 * Which functions are called in which contexts and what they do:
 * #1 - cp_lkm_usb_xmit_complete - called by usb layer when transmit is done in hw interrupt context
 *          throws the transfer in the done q; on success, schedules the tasklet or NAPI poll (#4) by calling
 *          cp_lkm_usb_done_and_defer_data() for data packets or cp_lkm_usb_done_and_defer_other() for non-data pkts.
 *          On error, schedules a kevent (#3) by calling cp_lkm_usb_defer_kevent()
 *      cp_lkm_usb_recv_complete - called by usb layer when recv is done in hw interrupt context
 *          throws the transfer in the done q, schedules the tasklet or NAPI poll (#4); on error schedules a kevent (#3)
 *
 * #2 - cp_lkm_usb_probe - called when the usb hub layer detects a plug, called on hub thread context
 *      cp_lkm_usb_disconnect - called when the usb hub layer detects an unplug, called on hub thread context
 *          schedules mgr_kevent to clean up
 *
 * #3 - cp_lkm_usb_kevent - scheduled by tx and rx complete (#1) on USB halt errors or out of memory failure. Is a workqueue thread
 *          clears the halts, sees if memory is available. On success, schedules the tasklet or NAPI poll (#4)
 *
 * #4 - cp_lkm_usb_process_data_done_tasklet - Scheduled by rx or tx complete (#1). Runs in soft int context. This function is used when we
 *          are using a non-NAPI compliant protocol manager (i.e. PPP). It processes recv'd pkts and sends
 *          them onto the protocol manager, frees all sent skb's and restocks more recv urbs to the USB layer.
 *      cp_lkm_usb_process_other_done_tasklet - Same as the first one except it is scheduled anytime we recv a pkt that needs to go to the common
 *          modem stack instead of to the network stack (ctrl, status or diagnostics pkt)
 *
 * #5 - cp_lkm_usb_handle_ioctl - ioctl mux function called by the kernel when the app ioctl is called
 *          calls the appropriate mux function
 *      cp_lkm_usb_plug_intf - called by ioctl mux to register a device. Registers a usb driver to catch
 *          the plug event from the usb stack
 *      cp_lkm_usb_open_intf - called by ioctl mux to indicate the data channel is active. This causes us to
 *          mux all data packets to the network stack instead of up to cpusb in app space
 *      cp_lkm_usb_close_intf - called by ioctl mux to indicate the data connection has gone down.
 *          This causes us to mux all packets up to cpusb in app space instead of to the network
 *
 *      cp_lkm_usb_unplug_intf - called by ioctl mux. Releases the interface, deregisters the usb driver, cleans up memory
 *      cp_lkm_usb_handle_msg - called by the device driver write function. This is how cpusb sends us usb packets that
 *          we need to send to usb
 * #6 - cp_lkm_usb_start_xmit - called by the network interface
 *          sends a transmit to the usb layer
*/


struct cp_lkm_usb_dev;
struct cp_lkm_usb_base_dev;


/* we record the state for each of our queued skbs */
enum skb_state {
    illegal = 0,
    out_start,        // start a data or other transmit
    out_done,         // data or other transmit done
    in_data_start,    // start a recv (either data or other)
    in_data_done,     // recv data done
    in_data_cleanup,
    in_other_start,
    in_other_done,    // recv other done
    in_other_cleanup,
    ctrl_start,       // start a usb ctrl transfer
    ctrl_done,        // usb ctrl transfer finished
    unlink_start      // telling usb to give our urb back
};

#define EVENT_TX_HALT    0
#define EVENT_RX_HALT    1
#define EVENT_RX_MEMORY  2
#define EVENT_STS_SPLIT  3
#define EVENT_LINK_RESET 4

//These are standard USB defines
#define UE_BULK      0x02
#define UE_INTERRUPT 0x03

#define MAX_INTF_EPS 10

#define CP_LKM_USB_RECV   0x01
#define CP_LKM_USB_LISTEN 0x02

struct cp_lkm_base_ep
{
    struct list_head list;              // for inserting in the cpbdev list of base endpoints
    struct list_head eps;               // list of cloned endpoints based off this one
    struct cp_lkm_usb_base_dev* cpbdev; // pointer back to the cpbdev this endpoint belongs to
    int ep_num;                         // endpoint number
    unsigned long err_flags;            // errors on the ep (halt, no mem)
    int con_flags;                      //connection flags (recv, listen)
    int q_cnt;                          //number of urbs down at the lower layer
    int type;                           //ep type (interrupt, bulk etc)
    int max_transfer_size;
    int pipe;
    int interval;                       // interval for interrupt end points
};

struct cp_lkm_ep
{
    struct list_head list_bep;    // for being inserted into the bep's list of eps
    struct list_head list_cpdev;  // for being inserted into the cpdev's list of eps
    struct cp_lkm_base_ep* bep;   // pointer to this ep's base endpoint
    struct cp_lkm_usb_dev* cpdev; // pointer back to the cpdev this endpoint belongs to
    int con_flags;                //connection flags (recv, listen)
    int ep_num;                   // duplicated from base endpoint for convenience
};

/* This struct gets stored in skb->cb, which is currently a 48 byte buffer.
   The size of this struct must never be bigger than 48 bytes.
*/
struct skb_data {
    //if pointers and ints are 64 bits (8 bytes) then this is 48 bytes currently and
    //no other variables can be added
    struct urb *urb;
    struct cp_lkm_usb_base_dev *cpbdev;
    struct cp_lkm_base_ep* bep;
    enum skb_state state;
    int status;
    int unique_id; //id of cpdev that sent the tx pkt
};
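
/*
 * A compile-time guard along these lines, placed in an init function, would
 * enforce the 48-byte rule above (a sketch; the original build relies on the
 * commented-out runtime DEBUG_ASSERT in cp_lkm_usb_plug_intf instead):
 *
 *   BUILD_BUG_ON(sizeof(struct skb_data) > sizeof(((struct sk_buff *)0)->cb));
 */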

#define MAX_USB_DRVR_NAME 10
#define USB_DRVR_FRMT_STR "cpusb%d"

struct cp_lkm_usb_base_dev
{
    struct list_head list;          //for inserting in global dev list
    struct list_head cpdev_list;    //list of cpdevs cloned from this base dev
    struct list_head in_bep_list;   // list of base in endpoints
    struct list_head out_bep_list;  // list of base out endpoints
    int data_in_bep_num;            //data in ep number
    int data_out_bep_num;           //data out ep number

    struct usb_driver* usb_driver;
    struct usb_device_id* usb_id_table;
    int vid;
    int pid;
    int intf_num;
    int alt_intf_num;
    int usb_bus;
    int usb_addr;
    int feature_flags;
    int base_id;                    //unique id of the first clone to plug
    cp_lkm_usb_state_t base_state;

    struct sk_buff_head in_q;         //recv skb's are stored here while down at usb waiting to be filled with recv data
    struct sk_buff_head out_q;        //send skb's are stored here while down at usb waiting to be transmitted
    struct sk_buff_head ctrlq;        //ctrl skb's are stored here while down at usb waiting to be filled or transmitted
    struct sk_buff_head data_tx_done; //tx skb's are stored here while waiting to be freed
    struct sk_buff_head data_rx_done; //recv and ctrl skb's are stored here while waiting to have recv data processed
    struct sk_buff_head other_done;   //sent skb's are stored here while waiting to be freed

    u32 data_q_len;           // holds count of data pkts (both rx and tx) needing to be processed
    spinlock_t data_q_lock;   // lock to keep data_q_len sync'd
    spinlock_t processing_state_lock;
    cp_lkm_usb_process_state_t processing_state;
    spinlock_t other_state_lock;
    cp_lkm_usb_process_state_t other_state;
    bool scheduled;           //tasklet scheduled to process the pending done q's

    struct tasklet_struct other_process_tasklet;
    struct tasklet_struct data_process_tasklet;

    int rx_schedule_threshold;
    int tx_schedule_threshold;
    int tx_resume_threshold;

    struct work_struct kevent;
    char usb_drvr_name[MAX_USB_DRVR_NAME];
    void* wrapper_ctxt;
    int wrapper_hdr_size;
    int pm_hdr_size;
    int pm_hdr_offset;

    struct usb_interface* intf;
    struct usb_device *udev;

    int plug_result;
    bool disconnect_wait;

    struct timer_list rx_delay;

    int tx_usb_q_count;
    bool tx_paused;

    struct timer_list usb_pause_stuck_timer;
    int tx_proc_cnt;          //how many data tx pkts have we successfully sent
    int tx_proc_cnt_at_pause; //how many data tx pkts we had sent when we paused

#if 0
    //debug stuff, comment out
    //unsigned int dbg_total_stuck_cnt;
    //unsigned int dbg_total_tx_at_stuck_cnt;
    //unsigned int dbg_total_tx_proc;
#endif
};

struct cp_lkm_usb_dev
{
    //init at open
    struct cp_lkm_usb_base_dev* cpbdev;
    int unique_id;
    int pm_id;
    int clone_num;
    int mux_id;

    cp_lkm_usb_state_t state;
    struct list_head list; //for inserting in base dev list

    struct cp_lkm_edi* edi;

    struct list_head in_ep_list;  //list of in endpoints on the dev
    struct list_head out_ep_list; //list of out endpoints on the dev
    int data_in_ep_num;           //data in ep number
    int data_out_ep_num;          //data out ep number

    //for debug
#if 0
    struct timer_list dbg_timer;
    unsigned int dbg_total_rx_irq;
    unsigned int dbg_total_tx_irq;
    unsigned int dbg_total_rx_proc;
    unsigned int dbg_total_d_done;
    unsigned int dbg_total_o_done;
    unsigned int dbg_total_pause;
    unsigned int dbg_total_resume;
    unsigned int dbg_total_max_work;
    unsigned int dbg_total_timeout;
    unsigned int dbg_total_budget;
    unsigned int dbg_total_o_tasklet;
    unsigned int dbg_total_d_resched;
    unsigned int dbg_total_wq_sched;
    unsigned int dbg_total_napi_sched;
    unsigned int dbg_total_tasklet_sched;
    unsigned int dbg_total_d_comp;
    //unsigned int dbg_total_ic;
    //unsigned int dbg_total_tc;
    unsigned int dbg_total_rx_qlen;
    unsigned int dbg_total_tx_qlen;
    unsigned int dbg_total_num_hybrid_t;
    unsigned int dbg_total_num_normal_t;
    unsigned int dbg_total_num_hybrid;
    unsigned int dbg_total_num_normal;
    unsigned int dbg_total_num_d_timers;
    unsigned int dbg_total_sch_sk;
#endif
};

struct cp_lkm_usb_ctx
{
    struct cp_lkm_common_ctx common;
    struct list_head dev_list;
    spinlock_t lock; //used to protect access to dev_list from different instances. Also used to coordinate thread accesses from usb and cpmodem layers.
                     //when one thread grabs the lock, no other threads can run (soft and hw IRQs can still run). The usb hub unplug handler runs on a thread,
                     //which means that if one thread grabs the lock it can be guaranteed the modem can't unplug out from under it while it is doing its thing.
};

//static void cp_lkm_usb_dbg_memleak_timer (unsigned long param);
//static void cp_lkm_usb_dbg_timer (unsigned long param);

enum {
    CP_LKM_STUCK_INIT = 0,
    CP_LKM_STUCK_START,
    CP_LKM_STUCK_STOP,
    CP_LKM_STUCK_DEINIT
};
static void cp_lkm_usb_stuck_check(struct cp_lkm_usb_base_dev* cpbdev, int action);
static void cp_lkm_usb_pause_stuck_timer(struct timer_list *timer);

static void cp_lkm_usb_delay_timer (struct timer_list *timer);
static void cp_lkm_usb_kevent (struct work_struct *work);
static int cp_lkm_usb_open(struct cp_lkm_common_ctx *ctx);
static int cp_lkm_usb_close(struct cp_lkm_common_ctx *ctx);
static int cp_lkm_usb_handle_ioctl(struct cp_lkm_common_ctx *ctx, int cmd, void *k_argp);
static int cp_lkm_usb_handle_msg(struct cp_lkm_common_ctx *ctx, struct cp_lkm_msg_hdr *hdr, struct sk_buff *skb);

static int cp_lkm_usb_start_xmit (void *ctx, struct sk_buff *skb);
static int cp_lkm_usb_start_xmit_common(void *ctx, struct sk_buff *skb, int src, struct cp_lkm_ep* ep);
static void cp_lkm_usb_xmit_complete (struct urb *urb);
static int cp_lkm_usb_submit_recv (struct cp_lkm_usb_base_dev* cpbdev, struct urb *urb, gfp_t flags, struct cp_lkm_base_ep* bep, bool data);
static void cp_lkm_usb_recv_complete (struct urb *urb);

static void cp_lkm_usb_other_recv_process (struct cp_lkm_usb_base_dev* cpbdev, struct sk_buff *skb_in);
static void cp_lkm_usb_data_recv_process (struct cp_lkm_usb_base_dev* cpbdev, struct sk_buff *skb);
static void cp_lkm_usb_ctrl_process (struct cp_lkm_usb_base_dev* cpbdev, struct sk_buff *skb_in);

static int cp_lkm_usb_close_intf(struct cp_lkm_usb_close_intf* ci);
static int cp_lkm_usb_unlink_urbs (struct cp_lkm_usb_base_dev *cpbdev, struct sk_buff_head *q, struct cp_lkm_base_ep* bep);

static void cp_lkm_usb_process_other_done_tasklet (unsigned long param);
static void cp_lkm_usb_process_data_done_tasklet (unsigned long param);
static void cp_lkm_usb_rx_data_restock (struct cp_lkm_usb_base_dev* cpbdev);
static void cp_lkm_usb_rx_other_restock (struct cp_lkm_usb_base_dev* cpbdev);
static void cp_lkm_usb_defer_kevent (struct cp_lkm_usb_base_dev* cpbdev, struct cp_lkm_base_ep* bep, int work);
static bool cp_lkm_schedule_data_process(struct cp_lkm_usb_base_dev* cpbdev, bool if_data, bool is_resume, bool have_lock);

static void cp_lkm_schedule_rx_restock(struct cp_lkm_usb_base_dev* cpbdev, struct cp_lkm_base_ep* bep);
static int cp_lkm_usb_start_ctrl_xmit(void *ctx, struct sk_buff *skb_in);
static int cp_lkm_usb_have_data(struct cp_lkm_usb_base_dev *cpbdev);

static struct cp_lkm_usb_ctx cp_lkm_usb_mgr;

// Knobs we can tweak on a processor by processor basis to maximize performance
// Dummy values filled in here so we don't get warnings on using uninitialized variables
static int CP_LKM_PM_NAPI_WEIGHT = 0;       //budget we register with NAPI (max number of pkts it thinks we will process).
static int CP_LKM_USB_NAPI_MAX_WORK = 0;    //actual number of pkts we will process (we're not entirely honest with NAPI)
static int CP_LKM_USB_MAX_RX_QLEN = 0;      //max number of rx data URBs we allow to flow in the shim (we alloc these)
static int CP_LKM_USB_MAX_OTHER_QLEN = 0;   //max number of rx urbs on non-data endpoints
static int CP_LKM_USB_TX_PAUSE_Q_PKTS = 0;  //max number of tx data URBs we allow to flow in the shim (alloc'd by network stack, we control this by pausing)
static int CP_LKM_USB_TX_RESUME_Q_PKTS = 0; //un-pause network at this number
//static int CP_LKM_USB_TX_RESUME_Q_PKTS_HYBRID = 0; //un-pause network at this number when in hybrid mode with pkt counting
static int CP_LKM_USB_TX_SCHED_CNT = 0;     //How many done tx's we allow to accumulate before scheduling cleanup in normal mode
//static int CP_LKM_USB_TX_SCHED_CNT_HYBRID = 0; //How many done tx's we allow to accumulate before scheduling cleanup in hybrid mode with pkt counting
static int CP_LKM_USB_RX_SCHED_CNT = 0;     //How many done rx's we allow to accumulate before scheduling processing in normal mode
//static int CP_LKM_USB_RX_SCHED_CNT_HYBRID = 0; //How many done rx's we allow to accumulate before scheduling processing in hybrid mode with pkt counting
static int CP_LKM_USB_RESTOCK_MULTIPLE = 0; //How many rx URBs we should restock as we process them (0 means don't restock as we go, 1 means every one, 2 means 1 out of every 2 etc)
//static int CP_LKM_USB_DATA_MAX_PPS = 0;   //Packets per second that will cause us to transition from normal to hybrid mode when using pkt counting
//static int CP_LKM_USB_DATA_MIN_PPS = 0;   //packets per second that will cause us to transition from hybrid back to normal when using pkt counting
static int CP_LKM_USB_TASKLET_CNT = 0;      //in hybrid mode, schedule tasklet on cnts 0 to this number
static int CP_LKM_USB_WORKQUEUE_CNT = 0;    //in hybrid mode, schedule workqueue on cnts CP_LKM_USB_TASKLET_CNT to this number, then start cnt over
static int CP_LKM_USB_PROCESS_DIVISOR = 0;  //times to loop through the process loop, doing pkts/divisor pkts each time. Set to 1 to only process what was there when entering
//broadcom EHCI controller has issues we need to work around
static int cp_lkm_is_broadcom = 0;

#define CP_LKM_USB_PAUSED_CNT 5000

//TODO remove
#if 0
static int g_dbg_data_skballoc_cnt = 0;
static int g_dbg_other_skballoc_cnt = 0;
static int g_dbg_ctrl_skballoc_cnt = 0;
static int g_dbg_xmit_skballoc_cnt = 0;
static int g_dbg_urballoc_cnt = 0;
static int g_dbg_unplug_cnt = 0;
static void cp_lkm_usb_urb_cnt(int inc)
{
    unsigned long flags;
    spin_lock_irqsave(&dbg_state_lock, flags);
    g_dbg_urballoc_cnt += inc;
    spin_unlock_irqrestore(&dbg_state_lock, flags); //release lock so interrupts can resume firing
}
static void cp_lkm_usb_cnts(int state, int inc)
{
#if 1
    unsigned long flags;
    spin_lock_irqsave(&dbg_state_lock, flags);

    switch (state) {
    case in_other_start:
    case in_other_done:
    case in_other_cleanup:
        g_dbg_other_skballoc_cnt += inc;
        break;
    case ctrl_start:
    case ctrl_done:
        g_dbg_ctrl_skballoc_cnt += inc;
        break;
    case out_start:
    case out_done:
        g_dbg_xmit_skballoc_cnt += inc;
        break;
    case in_data_start:
    case in_data_done:
    case in_data_cleanup:
        g_dbg_data_skballoc_cnt += inc;
        break;
    case unlink_start:
        g_dbg_unplug_cnt += inc;
        break;
    default:
        printk("!!clean: unknown skb state: %d\n", state);
        break;
    }
    spin_unlock_irqrestore(&dbg_state_lock, flags);
#endif
}
#endif

static struct cp_lkm_usb_dev* cp_lkm_usb_find_muxed_dev(struct cp_lkm_usb_base_dev* cpbdev, int mux_id)
{
    struct list_head *pos;
    list_for_each(pos, &cpbdev->cpdev_list){
        struct cp_lkm_usb_dev* cpdev = list_entry(pos, struct cp_lkm_usb_dev, list);
        //printk("%s() cpdev: %p, cpdev->mux_id: %d\n", __FUNCTION__, cpdev, cpdev->mux_id);
        if(cpdev->mux_id == mux_id) {
            return cpdev;
        }
    }
    return NULL;
}

static struct cp_lkm_usb_dev* cp_lkm_usb_find_dev(int uniqueid)
{
    struct list_head *bpos;
    struct list_head *pos;
    list_for_each(bpos, &cp_lkm_usb_mgr.dev_list){
        struct cp_lkm_usb_base_dev* cpbdev = list_entry(bpos, struct cp_lkm_usb_base_dev, list);
        list_for_each(pos, &cpbdev->cpdev_list){
            struct cp_lkm_usb_dev* cpdev = list_entry(pos, struct cp_lkm_usb_dev, list);
            if(cpdev->unique_id == uniqueid) {
                return cpdev;
            }
        }
    }
    return NULL;
}

#define CP_LKM_DEV_MATCH_ALL           1
#define CP_LKM_DEV_MATCH_BUS_ADDR_ONLY 2

// Find base device from its bus, addr and unique id
static struct cp_lkm_usb_base_dev* cp_lkm_usb_find_base_dev(int bus, int addr, int unique_id, int match)
{
    struct list_head *pos;
    struct list_head *bpos;
    list_for_each(bpos, &cp_lkm_usb_mgr.dev_list){
        struct cp_lkm_usb_base_dev* cpbdev = list_entry(bpos, struct cp_lkm_usb_base_dev, list);
        if(cpbdev->usb_bus == bus && cpbdev->usb_addr == addr) {
            if (match == CP_LKM_DEV_MATCH_BUS_ADDR_ONLY) {
                return cpbdev;
            }
            if (cpbdev->base_id == unique_id) {
                //matches the base_id so don't need to look further
                return cpbdev;
            }
            //look to see if it matches the unique_id of one of the cpdevs (only hit this case when running clones)
            list_for_each(pos, &cpbdev->cpdev_list){
                struct cp_lkm_usb_dev* cpdev = list_entry(pos, struct cp_lkm_usb_dev, list);
                if (cpdev->unique_id == unique_id) {
                    return cpbdev;
                }
            }
        }
    }
    return NULL;
}

/*
static struct cp_lkm_usb_dev* cp_lkm_usb_get_head_dev(void)
{
    struct list_head *bpos;
    struct list_head *pos;
    list_for_each(bpos, &cp_lkm_usb_mgr.dev_list){
        struct cp_lkm_usb_base_dev* cpbdev = list_entry(bpos, struct cp_lkm_usb_base_dev, list);
        list_for_each(pos, &cpbdev->cpdev_list){
            struct cp_lkm_usb_dev* cpdev = list_entry(pos, struct cp_lkm_usb_dev, list);
            return cpdev;
        }
    }
    return NULL;
}
*/

// pause or unpause all cpdevs associated with this cpbdev
static void cp_lkm_usb_dev_pause(struct cp_lkm_usb_base_dev* cpbdev, bool pause)
{
    struct list_head *pos;

    list_for_each(pos, &cpbdev->cpdev_list){
        struct cp_lkm_usb_dev* cpdev = list_entry(pos, struct cp_lkm_usb_dev, list);
        if (pause) {
            if(cpdev->edi->pm_send_pause) {
                cpdev->edi->pm_send_pause(cpdev->edi->pm_recv_ctx);
                //cpdev->dbg_total_pause++;
            }
        }
        else{
            if (cpdev->edi->pm_send_resume) {
                //cpdev->dbg_total_resume++;
                cpdev->edi->pm_send_resume(cpdev->edi->pm_recv_ctx);
            }
        }
    }
    cpbdev->tx_paused = pause;
}

static void cp_lkm_usb_clean_list(struct sk_buff_head* list)
{
    struct sk_buff *skb;
    struct skb_data *entry;

    while((skb = skb_dequeue(list)) != NULL){
        DEBUG_TRACE("%s() found a straggler", __FUNCTION__);
        entry = (struct skb_data *) skb->cb;
        if(entry->urb) {
            //cp_lkm_usb_urb_cnt(-1);
            usb_free_urb (entry->urb);
        }
        //cp_lkm_usb_cnts(entry->state, -1);
        dev_kfree_skb_any(skb);
    }
}

static void cp_lkm_usb_mark_as_dead(struct cp_lkm_usb_dev* cpdev)
{
    cpdev->edi->usb_send_ctx = NULL;
    if(cpdev->state != CP_LKM_USB_DEAD) {
        LOG("Device with id:%d unplugged", cpdev->unique_id);
    }
    cpdev->state = CP_LKM_USB_DEAD;
}

static void cp_lkm_usb_mark_base_as_dead(struct cp_lkm_usb_base_dev* cpbdev)
{
    cpbdev->base_state = CP_LKM_USB_DEAD;
}

static struct cp_lkm_base_ep* cp_lkm_usb_get_bep(struct cp_lkm_usb_base_dev* cpbdev, int ep_num)
{
    struct cp_lkm_base_ep* bep = NULL;
    struct list_head *entry, *nxt, *head;

    if(USB_DIR_IN & ep_num) {
        //printk("%s() search IN list for ep_num: 0x%x\n", __FUNCTION__, ep_num);
        head = &cpbdev->in_bep_list;
    }
    else{
        //printk("%s() search OUT list for ep_num: 0x%x\n", __FUNCTION__, ep_num);
        head = &cpbdev->out_bep_list;
    }

    list_for_each_safe(entry, nxt, head) {
        bep = list_entry(entry, struct cp_lkm_base_ep, list);
        if (bep->ep_num == ep_num) {
            //printk("%s() found ep_num: %d\n", __FUNCTION__, ep_num);
            return bep;
        }
    }
    //printk("%s() didn't find ep_num: %d\n", __FUNCTION__,ep_num);

    return NULL;
}

static struct cp_lkm_ep* cp_lkm_usb_get_ep(struct cp_lkm_usb_dev* cpdev, int ep_num)
{
    struct cp_lkm_ep* ep = NULL;
    struct list_head *entry, *nxt, *head;

    if(USB_DIR_IN & ep_num) {
        //printk("%s() search IN list for ep_num: 0x%x\n", __FUNCTION__, ep_num);
        head = &cpdev->in_ep_list;
    }
    else{
        //printk("%s() search OUT list for ep_num: 0x%x\n", __FUNCTION__, ep_num);
        head = &cpdev->out_ep_list;
    }

    list_for_each_safe(entry, nxt, head) {
        ep = list_entry(entry, struct cp_lkm_ep, list_cpdev);
        if (ep->ep_num == ep_num) {
            //printk("%s() found ep_num: %d\n", __FUNCTION__, ep_num);
            return ep;
        }
    }
    //printk("%s() didn't find ep_num: %d\n", __FUNCTION__,ep_num);

    return NULL;
}

static void cp_lkm_usb_bep_finalize(void *arg)
{
    struct cp_lkm_base_ep* bep = (struct cp_lkm_base_ep*)arg;
    struct list_head *entry, *nxt;
    struct cp_lkm_ep *ep;

    //printk("%s() start\n", __FUNCTION__);
    //todo remove
    //del_timer_sync(&cpdev->dbg_timer);

    //printk("%s() - free eps\n",__FUNCTION__);
    list_for_each_safe(entry, nxt, &bep->eps) {
        ep = list_entry(entry, struct cp_lkm_ep, list_bep);
        //printk("%s() - free ep: %p from bep: %p\n",__FUNCTION__,ep,bep);
        list_del(&ep->list_bep);
        memref_deref(ep);
    }

}

static void cp_lkm_usb_ep_finalize(void *arg)
{
    //struct cp_lkm_ep* ep = (struct cp_lkm_ep*)arg;
    //printk("%s() - free ep: %p, ep_num: 0x%x\n",__FUNCTION__,arg ,ep->ep_num);
}

static struct cp_lkm_ep* cp_lkm_usb_create_ep(struct cp_lkm_usb_dev* cpdev, int ep_num)
{
    struct cp_lkm_ep* ep;
    struct cp_lkm_base_ep* bep;
    struct cp_lkm_usb_base_dev* cpbdev;

    DEBUG_ASSERT(cpdev, "cpdev is null");
    cpbdev = cpdev->cpbdev;
    DEBUG_ASSERT(cpbdev, "cpbdev is null");

    //see if it already exists first
    ep = cp_lkm_usb_get_ep(cpdev, ep_num);
    if(ep) {
        DEBUG_TRACE("%s() ep: %p already exists", __FUNCTION__, ep);
        //printk("%s() ep: 0x%x already exists\n", __FUNCTION__, ep_num);
        return ep;
    }
    //printk("%s() - create new ep, cpdev: %p, ep_num: 0x%x\n",__FUNCTION__,cpdev, ep_num);

    //Need to create a new ep and possibly a new bep. We will alloc and init everything first and
    //then if that all works, we will put everything in its proper place (in lists and stuff)
    ep = memref_alloc_and_zero(sizeof(struct cp_lkm_ep), cp_lkm_usb_ep_finalize);
    if(!ep) {
        DEBUG_ERROR("%s() failed to alloc new ep", __FUNCTION__);
        return NULL;
    }
    INIT_LIST_HEAD(&ep->list_bep);
    INIT_LIST_HEAD(&ep->list_cpdev);
    ep->ep_num = ep_num;

    //may need to create a new base ep if this is the first time we've seen this endpoint number and direction
    //this is always the case for non-cloned interfaces
    bep = cp_lkm_usb_get_bep(cpbdev, ep_num);
    if (!bep) {
        bep = memref_alloc_and_zero(sizeof(struct cp_lkm_base_ep), cp_lkm_usb_bep_finalize);
        if(!bep) {
            DEBUG_ERROR("%s() failed to alloc new bep", __FUNCTION__);
            memref_deref(ep);
            return NULL;
        }
        //printk("%s() - create new bep: %p, cpbdev: %p, ep_num: 0x%x\n",__FUNCTION__,bep, cpbdev, ep_num);
        bep->ep_num = ep_num;
        bep->cpbdev = cpbdev;
        INIT_LIST_HEAD(&bep->list);
        INIT_LIST_HEAD(&bep->eps);
        if(USB_DIR_IN & ep_num) {
            list_add_tail(&bep->list, &cpbdev->in_bep_list);
        }
        else{
            list_add_tail(&bep->list, &cpbdev->out_bep_list);
        }
    }

    //if we get here, everything alloc'd ok, so we can insert it in the lists and such

    // Each ep will have two memrefs, one from the alloc which is for entry in the cpdev list
    // and another for entry into the bep list. This way the ep won't be freed until it is removed
    // from both lists at unplug time
    ep->cpdev = cpdev;
    ep->bep = bep;
    if(USB_DIR_IN & ep_num) {
        list_add_tail(&ep->list_cpdev, &cpdev->in_ep_list);
    }
    else{
        list_add_tail(&ep->list_cpdev, &cpdev->out_ep_list);
    }
    memref_ref(ep);
    list_add_tail(&ep->list_bep, &bep->eps);
    return ep;

}

// cp_lkm_usb_plug_intf is called by cpusb via the ioctl. It registers a driver for the interface.
// This function is then called by the lower usb layer so we can claim that interface.
int cp_lkm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
    struct cp_lkm_usb_base_dev* cpbdev;
    struct usb_device* udev;
    struct usb_host_interface* interface;
    int unique_id;
    //unsigned long flags;
    int rc;
    uintptr_t tmp_uid;

    usb_get_intf(intf);

    //printk("%s()\n",__FUNCTION__);

    udev = interface_to_usbdev (intf);
    interface = intf->cur_altsetting;

    unique_id = (int)id->driver_info;
    tmp_uid = unique_id;
    spin_lock(&cp_lkm_usb_mgr.lock);

    // Error scenario to watch for here:
    // 1. Device unplugs and replugs before the upper app detects the unplug and calls our unplug_intf. In
    //    this case this driver is still registered and will get the new probe (we don't want this, we want the app driver
    //    to get the plug and claim the device originally). When disconnect happens we set the state to DEAD. If we get
    //    a probe on a dead device, don't take it.
    cpbdev = cp_lkm_usb_find_base_dev(udev->bus->busnum, udev->devnum, unique_id, CP_LKM_DEV_MATCH_ALL);
    if(!cpbdev || cpbdev->base_state == CP_LKM_USB_DEAD) {
        spin_unlock(&cp_lkm_usb_mgr.lock);

        DEBUG_TRACE("%s() no cpdev or already dead", __FUNCTION__);
        return -ENXIO;
    }

    //make sure it is for our device (match the usb addresses)
    //printk("%s() id: %d ouraddr:%d, probeaddr:%d, ourintf:%d, probeintf:%d!\n", __FUNCTION__, unique_id,
    //       cpbdev->usb_addr,udev->devnum,cpbdev->intf_num,interface->desc.bInterfaceNumber);
    if(cpbdev->usb_bus != udev->bus->busnum || cpbdev->usb_addr != udev->devnum || cpbdev->intf_num != interface->desc.bInterfaceNumber) {
        spin_unlock(&cp_lkm_usb_mgr.lock);

        DEBUG_TRACE("%s() reject ourbus: %d, probebus: %d, ouraddr:%d, probeaddr:%d, ourintf:%d, probeintf:%d!", __FUNCTION__,
                    cpbdev->usb_bus, udev->bus->busnum, cpbdev->usb_addr,udev->devnum,cpbdev->intf_num,interface->desc.bInterfaceNumber);
        return -ENXIO;
    }
    cpbdev->intf = intf;
    cpbdev->udev = udev;

    spin_unlock(&cp_lkm_usb_mgr.lock);

    if(cpbdev->alt_intf_num) {
        rc = usb_set_interface(udev, cpbdev->intf_num, cpbdev->alt_intf_num);
        if(rc) {
            DEBUG_ERROR("%s() set intf failed :%d", __FUNCTION__,rc);
            cpbdev->plug_result = -1; //only set this on failure, not reject
            return -1;
        }
    }

    spin_lock(&cp_lkm_usb_mgr.lock);
    cpbdev->base_state = CP_LKM_USB_CTRL;

    usb_set_intfdata(intf, (void*)tmp_uid);
    usb_get_dev (udev);
    memref_ref(cpbdev);
    spin_unlock(&cp_lkm_usb_mgr.lock);

    cp_lkm_usb_stuck_check(cpbdev, CP_LKM_STUCK_INIT);

    //throughput control stuff
    cpbdev->rx_schedule_threshold = CP_LKM_USB_RX_SCHED_CNT;
    cpbdev->tx_schedule_threshold = CP_LKM_USB_TX_SCHED_CNT;
    cpbdev->tx_resume_threshold = CP_LKM_USB_TX_RESUME_Q_PKTS;


    //todo remove
    //if (!dbg_memleak_timer_started) {
    //    dbg_memleak_timer_started = 1;
    //    dbg_memleak_timer.function = cp_lkm_usb_dbg_memleak_timer;
    //    dbg_memleak_timer.data = 0;

    //    init_timer(&dbg_memleak_timer);
    //    mod_timer(&dbg_memleak_timer, jiffies + msecs_to_jiffies(20000));
    //}
    //if (dbg_state_init == 0) {
    //    spin_lock_init(&dbg_state_lock);
    //    dbg_state_init = 1;
    //}



    DEBUG_TRACE("%s() probe done", __FUNCTION__);
    return 0;
}

static bool cp_lkm_usb_shuter_down_do_pm_unlink(void* ctx1, void* ctx2)
{
    struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev*)ctx1;
    struct cp_lkm_usb_dev* cpdev;
    struct list_head *pos;
    unsigned long flags;
    //Unlink from the pm and disable the data state machine
    bool done = false;
    spin_lock_irqsave(&cpbdev->processing_state_lock, flags);
    if(cpbdev->processing_state == USB_PROCESS_STATE_IDLE){
        cpbdev->processing_state = USB_PROCESS_STATE_PAUSED; //data soft interrupt handlers now won't run

        spin_lock(&cpbdev->data_q_lock);
        cpbdev->data_q_len = CP_LKM_USB_PAUSED_CNT;
        spin_unlock(&cpbdev->data_q_lock); //usb hw interrupts now won't schedule soft interrupt handlers

        spin_unlock_irqrestore(&cpbdev->processing_state_lock, flags); //release lock so interrupts can resume firing
        //unlink the pm side for all cpdevs associated with this cpbdev. Once this returns we are guaranteed not to get any new xmit skb's from the pm
        list_for_each(pos, &cpbdev->cpdev_list){
            cpdev = list_entry(pos, struct cp_lkm_usb_dev, list);
            LOG("Unlink cpdev: %p from pm", cpdev);
            cp_lkm_pm_usb_link(cpdev->edi, cpdev->pm_id, 0);
            cpdev->edi->usb_send_ctx = NULL;
        }

        spin_lock_irqsave(&cpbdev->processing_state_lock, flags);
        done = true;
    }
    spin_unlock_irqrestore(&cpbdev->processing_state_lock, flags);
    return done;
}

static bool cp_lkm_usb_shuter_down_do_other_tasklet(void* ctx1, void* ctx2)
{
    struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev*)ctx1;
    unsigned long flags;
    bool done = false;
    spin_lock_irqsave(&cpbdev->other_state_lock, flags);
    if(cpbdev->other_state == USB_PROCESS_STATE_IDLE){
        cpbdev->other_state = USB_PROCESS_STATE_PAUSED;
        done = true;
    }
    spin_unlock_irqrestore(&cpbdev->other_state_lock, flags);
    return done;
}

static bool cp_lkm_usb_shuter_down_do_empty_queues(void* ctx1, void* ctx2)
{
    struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev*)ctx1;
    bool done = false;

    if (skb_queue_empty(&cpbdev->in_q) &&
        skb_queue_empty(&cpbdev->out_q) &&
        skb_queue_empty(&cpbdev->ctrlq)){
        done = true;
    }
    return done;
}

static void cp_lkm_usb_shuter_down(struct cp_lkm_usb_base_dev* cpbdev)
{
    struct list_head *entry, *nxt;
    struct cp_lkm_base_ep *bep;


    //printk("%s() start\n", __FUNCTION__);

    //Unlink from the pm and disable the data state machine
    LOG("Unlink cpdev from pm");
    cp_lkm_do_or_die(cpbdev, NULL, cp_lkm_usb_shuter_down_do_pm_unlink, CP_LKM_TIMEOUT_MS, CP_LKM_ITER, "Failed to unlink pm from cpdev");

    //disable the 'other' tasklet
    LOG("Disable cpdev other tasklet");
    cp_lkm_do_or_die(cpbdev, NULL, cp_lkm_usb_shuter_down_do_other_tasklet, CP_LKM_TIMEOUT_MS, CP_LKM_ITER, "Failed to shutdown cpdev other tasklet");

    //Once we get here no xmits can happen or any recv or xmit done processing can happen so no new kevents can be scheduled
    //so we can stop them here
    //clear all the flags before flushing the kevents so that we won't try to do anything during the kevent callback
    list_for_each_safe(entry, nxt, &cpbdev->in_bep_list) {
        bep = list_entry(entry, struct cp_lkm_base_ep, list);
        bep->err_flags = 0;
        bep->con_flags = 0;
    }
    list_for_each_safe(entry, nxt, &cpbdev->out_bep_list) {
        bep = list_entry(entry, struct cp_lkm_base_ep, list);
        bep->err_flags = 0;
        bep->con_flags = 0;
    }

    //This forces the kernel to run all scheduled kevents, so any of our pending ones will run. (Note: Make sure
    //our kevent handlers check to see if we are attached before doing anything so that we don't schedule anything new while
    //shutting down)
    LOG("Cancel cpdev kevents");
    cancel_work_sync(&cpbdev->kevent);

    //Make sure all the urbs have been cancelled
    // ensure there are no more active urbs
    //set_current_state(TASK_UNINTERRUPTIBLE);
    //these cause the urbs to be cancelled and the callbacks to be called. The urbs are removed from
    //the queues in the callbacks.
    cp_lkm_usb_unlink_urbs (cpbdev, &cpbdev->out_q, NULL);
    cp_lkm_usb_unlink_urbs (cpbdev, &cpbdev->in_q, NULL);
    cp_lkm_usb_unlink_urbs (cpbdev, &cpbdev->ctrlq, NULL);

    LOG("Wait for all cpdev urbs to be returned");
    cp_lkm_do_or_die(cpbdev, NULL, cp_lkm_usb_shuter_down_do_empty_queues, CP_LKM_TIMEOUT_MS, CP_LKM_ITER, "Failed to empty cpdev queues");

    //shutdown timer and tasklets
    LOG("Shutdown cpdev timers and tasklets");
    del_timer_sync (&cpbdev->rx_delay);
    cp_lkm_usb_stuck_check(cpbdev, CP_LKM_STUCK_DEINIT);

    tasklet_kill(&cpbdev->data_process_tasklet);
    tasklet_kill(&cpbdev->other_process_tasklet);

    // All outstanding transfers are back, so now we can clean up.
    cp_lkm_usb_clean_list(&cpbdev->data_tx_done);
    cp_lkm_usb_clean_list(&cpbdev->data_rx_done);
    cp_lkm_usb_clean_list(&cpbdev->other_done);

    //printk("%s() done\n", __FUNCTION__);
    usb_set_intfdata(cpbdev->intf, NULL);
    usb_put_intf(cpbdev->intf);
    cpbdev->intf = NULL;
    LOG("cpdev unplug done");

    return;

}

// Called when the USB hub detects that our device just unplugged.
// Called in a thread context. We do the lower usb cleanup here because there
// are some things that have to be done before exiting from disconnect.
// We don't clean up the upper layer stuff because the upper layer doesn't yet know
// we are unplugged and will continue to send us data. When the upper layer gets the
// unplug notify, it will call cp_lkm_usb_unplug_intf. We finish cleaning up in there.
void cp_lkm_usb_disconnect(struct usb_interface *intf)
{
    struct cp_lkm_usb_dev* cpdev;
    struct cp_lkm_usb_base_dev* cpbdev;
    //unsigned long flags;
    int unique_id;

    // We don't want this function to run at the same time as any of the calls from the modem common stack (ioctl and write)
    // They all grab this lock for the duration of their calls. They also check the state of the device before proceeding.
    // Once we have the lock, we know none of them are running. Any new calls will block waiting on the lock.
    // If we then change the state to dead we can release the lock while we do the rest of cleanup. When they get the lock
    // they will see the state is dead and error out and return immediately. This prevents us from blocking the common modem thread.
    spin_lock(&cp_lkm_usb_mgr.lock);

    //If cpdev is not in intf, then this is the close->disconnect path, so do nothing
    unique_id = (uintptr_t)usb_get_intfdata(intf);

    //struct usb_device *udev;
    //printk("%s() start, id: %d\n", __FUNCTION__, unique_id);

    //see if device already went away, this should be impossible
    //the unique id is always for the first instance if running clones
    cpdev = cp_lkm_usb_find_dev(unique_id);
    if(!cpdev) {
        //printk("%s() no cpdev, id: %d\n", __FUNCTION__, unique_id);
        spin_unlock(&cp_lkm_usb_mgr.lock);
        return;
    }
    cpbdev = cpdev->cpbdev;
    cpbdev->disconnect_wait = true;

    // Mark the device as dead so we won't start anything new.
    // NOTE: make sure nothing new can be started on the USB side from this point on.
    //       This includes transmits from the network. Transmits from cpusb.
    //       Recv packets, halt clears, ioctls etc
    cp_lkm_usb_mark_base_as_dead(cpbdev);

    // Once device is marked dead, we can release the semaphore. This is so write and ioctl from the modem stack
    // can return quickly with errors instead of blocking while the disconnect completes.
    spin_unlock(&cp_lkm_usb_mgr.lock);

    cp_lkm_usb_shuter_down(cpbdev);

    cpbdev->disconnect_wait = false;
    memref_deref(cpbdev);

    //printk("%s() done id: %d\n", __FUNCTION__,unique_id);
}

static void cp_lkm_usb_base_dev_finalize(void *arg)
{
    struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev*)arg;
    struct list_head *entry, *nxt;
    struct cp_lkm_base_ep *bep;
    //int unique_id = cpbdev->base_id;
    //printk("%s()\n", __FUNCTION__);

    //if was added to the list, need to remove it.
    if(cpbdev->list.next != &cpbdev->list) {
        spin_lock(&cp_lkm_usb_mgr.lock);
        list_del(&cpbdev->list);
        //printk("%s() free cpbdev from global list \n", __FUNCTION__);
        spin_unlock(&cp_lkm_usb_mgr.lock);
    }

    //These should already be empty, but just in case
    //printk("%s() clean lists\n", __FUNCTION__);
    cp_lkm_usb_clean_list(&cpbdev->in_q);
    cp_lkm_usb_clean_list(&cpbdev->out_q);
    cp_lkm_usb_clean_list(&cpbdev->ctrlq);
    cp_lkm_usb_clean_list(&cpbdev->data_tx_done);
    cp_lkm_usb_clean_list(&cpbdev->data_rx_done);
    cp_lkm_usb_clean_list(&cpbdev->other_done);

    if(cpbdev->wrapper_ctxt) {
        //printk("%s() free wrapper\n", __FUNCTION__);
        cp_lkm_wrapper_instance_free(cpbdev->wrapper_ctxt);
        cpbdev->wrapper_ctxt = NULL;
    }
    if(cpbdev->usb_driver) {
        //printk("%s() free driver\n", __FUNCTION__);
        kfree(cpbdev->usb_driver);
        cpbdev->usb_driver = NULL;
    }
    if(cpbdev->usb_id_table) {
        //printk("%s() free id table\n", __FUNCTION__);
        kfree(cpbdev->usb_id_table);
        cpbdev->usb_id_table = NULL;
    }
    if(cpbdev->udev) {
        //printk("%s() free udev\n", __FUNCTION__);
        usb_put_dev (cpbdev->udev);
        cpbdev->udev = NULL;
    }

    //printk("%s() - free eps\n",__FUNCTION__);
    list_for_each_safe(entry, nxt, &cpbdev->cpdev_list) {
        struct cp_lkm_usb_dev* cpdev = list_entry(entry, struct cp_lkm_usb_dev, list);
        //printk("%s() - free cpdev: %p from cpbdev: %p\n",__FUNCTION__, cpdev, cpbdev);
        list_del(&cpdev->list);
        memref_deref(cpdev);
    }
    list_for_each_safe(entry, nxt, &cpbdev->in_bep_list) {
        bep = list_entry(entry, struct cp_lkm_base_ep, list);
        //printk("%s() - free in bep: %p from cpbdev: %p\n",__FUNCTION__,bep, cpbdev);
        list_del(&bep->list);
        memref_deref(bep);
    }
    list_for_each_safe(entry, nxt, &cpbdev->out_bep_list) {
        bep = list_entry(entry, struct cp_lkm_base_ep, list);
        //printk("%s() - free out bep: %p from cpbdev: %p\n ",__FUNCTION__,bep, cpbdev);
        list_del(&bep->list);
        memref_deref(bep);
    }
    //printk("%s() done base_id: %d\n", __FUNCTION__,unique_id);

}
1532
1533static void cp_lkm_usb_dev_finalize(void *arg)
1534{
1535 struct cp_lkm_usb_dev* cpdev = (struct cp_lkm_usb_dev*)arg;
1536 struct list_head *entry, *nxt;
1537 struct cp_lkm_ep *ep;
1538
1539 //printk("%s() start\n", __FUNCTION__);
1540 //todo remove
1541 //del_timer_sync(&cpdev->dbg_timer);
1542
1543 //printk("%s() - free eps\n",__FUNCTION__);
1544 list_for_each_safe(entry, nxt, &cpdev->in_ep_list) {
1545 ep = list_entry(entry, struct cp_lkm_ep, list_cpdev);
1546 //printk("%s() - free ep: %p, num: %d from cpdev: %p\n",__FUNCTION__,ep, ep->ep_num, cpdev);
1547 list_del(&ep->list_cpdev);
1548 memref_deref(ep);
1549 }
1550 list_for_each_safe(entry, nxt, &cpdev->out_ep_list) {
1551 ep = list_entry(entry, struct cp_lkm_ep, list_cpdev);
1552 //printk("%s() - free ep: %p, num: %d from cpdev: %p\n",__FUNCTION__,ep, ep->ep_num, cpdev);
1553 list_del(&ep->list_cpdev);
1554 memref_deref(ep);
1555 }
1556
1557 if(cpdev->edi) {
1558 //printk("%s() free edi\n", __FUNCTION__);
1559 cpdev->edi->usb_send_ctx = NULL;
1560 cpdev->edi->usb_send = NULL;
1561
1562 memref_deref(cpdev->edi);
1563 cpdev->edi = NULL;
1564 }
1565
1566 //printk("%s() end \n", __FUNCTION__);
1567}
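
#if 0
/*
 * Sketch of the memref contract assumed by the two finalizers above.
 * memref_* is an internal helper layer whose implementation is not in
 * this file, so this is an educated guess, not its actual code: the
 * finalizer registered at alloc time runs once, when the last reference
 * is dropped via memref_deref().
 */
struct memref_hdr {
	atomic_t refs;
	void (*finalize)(void *obj);
};

static void *memref_alloc_and_zero_sketch(size_t size, void (*finalize)(void *obj))
{
	struct memref_hdr *hdr = kzalloc(sizeof(*hdr) + size, GFP_KERNEL);
	if (!hdr)
		return NULL;
	atomic_set(&hdr->refs, 1);    /* caller starts with one reference */
	hdr->finalize = finalize;
	return hdr + 1;               /* caller sees only the zeroed object */
}

static void memref_deref_sketch(void *obj)
{
	struct memref_hdr *hdr = (struct memref_hdr *)obj - 1;
	if (atomic_dec_and_test(&hdr->refs)) {
		if (hdr->finalize)
			hdr->finalize(obj);   /* e.g. cp_lkm_usb_dev_finalize */
		kfree(hdr);
	}
}
#endif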
1568
1569static int cp_lkm_usb_plug_intf(struct cp_lkm_usb_plug_intf* pi)
1570{
1571 int retval;
1572 struct cp_lkm_usb_dev* cpdev = NULL;
1573 struct cp_lkm_usb_base_dev* cpbdev = NULL;
1574 bool need_new;
1575 bool is_cloneable;
1576
1577 //Make sure we aren't going to overflow the skb space reserved for us to use
1578 //DEBUG_ASSERT(sizeof(struct skb_data) < sizeof(((struct sk_buff*)0)->cb));
1579 //DEBUG_INFO("%s(), skb_data size: %d, skb_buff cb size: %d",__FUNCTION__,sizeof(struct skb_data),sizeof(((struct sk_buff*)0)->cb));
1580
1581 // We need to alloc a new cpbdev on plug if:
1582 // 1. The device is not cloned at this layer (thus each plug has its own cpbdev)
1583 // Note: Some devices are cloned at other layers (cpusb_linux.c), so they can be running as clones in the system, but not at this layer.
1584 // This is why we can't just look at the clone_num to determine this. (See the worked example after this function.)
1585 // 2. It is cloneable and clone_num is 0 (only the first clone gets a new cpbdev, the rest share it)
1586 is_cloneable = pi->feature_flags & CP_LKM_FEATURE_CLONE_MUXED_INTF;
1587 need_new = !is_cloneable || (pi->clone_num == 0);
1588
1589 //printk("%s() start id:%d vid/pid: 0x%x/0x%x, bus/addr: %d/%d, intf: %d, flags: 0x%x, clone: %d, mux: %d\n", __FUNCTION__, pi->unique_id, pi->vid, pi->pid, pi->bus, pi->addr, pi->intf_num, pi->feature_flags, pi->clone_num, pi->mux_id);
1590
1591 if (need_new) {
1592 //first instance, so need a new cpbdev
1593 cpbdev = memref_alloc_and_zero(sizeof(struct cp_lkm_usb_base_dev), cp_lkm_usb_base_dev_finalize);
1594 if(!cpbdev) {
1595 //printk("%s() failed to alloc cpbdev\n", __FUNCTION__);
1596 goto init_fail;
1597 }
1598 //printk("%s() id: %d, alloc'd new cpbdev: %p\n", __FUNCTION__, pi->unique_id, cpbdev);
1599 cpbdev->base_state = CP_LKM_USB_INIT;
1600 cpbdev->vid = pi->vid;
1601 cpbdev->pid = pi->pid;
1602 cpbdev->intf_num = pi->intf_num;
1603 cpbdev->alt_intf_num = pi->alt_intf_num;
1604 cpbdev->usb_bus = pi->bus;
1605 cpbdev->usb_addr = pi->addr;
1606 cpbdev->feature_flags = pi->feature_flags;
1607 cpbdev->base_id = pi->unique_id;
1608 INIT_LIST_HEAD(&cpbdev->in_bep_list);
1609 INIT_LIST_HEAD(&cpbdev->out_bep_list);
1610 INIT_LIST_HEAD(&cpbdev->list);
1611 INIT_LIST_HEAD(&cpbdev->cpdev_list);
1612 cpbdev->data_in_bep_num = pi->ep_in;
1613 cpbdev->data_out_bep_num = pi->ep_out;
1614
1615 //alloc and register the usb driver
1616 cpbdev->usb_driver = kzalloc(sizeof(struct usb_driver), GFP_KERNEL);
1617 if(!cpbdev->usb_driver) {
1618 //printk("%s() failed to alloc driver\n", __FUNCTION__);
1619 goto init_fail;
1620 }
1621
1622 cpbdev->usb_id_table = kzalloc(sizeof(struct usb_device_id)*2, GFP_KERNEL);
1623 if(!cpbdev->usb_id_table) {
1624 //printk("%s() failed to alloc table\n", __FUNCTION__);
1625 goto init_fail;
1626 }
1627
1628 cpbdev->usb_id_table[0].idVendor = cpbdev->vid;
1629 cpbdev->usb_id_table[0].idProduct = cpbdev->pid;
1630 cpbdev->usb_id_table[0].match_flags = USB_DEVICE_ID_MATCH_DEVICE;
1631 cpbdev->usb_id_table[0].driver_info = (unsigned long)pi->unique_id;
1632
1633 //create unique drvr string
1634 sprintf(cpbdev->usb_drvr_name, USB_DRVR_FRMT_STR, pi->unique_id);
1635 cpbdev->usb_driver->name = cpbdev->usb_drvr_name;
1636 cpbdev->usb_driver->probe = cp_lkm_usb_probe;
1637 cpbdev->usb_driver->disconnect = cp_lkm_usb_disconnect;
1638 cpbdev->usb_driver->id_table = cpbdev->usb_id_table;
1639
1640
1641 skb_queue_head_init (&cpbdev->in_q);
1642 skb_queue_head_init (&cpbdev->out_q);
1643 skb_queue_head_init (&cpbdev->ctrlq);
1644 skb_queue_head_init (&cpbdev->data_tx_done);
1645 skb_queue_head_init (&cpbdev->data_rx_done);
1646 skb_queue_head_init (&cpbdev->other_done);
1647 cpbdev->data_q_len = 0;
1648 spin_lock_init(&cpbdev->data_q_lock);
1649 spin_lock_init(&cpbdev->processing_state_lock);
1650 spin_lock_init(&cpbdev->other_state_lock);
1651 cpbdev->processing_state = USB_PROCESS_STATE_IDLE;
1652 cpbdev->other_state = USB_PROCESS_STATE_IDLE;
1653 INIT_WORK(&cpbdev->kevent, cp_lkm_usb_kevent);
1654 timer_setup(&cpbdev->rx_delay, cp_lkm_usb_delay_timer, 0);
1655
1656 cpbdev->data_process_tasklet.func = cp_lkm_usb_process_data_done_tasklet; //TODO: modify to take cpbdev
1657 cpbdev->data_process_tasklet.data = (unsigned long) cpbdev;
1658
1659 cpbdev->other_process_tasklet.func = cp_lkm_usb_process_other_done_tasklet; //TODO: modify to take cpbdev
1660 cpbdev->other_process_tasklet.data = (unsigned long) cpbdev;
1661
1662 cpbdev->disconnect_wait = false;
1663
1664 spin_lock(&cp_lkm_usb_mgr.lock);
1665 list_add_tail(&cpbdev->list, &cp_lkm_usb_mgr.dev_list);
1666 spin_unlock(&cp_lkm_usb_mgr.lock);
1667
1668 // When we call register, it calls our probe function with all available matching interfaces. In probe
1669 // we save the result of the probe so we can return fail here if it didn't go well
1670 //printk("%s() reg drvr for vid:%x, pid:%x, addr:%d, intf:%d\n", __FUNCTION__, pi->vid,pi->pid,pi->addr,pi->intf_num);
1671 retval = usb_register(cpbdev->usb_driver);
1672 if(retval || cpbdev->plug_result != 0) {
1673 //printk("%s() failed to register driver or probe failed retval:%d, plug_result:%d\n", __FUNCTION__, retval, cpbdev->plug_result);
1674 goto init_fail;
1675 }
1676 cpbdev->base_state = CP_LKM_USB_CTRL;
1677 DEBUG_TRACE("%s() done", __FUNCTION__);
1678 }
1679 else{
1680 //clone, should already have a base dev
1681 cpbdev = cp_lkm_usb_find_base_dev(pi->bus, pi->addr, pi->unique_id, CP_LKM_DEV_MATCH_BUS_ADDR_ONLY);
1682 if(!cpbdev) {
1683 //printk("%s() failed to find cpbdev\n", __FUNCTION__);
1684 goto init_fail;
1685 }
1686 //printk("%s() id: %d, already have cpbdev: %p\n", __FUNCTION__, pi->unique_id, cpbdev);
1687 }
1688
1689 // make sure base dev has all the feature flags of every clone
1690 cpbdev->feature_flags |= pi->feature_flags;
1691
1692 //printk("%s() id: %d, cpbdev: %p, alloc new cpdev\n", __FUNCTION__, pi->unique_id, cpbdev);
1693 cpdev = memref_alloc_and_zero(sizeof(struct cp_lkm_usb_dev), cp_lkm_usb_dev_finalize);
1694 if(!cpdev) {
1695 //printk("%s() failed to alloc cpdev\n", __FUNCTION__);
1696 goto init_fail;
1697 }
1698 //printk("%s() id: %d, cpdev: %p\n", __FUNCTION__, pi->unique_id, cpdev);
1699
1700 INIT_LIST_HEAD(&cpdev->in_ep_list);
1701 INIT_LIST_HEAD(&cpdev->out_ep_list);
1702 INIT_LIST_HEAD(&cpdev->list);
1703 //add to list right away so if anything below fails, it will be cleaned up when cpbdev is cleaned up
1704 list_add_tail(&cpdev->list, &cpbdev->cpdev_list);
1705 cpdev->cpbdev = cpbdev;
1706 cpdev->unique_id = pi->unique_id;
1707 //clone and mux are only used with muxed clone interfaces.
1708 cpdev->clone_num = (pi->feature_flags & CP_LKM_FEATURE_CLONE_MUXED_INTF) ? pi->clone_num : 0;
1709 cpdev->mux_id = (pi->feature_flags & CP_LKM_FEATURE_CLONE_MUXED_INTF) ? pi->mux_id : CP_LKM_WRAPPER_DEFAULT_ID;
1710 //printk("%s() unique_id: %d, clone: %d, mux_id: %d\n", __FUNCTION__, pi->unique_id, pi->clone_num, cpdev->mux_id);
1711 cpdev->data_in_ep_num = pi->ep_in;
1712 cpdev->data_out_ep_num = pi->ep_out;
1713 //pre-create the data endpoints so they will be first in the list, since they are most often used
1714 cp_lkm_usb_create_ep(cpdev, pi->ep_in);
1715 cp_lkm_usb_create_ep(cpdev, pi->ep_out);
1716 cpdev->edi = memref_alloc_and_zero(sizeof(struct cp_lkm_edi), NULL);
1717 if(!cpdev->edi) {
1718 //printk("%s() failed to alloc edi\n", __FUNCTION__);
1719 goto init_fail;
1720 }
1721 cpdev->edi->usb_send = cp_lkm_usb_start_xmit;
1722
1723 //for debug, comment out before checkin
1724 //cpdev->dbg_timer.function = cp_lkm_usb_dbg_timer;
1725 //cpdev->dbg_timer.data = (unsigned long)cpdev;
1726 //init_timer(&cpdev->dbg_timer);
1727 //mod_timer(&cpdev->dbg_timer, jiffies + msecs_to_jiffies(10000));
1728
1729 //TODO CA: I think this shouldn't be set until open, commenting out for now to see if it blows chow in plug fest
1730 //cpdev->edi->usb_send_ctx = cpdev;
1731
1732 cpdev->state = CP_LKM_USB_CTRL;
1733
1734 //printk("%s() done success id: %d\n", __FUNCTION__, pi->unique_id);
1735
1736 return 0;
1737
1738init_fail:
1739 if(cpbdev) {
1740 //the finalizer for cpbdev does the clean up
1741 memref_deref(cpbdev);
1742 }
1743 //returning an error to the modem stack on plug will cause it to hard reset
1744 //the modem, thus causing the rest of the driver cleanup to occur
1745 //printk("%s() open_intf fail\n", __FUNCTION__);
1746 return -1;
1747}
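
/*
 * Worked example of the need_new decision above:
 *
 *   is_cloneable  clone_num   need_new
 *   false         -           true    (every plug gets its own cpbdev)
 *   true          0           true    (first clone allocates the cpbdev)
 *   true          1..n        false   (later clones share the first one)
 *
 * i.e. need_new == !is_cloneable || clone_num == 0.
 */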
1748
1749static int cp_lkm_usb_set_wrapper(struct cp_lkm_usb_set_wrapper* sw)
1750{ //unsigned long flags;
1751 struct cp_lkm_usb_dev* cpdev;
1752 struct cp_lkm_usb_base_dev* cpbdev;
1753 void* wrapper_info = NULL;
1754 unsigned long not_copied;
1755 int res = 0;
1756 //printk("%s() unique_id: %d, clone: %d, mux_id: %d\n", __FUNCTION__, sw->unique_id, sw->clone_num, sw->mux_id);
1757
1758 spin_lock(&cp_lkm_usb_mgr.lock);
1759 cpdev = cp_lkm_usb_find_dev(sw->unique_id);
1760
1761 if(!cpdev) {
1762 spin_unlock(&cp_lkm_usb_mgr.lock);
1763 //printk("%s() no cpdev found for id: %d\n", __FUNCTION__, sw->unique_id);
1764 return -1;
1765 }
1766 cpbdev = cpdev->cpbdev;
1767 if(cpbdev->base_state == CP_LKM_USB_DEAD){
1768 //modem is unplugging, upper layer just doesn't know it yet, so act like ok until it finds out
1769 spin_unlock(&cp_lkm_usb_mgr.lock);
1770 //printk("%s() set_wrapper fail cpdev:%p, state:%d\n", __FUNCTION__, cpdev, cpdev->state);
1771 return 0;
1772 }
1773
1774// benk - note: if wrapper_info_len is 0, wrapper_info stays NULL and is passed through to the wrapper alloc below
1775 if(cpbdev->wrapper_ctxt){
1776 //already have a wrapper so free it
1777 cp_lkm_wrapper_instance_free(cpbdev->wrapper_ctxt);
1778 }
1779
1780 if(sw->wrapper_info_len) {
1781 wrapper_info = kzalloc(sw->wrapper_info_len, GFP_KERNEL);
1782 if(!wrapper_info) {
1783 DEBUG_ERROR("%s() couldn't alloc wrapper info", __FUNCTION__);
1784 res = -1;
1785 goto set_wrapper_done;
1786 }
1787 }
1788
1789
1790 //copy the wrapper info from user to kernel space
1791 not_copied = copy_from_user(wrapper_info, sw->wrapper_info, sw->wrapper_info_len);
1792 if (not_copied) {
1793 DEBUG_ERROR("%s() couldn't copy wrapper info", __FUNCTION__);
1794 res = -1;
1795 goto set_wrapper_done;
1796 }
1797 //alloc the wrapper instance. On success it takes ownership of the wrapper_info and is responsible for freeing it
1798 DEBUG_INFO("%s() wrapper: %d", __FUNCTION__, sw->wrapper);
1799 cpbdev->wrapper_ctxt = cp_lkm_wrapper_instance_alloc(sw->wrapper, wrapper_info, sw->wrapper_info_len);
1800 if(!cpbdev->wrapper_ctxt){
1801 DEBUG_ERROR("%s() couldn't alloc wrapper", __FUNCTION__);
1802 res = -1;
1803 goto set_wrapper_done;
1804 }
 //success: per the ownership note above, the wrapper instance now owns wrapper_info
 //and will free it, so clear our pointer to avoid a double free in the cleanup below
 wrapper_info = NULL;
1805 cpbdev->wrapper_hdr_size = cp_lkm_wrapper_hdr_size(cpbdev->wrapper_ctxt);
1806 cp_lkm_wrapper_set_state(cpbdev->wrapper_ctxt, cpdev->mux_id, CP_LKM_WRAPPER_CTRL);
1807
1808 cpdev->clone_num = sw->clone_num;
1809 cpdev->mux_id = sw->mux_id;
1810
1811
1812set_wrapper_done:
1813 if(wrapper_info) {
1814 kfree(wrapper_info);
1815 }
1816
1817 spin_unlock(&cp_lkm_usb_mgr.lock);
1818 return res;
1819
1820}
1821
1822static int cp_lkm_usb_set_mux_id(struct cp_lkm_usb_set_mux_id* smi)
1823{ //unsigned long flags;
1824 struct cp_lkm_usb_dev* cpdev;
1825 //struct cp_lkm_usb_base_dev* cpbdev;
1826 int res = 0;
1827
1828 //printk("%s()\n", __FUNCTION__);
1829
1830 spin_lock(&cp_lkm_usb_mgr.lock);
1831 cpdev = cp_lkm_usb_find_dev(smi->unique_id);
1832 if(!cpdev) {
1833 spin_unlock(&cp_lkm_usb_mgr.lock);
1834 //printk("%s() failed to find cpdev for id: %d\n", __FUNCTION__, smi->unique_id);
1835 return -1;
1836 }
1837 if(cpdev->cpbdev->base_state == CP_LKM_USB_DEAD){
1838 //modem is unplugging, upper layer just doesn't know it yet, so act like ok until it finds out
1839 spin_unlock(&cp_lkm_usb_mgr.lock);
1840 return 0;
1841 }
1842 cpdev->mux_id = smi->mux_id;
1843 //printk("%s() unique_id: %d, mux_id: %d\n", __FUNCTION__, smi->unique_id, smi->mux_id);
1844
1845 spin_unlock(&cp_lkm_usb_mgr.lock);
1846 return res;
1847
1848}
1849
1850static int cp_lkm_usb_open_intf(struct cp_lkm_usb_open_intf* oi)
1851{
1852 //unsigned long flags;
1853 struct cp_lkm_usb_dev* cpdev;
1854
1855 //printk("%s() u-uid: %d\n", __FUNCTION__,oi->unique_id);
1856
1857 spin_lock(&cp_lkm_usb_mgr.lock);
1858 cpdev = cp_lkm_usb_find_dev(oi->unique_id);
1859
1860 //if state isn't CP_LKM_USB_CTRL, then the interface either did not plug for some reason (i.e. didn't get probe from usb),
1861 //or it plugged, but then unplugged before open was called.
1862 if(!cpdev || cpdev->cpbdev->base_state != CP_LKM_USB_CTRL) {
1863 spin_unlock(&cp_lkm_usb_mgr.lock);
1864 //printk("%s() open_intf fail cpdev:%p, state:%d\n", __FUNCTION__, cpdev, cpdev?cpdev->state:0xff);
1865 return -1;
1866 }
1867 cpdev->state = CP_LKM_USB_ACTIVE;
1868 cpdev->edi->usb_send_ctx = cpdev; //this allows the network side to call me
1869 cp_lkm_wrapper_set_state(cpdev->cpbdev->wrapper_ctxt, cpdev->mux_id, CP_LKM_WRAPPER_ACTIVE);
1870 spin_unlock(&cp_lkm_usb_mgr.lock);
1871 //printk("%s() done\n", __FUNCTION__);
1872 return 0;
1873
1874}
1875
1876static int cp_lkm_usb_close_intf(struct cp_lkm_usb_close_intf* ci)
1877{
1878 //unsigned long flags;
1879 struct cp_lkm_usb_dev* cpdev;
1880
1881 //printk("%s() u-uid: %d\n", __FUNCTION__, ci->unique_id);
1882
1883 //down(&cp_lkm_usb_mgr.thread_sem);
1884 spin_lock(&cp_lkm_usb_mgr.lock);
1885 cpdev = cp_lkm_usb_find_dev(ci->unique_id);
1886
1887 if(!cpdev || cpdev->cpbdev->base_state == CP_LKM_USB_DEAD) {
1888 //device has already unplugged, or is half-unplugged, so don't allow this action to complete
1889 spin_unlock(&cp_lkm_usb_mgr.lock);
1890 //up(&cp_lkm_usb_mgr.thread_sem);
1891 return 0;
1892 }
1893 cpdev->edi->usb_send_ctx = NULL; //disconnect from network side so he won't send me any more data
1894 cpdev->state = CP_LKM_USB_CTRL;
1895 cp_lkm_wrapper_set_state(cpdev->cpbdev->wrapper_ctxt, cpdev->mux_id, CP_LKM_WRAPPER_CTRL);
1896 spin_unlock(&cp_lkm_usb_mgr.lock);
1897 //up(&cp_lkm_usb_mgr.thread_sem);
1898 //printk("%s() done\n", __FUNCTION__);
1899
1900 return 0;
1901}
1902
1903static bool cp_lkm_usb_unplug_do_disconnect_wait(void* ctx1, void* ctx2)
1904{
1905 struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev*)ctx1;
1906 bool done = false;
1907 if (cpbdev->disconnect_wait == false){
1908 done = true;
1909 }
1910 return done;
1911}
1912
1913/*
1914 * This function is called when the common modem stack wants to give up the interface.
1915 * There are two scenarios:
1916 * 1. Modem unplugs which leads to the following flow:
1917 * -> cp_lkm_usb_disconnect is called by USB sublayer, it cleans up bottom half of cpdev and waits for common modem stack unplug
1918 * -> common modem stack sees unplug event
1919 * -> it calls this function to finish the cleanup and deregister the driver
1920 * -> we are done
1921 *
1922 * 2. Common modem stack decides to give up the interface due to one common
1923 * modem driver relinquishing the modem and another common modem driver grabbing it.
1924 * This leads to the following flow:
1925 * -> Common modem stack calls this function.
1926 * -> it calls usb_deregister() which will call cp_lkm_usb_disconnect in context
1927 * -> cp_lkm_usb_disconnect shuts down and frees the usb interface
1928 * -> After usb_deregister() exits we finish and exit.
1929 *
1930 * Notes: This means the two shutdown functions, this one and cp_lkm_usb_disconnect, can be
1931 * run in any order, so they must not stomp on each other. For example, since
1932 * cp_lkm_usb_disconnect frees the interface with the kernel, this function had better
1933 * not do anything that requires the interface after calling usb_deregister().
1934 *
1935 * The modem stack is single threaded so this function can never be reentrant
1936 */
1937static int cp_lkm_usb_unplug_intf(struct cp_lkm_usb_unplug_intf* ui)
1938{
1939 //find dev in list by unique id
1940 struct cp_lkm_usb_dev* cpdev;
1941 struct cp_lkm_usb_base_dev* cpbdev;
1942 bool shuter_down = true;
1943 struct list_head *pos;
1944
1945 //printk("%s() start id: %d\n", __FUNCTION__, ui->unique_id);
1946 spin_lock(&cp_lkm_usb_mgr.lock);
1947 //The device should always exist, but if it doesn't, there is no need to blow up, so exit peacefully
1948 cpdev = cp_lkm_usb_find_dev(ui->unique_id);
1949 if(!cpdev) {
1950 spin_unlock(&cp_lkm_usb_mgr.lock);
1951 return -1;
1952 }
1953 cpbdev = cpdev->cpbdev;
1954
1955 cp_lkm_usb_mark_as_dead(cpdev);
1956
1957 list_for_each(pos, &cpbdev->cpdev_list){
1958 struct cp_lkm_usb_dev* tmp_cpdev = list_entry(pos, struct cp_lkm_usb_dev, list);
1959 if(tmp_cpdev->state != CP_LKM_USB_DEAD) {
1960 //don't shut down until all clone devices have unplugged
1961 shuter_down = false;
1962 break;
1963 }
1964 }
1965
1966 //release the lock before calling usb_deregister because it causes disconnect to be called in context for case 2 in the header comments,
1967 //and disconnect will try to grab the lock, so we would deadlock
1968 spin_unlock(&cp_lkm_usb_mgr.lock);
1969
1970 if (shuter_down) {
1971 LOG("Wait for cpdev to finish unplugging");
1972 cp_lkm_do_or_die(cpbdev, NULL, cp_lkm_usb_unplug_do_disconnect_wait,CP_LKM_TIMEOUT_MS,CP_LKM_ITER,"cpdev failed to finish disconnecting");
1973
1974 //printk("%s() usb_deregister\n",__FUNCTION__);
1975 usb_deregister(cpbdev->usb_driver);
1976
1977 /* clean up */
1978 memref_deref(cpbdev);
1979
1980 }
1981 /* IMPORTANT: don't do anything other than deref after call to deregister*/
1982
1983 LOG("cpdev done unplugging");
1984
1985 return 0;
1986}
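
#if 0
/*
 * Sketch of the polling-wait shape implied by the cp_lkm_do_or_die()
 * call sites above. The real helper lives elsewhere in this driver and
 * may recover differently on exhaustion; names and timing here are
 * hypothetical.
 */
static void my_do_or_die(void *ctx1, void *ctx2,
                         bool (*done)(void *ctx1, void *ctx2),
                         int per_iter_ms, int iters, const char *msg)
{
	while (!done(ctx1, ctx2)) {
		if (--iters <= 0)
			panic("%s", msg);     /* "or die": give up loudly */
		msleep(per_iter_ms);          /* let the other side finish */
	}
}
#endif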
1987
1988/*
1989 * Handle endpoint action requests from modem stack.
1990 *
1991 * Important things to know:
1992 * In normal mode:
1993 * 1. There will be 1 cpdev per cpbdev, and 1 ep per bep.
1994 * 2. Every different ep can either be listened on or recv'd on, but never both at the same time
1995 *
1996 * In clone mode:
1997 * 2. There will be n cpdevs per cpbdev, and n eps per bep (depending on the number of clones).
1998 * 2. Every different ep can either be listened on or recv'd on, but never both at the same time.
1999 * 3. All cloned data eps can be listened on at the same time (data header allows us to mux data between all the data eps, data endpoints don't use recv).
2000 * 4. With all other cloned eps of the same type (AT, CNS, QMI), only one clone can be listened on or recv'd on at a time.
2001 * This is because there are no headers on these channels to let us know where to mux the data to. Fortunately, the
2002 * modem stack enforces this, so we don't have to enforce it here, but we can use it to know how to route cloned packets
2003 * coming in on non-data channel endpoints. (See the usage sketch after this function.)
2004*/
2005static int cp_lkm_usb_ep_action(struct cp_lkm_usb_ep_action* ea)
2006{
2007 struct cp_lkm_ep* ep;
2008 struct cp_lkm_base_ep* bep = NULL;
2009 struct cp_lkm_usb_dev* cpdev;
2010 struct cp_lkm_usb_base_dev* cpbdev;
2011 //unsigned long flags;
2012 int pump_recv = 0;
2013
2014 //printk("%s() - action: %d, ep_num: 0x%x, id: %d\n",__FUNCTION__,ea->action, ea->ep_num, ea->unique_id);
2015
2016 spin_lock(&cp_lkm_usb_mgr.lock);
2017 //There should always be a device, and it should always be plugged
2018 cpdev = cp_lkm_usb_find_dev(ea->unique_id);
2019 if(!cpdev) {
2020 spin_unlock(&cp_lkm_usb_mgr.lock);
2021 //printk("%s() no device found for unique id: %d\n", __FUNCTION__, ea->unique_id);
2022 return -1;
2023 }
2024
2025 cpbdev = cpdev->cpbdev;
2026 if(cpbdev->base_state == CP_LKM_USB_INIT) {
2027 spin_unlock(&cp_lkm_usb_mgr.lock);
2028 //printk("%s() no probe yet, unique_id: %d, action: %d\n", __FUNCTION__,ea->unique_id,ea->action);
2029 return -1;
2030 }
2031 if(cpbdev->base_state == CP_LKM_USB_DEAD) {
2032 // The device can unplug down here before cpusb knows about it so it can continue to send us stuff.
2033 // The modem will unplug soon so just act like we did it and return ok. I didn't want to
2034 // return an error because that might cause cpusb unnecessary heartburn.
2035 spin_unlock(&cp_lkm_usb_mgr.lock);
2036 //printk("%s() cpdev already dead, shouldn't be doing this: id: %d, action: %d cpbdev: %p, cpdev: %p\n", __FUNCTION__,ea->unique_id,ea->action,cpbdev,cpdev);
2037 return 0;
2038 }
2039 DEBUG_ASSERT(cpbdev, "cpbdev is null");
2040 //create the ep if it doesn't already exist
2041 if(ea->action == EP_ACTION_CREATE) {
2042 cp_lkm_usb_create_ep(cpdev, ea->ep_num);
2043 }
2044
2045 if (ea->action == EP_ACTION_FLUSH_CONTROL) {
2046 ep = NULL;
2047 } else {
2048 ep = cp_lkm_usb_get_ep(cpdev, ea->ep_num);
2049 if(!ep) {
2050 spin_unlock(&cp_lkm_usb_mgr.lock);
2051 //printk("%s() failed to find ep: 0x%x for action: %d\n", __FUNCTION__, ea->ep_num, ea->action);
2052 return -1;
2053 }
2054 bep = ep->bep;
2055 DEBUG_ASSERT(bep,"base ep is null");
2056 }
2057
2058
2059 //if (ep && ea->action != EP_ACTION_RECV) {
2060 // printk("%s() - action: %d, ep_num: 0x%x, bep: %p, ep: %p, cpbdev: %p, cpdev: %p, id: %d\n",__FUNCTION__,ea->action, ea->ep_num, bep, ep, bep->cpbdev, ep->cpdev,ea->unique_id);
2061 //}
2062
2063 //printk("ea->action: %d, ep_num: %d\n", ea->action, ea->ep_num);
2064 switch(ea->action) {
2065 case EP_ACTION_CREATE:
2066 //printk("%s() - action: %d, ep_num: 0x%x, bep: %p, ep: %p, cpbdev: %p, cpdev: %p, id: %d\n",__FUNCTION__,ea->action, ea->ep_num, bep, ep, bep->cpbdev, ep->cpdev,ea->unique_id);
2067 //initialize endpoint fields
2068 bep->type = ea->ep_type;
2069 bep->max_transfer_size = ea->max_transfer_size;
2070 bep->interval = ea->interval;
2071
2072 DEBUG_ASSERT(cpbdev->udev,"udev is null");
2073 if(bep->ep_num & USB_DIR_IN) { //in
2074 if(bep->type == UE_BULK) {
2075 bep->pipe = usb_rcvbulkpipe(cpbdev->udev,bep->ep_num);
2076 }
2077 else{ //interrupt
2078 bep->pipe = usb_rcvintpipe(cpbdev->udev, bep->ep_num);
2079 }
2080 }
2081 else{ //out
2082 if(bep->type == UE_BULK) {
2083 bep->pipe = usb_sndbulkpipe(cpbdev->udev,bep->ep_num);
2084 }
2085 else{ //interrupt
2086 bep->pipe = usb_sndintpipe(cpbdev->udev, bep->ep_num);
2087 }
2088 }
2089 DEBUG_TRACE("%s() create action:%d, ep:0x%x, type:%d, pipe:0x%x", __FUNCTION__, ea->action, ea->ep_num, ea->ep_type, bep->pipe);
2090 break;
2091
2092 case EP_ACTION_LISTEN:
2093 DEBUG_TRACE("%s() listen action:%d, ep:0x%x, type:%d, pipe:0x%x", __FUNCTION__, ea->action, ea->ep_num, ea->ep_type, bep->pipe);
2094 ep->con_flags |= CP_LKM_USB_LISTEN;
2095 //listen on any endpoint starts listen on base
2096 bep->con_flags |= CP_LKM_USB_LISTEN;
2097 pump_recv = 1;
2098 break;
2099
2100 case EP_ACTION_LISTEN_STOP:
2101 {
2102 bool listen_done = true;
2103 struct list_head *entry, *nxt;
2104 struct cp_lkm_ep *tmp_ep;
2105
2106 DEBUG_TRACE("%s() listen stop action:%d, ep:0x%x, type:%d, pipe:0x%x", __FUNCTION__, ea->action, ea->ep_num, ea->ep_type, bep->pipe);
2107
2108 // the ep is done listening
2109 ep->con_flags &= ~CP_LKM_USB_LISTEN;
2110
2111 //now see if all eps on this bep are done listening
2112 list_for_each_safe(entry, nxt, &bep->eps) {
2113 tmp_ep = list_entry(entry, struct cp_lkm_ep, list_bep);
2114 if(tmp_ep->con_flags & CP_LKM_USB_LISTEN) {
2115 //if any of the eps on the bep still listening, then still listen on the bep
2116 listen_done = false;
2117 break;
2118 }
2119 }
2120 if(listen_done) {
2121 bep->con_flags &= ~CP_LKM_USB_LISTEN;
2122 //If the RX_HALT bit is set then there is an error on this endpoint and the kevent will be scheduled to fix the error. As part of the fix
2123 //it will unlink the urbs. Bad things can happen if we call cp_lkm_usb_unlink_urbs here at the same time the kevent handler is calling it
2124 if(!test_bit (EVENT_RX_HALT, &bep->err_flags)){
2125 //TODO CORY: is it ok to call unlink while holding the global lock?? Can I set a flag and run the tasklet to do the work instead??
2126 cp_lkm_usb_unlink_urbs(cpbdev, &cpbdev->in_q, bep);
2127 }
2128 }
2129 }
2130 break;
2131
2132 case EP_ACTION_RECV:
2133 DEBUG_TRACE("%s() recv action:%d, ep:0x%x, type:%d, pipe:0x%x", __FUNCTION__, ea->action, ea->ep_num, ea->ep_type, bep->pipe);
2134 // can only have one pending recv on a given ep
2135 ep->con_flags |= CP_LKM_USB_RECV;
2136 bep->con_flags |= CP_LKM_USB_RECV;
2137 pump_recv = 1;
2138 break;
2139
2140 case EP_ACTION_FLUSH_CONTROL:
2141 //printk("%s() flush control action:%d\n", __FUNCTION__, ea->action);
2142 //TODO CORY: is it ok to call unlink while holding the global lock?? Can I set a flag and run the tasklet to do the work instead??
2143 //We don't schedule kevents to clear endpoint halts since they are self recovering so we don't need to test the halt bits on the ctrl channel
2144 cp_lkm_usb_unlink_urbs(cpbdev, &cpbdev->ctrlq, NULL);
2145 break;
2146
2147 case EP_ACTION_SET_MAX_TX_SIZE:
2148 //printk("%s() set max tx size to %d on ep: 0x%x\n",__FUNCTION__,ea->max_transfer_size, ea->ep_num);
2149 bep->max_transfer_size = ea->max_transfer_size;
2150 break;
2151
2152 default:
2153 break;
2154 }
2155
2156
2157 if(pump_recv) {
2158 cp_lkm_schedule_rx_restock(cpbdev, bep);
2159 }
2160
2161 spin_unlock(&cp_lkm_usb_mgr.lock);
2162
2163 return 0;
2164}
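
#if 0
/*
 * Usage sketch (not driver code): the order in which the modem stack is
 * expected to drive endpoint actions through the handler above. Field
 * values (ep 0x81, 2048 bytes) are made up for illustration.
 */
static void my_ep_action_example(int unique_id)
{
	struct cp_lkm_usb_ep_action ea = {0};

	ea.unique_id = unique_id;
	ea.ep_num = 0x81;                  /* hypothetical bulk-in endpoint */

	ea.action = EP_ACTION_CREATE;      /* 1: create and init the ep */
	ea.ep_type = UE_BULK;
	ea.max_transfer_size = 2048;
	cp_lkm_usb_ep_action(&ea);

	ea.action = EP_ACTION_LISTEN;      /* 2: keep recv urbs posted */
	cp_lkm_usb_ep_action(&ea);

	ea.action = EP_ACTION_LISTEN_STOP; /* 3: stop and unlink posted urbs */
	cp_lkm_usb_ep_action(&ea);
}
#endif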
2165
2166static bool cp_lkm_usb_do_pm_link(void* ctx1, void* ctx2)
2167{
2168 struct cp_lkm_usb_dev* cpdev = (struct cp_lkm_usb_dev*)ctx1;
2169 struct cp_lkm_usb_base_dev* cpbdev = cpdev->cpbdev;
2170 struct cp_lkm_usb_pm_link* upl = (struct cp_lkm_usb_pm_link*)ctx2;
2171 unsigned long flags;
2172 bool done = false;
2173 int rc;
2174
2175 //printk("%s() usb id: %d, pm id: %d, link: %d\n", __FUNCTION__, upl->usb_unique_id, upl->pm_unique_id ,upl->link);
2176
2177 // We are getting ready to either link or unlink the usb to the protocol manager. This means we will be changing
2178 // function pointers that are used by the data processing state machine and by the code that schedules the data
2179 // processing machine.
2180 //
2181 // We need to shut both of those down before doing the linking.
2182 // 1: We shut the machine down by setting the state to USB_PROCESS_STATE_PAUSED.
2183 // 2: We shut down the scheduling by setting the data_q_len to CP_LKM_USB_PAUSED_CNT so the hw interrupts won't schedule a process
2184 spin_lock_irqsave(&cpbdev->processing_state_lock, flags);
2185 if(cpbdev->processing_state == USB_PROCESS_STATE_IDLE){
2186 cpbdev->processing_state = USB_PROCESS_STATE_PAUSED; //pauses the data processing soft irq handler
2187
2188 spin_lock(&cpbdev->data_q_lock);
2189 cpbdev->data_q_len = CP_LKM_USB_PAUSED_CNT; //stops the hw irq handlers from trying to schedule the soft irq handler
2190 spin_unlock(&cpbdev->data_q_lock);
2191
2192 if(upl->link) {
2193 cpdev->edi->usb_send_ctx = cpdev;
2194 }
2195
2196 //release lock while calling pm since we don't know how long they may take. We have already set the processing_state to
2197 //paused so the soft interrupt routines won't try to do anything so we are safe.
2198 spin_unlock_irqrestore(&cpbdev->processing_state_lock, flags);
2199
2200 rc = cp_lkm_pm_usb_link(cpdev->edi, upl->pm_unique_id, upl->link);
2201 DEBUG_ASSERT(rc == 0, "Failed to link usb and pm");
2202
2203 spin_lock_irqsave(&cpbdev->processing_state_lock, flags);
2204 if(upl->link) {
2205 if (cpdev->edi->pm_get_hdr_size && cpdev->edi->pm_recv_ctx) {
2206 cpdev->edi->pm_get_hdr_size(cpdev->edi->pm_recv_ctx, cpbdev->wrapper_hdr_size, &cpbdev->pm_hdr_size, &cpbdev->pm_hdr_offset);
2207 }
2208 }
2209 else{
2210 cpdev->edi->usb_send_ctx = NULL;
2211 }
2212
2213 cpdev->pm_id = upl->pm_unique_id;
2214
2215 spin_lock(&cpbdev->data_q_lock);
2216 //set things back up properly before re-enabling the soft irq and hardware handlers
2217 cpbdev->data_q_len = cpbdev->data_rx_done.qlen + cpbdev->data_tx_done.qlen; //this must be set before calling schedule_data_process
2218 spin_unlock(&cpbdev->data_q_lock);
2219
2220 cpbdev->processing_state = USB_PROCESS_STATE_IDLE;
2221 done = true;
2222 }
2223 spin_unlock_irqrestore(&cpbdev->processing_state_lock, flags);
2224
2225 return done;
2226}
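
#if 0
/*
 * The pause/relink pattern above, distilled (names match the driver,
 * error paths omitted): stop the soft-irq machine, stop its schedulers,
 * swap the function pointers, then restore and re-arm.
 */
spin_lock_irqsave(&cpbdev->processing_state_lock, flags);
cpbdev->processing_state = USB_PROCESS_STATE_PAUSED;  /* 1: stop the machine */
spin_lock(&cpbdev->data_q_lock);
cpbdev->data_q_len = CP_LKM_USB_PAUSED_CNT;           /* 2: stop the schedulers */
spin_unlock(&cpbdev->data_q_lock);
spin_unlock_irqrestore(&cpbdev->processing_state_lock, flags);

/* ... swap usb_send_ctx / pm pointers here; may sleep ... */

spin_lock_irqsave(&cpbdev->processing_state_lock, flags);
spin_lock(&cpbdev->data_q_lock);
cpbdev->data_q_len = cpbdev->data_rx_done.qlen + cpbdev->data_tx_done.qlen;
spin_unlock(&cpbdev->data_q_lock);
cpbdev->processing_state = USB_PROCESS_STATE_IDLE;    /* 3: re-enable */
spin_unlock_irqrestore(&cpbdev->processing_state_lock, flags);
#endif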
2227
2228static int cp_lkm_usb_pm_link(struct cp_lkm_usb_pm_link* upl)
2229{
2230 struct cp_lkm_usb_dev* cpdev;
2231 struct cp_lkm_usb_base_dev* cpbdev;
2232
2233 spin_lock(&cp_lkm_usb_mgr.lock);
2234 //There should always be a device, and it should always be plugged
2235 cpdev = cp_lkm_usb_find_dev(upl->usb_unique_id);
2236
2237 //printk("%s() cpdev: %p, u-uid: %d, pm-uid: %d, up: %d\n", __FUNCTION__, cpdev, upl->usb_unique_id, upl->pm_unique_id, upl->link);
2238
2239 if(!cpdev || cpdev->cpbdev->base_state == CP_LKM_USB_INIT) {
2240 spin_unlock(&cp_lkm_usb_mgr.lock);
2241 //printk("%s() no device or no probe yet\n", __FUNCTION__);
2242 return -1;
2243 }
2244 cpbdev = cpdev->cpbdev;
2245 // The device can unplug down here before cpusb knows about it so it can continue to send us stuff.
2246 // The modem will unplug soon so just act like we did it and return ok. I didn't want to
2247 // return an error because that might cause cpusb unnecessary heartburn.
2248 if(cpbdev->base_state == CP_LKM_USB_DEAD) {
2249 spin_unlock(&cp_lkm_usb_mgr.lock);
2250 //printk("%s() device already unplugged\n", __FUNCTION__);
2251 return 0;
2252 }
2253
2254 //printk("%s() usb id: %d, pm id: %d, link: %d\n", __FUNCTION__, upl->usb_unique_id, upl->pm_unique_id ,upl->link);
2255 // We are getting ready to either link or unlink the usb to the protocol manager. This means we will be changing
2256 // function pointers that are used by the data processing state machine and by the code that schedules the data
2257 // processing machine.
2258 //
2259 // We need to shut both of those down before doing the linking.
2260 // 1: We shut the machine down by setting the state to USB_PROCESS_STATE_PAUSED.
2261 // 2: We shut down the scheduling by setting the data_q_len to CP_LKM_USB_PAUSED_CNT so the hw interrupts won't schedule a process
2262 cp_lkm_do_or_die(cpdev, upl, cp_lkm_usb_do_pm_link, CP_LKM_TIMEOUT_MS, CP_LKM_ITER, "cpdev failed to link with pm");
2263
2264 //printk("%s() done\n", __FUNCTION__);
2265 spin_unlock(&cp_lkm_usb_mgr.lock);
2266 //force a resume
2267 cp_lkm_schedule_data_process(cpbdev, false, true, false);
2268 return 0;
2269}
2270
2271static int cp_lkm_usb_is_alive_intf(struct cp_lkm_usb_is_alive_intf *alivei)
2272{
2273 //find dev in list by unique id
2274 struct cp_lkm_usb_dev *cpdev;
2275 int alive;
2276
2277 //printk("%s() start\n", __FUNCTION__);
2278 spin_lock(&cp_lkm_usb_mgr.lock);
2279 //The device should always exist, but if it doesn't, there is no need to blow up, so exit peacefully
2280 cpdev = cp_lkm_usb_find_dev(alivei->unique_id);
2281
2282 if(!cpdev) {
2283 spin_unlock(&cp_lkm_usb_mgr.lock);
2284 return -1;
2285 }
2286
2287 alive = (cpdev->state == CP_LKM_USB_DEAD) ? -1 : 0;
2290 spin_unlock(&cp_lkm_usb_mgr.lock);
2291
2292 return alive;
2293}
2294static bool cp_lkm_usb_is_attached(struct cp_lkm_usb_dev* cpdev)
2295{
2296 return (cpdev->state == CP_LKM_USB_ACTIVE || cpdev->state == CP_LKM_USB_CTRL);
2297}
2298
2299static bool cp_lkm_usb_is_base_attached(struct cp_lkm_usb_base_dev* cpbdev)
2300{
2301 //base has three possible states: INIT, CTRL, DEAD (it never goes to ACTIVE, only the cpdev's do that)
2302 return cpbdev->base_state == CP_LKM_USB_CTRL;
2303}
2304
2305
2306//
2307// Input:
2308// if_data: set to true if caller only wants to schedule if there is data pending
2309// is_reschedule: set to true if the caller is the scheduled handler to see if it should be rescheduled
2310// have_lock: true if the caller already has the lock
2311//
2312// returns:
2313// true if scheduled new processing
2314// false if didn't schedule.
2315//
2316// Note: returns false if processing was already scheduled. (See the call-site sketch after this function.)
2317static bool cp_lkm_schedule_data_process(struct cp_lkm_usb_base_dev* cpbdev, bool if_data, bool is_reschedule, bool have_lock)
2318{
2319 unsigned long flags;
2320 bool res = false;
2321
2322 if (!have_lock) {
2323 spin_lock_irqsave(&cpbdev->data_q_lock, flags);
2324 }
2325
2326 //never schedule processing when we are paused
2327 if (cpbdev->data_q_len == CP_LKM_USB_PAUSED_CNT) {
2328 goto schedule_done;
2329 }
2330
2331 if (is_reschedule) {
2332 cpbdev->scheduled = false;
2333 }
2334
2335 if (cpbdev->scheduled == true) {
2336 goto schedule_done;
2337 }
2338
2339 if (if_data) {
2340 if(!cp_lkm_usb_have_data(cpbdev)){
2341 goto schedule_done;
2342 }
2343 }
2344
2345 cpbdev->scheduled = true;
2346 res = true;
2347
2348 //cpdev->dbg_total_tasklet_sched++;
2349 tasklet_schedule(&cpbdev->data_process_tasklet);
2350
2351schedule_done:
2352 if (!have_lock) {
2353 spin_unlock_irqrestore(&cpbdev->data_q_lock, flags);
2354 }
2355 return res;
2356}
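
#if 0
/*
 * Call-site cheat sheet for cp_lkm_schedule_data_process(), derived from
 * the uses in this file (no new behavior):
 */
cp_lkm_schedule_data_process(cpbdev, false, false, false); /* restock/resume paths: force a schedule */
cp_lkm_schedule_data_process(cpbdev, true, false, true);   /* completion path, data_q_lock held: schedule only if enough data */
cp_lkm_schedule_data_process(cpbdev, true, true, false);   /* from the tasklet itself: reschedule if more data arrived */
#endif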
2357
2358static void cp_lkm_schedule_rx_restock(struct cp_lkm_usb_base_dev* cpbdev, struct cp_lkm_base_ep* bep)
2359{
2360 if(bep == NULL) {
2361 cp_lkm_schedule_data_process(cpbdev,false,false,false);
2362 tasklet_schedule(&cpbdev->other_process_tasklet);
2363 }
2364 else if(bep->ep_num == cpbdev->data_in_bep_num) {
2365 //printk("start data ep listen\n");
2366 cp_lkm_schedule_data_process(cpbdev,false,false,false);
2367 }
2368 else{
2369 tasklet_schedule(&cpbdev->other_process_tasklet);
2370 }
2371}
2372
2373#define DATA_SRC_TX 0
2374#define DATA_SRC_RX 1
2375#define DATA_SRC_OTHER 2
2376static void cp_lkm_usb_done_and_defer_data(struct cp_lkm_usb_base_dev* cpbdev, struct sk_buff *skb, int src)
2377{
2378 unsigned long flags;
2379
2380 spin_lock_irqsave(&cpbdev->data_q_lock, flags);
2381 if(src == DATA_SRC_TX) {
2382 __skb_queue_tail(&cpbdev->data_tx_done, skb);
2383 }
2384 else{
2385 __skb_queue_tail(&cpbdev->data_rx_done, skb);
2386 }
2387 if(cpbdev->data_q_len != CP_LKM_USB_PAUSED_CNT) {
2388 cpbdev->data_q_len++;
2389 cp_lkm_schedule_data_process(cpbdev,true,false,true);
2390 }
2391 spin_unlock_irqrestore(&cpbdev->data_q_lock, flags);
2392
2393}
2394
2395//for non data endpoint pkts
2396static void cp_lkm_usb_done_and_defer_other(struct cp_lkm_usb_base_dev* cpbdev, struct sk_buff *skb)
2397{
2398 unsigned long flags;
2399
2400 spin_lock_irqsave(&cpbdev->other_done.lock, flags);
2401 __skb_queue_tail(&cpbdev->other_done, skb);
2402 //only rearm the softirq if the list was empty
2403 if(cpbdev->other_done.qlen == 1) {
2404 tasklet_schedule(&cpbdev->other_process_tasklet);
2405 }
2406 spin_unlock_irqrestore(&cpbdev->other_done.lock, flags);
2407}
2408
2409static void cp_lkm_usb_process_other_done_tasklet (unsigned long param)
2410{
2411 struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev *)param;
2412 struct sk_buff *skb;
2413 struct skb_data *entry;
2414 bool timed_out = false;
2415 unsigned long time_limit = jiffies + 2;
2416 bool can_restock = true;
2417 unsigned long flags;
2418
2419 spin_lock_irqsave(&cpbdev->other_state_lock, flags);
2420 if(cpbdev->other_state != USB_PROCESS_STATE_IDLE){
2421 spin_unlock_irqrestore(&cpbdev->other_state_lock, flags);
2422 return;
2423 }
2424 cpbdev->other_state = USB_PROCESS_STATE_ACTIVE;
2425 spin_unlock_irqrestore(&cpbdev->other_state_lock, flags);
2426
2427 if (timer_pending(&cpbdev->rx_delay) || !cp_lkm_usb_is_base_attached(cpbdev)) {
2428 //printk("%s(), cpbdev %p delaying or no longer attached, base_state: %d\n", __FUNCTION__,cpbdev,cpbdev->base_state);
2429 can_restock = false;
2430 }
2431 //cpdev->dbg_total_o_done++;
2432
2433 while(!timed_out) {
2434 skb = skb_dequeue(&cpbdev->other_done);
2435 if(skb == NULL) {
2436 break;
2437 }
2438 entry = (struct skb_data *) skb->cb;
2439
2440 //printk("%s(), other data cpbdev: %p, bep: %p, num: 0x%x\n",__FUNCTION__,cpbdev,entry->bep,(entry->bep?entry->bep->ep_num:0));
2441
2442 //cp_lkm_usb_cnts(entry->state,-1);
2443 switch (entry->state) {
2444 case in_other_done:
2445 if(entry->urb) {
2446 //cp_lkm_usb_urb_cnt(-1);
2447 usb_free_urb (entry->urb);
2448 }
2449 cp_lkm_usb_other_recv_process(cpbdev, skb);
2450 break;
2451 case ctrl_done:
2452 if(entry->urb) {
2453 //cp_lkm_usb_urb_cnt(-1);
2454 usb_free_urb (entry->urb);
2455 }
2456 cp_lkm_usb_ctrl_process(cpbdev, skb);
2457 break;
2458 case out_done:
2459 case in_other_cleanup:
2460 if(entry->urb) {
2461 //cp_lkm_usb_urb_cnt(-1);
2462 usb_free_urb (entry->urb);
2463 }
2464 dev_kfree_skb_any(skb);
2465 break;
2466 case unlink_start:
2467 default:
2468 //printk("!!other: unknown skb state: %d\n",entry->state);
2469 break;
2470 }
2471
2472 if(time_after_eq(jiffies, time_limit)) {
2473 //ran out of time, process this one and then bail
2474 timed_out = true;
2475 }
2476 }
2477
2478 if(can_restock) {
2479 cp_lkm_usb_rx_other_restock(cpbdev);
2480 }
2481
2482 if(timed_out) {
2483 tasklet_schedule(&cpbdev->other_process_tasklet);
2484 }
2485
2486 spin_lock_irqsave(&cpbdev->other_state_lock, flags);
2487 cpbdev->other_state = USB_PROCESS_STATE_IDLE;
2488 spin_unlock_irqrestore(&cpbdev->other_state_lock, flags);
2489
2490 return ;
2491}
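
#if 0
/*
 * The time-budget idiom used by the tasklet above, reduced to its core
 * (a sketch, not driver code; q, process and tsk are hypothetical):
 * do at most ~2 jiffies of work per soft-irq pass, then re-arm the
 * tasklet so other soft-irqs get CPU time.
 */
unsigned long time_limit = jiffies + 2;
bool timed_out = false;
struct sk_buff *skb;

while (!timed_out && (skb = skb_dequeue(&q)) != NULL) {
	process(skb);                    /* hypothetical work item */
	if (time_after_eq(jiffies, time_limit))
		timed_out = true;        /* finish this one, then bail */
}
if (timed_out)
	tasklet_schedule(&tsk);          /* pick up the rest next pass */
#endif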
2492
2493// Timer callback. This runs in soft interrupt context.
2494//
2495// The call to restock can blow chow (actually when it calls cp_lkm_schedule_data_process)
2496// if an unlink or unplug happens while we are still in the call.
2497//
2498// Unlink or unplug can happen during this call on multi-core platforms with kernel preemption enabled.
2499// This timer is scheduled if we ran into some unexpected USB error and want
2500// to give the USB endpoint some time before trying to reschedule recv urbs on it.
2501//
2502// The whole purpose of this function is to pump the system if it is otherwise idle. If
2503// it isn't idle, we can count on those processes to call cp_lkm_schedule_rx_restock when done.
2504static void cp_lkm_usb_delay_timer (struct timer_list *timer)
2505{
2506 unsigned long flags;
2507
2508 struct cp_lkm_usb_base_dev* cpbdev = from_timer(cpbdev,timer,rx_delay);
2509
2510 spin_lock_irqsave(&cpbdev->processing_state_lock, flags);
2511 if(cpbdev->processing_state == USB_PROCESS_STATE_IDLE){
2512 cp_lkm_schedule_rx_restock(cpbdev,NULL);
2513 }
2514 spin_unlock_irqrestore(&cpbdev->processing_state_lock, flags);
2515}
2516
2517#if 0
2518static void cp_lkm_usb_dbg_memleak_timer (unsigned long param)
2519{
2520 printk("+=+=+=+=+=!!!!mem: %d, urb: %d, skb: data: %d, other: %d, xmit: %d, ctrl: %d, unplug:%d, stck_cnt: %d, stck_chk: %d, unlink: %d\n",g_dbg_memalloc_cnt,g_dbg_urballoc_cnt,g_dbg_data_skballoc_cnt,g_dbg_other_skballoc_cnt,g_dbg_xmit_skballoc_cnt,g_dbg_ctrl_skballoc_cnt,g_dbg_unplug_cnt,g_stuck_cnt,g_stuck_chk,g_unlink_cnt);
2521 mod_timer(&dbg_memleak_timer, jiffies + msecs_to_jiffies(5000));
2522}
2523#endif
2524
2525
2526/*
2527 * We pause the transmit if there are too many urbs down at the usb layer.
2528 * The Broadcom processor's USB block sometimes gets stuck, meaning we will never
2529 * unpause. This function is used to detect if we are paused because of a stuck
2530 * endpoint and to try to recover it.
2531*/
2532static void cp_lkm_usb_stuck_check(struct cp_lkm_usb_base_dev* cpbdev, int action)
2533{
2534 //only broadcom has the stuck problem
2535 if (cp_lkm_is_broadcom == 0) {
2536 //printk("Not BRCM!!!!\n");
2537 return;
2538 }
2539
2540 //TODO: it seems like this might work fine with clones. I don't think it hurts to be inited,
2541 // started or stopped multiple times??
2542 //g_stuck_chk++;
2543 switch(action) {
2544 case CP_LKM_STUCK_INIT:
2545 timer_setup(&cpbdev->usb_pause_stuck_timer, cp_lkm_usb_pause_stuck_timer, 0);
2546 break;
2547 case CP_LKM_STUCK_START:
2548 mod_timer(&cpbdev->usb_pause_stuck_timer, jiffies + msecs_to_jiffies(3000));
2549 cpbdev->tx_proc_cnt_at_pause = cpbdev->tx_proc_cnt;
2550 break;
2551 case CP_LKM_STUCK_STOP:
2552 case CP_LKM_STUCK_DEINIT:
2553 del_timer_sync(&cpbdev->usb_pause_stuck_timer);
2554 break;
2555 }
2556}
2557
2558// Broadcom has a problem in the EHCI controller where if it gets a NAK on an out packet
2559// it occasionally doesn't update the status of the URB and retry it. This results in the endpoint getting stuck.
2560// If we detect that it is stuck (if the tx has been paused for more than 3 seconds) then we cancel the
2561// stuck urb and this gets things going again. The cancelled urb results in a dropped packet, which is undesirable
2562// but preferable to being stuck. (See the detection sketch after this function.)
2563static void cp_lkm_usb_pause_stuck_timer (struct timer_list *timer)
2564{
2565 struct cp_lkm_usb_base_dev* cpbdev = from_timer(cpbdev,timer,usb_pause_stuck_timer);
2566 struct skb_data *entry;
2567 struct sk_buff *skb;
2568 struct urb *urb = NULL;
2569 unsigned long flags;
2570
2571 spin_lock_irqsave(&cpbdev->out_q.lock, flags);
2572 if (cpbdev->tx_paused) {
2573 // cancel stuck urb?
2574 skb = skb_peek(&cpbdev->out_q);
2575 if (skb) {
2576 entry = (struct skb_data *) skb->cb;
2577 if (entry) {
2578 if(cpbdev->tx_proc_cnt_at_pause == cpbdev->tx_proc_cnt){
2579 //printk("\n!!!!!!Canceling stuck URB, cnt at stuck: %d, cnt at unstick: %d!!!!!!!!!!!!!!!!!!!!!!!!!\n", cpbdev->tx_proc_cnt_at_pause, cpbdev->tx_proc_cnt);
2580 urb = entry->urb;
2581 usb_get_urb(urb);
2582 }
2583 //else{
2584 //some pkts were transmitted successfully while waiting, though not enough to unpause us.
2585 //this means the tx is not stuck, so don't need to cancel anything
2586 //printk("\n!!!!!!Restarting stuck URB timer, cnt at stuck: %d, cnt at unstick: %d!!!!!!!!!!!!!!!!!!!!!!!!!\n",cpbdev->tx_proc_cnt_at_pause, cpbdev->tx_proc_cnt);
2587 //}
2588 // restart just in case this doesn't unpause tx
2589 cp_lkm_usb_stuck_check(cpbdev, CP_LKM_STUCK_START);
2590 //g_stuck_cnt++;
2591 }
2592 }
2593 }
2594 spin_unlock_irqrestore(&cpbdev->out_q.lock, flags);
2595 if (urb) {
2596 //printk("\n!!!!!!Canceling stuck URB!!!!!!!!!!\n");
2597 //cpbdev->dbg_total_stuck_cnt++;
2598 usb_unlink_urb (urb);
2599 usb_put_urb(urb);
2600 }
2601}
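
#if 0
/*
 * The detection rule above in isolation (sketch): if no tx urbs have
 * completed since the pause began, the endpoint is presumed stuck and
 * the oldest queued urb is cancelled to kick the controller loose.
 * oldest_urb is a hypothetical stand-in for the head of out_q.
 */
if (cpbdev->tx_paused && cpbdev->tx_proc_cnt == cpbdev->tx_proc_cnt_at_pause)
	usb_unlink_urb(oldest_urb);
#endif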
2602
2603#if 0
2604static void cp_lkm_usb_dbg_timer (unsigned long param)
2605{
2606 struct cp_lkm_usb_dev* cpdev = (struct cp_lkm_usb_dev *)param;
2607 struct cp_lkm_usb_base_dev* cpbdev = cpdev->cpbdev;
2608 printk("!!!!cpdev: %p, clone: %d, id: 0x%x, q_cnt: %d, p: %d, stuck_cnt: %d, tx done: %d, ip_copies: %d!!!!!!!\n",cpdev, cpdev->clone_num,cpdev->mux_id,cpbdev->tx_usb_q_count,cpbdev->tx_paused, cpbdev->dbg_total_stuck_cnt, cpbdev->tx_proc_cnt,num_ip_copies);
2609
2610 //printk("!!!!Stuck urb count: %d, total_pause: %d, cpdev: %p, is_brcm: %d!!!!!!!\n",cpdev->dbg_total_stuck_cnt,cpdev->dbg_total_pause,cpdev,cp_lkm_is_broadcom);
2611 //printk("!!!!!!!!!!!\n");
2612 #if 0
2613 int txa;
2614 int rxa;
2615 int drql;
2616 int dtql;
2617 //int ab;
2618 int tx,rx;
2619 int pkt_avg;
2620 //int epqc, in_q;
2621
2622 cpdev->dbg_total_rx_qlen += cpdev->data_rx_done.qlen;
2623 cpdev->dbg_total_tx_qlen += cpdev->data_tx_done.qlen;
2624
2625 //ab = cpdev->dbg_total_budget/(cpdev->dbg_total_d_done+1);
2626 txa = cpdev->dbg_total_tx_proc/(cpdev->dbg_total_d_done+1);
2627 rxa = cpdev->dbg_total_rx_proc/(cpdev->dbg_total_d_done+1);
2628 drql = cpdev->dbg_total_rx_qlen/(cpdev->dbg_total_d_done+1);
2629 dtql = cpdev->dbg_total_tx_qlen/(cpdev->dbg_total_d_done+1);
2630 //epqc = cpdev->in_eps[CP_LKM_DATA_INDEX].q_cnt;
2631 //in_q = cpdev->in_q.qlen;
2632 tx = cpdev->dbg_total_tx_irq;
2633 rx = cpdev->dbg_total_rx_irq;
2634 pkt_avg = (tx+rx)/5;
2635 printk("tot: %d, tx: %d, rx: %d, pa: %d, dones: %d, p: %d\n", tx+rx, tx, rx, pkt_avg, cpdev->dbg_total_d_done, cpdev->dbg_total_pause);
2636 printk("resch: %d, d_c: %d, sch_n: %d, sch_t: %d, sch_wq: %d, sch_sk: %d, ds: %d\n", cpdev->dbg_total_d_resched, cpdev->dbg_total_d_comp, cpdev->dbg_total_napi_sched,cpdev->dbg_total_tasklet_sched, cpdev->dbg_total_wq_sched,cpdev->dbg_total_sch_sk, cpdev->data_state);
2637 printk("txa: %d, rxa: %d, to: %d, HZ:%d \n", txa , rxa, cpdev->dbg_total_timeout, HZ);
2638 printk("nrm_t: %d, blk_t: %d, nrm: %d, blk: %d, ntmrs: %d \n", cpdev->dbg_total_num_normal_t,cpdev->dbg_total_num_hybrid_t,cpdev->dbg_total_num_normal,cpdev->dbg_total_num_hybrid, cpdev->dbg_total_num_d_timers);
2639 printk("psd: %d, tuqc: %d, schd: %d, dql: %d, rql: %d, tql: %d, toq: %d\n",cpdev->tx_paused,cpdev->tx_usb_q_count,cpdev->scheduled,cpdev->data_q_len,cpdev->data_rx_done.qlen,cpdev->data_tx_done.qlen,cpdev->out_q.qlen);
2640 printk("txirq: %d, txprc: %d\n",cpdev->dbg_total_tx_irq, cpdev->dbg_total_tx_proc);
2641
2642 //printk("ipqc: %d, in_q: %d\n", epqc, in_q);
2643 //printk("d0: %p,d1: %p,d2: %p,d3: %p,d4: %p\n", devs[0],devs[1],devs[2],devs[3],devs[4]);
2644 cpdev->dbg_total_d_done = cpdev->dbg_total_d_resched = cpdev->dbg_total_d_comp = 0;
2645 cpdev->dbg_total_pause = cpdev->dbg_total_max_work = cpdev->dbg_total_budget = 0;
2646 cpdev->dbg_total_tx_irq = cpdev->dbg_total_rx_irq = 0;
2647 cpdev->dbg_total_tx_proc = cpdev->dbg_total_rx_proc = 0;
2648 cpdev->dbg_total_rx_qlen = cpdev->dbg_total_tx_qlen = 0;
2649 cpdev->dbg_total_napi_sched=cpdev->dbg_total_tasklet_sched=cpdev->dbg_total_wq_sched=0;
2650 cpdev->dbg_total_num_normal_t=cpdev->dbg_total_num_hybrid_t=cpdev->dbg_total_num_normal=cpdev->dbg_total_num_hybrid=cpdev->dbg_total_num_d_timers = 0;
2651 #endif
2652
2653 mod_timer(&cpdev->dbg_timer, jiffies + msecs_to_jiffies(5000));
2654
2655}
2656#endif
2657
2658
2659//Caller must have the data_q_lock before calling
2660static int cp_lkm_usb_have_data(struct cp_lkm_usb_base_dev *cpbdev)
2661{
2662 //return the amount of work to be done if it exceeds the threshold, else return 0
2663 if(cpbdev->data_rx_done.qlen >= cpbdev->rx_schedule_threshold || cpbdev->data_tx_done.qlen >= cpbdev->tx_schedule_threshold){
2664 return cpbdev->data_rx_done.qlen + cpbdev->data_tx_done.qlen;
2665 }
2666 return 0;
2667}
2668
2669
2670#if 1
2671static int cp_lkm_usb_process_data_done(struct cp_lkm_usb_base_dev *cpbdev, int budget)
2672{
2673 struct sk_buff *skb;
2674 struct skb_data *entry;
2675 struct cp_lkm_usb_dev* cpdev __attribute__((unused));
2676 unsigned long time_limit = jiffies + 3;
2677 int retval;
2678 int restock = 0;
2679 unsigned long flags;
2680 int rx_work_done = 0;
2681 int tx_work_done = 0;
2682 int work_done = 0;
2683 int can_restock = 1;
2684 int i;
2685 int loop;
2686 int num_proc;
2687 int actual_budget;
2688 int num_rx;
2689 int num_tx;
2690 struct sk_buff_head done_q;
2691 bool paused;
2692
2693 skb_queue_head_init (&done_q);
2694
2695 //cpdev->dbg_total_d_done++;
2696 //cpdev->dbg_total_budget += budget;
2697 //cpdev->dbg_total_rx_qlen += cpdev->data_rx_done.qlen;
2698 //cpdev->dbg_total_tx_qlen += cpdev->data_tx_done.qlen;
2699
2700 // if the delay timer is running, we aren't supposed to send any more recv urbs to the usb layer.
2701 // if the device has detached, we need to finish processing done pkts, but don't resubmit any new urbs
2702 if (timer_pending(&cpbdev->rx_delay) || !cp_lkm_usb_is_base_attached(cpbdev)) {
2703 //printk("%s(), cpdev delaying or no longer attached\n", __FUNCTION__);
2704 can_restock = 0;
2705 }
2706
2707 paused = cpbdev->tx_paused;
2708
2709 actual_budget = CP_LKM_USB_NAPI_MAX_WORK;
2710 for(loop=0;loop<CP_LKM_USB_PROCESS_DIVISOR;loop++) {
2711 if(time_after_eq(jiffies, time_limit)) {
2712 //ran out of time, process this one and then bail
2713 work_done = budget;
2714 //cpdev->dbg_total_timeout++;
2715 break;
2716 }
2717 //keep restocking the q until we max out the budget, time out, or run out
2718 if(rx_work_done >= actual_budget || (paused && tx_work_done >= actual_budget)) {
2719 work_done = budget;
2720 break;
2721 }
2722 spin_lock_irqsave(&cpbdev->data_q_lock, flags);
2723 num_rx = cpbdev->data_rx_done.qlen;
2724 num_tx = cpbdev->data_tx_done.qlen;
2725 num_proc = max(num_rx,num_tx);
2726 num_proc = min(num_proc,actual_budget/CP_LKM_USB_PROCESS_DIVISOR); //grab 1/divisor of remaining budget each time
2727 // Note: A unit of work for the shim is either a lone tx, a lone rx or a combo of a rx and a tx.
2728 // Here we calculate how much work to do on this poll. If there was work left over from last time
2729 // finish processing it.
2730 for(i = 0; i < num_proc; i++) {
2731 skb = __skb_dequeue (&cpbdev->data_rx_done);
2732 if(skb){
2733 cpbdev->data_q_len--;
2734 __skb_queue_tail(&done_q, skb);
2735 }
2736 skb = __skb_dequeue (&cpbdev->data_tx_done);
2737 if(skb){
2738 cpbdev->data_q_len--;
2739 __skb_queue_tail(&done_q, skb);
2740 }
2741 }
2742 spin_unlock_irqrestore(&cpbdev->data_q_lock, flags);
2743
2744 //nothing in the q, we are done
2745 if(done_q.qlen == 0) {
2746 break;
2747 }
2748
2749 while((skb = __skb_dequeue(&done_q))){
2750 entry = (struct skb_data *) skb->cb;
2751 //cp_lkm_usb_cnts(entry->state,-1);
2752 switch (entry->state) {
2753 case in_data_done:
2754 //cpdev->dbg_total_rx_proc++;
2755 entry->bep->q_cnt--;
2756 restock++;
2757 rx_work_done++;
2758 work_done++;
2759 if(can_restock && restock == CP_LKM_USB_RESTOCK_MULTIPLE) {
2760 restock = 0;
2761
2762 retval = cp_lkm_usb_submit_recv (cpbdev, entry->urb, GFP_ATOMIC, entry->bep, true);
2763 if (retval < 0) {
2764 //printk("%s(), can't resubmit\n", __FUNCTION__);
2765 //cp_lkm_usb_urb_cnt(-1);
2766 usb_free_urb (entry->urb);
2767 can_restock = 0;
2768 }
2769 }
2770 else{
2771 //cp_lkm_usb_urb_cnt(-1);
2772 usb_free_urb (entry->urb);
2773 }
2774 cp_lkm_usb_data_recv_process(cpbdev, skb);
2775 break;
2776 case out_done:
2777 work_done++;
2778 tx_work_done++;
2779 //fall through on purpose
2780 fallthrough;
2781 case in_data_cleanup:
2782 if(entry->urb) {
2783 //cp_lkm_usb_urb_cnt(-1);
2784 usb_free_urb (entry->urb);
2785 }
2786 dev_kfree_skb_any(skb);
2787 break;
2788
2789 case unlink_start:
2790 default:
2791 //printk("!!data: unknown skb state: %d\n",entry->state);
2792 break;
2793 }
2794 }
2795 }
2796
2797 //restock recv urbs to usb layer if we processed any
2798 if(can_restock) {
2799 cp_lkm_usb_rx_data_restock(cpbdev);
2800 }
2801
2802 //see if we need to resume the tx side
2803 if(tx_work_done) {
2804 spin_lock_irqsave (&cpbdev->out_q.lock, flags);
2805 cpbdev->tx_proc_cnt += tx_work_done;
2806
2807 if(tx_work_done > cpbdev->tx_usb_q_count) {
2808 cpbdev->tx_usb_q_count = 0;
2809 }
2810 else{
2811 cpbdev->tx_usb_q_count -= tx_work_done;
2812 }
2813 if(cpbdev->tx_usb_q_count <= cpbdev->tx_resume_threshold) {
2814 if(cpbdev->tx_paused){
2815 //unpause all cpdevs
2816 cp_lkm_usb_dev_pause(cpbdev, false);
2817 // cancel usb_pause_stuck_timer
2818 cp_lkm_usb_stuck_check(cpbdev, CP_LKM_STUCK_STOP);
2819 }
2820
2821 }
2822 spin_unlock_irqrestore (&cpbdev->out_q.lock, flags);
2823 }
2824
2825 //if(work_done > cpdev->dbg_total_max_work){
2826 // cpdev->dbg_total_max_work = work_done;
2827 //}
2828
2829 //can't return greater than the passed in budget
2830 if(work_done > budget) {
2831 work_done = budget;
2832 }
2833
2834 return work_done;
2835 //return 1;
2836}
2837#endif
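
/*
 * Worked example of the budget split above, with made-up numbers (the
 * real CP_LKM_USB_NAPI_MAX_WORK and CP_LKM_USB_PROCESS_DIVISOR values
 * live in the header): with actual_budget = 64 and divisor = 4, each of
 * the 4 loop passes dequeues at most 64/4 = 16 work units, and one unit
 * is up to one rx plus one tx skb, so a full run can touch up to
 * 4 * 16 * 2 = 128 skbs before the jiffies time limit cuts it off.
 */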
2838
2839static int cp_lkm_usb_common_process_data_done(struct cp_lkm_usb_base_dev* cpbdev, int budget)
2840{
2841 unsigned long flags;
2842 int work_done = -1;
2843 bool rescheduled;
2844 bool ran_data_done = false;
2845 if(NULL == cpbdev) {
2846 //printk("%s() !!!!!!!!!!!!!!!!no ctxt\n", __FUNCTION__);
2847 return work_done;
2848 }
2849
2850 spin_lock_irqsave(&cpbdev->processing_state_lock, flags);
2851 if(cpbdev->processing_state == USB_PROCESS_STATE_IDLE){
2852 cpbdev->processing_state = USB_PROCESS_STATE_ACTIVE;
2853 spin_unlock_irqrestore(&cpbdev->processing_state_lock, flags);
2854 work_done = cp_lkm_usb_process_data_done(cpbdev, budget);
2855 spin_lock_irqsave(&cpbdev->processing_state_lock, flags);
2856 ran_data_done = true;
2857 cpbdev->processing_state = USB_PROCESS_STATE_IDLE;
2858 }
2859 spin_unlock_irqrestore(&cpbdev->processing_state_lock, flags);
2860 if (ran_data_done) {
2861 rescheduled = cp_lkm_schedule_data_process(cpbdev,true,true,false);
2862 if (rescheduled) {
2863 work_done = budget;
2864 //cpdev->dbg_total_d_resched++;
2865 }
2866 else if(work_done){
2867 work_done--;
2868 //cpdev->dbg_total_d_comp++;
2869 }
2870 }
2871 else{
2872 //cpdev->dbg_total_sch_sk++;
2873 }
2874 return work_done;
2875}
2876
2877
2878static void cp_lkm_usb_process_data_done_tasklet (unsigned long param)
2879{
2880 struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev *)param;
2881
2882 cp_lkm_usb_common_process_data_done(cpbdev, CP_LKM_PM_NAPI_WEIGHT);
2883}
2884
2885
2886static void cp_lkm_usb_rx_data_restock (struct cp_lkm_usb_base_dev* cpbdev)
2887{
2888 //struct cp_lkm_usb_dev* cpdev = (struct cp_lkm_usb_dev *)param;
2889 //int cur_token;
2890 struct urb *urb;
2891 //int ep_index;
2892 int q_len;
2893 struct cp_lkm_base_ep* bep;
2894 int retval;
2895 int q_cnt;
2896
2897 // timer_pending means we had an error and are waiting for a recovery period before submitting any more rx urbs
2898 if (timer_pending(&cpbdev->rx_delay)) {
2899 return;
2900 }
2901
2902 // restock the recv queues on any ep's that are listening
2903 bep = cp_lkm_usb_get_bep(cpbdev, cpbdev->data_in_bep_num);
2904 if(!(bep->con_flags & CP_LKM_USB_LISTEN) && !(bep->con_flags & CP_LKM_USB_RECV)) {
2905 return;
2906 }
2907 if(test_bit (EVENT_RX_HALT, &bep->err_flags)){
2908 return;
2909 }
2910
2911 if(bep->con_flags & CP_LKM_USB_RECV) {
2912 //only post 1 for recv's
2913 q_len = 1;
2914 }
2915 else{
2916 //its a listen
2917 q_len = CP_LKM_USB_MAX_RX_QLEN;
2918 }
2919
2920 // Try to q up to q_len recv buffs with usb. We may not be able to get to that amount if
2921 // there is a problem with usb, so only try up to q_len times to insert them.
2922 retval = 0;
2923 q_cnt = bep->q_cnt;
2924
2925 while(q_cnt < q_len) {
2926 urb = usb_alloc_urb (0, GFP_ATOMIC);
2927 if (!urb) {
2928 if (q_cnt == 0) {
2929 cp_lkm_usb_defer_kevent (cpbdev, bep, EVENT_RX_MEMORY);
2930 }
2931 break;
2932 }
2933 //cp_lkm_usb_urb_cnt(1);
2934 retval = cp_lkm_usb_submit_recv (cpbdev, urb, GFP_ATOMIC, bep, true);
2935 if (retval < 0) {
2936 //cp_lkm_usb_urb_cnt(-1);
2937 usb_free_urb (urb);
2938 break;
2939 }
2940 q_cnt++;
2941 }
2942}
2943
2944static void cp_lkm_usb_rx_other_restock (struct cp_lkm_usb_base_dev* cpbdev)
2945{
2946 struct urb *urb;
2947 int q_len;
2948 struct cp_lkm_base_ep* bep;
2949 int retval;
2950 int q_cnt;
2951 struct list_head *entry, *nxt;
2952
2953 // timer_pending means we had an error and are waiting for a recovery period before submitting any more rx urbs
2954 if (timer_pending(&cpbdev->rx_delay)) {
2955 return;
2956 }
2957
2958 // restock the recv queues on any ep's that are listening
2959 list_for_each_safe(entry, nxt, &cpbdev->in_bep_list) {
2960 bep = list_entry(entry, struct cp_lkm_base_ep, list);
2961 if(!(bep->con_flags & CP_LKM_USB_LISTEN) && !(bep->con_flags & CP_LKM_USB_RECV)) {
2962 continue;
2963 }
2964 if(test_bit (EVENT_RX_HALT, &bep->err_flags)){
2965 continue;
2966 }
2967 if(bep->ep_num == cpbdev->data_in_bep_num) {
2968 continue;
2969 }
2970
2971 if(bep->con_flags & CP_LKM_USB_RECV) {
2972 //only post 1 for recv's
2973 q_len = 1;
2974 }
2975 else{
2976 //it's a listen
2977 q_len = CP_LKM_USB_MAX_OTHER_QLEN;
2978 }
2979
2980 // Try to queue up to q_len recv buffers with usb. We may not be able to reach that amount if
2981 // there is a problem with usb, so only try up to q_len times to insert them.
2982 retval = 0;
2983 q_cnt = bep->q_cnt;
2984
2985 while(q_cnt < q_len) {
2986 urb = usb_alloc_urb (0, GFP_ATOMIC);
2987 if (!urb) {
2988 if (q_cnt == 0) {
2989 cp_lkm_usb_defer_kevent (cpbdev, bep, EVENT_RX_MEMORY);
2990 }
2991 break;
2992 }
2993 //cp_lkm_usb_urb_cnt(1);
2994 retval = cp_lkm_usb_submit_recv (cpbdev, urb, GFP_ATOMIC, bep, false);
2995 if (retval < 0) {
2996 //cp_lkm_usb_urb_cnt(-1);
2997 usb_free_urb (urb);
2998 break;
2999 }
3000 q_cnt++;
3001 }
3002 }
3003}
3004
3005//unlink all urbs with the given ep, or all if ep is NULL
3006static int cp_lkm_usb_unlink_urbs (struct cp_lkm_usb_base_dev *cpbdev, struct sk_buff_head *q, struct cp_lkm_base_ep* bep)
3007{
3008 unsigned long flags;
3009 struct sk_buff *skb;
3010 int count = 0;
3011
3012 spin_lock_irqsave (&q->lock, flags);
3013 while (!skb_queue_empty(q)) {
3014 struct skb_data *entry;
3015 struct urb *urb;
3016 int retval;
3017
3018 skb_queue_walk(q, skb) {
3019 entry = (struct skb_data *) skb->cb;
3020 urb = entry->urb;
3021 if(urb && (entry->state != unlink_start) && (entry->bep == bep || bep == NULL)) {
3022 goto found;
3023 }
3024 }
3025 break;
3026found:
3027 entry->state = unlink_start;
3028
3029 /*
3030 * Take a reference on the URB so it cannot be freed
3031 * during usb_unlink_urb(), which would otherwise risk a
3032 * use-after-free inside usb_unlink_urb(), since unlinking
3033 * always races with the .complete handler (including
3034 * defer_bh).
3035 */
3036 usb_get_urb(urb);
3037 spin_unlock_irqrestore(&q->lock, flags);
3038 // during some PM-driven resume scenarios,
3039 // these (async) unlinks complete immediately
3040 //usb_kill_urb(urb);
3041 retval = usb_unlink_urb (urb);
3042 //g_unlink_cnt++;
3043 if (retval != -EINPROGRESS && retval != 0){
3044 //netdev_dbg(dev->net, "unlink urb err, %d\n", retval);
3045 } else{
3046 count++;
3047 }
3048 usb_put_urb(urb);
3049 spin_lock_irqsave(&q->lock, flags);
3050 }
3051 spin_unlock_irqrestore (&q->lock, flags);
3052 return count;
3053}
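/*
 * Note: the get/unlock/unlink/put sequence above mirrors the usbnet
 * unlink_urbs() pattern. The queue lock must be dropped around
 * usb_unlink_urb() because the (possibly immediate) completion handler grabs
 * the same queue lock to unlink its skb; usb_get_urb()/usb_put_urb() keep
 * the urb alive across that unlocked window.
 */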
3054
3055
3056static void cp_lkm_usb_defer_kevent (struct cp_lkm_usb_base_dev* cpbdev, struct cp_lkm_base_ep* bep, int work)
3057{
3058 set_bit (work, &bep->err_flags);
3059 if (!schedule_work (&cpbdev->kevent)) {
3060 //deverr (dev, "kevent %d may have been dropped", work);
3061 } else {
3062 //devdbg (dev, "kevent %d scheduled", work);
3063 }
3064}
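/*
 * The set_bit()+schedule_work() hand-off above exists because URB completion
 * handlers run in (soft)irq context, while usb_clear_halt() issues a blocking
 * control transfer and may sleep. A minimal sketch of the same pattern, with
 * hypothetical names (my_dev, MY_EVENT_HALT, my_complete, my_kevent):
 */
#if 0 /* illustrative sketch only, not compiled into this driver */
struct my_dev {
    struct usb_device *udev;
    unsigned int pipe;
    unsigned long flags;
    struct work_struct kevent; /* INIT_WORK(&dev->kevent, my_kevent) at probe */
};
#define MY_EVENT_HALT 0

static void my_complete(struct urb *urb)        /* irq context: must not sleep */
{
    struct my_dev *dev = urb->context;
    if (urb->status == -EPIPE) {
        set_bit(MY_EVENT_HALT, &dev->flags);    /* record what needs fixing */
        schedule_work(&dev->kevent);            /* hand off to thread context */
    }
}

static void my_kevent(struct work_struct *work) /* thread context: may sleep */
{
    struct my_dev *dev = container_of(work, struct my_dev, kevent);
    if (test_and_clear_bit(MY_EVENT_HALT, &dev->flags))
        usb_clear_halt(dev->udev, dev->pipe);   /* blocking control transfer */
}
#endif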
3065
3066// Workqueue callback function. This runs in thread context
3067static void cp_lkm_usb_kevent (struct work_struct *work)
3068{
3069 struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev *)container_of(work, struct cp_lkm_usb_base_dev, kevent);
3070 int status;
3071 struct cp_lkm_base_ep* bep;
3072 struct list_head *entry, *nxt;
3073
3074
3075 //grab global lock while testing dev state so it can't change on us.
3076 spin_lock(&cp_lkm_usb_mgr.lock);
3077 if(!cp_lkm_usb_is_base_attached(cpbdev)){
3078 spin_unlock(&cp_lkm_usb_mgr.lock);
3079 return;
3080 }
3081
3082 //don't want to hold the global lock while doing this since we don't know how long it will take, see next note
3083 spin_unlock(&cp_lkm_usb_mgr.lock);
3084
3085
3086 //NOTE: if kernel preemption is enabled and the disconnect gets called right here, bad things could happen if the cpdev->udev
3087 // is released. Fortunately, cp_lkm_usb_disconnect() calls cancel_work_sync() before releasing it. This will either cancel this
3088 // function if it isn't currently running, or will wait until it exits before returning if it is running. This protects us.
3089
3090 list_for_each_safe(entry, nxt, &cpbdev->out_bep_list) {
3091 bep = list_entry(entry, struct cp_lkm_base_ep, list);
3092 /* usb_clear_halt() needs a thread context */
3093 if (test_bit (EVENT_TX_HALT, &bep->err_flags)) {
3094 cp_lkm_usb_unlink_urbs (cpbdev, &cpbdev->out_q, bep);
3095 status = usb_clear_halt (cpbdev->udev, bep->pipe);
3096 DEBUG_TRACE("%s() EVENT_TX_HALT status:%d", __FUNCTION__, status);
3097 if (status < 0 && status != -EPIPE && status != -ESHUTDOWN) {
3098 //if (netif_msg_tx_err (dev))
3099 // deverr (dev, "can't clear tx halt, status %d",
3100 DEBUG_TRACE("%s() failed EVENT_TX_HALT status:%d", __FUNCTION__, status);
3101 // status);
3102 } else {
3103 clear_bit (EVENT_TX_HALT, &bep->err_flags);
3104 //if (status != -ESHUTDOWN)
3105 // netif_wake_queue (dev->net);
3106 }
3107 }
3108 }
3109
3110 list_for_each_safe(entry, nxt, &cpbdev->in_bep_list) {
3111 bep = list_entry(entry, struct cp_lkm_base_ep, list);
3112 if (test_bit (EVENT_RX_HALT, &bep->err_flags)) {
3113 cp_lkm_usb_unlink_urbs (cpbdev, &cpbdev->in_q, bep);
3114 status = usb_clear_halt (cpbdev->udev, bep->pipe);
3115 DEBUG_TRACE("%s() EVENT_RX_HALT status:%d", __FUNCTION__, status);
3116 if (status < 0 && status != -EPIPE && status != -ESHUTDOWN) {
3117 DEBUG_TRACE("%s() failed EVENT_RX_HALT status:%d", __FUNCTION__, status);
3118 //if (netif_msg_rx_err (dev))
3119 // deverr (dev, "can't clear rx halt, status %d",
3120 // status);
3121 } else {
3122 clear_bit (EVENT_RX_HALT, &bep->err_flags);
3123 //grab global lock so link/unlink or unplug can't mess up the restock schedule pointers mid-scheduling
3124 spin_lock(&cp_lkm_usb_mgr.lock);
3125 if (cp_lkm_usb_is_base_attached(cpbdev)){
3126 cp_lkm_schedule_rx_restock(cpbdev,bep);
3127 }
3128 spin_unlock(&cp_lkm_usb_mgr.lock);
3129
3130 }
3131 }
3132 }
3133 /* tasklet could resubmit itself forever if memory is tight */
3134 list_for_each_safe(entry, nxt, &cpbdev->in_bep_list) {
3135 bep = list_entry(entry, struct cp_lkm_base_ep, list);
3136 if (test_bit (EVENT_RX_MEMORY, &bep->err_flags)) {
3137 DEBUG_TRACE("%s() EVENT_RX_MEMORY", __FUNCTION__);
3138
3139 clear_bit (EVENT_RX_MEMORY, &bep->err_flags);
3140
3141 //grab global lock so link/unlink or unplug can't mess up the restock schedule pointers mid-scheduling
3142 spin_lock(&cp_lkm_usb_mgr.lock);
3143 if (cp_lkm_usb_is_base_attached(cpbdev) && bep->q_cnt == 0){
3144 cp_lkm_schedule_rx_restock(cpbdev,bep);
3145
3146 }
3147 spin_unlock(&cp_lkm_usb_mgr.lock);
3148 }
3149 }
3150 //if (test_bit (EVENT_LINK_RESET, &cpdev->flags)) {
3151 // struct driver_info *info = dev->driver_info;
3152 // int retval = 0;
3153 //
3154 // clear_bit (EVENT_LINK_RESET, &dev->flags);
3155 // if(info->link_reset && (retval = info->link_reset(dev)) < 0) {
3156 // devinfo(dev, "link reset failed (%d) usbnet usb-%s-%s, %s",
3157 // retval,
3158 // dev->udev->bus->bus_name, dev->udev->devpath,
3159 // info->description);
3160 // }
3161 //}
3162
3163 //if (dev->flags)
3164 // devdbg (dev, "kevent done, flags = 0x%lx",
3165 // dev->flags);
3166}
3167
3168static void cp_lkm_usb_ctrl_complete(struct urb *urb)
3169{
3170 unsigned long flags;
3171 struct sk_buff *skb = (struct sk_buff *) urb->context;
3172 struct skb_data *entry = (struct skb_data *) skb->cb;
3173 struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev *)entry->cpbdev;
3174
3175 //remove skb from the list first thing so no other code context looking at the
3176 //list (such as unlink_urbs) can mess with it.
3177 spin_lock_irqsave(&cpbdev->ctrlq.lock, flags);
3178 __skb_unlink(skb, &cpbdev->ctrlq);
3179 spin_unlock_irqrestore(&cpbdev->ctrlq.lock,flags);
3180
3181 skb->len = urb->actual_length;
3182
3183 //skip status and error checking if the device has unplugged
3184 if(!cp_lkm_usb_is_base_attached(cpbdev)) {
3185 urb->status = -ENODEV;
3186 goto ctrl_done;
3187 }
3188
3189 if (urb->status != 0) {
3190 switch (urb->status) {
3191 case -EPIPE:
3192 break;
3193
3194 /* software-driven interface shutdown */
3195 case -ECONNRESET: // async unlink
3196 case -ESHUTDOWN: // hardware gone
3197 break;
3198
3199 case -ENODEV:
3200 //printk("ctrl fail, no dev\n");
3201 break;
3202
3203 case -EPROTO:
3204 case -ETIME:
3205 case -EILSEQ:
3206 //CA: decided not to throttle on ctrl channel transfers since they are a different beast
3207 //if (!timer_pending (&cpdev->rx_delay)) {
3208 // mod_timer (&cpdev->rx_delay, jiffies + THROTTLE_JIFFIES);
3209 //if (netif_msg_link (dev))
3210 // devdbg (dev, "tx throttle %d",
3211 // urb->status);
3212 //}
3213 //netif_stop_queue (dev->net);
3214 break;
3215 default:
3216 //if (netif_msg_tx_err (dev))
3217 // devdbg (dev, "tx err %d", entry->urb->status);
3218 break;
3219 }
3220 }
3221
3222ctrl_done:
3223 urb->dev = NULL;
3224 entry->state = ctrl_done;
3225 entry->status = urb->status;
3226 entry->urb = NULL;
3227 if(urb->setup_packet) {
3228 kfree(urb->setup_packet);
3229 }
3230 //cp_lkm_usb_urb_cnt(-1);
3231 usb_free_urb (urb);
3232 cp_lkm_usb_done_and_defer_other(cpbdev, skb);
3233}
3234
3235
3236static int cp_lkm_usb_start_ctrl_xmit(void *ctx, struct sk_buff *skb_in)
3237{
3238 struct cp_lkm_usb_dev* cpdev = (struct cp_lkm_usb_dev *)ctx;
3239 struct cp_lkm_usb_base_dev* cpbdev;
3240 int retval = NET_XMIT_SUCCESS;
3241 struct urb *urb = NULL;
3242 struct skb_data *entry;
3243 unsigned long flags;
3244 int pipe;
3245 u8* tmp8;
3246 u16* tmp16;
3247 struct usb_ctrlrequest *req = NULL;
3248
3249 if(NULL == cpdev || !cp_lkm_usb_is_attached(cpdev) || !cp_lkm_usb_is_base_attached(cpdev->cpbdev)) {
3250 //printk("%s() no ctxt\n", __FUNCTION__);
3251 goto ctrl_done;
3252 }
3253
3254 cpbdev = cpdev->cpbdev;
3255
3256 DEBUG_TRACE("%s()", __FUNCTION__);
3257
3258 if ((urb = usb_alloc_urb(0, GFP_ATOMIC)) == NULL) {
3259 retval = -ENOMEM;
3260 goto ctrl_done;
3261 }
3262 //cp_lkm_usb_urb_cnt(1);
3263
3264 if ((req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC)) == NULL) {
3265 //cp_lkm_usb_urb_cnt(-1);
3266 usb_free_urb(urb);
3267 retval = -ENOMEM;
3268 goto ctrl_done;
3269 }
3270
3271 //The upper layer driver packed the ctrl request fields at the front of the buffer (in correct le order)
3272 //This layer moves them into a separate usb_ctrlrequest buffer
3273 tmp8 = (u8*)skb_in->data;
3274 req->bRequestType = *tmp8;
3275 skb_pull(skb_in, 1);
3276
3277 tmp8 = (u8*)skb_in->data;
3278 req->bRequest = *tmp8;
3279 skb_pull(skb_in, 1);
3280
3281 tmp16 = (u16*)skb_in->data;
3282 req->wValue = *tmp16;
3283 skb_pull(skb_in, 2);
3284
3285 tmp16 = (u16*)skb_in->data;
3286 req->wIndex = *tmp16;
3287 skb_pull(skb_in, 2);
3288
3289 tmp16 = (u16*)skb_in->data;
3290 req->wLength = *tmp16;
3291 skb_pull(skb_in, 2);
3292 //printk("%s() RT:%x, R:%x, V:%x, I:%x, L:%x\n", __FUNCTION__, req->bRequestType, req->bRequest, req->wValue, req->wIndex, req->wLength);
3293
3294 entry = (struct skb_data *) skb_in->cb;
3295 entry->urb = urb;
3296 entry->cpbdev = cpbdev;
3297 entry->state = ctrl_start;
3298 entry->status = 0;
3299 entry->bep = NULL;
3300 entry->unique_id = cpdev->unique_id;
3301
3302 if(req->bRequestType & USB_DIR_IN) {
3303 DEBUG_TRACE("%s() ctrl in len: %d", __FUNCTION__,skb_in->len);
3304 pipe = usb_rcvctrlpipe(cpbdev->udev, 0);
3305 }
3306 else{
3307 DEBUG_TRACE("%s() ctrl out len: %d", __FUNCTION__,skb_in->len);
3308 pipe = usb_sndctrlpipe(cpbdev->udev, 0);
3309 }
3310
3311 usb_fill_control_urb(urb, cpbdev->udev, pipe,
3312 (void *)req, skb_in->data, skb_in->len,
3313 cp_lkm_usb_ctrl_complete, skb_in);
3314
3315 //cp_lkm_usb_cnts(ctrl_start,1);
3316 spin_lock_irqsave (&cpbdev->ctrlq.lock, flags);
3317 retval = usb_submit_urb (urb, GFP_ATOMIC);
3318 switch (retval) {
3319 case 0:
3320 //net->trans_start = jiffies;
3321 //success: queue it
3322 __skb_queue_tail (&cpbdev->ctrlq, skb_in);
3323 skb_in = NULL;
3324 urb = NULL;
3325 req = NULL;
3326 break;
3327 case -ENODEV:
3328 break;
3329 case -EPROTO:
3330 case -EPIPE:
3331 break;
3332 default:
3333 break;
3334 }
3335 spin_unlock_irqrestore (&cpbdev->ctrlq.lock, flags);
3336
3337ctrl_done:
3338 if(req) {
3339 kfree(req);
3340 }
3341 if(urb) {
3342 //cp_lkm_usb_urb_cnt(-1);
3343 usb_free_urb(urb);
3344 }
3345 if(skb_in) {
3346 //cp_lkm_usb_cnts(ctrl_start,-1);
3347 dev_kfree_skb_any (skb_in);
3348 }
3349
3350 DEBUG_TRACE("%s() retval %d", __FUNCTION__, retval);
3351
3352 return retval;
3353}
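/*
 * The field-by-field parsing above consumes a standard 8-byte USB setup
 * packet laid out in wire (little-endian) order at the front of the skb,
 * followed by the payload. A sketch of how a sender could pack one;
 * my_pack_setup() is a hypothetical helper, not part of this driver:
 */
#if 0 /* illustrative sketch only, not compiled into this driver */
#include <asm/unaligned.h>

static void my_pack_setup(u8 *buf, u8 req_type, u8 req,
                          u16 value, u16 index, u16 len)
{
    buf[0] = req_type;                  /* bRequestType, e.g. USB_DIR_IN */
    buf[1] = req;                       /* bRequest */
    put_unaligned_le16(value, &buf[2]); /* wValue */
    put_unaligned_le16(index, &buf[4]); /* wIndex */
    put_unaligned_le16(len,   &buf[6]); /* wLength: payload bytes that follow */
}
#endif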
3354
3355
3356#define THROTTLE_JIFFIES (HZ/8)
3357/*
3358 * This function runs in a hw interrupt context. Do not put any DEBUG_XX print messages in here.
3359*/
3360static void cp_lkm_usb_xmit_complete (struct urb *urb)
3361{
3362 unsigned long flags;
3363 struct sk_buff *skb = (struct sk_buff *) urb->context;
3364 struct skb_data *entry = (struct skb_data *) skb->cb;
3365 struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev *)entry->cpbdev;
3366 struct cp_lkm_base_ep* bep = (struct cp_lkm_base_ep*)entry->bep;
3367 bool is_data = false;
3368 struct cp_lkm_usb_dev* cpdev;
3369
3370 //remove skb from the list first thing so no other code context looking at the
3371 //list (such as unlink_urbs) can mess with it.
3372 spin_lock_irqsave(&cpbdev->out_q.lock,flags);
3373 __skb_unlink(skb, &cpbdev->out_q);
3374 spin_unlock_irqrestore(&cpbdev->out_q.lock,flags);
3375
3376 bep->q_cnt--;
3377
3378 if(bep->ep_num == cpbdev->data_out_bep_num) {
3379 is_data = true;
3380 }
3381
3382 // we save the unique id of the cpdev that sent each tx pkt.
3383 cpdev = cp_lkm_usb_find_dev(entry->unique_id);
3384
3385 //skip status and error checking if the device has unplugged
3386 if(!cp_lkm_usb_is_base_attached(cpbdev)) {
3387 goto xmit_done;
3388 }
3389
3390 if (urb->status != 0) {
3391 UPDATE_STATS(cpdev->edi->pm_stats64_ctx, tx_errors, 1);
3392 switch (urb->status) {
3393 case -EPIPE:
3394 //don't have to clear halts on ctrl ep
3395 if (bep->ep_num != 0) {
3396 cp_lkm_usb_defer_kevent (cpbdev, bep, EVENT_TX_HALT);
3397 }
3398 break;
3399
3400 /* software-driven interface shutdown */
3401 case -ECONNRESET: // async unlink
3402 case -ESHUTDOWN: // hardware gone
3403 break;
3404
3405 case -ENODEV:
3406 break;
3407
3408 // like rx, tx gets controller i/o faults during khubd delays
3409 // and so it uses the same throttling mechanism.
3410 case -EPROTO:
3411 case -ETIME:
3412 case -EILSEQ:
3413 if (!timer_pending (&cpbdev->rx_delay)) {
3414 mod_timer (&cpbdev->rx_delay, jiffies + THROTTLE_JIFFIES);
3415 //if (netif_msg_link (dev))
3416 // devdbg (dev, "tx throttle %d",
3417 // urb->status);
3418 }
3419 //netif_stop_queue (dev->net);
3420 break;
3421 default:
3422 //if (netif_msg_tx_err (dev))
3423 // devdbg (dev, "tx err %d", entry->urb->status);
3424 break;
3425 }
3426 }
3427
3428xmit_done:
3429 entry->state = out_done;
3430
3431 if(is_data) {
3432 //cpdev->dbg_total_tx_irq++;
3433 cp_lkm_usb_done_and_defer_data(cpbdev, skb, DATA_SRC_TX);
3434 }
3435 else{
3436 cp_lkm_usb_done_and_defer_other(cpbdev, skb);
3437 }
3438}
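/*
 * -EPROTO/-ETIME/-EILSEQ bursts are characteristic of controller i/o faults
 * around hub disconnect delays, so both tx and rx completions arm the shared
 * rx_delay timer for THROTTLE_JIFFIES (HZ/8, i.e. 125 ms). The restock paths
 * check timer_pending(&cpbdev->rx_delay) and stay idle until it fires. The
 * timer callback is outside this section; one would expect it to kick the
 * restock machinery, roughly like this (the handler name and the NULL-bep
 * argument are assumptions):
 */
#if 0 /* illustrative sketch only, not compiled into this driver */
static void my_rx_delay_expired(unsigned long param)
{
    struct cp_lkm_usb_base_dev *cpbdev = (struct cp_lkm_usb_base_dev *)param;

    /* quiet period over: resume submitting rx urbs */
    cp_lkm_schedule_rx_restock(cpbdev, NULL);
}
#endif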
3439
3440static int cp_lkm_usb_start_xmit_common(void *ctx, struct sk_buff *skb_in, int src, struct cp_lkm_ep* ep)
3441{
3442 struct cp_lkm_usb_dev* cpdev = (struct cp_lkm_usb_dev *)ctx;
3443 struct cp_lkm_usb_base_dev* cpbdev;
3444 struct cp_lkm_base_ep* bep;
3445 int length;
3446 int retval = NET_XMIT_SUCCESS;
3447 struct urb *urb = NULL;
3448 struct skb_data *entry;
3449 unsigned long flags;
3450 struct sk_buff* skb_out = NULL;
3451 int wres;
3452
3453 if(NULL == cpdev || !cp_lkm_usb_is_attached(cpdev) || !cp_lkm_usb_is_base_attached(cpdev->cpbdev)) {
3454 //printk("%s() no ctxt\n", __FUNCTION__);
3455 dev_kfree_skb_any(skb_in);
3456 return -1;
3457 }
3458
3459 cpbdev = cpdev->cpbdev;
3460
3461 //the network layer doesn't have a pointer to the ep readily available, so it passes in NULL for ep and we
3462 //fetch the well-known data out ep ourselves
3463 length = 0;
3464 if(src == CP_LKM_WRAPPER_SRC_DATA && ep == NULL){
3465 ep = cp_lkm_usb_get_ep(cpdev,cpdev->data_out_ep_num);
3466 length = skb_in->len;
3467 }
3468 bep = ep->bep;
3469
3470 while(1) {
3471 skb_out = NULL;
3472 urb = NULL;
3473 retval = NET_XMIT_SUCCESS;
3474
3475 //DEBUG_ERROR("%s() wrap it skb_in:%p", __FUNCTION__, skb_in);
3476
3477 //only use wrappers on the data endpoint
3478 if(ep->ep_num == cpdev->data_out_ep_num) {
3479 //DEBUG_ERROR("%s() wrap it", __FUNCTION__);
3480 //spin_lock_irqsave (&cp_lkm_usb_mgr.lock, flags);
3481 wres = cp_lkm_wrapper_send(cpbdev->wrapper_ctxt, src, cpdev->mux_id, skb_in, &skb_out);
3482 skb_in = NULL; //we no longer own skb so null its pointer for future call if we loop
3483 //spin_unlock_irqrestore (&cp_lkm_usb_mgr.lock, flags);
3484 if (wres == CP_LKM_WRAPPER_RES_ERROR) {
3485 DEBUG_ERROR("%s() wrapper error wres:0x%x, skb_out:%p", __FUNCTION__, wres, skb_out);
3486 UPDATE_STATS(cpdev->edi->pm_stats64_ctx, tx_dropped, 1);
3487 retval = -ENOMEM;
3488 goto xmit_done;
3489 }
3490 }
3491 else{
3492 //Not a data ep, send the skb and then we are done
3493 skb_out = skb_in;
3494 skb_in = NULL;
3495 wres = CP_LKM_WRAPPER_RES_DONE;
3496 }
3497
3498 //If we get here, send returned either done or again. skb_out can be NULL if there is nothing to
3499 //send, so check that first
3500 if(NULL == skb_out) {
3501// DEBUG_INFO("%s() no wrapped data", __FUNCTION__);
3502 goto xmit_done;
3503 }
3504
3505 if(cp_lkm_is_broadcom && ((uintptr_t)(skb_out->data) & 0x3)) {
3506 //broadcom unaligned packets that are multiples of 512 plus 3,4 or 5 bytes (515,516,517,1027,1028,1029,etc)
3507 //are corrupted for some reason, so need to copy into an aligned buffer
3508 int r = skb_out->len & 0x000001FF; //poor man's mod
3509 if (r >= 3 && r <= 5) {
3510 struct sk_buff* skb_new = skb_copy_expand(skb_out, 0, 0, GFP_ATOMIC);
3511 if(!skb_new) {
3512 retval = -ENOMEM;
3513 goto xmit_done;
3514 }
3515 //printk("%s() unaligned: %p, aligned: %p, len: %d, r: %d\n",__FUNCTION__,skb_out->data, skb_new->data, skb_out->len, r);
3516 dev_kfree_skb_any(skb_out);
3517 skb_out=skb_new;
3518 }
3519 }
3520
3521 if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
3522 //if (netif_msg_tx_err (dev))
3523 // devdbg (dev, "no urb");
3524 DEBUG_ERROR("%s() urb alloc failed", __FUNCTION__);
3525 UPDATE_STATS(cpdev->edi->pm_stats64_ctx, tx_dropped, 1);
3526 retval = -ENOMEM;
3527 goto xmit_done;
3528 }
3529 //cp_lkm_usb_urb_cnt(1);
3530 entry = (struct skb_data *) skb_out->cb;
3531 entry->urb = urb;
3532 entry->cpbdev = cpbdev;
3533 entry->bep = bep;
3534 entry->state = out_start;
3535 entry->unique_id = cpdev->unique_id;
3536 //cp_lkm_usb_cnts(out_start,1);
3537
3538 if(bep->type == UE_BULK) {
3539 usb_fill_bulk_urb (urb, cpbdev->udev, bep->pipe, skb_out->data,
3540 skb_out->len, cp_lkm_usb_xmit_complete, skb_out);
3541 }
3542 else{
3543 usb_fill_int_urb (urb, cpbdev->udev, bep->pipe, skb_out->data, skb_out->len,
3544 cp_lkm_usb_xmit_complete, skb_out, bep->interval);
3545 }
3546
3547 if (!(cpbdev->feature_flags & CP_LKM_FEATURE_NO_ZERO_PACKETS)) {
3548 urb->transfer_flags |= URB_ZERO_PACKET;
3549 }
3550
3551 // DEBUG_INFO("%s()", __FUNCTION__);
3552 // DEBUG_INFO("%s() send to ep: 0x%x type:%d, pipe:0x%x", __FUNCTION__, ep->ep_num, ep->type, ep->pipe);
3553
3554 spin_lock_irqsave (&cpbdev->out_q.lock, flags);
3555 retval = usb_submit_urb (urb, GFP_ATOMIC);
3556 switch (retval) {
3557 case 0:
3558 //net->trans_start = jiffies;
3559 //success: queue it
3560 __skb_queue_tail (&cpbdev->out_q, skb_out);
3561 bep->q_cnt++;
3562 skb_out = NULL;
3563 urb = NULL;
3564 if(ep->ep_num == cpdev->data_out_ep_num) {
3565 cpbdev->tx_usb_q_count++;
3566 if(cpbdev->tx_usb_q_count >= CP_LKM_USB_TX_PAUSE_Q_PKTS){
3567 if(!cpbdev->tx_paused) {
3568 //pause all cpdevs
3569 cp_lkm_usb_dev_pause(cpbdev, true);
3570 cp_lkm_usb_stuck_check(cpbdev, CP_LKM_STUCK_START);
3571 }
3572 }
3573 }
3574 break;
3575 case -EPIPE:
3576 UPDATE_STATS(cpdev->edi->pm_stats64_ctx, tx_errors, 1);
3577 //don't clear halts on ctrl ep
3578 if(ep->ep_num != 0) {
3579 cp_lkm_usb_defer_kevent(cpbdev, bep, EVENT_TX_HALT);
3580 }
3581 break;
3582 case -ENODEV:
3583 break;
3584 case -EPROTO:
3585 default:
3586 //if (netif_msg_tx_err (dev))
3587 UPDATE_STATS(cpdev->edi->pm_stats64_ctx, tx_errors, 1);
3588 // devdbg (dev, "tx: submit urb err %d", retval);
3589 break;
3590 }
3591 spin_unlock_irqrestore (&cpbdev->out_q.lock, flags);
3592
3593xmit_done:
3594 if (retval) {
3595 DEBUG_TRACE("%s() failed to send: %d", __FUNCTION__, retval);
3596 //cp_lkm_usb_cnts(out_start,-1);
3597 }
3598
3599 //if these are non null then they weren't sent so free them
3600 if (skb_out){
3601 dev_kfree_skb_any (skb_out);
3602 }
3603 if(urb) {
3604 //cp_lkm_usb_urb_cnt(-1);
3605 usb_free_urb (urb);
3606 }
3607
3608 //Bail out of while loop unless the wrapper asked to be called again
3609 if(wres != CP_LKM_WRAPPER_RES_AGAIN) {
3610 break;
3611 }
3612
3613 length = 0;
3614
3615 }
3616 return retval;
3617}
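/*
 * The wrapper calls in this file follow a drain-loop contract:
 * CP_LKM_WRAPPER_RES_AGAIN means "call me again, more output is buffered",
 * CP_LKM_WRAPPER_RES_DONE means finished, CP_LKM_WRAPPER_RES_ERROR aborts.
 * The subtle part is ownership: the wrapper consumes skb_in on the first
 * call, so the caller NULLs its pointer and keeps looping. Reduced to a
 * skeleton, with submit_one() standing in for the urb fill/submit above:
 */
#if 0 /* illustrative sketch only, not compiled into this driver */
do {
    skb_out = NULL;
    wres = cp_lkm_wrapper_send(cpbdev->wrapper_ctxt, src, cpdev->mux_id,
                               skb_in, &skb_out);
    skb_in = NULL;                   /* wrapper owns it after the first call */
    if (wres == CP_LKM_WRAPPER_RES_ERROR)
        break;
    if (skb_out)
        submit_one(cpbdev, skb_out); /* wrap in an urb and hand to usb */
} while (wres == CP_LKM_WRAPPER_RES_AGAIN);
#endif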
3618
3619static int cp_lkm_usb_start_xmit (void *ctx, struct sk_buff *skb)
3620{
3621 struct cp_lkm_usb_dev* cpdev = (struct cp_lkm_usb_dev *)ctx;
3622 struct cp_lkm_usb_base_dev* cpbdev;
3623 int res;
3624
3625 if(NULL == cpdev){
3626 DEBUG_TRACE("%s() no ctxt", __FUNCTION__);
3627 dev_kfree_skb_any(skb);
3628 return -1;
3629 }
3630 cpbdev = cpdev->cpbdev;
3631 if(cpbdev->tx_paused || CP_LKM_USB_ACTIVE != cpdev->state) {
3632 DEBUG_TRACE("%s() no ctxt", __FUNCTION__);
3633 dev_kfree_skb_any(skb);
3634 return -1;
3635 }
3636 res = cp_lkm_usb_start_xmit_common(ctx, skb, CP_LKM_WRAPPER_SRC_DATA, NULL);
3637 return res;
3638}
3639
3640static int cp_lkm_usb_to_cplkm_status(int usb_status)
3641{
3642 int cplkm_status;
3643 switch(usb_status) {
3644 case 0:
3645 cplkm_status = CP_LKM_STATUS_OK;
3646 break;
3647 default:
3648 //printk("usb err: %d\n", usb_status);
3649 cplkm_status = CP_LKM_STATUS_ERROR;
3650 break;
3651 }
3652 return cplkm_status;
3653}
3654
3655static void cp_lkm_usb_other_recv_process (struct cp_lkm_usb_base_dev* cpbdev, struct sk_buff *skb_in)
3656{
3657 struct skb_data *entry;
3658 struct cp_lkm_msg_hdr hdr;
3659 int status;
3660 struct cp_lkm_base_ep* bep;
3661 struct cp_lkm_usb_dev* cpdev = NULL;
3662 struct list_head *tmp, *nxt;
3663 struct cp_lkm_ep *ep;
3664
3665 if(!cp_lkm_usb_is_base_attached(cpbdev)){
3666 //printk("%s(), cpbdev: %p not attached. state: %d\n",__FUNCTION__,cpbdev,cpbdev->base_state);
3667 dev_kfree_skb_any (skb_in);
3668 return;
3669 }
3670 entry = (struct skb_data *)skb_in->cb;
3671 bep = entry->bep;
3672
3673 //Note: pkts on non-data endpoints when running with clones present a problem because there are no headers on these
3674 // pkts to tell us which clone ep to send this to. Fortunately, the modem stack serializes clone instances so
3675 // only one can be accessing the non-data endpoints at a time. In order to get any responses from the module
3676 // over their endpoint, they must be either listening or have posted a recv. We use this fact to find the
3677 // ep we need to send the recv back on.
3678 list_for_each_safe(tmp, nxt, &bep->eps) {
3679 ep = list_entry(tmp, struct cp_lkm_ep, list_bep);
3680 if (ep->con_flags & (CP_LKM_USB_LISTEN | CP_LKM_USB_RECV)) {
3681 cpdev = ep->cpdev;
3682 if (ep->con_flags & CP_LKM_USB_RECV) {
3683 //can only have one recv pending on non-data endpoints for a given ep number.
3684 //therefore when the clone is done, the base is done
3685 ep->con_flags &= ~CP_LKM_USB_RECV;
3686 bep->con_flags &= ~CP_LKM_USB_RECV;
3687 }
3688 //printk("%s(), other data cpdev: %p, ep: %p, num: 0x%x, flags: 0x%x\n",__FUNCTION__,cpdev,ep, ep->ep_num,ep->con_flags);
3689 break;
3690 }
3691 }
3692
3693 if (!cpdev) {
3694 //printk("%s() no cpdev unexpectedly for unique_id: %d",__FUNCTION__, entry->unique_id);
3695 dev_kfree_skb_any (skb_in);
3696 return;
3697 }
3698
3699 status = cp_lkm_usb_to_cplkm_status(entry->status);
3700 //printk("%s() other data uid: %d, ep_num:0x%x, status:%d, len: %d\n", __FUNCTION__, cpdev->unique_id,bep->ep_num, entry->status, skb_in->len);
3701
3702 memset(&hdr,0,sizeof(hdr));
3703 hdr.instance_id = cpdev->unique_id;
3704 hdr.cmd = CP_LKM_USB_CMD_DATA_RECV;
3705 hdr.status = status;
3706 hdr.len = skb_in?skb_in->len:0;
3707 hdr.arg1 = bep->ep_num;
3708 cp_lkm_post_message(&cp_lkm_usb_mgr.common, &hdr, skb_in);
3709
3710 return;
3711}
3712
3713
3714static void cp_lkm_usb_ctrl_process (struct cp_lkm_usb_base_dev* cpbdev, struct sk_buff *skb_in)
3715{
3716 struct skb_data *entry;
3717 struct cp_lkm_msg_hdr hdr;
3718 int status;
3719 struct cp_lkm_usb_dev* cpdev = NULL;
3720
3721 DEBUG_TRACE("%s()", __FUNCTION__);
3722 if(!cp_lkm_usb_is_base_attached(cpbdev)){
3723 dev_kfree_skb_any (skb_in);
3724 return;
3725 }
3726
3727 entry = (struct skb_data *)skb_in->cb;
3728 cpdev = cp_lkm_usb_find_dev(entry->unique_id);
3729 if (!cpdev) {
3730 //printk("%s() no cpdev unexpectedly for unique_id: %d",__FUNCTION__, entry->unique_id);
3731 dev_kfree_skb_any (skb_in);
3732 return;
3733 }
3734
3735 status = cp_lkm_usb_to_cplkm_status(entry->status);
3736 memset(&hdr,0,sizeof(hdr));
3737 hdr.instance_id = cpdev->unique_id;
3738 hdr.cmd = CP_LKM_USB_CMD_CTRL_RECV;
3739 hdr.status = status;
3740 hdr.len = skb_in?skb_in->len:0;
3741 hdr.arg1 = 0; //ctrl channel ep is always 0
3742
3743 cp_lkm_post_message(&cp_lkm_usb_mgr.common, &hdr, skb_in);
3744 DEBUG_TRACE("%s() ctrl response status:%d", __FUNCTION__, entry->status);
3745
3746 return;
3747}
3748
3749
3750//This function runs in an interrupt context so it can't be preempted. This means cpdev can't
3751//be deleted out from under us while we are in here
3752static void cp_lkm_usb_data_recv_process (struct cp_lkm_usb_base_dev* cpbdev, struct sk_buff *skb_in)
3753{
3754 struct sk_buff *skb_out;
3755 int res;
3756 int dst;
3757 struct skb_data *entry;
3758 struct cp_lkm_usb_dev* cpdev;
3759 struct cp_lkm_base_ep* bep;
3760 int ep_num;
3761 int mux_id;
3762
3763 // WARNING: The memory this pointer points to will be freed by the wrapper, so copy everything you need
3764 // out of it here before going into the while loop
3765 entry = (struct skb_data *)skb_in->cb;
3766 bep = entry->bep;
3767 ep_num = bep->ep_num;
3768
3769 //printk("%s() cpbdev: %p, bep: %p base_state: %d\n", __FUNCTION__, cpbdev, bep, cpbdev->base_state);
3770
3771 if(!cp_lkm_usb_is_base_attached(cpbdev)){
3772 dev_kfree_skb_any (skb_in);
3773 return;
3774 }
3775
3776 while(1) {
3777 skb_out = NULL;
3778
3779 mux_id = 0;
3780
3781 res = cp_lkm_wrapper_recv(cpbdev->wrapper_ctxt, &dst, &mux_id, skb_in, &skb_out);
3782
3783 if (dst != CP_LKM_WRAPPER_DST_CTRL && dst != CP_LKM_WRAPPER_DST_DATA) {
3784 // this is something other than data that we don't know what to do with, so drop it.
3785 goto recv_done;
3786 }
3787
3788 cpdev = cp_lkm_usb_find_muxed_dev(cpbdev, mux_id);
3789
3790 skb_in = NULL;
3791
3792 if (NULL == cpdev) {
3793 //LOG("%s(), no cpdev found for mux_id: 0x%x, or base_id: %d", __FUNCTION__,mux_id,cpbdev->base_id);
3794 DEBUG_WARN("%s(), no cpdev found for mux_id: 0x%x, or base_id: %d", __FUNCTION__,mux_id,cpbdev->base_id);
3795 goto recv_done;
3796 }
3797
3798 if(res == CP_LKM_WRAPPER_RES_ERROR) {
3799 UPDATE_STATS(cpdev->edi->pm_stats64_ctx, rx_dropped, 1);
3800 goto recv_done;
3801 }
3802
3803 //printk("%s() cpdev: %p, ep_num: 0x%x, dst: %d, mux_id: %d, state: %d, res: %d\n", __FUNCTION__, cpdev, ep_num, dst, mux_id, cpdev->state, res);
3804
3805 //DEBUG_INFO("%s() while() - skb_out:%p, dst:%d, res:%d", __FUNCTION__, skb_out, dst, res);
3806
3807 //if nothing to send, see if we can bail or if need to call again
3808 if(NULL == skb_out){
3809 goto recv_done;
3810 }
3811
3812 if(dst == CP_LKM_WRAPPER_DST_CTRL) {
3813 //printk("%s() ctrl pkt cpdev: %p\n", __FUNCTION__, cpdev);
3814 if (skb_out->len) { // watch for 0 length short packets
3815 struct cp_lkm_msg_hdr hdr;
3816
3817 DEBUG_TRACE("%s() recv app pkt", __FUNCTION__);
3818 memset(&hdr,0,sizeof(hdr));
3819 hdr.instance_id = cpdev->unique_id;
3820 hdr.cmd = CP_LKM_USB_CMD_DATA_RECV;
3821 hdr.status = CP_LKM_STATUS_OK;
3822 hdr.len = skb_out->len;
3823 hdr.arg1 = ep_num;
3824
3825 cp_lkm_post_message(&cp_lkm_usb_mgr.common, &hdr, skb_out);
3826 skb_out = NULL;
3827 }
3828 }
3829 //dst == CP_LKM_WRAPPER_DST_DATA
3830 else{
3831 //printk("%s() data pkt cpdev: %p\n", __FUNCTION__, cpdev);
3832 if (skb_out->len && cpdev->edi->pm_recv){
3833 //printk("%s() data pkt send to pm cpdev: %p, first byte: 0x%x\n", __FUNCTION__, cpdev, skb_out->data[0]);
3834 cpdev->edi->pm_recv(cpdev->edi->pm_recv_ctx, skb_out);
3835 skb_out = NULL;
3836 }
3837 }
3838
3839recv_done:
3840 if(skb_out) {
3841 dev_kfree_skb_any(skb_out);
3842 }
3843
3844 //if wrapper didn't ask to be called back, then done
3845 if(res != CP_LKM_WRAPPER_RES_AGAIN) {
3846 break;
3847 }
3848
3849 }
3850
3851 return;
3852}
3853
3854/*
3855 * This function runs in a hw interrupt context. Do not put any DEBUG_XX print messages in here.
3856*/
3857static void cp_lkm_usb_recv_complete (struct urb *urb)
3858{
3859 unsigned long flags;
3860 struct sk_buff *skb = (struct sk_buff *) urb->context;
3861 struct skb_data *entry = (struct skb_data *) skb->cb;
3862 struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev *)entry->cpbdev;
3863 struct cp_lkm_usb_dev* cpdev_stats_only;
3864 int urb_status = urb->status;
3865 struct cp_lkm_base_ep* bep = entry->bep;
3866 bool is_data = false;
3867 //if(urb->status) {
3868 // printk("recv_done: status: %d, len:%d\n", urb->status, urb->actual_length);
3869 //}
3870
3871 // we don't know what cpdev recv packets are destined for when running muxed clones, so report all errors
3872 // to the base device (for non cloned cases, this will always be the correct cpdev)
3873 cpdev_stats_only = cp_lkm_usb_find_dev(cpbdev->base_id);
3874
3875 //remove skb from the list first thing so no other code context looking at the
3876 //list (such as unlink_urbs) can mess with it.
3877 spin_lock_irqsave(&cpbdev->in_q.lock,flags);
3878 __skb_unlink(skb, &cpbdev->in_q);
3879 spin_unlock_irqrestore(&cpbdev->in_q.lock,flags);
3880
3881 skb_put (skb, urb->actual_length);
3882 if(bep->ep_num == cpbdev->data_in_bep_num) {
3883 is_data = true;
3884 entry->state = in_data_done;
3885 //note we don't decrement the data ep cnt until we process the pkt
3886 } else{
3887 bep->q_cnt--;
3888 entry->state = in_other_done;
3889 }
3890 entry->status = urb->status;
3891
3892 //skip status and error checking if the device has unplugged
3893 if(!cp_lkm_usb_is_base_attached(cpbdev)) {
3894 entry->status = -ENODEV;
3895 goto recv_done;
3896 }
3897
3898 switch (urb_status) {
3899 // success
3900 case 0:
3901 break;
3902
3903 // stalls need manual reset. this is rare ... except that
3904 // when going through USB 2.0 TTs, unplug appears this way.
3905 // we avoid the highspeed version of the ETIMEOUT/EILSEQ
3906 // storm, recovering as needed.
3907 case -EPIPE:
3908 UPDATE_STATS(cpdev_stats_only->edi->pm_stats64_ctx, rx_errors, 1);
3909 //don't clear halts on ctrl ep
3910 if(bep->ep_num != 0) {
3911 cp_lkm_usb_defer_kevent (cpbdev, bep, EVENT_RX_HALT);
3912 }
3913 goto block;
3914
3915 // software-driven interface shutdown
3916 case -ECONNRESET: // async unlink
3917 case -ESHUTDOWN: // hardware gone
3918 goto block;
3919
3920 case -ENODEV:
3921 //printk("recv_done nodev:%d\n", ENODEV);
3922 goto block;
3923
3924 // we get controller i/o faults during khubd disconnect() delays.
3925 // throttle down resubmits, to avoid log floods; just temporarily,
3926 // so we still recover when the fault isn't a khubd delay.
3927 case -EPROTO:
3928 case -ETIME:
3929 case -EILSEQ:
3930 UPDATE_STATS(cpdev_stats_only->edi->pm_stats64_ctx, rx_errors, 1);
3931 if (!timer_pending (&cpbdev->rx_delay)) {
3932 mod_timer (&cpbdev->rx_delay, jiffies + THROTTLE_JIFFIES);
3933 }
3934block:
3935 if(bep->ep_num == cpbdev->data_in_bep_num) {
3936 bep->q_cnt--;
3937 entry->state = in_data_cleanup;
3938 }
3939 else{
3940 entry->state = in_other_cleanup;
3941 }
3942
3943 break;
3944
3945 // data overrun ... flush fifo?
3946 case -EOVERFLOW:
3947 UPDATE_STATS(cpdev_stats_only->edi->pm_stats64_ctx, rx_over_errors, 1);
3948
3949 // FALLTHROUGH
3950
3951 default:
3952 if(bep->ep_num == cpbdev->data_in_bep_num) {
3953 bep->q_cnt--;
3954 entry->state = in_data_cleanup;
3955 }
3956 else{
3957 entry->state = in_other_cleanup;
3958 }
3959 UPDATE_STATS(cpdev_stats_only->edi->pm_stats64_ctx, rx_errors, 1);
3960 break;
3961 }
3962
3963 // on responses to a requested recv from the app driver, we need to always return something even on error so force it here
3964 if(bep->con_flags & CP_LKM_USB_RECV) {
3965 if(is_data){
3966 entry->state = in_data_done; //this should never happen, data endpoints always listen, they don't post recv's
3967 }
3968 else{
3969 entry->state = in_other_done;
3970 }
3971 }
3972
3973recv_done:
3974 //do not use the 'entry' struct after this call. It is part of the skb and the skb will be freed when the _bh function runs.
3975 //if you need something from it save it off before calling this
3976 if(is_data) {
3977 //cpdev->dbg_total_rx_irq++;
3978 //printk("%s(), got data on cpbdev: %p, bep: %p, id: %d\n",__FUNCTION__, cpbdev, entry->bep, cpbdev->base_id);
3979 cp_lkm_usb_done_and_defer_data(cpbdev, skb, DATA_SRC_RX);
3980 }
3981 else{
3982 //printk("%s(), got other data on cpbdev: %p, bep: %p, id: %d\n",__FUNCTION__, cpbdev, entry->bep, cpbdev->base_id);
3983 cp_lkm_usb_done_and_defer_other(cpbdev, skb);
3984 }
3985}
3986
3987//static int g_num_adjusts = 0;
3988//static int g_num_recv_pkts = 0;
3989//static int g_num_iters = 0;
3990static int cp_lkm_usb_submit_recv(struct cp_lkm_usb_base_dev* cpbdev , struct urb *urb, gfp_t flags, struct cp_lkm_base_ep* bep, bool data)
3991{
3992 struct sk_buff *skb;
3993 struct skb_data *entry;
3994 int retval = 0;
3995 unsigned long lockflags;
3996 size_t size;
3997 int hdr_size = 0;
3998 int hdr_offset = 0;
3999 int pad = 0; //some platforms require alignment override. pad takes care of that.
4000
4001 //g_num_recv_pkts++;
4002 //g_num_iters++;
4003 //if(g_num_iters > 10000){
4004 // printk("%s() num pkts: %d, num adjusts: %d\n",__FUNCTION__,g_num_recv_pkts,g_num_adjusts);
4005 // g_num_iters = 0;
4006 //}
4007 size = bep->max_transfer_size;
4008 if (data) {
4009 hdr_size = cpbdev->pm_hdr_size;
4010 hdr_offset = cpbdev->pm_hdr_offset;
4011 }
4012
4013 if(cp_lkm_is_broadcom && (hdr_offset & 0x3)) {
4014 //Jira issue FW-14929: On broadcom, we have to keep the buffers four byte aligned else the USB block
4015 //corrupts the data (no idea why).
4016 //Round up the hdr_offset to nearest 4 byte boundary. This means pkts may not be aligned as expected,
4017 //so the receive function will need to either realign with a copy, or send up the stack unaligned
4018 // See cp_lkm_pm_net_recv() to see how we decided to deal with it (subject to change).
4019 pad = 4 - (hdr_offset&0x3);
4020 //g_num_adjusts++;
4021 }
4022
4023 if ((skb = alloc_skb (size+hdr_size+pad, flags)) == NULL) {
4024 //if (netif_msg_rx_err (dev))
4025 // devdbg (dev, "no rx skb");
4026 cp_lkm_usb_defer_kevent (cpbdev, bep, EVENT_RX_MEMORY);
4027 return -ENOMEM;
4028 }
4029 if (data) {
4030 skb_reserve(skb, hdr_offset+pad);
4031 //printk("%s(), data: %p, len: %d, whs:%d, hs:%d, ho:%d\n",__FUNCTION__,skb->data,skb->len,wrapper_hdr_size,hdr_size,hdr_offset);
4032 }
4033 entry = (struct skb_data *) skb->cb;
4034 entry->urb = urb;
4035 entry->cpbdev = cpbdev;
4036 if(data) {
4037 entry->state = in_data_start;
4038 }
4039 else{
4040 entry->state = in_other_start;
4041 }
4042
4043 entry->status = 0;
4044 entry->bep = bep;
4045
4046 if(bep->type == UE_BULK) {
4047 usb_fill_bulk_urb (urb, cpbdev->udev, bep->pipe, skb->data, size,
4048 cp_lkm_usb_recv_complete, skb);
4049 }
4050 else{
4051 usb_fill_int_urb (urb, cpbdev->udev, bep->pipe, skb->data, size,
4052 cp_lkm_usb_recv_complete, skb, bep->interval);
4053 }
4054 //cp_lkm_usb_cnts(entry->state,1);
4055 spin_lock_irqsave (&cpbdev->in_q.lock, lockflags);
4056 if (cp_lkm_usb_is_base_attached(cpbdev) && !test_bit (EVENT_RX_HALT, &bep->err_flags)) {
4057 DEBUG_TRACE("%s() ep:0x%x, size:%d, type:%d, pipe:0x%x",__FUNCTION__, bep->ep_num, size, bep->type, bep->pipe);
4058 retval = usb_submit_urb (urb, GFP_ATOMIC);
4059 switch (retval) {
4060 case -EPIPE:
4061 //don't clear halts on ctrl ep
4062 if(bep->ep_num != 0) {
4063 cp_lkm_usb_defer_kevent (cpbdev, bep, EVENT_RX_HALT);
4064 }
4065 break;
4066 case -ENOMEM:
4067 cp_lkm_usb_defer_kevent (cpbdev, bep, EVENT_RX_MEMORY);
4068 break;
4069 case -ENODEV:
4070 //if (netif_msg_ifdown (dev))
4071 // devdbg (dev, "device gone");
4072 //netif_device_detach (dev->net);
4073 break;
4074 case -EPROTO:
4075 default:
4076 //if (netif_msg_rx_err (dev))
4077 // devdbg (dev, "rx submit, %d", retval);
4078 cp_lkm_schedule_rx_restock(cpbdev,bep);
4079 break;
4080 case 0:
4081 __skb_queue_tail (&cpbdev->in_q, skb);
4082 bep->q_cnt++;
4083 //if(cpdev->in_q.qlen == 1 && ep->index == CP_LKM_DATA_INDEX){
4084 // printk("rx q empty\n");
4085 //}
4086
4087 }
4088 } else {
4089 //if (netif_msg_ifdown (dev))
4090 // devdbg (dev, "rx: stopped");
4091 retval = -ENOLINK;
4092 }
4093 spin_unlock_irqrestore (&cpbdev->in_q.lock, lockflags);
4094 if (retval) {
4095 DEBUG_TRACE("%s() FAILED ep_num:0x%x ep_type:%d, retval: %d",__FUNCTION__, bep->ep_num, bep->type, retval);
4096 //cp_lkm_usb_cnts(entry->state,-1);
4097 dev_kfree_skb_any (skb);
4098 }
4099
4100 return retval;
4101}
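/*
 * Worked example of the pad math above, assuming alloc_skb() hands back
 * 4-byte-aligned data: with hdr_offset = 6, pad = 4 - (6 & 0x3) = 2, so
 * skb_reserve(skb, 6 + 2) leaves skb->data on a 4-byte boundary for the USB
 * block. Offsets already on a 4-byte boundary skip the adjustment (pad stays
 * 0), and the extra pad bytes are included in the alloc_skb() size.
 */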
4102
4103
4104static int cp_lkm_usb_init(void)
4105{
4106 DEBUG_TRACE("%s()", __FUNCTION__);
4107 memset(&cp_lkm_usb_mgr, 0x00, sizeof(struct cp_lkm_usb_ctx));
4108 cp_lkm_usb_mgr.common.open = cp_lkm_usb_open;
4109 cp_lkm_usb_mgr.common.close = cp_lkm_usb_close;
4110 cp_lkm_usb_mgr.common.handle_msg = cp_lkm_usb_handle_msg;
4111 cp_lkm_usb_mgr.common.handle_ioctl = cp_lkm_usb_handle_ioctl;
4112 INIT_LIST_HEAD(&cp_lkm_usb_mgr.dev_list);
4113
4114 cp_lkm_common_ctx_init(&cp_lkm_usb_mgr.common);
4115
4116 spin_lock_init(&cp_lkm_usb_mgr.lock);
4117 //sema_init(&cp_lkm_usb_mgr.thread_sem, 1);
4118
4119 if(!strcmp(PRODUCT_PLATFORM, "brcm_arm")) {
4120 LOG("cp_lkm: Broadcom platform");
4121 cp_lkm_is_broadcom = 1;
4122 }
4123
4124 LOG("cp_lkm: Product chipset %s",PRODUCT_INFO_CHIPSET);
4125 LOG("cp_lkm: Product platform %s",PRODUCT_PLATFORM);
4126
4127 //Things work better if the napi weight here matches the global weight set in service_manager/services/firewall.py
4128 //This is even true if we don't use napi here, since ethernet on some platforms uses it
4129 if ((strcmp(PRODUCT_PLATFORM,"ramips")==0) && (strcmp(PRODUCT_INFO_CHIPSET, "3883")!=0)){
4130 //all ralink (mediatek) platforms except for 3883 use the low settings
4131 //use_high = false;
4132 CP_LKM_PM_NAPI_WEIGHT = 32;
4133 }
4134 else{
4135 //use_high = true;
4136 CP_LKM_PM_NAPI_WEIGHT = 64;
4137 }
4138
4139 //set up default settings for all platforms
4140 CP_LKM_USB_NAPI_MAX_WORK = CP_LKM_PM_NAPI_WEIGHT;
4141 CP_LKM_USB_MAX_RX_QLEN = CP_LKM_USB_NAPI_MAX_WORK;
4142 CP_LKM_USB_MAX_OTHER_QLEN = 2;
4143 CP_LKM_USB_TX_PAUSE_Q_PKTS = CP_LKM_USB_NAPI_MAX_WORK;
4144 CP_LKM_USB_TX_RESUME_Q_PKTS = CP_LKM_USB_TX_PAUSE_Q_PKTS/4;
4145 CP_LKM_USB_TX_SCHED_CNT = 1;
4146 CP_LKM_USB_RX_SCHED_CNT = 1;
4147 CP_LKM_USB_RESTOCK_MULTIPLE = 1; //restock rx as we process them
4148 CP_LKM_USB_TASKLET_CNT = 10;
4149 CP_LKM_USB_WORKQUEUE_CNT = 5;
4150 CP_LKM_USB_PROCESS_DIVISOR = 4;
4151
4152 LOG("cp_lkm: Processor: %s, Max work: %d, NAPI budget: %d, QLEN: %d.",PRODUCT_INFO_CHIPSET, CP_LKM_USB_NAPI_MAX_WORK, CP_LKM_PM_NAPI_WEIGHT, CP_LKM_USB_MAX_RX_QLEN);
4153
4154 return 0;
4155
4156}
4157
4158static int cp_lkm_usb_cleanup(void)
4159{
4160 //module is unloading, clean up everything
4161 // empty pending posted messages
4162 cp_lkm_cleanup_msg_list(&cp_lkm_usb_mgr.common);
4163
4164 cp_lkm_usb_close(&cp_lkm_usb_mgr.common);
4165 return 0;
4166}
4167
4168static int cp_lkm_usb_open(struct cp_lkm_common_ctx *ctx)
4169{
4170 //struct cp_lkm_usb_ctx* mgr;
4171
4172 DEBUG_TRACE("%s()", __FUNCTION__);
4173 //mgr = (struct cp_lkm_usb_ctx*)ctx;
4174
4175 return 0;
4176}
4177
4178static int cp_lkm_usb_close(struct cp_lkm_common_ctx *ctx)
4179{
4180 //unsigned long flags;
4181 //struct cp_lkm_usb_dev* cpdev;
4182 //struct cp_lkm_usb_close_intf ci;
4183 //struct cp_lkm_usb_unplug_intf ui;
4184 LOG("%s() called unexpectedly.", __FUNCTION__);
4185
4186 //NOTE: catkin 10/11/2019 - Close is only called in our system if the modem stack crashes. This means
4187 // things are in a bad state and the router will be rebooting. We decided not
4188 // to clean things up here because this code got into an infinite loop in
4189 // certain fail situations, which prevented the router from rebooting.
4190 // Revisit if close ever becomes a normal event.
4191
4192 /*
4193 while(1) {
4194 spin_lock(&cp_lkm_usb_mgr.lock);
4195
4196 cpdev = cp_lkm_usb_get_head_dev();
4197
4198 spin_unlock(&cp_lkm_usb_mgr.lock);
4199 if(!cpdev) {
4200 return 0;
4201 }
4202
4203 //TODO - when this is closed while a modem is plugged, we will be deleting the top half of the driver while the bottom half is
4204 // still plugged. Figure out how to force the driver to disconnect the modem
4205 ci.unique_id = cpdev->unique_id;
4206 cp_lkm_usb_close_intf(&ci);
4207
4208 //the unplug removes the device from the list which prevents us from infinite looping here
4209 ui.unique_id = cpdev->unique_id;
4210 cp_lkm_usb_unplug_intf(&ui);
4211 }
4212
4213 cp_lkm_cleanup_msg_list(ctx);
4214 */
4215 return 0;
4216}
4217
4218static int cp_lkm_usb_handle_msg(struct cp_lkm_common_ctx *ctx, struct cp_lkm_msg_hdr *hdr, struct sk_buff *skb)
4219{
4220 int retval = -1;
4221 struct cp_lkm_ep* ep;
4222 struct cp_lkm_usb_dev* cpdev;
4223 struct cp_lkm_usb_base_dev* cpbdev;
4224
4225 //grab lock to protect global device list before searching (don't want to search it if another thread is adding or removing a cpdev)
4226 spin_lock(&cp_lkm_usb_mgr.lock);
4227 cpdev = cp_lkm_usb_find_dev(hdr->instance_id);
4228
4229 //grab thread semaphore so disconnect can't run and delete the cpdev while we are running here
4230 if(!cpdev || !cp_lkm_usb_is_attached(cpdev) || !cp_lkm_usb_is_base_attached(cpdev->cpbdev)) {
4231 spin_unlock(&cp_lkm_usb_mgr.lock);
4232 dev_kfree_skb_any (skb);
4233 //printk("%s() no device or no probe yet\n", __FUNCTION__);
4234 return 0;
4235 }
4236 cpbdev = cpdev->cpbdev;
4237 switch(hdr->cmd) {
4238 case CP_LKM_USB_CMD_DATA_SEND:
4239 {
4240 ep = cp_lkm_usb_get_ep(cpdev, hdr->arg1);
4241 if(ep) {
4242 //printk("%s(), send other data cpbdev: %p, cpdev: %p, bep: %p, ep: %p, num: 0x%x\n",__FUNCTION__,cpdev->cpbdev,cpdev,ep->bep,ep,ep->ep_num);
4243 retval = cp_lkm_usb_start_xmit_common(cpdev, skb, CP_LKM_WRAPPER_SRC_CTRL, ep);
4244 skb = NULL;
4245 }
4246 else{
4247 DEBUG_TRACE("%s() Invalid EP number 0x%x", __FUNCTION__, hdr->arg1);
4248 retval = -1;
4249 }
4250 }
4251 break;
4252 case CP_LKM_USB_CMD_CTRL_SEND:
4253 {
4254 retval = cp_lkm_usb_start_ctrl_xmit(cpdev, skb);
4255 skb = NULL;
4256 }
4257 break;
4258 }
4259
4260 spin_unlock(&cp_lkm_usb_mgr.lock);
4261
4262 if(skb) {
4263 dev_kfree_skb_any (skb);
4264 }
4265 return retval;
4266}
4267
4268static int cp_lkm_usb_handle_ioctl(struct cp_lkm_common_ctx *ctx, int cmd, void *k_argp)
4269{
4270 int retval = -1;
4271 //printk("%s(), cmd:0x%x\n", __FUNCTION__, _IOC_NR(cmd));
4272
4273 switch(cmd) {
4274 case CP_LKM_IOCTL_USB_PLUG_INTF:
4275 {
4276 struct cp_lkm_usb_plug_intf* pi = (struct cp_lkm_usb_plug_intf*)k_argp;
4277 retval = cp_lkm_usb_plug_intf(pi);
4278 }
4279 break;
4280 case CP_LKM_IOCTL_USB_SET_WRAPPER:
4281 {
4282 struct cp_lkm_usb_set_wrapper* sw = (struct cp_lkm_usb_set_wrapper*)k_argp;
4283 retval = cp_lkm_usb_set_wrapper(sw);
4284 }
4285 break;
4286 case CP_LKM_IOCTL_USB_SET_MUX_ID:
4287 {
4288 struct cp_lkm_usb_set_mux_id* smi = (struct cp_lkm_usb_set_mux_id*)k_argp;
4289 retval = cp_lkm_usb_set_mux_id(smi);
4290 }
4291 break;
4292 case CP_LKM_IOCTL_USB_OPEN_INTF:
4293 {
4294 struct cp_lkm_usb_open_intf* oi = (struct cp_lkm_usb_open_intf*)k_argp;
4295 retval = cp_lkm_usb_open_intf(oi);
4296 }
4297 break;
4298 case CP_LKM_IOCTL_USB_CLOSE_INTF:
4299 {
4300 struct cp_lkm_usb_close_intf* ci = (struct cp_lkm_usb_close_intf*)k_argp;
4301 retval = cp_lkm_usb_close_intf(ci);
4302 }
4303 break;
4304 case CP_LKM_IOCTL_USB_UNPLUG_INTF:
4305 {
4306 struct cp_lkm_usb_unplug_intf* ui = (struct cp_lkm_usb_unplug_intf*)k_argp;
4307 retval = cp_lkm_usb_unplug_intf(ui);
4308 }
4309 break;
4310 case CP_LKM_IOCTL_USB_EP_ACTION:
4311 {
4312 struct cp_lkm_usb_ep_action* ea = (struct cp_lkm_usb_ep_action*)k_argp;
4313 retval = cp_lkm_usb_ep_action(ea);
4314 }
4315 break;
4316 case CP_LKM_IOCTL_USB_PM_LINK:
4317 {
4318 struct cp_lkm_usb_pm_link *upl = (struct cp_lkm_usb_pm_link *)k_argp;
4319 retval = cp_lkm_usb_pm_link(upl);
4320 }
4321 break;
4322 case CP_LKM_IOCTL_USB_IS_ALIVE_INTF:
4323 {
4324 struct cp_lkm_usb_is_alive_intf* alivei = (struct cp_lkm_usb_is_alive_intf*)k_argp;
4325 retval = cp_lkm_usb_is_alive_intf(alivei);
4326 }
4327 }
4328
4329 return retval;
4330}
4331
4332
4333/******************************* kernel module PM instance functionality **********************************/
4334struct cp_lkm_pm_ctx {
4335 struct cp_lkm_common_ctx common;
4336 struct list_head pm_list;
4337 spinlock_t pm_list_lock;
4338};
4339
4340struct cp_lkm_pm_ctx cp_lkm_pm_mgr;
4341
4342
4343static void cp_lkm_pm_filter_empty_list(struct cp_lkm_pm_common *pm)
4344{
4345
4346 struct cp_lkm_pm_filter *filter;
4347 struct list_head *entry, *tmp;
4348
4349 list_for_each_safe(entry, tmp, &pm->filter_list) {
4350 filter = list_entry(entry, struct cp_lkm_pm_filter, list);
4351 list_del(&filter->list);
4352 kfree(filter);
4353 }
4354}
4355
4356static bool cp_lkm_pm_filter_ok(struct cp_lkm_pm_common *pm, unsigned char *buf, unsigned int buf_len)
4357{
4358 bool allow = true; // default allow the egress packet
4359
4360 struct list_head *pos;
4361
4362 struct in_device *in_dev;
4363 struct in_ifaddr *ifa;
4364 struct iphdr *ipv4_hdr;
4365 u32 ipv4_src_addr = 0;
4366 u32 ipv4_net_addr = 0;
4367 u32 ipv4_net_mask = 0;
4368
4369 ipv4_hdr = (struct iphdr *)buf;
4370
4371 // these are the include filters (white list) - exclude filters (black list) are not currently supported
4372 // exclude filters may need to be processed in another loop through the filters
4373 list_for_each(pos, &pm->filter_list) {
4374 struct cp_lkm_pm_filter *filter = list_entry(pos, struct cp_lkm_pm_filter, list);
4375 switch(filter->type) {
4376 case CP_LKM_PM_FILTER_TYPE_IP_SRC_WAN_SUBNET_INCLUDE:
4377 if (4 == ipv4_hdr->version) {
4378 // ipv4
4379 allow = false;
4380 ipv4_src_addr = __be32_to_cpu(ipv4_hdr->saddr);
4381 if(ipv4_src_addr == 0){
4382 //DHCP rebind packets may have a src addr of 0.0.0.0 and we want to let those through.
4383 allow = true;
4384 }
4385 else{
4386 // get network device IP address and check against src packet ip address
4387 rcu_read_lock();
4388 in_dev = rcu_dereference(pm->net_dev->ip_ptr);
4389 // in_dev has a list of IP addresses (because an interface can have multiple - check them all)
4390 for (ifa = in_dev->ifa_list; ifa != NULL; ifa = ifa->ifa_next) {
4391 ipv4_net_addr = __be32_to_cpu(ifa->ifa_local);
4392 ipv4_net_mask = __be32_to_cpu(ifa->ifa_mask);
4393 if ((ipv4_net_addr & ipv4_net_mask) == (ipv4_src_addr & ipv4_net_mask)) {
4394 // allow the packet
4395 allow = true;
4396 break;
4397 }
4398 }
4399 rcu_read_unlock();
4400 }
4401 }/* benk needs to be tested before ok to execute
4402 else if (6 == ipv4_hdr->version) {
4403 struct in6_addr *addr = (struct in6_addr *)&buf[2 * sizeof(u32)];
4404 if (ipv6_chk_prefix(addr, pm->net_dev)) {
4405 allow = true;
4406 }
4407 } */
4408 break;
4409 case CP_LKM_PM_FILTER_TYPE_IP_SRC_SUBNET_INCLUDE:
4410 if (4 == ipv4_hdr->version) {
4411 // ipv4
4412 allow = false;
4413 ipv4_src_addr = __be32_to_cpu(ipv4_hdr->saddr);
4414 if(ipv4_src_addr == 0){
4415 //DHCP rebind packets may have a src addr of 0.0.0.0 and we want to let those through.
4416 allow = true;
4417 }
4418 else if ((filter->subnet.ipv4_addr & filter->subnet.ipv4_mask) == (ipv4_src_addr & filter->subnet.ipv4_mask)) {
4419 allow = true;
4420 }
4421 }
4422 break;
4423 default:
4424 break;
4425 }
4426
4427 if (allow) {
4428 break;
4429 }
4430 }
4431
4432 if (!allow) {
4433 DEBUG_WARN("%s() dropping packet - src:0x%x\n", __FUNCTION__, ipv4_src_addr);
4434 }
4435
4436 return allow;
4437}
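/*
 * Worked example of the include-filter mask compare above: with a filter
 * subnet of 192.168.10.0/255.255.255.0 (0xC0A80A00/0xFFFFFF00), a source of
 * 192.168.10.57 (0xC0A80A39) masks to 0xC0A80A00 on both sides and is
 * allowed, while 10.0.0.5 (0x0A000005) masks to 0x0A000000 and stays blocked
 * unless another filter (or the 0.0.0.0 DHCP-rebind escape) matches.
 */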
4438/******************************* kernel module pm common functionality **********************************/
4439int cp_lkm_common_init(struct cp_lkm_pm_common *pmc)
4440{
4441 // allocate stats struct
4442 pmc->pcpu_stats64 = netdev_alloc_pcpu_stats(struct cp_lkm_pm_stats64);
4443 if (!pmc->pcpu_stats64) {
4444 return -ENOMEM;
4445 }
4446
4447
4448 pmc->pm_link_count = 0;
4449 spin_lock_init(&pmc->pm_link_lock);
4450 INIT_LIST_HEAD(&pmc->filter_list);
4451
4452 return 0;
4453}
4454
4455void cp_lkm_common_deinit(struct cp_lkm_pm_common *pmc)
4456{
4457 if (!pmc->pcpu_stats64) {
4458 return;
4459 }
4460 free_percpu(pmc->pcpu_stats64);
4461 pmc->pcpu_stats64 = NULL;
4462}
4463// The pm_link_lock is used to coordinate activity between xmit, poll, and link/unlink
4464// It is okay to poll and xmit at the same time, but we don't want to do either if we are linking or unlinking.
4465// link/unlink sets the pm_link_count negative to block both poll and xmit. If pm_link_count is not negative then
4466// both poll and xmit are free to grab the link at any time and at the same time.
4467//retval:
4468// 0 = you have the token, proceed
4469// -1 = you don't have the token, do not pass go
4470int cp_lkm_common_inc_link_lock(struct cp_lkm_pm_common* pmc)
4471{
4472 unsigned long flags;
4473 int retval = 0;
4474 spin_lock_irqsave(&pmc->pm_link_lock, flags);
4475 if(pmc->pm_link_count < 0) {
4476 retval = -1;
4477 }
4478 else{
4479 pmc->pm_link_count++;
4480 }
4481 spin_unlock_irqrestore(&pmc->pm_link_lock, flags);
4482 return retval;
4483}
4484
4485int cp_lkm_common_dec_link_lock(struct cp_lkm_pm_common* pmc)
4486{
4487 unsigned long flags;
4488 int retval = 0;
4489 spin_lock_irqsave(&pmc->pm_link_lock, flags);
4490 if(pmc->pm_link_count > 0) {
4491 pmc->pm_link_count--;
4492 }
4493 else{
4494 //should never hit this
4495 retval = -1;
4496 }
4497 spin_unlock_irqrestore(&pmc->pm_link_lock, flags);
4498 return retval;
4499}
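/*
 * Usage sketch of the link-count token described above, from the reader
 * (xmit/poll) side; do_work() is a hypothetical placeholder. link/unlink
 * (outside this section) is expected to park pm_link_count at a negative
 * value once readers have drained, blocking new entries:
 */
#if 0 /* illustrative sketch only, not compiled into this driver */
if (cp_lkm_common_inc_link_lock(pmc) < 0)
    return;                 /* link is being made or broken: back off */
do_work(pmc);               /* safe to touch the usb<->pm linkage here */
cp_lkm_common_dec_link_lock(pmc);
#endif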
4500
4501/******************************* kernel module net PM functionality **********************************/
4502
4503// common structure for ethernet and IP protocol managers
4504struct cp_lkm_pm_net {
4505 struct cp_lkm_pm_common common;
4506 struct ethhdr eth_hdr;
4507
4508};
4509
4510static void cp_lkm_pm_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
Kyle Swenson74ad7532023-02-16 11:05:29 -07004511{
4512 struct cp_lkm_pm_net *pm_net;
4513 int i;
4514 struct cp_lkm_pm_stats64 *pstats;
4515
4516 pm_net = netdev_priv(netdev);
4517
4518 for_each_possible_cpu(i) {
4519 u64 rx_packets, rx_bytes, rx_errors, rx_dropped, rx_over_errors;
4520 u64 tx_packets, tx_bytes, tx_errors, tx_dropped;
4521 unsigned int start;
4522 pstats = per_cpu_ptr(pm_net->common.pcpu_stats64, i);
4523 do {
4524 start = u64_stats_fetch_begin_irq(&pstats->syncp);
4525 rx_packets = pstats->rx_packets;
4526 tx_packets = pstats->tx_packets;
4527 rx_bytes = pstats->rx_bytes;
4528 tx_bytes = pstats->tx_bytes;
4529 rx_errors = pstats->rx_errors;
4530 tx_errors = pstats->tx_errors;
4531 rx_dropped = pstats->rx_dropped;
4532 tx_dropped = pstats->tx_dropped;
4533 rx_over_errors = pstats->rx_over_errors;
4534 } while (u64_stats_fetch_retry_irq(&pstats->syncp, start));
4535
4536 stats->rx_packets += rx_packets;
4537 stats->tx_packets += tx_packets;
4538 stats->rx_bytes += rx_bytes;
4539 stats->tx_bytes += tx_bytes;
4540 stats->rx_errors += rx_errors;
4541 stats->tx_errors += tx_errors;
4542 stats->rx_dropped += rx_dropped;
4543 stats->tx_dropped += tx_dropped;
4544 stats->rx_over_errors += rx_over_errors;
4545 }
4546
4547}
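/*
 * The fetch_begin/fetch_retry loop above is the reader half of the per-cpu
 * u64 stats protocol. The UPDATE_STATS() writer macro is defined outside
 * this section; a conventional writer under the same syncp would look
 * roughly like this (an assumption, not the macro's actual body):
 */
#if 0 /* illustrative sketch only, not compiled into this driver */
struct cp_lkm_pm_stats64 *pstats = this_cpu_ptr(pmc->pcpu_stats64);

u64_stats_update_begin(&pstats->syncp);
pstats->rx_packets++;
pstats->rx_bytes += recv_len; /* bytes just received */
u64_stats_update_end(&pstats->syncp);
#endif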
4548
4549static int cp_lkm_pm_net_open(struct net_device *dev)
4550{
4551 struct cp_lkm_pm_net *pm_net;
4552
4553 DEBUG_TRACE("%s()", __FUNCTION__);
4554
4555 pm_net = netdev_priv(dev);
4556 netif_start_queue(dev);
4557
4558 // is this link up?
4559 return 0;
4560}
4561
4562static int cp_lkm_pm_net_close(struct net_device *dev)
4563{
4564 struct cp_lkm_pm_net *pm_net = netdev_priv(dev);
4565 struct cp_lkm_msg_hdr hdr;
4566
4567 DEBUG_TRACE("%s()", __FUNCTION__);
4568
4569 // link change
4570 netif_stop_queue(dev);
4571
4572 // post message to indicate link down
4573 memset(&hdr,0,sizeof(hdr));
4574 hdr.instance_id = pm_net->common.unique_id;
4575 hdr.cmd = CP_LKM_PM_LINK_DOWN;
4576 hdr.status = CP_LKM_STATUS_OK;
4577 cp_lkm_post_message(&cp_lkm_pm_mgr.common, &hdr, NULL);
4578 LOG("Link Down indicated - id:%d\n", hdr.instance_id);
4579
4580
4581 return 0;
4582}
4583
4584static int cp_lkm_pm_net_xmit(struct sk_buff *skb, struct net_device *dev)
4585{
4586 struct cp_lkm_pm_net *pm_net = netdev_priv(dev);
4587 bool filter_ok = true;
4588 int link_res;
4589
4590 //see if we can grab the link lock; if not, we are either bringing up or taking down the link between USB and PM, so it is not safe to proceed
4591 link_res = cp_lkm_common_inc_link_lock(&pm_net->common);
4592 if(link_res < 0) {
4593 dev_kfree_skb_any(skb);
4594 return NETDEV_TX_OK;
4595 }
4596
4597 if (!pm_net->common.edi) {
4598 // cannot do anything without edi
4599 dev_kfree_skb_any(skb);
4600 goto net_xmit_done;
4601 }
4602
4603 //DEBUG_INFO("%s() - %s len:%d", __FUNCTION__, pm_net->common.net_dev->name, skb->len);
4604 UPDATE_STATS(pm_net->common.edi->pm_stats64_ctx, tx_bytes, (skb->len - sizeof(struct ethhdr)));
4605 UPDATE_STATS(pm_net->common.edi->pm_stats64_ctx, tx_packets, 1);
4606 /* Drop packet if interface is not attached */
4607 if (0 == pm_net->common.attached)
4608 goto drop;
4609
4610 if (!pm_net->common.edi->usb_send) {
4611 goto drop;
4612 }
4613
4614 filter_ok = cp_lkm_pm_filter_ok(&pm_net->common, skb->data + sizeof(struct ethhdr), skb->len - sizeof(struct ethhdr));
4615 if (!filter_ok) {
4616 pm_net->common.filter_drop_cnt++;
4617 DEBUG_WARN("%s() filter dropped packet cnt:%u", __FUNCTION__, pm_net->common.filter_drop_cnt);
4618 goto drop;
4619 }
4620
4621 switch(pm_net->common.type) {
4622 case CP_LKM_PM_TYPE_IP_DHCP:
4623 case CP_LKM_PM_TYPE_IP_STATIC:
4624 skb_pull(skb, sizeof(struct ethhdr)); // strip off the ethernet header
4625 break;
4626 default:
4627 break;
4628 }
4629
4630 // send data to USB module
4631 pm_net->common.edi->usb_send(pm_net->common.edi->usb_send_ctx, skb);
4632 goto net_xmit_done;
4633
4634drop:
4635 DEBUG_INFO("%s() - dropped", __FUNCTION__);
4636 UPDATE_STATS(pm_net->common.edi->pm_stats64_ctx, tx_dropped, 1);
4637 dev_kfree_skb_any(skb);
4638
4639net_xmit_done:
4640 cp_lkm_common_dec_link_lock(&pm_net->common);
4641 return NETDEV_TX_OK;
4642}
4643
4644
4645#if 0
4646static u8 cp_lkm_pm_test_find(u8* pkt, u32 pkt_len, u8* pattern, u32 pattern_len)
4647{
4648 s32 i;
4649 for(i = 0; i < (pkt_len - pattern_len); i++) {
4650 if (memcmp(&pkt[i],pattern,pattern_len) == 0) {
4651 return 1;
4652 }
4653 }
4654 return 0;
4655}
4656
4657static int cp_lkm_pm_test(struct sk_buff *skb)
4658{
4659static u8 first_pkt = 1;
4660static u8 started = 0;
4661static unsigned long total_data = 0;
4662static unsigned long start_time = 0;
4663static unsigned long stop_time = 0;
4664
4665static unsigned long invalid_pkts = 0;
4666static unsigned long total_pkts = 0;
4667
4668 int drop = 0;
4669 unsigned char *ptr = skb->data;
4670 u32 pkt_len = skb->len;
4671 u8 prot;
4672 //u8 type;
4673 u16 udp_len;
4674 u16 dst_port;
4675
4676 if (pkt_len < 20) {
4677 return 0;
4678 }
4679 //function is set up to parse IP pkts, may be called with ether framed pkts as well.
4680 //auto detect ether hdr and remove it
4681 if (ptr[0] != 0x45) {
4682 //ether header
4683 if(ptr[14] == 0x45){
4684 ptr+=14;
4685 pkt_len -= 14;
4686 }
4687 //vlan hdr
4688 else if (ptr[12] == 0x81 && ptr[18] == 0x45) {
4689 ptr+=18;
4690 pkt_len -=18;
4691 }
4692 }
4693
4694 if (ptr[0] != 0x45) {
4695 invalid_pkts++;
4696 }
4697
4698 //printk("0x%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x len: %d \n",ptr[0],ptr[1],ptr[2],ptr[3],ptr[4],ptr[5],ptr[6],ptr[7],ptr[8],ptr[9],ptr[10],ptr[11],ptr[12],ptr[13],ptr[14],ptr[15],pkt_len);
4699 //printk("0x%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x len: %d \n",ptr[0],ptr[1],ptr[2],ptr[3],ptr[4],ptr[5],ptr[6],ptr[7],ptr[8],ptr[9],ptr[10],ptr[11],ptr[12],ptr[13],ptr[14],ptr[15],pkt_len);
4700 if (pkt_len >= 28) {
4701 prot = ptr[9];
4702 if (prot == 0x11) {
4703 ptr += 20; //skip ip header
4704 pkt_len -= 20;
4705 dst_port = ntohs(*((u16*)(&ptr[2])));
4706 udp_len = ntohs(*((u16*)(&ptr[4])));
4707 //printk("Got UDP pkt\n");
4708 if (started && dst_port == 5001) {
4709 drop = 1;
4710 if (first_pkt == 1) {
4711 first_pkt = 0;
4712 total_data = 0;
4713 start_time = jiffies;
4714 invalid_pkts = 0;
4715 total_pkts = 0;
4716 }
4717 total_data += (udp_len+34); //add ip and ether hdrs
4718 stop_time = jiffies;
4719 total_pkts++;
4720 }
4721 else if(dst_port == 5002) {
4722 drop = 1;
4723 ptr += 8; //skip udp header
4724                printk("SHIM START PORT len: %d data: 0x%x, start=%lx, stop=%lx\n",udp_len, ptr[0], start_time, stop_time);
4725                if(cp_lkm_pm_test_find(ptr, udp_len - 8, (u8*)"START", 5)){ //udp_len includes the 8 byte udp header we already skipped
4726 printk("Got IPERF START\n");
4727 first_pkt = 1;
4728 started = 1;
4729 cp_lkm_wrapper_start_debug();
4730 }
4731                else if (cp_lkm_pm_test_find(ptr, udp_len - 8, (u8*)"STOP", 4)) {
4732                    u32 delta_time = ((stop_time - start_time)*1000/HZ) - 2000; //exclude iperf's 2 second delay waiting for an ack we won't send
4733                    u32 bits_per_sec = (total_data/delta_time)*8000; //bytes per millisecond * 8000 = bits per second
4734
4735                    started = 0;
4736                    printk("Got IPERF STOP: Total data: %lu, Total pkts: %lu, Total invalid: %lu, Total time: %u msec, BitsPerSec: %u\n",total_data, total_pkts, invalid_pkts, delta_time,bits_per_sec);
4737 cp_lkm_wrapper_stop_debug();
4738 }
4739 }
4740 }
4741 }
4742 return drop;
4743}
4744#endif
4745
4746// called in soft interrupt context; if that ever changes, some protection around pm_net will be required
4747//int num_ip_copies = 0;
4748//int num_eth_copies = 0;
4749//int num_pkts = 0;
4750//int num_iters = 0;
4751//int num_unaligned = 0;
4752static int cp_lkm_pm_net_recv(void *ctx, struct sk_buff *skb)
4753{
4754 struct cp_lkm_pm_net *pm_net;
4755 int err;
4756 int recv_bytes;
4757 struct sk_buff *skb_new;
4758 int align = 0; //set to 1 to always send 4 byte aligned IP pkts to network stack
4759 int pad = 20; //number of bytes to put on front of new skbs
4760
4761 //DEBUG_INFO("%s()", __FUNCTION__);
4762 if(NULL == ctx) {
4763 dev_kfree_skb_any(skb);
4764 return 0;
4765 }
4766
4767 //num_pkts++;
4768 //num_iters++;
4769 pm_net = (struct cp_lkm_pm_net *)ctx;
4770
4771 //printk("%s() pm_net: %p\n", __FUNCTION__, pm_net);
4772
4773
4774 skb->dev = pm_net->common.net_dev;
4775
4776 switch(pm_net->common.type) {
4777 case CP_LKM_PM_TYPE_ETHERNET_DHCP:
4778 case CP_LKM_PM_TYPE_ETHERNET_STATIC:
4779 case CP_LKM_PM_TYPE_ETHERNET_STATIC_NOARP:
4780 //this strips the ether header off the packet
4781 skb->protocol = eth_type_trans(skb, pm_net->common.net_dev);
4782 //Need IP hdr aligned for IP stack to avoid unaligned access interrupts
4783 if(align && ((uintptr_t)(skb->data) & 0x3)) {
4784 //num_eth_copies++;
4785 skb_new = skb_copy_expand(skb, pad, 0, GFP_ATOMIC);
4786 dev_kfree_skb_any(skb);
4787 skb=skb_new;
4788 }
4789 if (!skb) {
4790 // packet dropped
4791 UPDATE_STATS(pm_net->common.edi->pm_stats64_ctx, rx_dropped, 1);
4792 return -ENOMEM;
4793 }
4794 break;
4795
4796 case CP_LKM_PM_TYPE_IP_DHCP:
4797 case CP_LKM_PM_TYPE_IP_STATIC:
4798 // Need to add ether header first for processing, then remove it. Need IP hdr aligned when done.
4799 //
4800 // Note: avoid the temptation to skip adding the ether header and doing manually what the call
4801 // to eth_type_trans() does. We did that and it bit us (see Jira issue FW-16149)
4802 // The kernel expects the ether header to be present in the skb buff even though the data ptr
4803 // has been moved past it. Also, if the skb has been cloned, then we are dealing with an
4804 // aggregated modem protocol (multiple pkts per skb), so we have to make a copy to guarantee
4805 // our tmp ether header isn't written into the data space of the previous pkt from the set.
4806 //
4807 if((align && ((uintptr_t)(skb->data) & 0x3)) || (skb_headroom(skb) < ETH_HLEN) || skb_cloned(skb)){
4808 //printk("copy: align: %d, head: %d, cloned: %d, len: %d\n", ((uintptr_t)(skb->data) & 0x3), skb_headroom(skb), skb_cloned(skb), skb->len);
4809 //num_ip_copies++;
4810 skb_new = skb_copy_expand(skb, 16+pad, 0, GFP_ATOMIC);
4811 dev_kfree_skb_any(skb);
4812 skb=skb_new;
4813 }
4814
4815 if (!skb) {
4816 // packet dropped
4817 UPDATE_STATS(pm_net->common.edi->pm_stats64_ctx, rx_dropped, 1);
4818 return -ENOMEM;
4819 }
4820
4821 if (0x60 == (skb->data[0] & 0xF0)) { //mask off version bits of first byte of IP packet to check for ip version
4822 // set the hdr protocol type to IPV6
4823 pm_net->eth_hdr.h_proto = __constant_htons(ETH_P_IPV6);
4824 } else {
4825 // probably ipv4, but not explicitly checking
4826 // set the hdr protocol type to IPV4
4827 pm_net->eth_hdr.h_proto = __constant_htons(ETH_P_IP);
4828 }
4829 memcpy(skb_push(skb, sizeof(struct ethhdr)), (unsigned char *)&pm_net->eth_hdr, sizeof(struct ethhdr));
4830 //this strips the ether hdr off the packet
4831 skb->protocol = eth_type_trans(skb, pm_net->common.net_dev);
4832 break;
4833
4834 default:
4835 DEBUG_INFO("%s() invalid protocol type: %d", __FUNCTION__, pm_net->common.type);
4836 // packet dropped
4837 UPDATE_STATS(pm_net->common.edi->pm_stats64_ctx, rx_errors, 1);
4838            dev_kfree_skb_any(skb);
4839 return NET_RX_DROP;
4840 }
4841
4842 recv_bytes = skb->len;
4843
4844 //if (cp_lkm_pm_test(skb) == 1) {
4845 // dev_kfree_skb_any(skb);
4846 // return NET_RX_SUCCESS;
4847 //}
4848
4849 //if((int)(skb->data) & 0x3){
4850 //printk("Unaligned IP pkt!!!!!!!!!!!!\n");
4851 //num_unaligned++;
4852 //}
4853
4854
4855 //if(num_iters >= 10000) {
4856 // num_iters = 0;
4857 // printk("num_ip_copies: %d, num_eth_copies: %d, num_unaligned: %d, num_pkts: %d\n",num_ip_copies,num_eth_copies,num_unaligned,num_pkts);
4858 //}
4859
4860 netif_rx(skb);
4861 err = NET_RX_SUCCESS;
4862
4863 UPDATE_STATS(pm_net->common.edi->pm_stats64_ctx, rx_packets, 1);
4864 UPDATE_STATS(pm_net->common.edi->pm_stats64_ctx, rx_bytes, recv_bytes);
4865
4866 return 0;
4867}
4868
4869
4870static void cp_lkm_pm_net_get_hdr_size(void *ctx, int wrapper_hdr_size, int* hdr_size, int* hdr_offset)
4871{
4872 struct cp_lkm_pm_net *pm_net;
4873 int pad;
4874 int tmp_size;
4875 int pm_hdr = ETH_HLEN;
4876 int pm_extra = 6;
4877
4878 *hdr_size = 0;
4879 *hdr_offset = 0;
4880
4881 pm_net = (struct cp_lkm_pm_net *)ctx;
4882 if(!pm_net) {
4883 return;
4884 }
4885 //temp return here
4886 //return;
4887
4888 //calculate how much header space there is before the IP hdr.
4889 //this is needed to align the IP hdr properly for optimal performance
4890 switch(pm_net->common.type) {
4891 case CP_LKM_PM_TYPE_ETHERNET_DHCP:
4892 case CP_LKM_PM_TYPE_ETHERNET_STATIC:
4893 case CP_LKM_PM_TYPE_ETHERNET_STATIC_NOARP:
4894 //pkts will need room for the wrapper header and the ether hdr.
4895 //both headers will be present at the same time.
4896 tmp_size = wrapper_hdr_size + pm_hdr + pm_extra;
4897 pad = ((~tmp_size)+1)&0x3; //calculate padding needed for 4 byte boundary on alloc
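            // e.g. wrapper_hdr_size = 3 -> tmp_size = 23, pad = 1: the 24 byte header
            // block keeps the IP header that follows it 4-byte aligned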
4898 *hdr_size = tmp_size + pad;
4899 *hdr_offset = pad+pm_extra;
4900 break;
4901
4902 case CP_LKM_PM_TYPE_IP_DHCP:
4903 case CP_LKM_PM_TYPE_IP_STATIC:
4904 //pkts will need room for the wrapper header or the ether hdr
4905 //both headers won't be present at the same time. The wrapper is present
4906 //up through the USB side of the shim. We (the pm) add a temp ether header
4907 //for processing after the wrapper header is removed
4908 tmp_size = max(wrapper_hdr_size, pm_hdr+pm_extra);
4909 pad = ((~tmp_size)+1)&0x3; //calculate padding needed for 4 byte boundary on alloc
4910 *hdr_size = tmp_size + pad;
4911 *hdr_offset = *hdr_size - wrapper_hdr_size;
4912 break;
4913 default:
4914 break;
4915 }
4916}
4917
4918
4919static u32 cp_lkm_pm_net_get_link(struct net_device *dev)
4920{
4921 struct cp_lkm_pm_net *pm_net;
4922
4923 DEBUG_TRACE("%s()", __FUNCTION__);
4924 pm_net = netdev_priv(dev);
4925 if(!pm_net) {
4926 return 0;
4927 }
4928 return pm_net->common.attached;
4929}
4930
4931
4932#ifndef KERNEL_2_6_21
4933static const struct net_device_ops cp_lkm_pm_net_device_ops = {
4934 .ndo_open = cp_lkm_pm_net_open,
4935 .ndo_start_xmit = cp_lkm_pm_net_xmit,
4936 .ndo_stop = cp_lkm_pm_net_close,
4937 .ndo_get_stats64 = cp_lkm_pm_get_stats64
4938};
4939#endif
4940
4941static const struct ethtool_ops cp_lkm_pm_net_ethtool_ops = {
4942 .get_link = cp_lkm_pm_net_get_link,
4943};
4944
4945static void cp_lkm_pm_net_setup(struct net_device *net_dev)
4946{
4947 struct cp_lkm_pm_net *pm_net;
4948
4949 DEBUG_INFO("%s()", __FUNCTION__);
4950 pm_net = netdev_priv(net_dev);
4951 ether_setup(net_dev);
4952
4953#ifdef KERNEL_2_6_21
4954 net_dev->open = cp_lkm_pm_net_open;
4955 net_dev->hard_start_xmit = cp_lkm_pm_net_xmit;
4956 net_dev->stop = cp_lkm_pm_net_close;
4957#else
4958 net_dev->netdev_ops = &cp_lkm_pm_net_device_ops;
4959 net_dev->needed_headroom = 48;
4960 net_dev->needed_tailroom = 8;
4961#endif
4962
4963 net_dev->ethtool_ops = &cp_lkm_pm_net_ethtool_ops;
4964
4965}
4966
4967static int cp_lkm_pm_net_attach(struct cp_lkm_pm_ctx *mgr, cp_lkm_pm_type_t type, int uid, char *name, unsigned char *mac)
4968{
4969 int err;
4970 struct cp_lkm_pm_net *pm_net;
4971 struct net_device *net_dev;
4972 unsigned long flags;
4973#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17,0)
4974 net_dev = alloc_netdev(sizeof(struct cp_lkm_pm_net), name, NET_NAME_UNKNOWN, cp_lkm_pm_net_setup);
4975#else
4976 net_dev = alloc_netdev(sizeof(struct cp_lkm_pm_net), name, cp_lkm_pm_net_setup);
4977#endif
4978 if (!net_dev) {
4979 DEBUG_INFO("%s() alloc failed: %s", __FUNCTION__, name);
4980 return -ENOMEM;
4981 }
4982
4983 pm_net= netdev_priv(net_dev);
4984
4985 err = cp_lkm_common_init(&pm_net->common);
4986 if (err) {
4987 free_netdev(net_dev);
4988 return err;
4989 }
4990
4991 pm_net->common.net_dev = net_dev;
4992 pm_net->common.unique_id = uid;
4993 pm_net->common.type = type;
4994 pm_net->common.edi = NULL;
4995
4996 //printk("%s(%p) pm-uid: %d, pm_net: %p\n", __FUNCTION__, mgr, uid, pm_net);
4997
4998 switch (type) {
4999 case CP_LKM_PM_TYPE_ETHERNET_DHCP:
5000 case CP_LKM_PM_TYPE_ETHERNET_STATIC:
5001 case CP_LKM_PM_TYPE_ETHERNET_STATIC_NOARP:
5002 if(!memcmp(mac, "\x00\x00\x00\x00\x00\x00", ETH_ALEN)) {
5003 random_ether_addr(net_dev->dev_addr);
5004 } else {
5005 memcpy (net_dev->dev_addr, mac, ETH_ALEN);
5006 }
5007
5008 /////////////////////////Need to only do if driver says so.
5009 if (type == CP_LKM_PM_TYPE_ETHERNET_STATIC_NOARP) {
5010 net_dev->flags |= IFF_NOARP;
5011 }
5012 break;
5013 case CP_LKM_PM_TYPE_IP_DHCP:
5014 case CP_LKM_PM_TYPE_IP_STATIC:
5015 // random addr for DHCP functionality
5016 if(!memcmp(mac, "\x00\x00\x00\x00\x00\x00", ETH_ALEN) || !memcmp(mac, "\x00\x30\x44\x00\x00\x00", ETH_ALEN)) {
5017 random_ether_addr(net_dev->dev_addr);
5018 } else {
5019 memcpy (net_dev->dev_addr, mac, ETH_ALEN);
5020 }
5021
5022 net_dev->flags |= IFF_NOARP;
5023 memcpy(pm_net->eth_hdr.h_dest, net_dev->dev_addr, ETH_ALEN);
5024 random_ether_addr(pm_net->eth_hdr.h_source);
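            // this synthetic ether header is prepended to inbound IP packets in
            // cp_lkm_pm_net_recv() so eth_type_trans() can classify them for the stack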
5025 break;
5026 default:
5027 DEBUG_INFO("%s() invalid protocol type: %d", __FUNCTION__, type);
5028 cp_lkm_common_deinit(&pm_net->common);
5029 free_netdev(net_dev);
5030 return -EINVAL;
5031 }
5032
5033 DEBUG_INFO("%s register netdev", __FUNCTION__);
5034 err = register_netdev(net_dev);
5035 if (err < 0) {
5036 DEBUG_INFO("%s netdev registration error", __FUNCTION__);
5037 cp_lkm_common_deinit(&pm_net->common);
5038 free_netdev(net_dev);
5039 return err;
5040 }
5041
5042 netif_device_attach(pm_net->common.net_dev);
5043
5044 netif_stop_queue(pm_net->common.net_dev);
5045
5046 pm_net->common.attached = 1;
5047
5048 spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
5049 list_add(&pm_net->common.list, &mgr->pm_list);
5050 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5051
5052 return 0;
5053}
5054
5055static int cp_lkm_pm_net_detach(struct cp_lkm_pm_ctx *mgr, int uid)
5056{
5057
5058 // find the object in the list
5059 struct list_head *pos;
5060 struct cp_lkm_pm_common *pm = NULL;
5061 unsigned long flags;
5062
5063 DEBUG_TRACE("%s(%p)", __FUNCTION__, mgr);
5064
5065 spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
5066 list_for_each(pos, &mgr->pm_list){
5067 struct cp_lkm_pm_common *pm_tmp = list_entry(pos, struct cp_lkm_pm_common, list);
5068 if(pm_tmp->unique_id == uid) {
5069 pm = pm_tmp;
5070 break;
5071 }
5072 }
5073
5074 if (!pm) {
5075 // already detached
5076 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5077 DEBUG_INFO("%s() already detached", __FUNCTION__);
5078 return 0;
5079 }
5080
5081 // remove the object
5082 list_del(&pm->list);
5083 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5084
5085 if (pm->attached) {
5086 DEBUG_INFO("%s() detaching", __FUNCTION__);
5087 netif_device_detach(pm->net_dev);
5088 pm->attached = 0;
5089 }
5090
5091 unregister_netdev(pm->net_dev);
5092
5093 // clean the filter list
5094 cp_lkm_pm_filter_empty_list(pm);
5095
5096 cp_lkm_common_deinit(pm);
5097 free_netdev(pm->net_dev); // this also frees the pm since it was allocated as part of the net_dev
5098
5099 return 0;
5100}
5101
5102static int cp_lkm_pm_net_activate(struct cp_lkm_pm_ctx *mgr, int uid, bool activate)
5103{
5104 // find the object in the list
5105 struct list_head *pos;
5106 struct cp_lkm_pm_common *pm = NULL;
5107 unsigned long flags;
5108 //printk("%s(%p) activate: %d\n", __FUNCTION__, mgr, activate);
5109
5110 spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
5111 list_for_each(pos, &mgr->pm_list){
5112 struct cp_lkm_pm_common *pm_tmp = list_entry(pos, struct cp_lkm_pm_common, list);
5113 if(pm_tmp->unique_id == uid) {
5114 pm = pm_tmp;
5115 break;
5116 }
5117 }
5118
5119 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5120
5121 if (!pm) {
5122 // couldn't find object - already unplugged
5123 DEBUG_INFO("%s() already unplugged", __FUNCTION__);
5124 return 0;
5125 }
5126
5127 if (activate) {
5128 //netif_start_queue(pm->net_dev);
5129 if (pm->edi) {
5130 pm->edi->pm_recv_ctx = pm;
5131 }
5132 netif_wake_queue(pm->net_dev);
5133 } else {
5134 netif_stop_queue(pm->net_dev);
5135 if (pm->edi) {
5136 pm->edi->pm_recv_ctx = NULL;
5137 //printk("pm_recv_ctx null\n");
5138 }
5139
5140 // remove the filters - will be added back in before activate
5141 cp_lkm_pm_filter_empty_list(pm);
5142 }
5143
5144 return 0;
5145}
5146
5147int cp_lkm_pm_net_pause(void *ctx)
5148{
5149 struct cp_lkm_pm_common* pm = (struct cp_lkm_pm_common *)ctx;
5150 if(!ctx) {
5151 return 0;
5152 }
5153 netif_stop_queue(pm->net_dev);
5154 return 0;
5155
5156}
5157int cp_lkm_pm_net_resume(void *ctx)
5158{
5159 struct cp_lkm_pm_common* pm = (struct cp_lkm_pm_common *)ctx;
5160 if(!ctx) {
5161 return 0;
5162 }
5163 //netif_start_queue(pm->net_dev);
5164 netif_wake_queue(pm->net_dev);
5165 return 0;
5166}
5167
5168
5169/******************************* kernel module PPP/tty PM functionality **********************************/
5170struct cp_lkm_pm_ppp {
5171 struct cp_lkm_pm_common common;
5172 u8 *no_carrier_ptr;
5173 bool in_frame;
5174
5175 struct tty_struct *tty; // pointer to the tty for this device
5176 int minor;
5177 int open_count;
5178};
5179
5180#define CP_TTY_MINORS 10
5181#define CP_TTY_DEVICE_NAME "ttyCP"
5182
5183#define PPP_MGR_NO_CARRIER "NO CARRIER"
5184#define PPP_FLAG 0x7E
5185
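// maps a tty minor number to its protocol manager (pm_ppp) instance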
5186static struct cp_lkm_pm_ppp *cp_lkm_pm_ppp_table[CP_TTY_MINORS];
5187static struct tty_driver *cp_lkm_pm_tty_driver = NULL;
5188static struct tty_port cp_lkm_pm_tty_port[CP_TTY_MINORS];
5189
5190static void cp_lkm_pm_ppp_finalize(void *arg)
5191{
5192 struct cp_lkm_pm_ppp *pm_ppp = (struct cp_lkm_pm_ppp *)arg;
5193 tty_unregister_device(cp_lkm_pm_tty_driver, pm_ppp->minor);
5194 cp_lkm_pm_ppp_table[pm_ppp->minor] = NULL;
5195 if (pm_ppp->common.edi) {
5196 pm_ppp->common.edi = NULL;
5197 }
5198 // clean the filter list
5199 cp_lkm_pm_filter_empty_list(&pm_ppp->common);
5200}
5201
5202static int cp_lkm_pm_ppp_attach(struct cp_lkm_pm_ctx *mgr, cp_lkm_pm_type_t type, int uid, char *name)
5203{
5204 int minor;
5205 int err;
5206 unsigned long flags;
5207 struct cp_lkm_pm_ppp *pm_ppp;
5208
5209 DEBUG_INFO("%s(%p)", __FUNCTION__, mgr);
5210
5211 //printk("%s() uid: %d, type: %d\n", __FUNCTION__, uid, type);
5212
5213 // find an empty minor device slot and register
5214 for (minor = 0; minor < CP_TTY_MINORS && cp_lkm_pm_ppp_table[minor]; minor++);
5215
5216 if (minor == CP_TTY_MINORS) {
5217 DEBUG_WARN("%s(%p) - out of devices", __FUNCTION__, mgr);
5218 return -ENODEV;
5219 }
5220
5221 if (!(pm_ppp = memref_alloc_and_zero(sizeof(struct cp_lkm_pm_ppp), cp_lkm_pm_ppp_finalize))) {
5222 DEBUG_WARN("%s(%p) - no memory", __FUNCTION__, mgr);
5223 return -ENOMEM;
5224 }
5225
5226 err = cp_lkm_common_init(&pm_ppp->common);
5227 if (err) {
5228        return err;
5229 }
5230 pm_ppp->common.type = type;
5231 pm_ppp->common.unique_id = uid;
5232
5233    pm_ppp->no_carrier_ptr = (u8 *)PPP_MGR_NO_CARRIER;
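    // no_carrier_ptr walks the "NO CARRIER" pattern as bytes arrive (see cp_lkm_pm_ppp_check_match)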
5234
5235 pm_ppp->minor = minor;
5236
5237 cp_lkm_pm_ppp_table[minor] = pm_ppp;
5238 sprintf(name, "%s%d", CP_TTY_DEVICE_NAME, minor);
5239
5240 //printk("%s(%p) attached\n", __FUNCTION__, &pm_ppp->common);
5241 pm_ppp->common.attached = 1;
5242 pm_ppp->open_count = 0;
5243
5244 spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
5245 list_add(&pm_ppp->common.list, &mgr->pm_list);
5246 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5247
5248 tty_port_register_device(&cp_lkm_pm_tty_port[minor], cp_lkm_pm_tty_driver, minor, NULL);
5249
5250 return 0;
5251}
5252
5253static int cp_lkm_pm_ppp_detach(struct cp_lkm_pm_ctx *mgr, int uid)
5254{
5255
5256 // find the object in the list
5257 struct list_head *pos;
5258 struct cp_lkm_pm_common *pm = NULL;
5259 struct cp_lkm_pm_ppp *pm_ppp;
5260 unsigned long flags;
5261
5262 DEBUG_INFO("%s(%p)", __FUNCTION__, mgr);
5263 //printk("%s() uid: %d\n", __FUNCTION__, uid);
5264
5265 spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
5266 list_for_each(pos, &mgr->pm_list){
5267 struct cp_lkm_pm_common *pm_tmp = list_entry(pos, struct cp_lkm_pm_common, list);
5268 if(pm_tmp->unique_id == uid) {
5269 pm = pm_tmp;
5270 break;
5271 }
5272 }
5273
5274 if (!pm) {
5275 // already detached
5276 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5277 DEBUG_INFO("%s() already detached", __FUNCTION__);
5278 return 0;
5279 }
5280
5281 // remove the object
5282 list_del(&pm->list);
5283
5284 pm_ppp = (struct cp_lkm_pm_ppp *)pm;
5285
5286 //printk("%s() !attached\n", __FUNCTION__);
5287 pm->attached = 0;
5288
5289 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5290
5291 // clean the filter list
5292 cp_lkm_pm_filter_empty_list(pm);
5293
5294 cp_lkm_common_deinit(pm);
5295
5296 memref_deref(pm_ppp);
5297
5298 return 0;
5299}
5300
5301static int cp_lkm_pm_ppp_activate(struct cp_lkm_pm_ctx *mgr, int uid, bool activate)
5302{
5303 // find the object in the list
5304 struct list_head *pos;
5305 struct cp_lkm_pm_common *pm = NULL;
5306 unsigned long flags;
5307
5308 spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
5309 list_for_each(pos, &mgr->pm_list){
5310 struct cp_lkm_pm_common *pm_tmp = list_entry(pos, struct cp_lkm_pm_common, list);
5311 if(pm_tmp->unique_id == uid) {
5312 pm = pm_tmp;
5313 break;
5314 }
5315 }
5316
5317 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5318
5319 if (!pm) {
5320 // already detached
5321 DEBUG_INFO("%s() already detached", __FUNCTION__);
5322 return 0;
5323 }
5324 //printk("%s(%p) activate: %d, attached: %d\n", __FUNCTION__, pm, activate, pm->attached);
5325
5326 if (activate) {
5327 if (pm->edi) {
5328 pm->edi->pm_recv_ctx = pm;
5329 }
5330 } else {
5331 if (pm->edi) {
5332 pm->edi->pm_recv_ctx = NULL;
5333 //printk("pm_recv_ctx null\n");
5334 }
5335 // clean the filter list
5336 cp_lkm_pm_filter_empty_list(pm);
5337 }
5338
5339 return 0;
5340}
5341
5342
5343static int cp_lkm_pm_tty_open(struct tty_struct * tty, struct file * filp)
5344{
5345 struct cp_lkm_pm_ppp *pm_ppp;
5346 int index;
5347 unsigned long flags;
5348
5349 DEBUG_INFO("%s()", __FUNCTION__);
5350
5351 index = tty->index;
5352
5353 // get the pm_ppp associated with this tty pointer
5354 spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
5355 pm_ppp = cp_lkm_pm_ppp_table[index];
5356 if (!pm_ppp /*|| tty->driver_data */|| !pm_ppp->common.attached) {
5357 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5358 return -EINVAL;
5359 }
5360
5361 if (pm_ppp->open_count++) {
5362 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5363 return 0;
5364 }
5365
5366 memref_ref(pm_ppp);
5367 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5368
5369 // save our structure within the tty structure
5370 tty->driver_data = pm_ppp;
5371 pm_ppp->tty = tty;
5372
5373 // XXX 3.10 hack
5374 //tty->low_latency = 0;
5375
5376 return 0;
5377}
5378
5379static void cp_lkm_pm_tty_close(struct tty_struct * tty, struct file * filp)
5380{
5381 struct cp_lkm_pm_ppp *pm_ppp;
5382 unsigned long flags;
5383
5384 DEBUG_INFO("%s()", __FUNCTION__);
5385
5386 pm_ppp = tty->driver_data;
5387 if(!pm_ppp) {
5388 return;
5389 }
5390
5391 spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
5392 if (--pm_ppp->open_count) {
5393 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5394 return;
5395 }
5396 tty->driver_data = NULL;
5397 pm_ppp->tty = NULL;
5398 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5399 memref_deref(pm_ppp);
5400}
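
/*
 * Incrementally match the out-of-band "NO CARRIER" string one byte at a time so
 * it is found even when split across skbs. no_carrier_ptr remembers how far into
 * the pattern we have matched; it is reset whenever a byte breaks the match.
 */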
5401static bool cp_lkm_pm_ppp_check_match(struct cp_lkm_pm_ppp *pm_ppp, u8 ch)
5402{
5403 if (*(pm_ppp->no_carrier_ptr) == ch) {
5404 // character match - advance to next character
5405 pm_ppp->no_carrier_ptr++;
5406 if (! *(pm_ppp->no_carrier_ptr)) {
5407 // end of no carrier string - found oob no carrier
5408 return true;
5409 }
5410 return false;
5411 }
5412 // characters don't match
5413 if (pm_ppp->no_carrier_ptr != (u8 *)PPP_MGR_NO_CARRIER) {
5414 // characters don't match - start over
5415 pm_ppp->no_carrier_ptr = (u8 *)PPP_MGR_NO_CARRIER;
5416 // check not matching character against first character of no carrier - 1 level of recursion
5417 return cp_lkm_pm_ppp_check_match(pm_ppp, ch);
5418 }
5419
5420 return false;
5421}
5422
5423static bool cp_lkm_pm_ppp_is_no_carrier(struct cp_lkm_pm_ppp *pm_ppp, struct sk_buff *skb)
5424{
5425 // search thru skb for data between frame markers for NO CARRIER
5426 bool no_carrier = false;
5427 unsigned int len = skb->len;
5428 u8 *pos = skb->data;
5429
5430 DEBUG_TRACE("%s()", __FUNCTION__);
5431
5432 while (len--) {
5433 if (PPP_FLAG == (*pos)) {
5434 pm_ppp->in_frame = !pm_ppp->in_frame;
5435 } else if (!pm_ppp->in_frame) {
5436 // look for match
5437 no_carrier = cp_lkm_pm_ppp_check_match(pm_ppp, *pos);
5438 if (no_carrier) {
5439 DEBUG_INFO("%s() found no carrier", __FUNCTION__);
5440 return true;
5441 }
5442 } else {
5443            pm_ppp->no_carrier_ptr = (u8 *)PPP_MGR_NO_CARRIER;
5444 }
5445
5446 pos++;
5447 }
5448
5449 return false;
5450}
5451
5452static void cp_lkm_pm_ppp_get_hdr_size(void *ctx, int wrapper_hdr_size, int* hdr_size, int* hdr_offset)
5453{
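    // PPP frames are passed through unmodified, so the PM needs no extra header room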
5454 *hdr_size = 0;
5455 *hdr_offset = 0;
5456}
5457
5458// called in soft interrupt context
5459static int cp_lkm_pm_ppp_recv(void *ctx, struct sk_buff *skb)
5460{
5461#ifdef KERNEL_2_6_21
5462 int size;
5463#endif
5464 struct cp_lkm_pm_ppp *pm_ppp;
5465 bool oob_no_carrier;
5466
5467 if(NULL == ctx || !skb->len) {
5468 DEBUG_INFO("%s() - null ctx - dropped", __FUNCTION__);
5469 goto done;
5470 }
5471
5472 pm_ppp = (struct cp_lkm_pm_ppp *)ctx;
5473
5474 if (!pm_ppp) {
5475 DEBUG_INFO("%s() - NULL pm_ppp - dropped", __FUNCTION__);
5476 goto done;
5477 }
5478
5479 // check for OOB NO CARRIER - signal up through file descriptor
5480 oob_no_carrier = cp_lkm_pm_ppp_is_no_carrier(pm_ppp, skb);
5481 if (oob_no_carrier) {
5482 struct cp_lkm_msg_hdr hdr;
5483
5484 DEBUG_INFO("%s() - posting no carrier", __FUNCTION__);
5485 memset(&hdr,0,sizeof(hdr));
5486 hdr.instance_id = pm_ppp->common.unique_id;
5487 hdr.cmd = CP_LKM_PM_LINK_DOWN;
5488 hdr.status = CP_LKM_STATUS_OK;
5489 hdr.len = 0;
5490
5491 LOG("Received NO CARRIER\n");
5492 DEBUG_INFO("%s() - posting link down", __FUNCTION__);
5493 cp_lkm_post_message(&cp_lkm_pm_mgr.common, &hdr, NULL);
5494
5495 goto done;
5496 }
5497
5498 if (!pm_ppp->tty || !pm_ppp->tty->driver_data) {
5499 DEBUG_INFO("%s() - not setup - dropped", __FUNCTION__);
5500 goto done;
5501 }
5502
5503#ifdef KERNEL_2_6_21
5504 size = tty_buffer_request_room(pm_ppp->tty, skb->len);
5505 if(size < skb->len) {
5506 // dropped data - or we need to queue for later
5507 DEBUG_WARN("%s() - dropping network data", __FUNCTION__);
5508 goto done;
5509 }
5510#endif
5511
5512 tty_insert_flip_string(pm_ppp->tty->port, skb->data, skb->len);
5513 tty_flip_buffer_push(pm_ppp->tty->port);
5514
5515done:
5516 dev_kfree_skb_any(skb);
5517 return 0;
5518}
5519
5520// this can be called from interrupt thread or normal kernel thread
5521static int cp_lkm_pm_tty_write(struct tty_struct * tty, const unsigned char *buf, int count)
5522{
5523 struct cp_lkm_pm_ppp *pm_ppp;
5524 struct sk_buff *skb;
5525 int link_res;
5526 int retval = count;
5527
5528 if (!count) {
5529 //printk("%s() !count \n", __FUNCTION__);
5530 return 0;
5531 }
5532
5533 pm_ppp = (struct cp_lkm_pm_ppp *)tty->driver_data;
5534
5535 if (!pm_ppp) {
5536 //printk("%s() !pm_ppp \n", __FUNCTION__);
5537 return -EINVAL;
5538 }
5539
5540 //printk("%s(%p) id:%d, attached: %d\n", __FUNCTION__, &pm_ppp->common, pm_ppp->common.unique_id, pm_ppp->common.attached);
5541
5542 //see if we can grab the link lock, if not, we are either bringing up or taking down the link between USB and PM, so not safe to proceed
5543 link_res = cp_lkm_common_inc_link_lock(&pm_ppp->common);
5544 if(link_res < 0) {
5545 //printk("%s() !link \n", __FUNCTION__);
5546 return 0;
5547 }
5548
5549 /* Drop packet if interface is not attached */
5550 if (!pm_ppp->common.attached){
5551 retval = 0;
5552 //printk("%s() !attached: %d \n", __FUNCTION__, pm_ppp->common.attached);
5553 goto drop;
5554 }
5555
5556 if (!(pm_ppp->common.edi) || !(pm_ppp->common.edi->usb_send) || !(pm_ppp->common.edi->usb_send_ctx)) {
5557 retval = 0;
5558 //printk("%s() !edi \n", __FUNCTION__);
5559 goto drop;
5560 }
5561
5562 //benk check for enabled filter - send in buffer pointer to ip header
5563
5564 // alloc skb to send
5565 if ((skb = alloc_skb (count, GFP_ATOMIC)) == NULL) {
5566 retval = -ENOMEM;
5567 goto pm_tty_write_done;
5568 }
5569
5570 memcpy(skb->data, buf, count);
5571 skb->len = count;
5572 skb_set_tail_pointer(skb, skb->len);
5573
5574 // send data to USB module
5575 pm_ppp->common.edi->usb_send(pm_ppp->common.edi->usb_send_ctx, skb);
5576 retval = count;
5577 goto pm_tty_write_done;
5578
5579drop:
5580pm_tty_write_done:
5581 cp_lkm_common_dec_link_lock(&pm_ppp->common);
5582 //printk("%s() done\n", __FUNCTION__);
5583
5584 return retval;
5585}
5586
5587static int cp_lkm_pm_tty_write_room(struct tty_struct *tty)
5588{
5589 struct cp_lkm_pm_ppp *pm_ppp;
5590
5591 DEBUG_INFO("%s()", __FUNCTION__);
5592
5593 pm_ppp = (struct cp_lkm_pm_ppp *)tty->driver_data;
5594
5595 if (!pm_ppp) {
5596 return -EINVAL;
5597 }
5598
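    // writes are forwarded straight to the USB layer and nothing is buffered here,
    // so just report a fixed amount of room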
5599 return 2048;
5600}
5601
5602static int cp_lkm_pm_tty_chars_in_buffer(struct tty_struct *tty)
5603{
5604 struct cp_lkm_pm_ppp *pm_ppp;
5605
5606 DEBUG_INFO("%s()", __FUNCTION__);
5607
5608 pm_ppp = (struct cp_lkm_pm_ppp *)tty->driver_data;
5609
5610 if (!pm_ppp) {
5611 return -EINVAL;
5612 }
5613
5614 return 0;
5615}
5616
5617static void cp_lkm_pm_tty_set_termios(struct tty_struct *tty, struct ktermios * old)
5618{
5619 DEBUG_INFO("%s()", __FUNCTION__);
5620
5621}
5622
5623#ifdef KERNEL_2_6_21
5624static int cp_lkm_pm_tty_ioctl(struct tty_struct *tty, struct file * file, unsigned int cmd, unsigned long arg)
5625#else
5626static int cp_lkm_pm_tty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
5627#endif
5628{
5629 struct cp_lkm_pm_ppp *pm_ppp;
5630
5631 DEBUG_TRACE("%s(%x)", __FUNCTION__, cmd);
5632
5633 pm_ppp = (struct cp_lkm_pm_ppp *)tty->driver_data;
5634
5635 if (!pm_ppp) {
5636 return -EINVAL;
5637 }
5638
5639 return -ENOIOCTLCMD;
5640}
5641
5642static struct tty_operations cp_lkm_pm_tty_ops = {
5643.open = cp_lkm_pm_tty_open,
5644.close = cp_lkm_pm_tty_close,
5645.write = cp_lkm_pm_tty_write,
5646.write_room = cp_lkm_pm_tty_write_room,
5647.chars_in_buffer = cp_lkm_pm_tty_chars_in_buffer,
5648.set_termios = cp_lkm_pm_tty_set_termios,
5649.ioctl = cp_lkm_pm_tty_ioctl
5650
5651/*
5652.throttle = acm_tty_throttle,
5653.unthrottle = acm_tty_unthrottle,
5654*/
5655};
5656
5657static int cp_lkm_pm_tty_init(void)
5658{
5659 int retval;
5660 int i;
5661
5662 for(i = 0; i < CP_TTY_MINORS; i++) {
5663 tty_port_init(&cp_lkm_pm_tty_port[i]);
5664 }
5665
5666 cp_lkm_pm_tty_driver = alloc_tty_driver(CP_TTY_MINORS);
5667 if (!cp_lkm_pm_tty_driver) {
5668 return -ENOMEM;
5669 }
5670
5671 // initialize the tty driver
5672 cp_lkm_pm_tty_driver->owner = THIS_MODULE;
5673 cp_lkm_pm_tty_driver->driver_name = "cptty";
5674 cp_lkm_pm_tty_driver->name = CP_TTY_DEVICE_NAME;
5675 cp_lkm_pm_tty_driver->major = 0; // dynamically assign major number
5676    cp_lkm_pm_tty_driver->minor_start = 0;
5677 cp_lkm_pm_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
5678 cp_lkm_pm_tty_driver->subtype = SERIAL_TYPE_NORMAL;
5679 cp_lkm_pm_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
5680 cp_lkm_pm_tty_driver->init_termios = tty_std_termios;
5681 tty_set_operations(cp_lkm_pm_tty_driver, &cp_lkm_pm_tty_ops);
5682
5683 retval = tty_register_driver(cp_lkm_pm_tty_driver);
5684 if (retval) {
5685 DEBUG_ERROR("%s() failed to register cp tty driver", __FUNCTION__);
5686 put_tty_driver(cp_lkm_pm_tty_driver);
5687 for(i = 0; i < CP_TTY_MINORS; i++) {
5688 tty_port_destroy(&cp_lkm_pm_tty_port[i]);
5689 }
5690 }
5691 return retval;
5692
5693}
5694
5695static void cp_lkm_pm_tty_cleanup(void)
5696{
5697 int i;
5698 if (cp_lkm_pm_tty_driver) {
5699 tty_unregister_driver(cp_lkm_pm_tty_driver);
5700 put_tty_driver(cp_lkm_pm_tty_driver);
5701 for(i = 0; i < CP_TTY_MINORS; i++) {
5702 tty_port_destroy(&cp_lkm_pm_tty_port[i]);
5703 }
5704 cp_lkm_pm_tty_driver = NULL;
5705 }
5706}
5707
5708/******************************* kernel module PM mgr functionality **********************************/
5709
5710
5711static int cp_lkm_pm_open(struct cp_lkm_common_ctx *ctx);
5712static int cp_lkm_pm_close(struct cp_lkm_common_ctx *ctx);
5713static int cp_lkm_pm_handle_msg(struct cp_lkm_common_ctx *ctx, struct cp_lkm_msg_hdr *hdr, struct sk_buff *skb);
5714static int cp_lkm_pm_handle_ioctl(struct cp_lkm_common_ctx *ctx, int cmd, void *k_argp);
5715
5716
5717static int cp_lkm_pm_init(void)
5718{
5719 DEBUG_INFO("%s()", __FUNCTION__);
5720
5721 memset(&cp_lkm_pm_mgr, 0x00, sizeof(struct cp_lkm_pm_ctx));
5722 cp_lkm_pm_mgr.common.open = cp_lkm_pm_open;
5723 cp_lkm_pm_mgr.common.close = cp_lkm_pm_close;
5724 cp_lkm_pm_mgr.common.handle_msg = cp_lkm_pm_handle_msg;
5725 cp_lkm_pm_mgr.common.handle_ioctl = cp_lkm_pm_handle_ioctl;
5726 INIT_LIST_HEAD(&cp_lkm_pm_mgr.pm_list);
5727 spin_lock_init(&cp_lkm_pm_mgr.pm_list_lock);
5728
5729 cp_lkm_common_ctx_init(&cp_lkm_pm_mgr.common);
5730
5731 return 0;
5732}
5733
5734static int cp_lkm_pm_cleanup(void)
5735{
5736 struct cp_lkm_pm_common *pmi;
5737 struct list_head *entry, *tmp;
5738 unsigned long flags;
5739
5740 DEBUG_INFO("%s()", __FUNCTION__);
5741
5742 // clean up msg list
5743 cp_lkm_cleanup_msg_list(&cp_lkm_pm_mgr.common);
5744
5745 // cleanup any PM in list
5746 spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
5747
5748 list_for_each_safe(entry, tmp, &cp_lkm_pm_mgr.pm_list) {
5749 pmi = list_entry(entry, struct cp_lkm_pm_common, list);
5750 if (pmi->edi) {
5751 pmi->edi->pm_recv_ctx = NULL;
5752 //printk("pm_recv_ctx null\n");
5753 pmi->edi->pm_stats64_ctx = NULL;
5754 pmi->edi = NULL;
5755 }
5756 list_del(&pmi->list);
5757 // clean the filter list
5758 cp_lkm_pm_filter_empty_list(pmi);
5759
5760 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5761 if (pmi->net_dev) {
5762 // network device
5763 cp_lkm_common_deinit(pmi);
5764 unregister_netdev(pmi->net_dev);
5765 free_netdev(pmi->net_dev); // this also frees the pmi since it was allocated as part of the net_dev
5766 } else {
5767 // tty device
5768 memref_deref(pmi);
5769 }
5770
5771 spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
5772 }
5773 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5774
5775 return 0;
5776}
5777
5778static int cp_lkm_pm_open(struct cp_lkm_common_ctx *ctx)
5779{
5780// struct cp_lkm_pm_ctx *pm_mgr;
5781
5782 DEBUG_INFO("%s(%p)", __FUNCTION__, ctx);
5783
5784// pm_mgr = (struct cp_lkm_pm_ctx *)ctx;
5785
5786 return 0;
5787}
5788
5789static int cp_lkm_pm_close(struct cp_lkm_common_ctx *ctx)
5790{
5791 //struct cp_lkm_pm_ctx *pm_mgr = (struct cp_lkm_pm_ctx *)ctx;
5792 //struct cp_lkm_pm_common *pm_tmp = NULL;
5793 //struct list_head *entry, *tmp;
5794 //unsigned long flags;
5795
5796 LOG("%s() called unexpectedly.", __FUNCTION__);
5797
5798 //NOTE: catkin 10/11/2019 - Close is only called in our system if the modem stack crashes. This means
5799 // things are in a bad state and the router will be rebooting. We decided not
5800 // to clean things up here because close code on usb side got into an infinite loop
5801 // and prevented the router from rebooting. Revisit if close ever becomes a normal event.
5802
5803 /*
5804 spin_lock_irqsave(&pm_mgr->pm_list_lock, flags);
5805
5806 list_for_each_safe(entry, tmp, &pm_mgr->pm_list) {
5807 pm_tmp = list_entry(entry, struct cp_lkm_pm_common, list);
5808 spin_unlock_irqrestore(&pm_mgr->pm_list_lock, flags);
5809
5810 // call detach to clean up network interface
5811 if (CP_LKM_PM_TYPE_PPP_CLIENT == pm_tmp->type || CP_LKM_PM_TYPE_PPP_SERVER == pm_tmp->type) {
5812 cp_lkm_pm_ppp_detach(pm_mgr, pm_tmp->unique_id);
5813 } else {
5814 cp_lkm_pm_net_detach(pm_mgr, pm_tmp->unique_id);
5815 }
5816 }
5817
5818 spin_unlock_irqrestore(&pm_mgr->pm_list_lock, flags);
5819
5820 cp_lkm_cleanup_msg_list(ctx);
5821 */
5822 return 0;
5823}
5824
5825static int cp_lkm_pm_handle_msg(struct cp_lkm_common_ctx *ctx, struct cp_lkm_msg_hdr *hdr, struct sk_buff *skb)
5826{
5827 struct cp_lkm_pm_ctx *pm_mgr;
5828
5829 //printk("%s(%p)\n", __FUNCTION__, ctx);
5830
5831 pm_mgr = (struct cp_lkm_pm_ctx *)ctx;
5832
5833
5834 // how to write back response with common function?
5835 if (skb) {
5836        dev_kfree_skb_any(skb);
5837 }
5838
5839 return 0;
5840}
5841
5842static int cp_lkm_pm_add_filter(struct cp_lkm_pm_ctx *mgr, int uid, struct cp_lkm_pm_filter *filter)
5843{
5844 // find the object in the list
5845 struct list_head *pos;
5846 struct cp_lkm_pm_common *pm = NULL;
5847 unsigned long flags;
5848 struct cp_lkm_pm_filter *new_filter;
5849
5850 DEBUG_TRACE("%s(%p)", __FUNCTION__, mgr);
5851
5852 spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
5853 list_for_each(pos, &mgr->pm_list){
5854 struct cp_lkm_pm_common *pm_tmp = list_entry(pos, struct cp_lkm_pm_common, list);
5855 if(pm_tmp->unique_id == uid) {
5856 pm = pm_tmp;
5857 break;
5858 }
5859 }
5860
5861 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5862
5863 if (!pm) {
5864 DEBUG_WARN("%s() pm not attached", __FUNCTION__);
5865 return -ENODEV;
5866 }
5867
5868 new_filter = kmalloc(sizeof(struct cp_lkm_pm_filter), GFP_ATOMIC);
5869 if (!new_filter) {
5870 DEBUG_WARN("%s() - failed to alloc filter\n", __FUNCTION__);
5871 return -1;
5872 }
5873
5874 memcpy(new_filter, filter, sizeof(struct cp_lkm_pm_filter));
5875 INIT_LIST_HEAD(&new_filter->list);
5876
5877 list_add_tail(&new_filter->list, &pm->filter_list);
5878
5879 return 0;
5880}
5881
5882static int cp_lkm_pm_handle_ioctl(struct cp_lkm_common_ctx *ctx, int cmd, void *k_argp)
5883{
5884 struct cp_lkm_pm_ctx *pm_mgr;
5885 int result = 0;
5886 struct cp_lkm_pm_attach_ioctl *attach_params;
5887 struct cp_lkm_pm_detach_ioctl *detach_params;
5888 struct cp_lkm_pm_activate_deactivate_ioctl *activate_params;
5889 struct cp_lkm_pm_add_filter_ioctl *filter_params;
5890
5891 char name[CP_LKM_MAX_IF_NAME];
5892 unsigned long not_copied;
5893
5894 //printk("%s(%p) cmd:%d\n", __FUNCTION__, ctx, _IOC_NR(cmd));
5895
5896 pm_mgr = (struct cp_lkm_pm_ctx *)ctx;
5897
5898 switch (cmd) {
5899 case CP_LKM_IOCTL_PM_ATTACH:
5900 attach_params = (struct cp_lkm_pm_attach_ioctl *)k_argp;
5901 not_copied = copy_from_user(name, attach_params->name, CP_LKM_MAX_IF_NAME);
5902 if (not_copied) {
5903            return -EFAULT;
5904 }
5905 DEBUG_INFO("%s(%s) attach", __FUNCTION__, name);
5906 switch(attach_params->type) {
5907 case CP_LKM_PM_TYPE_PPP_CLIENT:
5908 case CP_LKM_PM_TYPE_PPP_SERVER:
5909 result = cp_lkm_pm_ppp_attach(pm_mgr, attach_params->type, attach_params->uid, name);
5910 if (!result) {
5911 not_copied = copy_to_user(attach_params->name, name, CP_LKM_MAX_IF_NAME);
5912 if (not_copied) {
5913                        return -EFAULT;
5914 }
5915 }
5916 break;
5917 case CP_LKM_PM_TYPE_ETHERNET_DHCP:
5918 case CP_LKM_PM_TYPE_ETHERNET_STATIC:
5919 case CP_LKM_PM_TYPE_ETHERNET_STATIC_NOARP:
5920 case CP_LKM_PM_TYPE_IP_STATIC:
5921 case CP_LKM_PM_TYPE_IP_DHCP:
5922 result = cp_lkm_pm_net_attach(pm_mgr, attach_params->type, attach_params->uid, name, attach_params->mac);
5923 break;
5924 default:
5925 result = -ENOTSUPP;
5926 break;
5927 }
5928 break;
5929 case CP_LKM_IOCTL_PM_DETACH:
5930 detach_params = (struct cp_lkm_pm_detach_ioctl *)k_argp;
5931 DEBUG_INFO("%s() detach uid:%d", __FUNCTION__, detach_params->uid);
5932 switch(detach_params->type) {
5933 case CP_LKM_PM_TYPE_PPP_CLIENT:
5934 case CP_LKM_PM_TYPE_PPP_SERVER:
5935 result = cp_lkm_pm_ppp_detach(pm_mgr, detach_params->uid);
5936 break;
5937 case CP_LKM_PM_TYPE_ETHERNET_DHCP:
5938 case CP_LKM_PM_TYPE_ETHERNET_STATIC:
5939 case CP_LKM_PM_TYPE_ETHERNET_STATIC_NOARP:
5940 case CP_LKM_PM_TYPE_IP_STATIC:
5941 case CP_LKM_PM_TYPE_IP_DHCP:
5942 result = cp_lkm_pm_net_detach(pm_mgr, detach_params->uid);
5943 break;
5944 default:
5945 result = -ENOTSUPP;
5946 break;
5947 }
5948 break;
5949 case CP_LKM_IOCTL_PM_ACTIVATE:
5950 activate_params = (struct cp_lkm_pm_activate_deactivate_ioctl *)k_argp;
5951 switch(activate_params->type) {
5952 case CP_LKM_PM_TYPE_PPP_CLIENT:
5953 case CP_LKM_PM_TYPE_PPP_SERVER:
5954 result = cp_lkm_pm_ppp_activate(pm_mgr, activate_params->uid, true);
5955 break;
5956 case CP_LKM_PM_TYPE_ETHERNET_DHCP:
5957 case CP_LKM_PM_TYPE_ETHERNET_STATIC:
5958 case CP_LKM_PM_TYPE_ETHERNET_STATIC_NOARP:
5959 case CP_LKM_PM_TYPE_IP_STATIC:
5960 case CP_LKM_PM_TYPE_IP_DHCP:
5961 result = cp_lkm_pm_net_activate(pm_mgr, activate_params->uid, true);
5962 break;
5963 default:
5964 result = -ENOTSUPP;
5965 break;
5966 }
5967 break;
5968 case CP_LKM_IOCTL_PM_DEACTIVATE:
5969 activate_params = (struct cp_lkm_pm_activate_deactivate_ioctl *)k_argp;
5970 switch(activate_params->type) {
5971 case CP_LKM_PM_TYPE_PPP_CLIENT:
5972 case CP_LKM_PM_TYPE_PPP_SERVER:
5973 result = cp_lkm_pm_ppp_activate(pm_mgr, activate_params->uid, false);
5974 break;
5975 case CP_LKM_PM_TYPE_ETHERNET_DHCP:
5976 case CP_LKM_PM_TYPE_ETHERNET_STATIC:
5977 case CP_LKM_PM_TYPE_ETHERNET_STATIC_NOARP:
5978 case CP_LKM_PM_TYPE_IP_STATIC:
5979 case CP_LKM_PM_TYPE_IP_DHCP:
5980 result = cp_lkm_pm_net_activate(pm_mgr, activate_params->uid, false);
5981 break;
5982 default:
5983 result = -ENOTSUPP;
5984 break;
5985 }
5986 break;
5987 case CP_LKM_IOCTL_PM_ADD_FILTER:
5988 filter_params = (struct cp_lkm_pm_add_filter_ioctl *)k_argp;
5989 result = cp_lkm_pm_add_filter(pm_mgr, filter_params->uid, &filter_params->filter);
5990 break;
5991 default:
5992 break;
5993 }
5994
5995 return result;
5996}
5997
5998static bool cp_lkm_pm_usb_do_link_lock(void* ctx1, void* ctx2)
5999{
6000 struct cp_lkm_pm_common *pm = (struct cp_lkm_pm_common*)ctx1;
6001 bool done = false;
6002 unsigned long flags;
6003 // grab the lock and set the link_count. The link_count is used to keep send and poll from
6004 // being called over to the USB layer while we are mucking with the send and poll pointers
6005 spin_lock_irqsave(&pm->pm_link_lock, flags);
6006 if(pm->pm_link_count <= 0) {
6007 pm->pm_link_count = -1;
6008 done = true;
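        // -1 blocks send/poll from crossing into the USB layer; link resets it to 0
        // when done, while unlink leaves it at -1 so nothing flows while unlinked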
6009 }
6010 spin_unlock_irqrestore(&pm->pm_link_lock, flags);
6011
6012 return done;
6013}
6014
6015// This function changes the shared edi pointers.
6016// !!!It is the only function in the pm that is permitted to change edi function pointers!!!
6017// Other functions can change the ctxt pointers
6018static int cp_lkm_pm_usb_link(struct cp_lkm_edi *edi, int pm_unique_id, int link)
6019{
6020 struct list_head *pos;
6021 struct cp_lkm_pm_common *pm = NULL;
6022 unsigned long flags;
6023 struct cp_lkm_edi *tmp_edi;
6024
6025 spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
6026 list_for_each(pos, &cp_lkm_pm_mgr.pm_list){
6027 struct cp_lkm_pm_common *pm_tmp = list_entry(pos, struct cp_lkm_pm_common, list);
6028 if(pm_tmp->unique_id == pm_unique_id) {
6029 pm = pm_tmp;
6030 break;
6031 }
6032 }
6033 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
6034
6035 if (!pm) {
6036 // couldn't find object
6037 //printk("%s() unable to find protocol manager with id:%d\n", __FUNCTION__, pm_unique_id);
6038 return -EINVAL;
6039 }
6040
6041 //printk("%s() pm_net: %p\n", __FUNCTION__, pm);
6042
6043 // grab the lock and set the link_count. The link_count is used to keep send and poll from
6044 // being called over to the USB layer while we are mucking with the send and poll pointers
6045 cp_lkm_do_or_die(pm, NULL, cp_lkm_pm_usb_do_link_lock, CP_LKM_TIMEOUT_MS, CP_LKM_ITER, "Failed to grab cp pm lock");
6046
6047 //printk("%s() pm: %p, attached: %d, pm_type: %d\n", __FUNCTION__, pm, pm->attached,pm->type);
6048 tmp_edi = pm->edi;
6049 pm->edi = NULL;
6050 if (link) {
6051 if (tmp_edi) {
6052 // already linked - unlink from previous edi
6053 // just a precaution, should never happen
6054 tmp_edi->pm_recv = NULL;
6055 tmp_edi->pm_recv_ctx = NULL;
6056 tmp_edi->pm_get_hdr_size = NULL;
6057
6058 //printk("pm_recv_ctx null\n");
6059 tmp_edi->pm_send_pause = NULL;
6060 tmp_edi->pm_send_resume = NULL;
6061
6062 tmp_edi->pm_stats64_ctx = NULL;
6063
6064 //pm->edi = NULL;
6065 }
6066
6067 tmp_edi = edi;
6068 tmp_edi->pm_recv_ctx = pm;
6069
6070 switch(pm->type) {
6071 case CP_LKM_PM_TYPE_PPP_CLIENT:
6072 case CP_LKM_PM_TYPE_PPP_SERVER:
6073 tmp_edi->pm_recv = cp_lkm_pm_ppp_recv;
6074 tmp_edi->pm_get_hdr_size = cp_lkm_pm_ppp_get_hdr_size;
6075 tmp_edi->pm_stats64_ctx = NULL;
6076 break;
6077 default:
6078 tmp_edi->pm_recv = cp_lkm_pm_net_recv;
6079 tmp_edi->pm_get_hdr_size = cp_lkm_pm_net_get_hdr_size;
6080 tmp_edi->pm_send_pause = cp_lkm_pm_net_pause;
6081 tmp_edi->pm_send_resume = cp_lkm_pm_net_resume;
6082 tmp_edi->pm_stats64_ctx = pm;
6083 break;
6084 }
6085
6086 pm->edi = tmp_edi;
6087
6088 // release the link_count on link so things can start flowing.
6089 // don't release it on unlink since we don't want things to flow when unlinked
6090 spin_lock_irqsave(&pm->pm_link_lock, flags);
6091 pm->pm_link_count = 0;
6092 spin_unlock_irqrestore(&pm->pm_link_lock, flags);
6093
6094 } else {
6095 if (tmp_edi) {
6096 tmp_edi->pm_recv = NULL;
6097 tmp_edi->pm_recv_ctx = NULL;
6098 tmp_edi->pm_get_hdr_size = NULL;
6099
6100 //printk("pm_recv_ctx null\n");
6101 tmp_edi->pm_send_pause = NULL;
6102 tmp_edi->pm_send_resume = NULL;
6103 tmp_edi->pm_stats64_ctx = NULL;
6104
6105 //pm->edi = NULL;
6106 }
6107 }
6108
6109 return 0;
6110
6111}
6112
6113/******************** common user/kernel communication functions **************/
6114
6115static void cp_lkm_common_ctx_init(struct cp_lkm_common_ctx *common)
6116{
6117 DEBUG_WARN("%s()", __FUNCTION__);
6118
6119 INIT_LIST_HEAD(&common->read_list);
6120 spin_lock_init(&common->read_list_lock);
6121
6122 init_waitqueue_head(&common->inq);
6123 common->open_cnt = 0;
6124 common->reading_data = false;
6125 common->write_skb = NULL;
6126}
6127
6128static void cp_lkm_cleanup_msg_list(struct cp_lkm_common_ctx *common)
6129{
6130 struct cp_lkm_read_msg *msg;
6131 unsigned long flags;
6132 struct list_head *entry, *tmp;
6133
6134 spin_lock_irqsave(&common->read_list_lock, flags);
6135
6136 list_for_each_safe(entry, tmp, &common->read_list) {
6137 msg = list_entry(entry, struct cp_lkm_read_msg, list);
6138 list_del(&msg->list);
6139 dev_kfree_skb_any(msg->skb);
6140 kfree(msg);
6141 }
6142 spin_unlock_irqrestore(&common->read_list_lock, flags);
6143}
6144
6145// this may be called from soft interrupt context or normal kernel thread context
6146static int cp_lkm_post_message(struct cp_lkm_common_ctx *mgr, struct cp_lkm_msg_hdr* hdr, struct sk_buff *skb)
6147{
6148
6149 struct cp_lkm_read_msg *msg;
6150 unsigned long flags;
6151
6152 msg = kmalloc(sizeof(struct cp_lkm_read_msg), GFP_ATOMIC);
6153 if (!msg) {
6154 if (skb) {
6155 dev_kfree_skb_any(skb);
6156 }
6157 return -ENOMEM;
6158 }
6159
6160 msg->skb = skb;
6161 memcpy(&msg->hdr, hdr, sizeof(struct cp_lkm_msg_hdr));
6162
6163 spin_lock_irqsave(&mgr->read_list_lock, flags);
6164 list_add_tail(&msg->list, &mgr->read_list);
6165 spin_unlock_irqrestore(&mgr->read_list_lock, flags);
6166
6167 mgr->q_waiting = false;
6168
6169 // signal poll
6170 wake_up_interruptible(&mgr->inq);
6171
6172 return 0;
6173}
6174
6175int cp_lkm_open(struct inode *inode, struct file *filp)
6176{
6177
6178 int result = 0;
6179 struct cp_lkm_common_ctx *common;
6180
6181 DEBUG_TRACE("%s()", __FUNCTION__);
6182
6183 try_module_get(THIS_MODULE);
6184
6185 // set private data
6186 if (iminor(inode) == CP_LKM_USB_MGR_MINOR) {
6187 filp->private_data = &cp_lkm_usb_mgr;
6188 common = &cp_lkm_usb_mgr.common;
6189 DEBUG_INFO("%s() open usb manager", __FUNCTION__);
6190 } else if (iminor(inode) == CP_LKM_PM_MGR_MINOR) {
6191 filp->private_data = &cp_lkm_pm_mgr;
6192 common = &cp_lkm_pm_mgr.common;
6193 DEBUG_INFO("%s() open pm manager", __FUNCTION__);
6194 } else {
6195        module_put(THIS_MODULE); return -ENOENT;
6196 }
6197
6198 if (common->open_cnt) {
6199        module_put(THIS_MODULE); return -EBUSY;
6200 }
6201
6202 common->open_cnt++;
6203
6204 if (common->open) {
6205 result = common->open(common);
6206 }
6207
6208 return result;
6209}
6210
6211int cp_lkm_release(struct inode *inode, struct file *filp)
6212{
6213
6214 int result = 0;
6215 struct cp_lkm_common_ctx *common;
6216 common = (struct cp_lkm_common_ctx *)filp->private_data;
6217
6218 DEBUG_TRACE("%s() release", __FUNCTION__);
6219
6220 if (0 == common->open_cnt) {
6221 return 0;
6222 }
6223
6224 if (common->close) {
6225 result = common->close(common);
6226 }
6227
6228 module_put(THIS_MODULE);
6229
6230 common->open_cnt--;
6231
6232 return result;
6233}
6234
6235// first read is the header
6236// second read is the data. If no data, then no second read
6237// if error in either stage, negative value is returned and next read will be for header
6238// messages are not removed until successfully read header and data (if any)
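// illustrative userspace sequence (not part of this module):
//     read(fd, &hdr, sizeof(hdr));           // first read: the header
//     if (hdr.len) read(fd, buf, hdr.len);   // second read: the payload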
6239ssize_t cp_lkm_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
6240{
6241
6242 struct cp_lkm_common_ctx *common;
6243 ssize_t result;
6244 struct cp_lkm_read_msg *msg;
6245 unsigned long flags;
6246 unsigned long not_copied;
6247
6248// DEBUG_INFO("%s() reading %d bytes", __FUNCTION__, count);
6249 common = (struct cp_lkm_common_ctx *)filp->private_data;
6250
6251 spin_lock_irqsave(&common->read_list_lock, flags);
6252 if (list_empty(&common->read_list)) {
6253 spin_unlock_irqrestore(&common->read_list_lock, flags);
6254 return -EAGAIN;
6255 }
6256 msg = list_first_entry(&common->read_list, struct cp_lkm_read_msg, list);
6257 spin_unlock_irqrestore(&common->read_list_lock, flags);
6258
6259 if (!common->reading_data) { // header mode
6260 // read header
6261 if (sizeof(struct cp_lkm_msg_hdr) != count) {
6262 return -EINVAL;
6263 }
6264
6265 not_copied = copy_to_user(buf, &msg->hdr, sizeof(struct cp_lkm_msg_hdr));
6266 if (not_copied) {
6267            return -EFAULT;
6268 }
6269
6270 if (!msg->hdr.len) {
6271 result = count;
6272 goto read_free;
6273 }
6274
6275 // switch to data mode
6276 common->reading_data = !common->reading_data;
6277 return count;
6278 }
6279
6280 // switch to header mode
6281 common->reading_data = !common->reading_data;
6282
6283 // data mode - handle the data transfer
6284 if (msg->hdr.len != count) {
6285 return -EINVAL;
6286 }
6287
6288 not_copied = copy_to_user(buf, msg->skb->data, msg->hdr.len);
6289
6290 if (not_copied) {
6291        return -EFAULT;
6292 }
6293
6294 result = count;
6295
6296read_free:
6297 spin_lock_irqsave(&common->read_list_lock, flags);
6298 list_del(&msg->list);
6299 spin_unlock_irqrestore(&common->read_list_lock, flags);
6300
6301 if (msg->skb) {
6302 dev_kfree_skb_any(msg->skb);
6303 }
6304 kfree(msg);
6305
6306 return result;
6307}
6308// the user must write the header first
6309// then the user must write the data equivalent to the hdr.len
6310// on error, a negative value is returned and the entire message is lost
6311// on error, the next write must be header
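// illustrative userspace sequence (not part of this module):
//     write(fd, &hdr, sizeof(hdr));          // first write: the header
//     if (hdr.len) write(fd, buf, hdr.len);  // second write: the payload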
6312ssize_t cp_lkm_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos)
6313{
6314 struct cp_lkm_common_ctx *common;
6315 unsigned long not_copied;
6316 int result;
6317 struct sk_buff *skb = NULL;
6318 struct cp_lkm_msg_hdr hdr;
6319 struct cp_lkm_msg_hdr *hdrp;
6320
6321// DEBUG_INFO("%s() writing %d bytes", __FUNCTION__, count);
6322
6323 common = (struct cp_lkm_common_ctx *)filp->private_data;
6324
6325 if (!common->write_skb) {
6326 // handle the header
6327 if (count != sizeof(struct cp_lkm_msg_hdr)) {
6328 return -EINVAL;
6329 }
6330 not_copied = copy_from_user(&hdr, buf, count);
6331 if (not_copied) {
6332            return -EFAULT;
6333 }
6334
6335 if ((skb = alloc_skb (count + hdr.len, GFP_KERNEL)) == NULL) {
6336 return -ENOMEM;
6337 }
6338
6339 memcpy(skb->data, &hdr, count);
6340
6341 // setup skb pointers - skb->data points to message data with header immediately before skb->data
6342 skb->len = hdr.len;
6343 skb->data += sizeof(struct cp_lkm_msg_hdr);
6344 skb_set_tail_pointer(skb, hdr.len);
6345
6346 if (!hdr.len) {
6347 goto send_msg;
6348 }
6349
6350 // save until we get the data
6351 common->write_skb = skb;
6352
6353 return count;
6354 }
6355
6356 // handle the data
6357 skb = common->write_skb;
6358 common->write_skb = NULL;
6359
6360 hdrp = (struct cp_lkm_msg_hdr *)(skb->data) - 1;
6361 if (count != hdrp->len) {
6362 dev_kfree_skb_any(skb);
6363 return -EINVAL;
6364 }
6365
6366 not_copied = copy_from_user(skb->data, buf, count);
6367 if (not_copied) {
6368 dev_kfree_skb_any(skb);
6369        return -EFAULT;
6370 }
6371
6372
6373send_msg:
6374 if (common->handle_msg) {
6375 result = common->handle_msg(common, (struct cp_lkm_msg_hdr *)(skb->data) - 1, skb);
6376 if (result) {
6377 return result;
6378 }
6379 }
6380
6381 return count;
6382}
6383
6384unsigned int cp_lkm_poll(struct file *filp, struct poll_table_struct *wait)
6385{
6386 unsigned long flags;
6387 unsigned int mask = 0;
6388 struct cp_lkm_common_ctx *common;
6389
6390 common = (struct cp_lkm_common_ctx *)filp->private_data;
6391
6392 poll_wait(filp, &common->inq, wait);
6393
6394 spin_lock_irqsave(&common->read_list_lock, flags);
6395
6396 if (!list_empty(&common->read_list)) {
6397 mask = POLLIN | POLLRDNORM; // readable
6398 }
6399
6400 spin_unlock_irqrestore(&common->read_list_lock, flags);
6401
6402 return mask;
6403}
6404
6405#ifdef KERNEL_2_6_21
6406int cp_lkm_ioctl (struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
6407#else
6408long cp_lkm_ioctl (struct file *filp, unsigned int cmd, unsigned long arg)
6409#endif
6410{
6411 int result = -EINVAL;
6412
6413 void __user *uargp = (void __user *)arg;
6414 void *kargp = NULL;
6415 struct cp_lkm_common_ctx *common = (struct cp_lkm_common_ctx *)filp->private_data;
6416
6417 DEBUG_TRACE("%s(%p) - cmd:%d", __FUNCTION__, filp, _IOC_NR(cmd));
6418
6419 switch(cmd) {
6420 case CP_LKM_IOCTL_SET_LOG_LEVEL:
6421 cp_lkm_log_level = (uintptr_t)uargp;
6422 LOG("Setting debug log level:%d", cp_lkm_log_level);
6423 cp_lkm_wrapper_set_log_level(cp_lkm_log_level);
6424 return 0;
6425 default:
6426 if (_IOC_SIZE(cmd)) {
6427 kargp = kmalloc(_IOC_SIZE(cmd), GFP_ATOMIC);
6428 if (!kargp) {
6429 result = -ENOMEM;
6430 goto done;
6431 }
6432 if (copy_from_user(kargp, uargp, _IOC_SIZE(cmd))) {
6433 result = -EFAULT;
6434 goto done;
6435 }
6436 }
6437 }
6438
6439 if (common->handle_ioctl) {
6440 result = common->handle_ioctl(common, cmd, kargp);
6441 }
6442
6443
6444 if (_IOC_DIR(cmd) & _IOC_READ) {
6445 if (copy_to_user(uargp, kargp, _IOC_SIZE(cmd))) {
6446 result = -EFAULT;
6447 goto done;
6448 }
6449 }
6450
6451done:
6452 if (kargp) {
6453 kfree(kargp);
6454 }
6455
6456 return result;
6457}
6458
6459
6460static int __init cp_lkm_start(void)
6461{
6462 int err;
6463
6464 //printk("%s() Initializing module...\n", __FUNCTION__);
6465
6466 // initialize global structures
6467
6468 err = cp_lkm_pm_tty_init();
6469 if (err) {
6470 return err;
6471 }
6472
6473 cp_lkm_usb_init();
6474
6475 cp_lkm_pm_init();
6476
6477 // Allocating memory for the buffer
6478 if ((major = register_chrdev(0, "cp_lkm", &cp_lkm_fops)) < 0) {
6479 DEBUG_INFO("%s() failed dynamic registration", __FUNCTION__);
6480 cp_lkm_pm_tty_cleanup();
6481 return major;
6482 }
6483
6484 cp_lkm_class = class_create(THIS_MODULE, "cp_lkm");
6485 if (IS_ERR(cp_lkm_class)) {
6486 DEBUG_INFO("%s() failed class create", __FUNCTION__);
6487 unregister_chrdev(major, "cp_lkm");
6488 cp_lkm_pm_tty_cleanup();
6489 return -ENODEV;
6490 }
6491#ifdef KERNEL_2_6_21
6492 cp_lkm_dev[0] = device_create(cp_lkm_class, NULL, MKDEV(major, CP_LKM_USB_MGR_MINOR), "cp_lkm_usb");
6493#else
6494 cp_lkm_dev[0] = device_create(cp_lkm_class, NULL, MKDEV(major, CP_LKM_USB_MGR_MINOR), NULL, "cp_lkm_usb");
6495#endif
6496 if (IS_ERR(cp_lkm_dev[0])){
6497        DEBUG_INFO("%s() failed device create: cp_lkm_usb", __FUNCTION__);
6498 // clean up previous devices
6499 class_destroy(cp_lkm_class);
6500 unregister_chrdev(major, "cp_lkm");
6501 cp_lkm_pm_tty_cleanup();
6502 return -ENODEV;
6503 }
6504
6505#ifdef KERNEL_2_6_21
6506 cp_lkm_dev[1] = device_create(cp_lkm_class, NULL, MKDEV(major, CP_LKM_PM_MGR_MINOR), "cp_lkm_pm");
6507#else
6508 cp_lkm_dev[1] = device_create(cp_lkm_class, NULL, MKDEV(major, CP_LKM_PM_MGR_MINOR), NULL, "cp_lkm_pm");
6509#endif
6510 if (IS_ERR(cp_lkm_dev[1])){
6511        DEBUG_INFO("%s() failed device create: cp_lkm_pm", __FUNCTION__);
6512 // clean up previous devices
6513 device_destroy(cp_lkm_class, MKDEV(major, 0));
6514 class_destroy(cp_lkm_class);
6515 unregister_chrdev(major, "cp_lkm");
6516 cp_lkm_pm_tty_cleanup();
6517 return -ENODEV;
6518 }
6519
6520 LOG("cp_lkm: Inserting kernel module");
6521
6522 return 0;
6523}
6524
6525static void __exit cp_lkm_end(void)
6526{
6527 int i;
6528
6529 //TODO remove
6530 //del_timer_sync (&dbg_memleak_timer);
6531
6532
6533 cp_lkm_pm_cleanup();
6534 cp_lkm_usb_cleanup();
6535
6536 for (i = 0; i < 2; i++) {
6537 device_destroy(cp_lkm_class, MKDEV(major, i));
6538 }
6539 class_destroy(cp_lkm_class);
6540 unregister_chrdev(major, "cp_lkm");
6541
6542 cp_lkm_pm_tty_cleanup();
6543
6544 LOG("cp_lkm: Removing kernel module");
6545}
6546
6547module_init(cp_lkm_start);
6548module_exit(cp_lkm_end);
6549MODULE_LICENSE("GPL");
6550
6551