/*
 * FILE NAME cpmodem_shim.c
 *
 * BRIEF MODULE DESCRIPTION
 *  Frankendriver - USB to ethernet, IP or PPP controlled via a block driver.
 *
 *  Author: CradlePoint Technology, Inc. <source@cradlepoint.com>
 *          Ben Kendall <benk@cradlepoint.com>
 *          Cory Atkin <catkin@cradlepoint.com>
 *
 * Copyright 2012, CradlePoint Technology, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to:
 *   Free Software Foundation
 *   51 Franklin Street, Fifth Floor
 *   Boston, MA 02111-1301 USA
 */


// Necessary includes for device drivers
#include <linux/module.h>   // Needed by all modules
#include <linux/kernel.h>   // Needed for KERN_xxxx
#include <linux/init.h>     // Needed for the macros
#include <linux/cdev.h>
#include <linux/slab.h>     // kmalloc()
#include <linux/fs.h>       // everything...
#include <linux/poll.h>
#include <linux/errno.h>    // error codes
#include <linux/types.h>    // size_t
#include <linux/proc_fs.h>
#include <linux/fcntl.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <net/addrconf.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/spinlock.h>
#include <linux/ktime.h>
/* #include <asm/system.h> // cli(), *_flags */
#include <asm/uaccess.h>    // copy_from/to_user
#include <linux/usb.h>
#include <linux/version.h>  // LINUX_VERSION_CODE
#include <cpmodem_shim.h>
#include <cpmodem_wrapper.h>


//#define KERNEL_2_6_21 // comment this out for 3.0.29 kernel
/*********************************************** logging and debug ************************************************/

#define RUNTIME_DEBUG_TRACE (1 << 0)
#define RUNTIME_DEBUG_INFO  (1 << 1)
#define RUNTIME_DEBUG_WARN  (1 << 2)
#define RUNTIME_DEBUG_ERROR (1 << 3)
#define RUNTIME_LOG         0
#define RUNTIME_ASSERT      -1

//#undef RUNTIME_DEBUG
//#define RUNTIME_DEBUG ( /*RUNTIME_DEBUG_TRACE |*/ RUNTIME_DEBUG_INFO | RUNTIME_DEBUG_WARN | RUNTIME_DEBUG_ERROR )


static int cp_lkm_log_level = 0;

#ifdef RUNTIME_DEBUG
static const char *cp_lkm_shim_runtime_debug_level_str[] = {
    "ASSERT",
    "TRACE",
    "INFO",
    "WARN",
    "ERROR",
};
#else
static const char *cp_lkm_shim_debug_log_level_str[] = {
    "ASSERT",
    "ERROR",
    "WARN",
    "INFO",
    "TRACE",
    "PRINTF"
};
#endif

static int cp_out_get_level_index(int level)
{
    int level_index = 0;
    while (level) {
        level = level >> 1;
        level_index++;
    }
    return level_index;
}
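/*
 * Worked example (with RUNTIME_DEBUG defined): RUNTIME_DEBUG_WARN is 1 << 2, so
 * the loop above shifts it to zero in three steps and returns 3, which indexes
 * "WARN" in cp_lkm_shim_runtime_debug_level_str[]. RUNTIME_LOG (0) and
 * RUNTIME_ASSERT (-1) never reach this lookup; cp_out() below only maps
 * levels > 0 to a level string.
 */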

static void cp_out(int level, const char * file, int line, const char *fmt, ...)
{
    int file_str_len = 0;
    char *file_pos = (char *)file;
    char *fmt1;
    va_list arg;
    int level_index = 0;
    const char *level_str = NULL;
    const char *kernel_lvl_str = NULL;

    if (level > 0) { // level of 0 is LOG and -1 is ASSERT - always output
        level_index = cp_out_get_level_index(level);

#ifdef RUNTIME_DEBUG
        if (!(RUNTIME_DEBUG & level)) {
            return;
        }
        level_str = cp_lkm_shim_runtime_debug_level_str[level_index];
#else
        if (!(cp_lkm_log_level & level)) {
            return;
        }
        level_str = cp_lkm_shim_debug_log_level_str[level_index];
#endif
    }


    switch(level) {
    case RUNTIME_DEBUG_TRACE:
        kernel_lvl_str = KERN_INFO;
        break;
    case RUNTIME_DEBUG_INFO:
        kernel_lvl_str = KERN_INFO;
        break;
    case RUNTIME_DEBUG_WARN:
        kernel_lvl_str = KERN_WARNING;
        break;
    case RUNTIME_DEBUG_ERROR:
        kernel_lvl_str = KERN_ERR;
        break;
    case RUNTIME_LOG:
        kernel_lvl_str = KERN_INFO;
        break;
    case RUNTIME_ASSERT:
        kernel_lvl_str = KERN_ERR;
        break;
    default:
        kernel_lvl_str = KERN_INFO;
        break;
    }


    va_start(arg, fmt);

    if (file) {
        char *pos = (char *)file;
        while ((pos = strchr(pos, '/'))) {
            pos++;
            file_pos = pos;
        }

        file_str_len = strlen(file_pos);
    }

    fmt1 = kmalloc(strlen(fmt) + file_str_len + 12 + 6 + 2, GFP_ATOMIC); // +6 for debug type indication, +2 for linux syslog level
    if (!fmt1) {
        return;
    }
    if (level_str) {
        if (file) {
            sprintf(fmt1, "%s%6s %s(%4d):%s\n", kernel_lvl_str, level_str, file_pos, line, fmt);
        } else {
            sprintf(fmt1, "%s%6s %s\n", kernel_lvl_str, level_str, fmt);
        }
    } else {
        if (file) {
            sprintf(fmt1, "%s%s(%4d):%s\n", kernel_lvl_str, file_pos, line, fmt);
        } else {
            sprintf(fmt1, "%s%s\n", kernel_lvl_str, fmt);
        }
    }
    vprintk(fmt1, arg);
    kfree(fmt1);
    va_end(arg);
}

#ifdef RUNTIME_DEBUG
// assert is always defined if RUNTIME_DEBUG is defined
// bad idea to kill things in kernel, so we just print the assert msg and keep going
#define DEBUG_ASSERT(a, args...) \
    if (!(a)) { \
        printk(KERN_ERR "\n!!! CPMODEM_SHIM ASSERT !!!\n"); \
        cp_out(RUNTIME_ASSERT, __FILE__, __LINE__, args); \
        dump_stack(); \
    }
#define DEBUG_TRACE(args...) cp_out(RUNTIME_DEBUG_TRACE, __FILE__, __LINE__, args)
#define DEBUG_INFO(args...)  cp_out(RUNTIME_DEBUG_INFO,  __FILE__, __LINE__, args)
#define DEBUG_WARN(args...)  cp_out(RUNTIME_DEBUG_WARN,  __FILE__, __LINE__, args)
#define DEBUG_ERROR(args...) cp_out(RUNTIME_DEBUG_ERROR, __FILE__, __LINE__, args)
#else
#define DEBUG_ASSERT(a, args...)
#define DEBUG_TRACE(args...)  cp_out(LOG_DEBUG_LEVEL_TRACE,  __FILE__, __LINE__, args)
#define DEBUG_INFO(args...)   cp_out(LOG_DEBUG_LEVEL_INFO,   __FILE__, __LINE__, args)
#define DEBUG_WARN(args...)   cp_out(LOG_DEBUG_LEVEL_WARN,   __FILE__, __LINE__, args)
#define DEBUG_ERROR(args...)  cp_out(LOG_DEBUG_LEVEL_ERROR,  __FILE__, __LINE__, args)
#define DEBUG_PRINTF(args...) cp_out(LOG_DEBUG_LEVEL_PRINTF, __FILE__, __LINE__, args)
#endif

#define LOG(args...) cp_out(RUNTIME_LOG, NULL, 0, args)
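/*
 * Usage note (illustrative): the macros above expand to cp_out() calls, so
 *     DEBUG_WARN("%s() bad hdr len %d", __FUNCTION__, len);
 * produces a printk line of the form
 *     "  WARN cpmodem_shim.c( 123):cp_lkm_foo() bad hdr len 9"
 * and is emitted only if the matching level bit is enabled (cp_lkm_log_level
 * at runtime, or RUNTIME_DEBUG at compile time).
 */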

/*********************************************** general definitions and helper functions *************************/

// Buffer to store data
struct cp_lkm_read_msg {
    struct cp_lkm_msg_hdr hdr;
    struct sk_buff *skb;
    struct list_head list;
};

struct cp_lkm_common_ctx {
    u8 open_cnt;

    // read operation members
    wait_queue_head_t inq;
    struct list_head read_list;
    spinlock_t read_list_lock;
    bool reading_data;
    bool q_waiting;
    // write operation members
    struct sk_buff *write_skb;

    int (*open)(struct cp_lkm_common_ctx *ctx);  // called at open
    int (*close)(struct cp_lkm_common_ctx *ctx); // called at close
    int (*handle_msg)(struct cp_lkm_common_ctx *ctx, struct cp_lkm_msg_hdr *hdr, struct sk_buff *skb); // called at write
    int (*handle_ioctl)(struct cp_lkm_common_ctx *ctx, int cmd, void *k_argp); // called at ioctl
};


int cp_lkm_open(struct inode *inode, struct file *filp);
int cp_lkm_release(struct inode *inode, struct file *filp);
ssize_t cp_lkm_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos);
ssize_t cp_lkm_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos);
#ifdef KERNEL_2_6_21
int cp_lkm_ioctl (struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg);
#else
long cp_lkm_ioctl (struct file *filp, unsigned int cmd, unsigned long arg);
#endif
unsigned int cp_lkm_poll(struct file *filp, struct poll_table_struct *);

static void cp_lkm_common_ctx_init(struct cp_lkm_common_ctx *common);
static void cp_lkm_cleanup_msg_list(struct cp_lkm_common_ctx *common);
static int cp_lkm_post_message(struct cp_lkm_common_ctx *mgr, struct cp_lkm_msg_hdr* hdr, struct sk_buff *skb);

/* Structure that declares the usual file
   access functions */
struct file_operations cp_lkm_fops = {
    .owner = THIS_MODULE,
    .read = cp_lkm_read,
    .write = cp_lkm_write,
#ifdef KERNEL_2_6_21
    .ioctl = cp_lkm_ioctl,
#else
    .unlocked_ioctl = cp_lkm_ioctl,
#endif
    .open = cp_lkm_open,
    .poll = cp_lkm_poll,
    .release = cp_lkm_release
};

static int major;
static struct device *cp_lkm_dev[2];
static struct class *cp_lkm_class;

#define CP_LKM_USB_MGR_MINOR 0
#define CP_LKM_PM_MGR_MINOR  1
#define CP_LKM_ITER 3000       //CP_LKM_ITER * CP_LKM_TIMEOUT_MS = 30000, or 30 seconds
#define CP_LKM_TIMEOUT_MS 10
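/*
 * Registration sketch (illustrative; the real module_init for this driver is
 * not part of this section, and the "cpusb" names below are hypothetical). It
 * shows how cp_lkm_fops, major, cp_lkm_class and the two minors above would
 * typically be tied together for a classic char device.
 */
#if 0
static int __init cp_lkm_example_init(void)
{
    major = register_chrdev(0, "cpusb", &cp_lkm_fops); // 0 = request a dynamic major
    if (major < 0) {
        return major;
    }
    cp_lkm_class = class_create(THIS_MODULE, "cpusb");
    if (IS_ERR(cp_lkm_class)) {
        unregister_chrdev(major, "cpusb");
        return PTR_ERR(cp_lkm_class);
    }
    cp_lkm_dev[CP_LKM_USB_MGR_MINOR] = device_create(cp_lkm_class, NULL,
            MKDEV(major, CP_LKM_USB_MGR_MINOR), NULL, "cpusb_usb_mgr");
    cp_lkm_dev[CP_LKM_PM_MGR_MINOR] = device_create(cp_lkm_class, NULL,
            MKDEV(major, CP_LKM_PM_MGR_MINOR), NULL, "cpusb_pm_mgr");
    return 0;
}
#endif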
291
292typedef int (*cp_lkm_data_transfer_t)(void *ctx, struct sk_buff *skb);
293typedef void (*cp_lkm_data_hdr_size_t)(void *ctx, int wrapper_hdr_size, int *hdr_size, int* hdr_offset);
294typedef int (*cp_lkm_poll_t)(void *ctx, int budget);
295typedef void (*cp_lkm_schedule_t)(void *ctx);
296typedef void (*cp_lkm_complete_t)(void *ctx);
297typedef int (*cp_lkm_msg_t)(void *ctx);
298struct cp_lkm_edi {
299 //values provided by usb side, called by pm side
300 cp_lkm_data_transfer_t usb_send;
301 void *usb_send_ctx;
302
303 //value provided by pm side, called by usb side
304 cp_lkm_msg_t pm_send_pause; //called by usb to pause the network q
305 cp_lkm_msg_t pm_send_resume; //called by usb to resume the network q
306 cp_lkm_data_transfer_t pm_recv;
307 cp_lkm_data_hdr_size_t pm_get_hdr_size; //ask pm how much space it needs for headers
308 void *pm_recv_ctx;
309
310 void *pm_stats64_ctx;
311};

static int cp_lkm_pm_usb_link(struct cp_lkm_edi *edi, int pm_unique_id, int link);

struct cp_lkm_pm_stats64 {
    u64 rx_packets;
    u64 tx_packets;
    u64 rx_bytes;
    u64 tx_bytes;
    u64 rx_errors;
    u64 tx_errors;
    u64 rx_dropped;
    u64 tx_dropped;

    u64 rx_over_errors;

    struct u64_stats_sync syncp;
};

struct cp_lkm_pm_common {
    int unique_id;
    u32 attached;
    cp_lkm_pm_type_t type;
    struct net_device *net_dev;
    struct cp_lkm_edi *edi;
    struct list_head filter_list;
    u32 filter_drop_cnt;

    // keep these in pm context so dual sim hidden unplug/plug do not affect the stats
    struct cp_lkm_pm_stats64 *pcpu_stats64;

    int pm_link_count;       //token used to prevent xmit and poll from being called while we are linking or unlinking; -1 = unlinking, so block xmit and poll
    spinlock_t pm_link_lock; //lock to protect getting and releasing the pm_link_count token

    struct list_head list;
};

//static void cp_lkm_pm_update_stats64(struct cp_lkm_pm_stats64 *stats, u64 *field, u64 incr);
#define UPDATE_STATS(stats_ctx, field, incr) if (stats_ctx) { \
            struct cp_lkm_pm_stats64 *stats = this_cpu_ptr(((struct cp_lkm_pm_common *)stats_ctx)->pcpu_stats64); \
            if (stats) { \
                u64_stats_update_begin(&stats->syncp); \
                stats->field += incr; \
                u64_stats_update_end(&stats->syncp); \
            } \
        }
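/*
 * Reader-side sketch (illustrative; the driver's own stats64 handler is not
 * shown in this section). UPDATE_STATS writes under u64_stats_update_begin/
 * end, so a reader must retry with u64_stats_fetch_begin/retry and sum the
 * per-cpu counters:
 */
#if 0
static u64 cp_lkm_pm_example_rx_packets(struct cp_lkm_pm_common *pmc)
{
    u64 total = 0;
    int cpu;

    for_each_possible_cpu(cpu) {
        struct cp_lkm_pm_stats64 *stats = per_cpu_ptr(pmc->pcpu_stats64, cpu);
        unsigned int start;
        u64 val;

        do {
            start = u64_stats_fetch_begin(&stats->syncp);
            val = stats->rx_packets;
        } while (u64_stats_fetch_retry(&stats->syncp, start));
        total += val;
    }
    return total;
}
#endif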

//Keep these commented out for release
//static int dbg_memleak_timer_started = 0;
//static struct timer_list dbg_memleak_timer;
//static spinlock_t dbg_state_lock;
//static int dbg_state_init = 0;
//static int g_dbg_memalloc_cnt = 0;
//static int g_stuck_cnt = 0;
//static int g_stuck_chk = 0;
//static int g_unlink_cnt = 0;

typedef size_t ref_t;
typedef void (*memref_final_method_t)(void *buf);
struct memref {
    memref_final_method_t mfree;
    atomic_t refs;
};


void *memref_alloc(size_t size, memref_final_method_t mfree)
{
    struct memref *ptr;

    ptr = (struct memref *)kmalloc(sizeof(struct memref) + size, GFP_ATOMIC);
    if (!ptr) {
        return NULL;
    }
    //g_dbg_memalloc_cnt++;
    ptr->mfree = mfree;
    atomic_set(&ptr->refs, 1);

    return (ptr + 1);
}

void *memref_alloc_and_zero(size_t size, memref_final_method_t mfree)
{
    void *ptr;

    ptr = memref_alloc(size, mfree);
    if (!ptr) {
        return NULL;
    }

    memset(ptr, 0x00, size);

    return ptr;
}

static void *memref_ref(void *buf)
{
    struct memref *mb;

    if (!buf) {
        return NULL;
    }

    mb = (struct memref *)(buf) - 1;

//    if (0 == atomic_read(&mb->refs)) {
//        DEBUG_INFO("%s() !refs", __FUNCTION__);
//        return NULL;
//    }

    atomic_inc(&mb->refs);

    return buf;
}

#if 0
static ref_t memref_cnt(void *buf)
{
    struct memref *mb;

    if (!buf) {
        return 0;
    }

    mb = (struct memref *)(buf) - 1;
    return atomic_read(&mb->refs);
}
#endif

static ref_t memref_deref(void *buf)
{
    struct memref *mb;

    if (!buf) {
        return 0;
    }

    mb = (struct memref *)(buf) - 1;

//    if (0 == atomic_read(&mb->refs)) {
//        DEBUG_INFO("%s() !refs", __FUNCTION__);
//        return NULL;
//    }

    if (atomic_dec_and_test(&mb->refs)) {
        //g_dbg_memalloc_cnt--;
        if (mb->mfree) {
            mb->mfree(buf);
        }
        kfree(mb);
        return 0;
    }

    return atomic_read(&mb->refs);
}
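/*
 * Lifecycle sketch (illustrative; not called from this file): memref buffers
 * carry a hidden struct memref header just before the pointer handed back to
 * the caller. Every memref_ref() must be balanced by a memref_deref(); the
 * finalizer, if given, runs just before the memory is freed.
 */
#if 0
static void example_finalize(void *buf)
{
    // last chance to release anything the buffer owns
}

static void memref_example(void)
{
    char *buf = (char *)memref_alloc_and_zero(128, example_finalize);
    if (!buf) {
        return;
    }
    memref_ref(buf);   // refs: 1 -> 2 (e.g. buf is now also held by a second list)
    memref_deref(buf); // refs: 2 -> 1
    memref_deref(buf); // refs: 1 -> 0: example_finalize(buf) runs, then kfree()
}
#endif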

/*
 * Generic function to repeatedly call a function until it either succeeds or the delay and
 * iterations have been exhausted. Optionally it can throw a kernel panic on failure.
 *
 * ctx1, ctx2 - the contexts to pass into do_fun
 * do_fun     - the function to call until it returns success
 * delay_ms   - the amount of time to delay between calls to do_fun on failure
 * iter       - the number of times to call do_fun
 * die_str    - if it should panic on failure, then pass in the die_str to display
 *
 * If die_str is provided, this function will not return on failure;
 * else it will return the result of the final call to do_fun.
 * Note: total wait time is delay_ms * iter
 */
typedef bool (*do_function_t)(void* ctx1, void* ctx2);
bool cp_lkm_do_or_die(void* ctx1, void* ctx2, do_function_t do_fun, u32 delay_ms, u32 iter, const char* die_str)
{
    bool done = false;
    //set_current_state(TASK_UNINTERRUPTIBLE);
    while (!done && iter) {
        iter--;
        done = do_fun(ctx1, ctx2);
        if (!done) {
            msleep(delay_ms);
            //schedule_timeout(msecs_to_jiffies(delay_ms));
            //set_current_state(TASK_UNINTERRUPTIBLE);
        }
    }
    if (!done && die_str) {
        panic(die_str);
        //BUG_ON()
    }
    //set_current_state(TASK_RUNNING);
    return done;
}

/******************************* kernel module USB/Wrapper functionality *********************************
 *
 * The shim has multiple entry points. It can be pumped by hw interrupts, software interrupts, or threads.
 * The trick to getting the shim to work properly is knowing from which contexts the different functions can be called
 * and what you can do in that context.
 *
 * The biggest concern is to make sure we aren't nulling out a function or instance pointer in one context while another
 * context is using it. Pointers are changed when linking or unlinking to the protocol manager or when the device unplugs.
 * For link/unlink or unplug, we need to make sure all other processing has been blocked or stopped. We use a combination of
 * tokens and spinlocks to achieve this.
 *
 * Another complexity is dealing with multi-core processors such as we have in some routers. With multi-core you can have
 * a hw interrupt, software interrupt or thread running on one core and a hw interrupt, soft interrupt, or thread running on
 * another at the same time. In addition, the same soft interrupt code can run on both cores at the same time.
 * With single core, the hw int would block the thread. The shim was originally designed for a single-core system, so a lot of work
 * has been put into verifying that multi-core works.
 *
 * Single core: We can be pumped by:
 *   Hardware interrupt - all interrupts disabled, can't be preempted
 *   Software interrupt - hw interrupts not disabled, can be preempted by hw interrupt
 *   Thread or other process - can be preempted by hw or sw interrupt.
 *
 * Multi core: all bets are off. Everything can run at the same time, so you have to be very careful with locks and tokens to not corrupt
 * variables and to not run functions reentrantly.
 *
 * Here are the specific contexts (threads, processes) that pump us:
 * 1. USB on a hardware interrupt context. This happens on tx and rx done (all interrupts disabled, schedule callbacks and get out fast)
 * 2. USB on the hub thread. This happens on unplug (can sleep or pause, but be careful because it stops all USB system hub processing)
 * 3. Kernel workqueue thread (our own callback, can sleep or pause, but be careful, it stops all the kernel workqueue processing)
 * 4. tasklet or timer soft interrupt context (our own callbacks on sw interrupt, hw interrupts enabled, can't sleep or pause)
 * 5. ioctl or device write on a kernel thread (this is cpusb in app space talking to us, runs on a thread, can be preempted in multi-core)
 * 6. network (send from network side, runs as a software interrupt)
 *
 * Which functions are called in which contexts and what they do:
 * #1 - cp_lkm_usb_xmit_complete - called by usb layer when transmit is done in hw interrupt context
 *          throws the transfer in the done q; on success, schedules the tasklet or NAPI poll (#4) by calling
 *          cp_lkm_usb_done_and_defer_data() for data packets or cp_lkm_usb_done_and_defer_other() for non-data pkts.
 *          On error, schedules a kevent (#3) by calling cp_lkm_usb_defer_kevent()
 *      cp_lkm_usb_recv_complete - called by usb layer when recv is done in hw interrupt context
 *          throws the transfer in the done q, schedules the tasklet or NAPI poll (#4); on error, schedules a kevent (#3)
 *
 * #2 - cp_lkm_usb_probe - called when the usb hub layer detects a plug, called on hub thread context
 *      cp_lkm_usb_disconnect - called when the usb hub layer detects an unplug, called on hub thread context
 *          schedules mgr_kevent to clean up
 *
 * #3 - cp_lkm_usb_kevent - scheduled by tx and rx complete (#1) on USB halt errors or out of memory failure. Is a workqueue thread
 *          clears the halts, sees if memory is available. On success, schedules the tasklet or NAPI poll (#4)
 *
 * #4 - cp_lkm_usb_process_data_done_tasklet - scheduled by rx or tx complete (#1). Runs in soft int context. This function is used when we
 *          are using a non-NAPI compliant protocol manager (i.e. PPP). It processes recv'd pkts and sends
 *          them on to the protocol manager, frees all sent skb's and restocks more recv urbs to the USB layer.
 *      cp_lkm_usb_process_other_done_tasklet - same as the first one except it is scheduled anytime we recv a pkt that needs to go to the common
 *          modem stack instead of to the network stack (ctrl, status or diagnostics pkt)
 *
 * #5 - cp_lkm_usb_handle_ioctl - ioctl mux function called by the kernel when the app ioctl is called
 *          calls the appropriate mux function
 *      cp_lkm_usb_plug_intf - called by ioctl mux to register a device. Registers a usb driver to catch
 *          the plug event from the usb stack
 *      cp_lkm_usb_open_intf - called by ioctl mux to indicate the data channel is active. This causes us to
 *          mux all data packets to the network stack instead of up to cpusb in app space
 *      cp_lkm_usb_close_intf - called by ioctl mux to indicate the data connection has gone down.
 *          This causes us to mux all packets up to cpusb in app space instead of to the network
 *
 *      cp_lkm_usb_unplug_intf - called by ioctl mux. Releases the interface, deregisters the usb driver, cleans up memory
 *      cp_lkm_usb_handle_msg - called by the device driver write function. This is how cpusb sends us usb packets that
 *          we need to send to usb
 * #6 - cp_lkm_usb_start_xmit - called by the network interface
 *          sends a transmit to the usb layer
 */


struct cp_lkm_usb_dev;
struct cp_lkm_usb_base_dev;


/* we record the state for each of our queued skbs */
enum skb_state {
    illegal = 0,
    out_start,        // start a data or other transmit
    out_done,         // data or other transmit done
    in_data_start,    // start a recv (either data or other)
    in_data_done,     // recv data done
    in_data_cleanup,
    in_other_start,
    in_other_done,    // recv other done
    in_other_cleanup,
    ctrl_start,       // start a usb ctrl transfer
    ctrl_done,        // usb ctrl transfer finished
    unlink_start      // telling usb to give our urb back
};

#define EVENT_TX_HALT    0
#define EVENT_RX_HALT    1
#define EVENT_RX_MEMORY  2
#define EVENT_STS_SPLIT  3
#define EVENT_LINK_RESET 4
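/*
 * These are bit numbers for the per-endpoint err_flags word (see struct
 * cp_lkm_base_ep below). A sketch of the usbnet-style deferral idiom they
 * support (an assumption about cp_lkm_usb_defer_kevent(), whose body appears
 * later in the file):
 */
#if 0
    set_bit(EVENT_RX_HALT, &bep->err_flags); // record which error occurred...
    schedule_work(&cpbdev->kevent);          // ...and let cp_lkm_usb_kevent() clear it in thread context
#endif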

//These are standard USB defines
#define UE_BULK      0x02
#define UE_INTERRUPT 0x03

#define MAX_INTF_EPS 10

#define CP_LKM_USB_RECV   0x01
#define CP_LKM_USB_LISTEN 0x02

struct cp_lkm_base_ep
{
    struct list_head list;              // for inserting in the cpbdev list of base endpoints
    struct list_head eps;               // list of cloned endpoints based off this one
    struct cp_lkm_usb_base_dev* cpbdev; // pointer back to the cpbdev this endpoint belongs to
    int ep_num;                         // endpoint number
    unsigned long err_flags;            // errors on the ep (halt, no mem)
    int con_flags;                      //connection flags (recv, listen)
    int q_cnt;                          //number of urbs down at the lower layer
    int type;                           //ep type (interrupt, bulk etc)
    int max_transfer_size;
    int pipe;
    int interval;                       // interval for interrupt end points
};

struct cp_lkm_ep
{
    struct list_head list_bep;    // for being inserted into the bep's list of eps
    struct list_head list_cpdev;  // for being inserted into the cpdev's list of eps
    struct cp_lkm_base_ep* bep;   // pointer to this ep's base endpoint
    struct cp_lkm_usb_dev* cpdev; // pointer back to the cpdev this endpoint belongs to
    int con_flags;                //connection flags (recv, listen)
    int ep_num;                   // duplicated from base endpoint for convenience
};

/* This struct gets stored in skb->cb, which is currently a 48 byte buffer.
   The size of this struct must never be bigger than 48.
*/
struct skb_data {
    //if pointers and ints are 64 bits (8 bytes) then this is 48 bytes currently and
    //no other variables can be added
    struct urb *urb;
    struct cp_lkm_usb_base_dev *cpbdev;
    struct cp_lkm_base_ep* bep;
    enum skb_state state;
    int status;
    int unique_id; //id of the cpdev that sent the tx pkt
};
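/*
 * Compile-time guard (illustrative; the original relies on a commented-out
 * DEBUG_ASSERT in cp_lkm_usb_plug_intf instead). BUILD_BUG_ON() is the usual
 * kernel idiom for enforcing the 48-byte skb->cb limit described above:
 */
#if 0
static inline void cp_lkm_skb_data_size_check(void)
{
    BUILD_BUG_ON(sizeof(struct skb_data) > sizeof(((struct sk_buff *)0)->cb));
}
#endif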

#define MAX_USB_DRVR_NAME 10
#define USB_DRVR_FRMT_STR "cpusb%d"

struct cp_lkm_usb_base_dev
{
    struct list_head list;         //for inserting in global dev list
    struct list_head cpdev_list;   //list of cpdevs cloned from this base dev
    struct list_head in_bep_list;  // list of base in endpoints
    struct list_head out_bep_list; // list of base out endpoints
    int data_in_bep_num;           //data in ep number
    int data_out_bep_num;          //data out ep number

    struct usb_driver* usb_driver;
    struct usb_device_id* usb_id_table;
    int vid;
    int pid;
    int intf_num;
    int alt_intf_num;
    int usb_bus;
    int usb_addr;
    int feature_flags;
    int base_id;                   //unique id of the first clone to plug
    cp_lkm_usb_state_t base_state;

    struct sk_buff_head in_q;      //recv skb's are stored here while down at usb waiting to be filled with recv data
    struct sk_buff_head out_q;     //send skb's are stored here while down at usb waiting to be transmitted
    struct sk_buff_head ctrlq;     //ctrl skb's are stored here while down at usb waiting to be filled or transmitted
    struct sk_buff_head data_tx_done; //tx skb's are stored here while waiting to be freed
    struct sk_buff_head data_rx_done; //recv and ctrl skb's are stored here while waiting to have recv data processed
    struct sk_buff_head other_done;   //sent skb's are stored here while waiting to be freed

    u32 data_q_len;                // holds count of data pkts (both rx and tx) needing to be processed
    spinlock_t data_q_lock;        // lock to keep data_q_len sync'd
    spinlock_t processing_state_lock;
    cp_lkm_usb_process_state_t processing_state;
    spinlock_t other_state_lock;
    cp_lkm_usb_process_state_t other_state;
    bool scheduled;                //tasklet scheduled to process the pending work

    struct tasklet_struct other_process_tasklet;
    struct tasklet_struct data_process_tasklet;

    int rx_schedule_threshold;
    int tx_schedule_threshold;
    int tx_resume_threshold;

    struct work_struct kevent;
    char usb_drvr_name[MAX_USB_DRVR_NAME];
    void* wrapper_ctxt;
    int wrapper_hdr_size;
    int pm_hdr_size;
    int pm_hdr_offset;

    struct usb_interface* intf;
    struct usb_device *udev;

    int plug_result;
    bool disconnect_wait;

    struct timer_list rx_delay;

    int tx_usb_q_count;
    bool tx_paused;

    struct timer_list usb_pause_stuck_timer;
    int tx_proc_cnt;          //how many data tx pkts have we successfully sent
    int tx_proc_cnt_at_pause; //how many data tx pkts we had sent when we paused

    #if 0
    //debug stuff, comment out
    //unsigned int dbg_total_stuck_cnt;
    //unsigned int dbg_total_tx_at_stuck_cnt;
    //unsigned int dbg_total_tx_proc;
    #endif
};

struct cp_lkm_usb_dev
{
    //init at open
    struct cp_lkm_usb_base_dev* cpbdev;
    int unique_id;
    int pm_id;
    int clone_num;
    int mux_id;

    cp_lkm_usb_state_t state;
    struct list_head list; //for inserting in base dev list

    struct cp_lkm_edi* edi;

    struct list_head in_ep_list;  //list of in endpoints on the dev
    struct list_head out_ep_list; //list of out endpoints on the dev
    int data_in_ep_num;           //data in ep number
    int data_out_ep_num;          //data out ep number

    //for debug
    #if 0
    struct timer_list dbg_timer;
    unsigned int dbg_total_rx_irq;
    unsigned int dbg_total_tx_irq;
    unsigned int dbg_total_rx_proc;
    unsigned int dbg_total_d_done;
    unsigned int dbg_total_o_done;
    unsigned int dbg_total_pause;
    unsigned int dbg_total_resume;
    unsigned int dbg_total_max_work;
    unsigned int dbg_total_timeout;
    unsigned int dbg_total_budget;
    unsigned int dbg_total_o_tasklet;
    unsigned int dbg_total_d_resched;
    unsigned int dbg_total_wq_sched;
    unsigned int dbg_total_napi_sched;
    unsigned int dbg_total_tasklet_sched;
    unsigned int dbg_total_d_comp;
    //unsigned int dbg_total_ic;
    //unsigned int dbg_total_tc;
    unsigned int dbg_total_rx_qlen;
    unsigned int dbg_total_tx_qlen;
    unsigned int dbg_total_num_hybrid_t;
    unsigned int dbg_total_num_normal_t;
    unsigned int dbg_total_num_hybrid;
    unsigned int dbg_total_num_normal;
    unsigned int dbg_total_num_d_timers;
    unsigned int dbg_total_sch_sk;
    #endif
};

struct cp_lkm_usb_ctx
{
    struct cp_lkm_common_ctx common;
    struct list_head dev_list;
    spinlock_t lock; //used to protect access to dev_list from different instances. Also used to coordinate thread accesses from the usb and cpmodem layers.
                     //When one thread grabs the lock, no other threads can run (soft and hw IRQs can still run). The usb hub unplug handler runs on a thread,
                     //so if one thread grabs the lock it is guaranteed that the modem can't unplug out from under it while it is doing its thing.
};

//static void cp_lkm_usb_dbg_memleak_timer (unsigned long param);
//static void cp_lkm_usb_dbg_timer (unsigned long param);

enum {
    CP_LKM_STUCK_INIT = 0,
    CP_LKM_STUCK_START,
    CP_LKM_STUCK_STOP,
    CP_LKM_STUCK_DEINIT
};
static void cp_lkm_usb_stuck_check(struct cp_lkm_usb_base_dev* cpbdev, int action);
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(4,4,100))
static void cp_lkm_usb_pause_stuck_timer(unsigned long param);
static void cp_lkm_usb_delay_timer (unsigned long param);
#else
static void cp_lkm_usb_pause_stuck_timer(struct timer_list *timer);
static void cp_lkm_usb_delay_timer (struct timer_list *timer);
#endif
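/*
 * Note on the version split above (a sketch under the assumption that the
 * usual kernel timer API rules apply here): pre-4.15 callbacks take an
 * unsigned long cookie wired up with setup_timer(), while newer kernels pass
 * the struct timer_list itself and the owner is recovered with from_timer().
 */
#if 0
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(4,4,100))
    setup_timer(&cpbdev->usb_pause_stuck_timer, cp_lkm_usb_pause_stuck_timer,
                (unsigned long)cpbdev);       // old style: cpbdev comes back as 'param'
#else
    timer_setup(&cpbdev->usb_pause_stuck_timer, cp_lkm_usb_pause_stuck_timer, 0);
    // new style: the callback recovers its context with
    //   struct cp_lkm_usb_base_dev *cpbdev = from_timer(cpbdev, timer, usb_pause_stuck_timer);
#endif
    mod_timer(&cpbdev->usb_pause_stuck_timer, jiffies + msecs_to_jiffies(1000)); // hypothetical 1 s timeout
#endif
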
static void cp_lkm_usb_kevent (struct work_struct *work);
static int cp_lkm_usb_open(struct cp_lkm_common_ctx *ctx);
static int cp_lkm_usb_close(struct cp_lkm_common_ctx *ctx);
static int cp_lkm_usb_handle_ioctl(struct cp_lkm_common_ctx *ctx, int cmd, void *k_argp);
static int cp_lkm_usb_handle_msg(struct cp_lkm_common_ctx *ctx, struct cp_lkm_msg_hdr *hdr, struct sk_buff *skb);

static int cp_lkm_usb_start_xmit (void *ctx, struct sk_buff *skb);
static int cp_lkm_usb_start_xmit_common(void *ctx, struct sk_buff *skb, int src, struct cp_lkm_ep* ep);
static void cp_lkm_usb_xmit_complete (struct urb *urb);
static int cp_lkm_usb_submit_recv (struct cp_lkm_usb_base_dev* cpbdev, struct urb *urb, gfp_t flags, struct cp_lkm_base_ep* bep, bool data);
static void cp_lkm_usb_recv_complete (struct urb *urb);

static void cp_lkm_usb_other_recv_process (struct cp_lkm_usb_base_dev* cpbdev, struct sk_buff *skb_in);
static void cp_lkm_usb_data_recv_process (struct cp_lkm_usb_base_dev* cpbdev, struct sk_buff *skb);
static void cp_lkm_usb_ctrl_process (struct cp_lkm_usb_base_dev* cpbdev, struct sk_buff *skb_in);

static int cp_lkm_usb_close_intf(struct cp_lkm_usb_close_intf* ci);
static int cp_lkm_usb_unlink_urbs (struct cp_lkm_usb_base_dev *cpbdev, struct sk_buff_head *q, struct cp_lkm_base_ep* bep);

static void cp_lkm_usb_process_other_done_tasklet (unsigned long param);
static void cp_lkm_usb_process_data_done_tasklet (unsigned long param);
static void cp_lkm_usb_rx_data_restock (struct cp_lkm_usb_base_dev* cpdev);
static void cp_lkm_usb_rx_other_restock (struct cp_lkm_usb_base_dev* cpbdev);
static void cp_lkm_usb_defer_kevent (struct cp_lkm_usb_base_dev* cpbdev, struct cp_lkm_base_ep* bep, int work);
static bool cp_lkm_schedule_data_process(struct cp_lkm_usb_base_dev* cpbdev, bool if_data, bool is_resume, bool have_lock);

static void cp_lkm_schedule_rx_restock(struct cp_lkm_usb_base_dev* cpbdev, struct cp_lkm_base_ep* bep);
static int cp_lkm_usb_start_ctrl_xmit(void *ctx, struct sk_buff *skb_in);
static int cp_lkm_usb_have_data(struct cp_lkm_usb_base_dev *cpbdev);

static struct cp_lkm_usb_ctx cp_lkm_usb_mgr;

// Knobs we can tweak on a processor by processor basis to maximize performance
// Dummy values filled in here so we don't get warnings about using uninitialized variables
static int CP_LKM_PM_NAPI_WEIGHT = 0;       //budget we register with NAPI (max number of pkts it thinks we will process).
static int CP_LKM_USB_NAPI_MAX_WORK = 0;    //actual number of pkts we will process (we're not entirely honest with NAPI)
static int CP_LKM_USB_MAX_RX_QLEN = 0;      //max number of rx data URBs we allow to flow in the shim (we alloc these)
static int CP_LKM_USB_MAX_OTHER_QLEN = 0;   //max number of rx urbs on non-data endpoints
static int CP_LKM_USB_TX_PAUSE_Q_PKTS = 0;  //max number of tx data URBs we allow to flow in the shim (alloc'd by network stack, we control this by pausing)
static int CP_LKM_USB_TX_RESUME_Q_PKTS = 0; //un-pause network at this number
//static int CP_LKM_USB_TX_RESUME_Q_PKTS_HYBRID = 0; //un-pause network at this number when in hybrid mode with pkt counting
static int CP_LKM_USB_TX_SCHED_CNT = 0;     //How many done tx's we allow to accumulate before scheduling cleanup in normal mode
//static int CP_LKM_USB_TX_SCHED_CNT_HYBRID = 0; //How many done tx's we allow to accumulate before scheduling cleanup in hybrid mode with pkt counting
static int CP_LKM_USB_RX_SCHED_CNT = 0;     //How many done rx's we allow to accumulate before scheduling processing in normal mode
//static int CP_LKM_USB_RX_SCHED_CNT_HYBRID = 0; //How many done rx's we allow to accumulate before scheduling processing in hybrid mode with pkt counting
static int CP_LKM_USB_RESTOCK_MULTIPLE = 0; //How many rx URBs we should restock as we process them (0 means don't restock as we go, 1 means every one, 2 means 1 out of every 2, etc)
//static int CP_LKM_USB_DATA_MAX_PPS = 0;   //Packets per second that will cause us to transition from normal to hybrid mode when using pkt counting
//static int CP_LKM_USB_DATA_MIN_PPS = 0;   //packets per second that will cause us to transition from hybrid back to normal when using pkt counting
static int CP_LKM_USB_TASKLET_CNT = 0;      //in hybrid mode, schedule tasklet on cnts 0 to this number
static int CP_LKM_USB_WORKQUEUE_CNT = 0;    //in hybrid mode, schedule workqueue on cnts CP_LKM_USB_TASKLET_CNT to this number, then start cnt over
static int CP_LKM_USB_PROCESS_DIVISOR = 0;  //times to loop through the process loop, doing pkts/divisor pkts each time. Set to 1 to only process what was there when entering
//broadcom EHCI controller has issues we need to work around
static int cp_lkm_is_broadcom = 0;

#define CP_LKM_USB_PAUSED_CNT 5000
//TODO remove
#if 0
static int g_dbg_data_skballoc_cnt = 0;
static int g_dbg_other_skballoc_cnt = 0;
static int g_dbg_ctrl_skballoc_cnt = 0;
static int g_dbg_xmit_skballoc_cnt = 0;
static int g_dbg_urballoc_cnt = 0;
static int g_dbg_unplug_cnt = 0;
static void cp_lkm_usb_urb_cnt(int inc)
{
    unsigned long flags;
    spin_lock_irqsave(&dbg_state_lock, flags);
    g_dbg_urballoc_cnt += inc;
    spin_unlock_irqrestore(&dbg_state_lock, flags); //release lock so interrupts can resume firing
}
static void cp_lkm_usb_cnts(int state, int inc)
{
    #if 1
    unsigned long flags;
    spin_lock_irqsave(&dbg_state_lock, flags);

    switch (state) {
    case in_other_start:
    case in_other_done:
    case in_other_cleanup:
        g_dbg_other_skballoc_cnt += inc;
        break;
    case ctrl_start:
    case ctrl_done:
        g_dbg_ctrl_skballoc_cnt += inc;
        break;
    case out_start:
    case out_done:
        g_dbg_xmit_skballoc_cnt += inc;
        break;
    case in_data_start:
    case in_data_done:
    case in_data_cleanup:
        g_dbg_data_skballoc_cnt += inc;
        break;
    case unlink_start:
        g_dbg_unplug_cnt += inc;
        break;
    default:
        printk("!!clean: unknown skb state: %d\n", state);
        break;
    }
    spin_unlock_irqrestore(&dbg_state_lock, flags);
    #endif
}
#endif

static struct cp_lkm_usb_dev* cp_lkm_usb_find_muxed_dev(struct cp_lkm_usb_base_dev* cpbdev, int mux_id)
{
    struct list_head *pos;
    list_for_each(pos, &cpbdev->cpdev_list){
        struct cp_lkm_usb_dev* cpdev = list_entry(pos, struct cp_lkm_usb_dev, list);
        //printk("%s() cpdev: %p, cpdev->mux_id: %d\n", __FUNCTION__, cpdev, cpdev->mux_id);
        if(cpdev->mux_id == mux_id) {
            return cpdev;
        }
    }
    return NULL;
}

static struct cp_lkm_usb_dev* cp_lkm_usb_find_dev(int uniqueid)
{
    struct list_head *bpos;
    struct list_head *pos;
    list_for_each(bpos, &cp_lkm_usb_mgr.dev_list){
        struct cp_lkm_usb_base_dev* cpbdev = list_entry(bpos, struct cp_lkm_usb_base_dev, list);
        list_for_each(pos, &cpbdev->cpdev_list){
            struct cp_lkm_usb_dev* cpdev = list_entry(pos, struct cp_lkm_usb_dev, list);
            if(cpdev->unique_id == uniqueid) {
                return cpdev;
            }
        }
    }
    return NULL;
}
936
937#define CP_LKM_DEV_MATCH_ALL 1
938#define CP_LKM_DEV_MATCH_BUS_ADDR_ONLY 2
939
940// Find base device from its bus, addr and unique id
941static struct cp_lkm_usb_base_dev* cp_lkm_usb_find_base_dev(int bus, int addr, int unique_id, int match)
942{
943 struct list_head *pos;
944 struct list_head *bpos;
945 list_for_each(bpos, &cp_lkm_usb_mgr.dev_list){
946 struct cp_lkm_usb_base_dev* cpbdev = list_entry(bpos, struct cp_lkm_usb_base_dev, list);
947 if(cpbdev->usb_bus == bus && cpbdev->usb_addr == addr) {
948 if (match == CP_LKM_DEV_MATCH_BUS_ADDR_ONLY) {
949 return cpbdev;
950 }
951 if (cpbdev->base_id == unique_id) {
952 //matches the base_id so don't need to look further
953 return cpbdev;
954 }
955 //look to see if matches the unique_id of one of the cpdevs (only hit this case when running clones)
956 list_for_each(pos, &cpbdev->cpdev_list){
957 struct cp_lkm_usb_dev* cpdev = list_entry(pos, struct cp_lkm_usb_dev, list);
958 if (cpdev->unique_id == unique_id) {
959 return cpbdev;
960 }
961 }
962 }
963 }
964 return NULL;
965}
966
967/*
968static struct cp_lkm_usb_dev* cp_lkm_usb_get_head_dev(void)
969{
970 struct list_head *bpos;
971 struct list_head *pos;
972 list_for_each(bpos, &cp_lkm_usb_mgr.dev_list){
973 struct cp_lkm_usb_base_dev* cpbdev = list_entry(bpos, struct cp_lkm_usb_base_dev, list);
974 list_for_each(pos, &cpbdev->cpdev_list){
975 struct cp_lkm_usb_dev* cpdev = list_entry(pos, struct cp_lkm_usb_dev, list);
976 return cpdev;
977 }
978 }
979 return NULL;
980}
981*/

// pause or unpause all cpdevs associated with this cpbdev
static void cp_lkm_usb_dev_pause(struct cp_lkm_usb_base_dev* cpbdev, bool pause)
{
    struct list_head *pos;

    list_for_each(pos, &cpbdev->cpdev_list){
        struct cp_lkm_usb_dev* cpdev = list_entry(pos, struct cp_lkm_usb_dev, list);
        if (pause) {
            if(cpdev->edi->pm_send_pause) {
                cpdev->edi->pm_send_pause(cpdev->edi->pm_recv_ctx);
                //cpdev->dbg_total_pause++;
            }
        }
        else{
            if (cpdev->edi->pm_send_resume) {
                //cpdev->dbg_total_resume++;
                cpdev->edi->pm_send_resume(cpdev->edi->pm_recv_ctx);
            }
        }
    }
    cpbdev->tx_paused = pause;
}

static void cp_lkm_usb_clean_list(struct sk_buff_head* list)
{
    struct sk_buff *skb;
    struct skb_data *entry;

    while((skb = skb_dequeue(list)) != NULL){
        DEBUG_TRACE("%s() found a straggler", __FUNCTION__);
        entry = (struct skb_data *) skb->cb;
        if(entry->urb) {
            //cp_lkm_usb_urb_cnt(-1);
            usb_free_urb (entry->urb);
        }
        //cp_lkm_usb_cnts(entry->state, -1);
        dev_kfree_skb_any(skb);
    }
}

static void cp_lkm_usb_mark_as_dead(struct cp_lkm_usb_dev* cpdev)
{
    cpdev->edi->usb_send_ctx = NULL;
    if(cpdev->state != CP_LKM_USB_DEAD) {
        LOG("Device with id:%d unplugged", cpdev->unique_id);
    }
    cpdev->state = CP_LKM_USB_DEAD;
}

static void cp_lkm_usb_mark_base_as_dead(struct cp_lkm_usb_base_dev* cpbdev)
{
    cpbdev->base_state = CP_LKM_USB_DEAD;
}

static struct cp_lkm_base_ep* cp_lkm_usb_get_bep(struct cp_lkm_usb_base_dev* cpbdev, int ep_num)
{
    struct cp_lkm_base_ep* bep = NULL;
    struct list_head *entry, *nxt, *head;

    if(USB_DIR_IN & ep_num) {
        //printk("%s() search IN list for ep_num: 0x%x\n", __FUNCTION__, ep_num);
        head = &cpbdev->in_bep_list;
    }
    else{
        //printk("%s() search OUT list for ep_num: 0x%x\n", __FUNCTION__, ep_num);
        head = &cpbdev->out_bep_list;
    }

    list_for_each_safe(entry, nxt, head) {
        bep = list_entry(entry, struct cp_lkm_base_ep, list);
        if (bep->ep_num == ep_num) {
            //printk("%s() found ep_num: %d\n", __FUNCTION__, ep_num);
            return bep;
        }
    }
    //printk("%s() didn't find ep_num: %d\n", __FUNCTION__,ep_num);

    return NULL;
}

static struct cp_lkm_ep* cp_lkm_usb_get_ep(struct cp_lkm_usb_dev* cpdev, int ep_num)
{
    struct cp_lkm_ep* ep = NULL;
    struct list_head *entry, *nxt, *head;

    if(USB_DIR_IN & ep_num) {
        //printk("%s() search IN list for ep_num: 0x%x\n", __FUNCTION__, ep_num);
        head = &cpdev->in_ep_list;
    }
    else{
        //printk("%s() search OUT list for ep_num: 0x%x\n", __FUNCTION__, ep_num);
        head = &cpdev->out_ep_list;
    }

    list_for_each_safe(entry, nxt, head) {
        ep = list_entry(entry, struct cp_lkm_ep, list_cpdev);
        if (ep->ep_num == ep_num) {
            //printk("%s() found ep_num: %d\n", __FUNCTION__, ep_num);
            return ep;
        }
    }
    //printk("%s() didn't find ep_num: %d\n", __FUNCTION__,ep_num);

    return NULL;
}

static void cp_lkm_usb_bep_finalize(void *arg)
{
    struct cp_lkm_base_ep* bep = (struct cp_lkm_base_ep*)arg;
    struct list_head *entry, *nxt;
    struct cp_lkm_ep *ep;

    //printk("%s() start\n", __FUNCTION__);
    //todo remove
    //del_timer_sync(&cpdev->dbg_timer);

    //printk("%s() - free eps\n",__FUNCTION__);
    list_for_each_safe(entry, nxt, &bep->eps) {
        ep = list_entry(entry, struct cp_lkm_ep, list_bep);
        //printk("%s() - free ep: %p from bep: %p\n",__FUNCTION__,ep,bep);
        list_del(&ep->list_bep);
        memref_deref(ep);
    }

}

static void cp_lkm_usb_ep_finalize(void *arg)
{
    //struct cp_lkm_ep* ep = (struct cp_lkm_ep*)arg;
    //printk("%s() - free ep: %p, ep_num: 0x%x\n",__FUNCTION__,arg ,ep->ep_num);
}

static struct cp_lkm_ep* cp_lkm_usb_create_ep(struct cp_lkm_usb_dev* cpdev, int ep_num)
{
    struct cp_lkm_ep* ep;
    struct cp_lkm_base_ep* bep;
    struct cp_lkm_usb_base_dev* cpbdev;

    DEBUG_ASSERT(cpdev, "cpdev is null");
    cpbdev = cpdev->cpbdev;
    DEBUG_ASSERT(cpbdev, "cpbdev is null");

    //see if it already exists first
    ep = cp_lkm_usb_get_ep(cpdev, ep_num);
    if(ep) {
        DEBUG_TRACE("%s() ep: %p already exists", __FUNCTION__, ep);
        //printk("%s() ep: 0x%x already exists\n", __FUNCTION__, ep_num);
        return ep;
    }
    //printk("%s() - create new ep, cpdev: %p, ep_num: 0x%x\n",__FUNCTION__,cpdev, ep_num);

    //Need to create a new ep and possibly a new bep. We will alloc and init everything first, and
    //then if that all works, we will put everything in its proper place (in lists and such)
    ep = memref_alloc_and_zero(sizeof(struct cp_lkm_ep), cp_lkm_usb_ep_finalize);
    if(!ep) {
        DEBUG_ERROR("%s() failed to alloc new ep", __FUNCTION__);
        return NULL;
    }
    INIT_LIST_HEAD(&ep->list_bep);
    INIT_LIST_HEAD(&ep->list_cpdev);
    ep->ep_num = ep_num;

    //may need to create a new base ep if this is the first time we've seen this endpoint number and direction
    //this is always the case for non-cloned interfaces
    bep = cp_lkm_usb_get_bep(cpbdev, ep_num);
    if (!bep) {
        bep = memref_alloc_and_zero(sizeof(struct cp_lkm_base_ep), cp_lkm_usb_bep_finalize);
        if(!bep) {
            DEBUG_ERROR("%s() failed to alloc new bep", __FUNCTION__);
            memref_deref(ep);
            return NULL;
        }
        //printk("%s() - create new bep: %p, cpbdev: %p, ep_num: 0x%x\n",__FUNCTION__,bep, cpbdev, ep_num);
        bep->ep_num = ep_num;
        bep->cpbdev = cpbdev;
        INIT_LIST_HEAD(&bep->list);
        INIT_LIST_HEAD(&bep->eps);
        if(USB_DIR_IN & ep_num) {
            list_add_tail(&bep->list, &cpbdev->in_bep_list);
        }
        else{
            list_add_tail(&bep->list, &cpbdev->out_bep_list);
        }
    }

    //if we get here, everything alloc'd ok, so we can insert things in the lists

    // Each ep will have two memrefs: one from the alloc, which is for entry in the cpdev list,
    // and another for entry into the bep list. This way the ep won't be freed until it is removed
    // from both lists at unplug time
    ep->cpdev = cpdev;
    ep->bep = bep;
    if(USB_DIR_IN & ep_num) {
        list_add_tail(&ep->list_cpdev, &cpdev->in_ep_list);
    }
    else{
        list_add_tail(&ep->list_cpdev, &cpdev->out_ep_list);
    }
    memref_ref(ep);
    list_add_tail(&ep->list_bep, &bep->eps);
    return ep;

}

// cp_lkm_usb_plug_intf is called by cpusb via the ioctl. It registers a driver for the interface.
// This function is then called by the lower usb layer so we can claim that interface.
int cp_lkm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
    struct cp_lkm_usb_base_dev* cpbdev;
    struct usb_device* udev;
    struct usb_host_interface* interface;
    int unique_id;
    //unsigned long flags;
    int rc;
    uintptr_t tmp_uid;

    usb_get_intf(intf);

    //printk("%s()\n",__FUNCTION__);

    udev = interface_to_usbdev (intf);
    interface = intf->cur_altsetting;

    unique_id = (int)id->driver_info;
    tmp_uid = unique_id;
    spin_lock(&cp_lkm_usb_mgr.lock);

    // Error scenario to watch for here:
    // 1. Device unplugs and replugs before the upper app detects the unplug and calls our unplug_intf. In
    //    this case this driver is still registered and will get the new probe (we don't want this, we want the app driver
    //    to get the plug and claim the device originally). When disconnect happens we set the state to DEAD. If we get
    //    a probe on a dead device, don't take it.
    cpbdev = cp_lkm_usb_find_base_dev(udev->bus->busnum, udev->devnum, unique_id, CP_LKM_DEV_MATCH_ALL);
    if(!cpbdev || cpbdev->base_state == CP_LKM_USB_DEAD) {
        spin_unlock(&cp_lkm_usb_mgr.lock);

        DEBUG_TRACE("%s() no cpdev or already dead", __FUNCTION__);
        return -ENXIO;
    }

    //make sure it is for our device (match the usb addresses)
    //printk("%s() id: %d ouraddr:%d, probeaddr:%d, ourintf:%d, probeintf:%d!\n", __FUNCTION__, unique_id,
    //        cpbdev->usb_addr,udev->devnum,cpbdev->intf_num,interface->desc.bInterfaceNumber);
    if(cpbdev->usb_bus != udev->bus->busnum || cpbdev->usb_addr != udev->devnum || cpbdev->intf_num != interface->desc.bInterfaceNumber) {
        spin_unlock(&cp_lkm_usb_mgr.lock);

        DEBUG_TRACE("%s() reject ourbus: %d, probebus: %d, ouraddr:%d, probeaddr:%d, ourintf:%d, probeintf:%d!", __FUNCTION__,
                     cpbdev->usb_bus, udev->bus->busnum, cpbdev->usb_addr,udev->devnum,cpbdev->intf_num,interface->desc.bInterfaceNumber);
        return -ENXIO;
    }
    cpbdev->intf = intf;
    cpbdev->udev = udev;

    spin_unlock(&cp_lkm_usb_mgr.lock);

    if(cpbdev->alt_intf_num) {
        rc = usb_set_interface(udev, cpbdev->intf_num, cpbdev->alt_intf_num);
        if(rc) {
            DEBUG_ERROR("%s() set intf failed :%d", __FUNCTION__,rc);
            cpbdev->plug_result = -1; //only set this on failure, not reject
            return -1;
        }
    }

    spin_lock(&cp_lkm_usb_mgr.lock);
    cpbdev->base_state = CP_LKM_USB_CTRL;

    usb_set_intfdata(intf, (void*)tmp_uid);
    usb_get_dev (udev);
    memref_ref(cpbdev);
    spin_unlock(&cp_lkm_usb_mgr.lock);

    cp_lkm_usb_stuck_check(cpbdev, CP_LKM_STUCK_INIT);

    //throughput control stuff
    cpbdev->rx_schedule_threshold = CP_LKM_USB_RX_SCHED_CNT;
    cpbdev->tx_schedule_threshold = CP_LKM_USB_TX_SCHED_CNT;
    cpbdev->tx_resume_threshold = CP_LKM_USB_TX_RESUME_Q_PKTS;


    //todo remove
    //if (!dbg_memleak_timer_started) {
    //    dbg_memleak_timer_started = 1;
    //    dbg_memleak_timer.function = cp_lkm_usb_dbg_memleak_timer;
    //    dbg_memleak_timer.data = 0;

    //    init_timer(&dbg_memleak_timer);
    //    mod_timer(&dbg_memleak_timer, jiffies + msecs_to_jiffies(20000));
    //}
    //if (dbg_state_init == 0) {
    //    spin_lock_init(&dbg_state_lock);
    //    dbg_state_init = 1;
    //}



    DEBUG_TRACE("%s() probe done", __FUNCTION__);
    return 0;
}

static bool cp_lkm_usb_shuter_down_do_pm_unlink(void* ctx1, void* ctx2)
{
    struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev*)ctx1;
    struct cp_lkm_usb_dev* cpdev;
    struct list_head *pos;
    unsigned long flags;
    //Unlink from the pm and disable the data state machine
    bool done = false;
    spin_lock_irqsave(&cpbdev->processing_state_lock, flags);
    if(cpbdev->processing_state == USB_PROCESS_STATE_IDLE){
        cpbdev->processing_state = USB_PROCESS_STATE_PAUSED; //data soft interrupt handlers now won't run

        spin_lock(&cpbdev->data_q_lock);
        cpbdev->data_q_len = CP_LKM_USB_PAUSED_CNT;
        spin_unlock(&cpbdev->data_q_lock); //usb hw interrupts now won't schedule soft interrupt handlers

        spin_unlock_irqrestore(&cpbdev->processing_state_lock, flags); //release lock so interrupts can resume firing
        //unlink the pm side for all cpdevs associated with this cpbdev. Once this returns we are guaranteed not to get any new xmit skb's from the pm
        list_for_each(pos, &cpbdev->cpdev_list){
            cpdev = list_entry(pos, struct cp_lkm_usb_dev, list);
            LOG("Unlink cpdev: %p from pm", cpdev);
            cp_lkm_pm_usb_link(cpdev->edi, cpdev->pm_id, 0);
            cpdev->edi->usb_send_ctx = NULL;
        }

        spin_lock_irqsave(&cpbdev->processing_state_lock, flags);
        done = true;
    }
    spin_unlock_irqrestore(&cpbdev->processing_state_lock, flags);
    return done;
}

static bool cp_lkm_usb_shuter_down_do_other_tasklet(void* ctx1, void* ctx2)
{
    struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev*)ctx1;
    unsigned long flags;
    bool done = false;
    spin_lock_irqsave(&cpbdev->other_state_lock, flags);
    if(cpbdev->other_state == USB_PROCESS_STATE_IDLE){
        cpbdev->other_state = USB_PROCESS_STATE_PAUSED;
        done = true;
    }
    spin_unlock_irqrestore(&cpbdev->other_state_lock, flags);
    return done;
}

static bool cp_lkm_usb_shuter_down_do_empty_queues(void* ctx1, void* ctx2)
{
    struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev*)ctx1;
    bool done = false;

    if (skb_queue_empty(&cpbdev->in_q) &&
        skb_queue_empty(&cpbdev->out_q) &&
        skb_queue_empty(&cpbdev->ctrlq)){
        done = true;
    }
    return done;
}

static void cp_lkm_usb_shuter_down(struct cp_lkm_usb_base_dev* cpbdev)
{
    struct list_head *entry, *nxt;
    struct cp_lkm_base_ep *bep;


    //printk("%s() start\n", __FUNCTION__);

    //Unlink from the pm and disable the data state machine
    LOG("Unlink cpdev from pm");
    cp_lkm_do_or_die(cpbdev, NULL, cp_lkm_usb_shuter_down_do_pm_unlink, CP_LKM_TIMEOUT_MS, CP_LKM_ITER, "Failed to unlink pm from cpdev");

    //disable the 'other' tasklet
    LOG("Disable cpdev other tasklet");
    cp_lkm_do_or_die(cpbdev, NULL, cp_lkm_usb_shuter_down_do_other_tasklet, CP_LKM_TIMEOUT_MS, CP_LKM_ITER, "Failed to shutdown cpdev other tasklet");

    //Once we get here no xmits can happen and no recv or xmit done processing can happen, so no new kevents can be scheduled,
    //so we can stop them here.
    //clear all the flags before flushing the kevents so that we won't try to do anything during the kevent callback
    list_for_each_safe(entry, nxt, &cpbdev->in_bep_list) {
        bep = list_entry(entry, struct cp_lkm_base_ep, list);
        bep->err_flags = 0;
        bep->con_flags = 0;
    }
    list_for_each_safe(entry, nxt, &cpbdev->out_bep_list) {
        bep = list_entry(entry, struct cp_lkm_base_ep, list);
        bep->err_flags = 0;
        bep->con_flags = 0;
    }

    //This forces the kernel to run all scheduled kevents, so any of our pending ones will run. (Note: Make sure
    //our kevent handlers check to see if we are attached before doing anything so that we don't schedule anything new while
    //shutting down)
    LOG("Cancel cpdev kevents");
    cancel_work_sync(&cpbdev->kevent);

    //Make sure all the urbs have been cancelled
    // ensure there are no more active urbs
    //set_current_state(TASK_UNINTERRUPTIBLE);
    //these cause the urbs to be cancelled and the callbacks to be called. The urbs are removed from
    //the queues in the callbacks.
    cp_lkm_usb_unlink_urbs (cpbdev, &cpbdev->out_q, NULL);
    cp_lkm_usb_unlink_urbs (cpbdev, &cpbdev->in_q, NULL);
    cp_lkm_usb_unlink_urbs (cpbdev, &cpbdev->ctrlq, NULL);

    LOG("Wait for all cpdev urbs to be returned");
    cp_lkm_do_or_die(cpbdev, NULL, cp_lkm_usb_shuter_down_do_empty_queues, CP_LKM_TIMEOUT_MS, CP_LKM_ITER, "Failed to empty cpdev queues");

    //shutdown timer and tasklets
    LOG("Shutdown cpdev timers and tasklets");
    del_timer_sync (&cpbdev->rx_delay);
    cp_lkm_usb_stuck_check(cpbdev, CP_LKM_STUCK_DEINIT);

    tasklet_kill(&cpbdev->data_process_tasklet);
    tasklet_kill(&cpbdev->other_process_tasklet);

    // All outstanding transfers are back, so now we can clean up.
    cp_lkm_usb_clean_list(&cpbdev->data_tx_done);
    cp_lkm_usb_clean_list(&cpbdev->data_rx_done);
    cp_lkm_usb_clean_list(&cpbdev->other_done);

    //printk("%s() done\n", __FUNCTION__);
    usb_set_intfdata(cpbdev->intf, NULL);
    usb_put_intf(cpbdev->intf);
    cpbdev->intf = NULL;
    LOG("cpdev unplug done");

    return;

}

// Called when the USB hub detects that our device just unplugged.
// Called in a thread context. We do the lower usb cleanup here because there
// are some things that have to be done before exiting from disconnect.
// We don't clean up the upper layer stuff because the upper layer doesn't yet know
// we are unplugged and will continue to send us data. When the upper layer gets the
// unplug notify, it will call cp_lkm_usb_unplug_intf. We finish cleaning up in there.
void cp_lkm_usb_disconnect(struct usb_interface *intf)
{
    struct cp_lkm_usb_dev* cpdev;
    struct cp_lkm_usb_base_dev* cpbdev;
    //unsigned long flags;
    int unique_id;

    // We don't want this function to run at the same time as any of the calls from the modem common stack (ioctl and write).
    // They all grab this lock for the duration of their calls. They also check the state of the device before proceeding.
    // Once we have the lock, we know none of them are running. Any new calls will block waiting on the lock.
    // If we then change the state to dead we can release the lock while we do the rest of cleanup. When they get the lock
    // they will see the state is dead and error out and return immediately. This prevents us from blocking the common modem thread.
    spin_lock(&cp_lkm_usb_mgr.lock);

    //If cpdev is not in intf, then this is the close->disconnect path, so do nothing
    unique_id = (uintptr_t)usb_get_intfdata(intf);

    //struct usb_device *udev;
    //printk("%s() start, id: %d\n", __FUNCTION__, unique_id);

    //see if the device already went away, this should be impossible
    //the unique id is always for the first instance if running clones
    cpdev = cp_lkm_usb_find_dev(unique_id);
    if(!cpdev) {
        //printk("%s() no cpdev, id: %d\n", __FUNCTION__, unique_id);
        spin_unlock(&cp_lkm_usb_mgr.lock);
        return;
    }
    cpbdev = cpdev->cpbdev;
    cpbdev->disconnect_wait = true;

    // Mark the device as dead so we won't start anything new.
    // NOTE: make sure nothing new can be started on the USB side from this point on.
    //       This includes transmits from the network. Transmits from cpusb.
    //       Recv packets, halt clears, ioctls etc
    cp_lkm_usb_mark_base_as_dead(cpbdev);

    // Once the device is marked dead, we can release the semaphore. This is so write and ioctl from the modem stack
    // can return quickly with errors instead of blocking while the disconnect completes.
    spin_unlock(&cp_lkm_usb_mgr.lock);

    cp_lkm_usb_shuter_down(cpbdev);

    cpbdev->disconnect_wait = false;
    memref_deref(cpbdev);

    //printk("%s() done id: %d\n", __FUNCTION__,unique_id);
}

static void cp_lkm_usb_base_dev_finalize(void *arg)
{
    struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev*)arg;
    struct list_head *entry, *nxt;
    struct cp_lkm_base_ep *bep;
    //int unique_id = cpbdev->base_id;
    //printk("%s()\n", __FUNCTION__);

    //if it was added to the list, we need to remove it.
    if(cpbdev->list.next != &cpbdev->list) {
        spin_lock(&cp_lkm_usb_mgr.lock);
        list_del(&cpbdev->list);
        //printk("%s() free cpbdev from global list \n", __FUNCTION__);
        spin_unlock(&cp_lkm_usb_mgr.lock);
    }

    //These should already be empty, but just in case
    //printk("%s() clean lists\n", __FUNCTION__);
    cp_lkm_usb_clean_list(&cpbdev->in_q);
    cp_lkm_usb_clean_list(&cpbdev->out_q);
    cp_lkm_usb_clean_list(&cpbdev->ctrlq);
    cp_lkm_usb_clean_list(&cpbdev->data_tx_done);
    cp_lkm_usb_clean_list(&cpbdev->data_rx_done);
    cp_lkm_usb_clean_list(&cpbdev->other_done);

    if(cpbdev->wrapper_ctxt) {
        //printk("%s() free wrapper\n", __FUNCTION__);
        cp_lkm_wrapper_instance_free(cpbdev->wrapper_ctxt);
        cpbdev->wrapper_ctxt = NULL;
    }
    if(cpbdev->usb_driver) {
        //printk("%s() free driver\n", __FUNCTION__);
        kfree(cpbdev->usb_driver);
        cpbdev->usb_driver = NULL;
    }
    if(cpbdev->usb_id_table) {
        //printk("%s() free id table\n", __FUNCTION__);
        kfree(cpbdev->usb_id_table);
        cpbdev->usb_id_table = NULL;
    }
    if(cpbdev->udev) {
        //printk("%s() free udev\n", __FUNCTION__);
        usb_put_dev (cpbdev->udev);
        cpbdev->udev = NULL;
    }

    //printk("%s() - free cpdevs and beps\n",__FUNCTION__);
    list_for_each_safe(entry, nxt, &cpbdev->cpdev_list) {
        struct cp_lkm_usb_dev* cpdev = list_entry(entry, struct cp_lkm_usb_dev, list);
        //printk("%s() - free cpdev: %p from cpbdev: %p\n",__FUNCTION__, cpdev, cpbdev);
        list_del(&cpdev->list);
        memref_deref(cpdev);
    }
    list_for_each_safe(entry, nxt, &cpbdev->in_bep_list) {
        bep = list_entry(entry, struct cp_lkm_base_ep, list);
        //printk("%s() - free in bep: %p from cpbdev: %p\n",__FUNCTION__,bep, cpbdev);
        list_del(&bep->list);
        memref_deref(bep);
    }
    list_for_each_safe(entry, nxt, &cpbdev->out_bep_list) {
        bep = list_entry(entry, struct cp_lkm_base_ep, list);
        //printk("%s() - free out bep: %p from cpbdev: %p\n",__FUNCTION__,bep, cpbdev);
        list_del(&bep->list);
        memref_deref(bep);
    }
    //printk("%s() done base_id: %d\n", __FUNCTION__,unique_id);

}
1536
1537static void cp_lkm_usb_dev_finalize(void *arg)
1538{
1539 struct cp_lkm_usb_dev* cpdev = (struct cp_lkm_usb_dev*)arg;
1540 struct list_head *entry, *nxt;
1541 struct cp_lkm_ep *ep;
1542
1543 //printk("%s() start\n", __FUNCTION__);
1544 //todo remove
1545 //del_timer_sync(&cpdev->dbg_timer);
1546
1547 //printk("%s() - free eps\n",__FUNCTION__);
1548 list_for_each_safe(entry, nxt, &cpdev->in_ep_list) {
1549 ep = list_entry(entry, struct cp_lkm_ep, list_cpdev);
1550 //printk("%s() - free ep: %p, num: %d from cpdev: %p\n",__FUNCTION__,ep, ep->ep_num, cpdev);
1551 list_del(&ep->list_cpdev);
1552 memref_deref(ep);
1553 }
1554 list_for_each_safe(entry, nxt, &cpdev->out_ep_list) {
1555 ep = list_entry(entry, struct cp_lkm_ep, list_cpdev);
1556 //printk("%s() - free ep: %p, num: %d from cpdev: %p\n",__FUNCTION__,ep, ep->ep_num, cpdev);
1557 list_del(&ep->list_cpdev);
1558 memref_deref(ep);
1559 }
1560
1561 if(cpdev->edi) {
1562 //printk("%s() free edi\n", __FUNCTION__);
1563 cpdev->edi->usb_send_ctx = NULL;
1564 cpdev->edi->usb_send = NULL;
1565
1566 memref_deref(cpdev->edi);
1567 cpdev->edi = NULL;
1568 }
1569
1570 //printk("%s() end \n", __FUNCTION__);
1571}
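
/*
 * Editor's sketch (illustrative, not part of the driver): both finalizers
 * above lean on list_for_each_safe(), which caches the next pointer before
 * the loop body runs, so list_del() on the current entry can't corrupt the
 * walk. A minimal generic form of that teardown loop (example_node is
 * hypothetical):
 */
#if 0
struct example_node {
    struct list_head list;
};

static void example_list_teardown(struct list_head *head)
{
    struct list_head *entry, *nxt;

    list_for_each_safe(entry, nxt, head) {
        struct example_node* node = list_entry(entry, struct example_node, list);
        list_del(&node->list); //safe: nxt was captured before this
        kfree(node);
    }
}
#endif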

static int cp_lkm_usb_plug_intf(struct cp_lkm_usb_plug_intf* pi)
{
    int retval;
    struct cp_lkm_usb_dev* cpdev = NULL;
    struct cp_lkm_usb_base_dev* cpbdev = NULL;
    bool need_new;
    bool is_cloneable;

    //Make sure we aren't going to overflow the skb space reserved for us to use
    //DEBUG_ASSERT(sizeof(struct skb_data) < sizeof(((struct sk_buff*)0)->cb));
    //DEBUG_INFO("%s(), skb_data size: %d, skb_buff cb size: %d",__FUNCTION__,sizeof(struct skb_data),sizeof(((struct sk_buff*)0)->cb));

    // We need to alloc a new cpbdev on plug if:
    // 1. The device is not cloned at this layer (thus each plug has its own cpbdev)
    //    Note: Some devices are cloned at other layers (cpusb_linux.c), so they can be running as clones in the system, but not at this layer.
    //    This is why we can't just look at the clone_num to determine.
    // 2. It is cloneable and clone_num is 0 (only the first clone gets a new cpbdev, the rest share it)
    is_cloneable = pi->feature_flags & CP_LKM_FEATURE_CLONE_MUXED_INTF;
    need_new = !is_cloneable || (is_cloneable && pi->clone_num == 0);

    //printk("%s() start id:%d vid/pid: 0x%x/0x%x, bus/addr: %d/%d, intf: %d, flags: 0x%x, clone: %d, mux: %d\n", __FUNCTION__, pi->unique_id, pi->vid, pi->pid, pi->bus, pi->addr, pi->intf_num, pi->feature_flags, pi->clone_num, pi->mux_id);

    if (need_new) {
        //first instance, so need a new cpbdev
        cpbdev = memref_alloc_and_zero(sizeof(struct cp_lkm_usb_base_dev), cp_lkm_usb_base_dev_finalize);
        if(!cpbdev) {
            //printk("%s() failed to alloc cpbdev\n", __FUNCTION__);
            goto init_fail;
        }
        //printk("%s() id: %d, alloc'd new cpbdev: %p\n", __FUNCTION__, pi->unique_id, cpbdev);
        cpbdev->base_state = CP_LKM_USB_INIT;
        cpbdev->vid = pi->vid;
        cpbdev->pid = pi->pid;
        cpbdev->intf_num = pi->intf_num;
        cpbdev->alt_intf_num = pi->alt_intf_num;
        cpbdev->usb_bus = pi->bus;
        cpbdev->usb_addr = pi->addr;
        cpbdev->feature_flags = pi->feature_flags;
        cpbdev->base_id = pi->unique_id;
        INIT_LIST_HEAD(&cpbdev->in_bep_list);
        INIT_LIST_HEAD(&cpbdev->out_bep_list);
        INIT_LIST_HEAD(&cpbdev->list);
        INIT_LIST_HEAD(&cpbdev->cpdev_list);
        cpbdev->data_in_bep_num = pi->ep_in;
        cpbdev->data_out_bep_num = pi->ep_out;

        //alloc and register the usb driver
        cpbdev->usb_driver = kzalloc(sizeof(struct usb_driver), GFP_KERNEL);
        if(!cpbdev->usb_driver) {
            //printk("%s() failed to alloc driver\n", __FUNCTION__);
            goto init_fail;
        }

        cpbdev->usb_id_table = kzalloc(sizeof(struct usb_device_id)*2, GFP_KERNEL);
        if(!cpbdev->usb_id_table) {
            //printk("%s() failed to alloc table\n", __FUNCTION__);
            goto init_fail;
        }

        cpbdev->usb_id_table[0].idVendor = cpbdev->vid;
        cpbdev->usb_id_table[0].idProduct = cpbdev->pid;
        cpbdev->usb_id_table[0].match_flags = USB_DEVICE_ID_MATCH_DEVICE;
        cpbdev->usb_id_table[0].driver_info = (unsigned long)pi->unique_id;

        //create unique drvr string
        sprintf(cpbdev->usb_drvr_name, USB_DRVR_FRMT_STR, pi->unique_id);
        cpbdev->usb_driver->name = cpbdev->usb_drvr_name;
        cpbdev->usb_driver->probe = cp_lkm_usb_probe;
        cpbdev->usb_driver->disconnect = cp_lkm_usb_disconnect;
        cpbdev->usb_driver->id_table = cpbdev->usb_id_table;


        skb_queue_head_init (&cpbdev->in_q);
        skb_queue_head_init (&cpbdev->out_q);
        skb_queue_head_init (&cpbdev->ctrlq);
        skb_queue_head_init (&cpbdev->data_tx_done);
        skb_queue_head_init (&cpbdev->data_rx_done);
        skb_queue_head_init (&cpbdev->other_done);
        cpbdev->data_q_len = 0;
        spin_lock_init(&cpbdev->data_q_lock);
        spin_lock_init(&cpbdev->processing_state_lock);
        spin_lock_init(&cpbdev->other_state_lock);
        cpbdev->processing_state = USB_PROCESS_STATE_IDLE;
        cpbdev->other_state = USB_PROCESS_STATE_IDLE;
        INIT_WORK(&cpbdev->kevent, cp_lkm_usb_kevent);
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(4,4,100))
        cpbdev->rx_delay.function = cp_lkm_usb_delay_timer; //TODO: this needs to handle the cpdev or cpbdev??
        cpbdev->rx_delay.data = (unsigned long) cpbdev; //????? should this be cpdev??
        init_timer (&cpbdev->rx_delay);
#else
        timer_setup(&cpbdev->rx_delay, cp_lkm_usb_delay_timer, 0);
#endif
        cpbdev->data_process_tasklet.func = cp_lkm_usb_process_data_done_tasklet; //TODO: modify to take cpbdev
        cpbdev->data_process_tasklet.data = (unsigned long) cpbdev;

        cpbdev->other_process_tasklet.func = cp_lkm_usb_process_other_done_tasklet; //TODO: modify to take cpbdev
        cpbdev->other_process_tasklet.data = (unsigned long) cpbdev;

        cpbdev->disconnect_wait = false;

        spin_lock(&cp_lkm_usb_mgr.lock);
        list_add_tail(&cpbdev->list, &cp_lkm_usb_mgr.dev_list);
        spin_unlock(&cp_lkm_usb_mgr.lock);

        // When we call register, it calls our probe function with all available matching interfaces. In probe
        // we save the result of the probe so we can return fail here if it didn't go well
        //printk("%s() reg drvr for vid:%x, pid:%x, addr:%d, intf:%d\n", __FUNCTION__, pi->vid,pi->pid,pi->addr,pi->intf_num);
        retval = usb_register(cpbdev->usb_driver);
        if(retval || cpbdev->plug_result != 0) {
            //printk("%s() failed to register driver or probe failed retval:%d, plug_result:%d\n", __FUNCTION__, retval, cpbdev->plug_result);
            goto init_fail;
        }
        cpbdev->base_state = CP_LKM_USB_CTRL;
        DEBUG_TRACE("%s() done", __FUNCTION__);
    }
    else{
        //clone, should already have a base dev
        cpbdev = cp_lkm_usb_find_base_dev(pi->bus, pi->addr, pi->unique_id, CP_LKM_DEV_MATCH_BUS_ADDR_ONLY);
        if(!cpbdev) {
            //printk("%s() failed to find cpbdev\n", __FUNCTION__);
            goto init_fail;
        }
        //printk("%s() id: %d, already have cpbdev: %p\n", __FUNCTION__, pi->unique_id, cpbdev);
    }

    // make sure base dev has all the feature flags of every clone
    cpbdev->feature_flags |= pi->feature_flags;

    //printk("%s() id: %d, cpbdev: %p, alloc new cpdev\n", __FUNCTION__, pi->unique_id, cpbdev);
    cpdev = memref_alloc_and_zero(sizeof(struct cp_lkm_usb_dev), cp_lkm_usb_dev_finalize);
    if(!cpdev) {
        //printk("%s() failed to alloc cpdev\n", __FUNCTION__);
        goto init_fail;
    }
    //printk("%s() id: %d, cpdev: %p\n", __FUNCTION__, pi->unique_id, cpdev);

    INIT_LIST_HEAD(&cpdev->in_ep_list);
    INIT_LIST_HEAD(&cpdev->out_ep_list);
    INIT_LIST_HEAD(&cpdev->list);
    //add to list right away so if anything below fails, it will be cleaned up when cpbdev is cleaned up
    list_add_tail(&cpdev->list, &cpbdev->cpdev_list);
    cpdev->cpbdev = cpbdev;
    cpdev->unique_id = pi->unique_id;
    //clone and mux are only used with muxed clone interfaces.
    cpdev->clone_num = (pi->feature_flags & CP_LKM_FEATURE_CLONE_MUXED_INTF) ? pi->clone_num : 0;
    cpdev->mux_id = (pi->feature_flags & CP_LKM_FEATURE_CLONE_MUXED_INTF) ? pi->mux_id : CP_LKM_WRAPPER_DEFAULT_ID;
    //printk("%s() unique_id: %d, clone: %d, mux_id: %d\n", __FUNCTION__, pi->unique_id, pi->clone_num, cpdev->mux_id);
    cpdev->data_in_ep_num = pi->ep_in;
    cpdev->data_out_ep_num = pi->ep_out;
    //pre-create the data endpoints so they will be first in the list, since they are most often used
    cp_lkm_usb_create_ep(cpdev, pi->ep_in);
    cp_lkm_usb_create_ep(cpdev, pi->ep_out);
    cpdev->edi = memref_alloc_and_zero(sizeof(struct cp_lkm_edi), NULL);
    if(!cpdev->edi) {
        //printk("%s() failed to alloc edi\n", __FUNCTION__);
        goto init_fail;
    }
    cpdev->edi->usb_send = cp_lkm_usb_start_xmit;

    //for debug, comment out before checkin
    //cpdev->dbg_timer.function = cp_lkm_usb_dbg_timer;
    //cpdev->dbg_timer.data = (unsigned long)cpdev;
    //init_timer(&cpdev->dbg_timer);
    //mod_timer(&cpdev->dbg_timer, jiffies + msecs_to_jiffies(10000));

    //TODO CA: I think this shouldn't be set until open, commenting out for now to see if blows chow in plug fest
    //cpdev->edi->usb_send_ctx = cpdev;

    cpdev->state = CP_LKM_USB_CTRL;

    //printk("%s() done success id: %d\n", __FUNCTION__, pi->unique_id);

    return 0;

init_fail:
    if(cpbdev) {
        //the finalizer for cpbdev does the clean up
        memref_deref(cpbdev);
    }
    //returning an error to the modem stack on plug will cause it to hard reset
    //the modem, thus causing the rest of the driver cleanup to occur
    //printk("%s() open_intf fail\n", __FUNCTION__);
    return -1;
}
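
/*
 * Editor's sketch (illustrative, not part of the driver): the plug path above
 * heap-allocates a usb_driver and a two-entry id table (the zeroed second
 * entry is the terminator) so each plugged interface gets a uniquely named
 * driver matching exactly one VID/PID. Condensed here using stock USB core
 * APIs; the example_-prefixed names are hypothetical:
 */
#if 0
static struct usb_driver* example_build_driver(u16 vid, u16 pid, int unique_id, char* name_buf)
{
    struct usb_driver* drv = kzalloc(sizeof(struct usb_driver), GFP_KERNEL);
    struct usb_device_id* ids = kzalloc(sizeof(struct usb_device_id)*2, GFP_KERNEL);

    if (!drv || !ids) {
        kfree(drv);
        kfree(ids);
        return NULL;
    }
    ids[0].match_flags = USB_DEVICE_ID_MATCH_DEVICE;
    ids[0].idVendor = vid;
    ids[0].idProduct = pid;
    ids[0].driver_info = (unsigned long)unique_id; //handed back to probe()

    sprintf(name_buf, "example_drvr_%d", unique_id); //name_buf must outlive the driver
    drv->name = name_buf;
    drv->probe = cp_lkm_usb_probe;
    drv->disconnect = cp_lkm_usb_disconnect;
    drv->id_table = ids;
    return drv; //caller then calls usb_register(drv) and checks the probe result
}
#endif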

static int cp_lkm_usb_set_wrapper(struct cp_lkm_usb_set_wrapper* sw)
{ //unsigned long flags;
    struct cp_lkm_usb_dev* cpdev;
    struct cp_lkm_usb_base_dev* cpbdev;
    void* wrapper_info = NULL;
    unsigned long not_copied;
    int res = 0;
    //printk("%s() unique_id: %d, clone: %d, mux_id: %d\n", __FUNCTION__, sw->unique_id, sw->clone_num, sw->mux_id);

    spin_lock(&cp_lkm_usb_mgr.lock);
    cpdev = cp_lkm_usb_find_dev(sw->unique_id);

    if(!cpdev) {
        spin_unlock(&cp_lkm_usb_mgr.lock);
        //printk("%s() no cpdev found for id: %d\n", __FUNCTION__, sw->unique_id);
        return -1;
    }
    cpbdev = cpdev->cpbdev;
    if(cpbdev->base_state == CP_LKM_USB_DEAD){
        //modem is unplugging, upper layer just doesn't know it yet, so act like ok until it finds out
        spin_unlock(&cp_lkm_usb_mgr.lock);
        //printk("%s() set_wrapper fail cpdev:%p, state:%d\n", __FUNCTION__, cpdev, cpdev->state);
        return 0;
    }

// benk - what if wrapper_info_len is 0???
    if(cpbdev->wrapper_ctxt){
        //already have a wrapper so free it
        cp_lkm_wrapper_instance_free(cpbdev->wrapper_ctxt);
    }

    if(sw->wrapper_info_len) {
        wrapper_info = kzalloc(sw->wrapper_info_len, GFP_KERNEL);
        if(!wrapper_info) {
            DEBUG_ERROR("%s() couldn't alloc wrapper info", __FUNCTION__);
            res = -1;
            goto set_wrapper_done;
        }
    }


    //copy the wrapper info from user to kernel space
    not_copied = copy_from_user(wrapper_info, sw->wrapper_info, sw->wrapper_info_len);
    if (not_copied) {
        DEBUG_ERROR("%s() couldn't copy wrapper info", __FUNCTION__);
        res = -1;
        goto set_wrapper_done;
    }
    //alloc the wrapper instance. On success it takes ownership of the wrapper_info and is responsible for freeing it
    DEBUG_INFO("%s() wrapper: %d", __FUNCTION__, sw->wrapper);
    cpbdev->wrapper_ctxt = cp_lkm_wrapper_instance_alloc(sw->wrapper, wrapper_info, sw->wrapper_info_len);
    if(!cpbdev->wrapper_ctxt){
        DEBUG_ERROR("%s() couldn't alloc wrapper", __FUNCTION__);
        res = -1;
        goto set_wrapper_done;
    }
    cpbdev->wrapper_hdr_size = cp_lkm_wrapper_hdr_size(cpbdev->wrapper_ctxt);
    cp_lkm_wrapper_set_state(cpbdev->wrapper_ctxt, cpdev->mux_id, CP_LKM_WRAPPER_CTRL);

    cpdev->clone_num = sw->clone_num;
    cpdev->mux_id = sw->mux_id;


set_wrapper_done:
    if(wrapper_info) {
        kfree(wrapper_info);
    }

    spin_unlock(&cp_lkm_usb_mgr.lock);
    return res;

}

static int cp_lkm_usb_set_mux_id(struct cp_lkm_usb_set_mux_id* smi)
{ //unsigned long flags;
    struct cp_lkm_usb_dev* cpdev;
    //struct cp_lkm_usb_base_dev* cpbdev;
    int res = 0;

    //printk("%s()\n", __FUNCTION__);

    spin_lock(&cp_lkm_usb_mgr.lock);
    cpdev = cp_lkm_usb_find_dev(smi->unique_id);
    if(!cpdev) {
        spin_unlock(&cp_lkm_usb_mgr.lock);
        //printk("%s() failed to find cpdev for id: %d\n", __FUNCTION__, smi->unique_id);
        return -1;
    }
    if(cpdev->cpbdev->base_state == CP_LKM_USB_DEAD){
        //modem is unplugging, upper layer just doesn't know it yet, so act like ok until it finds out
        spin_unlock(&cp_lkm_usb_mgr.lock);
        return 0;
    }
    cpdev->mux_id = smi->mux_id;
    //printk("%s() unique_id: %d, mux_id: %d\n", __FUNCTION__, smi->unique_id, smi->mux_id);

    spin_unlock(&cp_lkm_usb_mgr.lock);
    return res;

}

static int cp_lkm_usb_open_intf(struct cp_lkm_usb_open_intf* oi)
{
    //unsigned long flags;
    struct cp_lkm_usb_dev* cpdev;

    //printk("%s() u-uid: %d\n", __FUNCTION__,oi->unique_id);

    spin_lock(&cp_lkm_usb_mgr.lock);
    cpdev = cp_lkm_usb_find_dev(oi->unique_id);

    //if state isn't CP_LKM_USB_CTRL, then the interface either did not plug for some reason (i.e. didn't get probe from usb),
    //or it plugged, but then unplugged before open was called.
    if(!cpdev || cpdev->cpbdev->base_state != CP_LKM_USB_CTRL) {
        spin_unlock(&cp_lkm_usb_mgr.lock);
        //printk("%s() open_intf fail cpdev:%p, state:%d\n", __FUNCTION__, cpdev, cpdev?cpdev->state:0xff);
        return -1;
    }
    cpdev->state = CP_LKM_USB_ACTIVE;
    cpdev->edi->usb_send_ctx = cpdev; //this allows the network side to call me
    cp_lkm_wrapper_set_state(cpdev->cpbdev->wrapper_ctxt, cpdev->mux_id, CP_LKM_WRAPPER_ACTIVE);
    spin_unlock(&cp_lkm_usb_mgr.lock);
    //printk("%s() done\n", __FUNCTION__);
    return 0;

}

static int cp_lkm_usb_close_intf(struct cp_lkm_usb_close_intf* ci)
{
    //unsigned long flags;
    struct cp_lkm_usb_dev* cpdev;

    //printk("%s() u-uid: %d\n", __FUNCTION__, ci->unique_id);

    //down(&cp_lkm_usb_mgr.thread_sem);
    spin_lock(&cp_lkm_usb_mgr.lock);
    cpdev = cp_lkm_usb_find_dev(ci->unique_id);

    if(!cpdev || cpdev->cpbdev->base_state == CP_LKM_USB_DEAD) {
        //device has already unplugged, or is half-unplugged, so don't allow this action to complete
        spin_unlock(&cp_lkm_usb_mgr.lock);
        //up(&cp_lkm_usb_mgr.thread_sem);
        return 0;
    }
    cpdev->edi->usb_send_ctx = NULL; //disconnect from the network side so it won't send me any more data
    cpdev->state = CP_LKM_USB_CTRL;
    cp_lkm_wrapper_set_state(cpdev->cpbdev->wrapper_ctxt, cpdev->mux_id, CP_LKM_WRAPPER_CTRL);
    spin_unlock(&cp_lkm_usb_mgr.lock);
    //up(&cp_lkm_usb_mgr.thread_sem);
    //printk("%s() done\n", __FUNCTION__);

    return 0;
}

static bool cp_lkm_usb_unplug_do_disconnect_wait(void* ctx1, void* ctx2)
{
    struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev*)ctx1;
    bool done = false;
    if (cpbdev->disconnect_wait == false){
        done = true;
    }
    return done;
}

/*
 * This function is called when the common modem stack wants to give up the interface.
 * There are two scenarios:
 * 1. Modem unplugs, which leads to the following flow:
 *    -> cp_lkm_usb_disconnect is called by the USB sublayer; it cleans up the bottom half of the cpdev and waits for the common modem stack unplug
 *    -> common modem stack sees the unplug event
 *    -> it calls this function to finish the cleanup and deregister the driver
 *    -> we are done
 *
 * 2. The common modem stack decides to give up the interface due to one common
 *    modem driver relinquishing the modem and another common modem driver grabbing it.
 *    This leads to the following flow:
 *    -> Common modem stack calls this function.
 *    -> it calls usb_deregister(), which will call cp_lkm_usb_disconnect in context
 *    -> cp_lkm_usb_disconnect shuts down and frees the usb interface
 *    -> After usb_deregister() exits, we finish and exit.
 *
 * Notes: This means the two shutdown functions, this one and cp_lkm_usb_disconnect, can
 *        run in any order, so they must not stomp on each other. For example, since
 *        cp_lkm_usb_disconnect frees the interface with the kernel, this function better
 *        not do anything that requires the interface after calling usb_deregister()
 *
 *        The modem stack is single threaded, so this function can never be reentrant
 */
static int cp_lkm_usb_unplug_intf(struct cp_lkm_usb_unplug_intf* ui)
{
    //find dev in list by unique id
    struct cp_lkm_usb_dev* cpdev;
    struct cp_lkm_usb_base_dev* cpbdev;
    bool shuter_down = true;
    struct list_head *pos;

    //printk("%s() start id: %d\n", __FUNCTION__, ui->unique_id);
    spin_lock(&cp_lkm_usb_mgr.lock);
    //The device should always exist, but if it doesn't, there is no need to blow up, so exit peacefully
    cpdev = cp_lkm_usb_find_dev(ui->unique_id);
    if(!cpdev) {
        spin_unlock(&cp_lkm_usb_mgr.lock);
        return -1;
    }
    cpbdev = cpdev->cpbdev;

    cp_lkm_usb_mark_as_dead(cpdev);

    list_for_each(pos, &cpbdev->cpdev_list){
        struct cp_lkm_usb_dev* tmp_cpdev = list_entry(pos, struct cp_lkm_usb_dev, list);
        if(tmp_cpdev->state != CP_LKM_USB_DEAD) {
            //don't shut down until all clone devices have unplugged
            shuter_down = false;
            break;
        }
    }

    //release the lock before calling usb_deregister because it causes disconnect to be called for case 2 in the header comments,
    //and disconnect will try to grab the same lock, so we would be deadlocked
    spin_unlock(&cp_lkm_usb_mgr.lock);

    if (shuter_down) {
        LOG("Wait for cpdev to finish unplugging");
        cp_lkm_do_or_die(cpbdev, NULL, cp_lkm_usb_unplug_do_disconnect_wait,CP_LKM_TIMEOUT_MS,CP_LKM_ITER,"cpdev failed to finish disconnecting");

        //printk("%s() usb_deregister\n",__FUNCTION__);
        usb_deregister(cpbdev->usb_driver);

        /* clean up */
        memref_deref(cpbdev);

    }
    /* IMPORTANT: don't do anything other than deref after call to deregister*/

    LOG("cpdev done unplugging");

    return 0;
}
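
/*
 * Editor's sketch (illustrative, not part of the driver): cp_lkm_do_or_die()
 * is a shim-internal helper not shown in this section. From its call sites it
 * behaves like a bounded predicate poll; this approximation is an assumption,
 * including the sleep-based wait and the caller-handles-failure policy.
 */
#if 0
static bool example_wait_for(bool (*pred)(void* ctx1, void* ctx2), void* ctx1, void* ctx2,
                             unsigned int timeout_ms, unsigned int iters)
{
    unsigned int i;

    for (i = 0; i < iters; i++) {
        if (pred(ctx1, ctx2)) {
            return true;
        }
        msleep(timeout_ms); //sleeps, so only legal in process context
    }
    return false; //the real helper logs and escalates on failure
}
#endif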

/*
 * Handle endpoint action requests from the modem stack.
 *
 * Important things to know:
 * In normal mode:
 * 1. There will be 1 cpdev per cpbdev, and 1 ep per bep.
 * 2. Every different ep can either be listened on or recv'd on, but never both at the same time
 *
 * In clone mode:
 * 1. There will be n cpdevs per cpbdev, and n eps per bep (depending on the number of clones).
 * 2. Every different ep can either be listened on or recv'd on, but never both at the same time.
 * 3. All cloned data eps can be listened on at the same time (the data header allows us to mux data between all the data eps; data endpoints don't use recv).
 * 4. With all other cloned eps of the same type (AT, CNS, QMI), only one clone can be listened on or recv'd on at a time.
 *    This is because there are no headers on these channels to let us know where to mux the data to. Fortunately, the
 *    modem stack enforces this, so we don't have to enforce it here, but we can use it to know how to route cloned packets
 *    coming in on non-data channel endpoints
*/
static int cp_lkm_usb_ep_action(struct cp_lkm_usb_ep_action* ea)
{
    struct cp_lkm_ep* ep;
    struct cp_lkm_base_ep* bep = NULL;
    struct cp_lkm_usb_dev* cpdev;
    struct cp_lkm_usb_base_dev* cpbdev;
    //unsigned long flags;
    int pump_recv = 0;

    //printk("%s() - action: %d, ep_num: 0x%x, id: %d\n",__FUNCTION__,ea->action, ea->ep_num, ea->unique_id);

    spin_lock(&cp_lkm_usb_mgr.lock);
    //There should always be a device, and it should always be plugged
    cpdev = cp_lkm_usb_find_dev(ea->unique_id);
    if(!cpdev) {
        spin_unlock(&cp_lkm_usb_mgr.lock);
        //printk("%s() no device found for unique id: %d\n", __FUNCTION__, ea->unique_id);
        return -1;
    }

    cpbdev = cpdev->cpbdev;
    if(cpbdev->base_state == CP_LKM_USB_INIT) {
        spin_unlock(&cp_lkm_usb_mgr.lock);
        //printk("%s() no probe yet, unique_id: %d, action: %d\n", __FUNCTION__,ea->unique_id,ea->action);
        return -1;
    }
    if(cpbdev->base_state == CP_LKM_USB_DEAD) {
        // The device can unplug down here before cpusb knows about it, so it can continue to send us stuff.
        // The modem will unplug soon, so just act like we did it and return ok. I didn't want to
        // return an error because that might cause cpusb unnecessary heartburn.
        spin_unlock(&cp_lkm_usb_mgr.lock);
        //printk("%s() cpdev already dead, shouldn't be doing this: id: %d, action: %d cpbdev: %p, cpdev: %p\n", __FUNCTION__,ea->unique_id,ea->action,cpbdev,cpdev);
        return 0;
    }
    DEBUG_ASSERT(cpbdev, "cpbdev is null");
    //create the ep if it doesn't already exist
    if(ea->action == EP_ACTION_CREATE) {
        cp_lkm_usb_create_ep(cpdev, ea->ep_num);
    }

    if (ea->action == EP_ACTION_FLUSH_CONTROL) {
        ep = NULL;
    } else {
        ep = cp_lkm_usb_get_ep(cpdev, ea->ep_num);
        if(!ep) {
            spin_unlock(&cp_lkm_usb_mgr.lock);
            //printk("%s() failed to find ep: 0x%x for action: %d\n", __FUNCTION__, ea->ep_num, ea->action);
            return -1;
        }
        bep = ep->bep;
        DEBUG_ASSERT(bep,"base ep is null");
    }


    //if (ep && ea->action != EP_ACTION_RECV) {
    //    printk("%s() - action: %d, ep_num: 0x%x, bep: %p, ep: %p, cpbdev: %p, cpdev: %p, id: %d\n",__FUNCTION__,ea->action, ea->ep_num, bep, ep, bep->cpbdev, ep->cpdev,ea->unique_id);
    //}

    //printk("ea->action: %d, ep_num: %d\n", ea->action, ea->ep_num);
    switch(ea->action) {
        case EP_ACTION_CREATE:
            //printk("%s() - action: %d, ep_num: 0x%x, bep: %p, ep: %p, cpbdev: %p, cpdev: %p, id: %d\n",__FUNCTION__,ea->action, ea->ep_num, bep, ep, bep->cpbdev, ep->cpdev,ea->unique_id);
            //initialize endpoint fields
            bep->type = ea->ep_type;
            bep->max_transfer_size = ea->max_transfer_size;
            bep->interval = ea->interval;

            DEBUG_ASSERT(cpbdev->udev,"udev is null");
            if(bep->ep_num & USB_DIR_IN) { //in
                if(bep->type == UE_BULK) {
                    bep->pipe = usb_rcvbulkpipe(cpbdev->udev,bep->ep_num);
                }
                else{ //interrupt
                    bep->pipe = usb_rcvintpipe(cpbdev->udev, bep->ep_num);
                }
            }
            else{ //out
                if(bep->type == UE_BULK) {
                    bep->pipe = usb_sndbulkpipe(cpbdev->udev,bep->ep_num);
                }
                else{ //interrupt
                    bep->pipe = usb_sndintpipe(cpbdev->udev, bep->ep_num);
                }
            }
            DEBUG_TRACE("%s() create action:%d, ep:0x%x, type:%d, pipe:0x%x", __FUNCTION__, ea->action, ea->ep_num, ea->ep_type, bep->pipe);
            break;

        case EP_ACTION_LISTEN:
            DEBUG_TRACE("%s() listen action:%d, ep:0x%x, type:%d, pipe:0x%x", __FUNCTION__, ea->action, ea->ep_num, ea->ep_type, bep->pipe);
            ep->con_flags |= CP_LKM_USB_LISTEN;
            //listen on any endpoint starts listen on base
            bep->con_flags |= CP_LKM_USB_LISTEN;
            pump_recv = 1;
            break;

        case EP_ACTION_LISTEN_STOP:
        {
            bool listen_done = true;
            struct list_head *entry, *nxt;
            struct cp_lkm_ep *tmp_ep;

            DEBUG_TRACE("%s() listen stop action:%d, ep:0x%x, type:%d, pipe:0x%x", __FUNCTION__, ea->action, ea->ep_num, ea->ep_type, bep->pipe);

            // the ep is done listening
            ep->con_flags &= ~CP_LKM_USB_LISTEN;

            //now see if all eps on this bep are done listening
            list_for_each_safe(entry, nxt, &bep->eps) {
                tmp_ep = list_entry(entry, struct cp_lkm_ep, list_bep);
                if(tmp_ep->con_flags & CP_LKM_USB_LISTEN) {
                    //if any of the eps on the bep are still listening, then still listen on the bep
                    listen_done = false;
                    break;
                }
            }
            if(listen_done) {
                bep->con_flags &= ~CP_LKM_USB_LISTEN;
                //If the RX_HALT bit is set then there is an error on this endpoint and the kevent will be scheduled to fix the error. As part of the fix
                //it will unlink the urbs. Bad things can happen if we call cp_lkm_usb_unlink_urbs here at the same time the kevent handler is calling it
                if(!test_bit (EVENT_RX_HALT, &bep->err_flags)){
                    //TODO CORY: is it ok to call unlink while holding the global lock?? Can I set a flag and run the tasklet to do the work instead??
                    cp_lkm_usb_unlink_urbs(cpbdev, &cpbdev->in_q, bep);
                }
            }
        }
        break;

        case EP_ACTION_RECV:
            DEBUG_TRACE("%s() recv action:%d, ep:0x%x, type:%d, pipe:0x%x", __FUNCTION__, ea->action, ea->ep_num, ea->ep_type, bep->pipe);
            // can only have one pending recv on a given ep
            ep->con_flags |= CP_LKM_USB_RECV;
            bep->con_flags |= CP_LKM_USB_RECV;
            pump_recv = 1;
            break;

        case EP_ACTION_FLUSH_CONTROL:
            //printk("%s() flush control action:%d\n", __FUNCTION__, ea->action);
            //TODO CORY: is it ok to call unlink while holding the global lock?? Can I set a flag and run the tasklet to do the work instead??
            //We don't schedule kevents to clear endpoint halts since they are self recovering, so we don't need to test the halt bits on the ctrl channel
            cp_lkm_usb_unlink_urbs(cpbdev, &cpbdev->ctrlq, NULL);
            break;

        case EP_ACTION_SET_MAX_TX_SIZE:
            //printk("%s() set max tx size to %d on ep: 0x%x\n",__FUNCTION__,ea->max_transfer_size, ea->ep_num);
            bep->max_transfer_size = ea->max_transfer_size;
            break;

        default:
            break;
    }


    if(pump_recv) {
        cp_lkm_schedule_rx_restock(cpbdev, bep);
    }

    spin_unlock(&cp_lkm_usb_mgr.lock);

    return 0;
}
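
/*
 * Editor's sketch (illustrative, not part of the driver): EP_ACTION_CREATE
 * above selects a pipe with the stock USB core helpers. The direction bit in
 * the endpoint address plus the transfer type fully determine which helper
 * applies; example_make_pipe is a hypothetical condensation of that ladder.
 */
#if 0
static unsigned int example_make_pipe(struct usb_device* udev, u8 ep_num, bool is_bulk)
{
    if (ep_num & USB_DIR_IN) { //in
        return is_bulk ? usb_rcvbulkpipe(udev, ep_num) : usb_rcvintpipe(udev, ep_num);
    }
    //out
    return is_bulk ? usb_sndbulkpipe(udev, ep_num) : usb_sndintpipe(udev, ep_num);
}
#endif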

static bool cp_lkm_usb_do_pm_link(void* ctx1, void* ctx2)
{
    struct cp_lkm_usb_dev* cpdev = (struct cp_lkm_usb_dev*)ctx1;
    struct cp_lkm_usb_base_dev* cpbdev = cpdev->cpbdev;
    struct cp_lkm_usb_pm_link* upl = (struct cp_lkm_usb_pm_link*)ctx2;
    unsigned long flags;
    bool done = false;
    int rc;

    //printk("%s() usb id: %d, pm id: %d, link: %d\n", __FUNCTION__, upl->usb_unique_id, upl->pm_unique_id ,upl->link);

    // We are getting ready to either link or unlink the usb to the protocol manager. This means we will be changing
    // function pointers that are used by the data processing state machine and by the code that schedules the data
    // processing machine.
    //
    // We need to shut both of those down before doing the linking.
    // 1: We shut the machine down by setting the state to USB_PROCESS_STATE_PAUSED.
    // 2: We shut down the scheduling by setting data_q_len to CP_LKM_USB_PAUSED_CNT so the hw interrupts won't schedule a process
    spin_lock_irqsave(&cpbdev->processing_state_lock, flags);
    if(cpbdev->processing_state == USB_PROCESS_STATE_IDLE){
        cpbdev->processing_state = USB_PROCESS_STATE_PAUSED; //pauses the data processing soft irq handler

        spin_lock(&cpbdev->data_q_lock);
        cpbdev->data_q_len = CP_LKM_USB_PAUSED_CNT; //stops the hw irq handlers from trying to schedule the soft irq handler
        spin_unlock(&cpbdev->data_q_lock);

        if(upl->link) {
            cpdev->edi->usb_send_ctx = cpdev;
        }

        //release the lock while calling pm since we don't know how long it may take. We have already set the processing_state to
        //paused so the soft interrupt routines won't try to do anything, so we are safe.
        spin_unlock_irqrestore(&cpbdev->processing_state_lock, flags);

        rc = cp_lkm_pm_usb_link(cpdev->edi, upl->pm_unique_id, upl->link);
        DEBUG_ASSERT(rc == 0, "Failed to link usb and pm");

        spin_lock_irqsave(&cpbdev->processing_state_lock, flags);
        if(upl->link) {
            if (cpdev->edi->pm_get_hdr_size && cpdev->edi->pm_recv_ctx) {
                cpdev->edi->pm_get_hdr_size(cpdev->edi->pm_recv_ctx, cpbdev->wrapper_hdr_size, &cpbdev->pm_hdr_size, &cpbdev->pm_hdr_offset);
            }
        }
        else{
            cpdev->edi->usb_send_ctx = NULL;
        }

        cpdev->pm_id = upl->pm_unique_id;

        spin_lock(&cpbdev->data_q_lock);
        //set things back up properly before re-enabling the soft irq and hardware handlers
        cpbdev->data_q_len = cpbdev->data_rx_done.qlen + cpbdev->data_tx_done.qlen; //this must be set before calling schedule_data_process
        spin_unlock(&cpbdev->data_q_lock);

        cpbdev->processing_state = USB_PROCESS_STATE_IDLE;
        done = true;
    }
    spin_unlock_irqrestore(&cpbdev->processing_state_lock, flags);

    return done;
}

static int cp_lkm_usb_pm_link(struct cp_lkm_usb_pm_link* upl)
{
    struct cp_lkm_usb_dev* cpdev;
    struct cp_lkm_usb_base_dev* cpbdev;

    spin_lock(&cp_lkm_usb_mgr.lock);
    //There should always be a device, and it should always be plugged
    cpdev = cp_lkm_usb_find_dev(upl->usb_unique_id);

    //printk("%s() cpdev: %p, u-uid: %d, pm-uid: %d, up: %d\n", __FUNCTION__, cpdev, upl->usb_unique_id, upl->pm_unique_id, upl->link);

    if(!cpdev || cpdev->cpbdev->base_state == CP_LKM_USB_INIT) {
        spin_unlock(&cp_lkm_usb_mgr.lock);
        //printk("%s() no device or no probe yet\n", __FUNCTION__);
        return -1;
    }
    cpbdev = cpdev->cpbdev;
    // The device can unplug down here before cpusb knows about it, so it can continue to send us stuff.
    // The modem will unplug soon, so just act like we did it and return ok. I didn't want to
    // return an error because that might cause cpusb unnecessary heartburn.
    if(cpbdev->base_state == CP_LKM_USB_DEAD) {
        spin_unlock(&cp_lkm_usb_mgr.lock);
        //printk("%s() device already unplugged\n", __FUNCTION__);
        return 0;
    }

    //printk("%s() usb id: %d, pm id: %d, link: %d\n", __FUNCTION__, upl->usb_unique_id, upl->pm_unique_id ,upl->link);
    // We are getting ready to either link or unlink the usb to the protocol manager. This means we will be changing
    // function pointers that are used by the data processing state machine and by the code that schedules the data
    // processing machine.
    //
    // We need to shut both of those down before doing the linking.
    // 1: We shut the machine down by setting the state to USB_PROCESS_STATE_PAUSED.
    // 2: We shut down the scheduling by setting data_q_len to CP_LKM_USB_PAUSED_CNT so the hw interrupts won't schedule a process
    cp_lkm_do_or_die(cpdev, upl, cp_lkm_usb_do_pm_link, CP_LKM_TIMEOUT_MS, CP_LKM_ITER, "cpdev failed to link with pm");

    //printk("%s() done\n", __FUNCTION__);
    spin_unlock(&cp_lkm_usb_mgr.lock);
    //force a resume
    cp_lkm_schedule_data_process(cpbdev, false, true, false);
    return 0;
}

static int cp_lkm_usb_is_alive_intf(struct cp_lkm_usb_is_alive_intf *alivei)
{
    //find dev in list by unique id
    struct cp_lkm_usb_dev *cpdev;
    int alive;

    //printk("%s() start\n", __FUNCTION__);
    spin_lock(&cp_lkm_usb_mgr.lock);
    //The device should always exist, but if it doesn't, there is no need to blow up, so exit peacefully
    cpdev = cp_lkm_usb_find_dev(alivei->unique_id);

    if(!cpdev) {
        spin_unlock(&cp_lkm_usb_mgr.lock);
        return -1;
    }

    alive = (cpdev->state == CP_LKM_USB_DEAD) ? -1 : 0;
    spin_unlock(&cp_lkm_usb_mgr.lock);

    return alive;
}
static bool cp_lkm_usb_is_attached(struct cp_lkm_usb_dev* cpdev)
{
    return (cpdev->state == CP_LKM_USB_ACTIVE || cpdev->state == CP_LKM_USB_CTRL);
}

static bool cp_lkm_usb_is_base_attached(struct cp_lkm_usb_base_dev* cpbdev)
{
    //base has three possible states: INIT, CTRL, DEAD (it never goes to ACTIVE, only the cpdev's do that)
    return cpbdev->base_state == CP_LKM_USB_CTRL;
}


//
// Input:
//    if_data: set to true if the caller only wants to schedule if there is data pending
//    is_reschedule: set to true if the caller is the scheduled handler to see if it should be rescheduled
//    have_lock: true if the caller already has the lock
//
// returns:
//    true if it scheduled new processing
//    false if it didn't schedule
//
// Note: returns false if processing was already scheduled
static bool cp_lkm_schedule_data_process(struct cp_lkm_usb_base_dev* cpbdev, bool if_data, bool is_reschedule, bool have_lock)
{
    unsigned long flags;
    bool res = false;

    if (!have_lock) {
        spin_lock_irqsave(&cpbdev->data_q_lock, flags);
    }

    //never schedule processing when we are paused
    if (cpbdev->data_q_len == CP_LKM_USB_PAUSED_CNT) {
        goto schedule_done;
    }

    if (is_reschedule) {
        cpbdev->scheduled = false;
    }

    if (cpbdev->scheduled == true) {
        goto schedule_done;
    }

    if (if_data) {
        if(!cp_lkm_usb_have_data(cpbdev)){
            goto schedule_done;
        }
    }

    cpbdev->scheduled = true;
    res = true;

    //cpdev->dbg_total_tasklet_sched++;
    tasklet_schedule(&cpbdev->data_process_tasklet);

schedule_done:
    if (!have_lock) {
        spin_unlock_irqrestore(&cpbdev->data_q_lock, flags);
    }
    return res;
}
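
/*
 * Editor's note (illustrative, not part of the driver): the three flag
 * combinations below mirror real call sites of cp_lkm_schedule_data_process()
 * elsewhere in this file and show how the arguments map to the contract
 * documented above.
 */
#if 0
//hw completion path (already holds data_q_lock): schedule only if enough work is queued
cp_lkm_schedule_data_process(cpbdev, true, false, true);

//tasklet exit path: clear 'scheduled' and re-arm only if more work arrived
rescheduled = cp_lkm_schedule_data_process(cpbdev, true, true, false);

//pm link path: force a kick regardless of queue depth
cp_lkm_schedule_data_process(cpbdev, false, true, false);
#endif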

static void cp_lkm_schedule_rx_restock(struct cp_lkm_usb_base_dev* cpbdev, struct cp_lkm_base_ep* bep)
{
    if(bep == NULL) {
        cp_lkm_schedule_data_process(cpbdev,false,false,false);
        tasklet_schedule(&cpbdev->other_process_tasklet);
    }
    else if(bep->ep_num == cpbdev->data_in_bep_num) {
        //printk("start data ep listen\n");
        cp_lkm_schedule_data_process(cpbdev,false,false,false);
    }
    else{
        tasklet_schedule(&cpbdev->other_process_tasklet);
    }
}

#define DATA_SRC_TX 0
#define DATA_SRC_RX 1
#define DATA_SRC_OTHER 2
static void cp_lkm_usb_done_and_defer_data(struct cp_lkm_usb_base_dev* cpbdev, struct sk_buff *skb, int src)
{
    unsigned long flags;

    spin_lock_irqsave(&cpbdev->data_q_lock, flags);
    if(src == DATA_SRC_TX) {
        __skb_queue_tail(&cpbdev->data_tx_done, skb);
    }
    else{
        __skb_queue_tail(&cpbdev->data_rx_done, skb);
    }
    if(cpbdev->data_q_len != CP_LKM_USB_PAUSED_CNT) {
        cpbdev->data_q_len++;
        cp_lkm_schedule_data_process(cpbdev,true,false,true);
    }
    spin_unlock_irqrestore(&cpbdev->data_q_lock, flags);

}

//for non data endpoint pkts
static void cp_lkm_usb_done_and_defer_other(struct cp_lkm_usb_base_dev* cpbdev, struct sk_buff *skb)
{
    unsigned long flags;

    spin_lock_irqsave(&cpbdev->other_done.lock, flags);
    __skb_queue_tail(&cpbdev->other_done, skb);
    //only rearm the softirq if the list was empty
    if(cpbdev->other_done.qlen == 1) {
        tasklet_schedule(&cpbdev->other_process_tasklet);
    }
    spin_unlock_irqrestore(&cpbdev->other_done.lock, flags);
}
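
/*
 * Editor's note (illustrative, not part of the driver): the qlen == 1 check
 * above is the usual "arm the consumer only on the empty->non-empty
 * transition" idiom. The enqueue happens under the queue lock, so exactly one
 * producer sees the length hit 1, and the tasklet is scheduled once per burst
 * rather than once per packet. Generic form (q and consumer_tasklet are
 * hypothetical):
 */
#if 0
spin_lock_irqsave(&q->lock, flags);
__skb_queue_tail(q, skb);
if (skb_queue_len(q) == 1) { //first packet of a burst
    tasklet_schedule(&consumer_tasklet);
}
spin_unlock_irqrestore(&q->lock, flags);
#endif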

static void cp_lkm_usb_process_other_done_tasklet (unsigned long param)
{
    struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev *)param;
    struct sk_buff *skb;
    struct skb_data *entry;
    bool timed_out = false;
    unsigned long time_limit = jiffies + 2;
    bool can_restock = true;
    unsigned long flags;

    spin_lock_irqsave(&cpbdev->other_state_lock, flags);
    if(cpbdev->other_state != USB_PROCESS_STATE_IDLE){
        spin_unlock_irqrestore(&cpbdev->other_state_lock, flags);
        return;
    }
    cpbdev->other_state = USB_PROCESS_STATE_ACTIVE;
    spin_unlock_irqrestore(&cpbdev->other_state_lock, flags);

    if (timer_pending(&cpbdev->rx_delay) || !cp_lkm_usb_is_base_attached(cpbdev)) {
        //printk("%s(), cpbdev %p delaying or no longer attached, base_state: %d\n", __FUNCTION__,cpbdev,cpbdev->base_state);
        can_restock = false;
    }
    //cpdev->dbg_total_o_done++;

    while(!timed_out) {
        skb = skb_dequeue(&cpbdev->other_done);
        if(skb == NULL) {
            break;
        }
        entry = (struct skb_data *) skb->cb;

        //printk("%s(), other data cpbdev: %p, bep: %p, num: 0x%x\n",__FUNCTION__,cpbdev,entry->bep,(entry->bep?entry->bep->ep_num:0));

        //cp_lkm_usb_cnts(entry->state,-1);
        switch (entry->state) {
            case in_other_done:
                if(entry->urb) {
                    //cp_lkm_usb_urb_cnt(-1);
                    usb_free_urb (entry->urb);
                }
                cp_lkm_usb_other_recv_process(cpbdev, skb);
                break;
            case ctrl_done:
                if(entry->urb) {
                    //cp_lkm_usb_urb_cnt(-1);
                    usb_free_urb (entry->urb);
                }
                cp_lkm_usb_ctrl_process(cpbdev, skb);
                break;
            case out_done:
            case in_other_cleanup:
                if(entry->urb) {
                    //cp_lkm_usb_urb_cnt(-1);
                    usb_free_urb (entry->urb);
                }
                dev_kfree_skb_any(skb);
                break;
            case unlink_start:
            default:
                //printk("!!other: unknown skb state: %d\n",entry->state);
                break;
        }

        if(time_after_eq(jiffies, time_limit)) {
            //ran out of time, process this one and then bail
            timed_out = true;
        }
    }

    if(can_restock) {
        cp_lkm_usb_rx_other_restock(cpbdev);
    }

    if(timed_out) {
        tasklet_schedule(&cpbdev->other_process_tasklet);
    }

    spin_lock_irqsave(&cpbdev->other_state_lock, flags);
    cpbdev->other_state = USB_PROCESS_STATE_IDLE;
    spin_unlock_irqrestore(&cpbdev->other_state_lock, flags);

    return ;
}

// Timer callback. This runs in soft interrupt context.
//
// The call to restock can blow chow (actually when it calls cp_lkm_schedule_data_process)
// if an unlink or unplug happens while we are still in the call.
//
// Unlink or unplug can happen during this call on multi core platforms with kernel preemption enabled.
// This timer is scheduled if we ran into some unexpected USB error and want
// to give the USB endpoint some time before trying to reschedule recv urbs on it.
//
// The whole purpose of this function is to pump the system if it is otherwise idle. If
// it isn't idle, we can count on those processes to call cp_lkm_schedule_rx_restock when done.

#if (LINUX_VERSION_CODE <= KERNEL_VERSION(4,4,100))
static void cp_lkm_usb_delay_timer (unsigned long param)
#else
static void cp_lkm_usb_delay_timer (struct timer_list *timer)
#endif
{
    unsigned long flags;

#if (LINUX_VERSION_CODE <= KERNEL_VERSION(4,4,100))
    struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev *)param;
#else
    struct cp_lkm_usb_base_dev* cpbdev = from_timer(cpbdev,timer,rx_delay);
#endif

    spin_lock_irqsave(&cpbdev->processing_state_lock, flags);
    if(cpbdev->processing_state == USB_PROCESS_STATE_IDLE){
        cp_lkm_schedule_rx_restock(cpbdev,NULL);
    }
    spin_unlock_irqrestore(&cpbdev->processing_state_lock, flags);
}
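
/*
 * Editor's note (illustrative, not part of the driver): the #if ladders above
 * straddle the kernel timer API rework (timer_setup()/from_timer() replacing
 * init_timer() and the unsigned long 'data' cookie; the 4.4.100 cutoff is this
 * driver's own vendor-kernel choice). A condensed template of the same
 * pattern, with hypothetical example_ names:
 */
#if 0
struct example_dev {
    struct timer_list my_timer;
};

#if (LINUX_VERSION_CODE <= KERNEL_VERSION(4,4,100))
static void example_timer_fn(unsigned long param)
{
    struct example_dev* dev = (struct example_dev*)param;
    //... handle expiry ...
}
#else
static void example_timer_fn(struct timer_list *timer)
{
    struct example_dev* dev = from_timer(dev, timer, my_timer); //container_of under the hood
    //... handle expiry ...
}
#endif

static void example_timer_init(struct example_dev* dev)
{
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(4,4,100))
    dev->my_timer.function = example_timer_fn;
    dev->my_timer.data = (unsigned long)dev;
    init_timer(&dev->my_timer);
#else
    timer_setup(&dev->my_timer, example_timer_fn, 0);
#endif
}
#endif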

#if 0
static void cp_lkm_usb_dbg_memleak_timer (unsigned long param)
{
    printk("+=+=+=+=+=!!!!mem: %d, urb: %d, skb: data: %d, other: %d, xmit: %d, ctrl: %d, unplug:%d, stck_cnt: %d, stck_chk: %d, unlink: %d\n",g_dbg_memalloc_cnt,g_dbg_urballoc_cnt,g_dbg_data_skballoc_cnt,g_dbg_other_skballoc_cnt,g_dbg_xmit_skballoc_cnt,g_dbg_ctrl_skballoc_cnt,g_dbg_unplug_cnt,g_stuck_cnt,g_stuck_chk,g_unlink_cnt);
    mod_timer(&dbg_memleak_timer, jiffies + msecs_to_jiffies(5000));
}
#endif


/*
 * We pause the transmit if there are too many urbs down at the usb layer.
 * The Broadcom processor's USB block sometimes gets stuck, meaning we will never
 * unpause. This function is used to detect if we are paused because of a stuck
 * endpoint and try to recover it.
*/
static void cp_lkm_usb_stuck_check(struct cp_lkm_usb_base_dev* cpbdev, int action)
{
    //only broadcom has the stuck problem
    if (cp_lkm_is_broadcom == 0) {
        //printk("Not BRCM!!!!\n");
        return;
    }

    //TODO: it seems like this might work fine with clones. I don't think it hurts to be inited,
    //      started or stopped multiple times??
    //g_stuck_chk++;
    switch(action) {
        case CP_LKM_STUCK_INIT:
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(4,4,100))
            cpbdev->usb_pause_stuck_timer.function = cp_lkm_usb_pause_stuck_timer;
            cpbdev->usb_pause_stuck_timer.data = (unsigned long)cpbdev;
            init_timer(&cpbdev->usb_pause_stuck_timer);
#else
            timer_setup(&cpbdev->usb_pause_stuck_timer, cp_lkm_usb_pause_stuck_timer, 0);
#endif
            break;
        case CP_LKM_STUCK_START:
            mod_timer(&cpbdev->usb_pause_stuck_timer, jiffies + msecs_to_jiffies(3000));
            cpbdev->tx_proc_cnt_at_pause = cpbdev->tx_proc_cnt;
            break;
        case CP_LKM_STUCK_STOP:
        case CP_LKM_STUCK_DEINIT:
            del_timer_sync(&cpbdev->usb_pause_stuck_timer);
            break;
    }
}

// Broadcom has a problem in the EHCI controller where if it gets a NAK on an out packet
// it occasionally doesn't update the status of the URB and retry it. This results in the endpoint getting stuck.
// If we detect that it is stuck (the tx has been paused for more than 3 seconds), then we cancel the
// stuck urb and this gets things going again. The cancelled urb results in a dropped packet, which is undesirable,
// but preferable to being stuck.
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(4,4,100))
static void cp_lkm_usb_pause_stuck_timer (unsigned long param)
#else
static void cp_lkm_usb_pause_stuck_timer (struct timer_list *timer)
#endif
{
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(4,4,100))
    struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev *)param;
#else
    struct cp_lkm_usb_base_dev* cpbdev = from_timer(cpbdev,timer,usb_pause_stuck_timer);
#endif
    struct skb_data *entry;
    struct sk_buff *skb;
    struct urb *urb = NULL;
    unsigned long flags;

    spin_lock_irqsave(&cpbdev->out_q.lock, flags);
    if (cpbdev->tx_paused) {
        // cancel stuck urb?
        skb = skb_peek(&cpbdev->out_q);
        if (skb) {
            entry = (struct skb_data *) skb->cb;
            if (entry) {
                if(cpbdev->tx_proc_cnt_at_pause == cpbdev->tx_proc_cnt){
                    //printk("\n!!!!!!Canceling stuck URB, cnt at stuck: %d, cnt at unstick: %d!!!!!!!!!!!!!!!!!!!!!!!!!\n", cpbdev->tx_proc_cnt_at_pause, cpbdev->tx_proc_cnt);
                    urb = entry->urb;
                    usb_get_urb(urb);
                }
                //else{
                //    some pkts were transmitted successfully while waiting, though not enough to unpause us.
                //    this means the tx is not stuck, so we don't need to cancel anything
                //    printk("\n!!!!!!Restarting stuck URB timer, cnt at stuck: %d, cnt at unstick: %d!!!!!!!!!!!!!!!!!!!!!!!!!\n",cpbdev->tx_proc_cnt_at_pause, cpbdev->tx_proc_cnt);
                //}
                // restart just in case this doesn't unpause tx
                cp_lkm_usb_stuck_check(cpbdev, CP_LKM_STUCK_START);
                //g_stuck_cnt++;
            }
        }
    }
    spin_unlock_irqrestore(&cpbdev->out_q.lock, flags);
    if (urb) {
        //printk("\n!!!!!!Canceling stuck URB!!!!!!!!!!\n");
        //cpbdev->dbg_total_stuck_cnt++;
        usb_unlink_urb (urb);
        usb_put_urb(urb);
    }
}
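
/*
 * Editor's note (illustrative, not part of the driver): the cancel above uses
 * the standard URB refcount dance. usb_get_urb() is taken while the queue lock
 * is held so the URB can't be freed out from under us, and the non-blocking
 * usb_unlink_urb() plus usb_put_urb() run after the lock is dropped, because
 * the completion that unlink triggers takes the same lock. Generic form
 * (example_pick_victim is hypothetical):
 */
#if 0
spin_lock_irqsave(&q->lock, flags);
urb = example_pick_victim(q); //hypothetical selector
if (urb) {
    usb_get_urb(urb); //hold a ref so it survives the unlock
}
spin_unlock_irqrestore(&q->lock, flags);

if (urb) {
    usb_unlink_urb(urb); //async cancel; completion handler sees -ECONNRESET
    usb_put_urb(urb);
}
#endif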

#if 0
static void cp_lkm_usb_dbg_timer (unsigned long param)
{
    struct cp_lkm_usb_dev* cpdev = (struct cp_lkm_usb_dev *)param;
    struct cp_lkm_usb_base_dev* cpbdev = cpdev->cpbdev;
    printk("!!!!cpdev: %p, clone: %d, id: 0x%x, q_cnt: %d, p: %d, stuck_cnt: %d, tx done: %d, ip_copies: %d!!!!!!!\n",cpdev, cpdev->clone_num,cpdev->mux_id,cpbdev->tx_usb_q_count,cpbdev->tx_paused, cpbdev->dbg_total_stuck_cnt, cpbdev->tx_proc_cnt,num_ip_copies);

    //printk("!!!!Stuck urb count: %d, total_pause: %d, cpdev: %p, is_brcm: %d!!!!!!!\n",cpdev->dbg_total_stuck_cnt,cpdev->dbg_total_pause,cpdev,cp_lkm_is_broadcom);
    //printk("!!!!!!!!!!!\n");
    #if 0
    int txa;
    int rxa;
    int drql;
    int dtql;
    //int ab;
    int tx,rx;
    int pkt_avg;
    //int epqc, in_q;

    cpdev->dbg_total_rx_qlen += cpdev->data_rx_done.qlen;
    cpdev->dbg_total_tx_qlen += cpdev->data_tx_done.qlen;

    //ab = cpdev->dbg_total_budget/(cpdev->dbg_total_d_done+1);
    txa = cpdev->dbg_total_tx_proc/(cpdev->dbg_total_d_done+1);
    rxa = cpdev->dbg_total_rx_proc/(cpdev->dbg_total_d_done+1);
    drql = cpdev->dbg_total_rx_qlen/(cpdev->dbg_total_d_done+1);
    dtql = cpdev->dbg_total_tx_qlen/(cpdev->dbg_total_d_done+1);
    //epqc = cpdev->in_eps[CP_LKM_DATA_INDEX].q_cnt;
    //in_q = cpdev->in_q.qlen;
    tx = cpdev->dbg_total_tx_irq;
    rx = cpdev->dbg_total_rx_irq;
    pkt_avg = (tx+rx)/5;
    printk("tot: %d, tx: %d, rx: %d, pa: %d, dones: %d, p: %d\n", tx+rx, tx, rx, pkt_avg, cpdev->dbg_total_d_done, cpdev->dbg_total_pause);
    printk("resch: %d, d_c: %d, sch_n: %d, sch_t: %d, sch_wq: %d, sch_sk: %d, ds: %d\n", cpdev->dbg_total_d_resched, cpdev->dbg_total_d_comp, cpdev->dbg_total_napi_sched,cpdev->dbg_total_tasklet_sched, cpdev->dbg_total_wq_sched,cpdev->dbg_total_sch_sk, cpdev->data_state);
    printk("txa: %d, rxa: %d, to: %d, HZ:%d \n", txa , rxa, cpdev->dbg_total_timeout, HZ);
    printk("nrm_t: %d, blk_t: %d, nrm: %d, blk: %d, ntmrs: %d \n", cpdev->dbg_total_num_normal_t,cpdev->dbg_total_num_hybrid_t,cpdev->dbg_total_num_normal,cpdev->dbg_total_num_hybrid, cpdev->dbg_total_num_d_timers);
    printk("psd: %d, tuqc: %d, schd: %d, dql: %d, rql: %d, tql: %d, toq: %d\n",cpdev->tx_paused,cpdev->tx_usb_q_count,cpdev->scheduled,cpdev->data_q_len,cpdev->data_rx_done.qlen,cpdev->data_tx_done.qlen,cpdev->out_q.qlen);
    printk("txirq: %d, txprc: %d\n",cpdev->dbg_total_tx_irq, cpdev->dbg_total_tx_proc);

    //printk("ipqc: %d, in_q: %d\n", epqc, in_q);
    //printk("d0: %p,d1: %p,d2: %p,d3: %p,d4: %p\n", devs[0],devs[1],devs[2],devs[3],devs[4]);
    cpdev->dbg_total_d_done = cpdev->dbg_total_d_resched = cpdev->dbg_total_d_comp = 0;
    cpdev->dbg_total_pause = cpdev->dbg_total_max_work = cpdev->dbg_total_budget = 0;
    cpdev->dbg_total_tx_irq = cpdev->dbg_total_rx_irq = 0;
    cpdev->dbg_total_tx_proc = cpdev->dbg_total_rx_proc = 0;
    cpdev->dbg_total_rx_qlen = cpdev->dbg_total_tx_qlen = 0;
    cpdev->dbg_total_napi_sched=cpdev->dbg_total_tasklet_sched=cpdev->dbg_total_wq_sched=0;
    cpdev->dbg_total_num_normal_t=cpdev->dbg_total_num_hybrid_t=cpdev->dbg_total_num_normal=cpdev->dbg_total_num_hybrid=cpdev->dbg_total_num_d_timers = 0;
    #endif

    mod_timer(&cpdev->dbg_timer, jiffies + msecs_to_jiffies(5000));

}
#endif


//Caller must have the data_q_lock before calling
static int cp_lkm_usb_have_data(struct cp_lkm_usb_base_dev *cpbdev)
{
    //return the amount of work to be done if it exceeds the threshold, else return 0
    if(cpbdev->data_rx_done.qlen >= cpbdev->rx_schedule_threshold || cpbdev->data_tx_done.qlen >= cpbdev->tx_schedule_threshold){
        return cpbdev->data_rx_done.qlen + cpbdev->data_tx_done.qlen;
    }
    return 0;
}
2700
2701
2702#if 1
2703static int cp_lkm_usb_process_data_done(struct cp_lkm_usb_base_dev *cpbdev, int budget)
2704{
2705 struct sk_buff *skb;
2706 struct skb_data *entry;
2707 struct cp_lkm_usb_dev* cpdev __attribute__((unused));
2708 unsigned long time_limit = jiffies + 3;
2709 int retval;
2710 int restock = 0;
2711 unsigned long flags;
2712 int rx_work_done = 0;
2713 int tx_work_done = 0;
2714 int work_done = 0;
2715 int can_restock = 1;
2716 int i;
2717 int loop;
2718 int num_proc;
2719 int actual_budget;
2720 int num_rx;
2721 int num_tx;
2722 struct sk_buff_head done_q;
2723 bool paused;
2724
2725 skb_queue_head_init (&done_q);
2726
2727 //cpdev->dbg_total_d_done++;
2728 //cpdev->dbg_total_budget += budget;
2729 //cpdev->dbg_total_rx_qlen += cpdev->data_rx_done.qlen;
2730 //cpdev->dbg_total_tx_qlen += cpdev->data_tx_done.qlen;
2731
2732 // if the delay timer is running, we aren't supposed to send any more recv urbs to the usb layer.
2733 // if the device has detached, we need to finish processing done pkts, but don't resubmit any new urbs
2734 if (timer_pending(&cpbdev->rx_delay) || !cp_lkm_usb_is_base_attached(cpbdev)) {
2735 //printk("%s(), cpdev delaying or no longer attached\n", __FUNCTION__);
2736 can_restock = 0;
2737 }
2738
2739 paused = cpbdev->tx_paused;
2740
2741 actual_budget = CP_LKM_USB_NAPI_MAX_WORK;
2742 for(loop=0;loop<CP_LKM_USB_PROCESS_DIVISOR;loop++) {
2743 if(time_after_eq(jiffies, time_limit)) {
2744 //ran out of time, process this one and then bail
2745 work_done = budget;
2746 //cpdev->dbg_total_timeout++;
2747 break;
2748 }
2749 //keep restocking the q until we max out the budget or timeout or runout
2750 if(rx_work_done >= actual_budget || (paused && tx_work_done >= actual_budget)) {
2751 work_done = budget;
2752 break;
2753 }
2754 spin_lock_irqsave(&cpbdev->data_q_lock, flags);
2755 num_rx = cpbdev->data_rx_done.qlen;
2756 num_tx = cpbdev->data_tx_done.qlen;
2757 num_proc = max(num_rx,num_tx);
2758 num_proc = min(num_proc,actual_budget/CP_LKM_USB_PROCESS_DIVISOR); //grab 1/divisor of remaining budget each time
2759 // Note: A unit of work for the shim is either a lone tx, a lone rx or a combo of a rx and a tx.
2760 // Here we calculate how much work to do on this poll. If there was work left over from last time
2761 // finish processing it.
2762 for(i = 0; i < num_proc; i++) {
2763 skb = __skb_dequeue (&cpbdev->data_rx_done);
2764 if(skb){
2765 cpbdev->data_q_len--;
2766 __skb_queue_tail(&done_q, skb);
2767 }
2768 skb = __skb_dequeue (&cpbdev->data_tx_done);
2769 if(skb){
2770 cpbdev->data_q_len--;
2771 __skb_queue_tail(&done_q, skb);
2772 }
2773 }
2774 spin_unlock_irqrestore(&cpbdev->data_q_lock, flags);
2775
2776 //nothing in the q, we are done
2777 if(done_q.qlen == 0) {
2778 break;
2779 }
2780
2781 while((skb = __skb_dequeue(&done_q))){
2782 entry = (struct skb_data *) skb->cb;
2783 //cp_lkm_usb_cnts(entry->state,-1);
2784 switch (entry->state) {
2785 case in_data_done:
2786 //cpdev->dbg_total_rx_proc++;
2787 entry->bep->q_cnt--;
2788 restock++;
2789 rx_work_done++;
2790 work_done++;
2791 if(can_restock && restock == CP_LKM_USB_RESTOCK_MULTIPLE) {
2792 restock = 0;
2793
2794 retval = cp_lkm_usb_submit_recv (cpbdev, entry->urb, GFP_ATOMIC, entry->bep, true);
2795 if (retval < 0) {
2796 //printk("%s(), can't resubmit\n", __FUNCTION__);
2797 //cp_lkm_usb_urb_cnt(-1);
2798 usb_free_urb (entry->urb);
2799 can_restock = 0;
2800 }
2801 }
2802 else{
2803 //cp_lkm_usb_urb_cnt(-1);
2804 usb_free_urb (entry->urb);
2805 }
2806 cp_lkm_usb_data_recv_process(cpbdev, skb);
2807 break;
2808 case out_done:
2809 work_done++;
2810 tx_work_done++;
2811 //fall through on purpose
Kyle Swenson9b510922023-06-27 09:22:55 -06002812#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,4,164))
Harish Ambati2e2e7b32023-02-22 14:21:36 +00002813 fallthrough;
Kyle Swenson9b510922023-06-27 09:22:55 -06002814#endif
Kyle Swenson74ad7532023-02-16 11:05:29 -07002815 case in_data_cleanup:
2816 if(entry->urb) {
2817 //cp_lkm_usb_urb_cnt(-1);
2818 usb_free_urb (entry->urb);
2819 }
2820 dev_kfree_skb_any(skb);
2821 break;
2822
2823 case unlink_start:
2824 default:
2825 //printk("!!data: unknown skb state: %d\n",entry->state);
2826 break;
2827 }
2828 }
2829 }
2830
2831 //restock recv urbs to usb layer if we processed any
2832 if(can_restock) {
2833 cp_lkm_usb_rx_data_restock(cpbdev);
2834 }
2835
2836 //see if we need to resume the tx side
2837 if(tx_work_done) {
2838 spin_lock_irqsave (&cpbdev->out_q.lock, flags);
2839 cpbdev->tx_proc_cnt += tx_work_done;
2840
2841 if(tx_work_done > cpbdev->tx_usb_q_count) {
2842 cpbdev->tx_usb_q_count = 0;
2843 }
2844 else{
2845 cpbdev->tx_usb_q_count -= tx_work_done;
2846 }
2847 if(cpbdev->tx_usb_q_count <= cpbdev->tx_resume_threshold) {
2848 if(cpbdev->tx_paused){
2849 //unpause all cpdevs
2850 cp_lkm_usb_dev_pause(cpbdev, false);
2851 // cancel usb_pause_stuck_timer
2852 cp_lkm_usb_stuck_check(cpbdev, CP_LKM_STUCK_STOP);
2853 }
2854
2855 }
2856 spin_unlock_irqrestore (&cpbdev->out_q.lock, flags);
2857 }
2858
2859 //if(work_done > cpdev->dbg_total_max_work){
2860 // cpdev->dbg_total_max_work = work_done;
2861 //}
2862
2863 //can't return greater than the passed in budget
2864 if(work_done > budget) {
2865 work_done = budget;
2866 }
2867
2868 return work_done;
2869 //return 1;
2870}
2871#endif
2872
2873static int cp_lkm_usb_common_process_data_done(struct cp_lkm_usb_base_dev* cpbdev, int budget)
2874{
2875 unsigned long flags;
2876 int work_done = -1;
2877 bool rescheduled;
2878 bool ran_data_done = false;
2879 if(NULL == cpbdev) {
2880 //printk("%s() !!!!!!!!!!!!!!!!no ctxt\n", __FUNCTION__);
2881 return work_done;
2882 }
2883
2884 spin_lock_irqsave(&cpbdev->processing_state_lock, flags);
2885 if(cpbdev->processing_state == USB_PROCESS_STATE_IDLE){
2886 cpbdev->processing_state = USB_PROCESS_STATE_ACTIVE;
2887 spin_unlock_irqrestore(&cpbdev->processing_state_lock, flags);
2888 work_done = cp_lkm_usb_process_data_done(cpbdev, budget);
2889 spin_lock_irqsave(&cpbdev->processing_state_lock, flags);
2890 ran_data_done = true;
2891 cpbdev->processing_state = USB_PROCESS_STATE_IDLE;
2892 }
2893 spin_unlock_irqrestore(&cpbdev->processing_state_lock, flags);
2894 if (ran_data_done) {
2895 rescheduled = cp_lkm_schedule_data_process(cpbdev,true,true,false);
2896 if (rescheduled) {
2897 work_done = budget;
2898 //cpdev->dbg_total_d_resched++;
2899 }
2900 else if(work_done){
2901 work_done--;
2902 //cpdev->dbg_total_d_comp++;
2903 }
2904 }
2905 else{
2906 //cpdev->dbg_total_sch_sk++;
2907 }
2908 return work_done;
2909}
2910
2911
2912static void cp_lkm_usb_process_data_done_tasklet (unsigned long param)
2913{
2914 struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev *)param;
2915
2916 cp_lkm_usb_common_process_data_done(cpbdev, CP_LKM_PM_NAPI_WEIGHT);
2917}
2918
2919
2920static void cp_lkm_usb_rx_data_restock (struct cp_lkm_usb_base_dev* cpbdev)
2921{
2922 //struct cp_lkm_usb_dev* cpdev = (struct cp_lkm_usb_dev *)param;
2923 //int cur_token;
2924 struct urb *urb;
2925 //int ep_index;
2926 int q_len;
2927 struct cp_lkm_base_ep* bep;
2928 int retval;
2929 int q_cnt;
2930
2931 // timer_pending means we had an error and are waiting for a recovery period before submitting any more rx urbs
2932 if (timer_pending(&cpbdev->rx_delay)) {
2933 return;
2934 }
2935
2936 // restock the recv queues on any ep's that are listening
2937 bep = cp_lkm_usb_get_bep(cpbdev, cpbdev->data_in_bep_num);
2938 if(!(bep->con_flags & CP_LKM_USB_LISTEN) && !(bep->con_flags & CP_LKM_USB_RECV)) {
2939 return;
2940 }
2941 if(test_bit (EVENT_RX_HALT, &bep->err_flags)){
2942 return;
2943 }
2944
2945 if(bep->con_flags & CP_LKM_USB_RECV) {
2946 //only post 1 for recv's
2947 q_len = 1;
2948 }
2949 else{
2950 //its a listen
2951 q_len = CP_LKM_USB_MAX_RX_QLEN;
2952 }
2953
2954 // Try to q up to q_len recv buffs with usb. We may not be able to get to that amount if
2955 // there is a problem with usb, so only try up to q_len times to insert them.
2956 retval = 0;
2957 q_cnt = bep->q_cnt;
2958
2959 while(q_cnt < q_len) {
2960 urb = usb_alloc_urb (0, GFP_ATOMIC);
2961 if (!urb) {
2962 if (q_cnt == 0) {
2963 cp_lkm_usb_defer_kevent (cpbdev, bep, EVENT_RX_MEMORY);
2964 }
2965 break;
2966 }
2967 //cp_lkm_usb_urb_cnt(1);
2968 retval = cp_lkm_usb_submit_recv (cpbdev, urb, GFP_ATOMIC, bep, true);
2969 if (retval < 0) {
2970 //cp_lkm_usb_urb_cnt(-1);
2971 usb_free_urb (urb);
2972 break;
2973 }
2974 q_cnt++;
2975 }
2976}
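// Ownership in the loop above: cp_lkm_usb_submit_recv() allocates the skb and, on
// success, queues the urb/skb pair (the completion handler then owns both); only
// on failure does the caller still own - and must free - the urb. A condensed
// sketch of that contract:
#if 0
static void urb_restock_sketch(struct cp_lkm_usb_base_dev *cpbdev,
                               struct cp_lkm_base_ep *bep, int wanted)
{
    while (bep->q_cnt < wanted) {
        struct urb *urb = usb_alloc_urb(0, GFP_ATOMIC);
        if (!urb) {
            cp_lkm_usb_defer_kevent(cpbdev, bep, EVENT_RX_MEMORY);
            break;                               /* retry from the workqueue */
        }
        if (cp_lkm_usb_submit_recv(cpbdev, urb, GFP_ATOMIC, bep, true) < 0) {
            usb_free_urb(urb);                   /* never queued; still ours */
            break;
        }
        /* on success submit_recv bumped bep->q_cnt under the in_q lock */
    }
}
#endif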
2977
2978static void cp_lkm_usb_rx_other_restock (struct cp_lkm_usb_base_dev* cpbdev)
2979{
2980 struct urb *urb;
2981 int q_len;
2982 struct cp_lkm_base_ep* bep;
2983 int retval;
2984 int q_cnt;
2985 struct list_head *entry, *nxt;
2986
2987 // timer_pending means we had an error and are waiting for a recovery period before submitting any more rx urbs
2988 if (timer_pending(&cpbdev->rx_delay)) {
2989 return;
2990 }
2991
2992 // restock the recv queues on any ep's that are listening
2993 list_for_each_safe(entry, nxt, &cpbdev->in_bep_list) {
2994 bep = list_entry(entry, struct cp_lkm_base_ep, list);
2995 if(!(bep->con_flags & CP_LKM_USB_LISTEN) && !(bep->con_flags & CP_LKM_USB_RECV)) {
2996 continue;
2997 }
2998 if(test_bit (EVENT_RX_HALT, &bep->err_flags)){
2999 continue;
3000 }
3001 if(bep->ep_num == cpbdev->data_in_bep_num) {
3002 continue;
3003 }
3004
3005 if(bep->con_flags & CP_LKM_USB_RECV) {
3006 //only post 1 for recv's
3007 q_len = 1;
3008 }
3009 else{
3010 //its a listen
3011 q_len = CP_LKM_USB_MAX_OTHER_QLEN;
3012 }
3013
3014 // Try to q up to q_len recv buffs with usb. We may not be able to get to that amount if
3015 // there is a problem with usb, so only try up to q_len times to insert them.
3016 retval = 0;
3017 q_cnt = bep->q_cnt;
3018
3019 while(q_cnt < q_len) {
3020 urb = usb_alloc_urb (0, GFP_ATOMIC);
3021 if (!urb) {
3022 if (q_cnt == 0) {
3023 cp_lkm_usb_defer_kevent (cpbdev, bep, EVENT_RX_MEMORY);
3024 }
3025 break;
3026 }
3027 //cp_lkm_usb_urb_cnt(1);
3028 retval = cp_lkm_usb_submit_recv (cpbdev, urb, GFP_ATOMIC, bep, false);
3029 if (retval < 0) {
3030 //cp_lkm_usb_urb_cnt(-1);
3031 usb_free_urb (urb);
3032 break;
3033 }
3034 q_cnt++;
3035 }
3036 }
3037}
3038
3039//unlink all urbs with the given ep, or all if ep is NULL
3040static int cp_lkm_usb_unlink_urbs (struct cp_lkm_usb_base_dev *cpbdev, struct sk_buff_head *q, struct cp_lkm_base_ep* bep)
3041{
3042 unsigned long flags;
3043 struct sk_buff *skb;
3044 int count = 0;
3045
3046 spin_lock_irqsave (&q->lock, flags);
3047 while (!skb_queue_empty(q)) {
3048 struct skb_data *entry;
3049 struct urb *urb;
3050 int retval;
3051
3052 skb_queue_walk(q, skb) {
3053 entry = (struct skb_data *) skb->cb;
3054 urb = entry->urb;
3055 if(urb && (entry->state != unlink_start) && (entry->bep == bep || bep == NULL)) {
3056 goto found;
3057 }
3058 }
3059 break;
3060found:
3061 entry->state = unlink_start;
3062
3063            /*
3064             * Take a reference on the URB so that it cannot be
3065             * freed while usb_unlink_urb runs. Without it there is
3066             * a use-after-free risk inside usb_unlink_urb, since
3067             * usb_unlink_urb always races with the .complete
3068             * handler (including defer_bh).
3069             */
3070 usb_get_urb(urb);
3071 spin_unlock_irqrestore(&q->lock, flags);
3072 // during some PM-driven resume scenarios,
3073 // these (async) unlinks complete immediately
3074 //usb_kill_urb(urb);
3075 retval = usb_unlink_urb (urb);
3076 //g_unlink_cnt++;
3077 if (retval != -EINPROGRESS && retval != 0){
3078 //netdev_dbg(dev->net, "unlink urb err, %d\n", retval);
3079 } else{
3080 count++;
3081 }
3082 usb_put_urb(urb);
3083 spin_lock_irqsave(&q->lock, flags);
3084 }
3085 spin_unlock_irqrestore (&q->lock, flags);
3086 return count;
3087}
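// The get/unlink/put sequence above is the standard pattern for asynchronously
// cancelling a URB that races with its own completion handler: pin the URB with a
// reference, drop the queue lock (on some host controllers the unlink can invoke
// the completion synchronously), then release the reference. In isolation:
#if 0
static void safe_async_unlink_sketch(struct urb *urb)
{
    int ret;

    usb_get_urb(urb);            /* pin across the unlink */
    /* the queue spinlock must NOT be held here */
    ret = usb_unlink_urb(urb);   /* -EINPROGRESS means the cancel was queued */
    if (ret != -EINPROGRESS && ret != 0) {
        /* urb already completed (or was never submitted); nothing to do */
    }
    usb_put_urb(urb);            /* completion path holds its own reference */
}
#endif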
3088
3089
3090static void cp_lkm_usb_defer_kevent (struct cp_lkm_usb_base_dev* cpbdev, struct cp_lkm_base_ep* bep, int work)
3091{
3092 set_bit (work, &bep->err_flags);
3093 if (!schedule_work (&cpbdev->kevent)) {
3094 //deverr (dev, "kevent %d may have been dropped", work);
3095 } else {
3096 //devdbg (dev, "kevent %d scheduled", work);
3097 }
3098}
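// Note on the ignored schedule_work() result above: it returns false only when the
// work item is already queued, which is benign here because the err_flags bit set
// before the call survives and the pending cp_lkm_usb_kevent() run will service it.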
3099
3100// Workqueue callback function. This runs in thread context
3101static void cp_lkm_usb_kevent (struct work_struct *work)
3102{
3103 struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev *)container_of(work, struct cp_lkm_usb_base_dev, kevent);
3104 int status;
3105 struct cp_lkm_base_ep* bep;
3106 struct list_head *entry, *nxt;
3107
3108
3109 //grab global lock while testing dev state so it can't change on us.
3110 spin_lock(&cp_lkm_usb_mgr.lock);
3111 if(!cp_lkm_usb_is_base_attached(cpbdev)){
3112 spin_unlock(&cp_lkm_usb_mgr.lock);
3113 return;
3114 }
3115
3116 //don't want to hold global lock while doing this since don't know how long this will take, see next note
3117 spin_unlock(&cp_lkm_usb_mgr.lock);
3118
3119
3120 //NOTE: if kernel preemption is enabled and the disconnect gets called right here, bad things could happen if the cpdev->udev
3121 // is released. Fortunately, cp_lkm_usb_disconnect() calls cancel_work_sync() before releasing it. This will either cancel this
3122 // function if it isn't currently running, or will wait until it exits before returning if it is running. This protects us.
3123
3124 list_for_each_safe(entry, nxt, &cpbdev->out_bep_list) {
3125 bep = list_entry(entry, struct cp_lkm_base_ep, list);
3126 /* usb_clear_halt() needs a thread context */
3127 if (test_bit (EVENT_TX_HALT, &bep->err_flags)) {
3128 cp_lkm_usb_unlink_urbs (cpbdev, &cpbdev->out_q, bep);
3129 status = usb_clear_halt (cpbdev->udev, bep->pipe);
3130 DEBUG_TRACE("%s() EVENT_TX_HALT status:%d", __FUNCTION__, status);
3131 if (status < 0 && status != -EPIPE && status != -ESHUTDOWN) {
3132 //if (netif_msg_tx_err (dev))
3133                //     deverr (dev, "can't clear tx halt, status %d",
3134                //             status);
3135                DEBUG_TRACE("%s() failed EVENT_TX_HALT status:%d", __FUNCTION__, status);
3136 } else {
3137 clear_bit (EVENT_TX_HALT, &bep->err_flags);
3138 //if (status != -ESHUTDOWN)
3139 // netif_wake_queue (dev->net);
3140 }
3141 }
3142 }
3143
3144 list_for_each_safe(entry, nxt, &cpbdev->in_bep_list) {
3145 bep = list_entry(entry, struct cp_lkm_base_ep, list);
3146 if (test_bit (EVENT_RX_HALT, &bep->err_flags)) {
3147 cp_lkm_usb_unlink_urbs (cpbdev, &cpbdev->in_q, bep);
3148 status = usb_clear_halt (cpbdev->udev, bep->pipe);
3149 DEBUG_TRACE("%s() EVENT_RX_HALT status:%d", __FUNCTION__, status);
3150 if (status < 0 && status != -EPIPE && status != -ESHUTDOWN) {
3151 DEBUG_TRACE("%s() failed EVENT_RX_HALT status:%d", __FUNCTION__, status);
3152 //if (netif_msg_rx_err (dev))
3153 // deverr (dev, "can't clear rx halt, status %d",
3154 // status);
3155 } else {
3156 clear_bit (EVENT_RX_HALT, &bep->err_flags);
3157 //grab global lock so link/unlink or unplug can't mess up the restock shedule pointers mid scheduling
3158                //grab global lock so link/unlink or unplug can't mess up the restock schedule pointers mid-scheduling
3159 if (cp_lkm_usb_is_base_attached(cpbdev)){
3160 cp_lkm_schedule_rx_restock(cpbdev,bep);
3161 }
3162 spin_unlock(&cp_lkm_usb_mgr.lock);
3163
3164 }
3165 }
3166 }
3167 /* tasklet could resubmit itself forever if memory is tight */
3168 list_for_each_safe(entry, nxt, &cpbdev->in_bep_list) {
3169 bep = list_entry(entry, struct cp_lkm_base_ep, list);
3170 if (test_bit (EVENT_RX_MEMORY, &bep->err_flags)) {
3171 DEBUG_TRACE("%s() EVENT_RX_MEMORY", __FUNCTION__);
3172
3173 clear_bit (EVENT_RX_MEMORY, &bep->err_flags);
3174
3175 //grab global lock so link/unlink or unplug can't mess up the restock shedule pointers mid scheduling
3176            //grab global lock so link/unlink or unplug can't mess up the restock schedule pointers mid-scheduling
3177 if (cp_lkm_usb_is_base_attached(cpbdev) && bep->q_cnt == 0){
3178 cp_lkm_schedule_rx_restock(cpbdev,bep);
3179
3180 }
3181 spin_unlock(&cp_lkm_usb_mgr.lock);
3182 }
3183 }
3184 //if (test_bit (EVENT_LINK_RESET, &cpdev->flags)) {
3185 // struct driver_info *info = dev->driver_info;
3186 // int retval = 0;
3187 //
3188 // clear_bit (EVENT_LINK_RESET, &dev->flags);
3189 // if(info->link_reset && (retval = info->link_reset(dev)) < 0) {
3190 // devinfo(dev, "link reset failed (%d) usbnet usb-%s-%s, %s",
3191 // retval,
3192 // dev->udev->bus->bus_name, dev->udev->devpath,
3193 // info->description);
3194 // }
3195 //}
3196
3197 //if (dev->flags)
3198 // devdbg (dev, "kevent done, flags = 0x%lx",
3199 // dev->flags);
3200}
3201
3202static void cp_lkm_usb_ctrl_complete(struct urb *urb)
3203{
3204 unsigned long flags;
3205 struct sk_buff *skb = (struct sk_buff *) urb->context;
3206 struct skb_data *entry = (struct skb_data *) skb->cb;
3207 struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev *)entry->cpbdev;
3208
3209    //remove skb from the list first thing so no other code context looking at the
3210 //list (such as unlink_urbs) can mess with it.
3211 spin_lock_irqsave(&cpbdev->ctrlq.lock, flags);
3212 __skb_unlink(skb, &cpbdev->ctrlq);
3213 spin_unlock_irqrestore(&cpbdev->ctrlq.lock,flags);
3214
3215 skb->len = urb->actual_length;
3216
3217 //skip status and error checking if the device has unplugged
3218 if(!cp_lkm_usb_is_base_attached(cpbdev)) {
3219 urb->status = -ENODEV;
3220 goto ctrl_done;
3221 }
3222
3223 if (urb->status != 0) {
3224 switch (urb->status) {
3225 case -EPIPE:
3226 break;
3227
3228 /* software-driven interface shutdown */
3229 case -ECONNRESET: // async unlink
3230 case -ESHUTDOWN: // hardware gone
3231 break;
3232
3233 case -ENODEV:
3234 //printk("ctrl fail, no dev\n");
3235 break;
3236
3237 case -EPROTO:
3238 case -ETIME:
3239 case -EILSEQ:
3240 //CA: decided not to throttle on ctrl channel transfers since they are a different beast
3241 //if (!timer_pending (&cpdev->rx_delay)) {
3242 // mod_timer (&cpdev->rx_delay, jiffies + THROTTLE_JIFFIES);
3243 //if (netif_msg_link (dev))
3244 // devdbg (dev, "tx throttle %d",
3245 // urb->status);
3246 //}
3247 //netif_stop_queue (dev->net);
3248 break;
3249 default:
3250 //if (netif_msg_tx_err (dev))
3251 // devdbg (dev, "tx err %d", entry->urb->status);
3252 break;
3253 }
3254 }
3255
3256ctrl_done:
3257 urb->dev = NULL;
3258 entry->state = ctrl_done;
3259 entry->status = urb->status;
3260 entry->urb = NULL;
3261 if(urb->setup_packet) {
3262 kfree(urb->setup_packet);
3263 }
3264 //cp_lkm_usb_urb_cnt(-1);
3265 usb_free_urb (urb);
3266 cp_lkm_usb_done_and_defer_other(cpbdev, skb);
3267}
3268
3269
3270static int cp_lkm_usb_start_ctrl_xmit(void *ctx, struct sk_buff *skb_in)
3271{
3272 struct cp_lkm_usb_dev* cpdev = (struct cp_lkm_usb_dev *)ctx;
3273 struct cp_lkm_usb_base_dev* cpbdev;
3274 int retval = NET_XMIT_SUCCESS;
3275 struct urb *urb = NULL;
3276 struct skb_data *entry;
3277 unsigned long flags;
3278 int pipe;
3279 u8* tmp8;
3280 u16* tmp16;
3281 struct usb_ctrlrequest *req = NULL;
3282
3283 if(NULL == cpdev || !cp_lkm_usb_is_attached(cpdev) || !cp_lkm_usb_is_base_attached(cpdev->cpbdev)) {
3284 //printk("%s() no ctxt\n", __FUNCTION__);
3285 goto ctrl_done;
3286 }
3287
3288 cpbdev = cpdev->cpbdev;
3289
3290 DEBUG_TRACE("%s()", __FUNCTION__);
3291
3292 if ((urb = usb_alloc_urb(0, GFP_ATOMIC)) == NULL) {
3293 retval = -ENOMEM;
3294 goto ctrl_done;
3295 }
3296 //cp_lkm_usb_urb_cnt(1);
3297
3298 if ((req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC)) == NULL) {
3299 //cp_lkm_usb_urb_cnt(-1);
3300 usb_free_urb(urb);
3301 retval = -ENOMEM;
3302 goto ctrl_done;
3303 }
3304
3305    //The upper-layer driver packed the ctrl request fields at the front of the buffer (already in le order).
3306    //This layer peels them off into a separate usb_ctrlrequest buffer.
3307 tmp8 = (u8*)skb_in->data;
3308 req->bRequestType = *tmp8;
3309 skb_pull(skb_in, 1);
3310
3311 tmp8 = (u8*)skb_in->data;
3312 req->bRequest = *tmp8;
3313 skb_pull(skb_in, 1);
3314
3315 tmp16 = (u16*)skb_in->data;
3316 req->wValue = *tmp16;
3317 skb_pull(skb_in, 2);
3318
3319 tmp16 = (u16*)skb_in->data;
3320 req->wIndex = *tmp16;
3321 skb_pull(skb_in, 2);
3322
3323 tmp16 = (u16*)skb_in->data;
3324 req->wLength = *tmp16;
3325 skb_pull(skb_in, 2);
3326 //printk("%s() RT:%x, R:%x, V:%x, I:%x, L:%x\n", __FUNCTION__, req->bRequestType, req->bRequest, req->wValue, req->wIndex, req->wLength);
3327
3328 entry = (struct skb_data *) skb_in->cb;
3329 entry->urb = urb;
3330 entry->cpbdev = cpbdev;
3331 entry->state = ctrl_start;
3332 entry->status = 0;
3333 entry->bep = NULL;
3334 entry->unique_id = cpdev->unique_id;
3335
3336 if(req->bRequestType & USB_DIR_IN) {
3337 DEBUG_TRACE("%s() ctrl in len: %d", __FUNCTION__,skb_in->len);
3338 pipe = usb_rcvctrlpipe(cpbdev->udev, 0);
3339 }
3340 else{
3341 DEBUG_TRACE("%s() ctrl out len: %d", __FUNCTION__,skb_in->len);
3342 pipe = usb_sndctrlpipe(cpbdev->udev, 0);
3343 }
3344
3345 usb_fill_control_urb(urb, cpbdev->udev, pipe,
3346 (void *)req, skb_in->data, skb_in->len,
3347 cp_lkm_usb_ctrl_complete, skb_in);
3348
3349 //cp_lkm_usb_cnts(ctrl_start,1);
3350 spin_lock_irqsave (&cpbdev->ctrlq.lock, flags);
3351 retval = usb_submit_urb (urb, GFP_ATOMIC);
3352 switch (retval) {
3353 case 0:
3354 //net->trans_start = jiffies;
3355 //success: queue it
3356 __skb_queue_tail (&cpbdev->ctrlq, skb_in);
3357 skb_in = NULL;
3358 urb = NULL;
3359 req = NULL;
3360 break;
3361 case -ENODEV:
3362 break;
3363 case -EPROTO:
3364 case -EPIPE:
3365 break;
3366 default:
3367 break;
3368 }
3369 spin_unlock_irqrestore (&cpbdev->ctrlq.lock, flags);
3370
3371ctrl_done:
3372 if(req) {
3373 kfree(req);
3374 }
3375 if(urb) {
3376 //cp_lkm_usb_urb_cnt(-1);
3377 usb_free_urb(urb);
3378 }
3379 if(skb_in) {
3380 //cp_lkm_usb_cnts(ctrl_start,-1);
3381 dev_kfree_skb_any (skb_in);
3382 }
3383
3384 DEBUG_TRACE("%s() retval %d", __FUNCTION__, retval);
3385
3386 return retval;
3387}
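// The unpacking above implies a wire format where the upper layer prepends the
// eight usb_ctrlrequest bytes, already little-endian, ahead of any payload. A
// sketch of the matching sender-side packing (hypothetical helper, not part of
// this driver):
#if 0
static void pack_ctrl_skb_sketch(struct sk_buff *skb, u8 reqtype, u8 req,
                                 u16 value, u16 index, u16 len)
{
    u8 *p = skb_push(skb, 8);      /* payload bytes are already in the skb */

    p[0] = reqtype;
    p[1] = req;
    put_unaligned_le16(value, p + 2);   /* fields stored le, no swap on read */
    put_unaligned_le16(index, p + 4);
    put_unaligned_le16(len,   p + 6);
}
#endif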
3388
3389
3390#define THROTTLE_JIFFIES (HZ/8)
3391/*
3392 * This function runs in a hw interrupt context. Do not put any DEBUG_XX print messages in here.
3393*/
3394static void cp_lkm_usb_xmit_complete (struct urb *urb)
3395{
3396 unsigned long flags;
3397 struct sk_buff *skb = (struct sk_buff *) urb->context;
3398 struct skb_data *entry = (struct skb_data *) skb->cb;
3399 struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev *)entry->cpbdev;
3400 struct cp_lkm_base_ep* bep = (struct cp_lkm_base_ep*)entry->bep;
3401 bool is_data = false;
3402 struct cp_lkm_usb_dev* cpdev;
3403
3404 //remove skb from the list first thing so no other code context looking at the
3405 //list (such as unlink_urbs) can mess with it.
3406 spin_lock_irqsave(&cpbdev->out_q.lock,flags);
3407 __skb_unlink(skb, &cpbdev->out_q);
3408 spin_unlock_irqrestore(&cpbdev->out_q.lock,flags);
3409
3410 bep->q_cnt--;
3411
3412 if(bep->ep_num == cpbdev->data_out_bep_num) {
3413 is_data = true;
3414 }
3415
3416    // we save the mux id of the cpdev that sent each tx pkt.
3417 cpdev = cp_lkm_usb_find_dev(entry->unique_id);
3418
3419 //skip status and error checking if the device has unplugged
3420 if(!cp_lkm_usb_is_base_attached(cpbdev)) {
3421 goto xmit_done;
3422 }
3423
3424 if (urb->status != 0) {
3425 UPDATE_STATS(cpdev->edi->pm_stats64_ctx, tx_errors, 1);
3426 switch (urb->status) {
3427 case -EPIPE:
3428 //don't have to clear halts on ctrl ep
3429 if (bep->ep_num != 0) {
3430 cp_lkm_usb_defer_kevent (cpbdev, bep, EVENT_TX_HALT);
3431 }
3432 break;
3433
3434 /* software-driven interface shutdown */
3435 case -ECONNRESET: // async unlink
3436 case -ESHUTDOWN: // hardware gone
3437 break;
3438
3439 case -ENODEV:
3440 break;
3441
3442 // like rx, tx gets controller i/o faults during khubd delays
3443 // and so it uses the same throttling mechanism.
3444 case -EPROTO:
3445 case -ETIME:
3446 case -EILSEQ:
3447 if (!timer_pending (&cpbdev->rx_delay)) {
3448 mod_timer (&cpbdev->rx_delay, jiffies + THROTTLE_JIFFIES);
3449 //if (netif_msg_link (dev))
3450 // devdbg (dev, "tx throttle %d",
3451 // urb->status);
3452 }
3453 //netif_stop_queue (dev->net);
3454 break;
3455 default:
3456 //if (netif_msg_tx_err (dev))
3457 // devdbg (dev, "tx err %d", entry->urb->status);
3458 break;
3459 }
3460 }
3461
3462xmit_done:
3463 entry->state = out_done;
3464
3465 if(is_data) {
3466 //cpdev->dbg_total_tx_irq++;
3467 cp_lkm_usb_done_and_defer_data(cpbdev, skb, DATA_SRC_TX);
3468 }
3469 else{
3470 cp_lkm_usb_done_and_defer_other(cpbdev, skb);
3471 }
3472}
3473
3474static int cp_lkm_usb_start_xmit_common(void *ctx, struct sk_buff *skb_in, int src, struct cp_lkm_ep* ep)
3475{
3476 struct cp_lkm_usb_dev* cpdev = (struct cp_lkm_usb_dev *)ctx;
3477 struct cp_lkm_usb_base_dev* cpbdev;
3478 struct cp_lkm_base_ep* bep;
3479 int length;
3480 int retval = NET_XMIT_SUCCESS;
3481 struct urb *urb = NULL;
3482 struct skb_data *entry;
3483 unsigned long flags;
3484 struct sk_buff* skb_out = NULL;
3485 int wres;
3486
3487 if(NULL == cpdev || !cp_lkm_usb_is_attached(cpdev) || !cp_lkm_usb_is_base_attached(cpdev->cpbdev)) {
3488 //printk("%s() no ctxt\n", __FUNCTION__);
3489 dev_kfree_skb_any(skb_in);
3490 return -1;
3491 }
3492
3493 cpbdev = cpdev->cpbdev;
3494
3495    //the network layer doesn't have a pointer to the ep readily available, so it passes NULL for ep and we
3496    //fetch the well-known data-out ep here
3497 length = 0;
3498 if(src == CP_LKM_WRAPPER_SRC_DATA && ep == NULL){
3499 ep = cp_lkm_usb_get_ep(cpdev,cpdev->data_out_ep_num);
3500 length = skb_in->len;
3501 }
3502 bep = ep->bep;
3503
3504 while(1) {
3505 skb_out = NULL;
3506 urb = NULL;
3507 retval = NET_XMIT_SUCCESS;
3508
3509 //DEBUG_ERROR("%s() wrap it skb_in:%p", __FUNCTION__, skb_in);
3510
3511 //only use wrappers on the data endpoint
3512 if(ep->ep_num == cpdev->data_out_ep_num) {
3513 //DEBUG_ERROR("%s() wrap it", __FUNCTION__);
3514 //spin_lock_irqsave (&cp_lkm_usb_mgr.lock, flags);
3515 wres = cp_lkm_wrapper_send(cpbdev->wrapper_ctxt, src, cpdev->mux_id, skb_in, &skb_out);
3516 skb_in = NULL; //we no longer own skb so null its pointer for future call if we loop
3517 //spin_unlock_irqrestore (&cp_lkm_usb_mgr.lock, flags);
3518 if (wres == CP_LKM_WRAPPER_RES_ERROR) {
3519 DEBUG_ERROR("%s() wrapper error wres:0x%x, skb_out:%p", __FUNCTION__, wres, skb_out);
3520 UPDATE_STATS(cpdev->edi->pm_stats64_ctx, tx_dropped, 1);
3521 retval = -ENOMEM;
3522 goto xmit_done;
3523 }
3524 }
3525 else{
3526 //Not a data ep, send the skb and then we are done
3527 skb_out = skb_in;
3528 skb_in = NULL;
3529 wres = CP_LKM_WRAPPER_RES_DONE;
3530 }
3531
3532 //If we get here, send returned either done or again. skb_out can be NULL if there is nothing to
3533 //send, so check that first
3534 if(NULL == skb_out) {
3535// DEBUG_INFO("%s() no wrapped data", __FUNCTION__);
3536 goto xmit_done;
3537 }
3538
3539 if(cp_lkm_is_broadcom && ((uintptr_t)(skb_out->data) & 0x3)) {
3540 //broadcom unaligned packets that are multiples of 512 plus 3,4 or 5 bytes (515,516,517,1027,1028,1029,etc)
3541 //are corrupted for some reason, so need to copy into an aligned buffer
3542 int r = skb_out->len & 0x000001FF; //poor man's mod
3543 if (r >= 3 && r <= 5) {
3544 struct sk_buff* skb_new = skb_copy_expand(skb_out, 0, 0, GFP_ATOMIC);
3545 if(!skb_new) {
3546 retval = -ENOMEM;
3547 goto xmit_done;
3548 }
3549 //printk("%s() unaligned: %p, aligned: %p, len: %d, r: %d\n",__FUNCTION__,skb_out->data, skb_new->data, skb_out->len, r);
3550 dev_kfree_skb_any(skb_out);
3551 skb_out=skb_new;
3552 }
3553 }
3554
3555 if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
3556 //if (netif_msg_tx_err (dev))
3557 // devdbg (dev, "no urb");
3558 DEBUG_ERROR("%s() urb alloc failed", __FUNCTION__);
3559 UPDATE_STATS(cpdev->edi->pm_stats64_ctx, tx_dropped, 1);
3560 retval = -ENOMEM;
3561 goto xmit_done;
3562 }
3563 //cp_lkm_usb_urb_cnt(1);
3564 entry = (struct skb_data *) skb_out->cb;
3565 entry->urb = urb;
3566 entry->cpbdev = cpbdev;
3567 entry->bep = bep;
3568 entry->state = out_start;
3569 entry->unique_id = cpdev->unique_id;
3570 //cp_lkm_usb_cnts(out_start,1);
3571
3572 if(bep->type == UE_BULK) {
3573 usb_fill_bulk_urb (urb, cpbdev->udev, bep->pipe, skb_out->data,
3574 skb_out->len, cp_lkm_usb_xmit_complete, skb_out);
3575 }
3576 else{
3577 usb_fill_int_urb (urb, cpbdev->udev, bep->pipe, skb_out->data, skb_out->len,
3578 cp_lkm_usb_xmit_complete, skb_out, bep->interval);
3579 }
3580
3581 if (!(cpbdev->feature_flags & CP_LKM_FEATURE_NO_ZERO_PACKETS)) {
3582 urb->transfer_flags |= URB_ZERO_PACKET;
3583 }
3584
3585 // DEBUG_INFO("%s()", __FUNCTION__);
3586 // DEBUG_INFO("%s() send to ep: 0x%x type:%d, pipe:0x%x", __FUNCTION__, ep->ep_num, ep->type, ep->pipe);
3587
3588 spin_lock_irqsave (&cpbdev->out_q.lock, flags);
3589 retval = usb_submit_urb (urb, GFP_ATOMIC);
3590 switch (retval) {
3591 case 0:
3592 //net->trans_start = jiffies;
3593 //success: queue it
3594 __skb_queue_tail (&cpbdev->out_q, skb_out);
3595 bep->q_cnt++;
3596 skb_out = NULL;
3597 urb = NULL;
3598 if(ep->ep_num == cpdev->data_out_ep_num) {
3599 cpbdev->tx_usb_q_count++;
3600 if(cpbdev->tx_usb_q_count >= CP_LKM_USB_TX_PAUSE_Q_PKTS){
3601 if(!cpbdev->tx_paused) {
3602 //pause all cpdevs
3603 cp_lkm_usb_dev_pause(cpbdev, true);
3604 cp_lkm_usb_stuck_check(cpbdev, CP_LKM_STUCK_START);
3605 }
3606 }
3607 }
3608 break;
3609 case -EPIPE:
3610 UPDATE_STATS(cpdev->edi->pm_stats64_ctx, tx_errors, 1);
3611 //don't clear halts on ctrl ep
3612 if(ep->ep_num != 0) {
3613 cp_lkm_usb_defer_kevent(cpbdev, bep, EVENT_TX_HALT);
3614 }
3615 break;
3616 case -ENODEV:
3617 break;
3618 case -EPROTO:
3619 default:
3620 //if (netif_msg_tx_err (dev))
3621 UPDATE_STATS(cpdev->edi->pm_stats64_ctx, tx_errors, 1);
3622 // devdbg (dev, "tx: submit urb err %d", retval);
3623 break;
3624 }
3625 spin_unlock_irqrestore (&cpbdev->out_q.lock, flags);
3626
3627xmit_done:
3628 if (retval) {
3629 DEBUG_TRACE("%s() failed to send: %d", __FUNCTION__, retval);
3630 //cp_lkm_usb_cnts(out_start,-1);
3631 }
3632
3633 //if these are non null then they weren't sent so free them
3634 if (skb_out){
3635 dev_kfree_skb_any (skb_out);
3636 }
3637 if(urb) {
3638 //cp_lkm_usb_urb_cnt(-1);
3639 usb_free_urb (urb);
3640 }
3641
3642 //Bail out of while loop unless the wrapper asked to be called again
3643 if(wres != CP_LKM_WRAPPER_RES_AGAIN) {
3644 break;
3645 }
3646
3647 length = 0;
3648
3649 }
3650 return retval;
3651}
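// Worked example of the Broadcom length test above: a 515-byte packet gives
// r = 515 & 0x1FF = 3, so an unaligned buffer is copied into an aligned one;
// 514 bytes (r = 2) or an already 4-byte-aligned buffer goes out untouched.
// The test in isolation:
#if 0
static bool needs_realign_copy_sketch(const struct sk_buff *skb)
{
    int r = skb->len & 0x1FF;                 /* skb->len mod 512 */

    return ((uintptr_t)skb->data & 0x3) &&    /* not 4-byte aligned */
           (r >= 3 && r <= 5);                /* 512n+3 .. 512n+5 sizes */
}
#endif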
3652
3653static int cp_lkm_usb_start_xmit (void *ctx, struct sk_buff *skb)
3654{
3655 struct cp_lkm_usb_dev* cpdev = (struct cp_lkm_usb_dev *)ctx;
3656 struct cp_lkm_usb_base_dev* cpbdev;
3657 int res;
3658
3659 if(NULL == cpdev){
3660 DEBUG_TRACE("%s() no ctxt", __FUNCTION__);
3661 dev_kfree_skb_any(skb);
3662 return -1;
3663 }
3664 cpbdev = cpdev->cpbdev;
3665 if(cpbdev->tx_paused || CP_LKM_USB_ACTIVE != cpdev->state) {
3666        DEBUG_TRACE("%s() paused or not active", __FUNCTION__);
3667 dev_kfree_skb_any(skb);
3668 return -1;
3669 }
3670 res = cp_lkm_usb_start_xmit_common(ctx, skb, CP_LKM_WRAPPER_SRC_DATA, NULL);
3671 return res;
3672}
3673
3674static int cp_lkm_usb_to_cplkm_status(int usb_status)
3675{
3676 int cplkm_status;
3677 switch(usb_status) {
3678 case 0:
3679 cplkm_status = CP_LKM_STATUS_OK;
3680 break;
3681 default:
3682 //printk("usb err: %d\n", usb_status);
3683 cplkm_status = CP_LKM_STATUS_ERROR;
3684 break;
3685 }
3686 return cplkm_status;
3687}
3688
3689static void cp_lkm_usb_other_recv_process (struct cp_lkm_usb_base_dev* cpbdev, struct sk_buff *skb_in)
3690{
3691 struct skb_data *entry;
3692 struct cp_lkm_msg_hdr hdr;
3693 int status;
3694 struct cp_lkm_base_ep* bep;
3695 struct cp_lkm_usb_dev* cpdev = NULL;
3696 struct list_head *tmp, *nxt;
3697 struct cp_lkm_ep *ep;
3698
3699 if(!cp_lkm_usb_is_base_attached(cpbdev)){
3700 //printk("%s(), cpbdev: %p not attached. state: %d\n",__FUNCTION__,cpbdev,cpbdev->base_state);
3701 dev_kfree_skb_any (skb_in);
3702 return;
3703 }
3704 entry = (struct skb_data *)skb_in->cb;
3705 bep = entry->bep;
3706
3707 //Note: pkts on non-data endpoints when running with clones present a problem because there are no headers on these
3708 // pkts to tell us which clone ep to send this to. Fortunately, the modem stack serializes clone instances so
3709 // only one can be accessing the non-data endpoints at a time. In order to get any responses from the module
3710 // over their endpoint, they must be either listening or have posted a recv. We use this fact to find the
3711 // ep we need to send the recv back on.
3712 list_for_each_safe(tmp, nxt, &bep->eps) {
3713 ep = list_entry(tmp, struct cp_lkm_ep, list_bep);
3714 if (ep->con_flags & (CP_LKM_USB_LISTEN | CP_LKM_USB_RECV)) {
3715 cpdev = ep->cpdev;
3716 if (ep->con_flags & CP_LKM_USB_RECV) {
3717 //can only have one recv pending on non-data endpoints for a given ep number.
3718 //therefor when the clone is done, the base is done
3719                 //therefore when the clone is done, the base is done
3720 bep->con_flags &= ~CP_LKM_USB_RECV;
3721 }
3722 //printk("%s(), other data cpdev: %p, ep: %p, num: 0x%x, flags: 0x%x\n",__FUNCTION__,cpdev,ep, ep->ep_num,ep->con_flags);
3723 break;
3724 }
3725 }
3726
3727 if (!cpdev) {
3728 //printk("%s() no cpdev unexpectedly for unique_id: %d",__FUNCTION__, entry->unique_id);
3729 dev_kfree_skb_any (skb_in);
3730 return;
3731 }
3732
3733 status = cp_lkm_usb_to_cplkm_status(entry->status);
3734 //printk("%s() other data uid: %d, ep_num:0x%x, status:%d, len: %d\n", __FUNCTION__, cpdev->unique_id,bep->ep_num, entry->status, skb_in->len);
3735
3736 memset(&hdr,0,sizeof(hdr));
3737 hdr.instance_id = cpdev->unique_id;
3738 hdr.cmd = CP_LKM_USB_CMD_DATA_RECV;
3739 hdr.status = status;
3740 hdr.len = skb_in?skb_in->len:0;
3741 hdr.arg1 = bep->ep_num;
3742 cp_lkm_post_message(&cp_lkm_usb_mgr.common, &hdr, skb_in);
3743
3744 return;
3745}
3746
3747
3748static void cp_lkm_usb_ctrl_process (struct cp_lkm_usb_base_dev* cpbdev, struct sk_buff *skb_in)
3749{
3750 struct skb_data *entry;
3751 struct cp_lkm_msg_hdr hdr;
3752 int status;
3753    struct cp_lkm_usb_dev* cpdev = NULL;
3754
3755 DEBUG_TRACE("%s()", __FUNCTION__);
3756 if(!cp_lkm_usb_is_base_attached(cpbdev)){
3757 dev_kfree_skb_any (skb_in);
3758 return;
3759 }
3760
3761 entry = (struct skb_data *)skb_in->cb;
3762 cpdev = cp_lkm_usb_find_dev(entry->unique_id);
3763 if (!cpdev) {
3764 //printk("%s() no cpdev unexpectedly for unique_id: %d",__FUNCTION__, entry->unique_id);
3765 dev_kfree_skb_any (skb_in);
3766 return;
3767 }
3768
3769 status = cp_lkm_usb_to_cplkm_status(entry->status);
3770 memset(&hdr,0,sizeof(hdr));
3771 hdr.instance_id = cpdev->unique_id;
3772 hdr.cmd = CP_LKM_USB_CMD_CTRL_RECV;
3773 hdr.status = status;
3774 hdr.len = skb_in?skb_in->len:0;
3775 hdr.arg1 = 0; //ctrl channel ep is always 0
3776
3777 cp_lkm_post_message(&cp_lkm_usb_mgr.common, &hdr, skb_in);
3778 DEBUG_TRACE("%s() ctrl response status:%d", __FUNCTION__, entry->status);
3779
3780 return;
3781}
3782
3783
3784//This function runs in an interrupt context so it can't be preempted. This means cpdev can't
3785    //be deleted out from under us while we are running.
3786static void cp_lkm_usb_data_recv_process (struct cp_lkm_usb_base_dev* cpbdev, struct sk_buff *skb_in)
3787{
3788 struct sk_buff *skb_out;
3789 int res;
3790 int dst;
3791 struct skb_data *entry;
3792 struct cp_lkm_usb_dev* cpdev;
3793 struct cp_lkm_base_ep* bep;
3794 int ep_num;
3795 int mux_id;
3796
3797 // WARNING: The memory this pointer points to will be freed by the wrapper, so copy everything you need
3798 // out of it here before going into the while loop
3799 entry = (struct skb_data *)skb_in->cb;
3800 bep = entry->bep;
3801 ep_num = bep->ep_num;
3802
3803 //printk("%s() cpbdev: %p, bep: %p base_state: %d\n", __FUNCTION__, cpbdev, bep, cpbdev->base_state);
3804
3805 if(!cp_lkm_usb_is_base_attached(cpbdev)){
3806 dev_kfree_skb_any (skb_in);
3807 return;
3808 }
3809
3810 while(1) {
3811 skb_out = NULL;
3812
3813 mux_id = 0;
3814
3815        res = cp_lkm_wrapper_recv(cpbdev->wrapper_ctxt, &dst, &mux_id, skb_in, &skb_out);
3816        skb_in = NULL; //wrapper owns skb_in after the call above; clear it before any goto so a looping retry can't re-feed it
3817
3818        if (dst != CP_LKM_WRAPPER_DST_CTRL && dst != CP_LKM_WRAPPER_DST_DATA) {
3819            // this is something other than ctrl or data that we don't know what to do with, so drop it.
3820            goto recv_done;
3821        }
3822
3823        cpdev = cp_lkm_usb_find_muxed_dev(cpbdev, mux_id);
3824
3825
3826 if (NULL == cpdev) {
3827 //LOG("%s(), no cpdev found for mux_id: 0x%x, or base_id: %d", __FUNCTION__,mux_id,cpbdev->base_id);
3828 DEBUG_WARN("%s(), no cpdev found for mux_id: 0x%x, or base_id: %d", __FUNCTION__,mux_id,cpbdev->base_id);
3829 goto recv_done;
3830 }
3831
3832 if(res == CP_LKM_WRAPPER_RES_ERROR) {
3833 UPDATE_STATS(cpdev->edi->pm_stats64_ctx, rx_dropped, 1);
3834 goto recv_done;
3835 }
3836
3837 //printk("%s() cpdev: %p, ep_num: 0x%x, dst: %d, mux_id: %d, state: %d, res: %d\n", __FUNCTION__, cpdev, ep_num, dst, mux_id, cpdev->state, res);
3838
3839 //DEBUG_INFO("%s() while() - skb_out:%p, dst:%d, res:%d", __FUNCTION__, skb_out, dst, res);
3840
3841 //if nothing to send, see if we can bail or if need to call again
3842 if(NULL == skb_out){
3843 goto recv_done;
3844 }
3845
3846 if(dst == CP_LKM_WRAPPER_DST_CTRL) {
3847 //printk("%s() ctrl pkt cpdev: %p\n", __FUNCTION__, cpdev);
3848 if (skb_out->len) { // watch for 0 length short packets
3849 struct cp_lkm_msg_hdr hdr;
3850
3851 DEBUG_TRACE("%s() recv app pkt", __FUNCTION__);
3852 memset(&hdr,0,sizeof(hdr));
3853 hdr.instance_id = cpdev->unique_id;
3854 hdr.cmd = CP_LKM_USB_CMD_DATA_RECV;
3855 hdr.status = CP_LKM_STATUS_OK;
3856 hdr.len = skb_out->len;
3857 hdr.arg1 = ep_num;
3858
3859 cp_lkm_post_message(&cp_lkm_usb_mgr.common, &hdr, skb_out);
3860 skb_out = NULL;
3861 }
3862 }
3863 //dst == CP_LKM_WRAPPER_DST_DATA
3864 else{
3865 //printk("%s() data pkt cpdev: %p\n", __FUNCTION__, cpdev);
3866 if (skb_out->len && cpdev->edi->pm_recv){
3867 //printk("%s() data pkt send to pm cpdev: %p, first byte: 0x%x\n", __FUNCTION__, cpdev, skb_out->data[0]);
3868 cpdev->edi->pm_recv(cpdev->edi->pm_recv_ctx, skb_out);
3869 skb_out = NULL;
3870 }
3871 }
3872
3873recv_done:
3874 if(skb_out) {
3875 dev_kfree_skb_any(skb_out);
3876 }
3877
3878 //if wrapper didn't ask to be called back, then done
3879 if(res != CP_LKM_WRAPPER_RES_AGAIN) {
3880 break;
3881 }
3882
3883 }
3884
3885 return;
3886}
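// The wrapper contract used above: hand the raw USB buffer in once, then keep
// calling with skb_in == NULL while CP_LKM_WRAPPER_RES_AGAIN comes back, since a
// single USB transfer may carry several muxed packets. A minimal consumer sketch
// (real routing by dst/mux_id omitted):
#if 0
static void wrapper_drain_sketch(void *wrapper_ctxt, struct sk_buff *skb_in)
{
    struct sk_buff *skb_out;
    int dst, mux_id, res;

    do {
        skb_out = NULL;
        res = cp_lkm_wrapper_recv(wrapper_ctxt, &dst, &mux_id, skb_in, &skb_out);
        skb_in = NULL;                   /* wrapper owns the input after first call */
        if (skb_out)
            dev_kfree_skb_any(skb_out);  /* a real consumer routes by dst */
    } while (res == CP_LKM_WRAPPER_RES_AGAIN);
}
#endif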
3887
3888/*
3889 * This function runs in a hw interrupt context. Do not put any DEBUG_XX print messages in here.
3890*/
3891static void cp_lkm_usb_recv_complete (struct urb *urb)
3892{
3893 unsigned long flags;
3894 struct sk_buff *skb = (struct sk_buff *) urb->context;
3895 struct skb_data *entry = (struct skb_data *) skb->cb;
3896 struct cp_lkm_usb_base_dev* cpbdev = (struct cp_lkm_usb_base_dev *)entry->cpbdev;
3897 struct cp_lkm_usb_dev* cpdev_stats_only;
3898 int urb_status = urb->status;
3899 struct cp_lkm_base_ep* bep = entry->bep;
3900 bool is_data = false;
3901 //if(urb->status) {
3902 // printk("recv_done: status: %d, len:%d\n", urb->status, urb->actual_length);
3903 //}
3904
3905    // we don't know which cpdev recv packets are destined for when running muxed clones, so report all errors
3906 // to the base device (for non cloned cases, this will always be the correct cpdev)
3907 cpdev_stats_only = cp_lkm_usb_find_dev(cpbdev->base_id);
3908
3909    //remove skb from the list first thing so no other code context looking at the
3910 //list (such as unlink_urbs) can mess with it.
3911 spin_lock_irqsave(&cpbdev->in_q.lock,flags);
3912 __skb_unlink(skb, &cpbdev->in_q);
3913 spin_unlock_irqrestore(&cpbdev->in_q.lock,flags);
3914
3915 skb_put (skb, urb->actual_length);
3916 if(bep->ep_num == cpbdev->data_in_bep_num) {
3917 is_data = true;
3918 entry->state = in_data_done;
3919 //note we don't decrement the data ep cnt until we process the pkt
3920 } else{
3921 bep->q_cnt--;
3922 entry->state = in_other_done;
3923 }
3924 entry->status = urb->status;
3925
3926 //skip status and error checking if the device has unplugged
3927 if(!cp_lkm_usb_is_base_attached(cpbdev)) {
3928 entry->status = -ENODEV;
3929 goto recv_done;
3930 }
3931
3932 switch (urb_status) {
3933 // success
3934 case 0:
3935 break;
3936
3937 // stalls need manual reset. this is rare ... except that
3938 // when going through USB 2.0 TTs, unplug appears this way.
3939 // we avoid the highspeed version of the ETIMEOUT/EILSEQ
3940 // storm, recovering as needed.
3941 case -EPIPE:
3942 UPDATE_STATS(cpdev_stats_only->edi->pm_stats64_ctx, rx_errors, 1);
3943 //don't clear halts on ctrl ep
3944 if(bep->ep_num != 0) {
3945 cp_lkm_usb_defer_kevent (cpbdev, bep, EVENT_RX_HALT);
3946 }
3947 goto block;
3948
3949 // software-driven interface shutdown
3950 case -ECONNRESET: // async unlink
3951 case -ESHUTDOWN: // hardware gone
3952 goto block;
3953
3954 case -ENODEV:
3955 //printk("recv_done nodev:%d\n", ENODEV);
3956 goto block;
3957
3958 // we get controller i/o faults during khubd disconnect() delays.
3959 // throttle down resubmits, to avoid log floods; just temporarily,
3960 // so we still recover when the fault isn't a khubd delay.
3961 case -EPROTO:
3962 case -ETIME:
3963 case -EILSEQ:
3964 UPDATE_STATS(cpdev_stats_only->edi->pm_stats64_ctx, rx_errors, 1);
3965 if (!timer_pending (&cpbdev->rx_delay)) {
3966 mod_timer (&cpbdev->rx_delay, jiffies + THROTTLE_JIFFIES);
3967 }
3968block:
3969 if(bep->ep_num == cpbdev->data_in_bep_num) {
3970 bep->q_cnt--;
3971 entry->state = in_data_cleanup;
3972 }
3973 else{
3974 entry->state = in_other_cleanup;
3975 }
3976
3977 break;
3978
3979 // data overrun ... flush fifo?
3980 case -EOVERFLOW:
3981 UPDATE_STATS(cpdev_stats_only->edi->pm_stats64_ctx, rx_over_errors, 1);
3982
3983 // FALLTHROUGH
3984
3985 default:
3986 if(bep->ep_num == cpbdev->data_in_bep_num) {
3987 bep->q_cnt--;
3988 entry->state = in_data_cleanup;
3989 }
3990 else{
3991 entry->state = in_other_cleanup;
3992 }
3993 UPDATE_STATS(cpdev_stats_only->edi->pm_stats64_ctx, rx_errors, 1);
3994 break;
3995 }
3996
3997 // on responses to a requested recv from the app driver, we need to always return something even on error so force it here
3998 if(bep->con_flags & CP_LKM_USB_RECV) {
3999 if(is_data){
4000 entry->state = in_data_done; //this should never happen, data endpoints always listen, they don't post recv's
4001 }
4002 else{
4003 entry->state = in_other_done;
4004 }
4005 }
4006
4007recv_done:
4008 //do not use the 'entry' struct after this call. It is part of the skb and the skb will be freed when the _bh function runs.
4009 //if you need something from it save it off before calling this
4010 if(is_data) {
4011 //cpdev->dbg_total_rx_irq++;
4012 //printk("%s(), got data on cpbdev: %p, bep: %p, id: %d\n",__FUNCTION__, cpbdev, entry->bep, cpbdev->base_id);
4013 cp_lkm_usb_done_and_defer_data(cpbdev, skb, DATA_SRC_RX);
4014 }
4015 else{
4016 //printk("%s(), got other data on cpbdev: %p, bep: %p, id: %d\n",__FUNCTION__, cpbdev, entry->bep, cpbdev->base_id);
4017 cp_lkm_usb_done_and_defer_other(cpbdev, skb);
4018 }
4019}
4020
4021//static int g_num_adjusts = 0;
4022//static int g_num_recv_pkts = 0;
4023//static int g_num_iters = 0;
4024static int cp_lkm_usb_submit_recv(struct cp_lkm_usb_base_dev* cpbdev, struct urb *urb, gfp_t flags, struct cp_lkm_base_ep* bep, bool data)
4025{
4026 struct sk_buff *skb;
4027 struct skb_data *entry;
4028 int retval = 0;
4029 unsigned long lockflags;
4030 size_t size;
4031 int hdr_size = 0;
4032 int hdr_offset = 0;
4033 int pad = 0; //some platforms require alignment override. pad takes care of that.
4034
4035 //g_num_recv_pkts++;
4036 //g_num_iters++;
4037 //if(g_num_iters > 10000){
4038 // printk("%s() num pkts: %d, num adjusts: %d\n",__FUNCTION__,g_num_recv_pkts,g_num_adjusts);
4039 // g_num_iters = 0;
4040 //}
4041 size = bep->max_transfer_size;
4042 if (data) {
4043 hdr_size = cpbdev->pm_hdr_size;
4044 hdr_offset = cpbdev->pm_hdr_offset;
4045 }
4046
4047 if(cp_lkm_is_broadcom && (hdr_offset & 0x3)) {
4048 //Jira issue FW-14929: On broadcom, we have to keep the buffers four byte aligned else the USB block
4049 //corrupts the data (no idea why).
4050 //Round up the hdr_offset to nearest 4 byte boundary. This means pkts may not be aligned as expected,
4051        //so the receive function will need to either realign with a copy, or send up the stack unaligned
4052 // See cp_lkm_pm_net_recv() to see how we decided to deal with it (subject to change).
4053 pad = 4 - (hdr_offset&0x3);
4054 //g_num_adjusts++;
4055 }
4056
4057 if ((skb = alloc_skb (size+hdr_size+pad, flags)) == NULL) {
4058 //if (netif_msg_rx_err (dev))
4059 // devdbg (dev, "no rx skb");
4060 cp_lkm_usb_defer_kevent (cpbdev, bep, EVENT_RX_MEMORY);
4061 return -ENOMEM;
4062 }
4063 if (data) {
4064 skb_reserve(skb, hdr_offset+pad);
4065 //printk("%s(), data: %p, len: %d, whs:%d, hs:%d, ho:%d\n",__FUNCTION__,skb->data,skb->len,wrapper_hdr_size,hdr_size,hdr_offset);
4066 }
4067 entry = (struct skb_data *) skb->cb;
4068 entry->urb = urb;
4069 entry->cpbdev = cpbdev;
4070 if(data) {
4071 entry->state = in_data_start;
4072 }
4073 else{
4074 entry->state = in_other_start;
4075 }
4076
4077 entry->status = 0;
4078 entry->bep = bep;
4079
4080 if(bep->type == UE_BULK) {
4081 usb_fill_bulk_urb (urb, cpbdev->udev, bep->pipe, skb->data, size,
4082 cp_lkm_usb_recv_complete, skb);
4083 }
4084 else{
4085 usb_fill_int_urb (urb, cpbdev->udev, bep->pipe, skb->data, size,
4086 cp_lkm_usb_recv_complete, skb, bep->interval);
4087 }
4088 //cp_lkm_usb_cnts(entry->state,1);
4089 spin_lock_irqsave (&cpbdev->in_q.lock, lockflags);
4090 if (cp_lkm_usb_is_base_attached(cpbdev) && !test_bit (EVENT_RX_HALT, &bep->err_flags)) {
4091 DEBUG_TRACE("%s() ep:0x%x, size:%d, type:%d, pipe:0x%x",__FUNCTION__, bep->ep_num, size, bep->type, bep->pipe);
4092 retval = usb_submit_urb (urb, GFP_ATOMIC);
4093 switch (retval) {
4094 case -EPIPE:
4095 //don't clear halts on ctrl ep
4096 if(bep->ep_num != 0) {
4097 cp_lkm_usb_defer_kevent (cpbdev, bep, EVENT_RX_HALT);
4098 }
4099 break;
4100 case -ENOMEM:
4101 cp_lkm_usb_defer_kevent (cpbdev, bep, EVENT_RX_MEMORY);
4102 break;
4103 case -ENODEV:
4104 //if (netif_msg_ifdown (dev))
4105 // devdbg (dev, "device gone");
4106 //netif_device_detach (dev->net);
4107 break;
4108 case -EPROTO:
4109 default:
4110 //if (netif_msg_rx_err (dev))
4111 // devdbg (dev, "rx submit, %d", retval);
4112 cp_lkm_schedule_rx_restock(cpbdev,bep);
4113 break;
4114 case 0:
4115 __skb_queue_tail (&cpbdev->in_q, skb);
4116 bep->q_cnt++;
4117 //if(cpdev->in_q.qlen == 1 && ep->index == CP_LKM_DATA_INDEX){
4118 // printk("rx q empty\n");
4119 //}
4120
4121 }
4122 } else {
4123 //if (netif_msg_ifdown (dev))
4124 // devdbg (dev, "rx: stopped");
4125 retval = -ENOLINK;
4126 }
4127 spin_unlock_irqrestore (&cpbdev->in_q.lock, lockflags);
4128 if (retval) {
4129 DEBUG_TRACE("%s() FAILED ep_num:0x%x ep_type:%d, retval: %d",__FUNCTION__, bep->ep_num, bep->type, retval);
4130 //cp_lkm_usb_cnts(entry->state,-1);
4131 dev_kfree_skb_any (skb);
4132 }
4133
4134 return retval;
4135}
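// Worked example of the pad math above, assuming alloc_skb() returns a 4-byte
// aligned data pointer: with pm_hdr_offset == 6, hdr_offset & 0x3 == 2, so
// pad = 4 - 2 = 2 and skb_reserve(skb, 6 + 2) leaves skb->data aligned again;
// offsets that are already a multiple of 4 skip the adjustment entirely.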
4136
4137
4138static int cp_lkm_usb_init(void)
4139{
4140 DEBUG_TRACE("%s()", __FUNCTION__);
4141 memset(&cp_lkm_usb_mgr, 0x00, sizeof(struct cp_lkm_usb_ctx));
4142 cp_lkm_usb_mgr.common.open = cp_lkm_usb_open;
4143 cp_lkm_usb_mgr.common.close = cp_lkm_usb_close;
4144 cp_lkm_usb_mgr.common.handle_msg = cp_lkm_usb_handle_msg;
4145 cp_lkm_usb_mgr.common.handle_ioctl = cp_lkm_usb_handle_ioctl;
4146 INIT_LIST_HEAD(&cp_lkm_usb_mgr.dev_list);
4147
4148 cp_lkm_common_ctx_init(&cp_lkm_usb_mgr.common);
4149
4150 spin_lock_init(&cp_lkm_usb_mgr.lock);
4151 //sema_init(&cp_lkm_usb_mgr.thread_sem, 1);
4152
4153 if(!strcmp(PRODUCT_PLATFORM, "brcm_arm")) {
4154 LOG("cp_lkm: Broadcom platform");
4155 cp_lkm_is_broadcom = 1;
4156 }
4157
4158 LOG("cp_lkm: Product chipset %s",PRODUCT_INFO_CHIPSET);
4159 LOG("cp_lkm: Product platform %s",PRODUCT_PLATFORM);
4160
4161    //Things work better if the napi weight here matches the global weight set in service_manager/services/firewall.py
4162    //This is true even if we don't use napi here, since ethernet on some platforms uses it
4163 if ((strcmp(PRODUCT_PLATFORM,"ramips")==0) && (strcmp(PRODUCT_INFO_CHIPSET, "3883")!=0)){
4164 //all ralink (mediatek) platforms except for 3883 use the low settings
4165 //use_high = false;
4166 CP_LKM_PM_NAPI_WEIGHT = 32;
4167 }
4168 else{
4169 //use_high = true;
4170 CP_LKM_PM_NAPI_WEIGHT = 64;
4171 }
4172
4173 //set up default settings for all platforms
4174 CP_LKM_USB_NAPI_MAX_WORK = CP_LKM_PM_NAPI_WEIGHT;
4175 CP_LKM_USB_MAX_RX_QLEN = CP_LKM_USB_NAPI_MAX_WORK;
4176 CP_LKM_USB_MAX_OTHER_QLEN = 2;
4177 CP_LKM_USB_TX_PAUSE_Q_PKTS = CP_LKM_USB_NAPI_MAX_WORK;
4178 CP_LKM_USB_TX_RESUME_Q_PKTS = CP_LKM_USB_TX_PAUSE_Q_PKTS/4;
4179 CP_LKM_USB_TX_SCHED_CNT = 1;
4180 CP_LKM_USB_RX_SCHED_CNT = 1;
4181 CP_LKM_USB_RESTOCK_MULTIPLE = 1; //restock rx as we process them
4182 CP_LKM_USB_TASKLET_CNT = 10;
4183 CP_LKM_USB_WORKQUEUE_CNT = 5;
4184 CP_LKM_USB_PROCESS_DIVISOR = 4;
4185
4186 LOG("cp_lkm: Processor: %s, Max work: %d, NAPI budget: %d, QLEN: %d.",PRODUCT_INFO_CHIPSET, CP_LKM_USB_NAPI_MAX_WORK, CP_LKM_PM_NAPI_WEIGHT, CP_LKM_USB_MAX_RX_QLEN);
4187
4188 return 0;
4189
4190}
4191
4192static int cp_lkm_usb_cleanup(void)
4193{
4194 //module is unloading, clean up everything
4195 // empty pending posted messages
4196 cp_lkm_cleanup_msg_list(&cp_lkm_usb_mgr.common);
4197
4198 cp_lkm_usb_close(&cp_lkm_usb_mgr.common);
4199 return 0;
4200}
4201
4202static int cp_lkm_usb_open(struct cp_lkm_common_ctx *ctx)
4203{
4204 //struct cp_lkm_usb_ctx* mgr;
4205
4206 DEBUG_TRACE("%s()", __FUNCTION__);
4207 //mgr = (struct cp_lkm_usb_ctx*)ctx;
4208
4209 return 0;
4210}
4211
4212static int cp_lkm_usb_close(struct cp_lkm_common_ctx *ctx)
4213{
4214 //unsigned long flags;
4215 //struct cp_lkm_usb_dev* cpdev;
4216 //struct cp_lkm_usb_close_intf ci;
4217 //struct cp_lkm_usb_unplug_intf ui;
4218 LOG("%s() called unexpectedly.", __FUNCTION__);
4219
4220 //NOTE: catkin 10/11/2019 - Close is only called in our system if the modem stack crashes. This means
4221 // things are in a bad state and the router will be rebooting. We decided not
4222 // to clean things up here because this code got into an infinite loop in
4223 // certain fail situations, which prevented the router from rebooting.
4224 // Revisit if close ever becomes a normal event.
4225
4226 /*
4227 while(1) {
4228 spin_lock(&cp_lkm_usb_mgr.lock);
4229
4230 cpdev = cp_lkm_usb_get_head_dev();
4231
4232 spin_unlock(&cp_lkm_usb_mgr.lock);
4233 if(!cpdev) {
4234 return 0;
4235 }
4236
4237 //TODO - when this closed we have a modem plugged, we will be deleting the top half of the driver while the bottom half is
4238 // still plugged. Figure out how to force the driver to disconnect the modem
4239 ci.unique_id = cpdev->unique_id;
4240 cp_lkm_usb_close_intf(&ci);
4241
4242 //the unplug removes the device from the list which prevents us from infinite looping here
4243 ui.unique_id = cpdev->unique_id;
4244 cp_lkm_usb_unplug_intf(&ui);
4245 }
4246
4247 cp_lkm_cleanup_msg_list(ctx);
4248 */
4249 return 0;
4250}
4251
4252static int cp_lkm_usb_handle_msg(struct cp_lkm_common_ctx *ctx, struct cp_lkm_msg_hdr *hdr, struct sk_buff *skb)
4253{
4254 int retval = -1;
4255 struct cp_lkm_ep* ep;
4256 struct cp_lkm_usb_dev* cpdev;
4257 struct cp_lkm_usb_base_dev* cpbdev;
4258
4259 //grab lock to protect global device list before searching (don't want to search it if another thread is adding or removing a cpdev)
4260 spin_lock(&cp_lkm_usb_mgr.lock);
4261 cpdev = cp_lkm_usb_find_dev(hdr->instance_id);
4262
4263 //grab thread semaphore so disconnect can't run and delete the cpdev while we are running here
4264 if(!cpdev || !cp_lkm_usb_is_attached(cpdev) || !cp_lkm_usb_is_base_attached(cpdev->cpbdev)) {
4265 spin_unlock(&cp_lkm_usb_mgr.lock);
4266 dev_kfree_skb_any (skb);
4267 //printk("%s() no device or no probe yet\n", __FUNCTION__);
4268 return 0;
4269 }
4270 cpbdev = cpdev->cpbdev;
4271 switch(hdr->cmd) {
4272 case CP_LKM_USB_CMD_DATA_SEND:
4273 {
4274 ep = cp_lkm_usb_get_ep(cpdev, hdr->arg1);
4275 if(ep) {
4276 //printk("%s(), send other data cpbdev: %p, cpdev: %p, bep: %p, ep: %p, num: 0x%x\n",__FUNCTION__,cpdev->cpbdev,cpdev,ep->bep,ep,ep->ep_num);
4277 retval = cp_lkm_usb_start_xmit_common(cpdev, skb, CP_LKM_WRAPPER_SRC_CTRL, ep);
4278 skb = NULL;
4279 }
4280 else{
4281 DEBUG_TRACE("%s() Invalid EP number 0x%x", __FUNCTION__, hdr->arg1);
4282 retval = -1;
4283 }
4284 }
4285 break;
4286 case CP_LKM_USB_CMD_CTRL_SEND:
4287 {
4288 retval = cp_lkm_usb_start_ctrl_xmit(cpdev, skb);
4289 skb = NULL;
4290 }
4291 break;
4292 }
4293
4294 spin_unlock(&cp_lkm_usb_mgr.lock);
4295
4296 if(skb) {
4297 dev_kfree_skb_any (skb);
4298 }
4299 return retval;
4300}
4301
4302static int cp_lkm_usb_handle_ioctl(struct cp_lkm_common_ctx *ctx, int cmd, void *k_argp)
4303{
4304 int retval = -1;
4305 //printk("%s(), cmd:0x%x\n", __FUNCTION__, _IOC_NR(cmd));
4306
4307 switch(cmd) {
4308 case CP_LKM_IOCTL_USB_PLUG_INTF:
4309 {
4310 struct cp_lkm_usb_plug_intf* pi = (struct cp_lkm_usb_plug_intf*)k_argp;
4311 retval = cp_lkm_usb_plug_intf(pi);
4312 }
4313 break;
4314 case CP_LKM_IOCTL_USB_SET_WRAPPER:
4315 {
4316 struct cp_lkm_usb_set_wrapper* sw = (struct cp_lkm_usb_set_wrapper*)k_argp;
4317 retval = cp_lkm_usb_set_wrapper(sw);
4318 }
4319 break;
4320 case CP_LKM_IOCTL_USB_SET_MUX_ID:
4321 {
4322 struct cp_lkm_usb_set_mux_id* smi = (struct cp_lkm_usb_set_mux_id*)k_argp;
4323 retval = cp_lkm_usb_set_mux_id(smi);
4324 }
4325 break;
4326 case CP_LKM_IOCTL_USB_OPEN_INTF:
4327 {
4328 struct cp_lkm_usb_open_intf* oi = (struct cp_lkm_usb_open_intf*)k_argp;
4329 retval = cp_lkm_usb_open_intf(oi);
4330 }
4331 break;
4332 case CP_LKM_IOCTL_USB_CLOSE_INTF:
4333 {
4334 struct cp_lkm_usb_close_intf* ci = (struct cp_lkm_usb_close_intf*)k_argp;
4335 retval = cp_lkm_usb_close_intf(ci);
4336 }
4337 break;
4338 case CP_LKM_IOCTL_USB_UNPLUG_INTF:
4339 {
4340 struct cp_lkm_usb_unplug_intf* ui = (struct cp_lkm_usb_unplug_intf*)k_argp;
4341 retval = cp_lkm_usb_unplug_intf(ui);
4342 }
4343 break;
4344 case CP_LKM_IOCTL_USB_EP_ACTION:
4345 {
4346 struct cp_lkm_usb_ep_action* ea = (struct cp_lkm_usb_ep_action*)k_argp;
4347 retval = cp_lkm_usb_ep_action(ea);
4348 }
4349 break;
4350 case CP_LKM_IOCTL_USB_PM_LINK:
4351 {
4352 struct cp_lkm_usb_pm_link *upl = (struct cp_lkm_usb_pm_link *)k_argp;
4353 retval = cp_lkm_usb_pm_link(upl);
4354 }
4355 break;
4356 case CP_LKM_IOCTL_USB_IS_ALIVE_INTF:
4357 {
4358 struct cp_lkm_usb_is_alive_intf* alivei = (struct cp_lkm_usb_is_alive_intf*)k_argp;
4359 retval = cp_lkm_usb_is_alive_intf(alivei);
4360 }
4361 }
4362
4363 return retval;
4364}
4365
4366
4367/******************************* kernel module PM instance functionality **********************************/
4368struct cp_lkm_pm_ctx {
4369 struct cp_lkm_common_ctx common;
4370 struct list_head pm_list;
4371 spinlock_t pm_list_lock;
4372};
4373
4374struct cp_lkm_pm_ctx cp_lkm_pm_mgr;
4375
4376
4377static void cp_lkm_pm_filter_empty_list(struct cp_lkm_pm_common *pm)
4378{
4379
4380 struct cp_lkm_pm_filter *filter;
4381 struct list_head *entry, *tmp;
4382
4383 list_for_each_safe(entry, tmp, &pm->filter_list) {
4384 filter = list_entry(entry, struct cp_lkm_pm_filter, list);
4385 list_del(&filter->list);
4386 kfree(filter);
4387 }
4388}
4389
4390static bool cp_lkm_pm_filter_ok(struct cp_lkm_pm_common *pm, unsigned char *buf, unsigned int buf_len)
4391{
4392 bool allow = true; // default allow the egress packet
4393
4394 struct list_head *pos;
4395
4396 struct in_device *in_dev;
4397 struct in_ifaddr *ifa;
4398 struct iphdr *ipv4_hdr;
4399 u32 ipv4_src_addr = 0;
4400 u32 ipv4_net_addr = 0;
4401 u32 ipv4_net_mask = 0;
4402
4403 ipv4_hdr = (struct iphdr *)buf;
4404
4405 // these are the include filters (white list) - exclude filters (black list) are not currently supported
4406 // exclude filters may need to be processed in another loop through the filters
4407 list_for_each(pos, &pm->filter_list) {
4408 struct cp_lkm_pm_filter *filter = list_entry(pos, struct cp_lkm_pm_filter, list);
4409 switch(filter->type) {
4410 case CP_LKM_PM_FILTER_TYPE_IP_SRC_WAN_SUBNET_INCLUDE:
4411 if (4 == ipv4_hdr->version) {
4412 // ipv4
4413 allow = false;
4414 ipv4_src_addr = __be32_to_cpu(ipv4_hdr->saddr);
4415 if(ipv4_src_addr == 0){
4416 //DHCP rebind packets may have a src addr of 0.0.0.0 and we want to let those through.
4417 allow = true;
4418 }
4419 else{
4420 // get network device IP address and check against src packet ip address
4421 rcu_read_lock();
4422 in_dev = rcu_dereference(pm->net_dev->ip_ptr);
4423 // in_dev has a list of IP addresses (because an interface can have multiple - check them all)
4424                            for (ifa = in_dev ? in_dev->ifa_list : NULL; ifa != NULL; ifa = ifa->ifa_next) {
4425 ipv4_net_addr = __be32_to_cpu(ifa->ifa_local);
4426 ipv4_net_mask = __be32_to_cpu(ifa->ifa_mask);
4427 if ((ipv4_net_addr & ipv4_net_mask) == (ipv4_src_addr & ipv4_net_mask)) {
4428 // allow the packet
4429 allow = true;
4430 break;
4431 }
4432 }
4433 rcu_read_unlock();
4434 }
4435 }/* benk needs to be tested before ok to execute
4436 else if (6 == ipv4_hdr->version) {
4437 struct in6_addr *addr = (struct in6_addr *)&buf[2 * sizeof(u32)];
4438 if (ipv6_chk_prefix(addr, pm->net_dev)) {
4439 allow = true;
4440 }
4441 } */
4442 break;
4443 case CP_LKM_PM_FILTER_TYPE_IP_SRC_SUBNET_INCLUDE:
4444 if (4 == ipv4_hdr->version) {
4445 // ipv4
4446 allow = false;
4447 ipv4_src_addr = __be32_to_cpu(ipv4_hdr->saddr);
4448 if(ipv4_src_addr == 0){
4449 //DHCP rebind packets may have a src addr of 0.0.0.0 and we want to let those through.
4450 allow = true;
4451 }
4452 else if ((filter->subnet.ipv4_addr & filter->subnet.ipv4_mask) == (ipv4_src_addr & filter->subnet.ipv4_mask)) {
4453 allow = true;
4454 }
4455                }
4456                break;
4457 default:
4458 break;
4459 }
4460
4461 if (allow) {
4462 break;
4463 }
4464 }
4465
4466 if (!allow) {
4467 DEBUG_WARN("%s() dropping packet - src:0x%x\n", __FUNCTION__, ipv4_src_addr);
4468 }
4469
4470 return allow;
4471}
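// Worked example of the include-filter subnet test above: an interface at
// 192.168.0.1/255.255.255.0 and a source of 192.168.0.77 both mask to
// 192.168.0.0, so the packet passes; a source of 10.0.0.5 masks to 10.0.0.0
// and is dropped unless some other filter allows it. The core test in isolation:
#if 0
static bool same_subnet_sketch(u32 addr_a, u32 addr_b, u32 mask)
{
    return (addr_a & mask) == (addr_b & mask);  /* all host byte order */
}
#endif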
4472/******************************* kernel module pm common functionality **********************************/
4473int cp_lkm_common_init(struct cp_lkm_pm_common *pmc)
4474{
4475 // allocate stats struct
4476 pmc->pcpu_stats64 = netdev_alloc_pcpu_stats(struct cp_lkm_pm_stats64);
4477 if (!pmc->pcpu_stats64) {
4478 return -ENOMEM;
4479 }
4480
4481
4482 pmc->pm_link_count = 0;
4483 spin_lock_init(&pmc->pm_link_lock);
4484 INIT_LIST_HEAD(&pmc->filter_list);
4485
4486 return 0;
4487}
4488
4489void cp_lkm_common_deinit(struct cp_lkm_pm_common *pmc)
4490{
4491 if (!pmc->pcpu_stats64) {
4492 return;
4493 }
4494 free_percpu(pmc->pcpu_stats64);
4495 pmc->pcpu_stats64 = NULL;
4496}
4497// The pm_link_lock is used to coordinate activity between xmit, poll, and link/unlink
4498// It is okay to poll and xmit at the same time, but we don't want to do either if we are linking or unlinking.
4499// link/unlink sets the pm_link_count negative to block both poll and xmit. If pm_link_count is not negative then
4500// both poll and xmit are free to grab the link at any time and at the same time.
4501//retval:
4502// 0 = you have the token, proceed
4503// -1 = you don't have the token, do not pass go
4504int cp_lkm_common_inc_link_lock(struct cp_lkm_pm_common* pmc)
4505{
4506 unsigned long flags;
4507 int retval = 0;
4508 spin_lock_irqsave(&pmc->pm_link_lock, flags);
4509 if(pmc->pm_link_count < 0) {
4510 retval = -1;
4511 }
4512 else{
4513 pmc->pm_link_count++;
4514 }
4515 spin_unlock_irqrestore(&pmc->pm_link_lock, flags);
4516 return retval;
4517}
4518
4519int cp_lkm_common_dec_link_lock(struct cp_lkm_pm_common* pmc)
4520{
4521 unsigned long flags;
4522 int retval = 0;
4523 spin_lock_irqsave(&pmc->pm_link_lock, flags);
4524 if(pmc->pm_link_count > 0) {
4525 pmc->pm_link_count--;
4526 }
4527 else{
4528 //should never hit this
4529 retval = -1;
4530 }
4531 spin_unlock_irqrestore(&pmc->pm_link_lock, flags);
4532 return retval;
4533}
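// Typical fast-path usage of the token pair above; link/unlink instead swings
// pm_link_count negative while rewiring. Sketch:
#if 0
static void pm_fast_path_sketch(struct cp_lkm_pm_common *pmc)
{
    if (cp_lkm_common_inc_link_lock(pmc) < 0)
        return;                 /* link is being (un)wired; back off */
    /* ... safe to use pmc->edi here, concurrently with other holders ... */
    cp_lkm_common_dec_link_lock(pmc);
}
#endif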
4534
4535/******************************* kernel module net PM functionality **********************************/
4536
4537// common structure for ethernet and IP protocol managers
4538struct cp_lkm_pm_net {
4539 struct cp_lkm_pm_common common;
4540 struct ethhdr eth_hdr;
4541
4542};
4543
4544#if (LINUX_VERSION_CODE <= KERNEL_VERSION(4,4,100))
4545static struct rtnl_link_stats64 *cp_lkm_pm_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
4546#else
4547static void cp_lkm_pm_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
4548#endif
4549{
4550 struct cp_lkm_pm_net *pm_net;
4551 int i;
4552 struct cp_lkm_pm_stats64 *pstats;
4553
4554 pm_net = netdev_priv(netdev);
4555
4556 for_each_possible_cpu(i) {
4557 u64 rx_packets, rx_bytes, rx_errors, rx_dropped, rx_over_errors;
4558 u64 tx_packets, tx_bytes, tx_errors, tx_dropped;
4559 unsigned int start;
4560 pstats = per_cpu_ptr(pm_net->common.pcpu_stats64, i);
4561 do {
4562 start = u64_stats_fetch_begin_irq(&pstats->syncp);
4563 rx_packets = pstats->rx_packets;
4564 tx_packets = pstats->tx_packets;
4565 rx_bytes = pstats->rx_bytes;
4566 tx_bytes = pstats->tx_bytes;
4567 rx_errors = pstats->rx_errors;
4568 tx_errors = pstats->tx_errors;
4569 rx_dropped = pstats->rx_dropped;
4570 tx_dropped = pstats->tx_dropped;
4571 rx_over_errors = pstats->rx_over_errors;
4572 } while (u64_stats_fetch_retry_irq(&pstats->syncp, start));
4573
4574 stats->rx_packets += rx_packets;
4575 stats->tx_packets += tx_packets;
4576 stats->rx_bytes += rx_bytes;
4577 stats->tx_bytes += tx_bytes;
4578 stats->rx_errors += rx_errors;
4579 stats->tx_errors += tx_errors;
4580 stats->rx_dropped += rx_dropped;
4581 stats->tx_dropped += tx_dropped;
4582 stats->rx_over_errors += rx_over_errors;
4583 }
4584#if (LINUX_VERSION_CODE <= KERNEL_VERSION(4,4,100))
4585    return stats;
4586#endif
4587}
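// The reader above pairs with per-cpu writers that bump counters inside a
// u64_stats_update_begin/end section; the UPDATE_STATS() macro used throughout
// this file presumably expands to something along these lines (sketch, field
// names taken from the reader loop):
#if 0
static void update_stats_sketch(struct cp_lkm_pm_stats64 *pstats, u64 bytes)
{
    u64_stats_update_begin(&pstats->syncp);
    pstats->rx_packets++;
    pstats->rx_bytes += bytes;
    u64_stats_update_end(&pstats->syncp);
}
#endif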
4588
4589static int cp_lkm_pm_net_open(struct net_device *dev)
4590{
4591 struct cp_lkm_pm_net *pm_net;
4592
4593 DEBUG_TRACE("%s()", __FUNCTION__);
4594
4595 pm_net = netdev_priv(dev);
4596 netif_start_queue(dev);
4597
4598 // is this link up?
4599 return 0;
4600}
4601
4602static int cp_lkm_pm_net_close(struct net_device *dev)
4603{
4604 struct cp_lkm_pm_net *pm_net = netdev_priv(dev);
4605 struct cp_lkm_msg_hdr hdr;
4606
4607 DEBUG_TRACE("%s()", __FUNCTION__);
4608
4609 // link change
4610 netif_stop_queue(dev);
4611
4612 // post message to indicate link down
4613 memset(&hdr,0,sizeof(hdr));
4614 hdr.instance_id = pm_net->common.unique_id;
4615 hdr.cmd = CP_LKM_PM_LINK_DOWN;
4616 hdr.status = CP_LKM_STATUS_OK;
4617 cp_lkm_post_message(&cp_lkm_pm_mgr.common, &hdr, NULL);
4618 LOG("Link Down indicated - id:%d\n", hdr.instance_id);
4619
4620
4621 return 0;
4622}
4623
4624static int cp_lkm_pm_net_xmit(struct sk_buff *skb, struct net_device *dev)
4625{
4626 struct cp_lkm_pm_net *pm_net = netdev_priv(dev);
4627 bool filter_ok = true;
4628 int link_res;
4629
4630 //see if we can grab the link lock, if not, we are either bringing up or taking down the link between USB and PM, so not safe to proceed
4631 link_res = cp_lkm_common_inc_link_lock(&pm_net->common);
4632 if(link_res < 0) {
4633 dev_kfree_skb_any(skb);
4634 return NETDEV_TX_OK;
4635 }
4636
4637 if (!pm_net->common.edi) {
4638 // cannot do anything without edi
4639 dev_kfree_skb_any(skb);
4640 goto net_xmit_done;
4641 }
4642
4643 //DEBUG_INFO("%s() - %s len:%d", __FUNCTION__, pm_net->common.net_dev->name, skb->len);
4644 UPDATE_STATS(pm_net->common.edi->pm_stats64_ctx, tx_bytes, (skb->len - sizeof(struct ethhdr)));
4645 UPDATE_STATS(pm_net->common.edi->pm_stats64_ctx, tx_packets, 1);
4646 /* Drop packet if interface is not attached */
4647 if (0 == pm_net->common.attached)
4648 goto drop;
4649
4650 if (!pm_net->common.edi->usb_send) {
4651 goto drop;
4652 }
4653
4654 filter_ok = cp_lkm_pm_filter_ok(&pm_net->common, skb->data + sizeof(struct ethhdr), skb->len - sizeof(struct ethhdr));
4655 if (!filter_ok) {
4656 pm_net->common.filter_drop_cnt++;
4657 DEBUG_WARN("%s() filter dropped packet cnt:%u", __FUNCTION__, pm_net->common.filter_drop_cnt);
4658 goto drop;
4659 }
4660
4661 switch(pm_net->common.type) {
4662 case CP_LKM_PM_TYPE_IP_DHCP:
4663 case CP_LKM_PM_TYPE_IP_STATIC:
4664 skb_pull(skb, sizeof(struct ethhdr)); // strip off the ethernet header
4665 break;
4666 default:
4667 break;
4668 }
4669
4670 // send data to USB module
4671 pm_net->common.edi->usb_send(pm_net->common.edi->usb_send_ctx, skb);
4672 goto net_xmit_done;
4673
4674drop:
4675 DEBUG_INFO("%s() - dropped", __FUNCTION__);
4676 UPDATE_STATS(pm_net->common.edi->pm_stats64_ctx, tx_dropped, 1);
4677 dev_kfree_skb_any(skb);
4678
4679net_xmit_done:
4680 cp_lkm_common_dec_link_lock(&pm_net->common);
4681 return NETDEV_TX_OK;
4682}
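/*
 * Note on the return codes above: every exit path returns NETDEV_TX_OK, even
 * for drops. This is correct because the skb is always consumed on every path
 * (handed to usb_send() or freed); NETDEV_TX_BUSY is only for drivers that
 * leave the skb with the caller so the stack can requeue it.
 */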
4683
4684
4685#if 0
4686static u8 cp_lkm_pm_test_find(u8* pkt, u32 pkt_len, u8* pattern, u32 pattern_len)
4687{
4688 s32 i;
4689 for(i = 0; i < (pkt_len - pattern_len); i++) {
4690 if (memcmp(&pkt[i],pattern,pattern_len) == 0) {
4691 return 1;
4692 }
4693 }
4694 return 0;
4695}
4696
4697static int cp_lkm_pm_test(struct sk_buff *skb)
4698{
4699static u8 first_pkt = 1;
4700static u8 started = 0;
4701static unsigned long total_data = 0;
4702static unsigned long start_time = 0;
4703static unsigned long stop_time = 0;
4704
4705static unsigned long invalid_pkts = 0;
4706static unsigned long total_pkts = 0;
4707
4708 int drop = 0;
4709 unsigned char *ptr = skb->data;
4710 u32 pkt_len = skb->len;
4711 u8 prot;
4712 //u8 type;
4713 u16 udp_len;
4714 u16 dst_port;
4715
4716 if (pkt_len < 20) {
4717 return 0;
4718 }
4719 //function is set up to parse IP pkts, may be called with ether framed pkts as well.
4720 //auto detect ether hdr and remove it
4721 if (ptr[0] != 0x45) {
4722 //ether header
4723 if(ptr[14] == 0x45){
4724 ptr+=14;
4725 pkt_len -= 14;
4726 }
4727 //vlan hdr
4728 else if (ptr[12] == 0x81 && ptr[18] == 0x45) {
4729 ptr+=18;
4730 pkt_len -=18;
4731 }
4732 }
4733
4734 if (ptr[0] != 0x45) {
4735 invalid_pkts++;
4736 }
4737
4738 //printk("0x%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x len: %d \n",ptr[0],ptr[1],ptr[2],ptr[3],ptr[4],ptr[5],ptr[6],ptr[7],ptr[8],ptr[9],ptr[10],ptr[11],ptr[12],ptr[13],ptr[14],ptr[15],pkt_len);
4739 //printk("0x%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x len: %d \n",ptr[0],ptr[1],ptr[2],ptr[3],ptr[4],ptr[5],ptr[6],ptr[7],ptr[8],ptr[9],ptr[10],ptr[11],ptr[12],ptr[13],ptr[14],ptr[15],pkt_len);
4740 if (pkt_len >= 28) {
4741 prot = ptr[9];
4742 if (prot == 0x11) {
4743 ptr += 20; //skip ip header
4744 pkt_len -= 20;
4745 dst_port = ntohs(*((u16*)(&ptr[2])));
4746 udp_len = ntohs(*((u16*)(&ptr[4])));
4747 //printk("Got UDP pkt\n");
4748 if (started && dst_port == 5001) {
4749 drop = 1;
4750 if (first_pkt == 1) {
4751 first_pkt = 0;
4752 total_data = 0;
4753 start_time = jiffies;
4754 invalid_pkts = 0;
4755 total_pkts = 0;
4756 }
4757 total_data += (udp_len+34); //add ip and ether hdrs
4758 stop_time = jiffies;
4759 total_pkts++;
4760 }
4761 else if(dst_port == 5002) {
4762 drop = 1;
4763 ptr += 8; //skip udp header
4764 printk("SHIM START PORT len: %d data: 0x%x, start=%x, stop=%x\n",udp_len, ptr[0], start_time, stop_time);
4765 if(cp_lkm_pm_test_find(ptr, udp_len, "START", 5)){
4766 printk("Got IPERF START\n");
4767 first_pkt = 1;
4768 started = 1;
4769 cp_lkm_wrapper_start_debug();
4770 }
4771 else if (cp_lkm_pm_test_find(ptr, udp_len, "STOP", 4)) {
4772                    u32 delta_time = ((stop_time - start_time)*1000/HZ) - 2000; //minus iperf's 2 second delay waiting for an ack we won't send
4773                    u32 bits_per_sec = (total_data/delta_time)*8000; //in bytes per millisecond, need bits per second
4775 started = 0;
4776 printk("Got IPERF STOP: Total data: %u, Total pkts: %u, Total invalid: %u, Total time: %u msec, BitsPerSec: %u\n",total_data, total_pkts, invalid_pkts, delta_time,bits_per_sec);
4777 cp_lkm_wrapper_stop_debug();
4778 }
4779 }
4780 }
4781 }
4782 return drop;
4783}
4784#endif
4785
4786// called in soft interrupt context - otherwise some protection around pm_net is required
4787//int num_ip_copies = 0;
4788//int num_eth_copies = 0;
4789//int num_pkts = 0;
4790//int num_iters = 0;
4791//int num_unaligned = 0;
4792static int cp_lkm_pm_net_recv(void *ctx, struct sk_buff *skb)
4793{
4794 struct cp_lkm_pm_net *pm_net;
4795 int err;
4796 int recv_bytes;
4797 struct sk_buff *skb_new;
4798 int align = 0; //set to 1 to always send 4 byte aligned IP pkts to network stack
4799 int pad = 20; //number of bytes to put on front of new skbs
4800
4801 //DEBUG_INFO("%s()", __FUNCTION__);
4802 if(NULL == ctx) {
4803 dev_kfree_skb_any(skb);
4804 return 0;
4805 }
4806
4807 //num_pkts++;
4808 //num_iters++;
4809 pm_net = (struct cp_lkm_pm_net *)ctx;
4810
4811 //printk("%s() pm_net: %p\n", __FUNCTION__, pm_net);
4812
4813
4814 skb->dev = pm_net->common.net_dev;
4815
4816 switch(pm_net->common.type) {
4817 case CP_LKM_PM_TYPE_ETHERNET_DHCP:
4818 case CP_LKM_PM_TYPE_ETHERNET_STATIC:
4819 case CP_LKM_PM_TYPE_ETHERNET_STATIC_NOARP:
4820 //this strips the ether header off the packet
4821 skb->protocol = eth_type_trans(skb, pm_net->common.net_dev);
4822 //Need IP hdr aligned for IP stack to avoid unaligned access interrupts
4823 if(align && ((uintptr_t)(skb->data) & 0x3)) {
4824 //num_eth_copies++;
4825 skb_new = skb_copy_expand(skb, pad, 0, GFP_ATOMIC);
4826 dev_kfree_skb_any(skb);
4827 skb=skb_new;
4828 }
4829 if (!skb) {
4830 // packet dropped
4831 UPDATE_STATS(pm_net->common.edi->pm_stats64_ctx, rx_dropped, 1);
4832 return -ENOMEM;
4833 }
4834 break;
4835
4836 case CP_LKM_PM_TYPE_IP_DHCP:
4837 case CP_LKM_PM_TYPE_IP_STATIC:
4838 // Need to add ether header first for processing, then remove it. Need IP hdr aligned when done.
4839 //
4840 // Note: avoid the temptation to skip adding the ether header and doing manually what the call
4841 // to eth_type_trans() does. We did that and it bit us (see Jira issue FW-16149)
4842 // The kernel expects the ether header to be present in the skb buff even though the data ptr
4843 // has been moved past it. Also, if the skb has been cloned, then we are dealing with an
4844 // aggregated modem protocol (multiple pkts per skb), so we have to make a copy to guarantee
4845 // our tmp ether header isn't written into the data space of the previous pkt from the set.
4846 //
4847 if((align && ((uintptr_t)(skb->data) & 0x3)) || (skb_headroom(skb) < ETH_HLEN) || skb_cloned(skb)){
4848 //printk("copy: align: %d, head: %d, cloned: %d, len: %d\n", ((uintptr_t)(skb->data) & 0x3), skb_headroom(skb), skb_cloned(skb), skb->len);
4849 //num_ip_copies++;
4850 skb_new = skb_copy_expand(skb, 16+pad, 0, GFP_ATOMIC);
4851 dev_kfree_skb_any(skb);
4852 skb=skb_new;
4853 }
4854
4855 if (!skb) {
4856 // packet dropped
4857 UPDATE_STATS(pm_net->common.edi->pm_stats64_ctx, rx_dropped, 1);
4858 return -ENOMEM;
4859 }
4860
4861 if (0x60 == (skb->data[0] & 0xF0)) { //mask off version bits of first byte of IP packet to check for ip version
4862 // set the hdr protocol type to IPV6
4863 pm_net->eth_hdr.h_proto = __constant_htons(ETH_P_IPV6);
4864 } else {
4865 // probably ipv4, but not explicitly checking
4866 // set the hdr protocol type to IPV4
4867 pm_net->eth_hdr.h_proto = __constant_htons(ETH_P_IP);
4868 }
4869 memcpy(skb_push(skb, sizeof(struct ethhdr)), (unsigned char *)&pm_net->eth_hdr, sizeof(struct ethhdr));
4870 //this strips the ether hdr off the packet
4871 skb->protocol = eth_type_trans(skb, pm_net->common.net_dev);
4872 break;
4873
4874 default:
4875 DEBUG_INFO("%s() invalid protocol type: %d", __FUNCTION__, pm_net->common.type);
4876 // packet dropped
4877 UPDATE_STATS(pm_net->common.edi->pm_stats64_ctx, rx_errors, 1);
4878            dev_kfree_skb_any(skb); //skbs must be freed with the skb API, not kfree()
4879 return NET_RX_DROP;
4880 }
4881
4882 recv_bytes = skb->len;
4883
4884 //if (cp_lkm_pm_test(skb) == 1) {
4885 // dev_kfree_skb_any(skb);
4886 // return NET_RX_SUCCESS;
4887 //}
4888
4889 //if((int)(skb->data) & 0x3){
4890 //printk("Unaligned IP pkt!!!!!!!!!!!!\n");
4891 //num_unaligned++;
4892 //}
4893
4894
4895 //if(num_iters >= 10000) {
4896 // num_iters = 0;
4897 // printk("num_ip_copies: %d, num_eth_copies: %d, num_unaligned: %d, num_pkts: %d\n",num_ip_copies,num_eth_copies,num_unaligned,num_pkts);
4898 //}
4899
4900 netif_rx(skb);
4901 err = NET_RX_SUCCESS;
4902
4903 UPDATE_STATS(pm_net->common.edi->pm_stats64_ctx, rx_packets, 1);
4904 UPDATE_STATS(pm_net->common.edi->pm_stats64_ctx, rx_bytes, recv_bytes);
4905
4906 return 0;
4907}
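/*
 * Note on the version check above (illustrative): the first byte of an IP
 * header is (version << 4) | IHL, so masking with 0xF0 isolates the version:
 *   0x45       & 0xF0 == 0x40 -> IPv4 (version 4, 5-word header)
 *   0x60..0x6F & 0xF0 == 0x60 -> IPv6 (version 6)
 * Anything else is assumed to be IPv4, as the code notes.
 */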
4908
4909
4910static void cp_lkm_pm_net_get_hdr_size(void *ctx, int wrapper_hdr_size, int* hdr_size, int* hdr_offset)
4911{
4912 struct cp_lkm_pm_net *pm_net;
4913 int pad;
4914 int tmp_size;
4915 int pm_hdr = ETH_HLEN;
4916 int pm_extra = 6;
4917
4918 *hdr_size = 0;
4919 *hdr_offset = 0;
4920
4921 pm_net = (struct cp_lkm_pm_net *)ctx;
4922 if(!pm_net) {
4923 return;
4924 }
4925 //temp return here
4926 //return;
4927
4928 //calculate how much header space there is before the IP hdr.
4929 //this is needed to align the IP hdr properly for optimal performance
4930 switch(pm_net->common.type) {
4931 case CP_LKM_PM_TYPE_ETHERNET_DHCP:
4932 case CP_LKM_PM_TYPE_ETHERNET_STATIC:
4933 case CP_LKM_PM_TYPE_ETHERNET_STATIC_NOARP:
4934 //pkts will need room for the wrapper header and the ether hdr.
4935 //both headers will be present at the same time.
4936 tmp_size = wrapper_hdr_size + pm_hdr + pm_extra;
4937 pad = ((~tmp_size)+1)&0x3; //calculate padding needed for 4 byte boundary on alloc
4938 *hdr_size = tmp_size + pad;
4939 *hdr_offset = pad+pm_extra;
4940 break;
4941
4942 case CP_LKM_PM_TYPE_IP_DHCP:
4943 case CP_LKM_PM_TYPE_IP_STATIC:
4944 //pkts will need room for the wrapper header or the ether hdr
4945 //both headers won't be present at the same time. The wrapper is present
4946 //up through the USB side of the shim. We (the pm) add a temp ether header
4947 //for processing after the wrapper header is removed
4948 tmp_size = max(wrapper_hdr_size, pm_hdr+pm_extra);
4949 pad = ((~tmp_size)+1)&0x3; //calculate padding needed for 4 byte boundary on alloc
4950 *hdr_size = tmp_size + pad;
4951 *hdr_offset = *hdr_size - wrapper_hdr_size;
4952 break;
4953 default:
4954 break;
4955 }
4956}
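/*
 * Worked example of the padding math above (illustrative; a wrapper_hdr_size
 * of 18 is a made-up value): pad = ((~tmp_size)+1)&0x3 is two's-complement
 * negation masked to the low two bits, i.e. pad = (-tmp_size) & 0x3.
 *
 *   ethernet case, wrapper_hdr_size = 18:
 *     tmp_size   = 18 + 14 (ETH_HLEN) + 6 (pm_extra) = 38
 *     pad        = (-38) & 0x3 = 2
 *     hdr_size   = 38 + 2 = 40   (4-byte aligned)
 *     hdr_offset = 2 + 6 = 8
 *
 * When tmp_size is already a multiple of 4, pad is 0 and nothing changes.
 */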
4957
4958
4959static u32 cp_lkm_pm_net_get_link(struct net_device *dev)
4960{
4961 struct cp_lkm_pm_net *pm_net;
4962
4963 DEBUG_TRACE("%s()", __FUNCTION__);
4964 pm_net = netdev_priv(dev);
4965 if(!pm_net) {
4966 return 0;
4967 }
4968 return pm_net->common.attached;
4969}
4970
4971
4972#ifndef KERNEL_2_6_21
4973static const struct net_device_ops cp_lkm_pm_net_device_ops = {
4974 .ndo_open = cp_lkm_pm_net_open,
4975 .ndo_start_xmit = cp_lkm_pm_net_xmit,
4976 .ndo_stop = cp_lkm_pm_net_close,
4977 .ndo_get_stats64 = cp_lkm_pm_get_stats64
4978};
4979#endif
4980
4981static const struct ethtool_ops cp_lkm_pm_net_ethtool_ops = {
4982 .get_link = cp_lkm_pm_net_get_link,
4983};
4984
4985static void cp_lkm_pm_net_setup(struct net_device *net_dev)
4986{
4987 struct cp_lkm_pm_net *pm_net;
4988
4989 DEBUG_INFO("%s()", __FUNCTION__);
4990 pm_net = netdev_priv(net_dev);
4991 ether_setup(net_dev);
4992
4993#ifdef KERNEL_2_6_21
4994 net_dev->open = cp_lkm_pm_net_open;
4995 net_dev->hard_start_xmit = cp_lkm_pm_net_xmit;
4996 net_dev->stop = cp_lkm_pm_net_close;
4997#else
4998 net_dev->netdev_ops = &cp_lkm_pm_net_device_ops;
4999 net_dev->needed_headroom = 48;
5000 net_dev->needed_tailroom = 8;
5001#endif
5002
5003 net_dev->ethtool_ops = &cp_lkm_pm_net_ethtool_ops;
5004
5005}
5006
5007static int cp_lkm_pm_net_attach(struct cp_lkm_pm_ctx *mgr, cp_lkm_pm_type_t type, int uid, char *name, unsigned char *mac)
5008{
5009 int err;
5010 struct cp_lkm_pm_net *pm_net;
5011 struct net_device *net_dev;
5012 unsigned long flags;
5013#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17,0)
5014 net_dev = alloc_netdev(sizeof(struct cp_lkm_pm_net), name, NET_NAME_UNKNOWN, cp_lkm_pm_net_setup);
5015#else
5016 net_dev = alloc_netdev(sizeof(struct cp_lkm_pm_net), name, cp_lkm_pm_net_setup);
5017#endif
5018 if (!net_dev) {
5019 DEBUG_INFO("%s() alloc failed: %s", __FUNCTION__, name);
5020 return -ENOMEM;
5021 }
5022
5023 pm_net= netdev_priv(net_dev);
5024
5025 err = cp_lkm_common_init(&pm_net->common);
5026 if (err) {
5027 free_netdev(net_dev);
5028 return err;
5029 }
5030
5031 pm_net->common.net_dev = net_dev;
5032 pm_net->common.unique_id = uid;
5033 pm_net->common.type = type;
5034 pm_net->common.edi = NULL;
5035
5036 //printk("%s(%p) pm-uid: %d, pm_net: %p\n", __FUNCTION__, mgr, uid, pm_net);
5037
5038 switch (type) {
5039 case CP_LKM_PM_TYPE_ETHERNET_DHCP:
5040 case CP_LKM_PM_TYPE_ETHERNET_STATIC:
5041 case CP_LKM_PM_TYPE_ETHERNET_STATIC_NOARP:
5042 if(!memcmp(mac, "\x00\x00\x00\x00\x00\x00", ETH_ALEN)) {
5043 random_ether_addr(net_dev->dev_addr);
5044 } else {
5045 memcpy (net_dev->dev_addr, mac, ETH_ALEN);
5046 }
5047
5048            //TODO: only set IFF_NOARP here if the driver says so
5049 if (type == CP_LKM_PM_TYPE_ETHERNET_STATIC_NOARP) {
5050 net_dev->flags |= IFF_NOARP;
5051 }
5052 break;
5053 case CP_LKM_PM_TYPE_IP_DHCP:
5054 case CP_LKM_PM_TYPE_IP_STATIC:
5055 // random addr for DHCP functionality
5056 if(!memcmp(mac, "\x00\x00\x00\x00\x00\x00", ETH_ALEN) || !memcmp(mac, "\x00\x30\x44\x00\x00\x00", ETH_ALEN)) {
5057 random_ether_addr(net_dev->dev_addr);
5058 } else {
5059 memcpy (net_dev->dev_addr, mac, ETH_ALEN);
5060 }
5061
5062 net_dev->flags |= IFF_NOARP;
5063 memcpy(pm_net->eth_hdr.h_dest, net_dev->dev_addr, ETH_ALEN);
5064 random_ether_addr(pm_net->eth_hdr.h_source);
5065 break;
5066 default:
5067 DEBUG_INFO("%s() invalid protocol type: %d", __FUNCTION__, type);
5068 cp_lkm_common_deinit(&pm_net->common);
5069 free_netdev(net_dev);
5070 return -EINVAL;
5071 }
5072
5073 DEBUG_INFO("%s register netdev", __FUNCTION__);
5074 err = register_netdev(net_dev);
5075 if (err < 0) {
5076 DEBUG_INFO("%s netdev registration error", __FUNCTION__);
5077 cp_lkm_common_deinit(&pm_net->common);
5078 free_netdev(net_dev);
5079 return err;
5080 }
5081
5082 netif_device_attach(pm_net->common.net_dev);
5083
5084 netif_stop_queue(pm_net->common.net_dev);
5085
5086 pm_net->common.attached = 1;
5087
5088 spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
5089 list_add(&pm_net->common.list, &mgr->pm_list);
5090 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5091
5092 return 0;
5093}
5094
5095static int cp_lkm_pm_net_detach(struct cp_lkm_pm_ctx *mgr, int uid)
5096{
5097
5098 // find the object in the list
5099 struct list_head *pos;
5100 struct cp_lkm_pm_common *pm = NULL;
5101 unsigned long flags;
5102
5103 DEBUG_TRACE("%s(%p)", __FUNCTION__, mgr);
5104
5105 spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
5106 list_for_each(pos, &mgr->pm_list){
5107 struct cp_lkm_pm_common *pm_tmp = list_entry(pos, struct cp_lkm_pm_common, list);
5108 if(pm_tmp->unique_id == uid) {
5109 pm = pm_tmp;
5110 break;
5111 }
5112 }
5113
5114 if (!pm) {
5115 // already detached
5116 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5117 DEBUG_INFO("%s() already detached", __FUNCTION__);
5118 return 0;
5119 }
5120
5121 // remove the object
5122 list_del(&pm->list);
5123 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5124
5125 if (pm->attached) {
5126 DEBUG_INFO("%s() detaching", __FUNCTION__);
5127 netif_device_detach(pm->net_dev);
5128 pm->attached = 0;
5129 }
5130
5131 unregister_netdev(pm->net_dev);
5132
5133 // clean the filter list
5134 cp_lkm_pm_filter_empty_list(pm);
5135
5136 cp_lkm_common_deinit(pm);
5137 free_netdev(pm->net_dev); // this also frees the pm since it was allocated as part of the net_dev
5138
5139 return 0;
5140}
5141
5142static int cp_lkm_pm_net_activate(struct cp_lkm_pm_ctx *mgr, int uid, bool activate)
5143{
5144 // find the object in the list
5145 struct list_head *pos;
5146 struct cp_lkm_pm_common *pm = NULL;
5147 unsigned long flags;
5148 //printk("%s(%p) activate: %d\n", __FUNCTION__, mgr, activate);
5149
5150 spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
5151 list_for_each(pos, &mgr->pm_list){
5152 struct cp_lkm_pm_common *pm_tmp = list_entry(pos, struct cp_lkm_pm_common, list);
5153 if(pm_tmp->unique_id == uid) {
5154 pm = pm_tmp;
5155 break;
5156 }
5157 }
5158
5159 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5160
5161 if (!pm) {
5162 // couldn't find object - already unplugged
5163 DEBUG_INFO("%s() already unplugged", __FUNCTION__);
5164 return 0;
5165 }
5166
5167 if (activate) {
5168 //netif_start_queue(pm->net_dev);
5169 if (pm->edi) {
5170 pm->edi->pm_recv_ctx = pm;
5171 }
5172 netif_wake_queue(pm->net_dev);
5173 } else {
5174 netif_stop_queue(pm->net_dev);
5175 if (pm->edi) {
5176 pm->edi->pm_recv_ctx = NULL;
5177 //printk("pm_recv_ctx null\n");
5178 }
5179
5180 // remove the filters - will be added back in before activate
5181 cp_lkm_pm_filter_empty_list(pm);
5182 }
5183
5184 return 0;
5185}
5186
5187int cp_lkm_pm_net_pause(void *ctx)
5188{
5189 struct cp_lkm_pm_common* pm = (struct cp_lkm_pm_common *)ctx;
5190 if(!ctx) {
5191 return 0;
5192 }
5193 netif_stop_queue(pm->net_dev);
5194 return 0;
5195
5196}
5197int cp_lkm_pm_net_resume(void *ctx)
5198{
5199 struct cp_lkm_pm_common* pm = (struct cp_lkm_pm_common *)ctx;
5200 if(!ctx) {
5201 return 0;
5202 }
5203 //netif_start_queue(pm->net_dev);
5204 netif_wake_queue(pm->net_dev);
5205 return 0;
5206}
5207
5208
5209/******************************* kernel module PPP/tty PM functionality **********************************/
5210struct cp_lkm_pm_ppp {
5211 struct cp_lkm_pm_common common;
5212 u8 *no_carrier_ptr;
5213 bool in_frame;
5214
5215 struct tty_struct *tty; // pointer to the tty for this device
5216 int minor;
5217 int open_count;
5218};
5219
5220#define CP_TTY_MINORS 10
5221#define CP_TTY_DEVICE_NAME "ttyCP"
5222
5223#define PPP_MGR_NO_CARRIER "NO CARRIER"
5224#define PPP_FLAG 0x7E
5225
5226static struct cp_lkm_pm_ppp *cp_lkm_pm_ppp_table[CP_TTY_MINORS];
5227static struct tty_driver *cp_lkm_pm_tty_driver = NULL;
5228static struct tty_port cp_lkm_pm_tty_port[CP_TTY_MINORS];
5229
5230static void cp_lkm_pm_ppp_finalize(void *arg)
5231{
5232 struct cp_lkm_pm_ppp *pm_ppp = (struct cp_lkm_pm_ppp *)arg;
5233 tty_unregister_device(cp_lkm_pm_tty_driver, pm_ppp->minor);
5234 cp_lkm_pm_ppp_table[pm_ppp->minor] = NULL;
5235 if (pm_ppp->common.edi) {
5236 pm_ppp->common.edi = NULL;
5237 }
5238 // clean the filter list
5239 cp_lkm_pm_filter_empty_list(&pm_ppp->common);
5240}
5241
5242static int cp_lkm_pm_ppp_attach(struct cp_lkm_pm_ctx *mgr, cp_lkm_pm_type_t type, int uid, char *name)
5243{
5244 int minor;
5245 int err;
5246 unsigned long flags;
5247 struct cp_lkm_pm_ppp *pm_ppp;
5248
5249 DEBUG_INFO("%s(%p)", __FUNCTION__, mgr);
5250
5251 //printk("%s() uid: %d, type: %d\n", __FUNCTION__, uid, type);
5252
5253 // find an empty minor device slot and register
5254 for (minor = 0; minor < CP_TTY_MINORS && cp_lkm_pm_ppp_table[minor]; minor++);
5255
5256 if (minor == CP_TTY_MINORS) {
5257 DEBUG_WARN("%s(%p) - out of devices", __FUNCTION__, mgr);
5258 return -ENODEV;
5259 }
5260
5261 if (!(pm_ppp = memref_alloc_and_zero(sizeof(struct cp_lkm_pm_ppp), cp_lkm_pm_ppp_finalize))) {
5262 DEBUG_WARN("%s(%p) - no memory", __FUNCTION__, mgr);
5263 return -ENOMEM;
5264 }
5265
5266 err = cp_lkm_common_init(&pm_ppp->common);
5267 if (err) {
5268 return -ENOMEM;
5269 }
5270 pm_ppp->common.type = type;
5271 pm_ppp->common.unique_id = uid;
5272
5273    pm_ppp->no_carrier_ptr = (u8 *)PPP_MGR_NO_CARRIER;
5274
5275 pm_ppp->minor = minor;
5276
5277 cp_lkm_pm_ppp_table[minor] = pm_ppp;
5278 sprintf(name, "%s%d", CP_TTY_DEVICE_NAME, minor);
5279
5280 //printk("%s(%p) attached\n", __FUNCTION__, &pm_ppp->common);
5281 pm_ppp->common.attached = 1;
5282 pm_ppp->open_count = 0;
5283
5284 spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
5285 list_add(&pm_ppp->common.list, &mgr->pm_list);
5286 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5287
5288 tty_port_register_device(&cp_lkm_pm_tty_port[minor], cp_lkm_pm_tty_driver, minor, NULL);
5289
5290 return 0;
5291}
5292
5293static int cp_lkm_pm_ppp_detach(struct cp_lkm_pm_ctx *mgr, int uid)
5294{
5295
5296 // find the object in the list
5297 struct list_head *pos;
5298 struct cp_lkm_pm_common *pm = NULL;
5299 struct cp_lkm_pm_ppp *pm_ppp;
5300 unsigned long flags;
5301
5302 DEBUG_INFO("%s(%p)", __FUNCTION__, mgr);
5303 //printk("%s() uid: %d\n", __FUNCTION__, uid);
5304
5305 spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
5306 list_for_each(pos, &mgr->pm_list){
5307 struct cp_lkm_pm_common *pm_tmp = list_entry(pos, struct cp_lkm_pm_common, list);
5308 if(pm_tmp->unique_id == uid) {
5309 pm = pm_tmp;
5310 break;
5311 }
5312 }
5313
5314 if (!pm) {
5315 // already detached
5316 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5317 DEBUG_INFO("%s() already detached", __FUNCTION__);
5318 return 0;
5319 }
5320
5321 // remove the object
5322 list_del(&pm->list);
5323
5324 pm_ppp = (struct cp_lkm_pm_ppp *)pm;
5325
5326 //printk("%s() !attached\n", __FUNCTION__);
5327 pm->attached = 0;
5328
5329 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5330
5331 // clean the filter list
5332 cp_lkm_pm_filter_empty_list(pm);
5333
5334 cp_lkm_common_deinit(pm);
5335
5336 memref_deref(pm_ppp);
5337
5338 return 0;
5339}
5340
5341static int cp_lkm_pm_ppp_activate(struct cp_lkm_pm_ctx *mgr, int uid, bool activate)
5342{
5343 // find the object in the list
5344 struct list_head *pos;
5345 struct cp_lkm_pm_common *pm = NULL;
5346 unsigned long flags;
5347
5348 spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
5349 list_for_each(pos, &mgr->pm_list){
5350 struct cp_lkm_pm_common *pm_tmp = list_entry(pos, struct cp_lkm_pm_common, list);
5351 if(pm_tmp->unique_id == uid) {
5352 pm = pm_tmp;
5353 break;
5354 }
5355 }
5356
5357 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5358
5359 if (!pm) {
5360 // already detached
5361 DEBUG_INFO("%s() already detached", __FUNCTION__);
5362 return 0;
5363 }
5364 //printk("%s(%p) activate: %d, attached: %d\n", __FUNCTION__, pm, activate, pm->attached);
5365
5366 if (activate) {
5367 if (pm->edi) {
5368 pm->edi->pm_recv_ctx = pm;
5369 }
5370 } else {
5371 if (pm->edi) {
5372 pm->edi->pm_recv_ctx = NULL;
5373 //printk("pm_recv_ctx null\n");
5374 }
5375 // clean the filter list
5376 cp_lkm_pm_filter_empty_list(pm);
5377 }
5378
5379 return 0;
5380}
5381
5382
5383static int cp_lkm_pm_tty_open(struct tty_struct * tty, struct file * filp)
5384{
5385 struct cp_lkm_pm_ppp *pm_ppp;
5386 int index;
5387 unsigned long flags;
5388
5389 DEBUG_INFO("%s()", __FUNCTION__);
5390
5391 index = tty->index;
5392
5393 // get the pm_ppp associated with this tty pointer
5394 spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
5395 pm_ppp = cp_lkm_pm_ppp_table[index];
5396 if (!pm_ppp /*|| tty->driver_data */|| !pm_ppp->common.attached) {
5397 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5398 return -EINVAL;
5399 }
5400
5401 if (pm_ppp->open_count++) {
5402 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5403 return 0;
5404 }
5405
5406 memref_ref(pm_ppp);
5407 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5408
5409 // save our structure within the tty structure
5410 tty->driver_data = pm_ppp;
5411 pm_ppp->tty = tty;
5412
5413 // XXX 3.10 hack
5414 //tty->low_latency = 0;
5415
5416 return 0;
5417}
5418
5419static void cp_lkm_pm_tty_close(struct tty_struct * tty, struct file * filp)
5420{
5421 struct cp_lkm_pm_ppp *pm_ppp;
5422 unsigned long flags;
5423
5424 DEBUG_INFO("%s()", __FUNCTION__);
5425
5426 pm_ppp = tty->driver_data;
5427 if(!pm_ppp) {
5428 return;
5429 }
5430
5431 spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
5432 if (--pm_ppp->open_count) {
5433 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5434 return;
5435 }
5436 tty->driver_data = NULL;
5437 pm_ppp->tty = NULL;
5438 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5439 memref_deref(pm_ppp);
5440}
5441static bool cp_lkm_pm_ppp_check_match(struct cp_lkm_pm_ppp *pm_ppp, u8 ch)
5442{
5443 if (*(pm_ppp->no_carrier_ptr) == ch) {
5444 // character match - advance to next character
5445 pm_ppp->no_carrier_ptr++;
5446 if (! *(pm_ppp->no_carrier_ptr)) {
5447 // end of no carrier string - found oob no carrier
5448 return true;
5449 }
5450 return false;
5451 }
5452 // characters don't match
5453 if (pm_ppp->no_carrier_ptr != (u8 *)PPP_MGR_NO_CARRIER) {
5454 // characters don't match - start over
5455 pm_ppp->no_carrier_ptr = (u8 *)PPP_MGR_NO_CARRIER;
5456 // check not matching character against first character of no carrier - 1 level of recursion
5457 return cp_lkm_pm_ppp_check_match(pm_ppp, ch);
5458 }
5459
5460 return false;
5461}
5462
5463static bool cp_lkm_pm_ppp_is_no_carrier(struct cp_lkm_pm_ppp *pm_ppp, struct sk_buff *skb)
5464{
5465 // search thru skb for data between frame markers for NO CARRIER
5466 bool no_carrier = false;
5467 unsigned int len = skb->len;
5468 u8 *pos = skb->data;
5469
5470 DEBUG_TRACE("%s()", __FUNCTION__);
5471
5472 while (len--) {
5473 if (PPP_FLAG == (*pos)) {
5474 pm_ppp->in_frame = !pm_ppp->in_frame;
5475 } else if (!pm_ppp->in_frame) {
5476 // look for match
5477 no_carrier = cp_lkm_pm_ppp_check_match(pm_ppp, *pos);
5478 if (no_carrier) {
5479 DEBUG_INFO("%s() found no carrier", __FUNCTION__);
5480 return true;
5481 }
5482 } else {
5483            pm_ppp->no_carrier_ptr = (u8 *)PPP_MGR_NO_CARRIER;
5484 }
5485
5486 pos++;
5487 }
5488
5489 return false;
5490}
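/*
 * Illustrative trace of the matcher above on the out-of-frame byte stream
 * "NNO CARRIER" (not compiled):
 *
 *   'N' matches "NO CARRIER"[0]        -> ptr advances to 'O'
 *   'N' fails at 'O'                   -> ptr resets; recheck: 'N' matches [0]
 *   'O',' ','C','A','R','R','I','E'    -> ptr advances through the string
 *   final 'R' consumes the last char   -> *ptr == '\0', returns true
 *
 * The single level of recursion in cp_lkm_pm_ppp_check_match() handles the
 * reset-then-recheck step; because the pointer is already reset to the start
 * of the pattern there, the recursive call cannot recurse again.
 */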
5491
5492static void cp_lkm_pm_ppp_get_hdr_size(void *ctx, int wrapper_hdr_size, int* hdr_size, int* hdr_offset)
5493{
5494 *hdr_size = 0;
5495 *hdr_offset = 0;
5496}
5497
5498// called in soft interrupt context
5499static int cp_lkm_pm_ppp_recv(void *ctx, struct sk_buff *skb)
5500{
5501#ifdef KERNEL_2_6_21
5502 int size;
5503#endif
5504 struct cp_lkm_pm_ppp *pm_ppp;
5505 bool oob_no_carrier;
5506
5507 if(NULL == ctx || !skb->len) {
5508 DEBUG_INFO("%s() - null ctx - dropped", __FUNCTION__);
5509 goto done;
5510 }
5511
5512 pm_ppp = (struct cp_lkm_pm_ppp *)ctx;
5513
5514 if (!pm_ppp) {
5515 DEBUG_INFO("%s() - NULL pm_ppp - dropped", __FUNCTION__);
5516 goto done;
5517 }
5518
5519 // check for OOB NO CARRIER - signal up through file descriptor
5520 oob_no_carrier = cp_lkm_pm_ppp_is_no_carrier(pm_ppp, skb);
5521 if (oob_no_carrier) {
5522 struct cp_lkm_msg_hdr hdr;
5523
5524 DEBUG_INFO("%s() - posting no carrier", __FUNCTION__);
5525 memset(&hdr,0,sizeof(hdr));
5526 hdr.instance_id = pm_ppp->common.unique_id;
5527 hdr.cmd = CP_LKM_PM_LINK_DOWN;
5528 hdr.status = CP_LKM_STATUS_OK;
5529 hdr.len = 0;
5530
5531 LOG("Received NO CARRIER\n");
5532 DEBUG_INFO("%s() - posting link down", __FUNCTION__);
5533 cp_lkm_post_message(&cp_lkm_pm_mgr.common, &hdr, NULL);
5534
5535 goto done;
5536 }
5537
5538 if (!pm_ppp->tty || !pm_ppp->tty->driver_data) {
5539 DEBUG_INFO("%s() - not setup - dropped", __FUNCTION__);
5540 goto done;
5541 }
5542
5543#ifdef KERNEL_2_6_21
5544 size = tty_buffer_request_room(pm_ppp->tty, skb->len);
5545 if(size < skb->len) {
5546 // dropped data - or we need to queue for later
5547 DEBUG_WARN("%s() - dropping network data", __FUNCTION__);
5548 goto done;
5549 }
5550#endif
5551
5552 tty_insert_flip_string(pm_ppp->tty->port, skb->data, skb->len);
5553 tty_flip_buffer_push(pm_ppp->tty->port);
5554
5555done:
5556 dev_kfree_skb_any(skb);
5557 return 0;
5558}
5559
5560// this can be called from interrupt thread or normal kernel thread
5561static int cp_lkm_pm_tty_write(struct tty_struct * tty, const unsigned char *buf, int count)
5562{
5563 struct cp_lkm_pm_ppp *pm_ppp;
5564 struct sk_buff *skb;
5565 int link_res;
5566 int retval = count;
5567
5568 if (!count) {
5569 //printk("%s() !count \n", __FUNCTION__);
5570 return 0;
5571 }
5572
5573 pm_ppp = (struct cp_lkm_pm_ppp *)tty->driver_data;
5574
5575 if (!pm_ppp) {
5576 //printk("%s() !pm_ppp \n", __FUNCTION__);
5577 return -EINVAL;
5578 }
5579
5580 //printk("%s(%p) id:%d, attached: %d\n", __FUNCTION__, &pm_ppp->common, pm_ppp->common.unique_id, pm_ppp->common.attached);
5581
5582    //see if we can grab the link lock; if not, the link between USB and PM is being brought up or torn down, so it's not safe to proceed
5583 link_res = cp_lkm_common_inc_link_lock(&pm_ppp->common);
5584 if(link_res < 0) {
5585 //printk("%s() !link \n", __FUNCTION__);
5586 return 0;
5587 }
5588
5589 /* Drop packet if interface is not attached */
5590 if (!pm_ppp->common.attached){
5591 retval = 0;
5592 //printk("%s() !attached: %d \n", __FUNCTION__, pm_ppp->common.attached);
5593 goto drop;
5594 }
5595
5596 if (!(pm_ppp->common.edi) || !(pm_ppp->common.edi->usb_send) || !(pm_ppp->common.edi->usb_send_ctx)) {
5597 retval = 0;
5598 //printk("%s() !edi \n", __FUNCTION__);
5599 goto drop;
5600 }
5601
5602    //TODO(benk): check for an enabled filter here - pass in a buffer pointer to the IP header
5603
5604 // alloc skb to send
5605 if ((skb = alloc_skb (count, GFP_ATOMIC)) == NULL) {
5606 retval = -ENOMEM;
5607 goto pm_tty_write_done;
5608 }
5609
5610 memcpy(skb->data, buf, count);
5611 skb->len = count;
5612 skb_set_tail_pointer(skb, skb->len);
5613
5614 // send data to USB module
5615 pm_ppp->common.edi->usb_send(pm_ppp->common.edi->usb_send_ctx, skb);
5616 retval = count;
5617 goto pm_tty_write_done;
5618
5619drop:
5620pm_tty_write_done:
5621 cp_lkm_common_dec_link_lock(&pm_ppp->common);
5622 //printk("%s() done\n", __FUNCTION__);
5623
5624 return retval;
5625}
5626
5627static int cp_lkm_pm_tty_write_room(struct tty_struct *tty)
5628{
5629 struct cp_lkm_pm_ppp *pm_ppp;
5630
5631 DEBUG_INFO("%s()", __FUNCTION__);
5632
5633 pm_ppp = (struct cp_lkm_pm_ppp *)tty->driver_data;
5634
5635 if (!pm_ppp) {
5636 return -EINVAL;
5637 }
5638
5639 return 2048;
5640}
5641
5642static int cp_lkm_pm_tty_chars_in_buffer(struct tty_struct *tty)
5643{
5644 struct cp_lkm_pm_ppp *pm_ppp;
5645
5646 DEBUG_INFO("%s()", __FUNCTION__);
5647
5648 pm_ppp = (struct cp_lkm_pm_ppp *)tty->driver_data;
5649
5650 if (!pm_ppp) {
5651 return -EINVAL;
5652 }
5653
5654 return 0;
5655}
5656
5657static void cp_lkm_pm_tty_set_termios(struct tty_struct *tty, struct ktermios * old)
5658{
5659 DEBUG_INFO("%s()", __FUNCTION__);
5660
5661}
5662
5663#ifdef KERNEL_2_6_21
5664static int cp_lkm_pm_tty_ioctl(struct tty_struct *tty, struct file * file, unsigned int cmd, unsigned long arg)
5665#else
5666static int cp_lkm_pm_tty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
5667#endif
5668{
5669 struct cp_lkm_pm_ppp *pm_ppp;
5670
5671 DEBUG_TRACE("%s(%x)", __FUNCTION__, cmd);
5672
5673 pm_ppp = (struct cp_lkm_pm_ppp *)tty->driver_data;
5674
5675 if (!pm_ppp) {
5676 return -EINVAL;
5677 }
5678
5679 return -ENOIOCTLCMD;
5680}
5681
5682static struct tty_operations cp_lkm_pm_tty_ops = {
5683.open = cp_lkm_pm_tty_open,
5684.close = cp_lkm_pm_tty_close,
5685.write = cp_lkm_pm_tty_write,
5686.write_room = cp_lkm_pm_tty_write_room,
5687.chars_in_buffer = cp_lkm_pm_tty_chars_in_buffer,
5688.set_termios = cp_lkm_pm_tty_set_termios,
5689.ioctl = cp_lkm_pm_tty_ioctl
5690
5691/*
5692.throttle = acm_tty_throttle,
5693.unthrottle = acm_tty_unthrottle,
5694*/
5695};
5696
5697static int cp_lkm_pm_tty_init(void)
5698{
5699 int retval;
5700 int i;
5701
5702 for(i = 0; i < CP_TTY_MINORS; i++) {
5703 tty_port_init(&cp_lkm_pm_tty_port[i]);
5704 }
5705
5706 cp_lkm_pm_tty_driver = alloc_tty_driver(CP_TTY_MINORS);
5707 if (!cp_lkm_pm_tty_driver) {
5708 return -ENOMEM;
5709 }
5710
5711 // initialize the tty driver
5712 cp_lkm_pm_tty_driver->owner = THIS_MODULE;
5713 cp_lkm_pm_tty_driver->driver_name = "cptty";
5714 cp_lkm_pm_tty_driver->name = CP_TTY_DEVICE_NAME;
5715 cp_lkm_pm_tty_driver->major = 0; // dynamically assign major number
5716    cp_lkm_pm_tty_driver->minor_start = 0;
5717 cp_lkm_pm_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
5718 cp_lkm_pm_tty_driver->subtype = SERIAL_TYPE_NORMAL;
5719 cp_lkm_pm_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
5720 cp_lkm_pm_tty_driver->init_termios = tty_std_termios;
5721 tty_set_operations(cp_lkm_pm_tty_driver, &cp_lkm_pm_tty_ops);
5722
5723 retval = tty_register_driver(cp_lkm_pm_tty_driver);
5724 if (retval) {
5725 DEBUG_ERROR("%s() failed to register cp tty driver", __FUNCTION__);
5726 put_tty_driver(cp_lkm_pm_tty_driver);
5727 for(i = 0; i < CP_TTY_MINORS; i++) {
5728 tty_port_destroy(&cp_lkm_pm_tty_port[i]);
5729 }
5730 }
5731 return retval;
5732
5733}
5734
5735static void cp_lkm_pm_tty_cleanup(void)
5736{
5737 int i;
5738 if (cp_lkm_pm_tty_driver) {
5739 tty_unregister_driver(cp_lkm_pm_tty_driver);
5740 put_tty_driver(cp_lkm_pm_tty_driver);
5741 for(i = 0; i < CP_TTY_MINORS; i++) {
5742 tty_port_destroy(&cp_lkm_pm_tty_port[i]);
5743 }
5744 cp_lkm_pm_tty_driver = NULL;
5745 }
5746}
5747
5748/******************************* kernel module PM mgr functionality **********************************/
5749
5750
5751static int cp_lkm_pm_open(struct cp_lkm_common_ctx *ctx);
5752static int cp_lkm_pm_close(struct cp_lkm_common_ctx *ctx);
5753static int cp_lkm_pm_handle_msg(struct cp_lkm_common_ctx *ctx, struct cp_lkm_msg_hdr *hdr, struct sk_buff *skb);
5754static int cp_lkm_pm_handle_ioctl(struct cp_lkm_common_ctx *ctx, int cmd, void *k_argp);
5755
5756
5757static int cp_lkm_pm_init(void)
5758{
5759 DEBUG_INFO("%s()", __FUNCTION__);
5760
5761 memset(&cp_lkm_pm_mgr, 0x00, sizeof(struct cp_lkm_pm_ctx));
5762 cp_lkm_pm_mgr.common.open = cp_lkm_pm_open;
5763 cp_lkm_pm_mgr.common.close = cp_lkm_pm_close;
5764 cp_lkm_pm_mgr.common.handle_msg = cp_lkm_pm_handle_msg;
5765 cp_lkm_pm_mgr.common.handle_ioctl = cp_lkm_pm_handle_ioctl;
5766 INIT_LIST_HEAD(&cp_lkm_pm_mgr.pm_list);
5767 spin_lock_init(&cp_lkm_pm_mgr.pm_list_lock);
5768
5769 cp_lkm_common_ctx_init(&cp_lkm_pm_mgr.common);
5770
5771 return 0;
5772}
5773
5774static int cp_lkm_pm_cleanup(void)
5775{
5776 struct cp_lkm_pm_common *pmi;
5777 struct list_head *entry, *tmp;
5778 unsigned long flags;
5779
5780 DEBUG_INFO("%s()", __FUNCTION__);
5781
5782 // clean up msg list
5783 cp_lkm_cleanup_msg_list(&cp_lkm_pm_mgr.common);
5784
5785 // cleanup any PM in list
5786 spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
5787
5788 list_for_each_safe(entry, tmp, &cp_lkm_pm_mgr.pm_list) {
5789 pmi = list_entry(entry, struct cp_lkm_pm_common, list);
5790 if (pmi->edi) {
5791 pmi->edi->pm_recv_ctx = NULL;
5792 //printk("pm_recv_ctx null\n");
5793 pmi->edi->pm_stats64_ctx = NULL;
5794 pmi->edi = NULL;
5795 }
5796 list_del(&pmi->list);
5797 // clean the filter list
5798 cp_lkm_pm_filter_empty_list(pmi);
5799
5800 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5801 if (pmi->net_dev) {
5802 // network device
5803 cp_lkm_common_deinit(pmi);
5804 unregister_netdev(pmi->net_dev);
5805 free_netdev(pmi->net_dev); // this also frees the pmi since it was allocated as part of the net_dev
5806 } else {
5807 // tty device
5808 memref_deref(pmi);
5809 }
5810
5811 spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
5812 }
5813 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5814
5815 return 0;
5816}
5817
5818static int cp_lkm_pm_open(struct cp_lkm_common_ctx *ctx)
5819{
5820// struct cp_lkm_pm_ctx *pm_mgr;
5821
5822 DEBUG_INFO("%s(%p)", __FUNCTION__, ctx);
5823
5824// pm_mgr = (struct cp_lkm_pm_ctx *)ctx;
5825
5826 return 0;
5827}
5828
5829static int cp_lkm_pm_close(struct cp_lkm_common_ctx *ctx)
5830{
5831 //struct cp_lkm_pm_ctx *pm_mgr = (struct cp_lkm_pm_ctx *)ctx;
5832 //struct cp_lkm_pm_common *pm_tmp = NULL;
5833 //struct list_head *entry, *tmp;
5834 //unsigned long flags;
5835
5836 LOG("%s() called unexpectedly.", __FUNCTION__);
5837
5838 //NOTE: catkin 10/11/2019 - Close is only called in our system if the modem stack crashes. This means
5839 // things are in a bad state and the router will be rebooting. We decided not
5840 // to clean things up here because close code on usb side got into an infinite loop
5841 // and prevented the router from rebooting. Revisit if close ever becomes a normal event.
5842
5843 /*
5844 spin_lock_irqsave(&pm_mgr->pm_list_lock, flags);
5845
5846 list_for_each_safe(entry, tmp, &pm_mgr->pm_list) {
5847 pm_tmp = list_entry(entry, struct cp_lkm_pm_common, list);
5848 spin_unlock_irqrestore(&pm_mgr->pm_list_lock, flags);
5849
5850 // call detach to clean up network interface
5851 if (CP_LKM_PM_TYPE_PPP_CLIENT == pm_tmp->type || CP_LKM_PM_TYPE_PPP_SERVER == pm_tmp->type) {
5852 cp_lkm_pm_ppp_detach(pm_mgr, pm_tmp->unique_id);
5853 } else {
5854 cp_lkm_pm_net_detach(pm_mgr, pm_tmp->unique_id);
5855 }
5856 }
5857
5858 spin_unlock_irqrestore(&pm_mgr->pm_list_lock, flags);
5859
5860 cp_lkm_cleanup_msg_list(ctx);
5861 */
5862 return 0;
5863}
5864
5865static int cp_lkm_pm_handle_msg(struct cp_lkm_common_ctx *ctx, struct cp_lkm_msg_hdr *hdr, struct sk_buff *skb)
5866{
5867 struct cp_lkm_pm_ctx *pm_mgr;
5868
5869 //printk("%s(%p)\n", __FUNCTION__, ctx);
5870
5871 pm_mgr = (struct cp_lkm_pm_ctx *)ctx;
5872
5873
5874 // how to write back response with common function?
5875 if (skb) {
5876        dev_kfree_skb_any(skb); //skbs must be freed with the skb API, not kfree()
5877 }
5878
5879 return 0;
5880}
5881
5882static int cp_lkm_pm_add_filter(struct cp_lkm_pm_ctx *mgr, int uid, struct cp_lkm_pm_filter *filter)
5883{
5884 // find the object in the list
5885 struct list_head *pos;
5886 struct cp_lkm_pm_common *pm = NULL;
5887 unsigned long flags;
5888 struct cp_lkm_pm_filter *new_filter;
5889
5890 DEBUG_TRACE("%s(%p)", __FUNCTION__, mgr);
5891
5892 spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
5893 list_for_each(pos, &mgr->pm_list){
5894 struct cp_lkm_pm_common *pm_tmp = list_entry(pos, struct cp_lkm_pm_common, list);
5895 if(pm_tmp->unique_id == uid) {
5896 pm = pm_tmp;
5897 break;
5898 }
5899 }
5900
5901 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
5902
5903 if (!pm) {
5904 DEBUG_WARN("%s() pm not attached", __FUNCTION__);
5905 return -ENODEV;
5906 }
5907
5908 new_filter = kmalloc(sizeof(struct cp_lkm_pm_filter), GFP_ATOMIC);
5909 if (!new_filter) {
5910 DEBUG_WARN("%s() - failed to alloc filter\n", __FUNCTION__);
5911        return -ENOMEM;
5912 }
5913
5914 memcpy(new_filter, filter, sizeof(struct cp_lkm_pm_filter));
5915 INIT_LIST_HEAD(&new_filter->list);
5916
5917 list_add_tail(&new_filter->list, &pm->filter_list);
5918
5919 return 0;
5920}
5921
5922static int cp_lkm_pm_handle_ioctl(struct cp_lkm_common_ctx *ctx, int cmd, void *k_argp)
5923{
5924 struct cp_lkm_pm_ctx *pm_mgr;
5925 int result = 0;
5926 struct cp_lkm_pm_attach_ioctl *attach_params;
5927 struct cp_lkm_pm_detach_ioctl *detach_params;
5928 struct cp_lkm_pm_activate_deactivate_ioctl *activate_params;
5929 struct cp_lkm_pm_add_filter_ioctl *filter_params;
5930
5931 char name[CP_LKM_MAX_IF_NAME];
5932 unsigned long not_copied;
5933
5934 //printk("%s(%p) cmd:%d\n", __FUNCTION__, ctx, _IOC_NR(cmd));
5935
5936 pm_mgr = (struct cp_lkm_pm_ctx *)ctx;
5937
5938 switch (cmd) {
5939 case CP_LKM_IOCTL_PM_ATTACH:
5940 attach_params = (struct cp_lkm_pm_attach_ioctl *)k_argp;
5941 not_copied = copy_from_user(name, attach_params->name, CP_LKM_MAX_IF_NAME);
5942 if (not_copied) {
5943 return -ENOMEM;
5944 }
5945 DEBUG_INFO("%s(%s) attach", __FUNCTION__, name);
5946 switch(attach_params->type) {
5947 case CP_LKM_PM_TYPE_PPP_CLIENT:
5948 case CP_LKM_PM_TYPE_PPP_SERVER:
5949 result = cp_lkm_pm_ppp_attach(pm_mgr, attach_params->type, attach_params->uid, name);
5950 if (!result) {
5951 not_copied = copy_to_user(attach_params->name, name, CP_LKM_MAX_IF_NAME);
5952 if (not_copied) {
5953 return -ENOMEM;
5954 }
5955 }
5956 break;
5957 case CP_LKM_PM_TYPE_ETHERNET_DHCP:
5958 case CP_LKM_PM_TYPE_ETHERNET_STATIC:
5959 case CP_LKM_PM_TYPE_ETHERNET_STATIC_NOARP:
5960 case CP_LKM_PM_TYPE_IP_STATIC:
5961 case CP_LKM_PM_TYPE_IP_DHCP:
5962 result = cp_lkm_pm_net_attach(pm_mgr, attach_params->type, attach_params->uid, name, attach_params->mac);
5963 break;
5964 default:
5965 result = -ENOTSUPP;
5966 break;
5967 }
5968 break;
5969 case CP_LKM_IOCTL_PM_DETACH:
5970 detach_params = (struct cp_lkm_pm_detach_ioctl *)k_argp;
5971 DEBUG_INFO("%s() detach uid:%d", __FUNCTION__, detach_params->uid);
5972 switch(detach_params->type) {
5973 case CP_LKM_PM_TYPE_PPP_CLIENT:
5974 case CP_LKM_PM_TYPE_PPP_SERVER:
5975 result = cp_lkm_pm_ppp_detach(pm_mgr, detach_params->uid);
5976 break;
5977 case CP_LKM_PM_TYPE_ETHERNET_DHCP:
5978 case CP_LKM_PM_TYPE_ETHERNET_STATIC:
5979 case CP_LKM_PM_TYPE_ETHERNET_STATIC_NOARP:
5980 case CP_LKM_PM_TYPE_IP_STATIC:
5981 case CP_LKM_PM_TYPE_IP_DHCP:
5982 result = cp_lkm_pm_net_detach(pm_mgr, detach_params->uid);
5983 break;
5984 default:
5985 result = -ENOTSUPP;
5986 break;
5987 }
5988 break;
5989 case CP_LKM_IOCTL_PM_ACTIVATE:
5990 activate_params = (struct cp_lkm_pm_activate_deactivate_ioctl *)k_argp;
5991 switch(activate_params->type) {
5992 case CP_LKM_PM_TYPE_PPP_CLIENT:
5993 case CP_LKM_PM_TYPE_PPP_SERVER:
5994 result = cp_lkm_pm_ppp_activate(pm_mgr, activate_params->uid, true);
5995 break;
5996 case CP_LKM_PM_TYPE_ETHERNET_DHCP:
5997 case CP_LKM_PM_TYPE_ETHERNET_STATIC:
5998 case CP_LKM_PM_TYPE_ETHERNET_STATIC_NOARP:
5999 case CP_LKM_PM_TYPE_IP_STATIC:
6000 case CP_LKM_PM_TYPE_IP_DHCP:
6001 result = cp_lkm_pm_net_activate(pm_mgr, activate_params->uid, true);
6002 break;
6003 default:
6004 result = -ENOTSUPP;
6005 break;
6006 }
6007 break;
6008 case CP_LKM_IOCTL_PM_DEACTIVATE:
6009 activate_params = (struct cp_lkm_pm_activate_deactivate_ioctl *)k_argp;
6010 switch(activate_params->type) {
6011 case CP_LKM_PM_TYPE_PPP_CLIENT:
6012 case CP_LKM_PM_TYPE_PPP_SERVER:
6013 result = cp_lkm_pm_ppp_activate(pm_mgr, activate_params->uid, false);
6014 break;
6015 case CP_LKM_PM_TYPE_ETHERNET_DHCP:
6016 case CP_LKM_PM_TYPE_ETHERNET_STATIC:
6017 case CP_LKM_PM_TYPE_ETHERNET_STATIC_NOARP:
6018 case CP_LKM_PM_TYPE_IP_STATIC:
6019 case CP_LKM_PM_TYPE_IP_DHCP:
6020 result = cp_lkm_pm_net_activate(pm_mgr, activate_params->uid, false);
6021 break;
6022 default:
6023 result = -ENOTSUPP;
6024 break;
6025 }
6026 break;
6027 case CP_LKM_IOCTL_PM_ADD_FILTER:
6028 filter_params = (struct cp_lkm_pm_add_filter_ioctl *)k_argp;
6029 result = cp_lkm_pm_add_filter(pm_mgr, filter_params->uid, &filter_params->filter);
6030 break;
6031 default:
6032 break;
6033 }
6034
6035 return result;
6036}
6037
6038static bool cp_lkm_pm_usb_do_link_lock(void* ctx1, void* ctx2)
6039{
6040 struct cp_lkm_pm_common *pm = (struct cp_lkm_pm_common*)ctx1;
6041 bool done = false;
6042 unsigned long flags;
6043 // grab the lock and set the link_count. The link_count is used to keep send and poll from
6044 // being called over to the USB layer while we are mucking with the send and poll pointers
6045 spin_lock_irqsave(&pm->pm_link_lock, flags);
6046 if(pm->pm_link_count <= 0) {
6047 pm->pm_link_count = -1;
6048 done = true;
6049 }
6050 spin_unlock_irqrestore(&pm->pm_link_lock, flags);
6051
6052 return done;
6053}
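/*
 * pm_link_count encoding used by the lock above (a sketch; the inc/dec
 * helpers live elsewhere in this file):
 *   -1  -> edi pointers are being relinked; sends/polls must bail out
 *    0  -> idle, traffic may flow
 *   >0  -> that many send/poll calls are currently inside the USB layer
 * cp_lkm_pm_usb_do_link_lock() only transitions a count of 0 (or below) to
 * -1, so the caller spins via cp_lkm_do_or_die() until in-flight calls drain.
 */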
6054
6055// This function changes the shared edi pointers.
6056// !!!It is the only function in the pm that is permitted to change edi function pointers!!!
6057// Other functions can change the ctxt pointers
6058static int cp_lkm_pm_usb_link(struct cp_lkm_edi *edi, int pm_unique_id, int link)
6059{
6060 struct list_head *pos;
6061 struct cp_lkm_pm_common *pm = NULL;
6062 unsigned long flags;
6063 struct cp_lkm_edi *tmp_edi;
6064
6065 spin_lock_irqsave(&cp_lkm_pm_mgr.pm_list_lock, flags);
6066 list_for_each(pos, &cp_lkm_pm_mgr.pm_list){
6067 struct cp_lkm_pm_common *pm_tmp = list_entry(pos, struct cp_lkm_pm_common, list);
6068 if(pm_tmp->unique_id == pm_unique_id) {
6069 pm = pm_tmp;
6070 break;
6071 }
6072 }
6073 spin_unlock_irqrestore(&cp_lkm_pm_mgr.pm_list_lock, flags);
6074
6075 if (!pm) {
6076 // couldn't find object
6077 //printk("%s() unable to find protocol manager with id:%d\n", __FUNCTION__, pm_unique_id);
6078 return -EINVAL;
6079 }
6080
6081 //printk("%s() pm_net: %p\n", __FUNCTION__, pm);
6082
6083 // grab the lock and set the link_count. The link_count is used to keep send and poll from
6084 // being called over to the USB layer while we are mucking with the send and poll pointers
6085 cp_lkm_do_or_die(pm, NULL, cp_lkm_pm_usb_do_link_lock, CP_LKM_TIMEOUT_MS, CP_LKM_ITER, "Failed to grab cp pm lock");
6086
6087 //printk("%s() pm: %p, attached: %d, pm_type: %d\n", __FUNCTION__, pm, pm->attached,pm->type);
6088 tmp_edi = pm->edi;
6089 pm->edi = NULL;
6090 if (link) {
6091 if (tmp_edi) {
6092 // already linked - unlink from previous edi
6093 // just a precaution, should never happen
6094 tmp_edi->pm_recv = NULL;
6095 tmp_edi->pm_recv_ctx = NULL;
6096 tmp_edi->pm_get_hdr_size = NULL;
6097
6098 //printk("pm_recv_ctx null\n");
6099 tmp_edi->pm_send_pause = NULL;
6100 tmp_edi->pm_send_resume = NULL;
6101
6102 tmp_edi->pm_stats64_ctx = NULL;
6103
6104 //pm->edi = NULL;
6105 }
6106
6107 tmp_edi = edi;
6108 tmp_edi->pm_recv_ctx = pm;
6109
6110 switch(pm->type) {
6111 case CP_LKM_PM_TYPE_PPP_CLIENT:
6112 case CP_LKM_PM_TYPE_PPP_SERVER:
6113 tmp_edi->pm_recv = cp_lkm_pm_ppp_recv;
6114 tmp_edi->pm_get_hdr_size = cp_lkm_pm_ppp_get_hdr_size;
6115 tmp_edi->pm_stats64_ctx = NULL;
6116 break;
6117 default:
6118 tmp_edi->pm_recv = cp_lkm_pm_net_recv;
6119 tmp_edi->pm_get_hdr_size = cp_lkm_pm_net_get_hdr_size;
6120 tmp_edi->pm_send_pause = cp_lkm_pm_net_pause;
6121 tmp_edi->pm_send_resume = cp_lkm_pm_net_resume;
6122 tmp_edi->pm_stats64_ctx = pm;
6123 break;
6124 }
6125
6126 pm->edi = tmp_edi;
6127
6128 // release the link_count on link so things can start flowing.
6129 // don't release it on unlink since we don't want things to flow when unlinked
6130 spin_lock_irqsave(&pm->pm_link_lock, flags);
6131 pm->pm_link_count = 0;
6132 spin_unlock_irqrestore(&pm->pm_link_lock, flags);
6133
6134 } else {
6135 if (tmp_edi) {
6136 tmp_edi->pm_recv = NULL;
6137 tmp_edi->pm_recv_ctx = NULL;
6138 tmp_edi->pm_get_hdr_size = NULL;
6139
6140 //printk("pm_recv_ctx null\n");
6141 tmp_edi->pm_send_pause = NULL;
6142 tmp_edi->pm_send_resume = NULL;
6143 tmp_edi->pm_stats64_ctx = NULL;
6144
6145 //pm->edi = NULL;
6146 }
6147 }
6148
6149 return 0;
6150
6151}
6152
6153/******************** common user/kernel communication functions **************/
6154
6155static void cp_lkm_common_ctx_init(struct cp_lkm_common_ctx *common)
6156{
6157 DEBUG_WARN("%s()", __FUNCTION__);
6158
6159 INIT_LIST_HEAD(&common->read_list);
6160 spin_lock_init(&common->read_list_lock);
6161
6162 init_waitqueue_head(&common->inq);
6163 common->open_cnt = 0;
6164 common->reading_data = false;
6165 common->write_skb = NULL;
6166}
6167
6168static void cp_lkm_cleanup_msg_list(struct cp_lkm_common_ctx *common)
6169{
6170 struct cp_lkm_read_msg *msg;
6171 unsigned long flags;
6172 struct list_head *entry, *tmp;
6173
6174 spin_lock_irqsave(&common->read_list_lock, flags);
6175
6176 list_for_each_safe(entry, tmp, &common->read_list) {
6177 msg = list_entry(entry, struct cp_lkm_read_msg, list);
6178 list_del(&msg->list);
6179 dev_kfree_skb_any(msg->skb);
6180 kfree(msg);
6181 }
6182 spin_unlock_irqrestore(&common->read_list_lock, flags);
6183}
6184
6185// this may be called from soft interrupt context or normal kernel thread context
6186static int cp_lkm_post_message(struct cp_lkm_common_ctx *mgr, struct cp_lkm_msg_hdr* hdr, struct sk_buff *skb)
6187{
6188
6189 struct cp_lkm_read_msg *msg;
6190 unsigned long flags;
6191
6192 msg = kmalloc(sizeof(struct cp_lkm_read_msg), GFP_ATOMIC);
6193 if (!msg) {
6194 if (skb) {
6195 dev_kfree_skb_any(skb);
6196 }
6197 return -ENOMEM;
6198 }
6199
6200 msg->skb = skb;
6201 memcpy(&msg->hdr, hdr, sizeof(struct cp_lkm_msg_hdr));
6202
6203 spin_lock_irqsave(&mgr->read_list_lock, flags);
6204 list_add_tail(&msg->list, &mgr->read_list);
6205 spin_unlock_irqrestore(&mgr->read_list_lock, flags);
6206
6207 mgr->q_waiting = false;
6208
6209 // signal poll
6210 wake_up_interruptible(&mgr->inq);
6211
6212 return 0;
6213}
6214
6215int cp_lkm_open(struct inode *inode, struct file *filp)
6216{
6217
6218 int result = 0;
6219 struct cp_lkm_common_ctx *common;
6220
6221 DEBUG_TRACE("%s()", __FUNCTION__);
6222
6223 try_module_get(THIS_MODULE);
6224
6225 // set private data
6226 if (iminor(inode) == CP_LKM_USB_MGR_MINOR) {
6227 filp->private_data = &cp_lkm_usb_mgr;
6228 common = &cp_lkm_usb_mgr.common;
6229 DEBUG_INFO("%s() open usb manager", __FUNCTION__);
6230 } else if (iminor(inode) == CP_LKM_PM_MGR_MINOR) {
6231 filp->private_data = &cp_lkm_pm_mgr;
6232 common = &cp_lkm_pm_mgr.common;
6233 DEBUG_INFO("%s() open pm manager", __FUNCTION__);
6234 } else {
        module_put(THIS_MODULE); // balance try_module_get() above before failing the open
6235        return -ENOENT;
6236 }
6237
6238 if (common->open_cnt) {
        module_put(THIS_MODULE); // balance try_module_get() above before failing the open
6239        return -EBUSY;
6240 }
6241
6242 common->open_cnt++;
6243
6244 if (common->open) {
6245 result = common->open(common);
6246 }
6247
6248 return result;
6249}
6250
6251int cp_lkm_release(struct inode *inode, struct file *filp)
6252{
6253
6254 int result = 0;
6255 struct cp_lkm_common_ctx *common;
6256 common = (struct cp_lkm_common_ctx *)filp->private_data;
6257
6258 DEBUG_TRACE("%s() release", __FUNCTION__);
6259
6260 if (0 == common->open_cnt) {
6261 return 0;
6262 }
6263
6264 if (common->close) {
6265 result = common->close(common);
6266 }
6267
6268 module_put(THIS_MODULE);
6269
6270 common->open_cnt--;
6271
6272 return result;
6273}
6274
6275// first read is the header
6276// second read is the data. If no data, then no second read
6277// if error in either stage, negative value is returned and next read will be for header
6278// messages are not removed until successfully read header and data (if any)
6279ssize_t cp_lkm_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
6280{
6281
6282 struct cp_lkm_common_ctx *common;
6283 ssize_t result;
6284 struct cp_lkm_read_msg *msg;
6285 unsigned long flags;
6286 unsigned long not_copied;
6287
6288// DEBUG_INFO("%s() reading %d bytes", __FUNCTION__, count);
6289 common = (struct cp_lkm_common_ctx *)filp->private_data;
6290
6291 spin_lock_irqsave(&common->read_list_lock, flags);
6292 if (list_empty(&common->read_list)) {
6293 spin_unlock_irqrestore(&common->read_list_lock, flags);
6294 return -EAGAIN;
6295 }
6296 msg = list_first_entry(&common->read_list, struct cp_lkm_read_msg, list);
6297 spin_unlock_irqrestore(&common->read_list_lock, flags);
6298
6299 if (!common->reading_data) { // header mode
6300 // read header
6301 if (sizeof(struct cp_lkm_msg_hdr) != count) {
6302 return -EINVAL;
6303 }
6304
6305 not_copied = copy_to_user(buf, &msg->hdr, sizeof(struct cp_lkm_msg_hdr));
6306 if (not_copied) {
6307 return -ENOMEM;
6308 }
6309
6310 if (!msg->hdr.len) {
6311 result = count;
6312 goto read_free;
6313 }
6314
6315 // switch to data mode
6316 common->reading_data = !common->reading_data;
6317 return count;
6318 }
6319
6320 // switch to header mode
6321 common->reading_data = !common->reading_data;
6322
6323 // data mode - handle the data transfer
6324 if (msg->hdr.len != count) {
6325 return -EINVAL;
6326 }
6327
6328 not_copied = copy_to_user(buf, msg->skb->data, msg->hdr.len);
6329
6330 if (not_copied) {
6331 return -ENOMEM;
6332 }
6333
6334 result = count;
6335
6336read_free:
6337 spin_lock_irqsave(&common->read_list_lock, flags);
6338 list_del(&msg->list);
6339 spin_unlock_irqrestore(&common->read_list_lock, flags);
6340
6341 if (msg->skb) {
6342 dev_kfree_skb_any(msg->skb);
6343 }
6344 kfree(msg);
6345
6346 return result;
6347}
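#if 0
/*
 * Hypothetical userspace sketch of the two-stage read protocol implemented
 * above (illustrative only, never compiled). Assumes <unistd.h>, a header
 * layout matching struct cp_lkm_msg_hdr, a /dev/cp_lkm_pm or /dev/cp_lkm_usb
 * node as created in cp_lkm_start(), and hdr.len <= sizeof(data).
 */
static int cp_read_one_msg(int fd)
{
    struct cp_lkm_msg_hdr hdr;
    char data[2048];

    /* stage 1: must read exactly the header size or the driver returns -EINVAL */
    if (read(fd, &hdr, sizeof(hdr)) != sizeof(hdr))
        return -1;

    /* stage 2: if the header says data follows, must read exactly hdr.len bytes */
    if (hdr.len && read(fd, data, hdr.len) != (ssize_t)hdr.len)
        return -1;

    /* the message is only removed from the kernel's read list at this point */
    return 0;
}
#endif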
6348// the user must write the header first
6349// then the user must write the data equivalent to the hdr.len
6350// on error, a negative value is returned and the entire message is lost
6351// on error, the next write must be header
6352ssize_t cp_lkm_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos)
6353{
6354 struct cp_lkm_common_ctx *common;
6355 unsigned long not_copied;
6356 int result;
6357 struct sk_buff *skb = NULL;
6358 struct cp_lkm_msg_hdr hdr;
6359 struct cp_lkm_msg_hdr *hdrp;
6360
6361// DEBUG_INFO("%s() writing %d bytes", __FUNCTION__, count);
6362
6363 common = (struct cp_lkm_common_ctx *)filp->private_data;
6364
6365 if (!common->write_skb) {
6366 // handle the header
6367 if (count != sizeof(struct cp_lkm_msg_hdr)) {
6368 return -EINVAL;
6369 }
6370 not_copied = copy_from_user(&hdr, buf, count);
6371 if (not_copied) {
6372 return -ENOMEM;
6373 }
6374
6375 if ((skb = alloc_skb (count + hdr.len, GFP_KERNEL)) == NULL) {
6376 return -ENOMEM;
6377 }
6378
6379 memcpy(skb->data, &hdr, count);
6380
6381 // setup skb pointers - skb->data points to message data with header immediately before skb->data
6382 skb->len = hdr.len;
6383 skb->data += sizeof(struct cp_lkm_msg_hdr);
6384 skb_set_tail_pointer(skb, hdr.len);
6385
6386 if (!hdr.len) {
6387 goto send_msg;
6388 }
6389
6390 // save until we get the data
6391 common->write_skb = skb;
6392
6393 return count;
6394 }
6395
6396 // handle the data
6397 skb = common->write_skb;
6398 common->write_skb = NULL;
6399
6400 hdrp = (struct cp_lkm_msg_hdr *)(skb->data) - 1;
6401 if (count != hdrp->len) {
6402 dev_kfree_skb_any(skb);
6403 return -EINVAL;
6404 }
6405
6406 not_copied = copy_from_user(skb->data, buf, count);
6407 if (not_copied) {
6408 dev_kfree_skb_any(skb);
6409 return -ENOMEM;
6410 }
6411
6412
6413send_msg:
6414 if (common->handle_msg) {
6415 result = common->handle_msg(common, (struct cp_lkm_msg_hdr *)(skb->data) - 1, skb);
6416 if (result) {
6417 return result;
6418 }
6419 }
6420
6421 return count;
6422}
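#if 0
/*
 * Hypothetical userspace sketch of the mirrored two-stage write protocol
 * above (illustrative only, never compiled): the header must be written
 * first and sized exactly, then a second write must supply exactly hdr->len
 * payload bytes. A header with len == 0 is dispatched with no second write.
 */
static int cp_write_one_msg(int fd, struct cp_lkm_msg_hdr *hdr, const void *data)
{
    if (write(fd, hdr, sizeof(*hdr)) != (ssize_t)sizeof(*hdr))
        return -1;
    if (hdr->len && write(fd, data, hdr->len) != (ssize_t)hdr->len)
        return -1;
    return 0;
}
#endif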
6423
6424unsigned int cp_lkm_poll(struct file *filp, struct poll_table_struct *wait)
6425{
6426 unsigned long flags;
6427 unsigned int mask = 0;
6428 struct cp_lkm_common_ctx *common;
6429
6430 common = (struct cp_lkm_common_ctx *)filp->private_data;
6431
6432 poll_wait(filp, &common->inq, wait);
6433
6434 spin_lock_irqsave(&common->read_list_lock, flags);
6435
6436 if (!list_empty(&common->read_list)) {
6437 mask = POLLIN | POLLRDNORM; // readable
6438 }
6439
6440 spin_unlock_irqrestore(&common->read_list_lock, flags);
6441
6442 return mask;
6443}
6444
6445#ifdef KERNEL_2_6_21
6446int cp_lkm_ioctl (struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
6447#else
6448long cp_lkm_ioctl (struct file *filp, unsigned int cmd, unsigned long arg)
6449#endif
6450{
6451 int result = -EINVAL;
6452
6453 void __user *uargp = (void __user *)arg;
6454 void *kargp = NULL;
6455 struct cp_lkm_common_ctx *common = (struct cp_lkm_common_ctx *)filp->private_data;
6456
6457 DEBUG_TRACE("%s(%p) - cmd:%d", __FUNCTION__, filp, _IOC_NR(cmd));
6458
6459 switch(cmd) {
6460 case CP_LKM_IOCTL_SET_LOG_LEVEL:
6461 cp_lkm_log_level = (uintptr_t)uargp;
6462 LOG("Setting debug log level:%d", cp_lkm_log_level);
6463 cp_lkm_wrapper_set_log_level(cp_lkm_log_level);
6464 return 0;
6465 default:
6466 if (_IOC_SIZE(cmd)) {
6467 kargp = kmalloc(_IOC_SIZE(cmd), GFP_ATOMIC);
6468 if (!kargp) {
6469 result = -ENOMEM;
6470 goto done;
6471 }
6472 if (copy_from_user(kargp, uargp, _IOC_SIZE(cmd))) {
6473 result = -EFAULT;
6474 goto done;
6475 }
6476 }
6477 }
6478
6479 if (common->handle_ioctl) {
6480 result = common->handle_ioctl(common, cmd, kargp);
6481 }
6482
6483
6484 if (_IOC_DIR(cmd) & _IOC_READ) {
6485 if (copy_to_user(uargp, kargp, _IOC_SIZE(cmd))) {
6486 result = -EFAULT;
6487 goto done;
6488 }
6489 }
6490
6491done:
6492 if (kargp) {
6493 kfree(kargp);
6494 }
6495
6496 return result;
6497}
6498
6499
6500static int __init cp_lkm_start(void)
6501{
6502 int err;
6503
6504 //printk("%s() Initializing module...\n", __FUNCTION__);
6505
6506 // initialize global structures
6507
6508 err = cp_lkm_pm_tty_init();
6509 if (err) {
6510 return err;
6511 }
6512
6513 cp_lkm_usb_init();
6514
6515 cp_lkm_pm_init();
6516
6517 // Allocating memory for the buffer
6518 if ((major = register_chrdev(0, "cp_lkm", &cp_lkm_fops)) < 0) {
6519 DEBUG_INFO("%s() failed dynamic registration", __FUNCTION__);
6520 cp_lkm_pm_tty_cleanup();
6521 return major;
6522 }
6523
6524 cp_lkm_class = class_create(THIS_MODULE, "cp_lkm");
6525 if (IS_ERR(cp_lkm_class)) {
6526 DEBUG_INFO("%s() failed class create", __FUNCTION__);
6527 unregister_chrdev(major, "cp_lkm");
6528 cp_lkm_pm_tty_cleanup();
6529 return -ENODEV;
6530 }
6531#ifdef KERNEL_2_6_21
6532 cp_lkm_dev[0] = device_create(cp_lkm_class, NULL, MKDEV(major, CP_LKM_USB_MGR_MINOR), "cp_lkm_usb");
6533#else
6534 cp_lkm_dev[0] = device_create(cp_lkm_class, NULL, MKDEV(major, CP_LKM_USB_MGR_MINOR), NULL, "cp_lkm_usb");
6535#endif
6536 if (IS_ERR(cp_lkm_dev[0])){
6537        DEBUG_INFO("%s() failed device create: cp_lkm_usb", __FUNCTION__);
6538 // clean up previous devices
6539 class_destroy(cp_lkm_class);
6540 unregister_chrdev(major, "cp_lkm");
6541 cp_lkm_pm_tty_cleanup();
6542 return -ENODEV;
6543 }
6544
6545#ifdef KERNEL_2_6_21
6546 cp_lkm_dev[1] = device_create(cp_lkm_class, NULL, MKDEV(major, CP_LKM_PM_MGR_MINOR), "cp_lkm_pm");
6547#else
6548 cp_lkm_dev[1] = device_create(cp_lkm_class, NULL, MKDEV(major, CP_LKM_PM_MGR_MINOR), NULL, "cp_lkm_pm");
6549#endif
6550 if (IS_ERR(cp_lkm_dev[1])){
6551        DEBUG_INFO("%s() failed device create: cp_lkm_pm", __FUNCTION__);
6552 // clean up previous devices
6553 device_destroy(cp_lkm_class, MKDEV(major, 0));
6554 class_destroy(cp_lkm_class);
6555 unregister_chrdev(major, "cp_lkm");
6556 cp_lkm_pm_tty_cleanup();
6557 return -ENODEV;
6558 }
6559
6560 LOG("cp_lkm: Inserting kernel module");
6561
6562 return 0;
6563}
6564
6565static void __exit cp_lkm_end(void)
6566{
6567 int i;
6568
6569 //TODO remove
6570 //del_timer_sync (&dbg_memleak_timer);
6571
6572
6573 cp_lkm_pm_cleanup();
6574 cp_lkm_usb_cleanup();
6575
6576 for (i = 0; i < 2; i++) {
6577 device_destroy(cp_lkm_class, MKDEV(major, i));
6578 }
6579 class_destroy(cp_lkm_class);
6580 unregister_chrdev(major, "cp_lkm");
6581
6582 cp_lkm_pm_tty_cleanup();
6583
6584 LOG("cp_lkm: Removing kernel module");
6585}
6586
6587module_init(cp_lkm_start);
6588module_exit(cp_lkm_end);
6589MODULE_LICENSE("GPL");
6590
6591