/*
 * fast-classifier.c
 *	Shortcut forwarding engine connection manager.
 *	fast-classifier style
 *
 * XXX - fill in the appropriate GPL notice.
 */
Matthew McClintock6f29aa12013-11-06 15:49:01 -06008#include <linux/module.h>
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06009#include <linux/sysfs.h>
10#include <linux/skbuff.h>
11#include <net/route.h>
12#include <linux/inetdevice.h>
13#include <linux/netfilter_bridge.h>
14#include <net/netfilter/nf_conntrack_acct.h>
15#include <net/netfilter/nf_conntrack_helper.h>
16#include <net/netfilter/nf_conntrack_zones.h>
17#include <net/netfilter/nf_conntrack_core.h>
Matthew McClintock6ab3b3f2013-11-14 15:39:15 -060018#include <net/genetlink.h>
Matthew McClintockea00adf2013-11-25 19:24:30 -060019#include <linux/list.h>
20#include <linux/spinlock.h>
Matthew McClintock6ab3b3f2013-11-14 15:39:15 -060021
Matthew McClintocke1bcfe42013-11-22 15:33:09 -060022#include "../shortcut-fe/sfe.h"
23#include "../shortcut-fe/sfe_ipv4.h"
24#include "fast-classifier-priv.h"
25
/*
 * Per-module structure.
 */
struct fast_classifier {
	spinlock_t lock;		/* Lock for SMP correctness */

	/*
	 * Control state.
	 */
	struct kobject *sys_fast_classifier;	/* sysfs linkage */

	/*
	 * Callback notifiers.
	 */
	struct notifier_block dev_notifier;	/* Device notifier */
	struct notifier_block inet_notifier;	/* IP notifier */
};
Matthew McClintocke1bcfe42013-11-22 15:33:09 -060045
/*
 * Singleton instance holding this module's state.
 * NOTE(review): identifiers starting with "__" are reserved for the
 * implementation; consider renaming (requires updating the users in
 * module init as well).
 */
struct fast_classifier __sc;
Matthew McClintock6ab3b3f2013-11-14 15:39:15 -060047
/*
 * Generic netlink attribute policy: the single message attribute must be
 * a NUL-terminated string.
 */
static struct nla_policy fast_classifier_genl_policy[FAST_CLASSIFIER_A_MAX + 1] = {
	[FAST_CLASSIFIER_A_MSG] = { .type = NLA_NUL_STRING },
};
51
/*
 * Generic netlink family definition.  GENL_ID_GENERATE asks the kernel to
 * allocate the family id dynamically at registration time.
 */
static struct genl_family fast_classifier_gnl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = 0,
	.name = "FAST_CLASSIFIER",
	.version = 1,
	.maxattr = FAST_CLASSIFIER_A_MAX,
};
59
Matthew McClintocke1bcfe42013-11-22 15:33:09 -060060
/* Highest valid generic netlink command number for this family. */
#define FAST_CLASSIFIER_C_MAX (__FAST_CLASSIFIER_C_MAX - 1)

static int fast_classifier_recv_genl_msg(struct sk_buff *skb, struct genl_info *info);
Matthew McClintock6ab3b3f2013-11-14 15:39:15 -060064
/*
 * Generic netlink operation: dispatch FAST_CLASSIFIER_C_RECV messages to
 * fast_classifier_recv_genl_msg().  No dump handler is provided.
 */
static struct genl_ops fast_classifier_gnl_ops_recv = {
	.cmd = FAST_CLASSIFIER_C_RECV,
	.flags = 0,
	.policy = fast_classifier_genl_policy,
	.doit = fast_classifier_recv_genl_msg,
	.dumpit = NULL,
};
72
/*
 * Expose the hook for the receive processing.
 * Presumably assigned during module init so the driver RX path can call
 * into fast_classifier_recv() - confirm against fast_classifier_init().
 */
extern int (*athrs_fast_nat_recv)(struct sk_buff *skb);

/*
 * Expose what should be a static flag in the TCP connection tracker.
 * When non-zero, conntrack's TCP window checking is disabled.
 */
extern int nf_ct_tcp_no_window_check;
82
/*
 * fast_classifier_recv()
 *	Handle packet receives.
 *
 * Returns 1 if the packet is forwarded or 0 if it isn't.
 */
int fast_classifier_recv(struct sk_buff *skb)
{
	struct net_device *dev;
#if (SFE_HOOK_ABOVE_BRIDGE)
	struct in_device *in_dev;
#endif

	/*
	 * We know that for the vast majority of packets we need the transport
	 * layer header so we may as well start to fetch it now!
	 * NOTE(review): the +32 offset presumably covers the IPv4 header for
	 * typical frames - confirm for the targeted platforms.
	 */
	prefetch(skb->data + 32);
	barrier();

	dev = skb->dev;

#if (SFE_HOOK_ABOVE_BRIDGE)
	/*
	 * Does our input device support IP processing?
	 */
	in_dev = (struct in_device *)dev->ip_ptr;
	if (unlikely(!in_dev)) {
		DEBUG_TRACE("no IP processing for device: %s\n", dev->name);
		return 0;
	}

	/*
	 * Does it have an IP address? If it doesn't then we can't do anything
	 * interesting here!
	 */
	if (unlikely(!in_dev->ifa_list)) {
		DEBUG_TRACE("no IP address for device: %s\n", dev->name);
		return 0;
	}
#endif

	/*
	 * We're only interested in IP packets.
	 */
	if (likely(htons(ETH_P_IP) == skb->protocol)) {
		return sfe_ipv4_recv(dev, skb);
	}

	DEBUG_TRACE("not IP packet\n");
	return 0;
}
Matthew McClintock6f29aa12013-11-06 15:49:01 -0600135
/*
 * fast_classifier_find_mac_addr()
 *	Find the MAC address for a given IPv4 address.
 *
 * Returns true if we find the MAC address, otherwise false.
 *
 * We look up the rtable entry for the address and, from its neighbour
 * structure, obtain the hardware address. This means this function also
 * works if the neighbours are routers too.
 */
static bool fast_classifier_find_mac_addr(uint32_t addr, uint8_t *mac_addr)
{
	struct neighbour *neigh;
	struct rtable *rt;
	struct dst_entry *dst;
	struct net_device *dev;

	/*
	 * Look up the rtable entry for the IP address then get the hardware
	 * address from its neighbour structure. This means this work when the
	 * neighbours are routers too.
	 */
	rt = ip_route_output(&init_net, addr, 0, 0, 0);
	if (unlikely(IS_ERR(rt))) {
		return false;
	}

	dst = (struct dst_entry *)rt;

	/* Neighbour access must be performed under RCU. */
	rcu_read_lock();
	neigh = dst_get_neighbour_noref(dst);
	if (unlikely(!neigh)) {
		rcu_read_unlock();
		dst_release(dst);
		return false;
	}

	/* Only trust a neighbour whose hardware address is currently valid. */
	if (unlikely(!(neigh->nud_state & NUD_VALID))) {
		rcu_read_unlock();
		dst_release(dst);
		return false;
	}

	dev = neigh->dev;
	if (!dev) {
		rcu_read_unlock();
		dst_release(dst);
		return false;
	}

	/* Copy the hardware address out before dropping the RCU read lock. */
	memcpy(mac_addr, neigh->ha, (size_t)dev->addr_len);
	rcu_read_unlock();

	dst_release(dst);

	/*
	 * We're only interested in unicast MAC addresses - if it's not a unicast
	 * address then our IP address mustn't be unicast either.
	 */
	if (is_multicast_ether_addr(mac_addr)) {
		DEBUG_TRACE("MAC is non-unicast - ignoring\n");
		return false;
	}

	return true;
}
202
/* Protects the sfe_connections list below. */
static DEFINE_SPINLOCK(sfe_connections_lock);

/*
 * One candidate connection, tracked until it is offloaded to the SFE.
 */
struct sfe_connection {
	struct list_head list;		/* linkage on sfe_connections */
	struct sfe_ipv4_create *sic;	/* rule-creation parameters (owned by this entry) */
	struct nf_conn *ct;		/* associated conntrack entry */
	int hits;			/* packets seen; offload triggers at 128 */
};

/* All connections currently being tracked for possible offload. */
static LIST_HEAD(sfe_connections);
213
/*
 * fast_classifier_update_protocol()
 *	Update sfe_ipv4_create struct with new protocol information before we offload
 *
 * Returns 1 when p_sic is ready for offload, 0 when the protocol is
 * unhandled or the TCP connection is no longer in ESTABLISHED state.
 */
static int fast_classifier_update_protocol(struct sfe_ipv4_create *p_sic, struct nf_conn *ct)
{
	switch (p_sic->protocol) {
	case IPPROTO_TCP:
		/* Snapshot conntrack's TCP window tracking state for both directions. */
		p_sic->src_td_window_scale = ct->proto.tcp.seen[0].td_scale;
		p_sic->src_td_max_window = ct->proto.tcp.seen[0].td_maxwin;
		p_sic->src_td_end = ct->proto.tcp.seen[0].td_end;
		p_sic->src_td_max_end = ct->proto.tcp.seen[0].td_maxend;
		p_sic->dest_td_window_scale = ct->proto.tcp.seen[1].td_scale;
		p_sic->dest_td_max_window = ct->proto.tcp.seen[1].td_maxwin;
		p_sic->dest_td_end = ct->proto.tcp.seen[1].td_end;
		p_sic->dest_td_max_end = ct->proto.tcp.seen[1].td_maxend;
		/*
		 * Skip sequence checking in the SFE if conntrack is liberal,
		 * either globally or for either direction of this connection.
		 */
		if (nf_ct_tcp_no_window_check
				|| (ct->proto.tcp.seen[0].flags & IP_CT_TCP_FLAG_BE_LIBERAL)
				|| (ct->proto.tcp.seen[1].flags & IP_CT_TCP_FLAG_BE_LIBERAL)) {
			p_sic->flags |= SFE_IPV4_CREATE_FLAG_NO_SEQ_CHECK;
		}

		/*
		 * If the connection is shutting down do not manage it.
		 * state can not be SYN_SENT, SYN_RECV because connection is assured
		 * Not managed states: FIN_WAIT, CLOSE_WAIT, LAST_ACK, TIME_WAIT, CLOSE.
		 */
		spin_lock(&ct->lock);
		if (ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED) {
			spin_unlock(&ct->lock);
			DEBUG_TRACE("connection in termination state: %#x, s: %pI4:%u, d: %pI4:%u\n",
					ct->proto.tcp.state, &p_sic->src_ip, ntohs(p_sic->src_port),
					&p_sic->dest_ip, ntohs(p_sic->dest_port));
			return 0;
		}
		spin_unlock(&ct->lock);
		break;

	case IPPROTO_UDP:
		break;

	default:
		DEBUG_TRACE("unhandled protocol %d\n", p_sic->protocol);
		return 0;
	}

	return 1;
}
262
263/*
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600264 * fast_classifier_recv_genl_msg()
265 * Called from user space to offload a connection
266 */
267static int fast_classifier_recv_genl_msg(struct sk_buff *skb, struct genl_info *info)
268{
269 struct nlattr *na;
270 struct fast_classifier_msg *fc_msg;
Matthew McClintockea00adf2013-11-25 19:24:30 -0600271 struct sfe_ipv4_create *p_sic;
272 struct sfe_connection *conn;
273 unsigned long flags;
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600274
275 na = info->attrs[FAST_CLASSIFIER_C_RECV];
276 fc_msg = nla_data(na);
Matthew McClintockea00adf2013-11-25 19:24:30 -0600277
278 DEBUG_TRACE("INFO: want to offload: %d, %d, %d, %d, %d\n", fc_msg->proto,
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600279 fc_msg->src_saddr,
280 fc_msg->dst_saddr,
281 fc_msg->sport, fc_msg->dport);
Matthew McClintockea00adf2013-11-25 19:24:30 -0600282 spin_lock_irqsave(&sfe_connections_lock, flags);
283 list_for_each_entry(conn, &sfe_connections, list) {
Matthew McClintockea00adf2013-11-25 19:24:30 -0600284 p_sic = conn->sic;
285
286 DEBUG_TRACE(" -> COMPARING: proto: %d src_ip: %d dst_ip: %d, src_port: %d, dst_port: %d...",
287 p_sic->protocol, p_sic->src_ip, p_sic->dest_ip,
288 p_sic->src_port, p_sic->dest_port);
289
290 if (p_sic->protocol == fc_msg->proto &&
291 p_sic->src_port == fc_msg->sport &&
292 p_sic->dest_port == fc_msg->dport &&
293 p_sic->src_ip == fc_msg->src_saddr &&
294 p_sic->dest_ip == fc_msg->dst_saddr ) {
295 DEBUG_TRACE("FOUND, WILL OFFLOAD\n");
Matthew McClintockc5739382013-12-02 14:17:46 -0600296 if (fast_classifier_update_protocol(p_sic, conn->ct) == 0) {
297 spin_unlock_irqrestore(&sfe_connections_lock, flags);
298 DEBUG_TRACE("UNKNOWN PROTOCOL OR CONNECTION CLOSING, SKIPPING\n");
299 return 0;
Matthew McClintockea00adf2013-11-25 19:24:30 -0600300 }
Matthew McClintockea00adf2013-11-25 19:24:30 -0600301 DEBUG_TRACE("INFO: calling sfe rule creation!\n");
302 spin_unlock_irqrestore(&sfe_connections_lock, flags);
303 sfe_ipv4_create_rule(p_sic);
304 return 0;
305 }
306 DEBUG_TRACE("SEARCH CONTINUES\n");
307 }
308
309 spin_unlock_irqrestore(&sfe_connections_lock, flags);
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600310 return 0;
311}
312
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600313/*
314 * fast_classifier_ipv4_post_routing_hook()
315 * Called for packets about to leave the box - either locally generated or forwarded from another interface
316 */
317static unsigned int fast_classifier_ipv4_post_routing_hook(unsigned int hooknum,
318 struct sk_buff *skb,
319 const struct net_device *in_unused,
320 const struct net_device *out,
321 int (*okfn)(struct sk_buff *))
322{
323 struct sfe_ipv4_create sic;
Matthew McClintockea00adf2013-11-25 19:24:30 -0600324 struct sfe_ipv4_create *p_sic;
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600325 struct net_device *in;
326 struct nf_conn *ct;
327 enum ip_conntrack_info ctinfo;
328 struct net_device *src_dev;
329 struct net_device *dest_dev;
330 struct net_device *src_br_dev = NULL;
331 struct net_device *dest_br_dev = NULL;
332 struct nf_conntrack_tuple orig_tuple;
333 struct nf_conntrack_tuple reply_tuple;
Matthew McClintockea00adf2013-11-25 19:24:30 -0600334 struct sfe_connection *conn;
335 int sfe_connections_size = 0;
336 unsigned long flags;
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600337
338 /*
339 * Don't process broadcast or multicast packets.
340 */
341 if (unlikely(skb->pkt_type == PACKET_BROADCAST)) {
342 DEBUG_TRACE("broadcast, ignoring\n");
343 return NF_ACCEPT;
344 }
345 if (unlikely(skb->pkt_type == PACKET_MULTICAST)) {
346 DEBUG_TRACE("multicast, ignoring\n");
347 return NF_ACCEPT;
348 }
349
350 /*
351 * Don't process packets that are not being forwarded.
352 */
353 in = dev_get_by_index(&init_net, skb->skb_iif);
354 if (!in) {
355 DEBUG_TRACE("packet not forwarding\n");
356 return NF_ACCEPT;
357 }
358
359 /*
360 * Don't process packets with non-standard 802.3 MAC address sizes.
361 */
362 if (unlikely(in->addr_len != ETH_ALEN)) {
363 DEBUG_TRACE("in device: %s not 802.3 hw addr len: %u, ignoring\n",
364 in->name, (unsigned)in->addr_len);
365 goto done1;
366 }
367 if (unlikely(out->addr_len != ETH_ALEN)) {
368 DEBUG_TRACE("out device: %s not 802.3 hw addr len: %u, ignoring\n",
369 out->name, (unsigned)out->addr_len);
370 goto done1;
371 }
372
373 /*
374 * Don't process packets that aren't being tracked by conntrack.
375 */
376 ct = nf_ct_get(skb, &ctinfo);
377 if (unlikely(!ct)) {
378 DEBUG_TRACE("no conntrack connection, ignoring\n");
379 goto done1;
380 }
381
382 /*
383 * Don't process untracked connections.
384 */
385 if (unlikely(ct == &nf_conntrack_untracked)) {
386 DEBUG_TRACE("untracked connection\n");
387 goto done1;
388 }
389
390 /*
391 * Don't process connections that require support from a 'helper' (typically a NAT ALG).
392 */
393 if (unlikely(nfct_help(ct))) {
394 DEBUG_TRACE("connection has helper\n");
395 goto done1;
396 }
397
398 /*
399 * Look up the details of our connection in conntrack.
400 *
401 * Note that the data we get from conntrack is for the "ORIGINAL" direction
402 * but our packet may actually be in the "REPLY" direction.
403 */
404 orig_tuple = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
405 reply_tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
406 sic.protocol = (int32_t)orig_tuple.dst.protonum;
407
408 /*
409 * Get addressing information, non-NAT first
410 */
411 sic.src_ip = (__be32)orig_tuple.src.u3.ip;
412 sic.dest_ip = (__be32)orig_tuple.dst.u3.ip;
413
414 /*
415 * NAT'ed addresses - note these are as seen from the 'reply' direction
416 * When NAT does not apply to this connection these will be identical to the above.
417 */
418 sic.src_ip_xlate = (__be32)reply_tuple.dst.u3.ip;
419 sic.dest_ip_xlate = (__be32)reply_tuple.src.u3.ip;
420
421 sic.flags = 0;
422
423 switch (sic.protocol) {
424 case IPPROTO_TCP:
425 sic.src_port = orig_tuple.src.u.tcp.port;
426 sic.dest_port = orig_tuple.dst.u.tcp.port;
427 sic.src_port_xlate = reply_tuple.dst.u.tcp.port;
428 sic.dest_port_xlate = reply_tuple.src.u.tcp.port;
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600429
430 /*
431 * Don't try to manage a non-established connection.
432 */
433 if (!test_bit(IPS_ASSURED_BIT, &ct->status)) {
434 DEBUG_TRACE("non-established connection\n");
435 goto done1;
436 }
437
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600438 break;
439
440 case IPPROTO_UDP:
441 sic.src_port = orig_tuple.src.u.udp.port;
442 sic.dest_port = orig_tuple.dst.u.udp.port;
443 sic.src_port_xlate = reply_tuple.dst.u.udp.port;
444 sic.dest_port_xlate = reply_tuple.src.u.udp.port;
445 break;
446
447 default:
448 DEBUG_TRACE("unhandled protocol %d\n", sic.protocol);
449 goto done1;
450 }
451
452 /*
Matthew McClintockea00adf2013-11-25 19:24:30 -0600453 * If we already have this connection in our list, skip it
454 * XXX: this may need to be optimized
455 */
456 DEBUG_TRACE("POST_ROUTE: checking new connection: %d src_ip: %d dst_ip: %d, src_port: %d, dst_port: %d\n",
457 sic.protocol, sic.src_ip, sic.dest_ip,
458 sic.src_port, sic.dest_port);
459 spin_lock_irqsave(&sfe_connections_lock, flags);
460 list_for_each_entry(conn, &sfe_connections, list) {
461 p_sic = conn->sic;
462 DEBUG_TRACE("\t\t-> COMPARING: proto: %d src_ip: %d dst_ip: %d, src_port: %d, dst_port: %d...",
463 p_sic->protocol, p_sic->src_ip, p_sic->dest_ip,
464 p_sic->src_port, p_sic->dest_port);
465
466 if (p_sic->protocol == sic.protocol &&
467 p_sic->src_port == sic.src_port &&
468 p_sic->dest_port == sic.dest_port &&
469 p_sic->src_ip == sic.src_ip &&
470 p_sic->dest_ip == sic.dest_ip ) {
Matthew McClintocke1cf6f22013-11-27 13:27:09 -0600471 if (skb->mark) {
472 DEBUG_TRACE("UPDATING MARK %x\n", skb->mark);
473 }
474 p_sic->mark = skb->mark;
Matthew McClintockc5739382013-12-02 14:17:46 -0600475
476 conn->hits++;
477 if (conn->hits == 128) {
478 DEBUG_TRACE("OFFLOADING CONNECTION, TOO MANY HITS\n");
479 if (fast_classifier_update_protocol(p_sic, conn->ct) == 0) {
480 spin_unlock_irqrestore(&sfe_connections_lock, flags);
481 DEBUG_TRACE("UNKNOWN PROTOCOL OR CONNECTION CLOSING, SKIPPING\n");
482 return 0;
483 }
484 DEBUG_TRACE("INFO: calling sfe rule creation!\n");
485 spin_unlock_irqrestore(&sfe_connections_lock, flags);
486 sfe_ipv4_create_rule(p_sic);
487 return 0;
488 } else if (conn->hits > 128) {
489 struct sfe_ipv4_mark mark;
490
491 DEBUG_TRACE("CONNECTION ALREADY OFFLOADED, UPDATING MARK\n");
492 mark.protocol = p_sic->protocol;
493 mark.src_ip = p_sic->src_ip;
494 mark.src_port = p_sic->src_port;
495 mark.dest_ip = p_sic->dest_ip;
496 mark.dest_port = p_sic->dest_port;
497 mark.mark = skb->mark;
498 sfe_ipv4_mark_rule(&mark);
499 }
500
501 DEBUG_TRACE("FOUND, SKIPPING\n");
502
Matthew McClintockea00adf2013-11-25 19:24:30 -0600503 spin_unlock_irqrestore(&sfe_connections_lock, flags);
504 goto done1;
505 } else {
506 DEBUG_TRACE("SEARCH CONTINUES");
507 }
508
509 sfe_connections_size++;
510 }
511 spin_unlock_irqrestore(&sfe_connections_lock, flags);
512
513 /*
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600514 * Get the MAC addresses that correspond to source and destination host addresses.
515 */
516 if (!fast_classifier_find_mac_addr(sic.src_ip, sic.src_mac)) {
517 DEBUG_TRACE("failed to find MAC address for src IP: %pI4\n", &sic.src_ip);
518 goto done1;
519 }
520
521 if (!fast_classifier_find_mac_addr(sic.src_ip_xlate, sic.src_mac_xlate)) {
522 DEBUG_TRACE("failed to find MAC address for xlate src IP: %pI4\n", &sic.src_ip_xlate);
523 goto done1;
524 }
525
526 /*
527 * Do dest now
528 */
529 if (!fast_classifier_find_mac_addr(sic.dest_ip, sic.dest_mac)) {
530 DEBUG_TRACE("failed to find MAC address for dest IP: %pI4\n", &sic.dest_ip);
531 goto done1;
532 }
533
534 if (!fast_classifier_find_mac_addr(sic.dest_ip_xlate, sic.dest_mac_xlate)) {
535 DEBUG_TRACE("failed to find MAC address for xlate dest IP: %pI4\n", &sic.dest_ip_xlate);
536 goto done1;
537 }
538
539 /*
540 * Get our device info. If we're dealing with the "reply" direction here then
541 * we'll need things swapped around.
542 */
543 if (ctinfo < IP_CT_IS_REPLY) {
544 src_dev = in;
545 dest_dev = (struct net_device *)out;
546 } else {
547 src_dev = (struct net_device *)out;
548 dest_dev = in;
549 }
550
551#if (!SFE_HOOK_ABOVE_BRIDGE)
552 /*
553 * Now our devices may actually be a bridge interface. If that's
554 * the case then we need to hunt down the underlying interface.
555 */
556 if (src_dev->priv_flags & IFF_EBRIDGE) {
557 src_br_dev = br_port_dev_get(src_dev, sic.src_mac);
558 if (!src_br_dev) {
559 DEBUG_TRACE("no port found on bridge\n");
560 goto done1;
561 }
562
563 src_dev = src_br_dev;
564 }
565
566 if (dest_dev->priv_flags & IFF_EBRIDGE) {
567 dest_br_dev = br_port_dev_get(dest_dev, sic.dest_mac_xlate);
568 if (!dest_br_dev) {
569 DEBUG_TRACE("no port found on bridge\n");
570 goto done2;
571 }
572
573 dest_dev = dest_br_dev;
574 }
575#else
576 /*
577 * Our devices may actually be part of a bridge interface. If that's
578 * the case then find the bridge interface instead.
579 */
580 if (src_dev->priv_flags & IFF_BRIDGE_PORT) {
581 src_br_dev = src_dev->master;
582 if (!src_br_dev) {
583 DEBUG_TRACE("no bridge found for: %s\n", src_dev->name);
584 goto done1;
585 }
586
587 dev_hold(src_br_dev);
588 src_dev = src_br_dev;
589 }
590
591 if (dest_dev->priv_flags & IFF_BRIDGE_PORT) {
592 dest_br_dev = dest_dev->master;
593 if (!dest_br_dev) {
594 DEBUG_TRACE("no bridge found for: %s\n", dest_dev->name);
595 goto done2;
596 }
597
598 dev_hold(dest_br_dev);
599 dest_dev = dest_br_dev;
600 }
601#endif
602
603 sic.src_dev = src_dev;
604 sic.dest_dev = dest_dev;
605
606// XXX - these MTUs need handling correctly!
607 sic.src_mtu = 1500;
608 sic.dest_mtu = 1500;
609
Matthew McClintocke1cf6f22013-11-27 13:27:09 -0600610 if (skb->mark) {
611 DEBUG_TRACE("SKB MARK NON ZERO %x\n", skb->mark);
612 }
613 sic.mark = skb->mark;
614
Matthew McClintockea00adf2013-11-25 19:24:30 -0600615 conn = kmalloc(sizeof(struct sfe_connection), GFP_KERNEL);
616 if (conn == NULL) {
617 printk(KERN_CRIT "ERROR: no memory for sfe\n");
618 goto done3;
619 }
Matthew McClintockc5739382013-12-02 14:17:46 -0600620 conn->hits = 0;
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600621
Matthew McClintockea00adf2013-11-25 19:24:30 -0600622 p_sic = kmalloc(sizeof(struct sfe_ipv4_create), GFP_KERNEL);
623 if (p_sic == NULL) {
624 printk(KERN_CRIT "ERROR: no memory for sfe\n");
625 kfree(conn);
626 goto done3;
627 }
628
629 memcpy(p_sic, &sic, sizeof(sic));
630 conn->sic = p_sic;
631 conn->ct = ct;
632 DEBUG_TRACE(" -> adding item to sfe_connections, new size: %d\n", ++sfe_connections_size);
633 DEBUG_TRACE("POST_ROUTE: new offloadable connection: proto: %d src_ip: %d dst_ip: %d, src_port: %d, dst_port: %d\n",
634 p_sic->protocol, p_sic->src_ip, p_sic->dest_ip,
635 p_sic->src_port, p_sic->dest_port);
636 spin_lock_irqsave(&sfe_connections_lock, flags);
637 list_add_tail(&(conn->list), &sfe_connections);
638 spin_unlock_irqrestore(&sfe_connections_lock, flags);
639done3:
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600640 /*
641 * If we had bridge ports then release them too.
642 */
643 if (dest_br_dev) {
644 dev_put(dest_br_dev);
645 }
646
647done2:
648 if (src_br_dev) {
649 dev_put(src_br_dev);
650 }
651
652done1:
653 /*
654 * Release the interface on which this skb arrived
655 */
656 dev_put(in);
657
658 return NF_ACCEPT;
659}
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600660
661#ifdef CONFIG_NF_CONNTRACK_EVENTS
662/*
663 * fast_classifier_conntrack_event()
664 * Callback event invoked when a conntrack connection's state changes.
665 */
Matthew McClintock0680e9f2013-11-26 15:43:10 -0600666#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
667static int fast_classifier_conntrack_event(struct notifier_block *this,
668 unsigned int events, struct nf_ct_event *item)
669#else
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600670static int fast_classifier_conntrack_event(unsigned int events, struct nf_ct_event *item)
Matthew McClintock0680e9f2013-11-26 15:43:10 -0600671#endif
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600672{
673 struct sfe_ipv4_destroy sid;
674 struct nf_conn *ct = item->ct;
675 struct nf_conntrack_tuple orig_tuple;
Matthew McClintockea00adf2013-11-25 19:24:30 -0600676 struct sfe_connection *conn;
677 struct sfe_ipv4_create *p_sic;
678 int sfe_found_match = 0;
679 int sfe_connections_size = 0;
680 unsigned long flags;
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600681
Matthew McClintocke1cf6f22013-11-27 13:27:09 -0600682 if (events & IPCT_MARK) {
683 struct sfe_ipv4_mark mark;
684 orig_tuple = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
685
686 mark.protocol = (int32_t)orig_tuple.dst.protonum;
687 mark.src_ip = (__be32)orig_tuple.src.u3.ip;
688 mark.dest_ip = (__be32)orig_tuple.dst.u3.ip;
689 switch (mark.protocol) {
690 case IPPROTO_TCP:
691 mark.src_port = orig_tuple.src.u.tcp.port;
692 mark.dest_port = orig_tuple.dst.u.tcp.port;
693 break;
694 case IPPROTO_UDP:
695 mark.src_port = orig_tuple.src.u.udp.port;
696 mark.dest_port = orig_tuple.dst.u.udp.port;
697 break;
698 default:
699 break;
700 }
701
702 sfe_ipv4_mark_rule(&mark);
703 }
704
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600705 /*
706 * If we don't have a conntrack entry then we're done.
707 */
708 if (unlikely(!ct)) {
709 DEBUG_WARN("no ct in conntrack event callback\n");
710 return NOTIFY_DONE;
711 }
712
713 /*
714 * If this is an untracked connection then we can't have any state either.
715 */
716 if (unlikely(ct == &nf_conntrack_untracked)) {
717 DEBUG_TRACE("ignoring untracked conn\n");
718 return NOTIFY_DONE;
719 }
720
721 /*
722 * Ignore anything other than IPv4 connections.
723 */
724 if (unlikely(nf_ct_l3num(ct) != AF_INET)) {
725 DEBUG_TRACE("ignoring non-IPv4 conn\n");
726 return NOTIFY_DONE;
727 }
728
729 /*
Matthew McClintocke1cf6f22013-11-27 13:27:09 -0600730 * We're only interested in destroy events at this point
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600731 */
732 if (unlikely(!(events & (1 << IPCT_DESTROY)))) {
733 DEBUG_TRACE("ignoring non-destroy event\n");
734 return NOTIFY_DONE;
735 }
736
737 orig_tuple = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
738 sid.protocol = (int32_t)orig_tuple.dst.protonum;
739
740 /*
741 * Extract information from the conntrack connection. We're only interested
742 * in nominal connection information (i.e. we're ignoring any NAT information).
743 */
744 sid.src_ip = (__be32)orig_tuple.src.u3.ip;
745 sid.dest_ip = (__be32)orig_tuple.dst.u3.ip;
746
747 switch (sid.protocol) {
748 case IPPROTO_TCP:
749 sid.src_port = orig_tuple.src.u.tcp.port;
750 sid.dest_port = orig_tuple.dst.u.tcp.port;
751 break;
752
753 case IPPROTO_UDP:
754 sid.src_port = orig_tuple.src.u.udp.port;
755 sid.dest_port = orig_tuple.dst.u.udp.port;
756 break;
757
758 default:
759 DEBUG_TRACE("unhandled protocol: %d\n", sid.protocol);
760 return NOTIFY_DONE;
761 }
762
Matthew McClintockea00adf2013-11-25 19:24:30 -0600763 /*
764 * If we already have this connection in our list, skip it
765 * XXX: this may need to be optimized
766 */
767 DEBUG_TRACE("INFO: want to clean up: proto: %d src_ip: %d dst_ip: %d, src_port: %d, dst_port: %d\n",
768 sid.protocol, sid.src_ip, sid.dest_ip,
769 sid.src_port, sid.dest_port);
770 spin_lock_irqsave(&sfe_connections_lock, flags);
771 list_for_each_entry(conn, &sfe_connections, list) {
772 p_sic = conn->sic;
773 DEBUG_TRACE(" -> COMPARING: proto: %d src_ip: %d dst_ip: %d, src_port: %d, dst_port: %d...",
774 p_sic->protocol, p_sic->src_ip, p_sic->dest_ip,
775 p_sic->src_port, p_sic->dest_port);
776
777 if (p_sic->protocol == sid.protocol &&
778 p_sic->src_port == sid.src_port &&
779 p_sic->dest_port == sid.dest_port &&
780 p_sic->src_ip == sid.src_ip &&
781 p_sic->dest_ip == sid.dest_ip ) {
782 sfe_found_match = 1;
783 DEBUG_TRACE("FOUND, DELETING\n");
784 break;
785 } else {
786 DEBUG_TRACE("SEARCH CONTINUES\n");
787 }
788 sfe_connections_size++;
789 }
790
791 if (sfe_found_match) {
792 DEBUG_TRACE("INFO: connection over proto: %d src_ip: %d dst_ip: %d, src_port: %d, dst_port: %d\n",
793 p_sic->protocol, p_sic->src_ip, p_sic->dest_ip,
794 p_sic->src_port, p_sic->dest_port);
795 kfree(conn->sic);
796 list_del(&(conn->list));
797 kfree(conn);
798 } else {
799 DEBUG_TRACE("NO MATCH FOUND IN %d ENTRIES!!\n", sfe_connections_size);
800 }
801 spin_unlock_irqrestore(&sfe_connections_lock, flags);
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600802
803 sfe_ipv4_destroy_rule(&sid);
804 return NOTIFY_DONE;
805}
806
/*
 * Netfilter conntrack event system to monitor connection tracking changes
 */
#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
/* Chain-events kernels register through a standard notifier_block. */
static struct notifier_block fast_classifier_conntrack_notifier = {
	.notifier_call = fast_classifier_conntrack_event,
};
#else
/* Otherwise conntrack uses its own event notifier structure. */
static struct nf_ct_event_notifier fast_classifier_conntrack_notifier = {
	.fcn = fast_classifier_conntrack_event,
};
#endif
Matthew McClintock0680e9f2013-11-26 15:43:10 -0600819#endif
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600820
/*
 * Structure to establish a hook into the post routing netfilter point - this
 * will pick up local outbound and packets going from one interface to another.
 *
 * Note: see include/linux/netfilter_ipv4.h for info related to priority levels.
 * We want to examine packets after NAT translation and any ALG processing.
 */
static struct nf_hook_ops fast_classifier_ipv4_ops_post_routing[] __read_mostly = {
	{
		.hook = fast_classifier_ipv4_post_routing_hook,
		.owner = THIS_MODULE,
		.pf = PF_INET,
		.hooknum = NF_INET_POST_ROUTING,
		.priority = NF_IP_PRI_NAT_SRC + 1,	/* just after SNAT */
	},
};
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600837
/*
 * fast_classifier_sync_rule()
 *	Synchronize a connection's state.
 *
 * Pushes the statistics and TCP window state accumulated by the SFE back
 * into the matching conntrack entry so that conntrack's timeout, accounting
 * and window tracking stay consistent for the accelerated connection.
 */
static void fast_classifier_sync_rule(struct sfe_ipv4_sync *sis)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conntrack_tuple tuple;
	struct nf_conn *ct;
	struct nf_conn_counter *acct;

	/*
	 * Create a tuple so as to be able to look up a connection
	 */
	memset(&tuple, 0, sizeof(tuple));
	tuple.src.u3.ip = sis->src_ip;
	tuple.src.u.all = (__be16)sis->src_port;
	tuple.src.l3num = AF_INET;

	tuple.dst.u3.ip = sis->dest_ip;
	tuple.dst.dir = IP_CT_DIR_ORIGINAL;
	tuple.dst.protonum = (uint8_t)sis->protocol;
	tuple.dst.u.all = (__be16)sis->dest_port;

	DEBUG_TRACE("update connection - p: %d, s: %pI4:%u, d: %pI4:%u\n",
			(int)tuple.dst.protonum,
			&tuple.src.u3.ip, (unsigned int)ntohs(tuple.src.u.all),
			&tuple.dst.u3.ip, (unsigned int)ntohs(tuple.dst.u.all));

	/*
	 * Look up conntrack connection
	 */
	h = nf_conntrack_find_get(&init_net, NF_CT_DEFAULT_ZONE, &tuple);
	if (unlikely(!h)) {
		DEBUG_TRACE("no connection found\n");
		return;
	}

	ct = nf_ct_tuplehash_to_ctrack(h);
	NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);

	/*
	 * Only update if this is not a fixed timeout
	 */
	if (!test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status)) {
		ct->timeout.expires += sis->delta_jiffies;
	}

	/* Fold the SFE's per-direction packet/byte counts into conntrack accounting. */
	acct = nf_conn_acct_find(ct);
	if (acct) {
		spin_lock_bh(&ct->lock);
		atomic64_add(sis->src_packet_count, &acct[IP_CT_DIR_ORIGINAL].packets);
		atomic64_add(sis->src_byte_count, &acct[IP_CT_DIR_ORIGINAL].bytes);
		atomic64_add(sis->dest_packet_count, &acct[IP_CT_DIR_REPLY].packets);
		atomic64_add(sis->dest_byte_count, &acct[IP_CT_DIR_REPLY].bytes);
		spin_unlock_bh(&ct->lock);
	}

	switch (sis->protocol) {
	case IPPROTO_TCP:
		/*
		 * Advance conntrack's TCP window state monotonically: each
		 * field only ever moves forward (the int32_t subtraction is
		 * a serial-number comparison in 32-bit sequence space).
		 */
		spin_lock_bh(&ct->lock);
		if (ct->proto.tcp.seen[0].td_maxwin < sis->src_td_max_window) {
			ct->proto.tcp.seen[0].td_maxwin = sis->src_td_max_window;
		}
		if ((int32_t)(ct->proto.tcp.seen[0].td_end - sis->src_td_end) < 0) {
			ct->proto.tcp.seen[0].td_end = sis->src_td_end;
		}
		if ((int32_t)(ct->proto.tcp.seen[0].td_maxend - sis->src_td_max_end) < 0) {
			ct->proto.tcp.seen[0].td_maxend = sis->src_td_max_end;
		}
		if (ct->proto.tcp.seen[1].td_maxwin < sis->dest_td_max_window) {
			ct->proto.tcp.seen[1].td_maxwin = sis->dest_td_max_window;
		}
		if ((int32_t)(ct->proto.tcp.seen[1].td_end - sis->dest_td_end) < 0) {
			ct->proto.tcp.seen[1].td_end = sis->dest_td_end;
		}
		if ((int32_t)(ct->proto.tcp.seen[1].td_maxend - sis->dest_td_max_end) < 0) {
			ct->proto.tcp.seen[1].td_maxend = sis->dest_td_max_end;
		}
		spin_unlock_bh(&ct->lock);
		break;
	}

	/*
	 * Release connection
	 */
	nf_ct_put(ct);
}
926
927/*
928 * fast_classifier_device_event()
929 */
930static int fast_classifier_device_event(struct notifier_block *this, unsigned long event, void *ptr)
931{
932 struct net_device *dev = (struct net_device *)ptr;
933
934 switch (event) {
935 case NETDEV_DOWN:
936 if (dev) {
937 sfe_ipv4_destroy_all_rules_for_dev(dev);
938 }
939 break;
940 }
941
942 return NOTIFY_DONE;
943}
944
945/*
946 * fast_classifier_inet_event()
947 */
948static int fast_classifier_inet_event(struct notifier_block *this, unsigned long event, void *ptr)
949{
950 struct net_device *dev = ((struct in_ifaddr *)ptr)->ifa_dev->dev;
951 return fast_classifier_device_event(this, event, dev);
952}
953
954/*
955 * fast_classifier_init()
956 */
957static int __init fast_classifier_init(void)
958{
959 struct fast_classifier *sc = &__sc;
960 int result = -1;
Matthew McClintock6ab3b3f2013-11-14 15:39:15 -0600961
962 printk(KERN_ALERT "fast-classifier: starting up\n");
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600963 DEBUG_INFO("SFE CM init\n");
Matthew McClintock6ab3b3f2013-11-14 15:39:15 -0600964
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600965 /*
966 * Create sys/fast_classifier
967 */
968 sc->sys_fast_classifier = kobject_create_and_add("fast_classifier", NULL);
969 if (!sc->sys_fast_classifier) {
970 DEBUG_ERROR("failed to register fast_classifier\n");
971 goto exit1;
972 }
Matthew McClintock6ab3b3f2013-11-14 15:39:15 -0600973
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600974 sc->dev_notifier.notifier_call = fast_classifier_device_event;
975 sc->dev_notifier.priority = 1;
976 register_netdevice_notifier(&sc->dev_notifier);
977
978 sc->inet_notifier.notifier_call = fast_classifier_inet_event;
979 sc->inet_notifier.priority = 1;
980 register_inetaddr_notifier(&sc->inet_notifier);
981
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600982 /*
983 * Register our netfilter hooks.
984 */
985 result = nf_register_hooks(fast_classifier_ipv4_ops_post_routing, ARRAY_SIZE(fast_classifier_ipv4_ops_post_routing));
986 if (result < 0) {
987 DEBUG_ERROR("can't register nf post routing hook: %d\n", result);
988 goto exit6;
989 }
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600990
991#ifdef CONFIG_NF_CONNTRACK_EVENTS
992 /*
993 * Register a notifier hook to get fast notifications of expired connections.
994 */
995 result = nf_conntrack_register_notifier(&init_net, &fast_classifier_conntrack_notifier);
996 if (result < 0) {
997 DEBUG_ERROR("can't register nf notifier hook: %d\n", result);
998 goto exit7;
999 }
1000#endif
1001
1002 spin_lock_init(&sc->lock);
1003
1004 /*
1005 * Hook the receive path in the network stack.
1006 */
1007 BUG_ON(athrs_fast_nat_recv != NULL);
1008 RCU_INIT_POINTER(athrs_fast_nat_recv, fast_classifier_recv);
1009
1010 /*
1011 * Hook the shortcut sync callback.
1012 */
1013 sfe_ipv4_register_sync_rule_callback(fast_classifier_sync_rule);
1014
1015 result = genl_register_family(&fast_classifier_gnl_family);
1016 if (result!= 0)
1017 goto exit8;
1018
1019 result = genl_register_ops(&fast_classifier_gnl_family, &fast_classifier_gnl_ops_recv);
1020 if (result != 0)
1021 goto exit9;
Matthew McClintock6ab3b3f2013-11-14 15:39:15 -06001022
1023 printk(KERN_ALERT "fast-classifier: registered\n");
Matthew McClintock6f29aa12013-11-06 15:49:01 -06001024
1025 return 0;
Matthew McClintock6ab3b3f2013-11-14 15:39:15 -06001026
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06001027exit9:
Matthew McClintock6ab3b3f2013-11-14 15:39:15 -06001028 genl_unregister_family(&fast_classifier_gnl_family);
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06001029exit8:
1030
1031#ifdef CONFIG_NF_CONNTRACK_EVENTS
1032exit7:
1033#endif
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06001034 nf_unregister_hooks(fast_classifier_ipv4_ops_post_routing, ARRAY_SIZE(fast_classifier_ipv4_ops_post_routing));
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06001035
1036exit6:
1037 unregister_inetaddr_notifier(&sc->inet_notifier);
1038 unregister_netdevice_notifier(&sc->dev_notifier);
1039 kobject_put(sc->sys_fast_classifier);
1040
1041exit1:
1042 return result;
Matthew McClintock6f29aa12013-11-06 15:49:01 -06001043}
1044
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06001045/*
1046 * fast_classifier_exit()
1047 */
1048static void __exit fast_classifier_exit(void)
Matthew McClintock6f29aa12013-11-06 15:49:01 -06001049{
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06001050 struct fast_classifier *sc = &__sc;
1051 int result = -1;
Matthew McClintock6ab3b3f2013-11-14 15:39:15 -06001052
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06001053 DEBUG_INFO("SFE CM exit\n");
Matthew McClintock6ab3b3f2013-11-14 15:39:15 -06001054 printk(KERN_ALERT "fast-classifier: shutting down\n");
1055
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06001056 /*
1057 * Unregister our sync callback.
1058 */
1059 sfe_ipv4_register_sync_rule_callback(NULL);
1060
1061 /*
1062 * Unregister our receive callback.
1063 */
1064 RCU_INIT_POINTER(athrs_fast_nat_recv, NULL);
1065
1066 /*
1067 * Wait for all callbacks to complete.
1068 */
1069 rcu_barrier();
1070
1071 /*
1072 * Destroy all connections.
1073 */
1074 sfe_ipv4_destroy_all_rules_for_dev(NULL);
1075
1076// XXX - this is where we need to unregister with any lower level offload services.
1077
1078#ifdef CONFIG_NF_CONNTRACK_EVENTS
1079 nf_conntrack_unregister_notifier(&init_net, &fast_classifier_conntrack_notifier);
1080
1081#endif
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06001082 nf_unregister_hooks(fast_classifier_ipv4_ops_post_routing, ARRAY_SIZE(fast_classifier_ipv4_ops_post_routing));
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06001083
1084 unregister_inetaddr_notifier(&sc->inet_notifier);
1085 unregister_netdevice_notifier(&sc->dev_notifier);
1086
1087 kobject_put(sc->sys_fast_classifier);
1088
1089 result = genl_register_family(&fast_classifier_gnl_family);
1090 if (result != 0)
Matthew McClintock6ab3b3f2013-11-14 15:39:15 -06001091 printk(KERN_CRIT "Unable to unreigster genl_family\n");
Matthew McClintock6ab3b3f2013-11-14 15:39:15 -06001092
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06001093 result = genl_register_ops(&fast_classifier_gnl_family, &fast_classifier_gnl_ops_recv);
1094 if (result != 0)
Matthew McClintock6ab3b3f2013-11-14 15:39:15 -06001095 printk(KERN_CRIT "Unable to unreigster genl_ops\n");
Matthew McClintock6f29aa12013-11-06 15:49:01 -06001096}
1097
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06001098module_init(fast_classifier_init)
1099module_exit(fast_classifier_exit)
1100
1101MODULE_AUTHOR("Qualcomm Atheros Inc.");
1102MODULE_DESCRIPTION("Shortcut Forwarding Engine - Connection Manager");
Matthew McClintock6f29aa12013-11-06 15:49:01 -06001103MODULE_LICENSE("GPL");
1104