blob: 9f080031727bd06178b876cbf98150d66d54e6ce [file] [log] [blame]
Matthew McClintock6f29aa12013-11-06 15:49:01 -06001/*
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06002 * fast-classifier.c
3 * Shortcut forwarding engine connection manager.
4 * fast-classifier style
5 *
6 * XXX - fill in the appropriate GPL notice.
Matthew McClintock6f29aa12013-11-06 15:49:01 -06007 */
Matthew McClintock6f29aa12013-11-06 15:49:01 -06008#include <linux/module.h>
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06009#include <linux/sysfs.h>
10#include <linux/skbuff.h>
11#include <net/route.h>
12#include <linux/inetdevice.h>
13#include <linux/netfilter_bridge.h>
14#include <net/netfilter/nf_conntrack_acct.h>
15#include <net/netfilter/nf_conntrack_helper.h>
16#include <net/netfilter/nf_conntrack_zones.h>
17#include <net/netfilter/nf_conntrack_core.h>
Matthew McClintock6ab3b3f2013-11-14 15:39:15 -060018#include <net/genetlink.h>
Matthew McClintockea00adf2013-11-25 19:24:30 -060019#include <linux/list.h>
20#include <linux/spinlock.h>
Matthew McClintock6ab3b3f2013-11-14 15:39:15 -060021
Matthew McClintocke1bcfe42013-11-22 15:33:09 -060022#include "../shortcut-fe/sfe.h"
23#include "../shortcut-fe/sfe_ipv4.h"
24#include "fast-classifier-priv.h"
25
/*
 * Per-module structure.
 *	Holds the module-wide control state: the sysfs directory object and
 *	the notifier blocks registered with the device and inet subsystems.
 */
struct fast_classifier {
	spinlock_t lock;		/* Lock for SMP correctness */

	/*
	 * Control state.
	 */
	struct kobject *sys_fast_classifier;	/* sysfs linkage */

	/*
	 * Callback notifiers.
	 */
	struct notifier_block dev_notifier;
					/* Device notifier (netdev up/down events) */
	struct notifier_block inet_notifier;
					/* IP notifier (address add/remove events) */
};
Matthew McClintocke1bcfe42013-11-22 15:33:09 -060045
/*
 * Single module-wide instance of the state above.
 * NOTE(review): not 'static', and the leading "__" prefix is reserved for the
 * implementation - consider renaming/making static; confirm no other TU uses it.
 */
struct fast_classifier __sc;

/*
 * Generic netlink attribute policy: the single FAST_CLASSIFIER_A_MSG
 * attribute must be a NUL-terminated string.
 */
static struct nla_policy fast_classifier_genl_policy[FAST_CLASSIFIER_A_MAX + 1] = {
	[FAST_CLASSIFIER_A_MSG] = { .type = NLA_NUL_STRING },
};

/*
 * Generic netlink family definition. GENL_ID_GENERATE asks the kernel to
 * allocate a family id at registration time; userspace resolves it by the
 * "FAST_CLASSIFIER" name.
 */
static struct genl_family fast_classifier_gnl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = 0,
	.name = "FAST_CLASSIFIER",
	.version = 1,
	.maxattr = FAST_CLASSIFIER_A_MAX,
};
59
Matthew McClintocke1bcfe42013-11-22 15:33:09 -060060
/* Highest valid generic netlink command number for this family. */
#define FAST_CLASSIFIER_C_MAX (__FAST_CLASSIFIER_C_MAX - 1)

static int fast_classifier_recv_genl_msg(struct sk_buff *skb, struct genl_info *info);

/*
 * Generic netlink operation: userspace sends FAST_CLASSIFIER_C_RECV
 * messages to request that a specific connection be offloaded; no dump
 * support is provided.
 */
static struct genl_ops fast_classifier_gnl_ops_recv = {
	.cmd = FAST_CLASSIFIER_C_RECV,
	.flags = 0,
	.policy = fast_classifier_genl_policy,
	.doit = fast_classifier_recv_genl_msg,
	.dumpit = NULL,
};
72
/*
 * Expose the hook for the receive processing.
 * NOTE(review): defined in a patched kernel tree, not in this module -
 * fast_classifier_recv() is installed through this pointer; confirm the
 * target kernel carries the corresponding patch.
 */
extern int (*athrs_fast_nat_recv)(struct sk_buff *skb);

/*
 * Expose what should be a static flag in the TCP connection tracker.
 * Used to decide whether TCP sequence-space checking can be skipped.
 */
extern int nf_ct_tcp_no_window_check;
82
83/*
84 * fast_classifier_recv()
85 * Handle packet receives.
86 *
87 * Returns 1 if the packet is forwarded or 0 if it isn't.
88 */
89int fast_classifier_recv(struct sk_buff *skb)
90{
91 struct net_device *dev;
92#if (SFE_HOOK_ABOVE_BRIDGE)
93 struct in_device *in_dev;
94#endif
95
96 /*
97 * We know that for the vast majority of packets we need the transport
98 * layer header so we may as well start to fetch it now!
99 */
100 prefetch(skb->data + 32);
101 barrier();
102
103 dev = skb->dev;
104
105#if (SFE_HOOK_ABOVE_BRIDGE)
106 /*
107 * Does our input device support IP processing?
108 */
109 in_dev = (struct in_device *)dev->ip_ptr;
110 if (unlikely(!in_dev)) {
111 DEBUG_TRACE("no IP processing for device: %s\n", dev->name);
112 return 0;
113 }
114
115 /*
116 * Does it have an IP address? If it doesn't then we can't do anything
117 * interesting here!
118 */
119 if (unlikely(!in_dev->ifa_list)) {
120 DEBUG_TRACE("no IP address for device: %s\n", dev->name);
121 return 0;
122 }
123#endif
124
125 /*
126 * We're only interested in IP packets.
127 */
128 if (likely(htons(ETH_P_IP) == skb->protocol)) {
129 return sfe_ipv4_recv(dev, skb);
130 }
131
132 DEBUG_TRACE("not IP packet\n");
Matthew McClintock6ab3b3f2013-11-14 15:39:15 -0600133 return 0;
134}
Matthew McClintock6f29aa12013-11-06 15:49:01 -0600135
/*
 * fast_classifier_find_mac_addr()
 *	Find the MAC address for a given IPv4 address.
 *
 * Returns true if we find the MAC address, otherwise false.
 *
 * We look up the rtable entry for the address and, from its neighbour
 * structure, obtain the hardware address. This means this function also
 * works if the neighbours are routers too.
 */
static bool fast_classifier_find_mac_addr(uint32_t addr, uint8_t *mac_addr)
{
	struct neighbour *neigh;
	struct rtable *rt;
	struct dst_entry *dst;
	struct net_device *dev;

	/*
	 * Look up the rtable entry for the IP address then get the hardware
	 * address from its neighbour structure. This means this works when the
	 * neighbours are routers too.
	 * NOTE(review): always resolves in init_net - assumes no other netns
	 * is in play; confirm for containerized deployments.
	 */
	rt = ip_route_output(&init_net, addr, 0, 0, 0);
	if (unlikely(IS_ERR(rt))) {
		return false;
	}

	dst = (struct dst_entry *)rt;

	/*
	 * The neighbour is only valid under RCU; hold the read lock until the
	 * hardware address has been copied out.
	 */
	rcu_read_lock();
	neigh = dst_get_neighbour_noref(dst);
	if (unlikely(!neigh)) {
		rcu_read_unlock();
		dst_release(dst);
		return false;
	}

	/*
	 * Only a neighbour in a VALID state carries a usable hardware address.
	 */
	if (unlikely(!(neigh->nud_state & NUD_VALID))) {
		rcu_read_unlock();
		dst_release(dst);
		return false;
	}

	dev = neigh->dev;
	if (!dev) {
		rcu_read_unlock();
		dst_release(dst);
		return false;
	}

	memcpy(mac_addr, neigh->ha, (size_t)dev->addr_len);
	rcu_read_unlock();

	dst_release(dst);

	/*
	 * We're only interested in unicast MAC addresses - if it's not a unicast
	 * address then our IP address mustn't be unicast either.
	 */
	if (is_multicast_ether_addr(mac_addr)) {
		DEBUG_TRACE("MAC is non-unicast - ignoring\n");
		return false;
	}

	return true;
}
202
/* Protects the sfe_connections list and every entry on it. */
static DEFINE_SPINLOCK(sfe_connections_lock);

/*
 * One candidate connection seen by the post-routing hook.
 * Entries live on sfe_connections and are guarded by sfe_connections_lock.
 */
struct sfe_connection {
	struct list_head list;		/* linkage on sfe_connections */
	struct sfe_ipv4_create *sic;	/* cached rule-creation parameters (kmalloc'd, freed on destroy) */
	struct nf_conn *ct;		/* conntrack entry - NOTE(review): no reference is taken; confirm lifetime */
	int hits;			/* packets observed for this connection */
	int offloaded;			/* non-zero once a rule has been pushed to the SFE */
};

/* Connections observed but possibly not yet offloaded. */
static LIST_HEAD(sfe_connections);
214
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600215/*
Matthew McClintockc5739382013-12-02 14:17:46 -0600216 * fast_classifier_update_protocol()
217 * Update sfe_ipv4_create struct with new protocol information before we offload
218 */
219static int fast_classifier_update_protocol(struct sfe_ipv4_create *p_sic, struct nf_conn *ct)
220{
221 switch (p_sic->protocol) {
222 case IPPROTO_TCP:
223 p_sic->src_td_window_scale = ct->proto.tcp.seen[0].td_scale;
224 p_sic->src_td_max_window = ct->proto.tcp.seen[0].td_maxwin;
225 p_sic->src_td_end = ct->proto.tcp.seen[0].td_end;
226 p_sic->src_td_max_end = ct->proto.tcp.seen[0].td_maxend;
227 p_sic->dest_td_window_scale = ct->proto.tcp.seen[1].td_scale;
228 p_sic->dest_td_max_window = ct->proto.tcp.seen[1].td_maxwin;
229 p_sic->dest_td_end = ct->proto.tcp.seen[1].td_end;
230 p_sic->dest_td_max_end = ct->proto.tcp.seen[1].td_maxend;
231 if (nf_ct_tcp_no_window_check
232 || (ct->proto.tcp.seen[0].flags & IP_CT_TCP_FLAG_BE_LIBERAL)
233 || (ct->proto.tcp.seen[1].flags & IP_CT_TCP_FLAG_BE_LIBERAL)) {
234 p_sic->flags |= SFE_IPV4_CREATE_FLAG_NO_SEQ_CHECK;
235 }
236
237 /*
238 * If the connection is shutting down do not manage it.
239 * state can not be SYN_SENT, SYN_RECV because connection is assured
240 * Not managed states: FIN_WAIT, CLOSE_WAIT, LAST_ACK, TIME_WAIT, CLOSE.
241 */
242 spin_lock(&ct->lock);
243 if (ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED) {
244 spin_unlock(&ct->lock);
245 DEBUG_TRACE("connection in termination state: %#x, s: %pI4:%u, d: %pI4:%u\n",
246 ct->proto.tcp.state, &p_sic->src_ip, ntohs(p_sic->src_port),
247 &p_sic->dest_ip, ntohs(p_sic->dest_port));
248 return 0;
249 }
250 spin_unlock(&ct->lock);
251 break;
252
253 case IPPROTO_UDP:
254 break;
255
256 default:
257 DEBUG_TRACE("unhandled protocol %d\n", p_sic->protocol);
258 return 0;
259 }
260
261 return 1;
262}
263
264/*
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600265 * fast_classifier_recv_genl_msg()
266 * Called from user space to offload a connection
267 */
268static int fast_classifier_recv_genl_msg(struct sk_buff *skb, struct genl_info *info)
269{
270 struct nlattr *na;
271 struct fast_classifier_msg *fc_msg;
Matthew McClintockea00adf2013-11-25 19:24:30 -0600272 struct sfe_ipv4_create *p_sic;
273 struct sfe_connection *conn;
274 unsigned long flags;
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600275
276 na = info->attrs[FAST_CLASSIFIER_C_RECV];
277 fc_msg = nla_data(na);
Matthew McClintockea00adf2013-11-25 19:24:30 -0600278
279 DEBUG_TRACE("INFO: want to offload: %d, %d, %d, %d, %d\n", fc_msg->proto,
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600280 fc_msg->src_saddr,
281 fc_msg->dst_saddr,
282 fc_msg->sport, fc_msg->dport);
Matthew McClintockea00adf2013-11-25 19:24:30 -0600283 spin_lock_irqsave(&sfe_connections_lock, flags);
284 list_for_each_entry(conn, &sfe_connections, list) {
Matthew McClintockea00adf2013-11-25 19:24:30 -0600285 p_sic = conn->sic;
286
287 DEBUG_TRACE(" -> COMPARING: proto: %d src_ip: %d dst_ip: %d, src_port: %d, dst_port: %d...",
288 p_sic->protocol, p_sic->src_ip, p_sic->dest_ip,
289 p_sic->src_port, p_sic->dest_port);
290
291 if (p_sic->protocol == fc_msg->proto &&
292 p_sic->src_port == fc_msg->sport &&
293 p_sic->dest_port == fc_msg->dport &&
294 p_sic->src_ip == fc_msg->src_saddr &&
295 p_sic->dest_ip == fc_msg->dst_saddr ) {
Matthew McClintock55c86982013-12-02 14:24:24 -0600296 if (conn->offloaded == 0) {
297 DEBUG_TRACE("USERSPACE OFFLOAD REQUEST, MATCH FOUND, WILL OFFLOAD\n");
298 if (fast_classifier_update_protocol(p_sic, conn->ct) == 0) {
299 spin_unlock_irqrestore(&sfe_connections_lock, flags);
300 DEBUG_TRACE("UNKNOWN PROTOCOL OR CONNECTION CLOSING, SKIPPING\n");
301 return 0;
302 }
303 DEBUG_TRACE("INFO: calling sfe rule creation!\n");
304 conn->offloaded = 1;
Matthew McClintockc5739382013-12-02 14:17:46 -0600305 spin_unlock_irqrestore(&sfe_connections_lock, flags);
Matthew McClintock55c86982013-12-02 14:24:24 -0600306 sfe_ipv4_create_rule(p_sic);
Matthew McClintockc5739382013-12-02 14:17:46 -0600307 return 0;
Matthew McClintockea00adf2013-11-25 19:24:30 -0600308 }
Matthew McClintock55c86982013-12-02 14:24:24 -0600309
310 DEBUG_TRACE("GOT REQUEST TO OFFLOAD ALREADY OFFLOADED CONN FROM USERSPACE\n");
Matthew McClintockea00adf2013-11-25 19:24:30 -0600311 }
312 DEBUG_TRACE("SEARCH CONTINUES\n");
313 }
314
315 spin_unlock_irqrestore(&sfe_connections_lock, flags);
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600316 return 0;
317}
318
/* auto offload connection once we have this many packets - consulted by the
 * post-routing hook when counting hits on a tracked connection */
static int offload_at_pkts = 128;
321
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600322/*
323 * fast_classifier_ipv4_post_routing_hook()
324 * Called for packets about to leave the box - either locally generated or forwarded from another interface
325 */
326static unsigned int fast_classifier_ipv4_post_routing_hook(unsigned int hooknum,
327 struct sk_buff *skb,
328 const struct net_device *in_unused,
329 const struct net_device *out,
330 int (*okfn)(struct sk_buff *))
331{
332 struct sfe_ipv4_create sic;
Matthew McClintockea00adf2013-11-25 19:24:30 -0600333 struct sfe_ipv4_create *p_sic;
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600334 struct net_device *in;
335 struct nf_conn *ct;
336 enum ip_conntrack_info ctinfo;
337 struct net_device *src_dev;
338 struct net_device *dest_dev;
339 struct net_device *src_br_dev = NULL;
340 struct net_device *dest_br_dev = NULL;
341 struct nf_conntrack_tuple orig_tuple;
342 struct nf_conntrack_tuple reply_tuple;
Matthew McClintockea00adf2013-11-25 19:24:30 -0600343 struct sfe_connection *conn;
344 int sfe_connections_size = 0;
345 unsigned long flags;
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600346
347 /*
348 * Don't process broadcast or multicast packets.
349 */
350 if (unlikely(skb->pkt_type == PACKET_BROADCAST)) {
351 DEBUG_TRACE("broadcast, ignoring\n");
352 return NF_ACCEPT;
353 }
354 if (unlikely(skb->pkt_type == PACKET_MULTICAST)) {
355 DEBUG_TRACE("multicast, ignoring\n");
356 return NF_ACCEPT;
357 }
358
359 /*
360 * Don't process packets that are not being forwarded.
361 */
362 in = dev_get_by_index(&init_net, skb->skb_iif);
363 if (!in) {
364 DEBUG_TRACE("packet not forwarding\n");
365 return NF_ACCEPT;
366 }
367
368 /*
369 * Don't process packets with non-standard 802.3 MAC address sizes.
370 */
371 if (unlikely(in->addr_len != ETH_ALEN)) {
372 DEBUG_TRACE("in device: %s not 802.3 hw addr len: %u, ignoring\n",
373 in->name, (unsigned)in->addr_len);
374 goto done1;
375 }
376 if (unlikely(out->addr_len != ETH_ALEN)) {
377 DEBUG_TRACE("out device: %s not 802.3 hw addr len: %u, ignoring\n",
378 out->name, (unsigned)out->addr_len);
379 goto done1;
380 }
381
382 /*
383 * Don't process packets that aren't being tracked by conntrack.
384 */
385 ct = nf_ct_get(skb, &ctinfo);
386 if (unlikely(!ct)) {
387 DEBUG_TRACE("no conntrack connection, ignoring\n");
388 goto done1;
389 }
390
391 /*
392 * Don't process untracked connections.
393 */
394 if (unlikely(ct == &nf_conntrack_untracked)) {
395 DEBUG_TRACE("untracked connection\n");
396 goto done1;
397 }
398
399 /*
400 * Don't process connections that require support from a 'helper' (typically a NAT ALG).
401 */
402 if (unlikely(nfct_help(ct))) {
403 DEBUG_TRACE("connection has helper\n");
404 goto done1;
405 }
406
407 /*
408 * Look up the details of our connection in conntrack.
409 *
410 * Note that the data we get from conntrack is for the "ORIGINAL" direction
411 * but our packet may actually be in the "REPLY" direction.
412 */
413 orig_tuple = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
414 reply_tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
415 sic.protocol = (int32_t)orig_tuple.dst.protonum;
416
417 /*
418 * Get addressing information, non-NAT first
419 */
420 sic.src_ip = (__be32)orig_tuple.src.u3.ip;
421 sic.dest_ip = (__be32)orig_tuple.dst.u3.ip;
422
423 /*
424 * NAT'ed addresses - note these are as seen from the 'reply' direction
425 * When NAT does not apply to this connection these will be identical to the above.
426 */
427 sic.src_ip_xlate = (__be32)reply_tuple.dst.u3.ip;
428 sic.dest_ip_xlate = (__be32)reply_tuple.src.u3.ip;
429
430 sic.flags = 0;
431
432 switch (sic.protocol) {
433 case IPPROTO_TCP:
434 sic.src_port = orig_tuple.src.u.tcp.port;
435 sic.dest_port = orig_tuple.dst.u.tcp.port;
436 sic.src_port_xlate = reply_tuple.dst.u.tcp.port;
437 sic.dest_port_xlate = reply_tuple.src.u.tcp.port;
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600438
439 /*
440 * Don't try to manage a non-established connection.
441 */
442 if (!test_bit(IPS_ASSURED_BIT, &ct->status)) {
443 DEBUG_TRACE("non-established connection\n");
444 goto done1;
445 }
446
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600447 break;
448
449 case IPPROTO_UDP:
450 sic.src_port = orig_tuple.src.u.udp.port;
451 sic.dest_port = orig_tuple.dst.u.udp.port;
452 sic.src_port_xlate = reply_tuple.dst.u.udp.port;
453 sic.dest_port_xlate = reply_tuple.src.u.udp.port;
454 break;
455
456 default:
457 DEBUG_TRACE("unhandled protocol %d\n", sic.protocol);
458 goto done1;
459 }
460
461 /*
Matthew McClintockea00adf2013-11-25 19:24:30 -0600462 * If we already have this connection in our list, skip it
463 * XXX: this may need to be optimized
464 */
465 DEBUG_TRACE("POST_ROUTE: checking new connection: %d src_ip: %d dst_ip: %d, src_port: %d, dst_port: %d\n",
466 sic.protocol, sic.src_ip, sic.dest_ip,
467 sic.src_port, sic.dest_port);
468 spin_lock_irqsave(&sfe_connections_lock, flags);
469 list_for_each_entry(conn, &sfe_connections, list) {
470 p_sic = conn->sic;
471 DEBUG_TRACE("\t\t-> COMPARING: proto: %d src_ip: %d dst_ip: %d, src_port: %d, dst_port: %d...",
472 p_sic->protocol, p_sic->src_ip, p_sic->dest_ip,
473 p_sic->src_port, p_sic->dest_port);
474
475 if (p_sic->protocol == sic.protocol &&
476 p_sic->src_port == sic.src_port &&
477 p_sic->dest_port == sic.dest_port &&
478 p_sic->src_ip == sic.src_ip &&
479 p_sic->dest_ip == sic.dest_ip ) {
Matthew McClintocke1cf6f22013-11-27 13:27:09 -0600480 if (skb->mark) {
481 DEBUG_TRACE("UPDATING MARK %x\n", skb->mark);
482 }
483 p_sic->mark = skb->mark;
Matthew McClintockc5739382013-12-02 14:17:46 -0600484
485 conn->hits++;
Matthew McClintock55c86982013-12-02 14:24:24 -0600486 if (conn->offloaded == 0) {
Matthew McClintock595ee8b2013-12-02 16:21:49 -0600487 if (conn->hits == offload_at_pkts) {
Matthew McClintock55c86982013-12-02 14:24:24 -0600488 DEBUG_TRACE("OFFLOADING CONNECTION, TOO MANY HITS\n");
489 if (fast_classifier_update_protocol(p_sic, conn->ct) == 0) {
490 spin_unlock_irqrestore(&sfe_connections_lock, flags);
491 DEBUG_TRACE("UNKNOWN PROTOCOL OR CONNECTION CLOSING, SKIPPING\n");
Matthew McClintock595ee8b2013-12-02 16:21:49 -0600492 sfe_ipv4_create_rule(p_sic);
Matthew McClintock55c86982013-12-02 14:24:24 -0600493 return 0;
494 }
495 DEBUG_TRACE("INFO: calling sfe rule creation!\n");
496 conn->offloaded = 1;
Matthew McClintockc5739382013-12-02 14:17:46 -0600497 spin_unlock_irqrestore(&sfe_connections_lock, flags);
Matthew McClintock55c86982013-12-02 14:24:24 -0600498 sfe_ipv4_create_rule(p_sic);
Matthew McClintock16a47ec2013-12-05 17:03:15 -0600499 goto done1;
Matthew McClintock595ee8b2013-12-02 16:21:49 -0600500 } else if (conn->hits > offload_at_pkts) {
501 DEBUG_ERROR("ERROR: MORE THAN %d HITS AND NOT OFFLOADED\n", offload_at_pkts);
Matthew McClintock16a47ec2013-12-05 17:03:15 -0600502 spin_unlock_irqrestore(&sfe_connections_lock, flags);
503 goto done1;
Matthew McClintockc5739382013-12-02 14:17:46 -0600504 }
Matthew McClintock16a47ec2013-12-05 17:03:15 -0600505 }
506
507 if (conn->offloaded == 1) {
Matthew McClintockc5739382013-12-02 14:17:46 -0600508 struct sfe_ipv4_mark mark;
509
510 DEBUG_TRACE("CONNECTION ALREADY OFFLOADED, UPDATING MARK\n");
511 mark.protocol = p_sic->protocol;
512 mark.src_ip = p_sic->src_ip;
513 mark.src_port = p_sic->src_port;
514 mark.dest_ip = p_sic->dest_ip;
515 mark.dest_port = p_sic->dest_port;
516 mark.mark = skb->mark;
517 sfe_ipv4_mark_rule(&mark);
Matthew McClintock16a47ec2013-12-05 17:03:15 -0600518 spin_unlock_irqrestore(&sfe_connections_lock, flags);
Matthew McClintock55c86982013-12-02 14:24:24 -0600519 sfe_ipv4_create_rule(p_sic);
Matthew McClintock16a47ec2013-12-05 17:03:15 -0600520 goto done1;
Matthew McClintockc5739382013-12-02 14:17:46 -0600521 }
522
523 DEBUG_TRACE("FOUND, SKIPPING\n");
Matthew McClintockea00adf2013-11-25 19:24:30 -0600524 spin_unlock_irqrestore(&sfe_connections_lock, flags);
525 goto done1;
Matthew McClintockea00adf2013-11-25 19:24:30 -0600526 }
527
Matthew McClintock55c86982013-12-02 14:24:24 -0600528 DEBUG_TRACE("SEARCH CONTINUES");
Matthew McClintockea00adf2013-11-25 19:24:30 -0600529 sfe_connections_size++;
530 }
531 spin_unlock_irqrestore(&sfe_connections_lock, flags);
532
533 /*
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600534 * Get the MAC addresses that correspond to source and destination host addresses.
535 */
536 if (!fast_classifier_find_mac_addr(sic.src_ip, sic.src_mac)) {
537 DEBUG_TRACE("failed to find MAC address for src IP: %pI4\n", &sic.src_ip);
538 goto done1;
539 }
540
541 if (!fast_classifier_find_mac_addr(sic.src_ip_xlate, sic.src_mac_xlate)) {
542 DEBUG_TRACE("failed to find MAC address for xlate src IP: %pI4\n", &sic.src_ip_xlate);
543 goto done1;
544 }
545
546 /*
547 * Do dest now
548 */
549 if (!fast_classifier_find_mac_addr(sic.dest_ip, sic.dest_mac)) {
550 DEBUG_TRACE("failed to find MAC address for dest IP: %pI4\n", &sic.dest_ip);
551 goto done1;
552 }
553
554 if (!fast_classifier_find_mac_addr(sic.dest_ip_xlate, sic.dest_mac_xlate)) {
555 DEBUG_TRACE("failed to find MAC address for xlate dest IP: %pI4\n", &sic.dest_ip_xlate);
556 goto done1;
557 }
558
559 /*
560 * Get our device info. If we're dealing with the "reply" direction here then
561 * we'll need things swapped around.
562 */
563 if (ctinfo < IP_CT_IS_REPLY) {
564 src_dev = in;
565 dest_dev = (struct net_device *)out;
566 } else {
567 src_dev = (struct net_device *)out;
568 dest_dev = in;
569 }
570
571#if (!SFE_HOOK_ABOVE_BRIDGE)
572 /*
573 * Now our devices may actually be a bridge interface. If that's
574 * the case then we need to hunt down the underlying interface.
575 */
576 if (src_dev->priv_flags & IFF_EBRIDGE) {
577 src_br_dev = br_port_dev_get(src_dev, sic.src_mac);
578 if (!src_br_dev) {
579 DEBUG_TRACE("no port found on bridge\n");
580 goto done1;
581 }
582
583 src_dev = src_br_dev;
584 }
585
586 if (dest_dev->priv_flags & IFF_EBRIDGE) {
587 dest_br_dev = br_port_dev_get(dest_dev, sic.dest_mac_xlate);
588 if (!dest_br_dev) {
589 DEBUG_TRACE("no port found on bridge\n");
590 goto done2;
591 }
592
593 dest_dev = dest_br_dev;
594 }
595#else
596 /*
597 * Our devices may actually be part of a bridge interface. If that's
598 * the case then find the bridge interface instead.
599 */
600 if (src_dev->priv_flags & IFF_BRIDGE_PORT) {
601 src_br_dev = src_dev->master;
602 if (!src_br_dev) {
603 DEBUG_TRACE("no bridge found for: %s\n", src_dev->name);
604 goto done1;
605 }
606
607 dev_hold(src_br_dev);
608 src_dev = src_br_dev;
609 }
610
611 if (dest_dev->priv_flags & IFF_BRIDGE_PORT) {
612 dest_br_dev = dest_dev->master;
613 if (!dest_br_dev) {
614 DEBUG_TRACE("no bridge found for: %s\n", dest_dev->name);
615 goto done2;
616 }
617
618 dev_hold(dest_br_dev);
619 dest_dev = dest_br_dev;
620 }
621#endif
622
623 sic.src_dev = src_dev;
624 sic.dest_dev = dest_dev;
625
626// XXX - these MTUs need handling correctly!
627 sic.src_mtu = 1500;
628 sic.dest_mtu = 1500;
629
Matthew McClintocke1cf6f22013-11-27 13:27:09 -0600630 if (skb->mark) {
631 DEBUG_TRACE("SKB MARK NON ZERO %x\n", skb->mark);
632 }
633 sic.mark = skb->mark;
634
Matthew McClintockea00adf2013-11-25 19:24:30 -0600635 conn = kmalloc(sizeof(struct sfe_connection), GFP_KERNEL);
636 if (conn == NULL) {
637 printk(KERN_CRIT "ERROR: no memory for sfe\n");
638 goto done3;
639 }
Matthew McClintockc5739382013-12-02 14:17:46 -0600640 conn->hits = 0;
Matthew McClintock55c86982013-12-02 14:24:24 -0600641 conn->offloaded = 0;
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600642
Matthew McClintockea00adf2013-11-25 19:24:30 -0600643 p_sic = kmalloc(sizeof(struct sfe_ipv4_create), GFP_KERNEL);
644 if (p_sic == NULL) {
645 printk(KERN_CRIT "ERROR: no memory for sfe\n");
646 kfree(conn);
647 goto done3;
648 }
649
650 memcpy(p_sic, &sic, sizeof(sic));
651 conn->sic = p_sic;
652 conn->ct = ct;
653 DEBUG_TRACE(" -> adding item to sfe_connections, new size: %d\n", ++sfe_connections_size);
654 DEBUG_TRACE("POST_ROUTE: new offloadable connection: proto: %d src_ip: %d dst_ip: %d, src_port: %d, dst_port: %d\n",
655 p_sic->protocol, p_sic->src_ip, p_sic->dest_ip,
656 p_sic->src_port, p_sic->dest_port);
657 spin_lock_irqsave(&sfe_connections_lock, flags);
658 list_add_tail(&(conn->list), &sfe_connections);
659 spin_unlock_irqrestore(&sfe_connections_lock, flags);
660done3:
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600661 /*
662 * If we had bridge ports then release them too.
663 */
664 if (dest_br_dev) {
665 dev_put(dest_br_dev);
666 }
667
668done2:
669 if (src_br_dev) {
670 dev_put(src_br_dev);
671 }
672
673done1:
674 /*
675 * Release the interface on which this skb arrived
676 */
677 dev_put(in);
678
679 return NF_ACCEPT;
680}
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600681
682#ifdef CONFIG_NF_CONNTRACK_EVENTS
683/*
684 * fast_classifier_conntrack_event()
685 * Callback event invoked when a conntrack connection's state changes.
686 */
Matthew McClintock0680e9f2013-11-26 15:43:10 -0600687#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
688static int fast_classifier_conntrack_event(struct notifier_block *this,
689 unsigned int events, struct nf_ct_event *item)
690#else
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600691static int fast_classifier_conntrack_event(unsigned int events, struct nf_ct_event *item)
Matthew McClintock0680e9f2013-11-26 15:43:10 -0600692#endif
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600693{
694 struct sfe_ipv4_destroy sid;
695 struct nf_conn *ct = item->ct;
696 struct nf_conntrack_tuple orig_tuple;
Matthew McClintockea00adf2013-11-25 19:24:30 -0600697 struct sfe_connection *conn;
698 struct sfe_ipv4_create *p_sic;
699 int sfe_found_match = 0;
700 int sfe_connections_size = 0;
701 unsigned long flags;
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600702
Matthew McClintocke1cf6f22013-11-27 13:27:09 -0600703 if (events & IPCT_MARK) {
704 struct sfe_ipv4_mark mark;
705 orig_tuple = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
706
707 mark.protocol = (int32_t)orig_tuple.dst.protonum;
708 mark.src_ip = (__be32)orig_tuple.src.u3.ip;
709 mark.dest_ip = (__be32)orig_tuple.dst.u3.ip;
710 switch (mark.protocol) {
711 case IPPROTO_TCP:
712 mark.src_port = orig_tuple.src.u.tcp.port;
713 mark.dest_port = orig_tuple.dst.u.tcp.port;
714 break;
715 case IPPROTO_UDP:
716 mark.src_port = orig_tuple.src.u.udp.port;
717 mark.dest_port = orig_tuple.dst.u.udp.port;
718 break;
719 default:
720 break;
721 }
722
723 sfe_ipv4_mark_rule(&mark);
724 }
725
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600726 /*
727 * If we don't have a conntrack entry then we're done.
728 */
729 if (unlikely(!ct)) {
730 DEBUG_WARN("no ct in conntrack event callback\n");
731 return NOTIFY_DONE;
732 }
733
734 /*
735 * If this is an untracked connection then we can't have any state either.
736 */
737 if (unlikely(ct == &nf_conntrack_untracked)) {
738 DEBUG_TRACE("ignoring untracked conn\n");
739 return NOTIFY_DONE;
740 }
741
742 /*
743 * Ignore anything other than IPv4 connections.
744 */
745 if (unlikely(nf_ct_l3num(ct) != AF_INET)) {
746 DEBUG_TRACE("ignoring non-IPv4 conn\n");
747 return NOTIFY_DONE;
748 }
749
750 /*
Matthew McClintocke1cf6f22013-11-27 13:27:09 -0600751 * We're only interested in destroy events at this point
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600752 */
753 if (unlikely(!(events & (1 << IPCT_DESTROY)))) {
754 DEBUG_TRACE("ignoring non-destroy event\n");
755 return NOTIFY_DONE;
756 }
757
758 orig_tuple = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
759 sid.protocol = (int32_t)orig_tuple.dst.protonum;
760
761 /*
762 * Extract information from the conntrack connection. We're only interested
763 * in nominal connection information (i.e. we're ignoring any NAT information).
764 */
765 sid.src_ip = (__be32)orig_tuple.src.u3.ip;
766 sid.dest_ip = (__be32)orig_tuple.dst.u3.ip;
767
768 switch (sid.protocol) {
769 case IPPROTO_TCP:
770 sid.src_port = orig_tuple.src.u.tcp.port;
771 sid.dest_port = orig_tuple.dst.u.tcp.port;
772 break;
773
774 case IPPROTO_UDP:
775 sid.src_port = orig_tuple.src.u.udp.port;
776 sid.dest_port = orig_tuple.dst.u.udp.port;
777 break;
778
779 default:
780 DEBUG_TRACE("unhandled protocol: %d\n", sid.protocol);
781 return NOTIFY_DONE;
782 }
783
Matthew McClintockea00adf2013-11-25 19:24:30 -0600784 /*
785 * If we already have this connection in our list, skip it
786 * XXX: this may need to be optimized
787 */
788 DEBUG_TRACE("INFO: want to clean up: proto: %d src_ip: %d dst_ip: %d, src_port: %d, dst_port: %d\n",
789 sid.protocol, sid.src_ip, sid.dest_ip,
790 sid.src_port, sid.dest_port);
791 spin_lock_irqsave(&sfe_connections_lock, flags);
792 list_for_each_entry(conn, &sfe_connections, list) {
793 p_sic = conn->sic;
794 DEBUG_TRACE(" -> COMPARING: proto: %d src_ip: %d dst_ip: %d, src_port: %d, dst_port: %d...",
795 p_sic->protocol, p_sic->src_ip, p_sic->dest_ip,
796 p_sic->src_port, p_sic->dest_port);
797
798 if (p_sic->protocol == sid.protocol &&
799 p_sic->src_port == sid.src_port &&
800 p_sic->dest_port == sid.dest_port &&
801 p_sic->src_ip == sid.src_ip &&
802 p_sic->dest_ip == sid.dest_ip ) {
803 sfe_found_match = 1;
804 DEBUG_TRACE("FOUND, DELETING\n");
805 break;
806 } else {
807 DEBUG_TRACE("SEARCH CONTINUES\n");
808 }
809 sfe_connections_size++;
810 }
811
812 if (sfe_found_match) {
813 DEBUG_TRACE("INFO: connection over proto: %d src_ip: %d dst_ip: %d, src_port: %d, dst_port: %d\n",
814 p_sic->protocol, p_sic->src_ip, p_sic->dest_ip,
815 p_sic->src_port, p_sic->dest_port);
816 kfree(conn->sic);
817 list_del(&(conn->list));
818 kfree(conn);
819 } else {
820 DEBUG_TRACE("NO MATCH FOUND IN %d ENTRIES!!\n", sfe_connections_size);
821 }
822 spin_unlock_irqrestore(&sfe_connections_lock, flags);
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600823
824 sfe_ipv4_destroy_rule(&sid);
825 return NOTIFY_DONE;
826}
827
/*
 * Netfilter conntrack event system to monitor connection tracking changes.
 * Kernels with chained conntrack events register a plain notifier_block;
 * otherwise the single nf_ct_event_notifier slot is used.
 */
#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
static struct notifier_block fast_classifier_conntrack_notifier = {
	.notifier_call = fast_classifier_conntrack_event,
};
#else
static struct nf_ct_event_notifier fast_classifier_conntrack_notifier = {
	.fcn = fast_classifier_conntrack_event,
};
#endif
#endif /* CONFIG_NF_CONNTRACK_EVENTS */
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600841
/*
 * Structure to establish a hook into the post routing netfilter point - this
 * will pick up local outbound and packets going from one interface to another.
 *
 * Note: see include/linux/netfilter_ipv4.h for info related to priority levels.
 * We want to examine packets after NAT translation and any ALG processing.
 */
static struct nf_hook_ops fast_classifier_ipv4_ops_post_routing[] __read_mostly = {
	{
		.hook = fast_classifier_ipv4_post_routing_hook,
		.owner = THIS_MODULE,
		.pf = PF_INET,
		.hooknum = NF_INET_POST_ROUTING,
		.priority = NF_IP_PRI_NAT_SRC + 1,	/* just after source NAT */
	},
};
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600858
/*
 * fast_classifier_sync_rule()
 *	Synchronize a connection's state.
 *
 * Called with statistics gathered by the SFE for an offloaded connection;
 * pushes them back into the matching conntrack entry so the rest of the
 * stack (timeouts, accounting, TCP window tracking) stays coherent.
 */
static void fast_classifier_sync_rule(struct sfe_ipv4_sync *sis)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conntrack_tuple tuple;
	struct nf_conn *ct;
	struct nf_conn_counter *acct;

	/*
	 * Create a tuple so as to be able to look up a connection
	 */
	memset(&tuple, 0, sizeof(tuple));
	tuple.src.u3.ip = sis->src_ip;
	tuple.src.u.all = (__be16)sis->src_port;
	tuple.src.l3num = AF_INET;

	tuple.dst.u3.ip = sis->dest_ip;
	tuple.dst.dir = IP_CT_DIR_ORIGINAL;
	tuple.dst.protonum = (uint8_t)sis->protocol;
	tuple.dst.u.all = (__be16)sis->dest_port;

	DEBUG_TRACE("update connection - p: %d, s: %pI4:%u, d: %pI4:%u\n",
		    (int)tuple.dst.protonum,
		    &tuple.src.u3.ip, (unsigned int)ntohs(tuple.src.u.all),
		    &tuple.dst.u3.ip, (unsigned int)ntohs(tuple.dst.u.all));

	/*
	 * Look up conntrack connection
	 */
	h = nf_conntrack_find_get(&init_net, NF_CT_DEFAULT_ZONE, &tuple);
	if (unlikely(!h)) {
		DEBUG_TRACE("no connection found\n");
		return;
	}

	ct = nf_ct_tuplehash_to_ctrack(h);
	NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);

	/*
	 * Only update if this is not a fixed timeout - the connection was
	 * active in the SFE, so push its expiry out by the elapsed time.
	 */
	if (!test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status)) {
		ct->timeout.expires += sis->delta_jiffies;
	}

	/*
	 * Overwrite conntrack's byte/packet counters with the totals seen by
	 * the SFE for both directions.
	 */
	acct = nf_conn_acct_find(ct);
	if (acct) {
		spin_lock_bh(&ct->lock);
		atomic64_set(&acct[IP_CT_DIR_ORIGINAL].packets, sis->src_packet_count);
		atomic64_set(&acct[IP_CT_DIR_ORIGINAL].bytes, sis->src_byte_count);
		atomic64_set(&acct[IP_CT_DIR_REPLY].packets, sis->dest_packet_count);
		atomic64_set(&acct[IP_CT_DIR_REPLY].bytes, sis->dest_byte_count);
		spin_unlock_bh(&ct->lock);
	}

	switch (sis->protocol) {
	case IPPROTO_TCP:
		/*
		 * Advance conntrack's TCP window state to match what the SFE
		 * observed. The signed 32-bit subtraction makes the "newer
		 * than" comparisons robust to sequence-number wraparound.
		 */
		spin_lock_bh(&ct->lock);
		if (ct->proto.tcp.seen[0].td_maxwin < sis->src_td_max_window) {
			ct->proto.tcp.seen[0].td_maxwin = sis->src_td_max_window;
		}
		if ((int32_t)(ct->proto.tcp.seen[0].td_end - sis->src_td_end) < 0) {
			ct->proto.tcp.seen[0].td_end = sis->src_td_end;
		}
		if ((int32_t)(ct->proto.tcp.seen[0].td_maxend - sis->src_td_max_end) < 0) {
			ct->proto.tcp.seen[0].td_maxend = sis->src_td_max_end;
		}
		if (ct->proto.tcp.seen[1].td_maxwin < sis->dest_td_max_window) {
			ct->proto.tcp.seen[1].td_maxwin = sis->dest_td_max_window;
		}
		if ((int32_t)(ct->proto.tcp.seen[1].td_end - sis->dest_td_end) < 0) {
			ct->proto.tcp.seen[1].td_end = sis->dest_td_end;
		}
		if ((int32_t)(ct->proto.tcp.seen[1].td_maxend - sis->dest_td_max_end) < 0) {
			ct->proto.tcp.seen[1].td_maxend = sis->dest_td_max_end;
		}
		spin_unlock_bh(&ct->lock);
		break;
	}

	/*
	 * Release connection
	 */
	nf_ct_put(ct);
}
947
948/*
949 * fast_classifier_device_event()
950 */
951static int fast_classifier_device_event(struct notifier_block *this, unsigned long event, void *ptr)
952{
953 struct net_device *dev = (struct net_device *)ptr;
954
955 switch (event) {
956 case NETDEV_DOWN:
957 if (dev) {
958 sfe_ipv4_destroy_all_rules_for_dev(dev);
959 }
960 break;
961 }
962
963 return NOTIFY_DONE;
964}
965
966/*
967 * fast_classifier_inet_event()
968 */
969static int fast_classifier_inet_event(struct notifier_block *this, unsigned long event, void *ptr)
970{
971 struct net_device *dev = ((struct in_ifaddr *)ptr)->ifa_dev->dev;
972 return fast_classifier_device_event(this, event, dev);
973}
974
975/*
Matthew McClintock595ee8b2013-12-02 16:21:49 -0600976 * fast_classifier_get_offload_at_pkts()
977 */
978static ssize_t fast_classifier_get_offload_at_pkts(struct device *dev,
979 struct device_attribute *attr,
980 char *buf)
981{
982 return sprintf(buf, "%d\n", offload_at_pkts);
983}
984
985/*
986 * fast_classifier_set_offload_at_pkts()
987 */
988static ssize_t fast_classifier_set_offload_at_pkts(struct device *dev,
989 struct device_attribute *attr,
990 char *buf, size_t size)
991{
992 int new;
993
994 if (strict_strtol(buf, 0, &new) < 1)
995 return -EINVAL;
996
997 offload_at_pkts = new;
998
999 return size;
1000}
1001
1002/*
1003 * sysfs attributes.
1004 */
1005static const struct device_attribute fast_classifier_offload_at_pkts_attr =
1006 __ATTR(offload_at_pkts, S_IWUGO | S_IRUGO, fast_classifier_get_offload_at_pkts, fast_classifier_set_offload_at_pkts);
1007
/*
 * fast_classifier_init()
 *	Module initialization.
 *
 * Registration order matters and the exitN unwind labels below mirror it
 * exactly, each label undoing one step and falling through to the next:
 *   sysfs kobject -> sysfs file -> netdev/inetaddr notifiers ->
 *   netfilter post-routing hook -> conntrack event notifier ->
 *   generic netlink family -> genl ops -> receive/sync callbacks.
 */
static int __init fast_classifier_init(void)
{
	struct fast_classifier *sc = &__sc;
	int result = -1;

	printk(KERN_ALERT "fast-classifier: starting up\n");
	DEBUG_INFO("SFE CM init\n");

	/*
	 * Create sys/fast_classifier
	 */
	sc->sys_fast_classifier = kobject_create_and_add("fast_classifier", NULL);
	if (!sc->sys_fast_classifier) {
		DEBUG_ERROR("failed to register fast_classifier\n");
		goto exit1;
	}

	result = sysfs_create_file(sc->sys_fast_classifier, &fast_classifier_offload_at_pkts_attr.attr);
	if (result) {
		DEBUG_ERROR("failed to register debug dev file: %d\n", result);
		goto exit2;
	}

	/*
	 * NOTE(review): the return values of the two notifier registrations
	 * below are not checked - confirm whether failure is possible/ignorable
	 * on the targeted kernels.
	 */
	sc->dev_notifier.notifier_call = fast_classifier_device_event;
	sc->dev_notifier.priority = 1;
	register_netdevice_notifier(&sc->dev_notifier);

	sc->inet_notifier.notifier_call = fast_classifier_inet_event;
	sc->inet_notifier.priority = 1;
	register_inetaddr_notifier(&sc->inet_notifier);

	/*
	 * Register our netfilter hooks.
	 */
	result = nf_register_hooks(fast_classifier_ipv4_ops_post_routing, ARRAY_SIZE(fast_classifier_ipv4_ops_post_routing));
	if (result < 0) {
		DEBUG_ERROR("can't register nf post routing hook: %d\n", result);
		goto exit3;
	}

#ifdef CONFIG_NF_CONNTRACK_EVENTS
	/*
	 * Register a notifier hook to get fast notifications of expired connections.
	 */
	result = nf_conntrack_register_notifier(&init_net, &fast_classifier_conntrack_notifier);
	if (result < 0) {
		DEBUG_ERROR("can't register nf notifier hook: %d\n", result);
		goto exit4;
	}
#endif

	/*
	 * Register the generic netlink family, then its message handler ops.
	 */
	result = genl_register_family(&fast_classifier_gnl_family);
	if (result!= 0) {
		goto exit5;
	}

	result = genl_register_ops(&fast_classifier_gnl_family, &fast_classifier_gnl_ops_recv);
	if (result != 0) {
		goto exit6;
	}

	printk(KERN_ALERT "fast-classifier: registered\n");

	spin_lock_init(&sc->lock);

	/*
	 * Hook the receive path in the network stack.
	 */
	BUG_ON(athrs_fast_nat_recv != NULL);
	RCU_INIT_POINTER(athrs_fast_nat_recv, fast_classifier_recv);

	/*
	 * Hook the shortcut sync callback.
	 */
	sfe_ipv4_register_sync_rule_callback(fast_classifier_sync_rule);

	return 0;

	/* Unwind ladder: each label undoes one step, falling through to the next. */
exit6:
	genl_unregister_family(&fast_classifier_gnl_family);

exit5:
#ifdef CONFIG_NF_CONNTRACK_EVENTS
	nf_conntrack_unregister_notifier(&init_net, &fast_classifier_conntrack_notifier);
#endif

exit4:
	nf_unregister_hooks(fast_classifier_ipv4_ops_post_routing, ARRAY_SIZE(fast_classifier_ipv4_ops_post_routing));

exit3:
	unregister_inetaddr_notifier(&sc->inet_notifier);
	unregister_netdevice_notifier(&sc->dev_notifier);
	sysfs_remove_file(sc->sys_fast_classifier, &fast_classifier_offload_at_pkts_attr.attr);

exit2:
	kobject_put(sc->sys_fast_classifier);

exit1:
	return result;
}
1111
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06001112/*
1113 * fast_classifier_exit()
1114 */
1115static void __exit fast_classifier_exit(void)
Matthew McClintock6f29aa12013-11-06 15:49:01 -06001116{
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06001117 struct fast_classifier *sc = &__sc;
1118 int result = -1;
Matthew McClintock6ab3b3f2013-11-14 15:39:15 -06001119
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06001120 DEBUG_INFO("SFE CM exit\n");
Matthew McClintock6ab3b3f2013-11-14 15:39:15 -06001121 printk(KERN_ALERT "fast-classifier: shutting down\n");
1122
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06001123 /*
1124 * Unregister our sync callback.
1125 */
1126 sfe_ipv4_register_sync_rule_callback(NULL);
1127
1128 /*
1129 * Unregister our receive callback.
1130 */
1131 RCU_INIT_POINTER(athrs_fast_nat_recv, NULL);
1132
1133 /*
1134 * Wait for all callbacks to complete.
1135 */
1136 rcu_barrier();
1137
1138 /*
1139 * Destroy all connections.
1140 */
1141 sfe_ipv4_destroy_all_rules_for_dev(NULL);
1142
Dave Hudsonfd7fd072013-12-07 22:34:18 +00001143 result = genl_unregister_ops(&fast_classifier_gnl_family, &fast_classifier_gnl_ops_recv);
1144 if (result != 0) {
1145 printk(KERN_CRIT "Unable to unreigster genl_ops\n");
1146 }
1147
1148 result = genl_unregister_family(&fast_classifier_gnl_family);
1149 if (result != 0) {
1150 printk(KERN_CRIT "Unable to unreigster genl_family\n");
1151 }
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06001152
1153#ifdef CONFIG_NF_CONNTRACK_EVENTS
1154 nf_conntrack_unregister_notifier(&init_net, &fast_classifier_conntrack_notifier);
1155
1156#endif
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06001157 nf_unregister_hooks(fast_classifier_ipv4_ops_post_routing, ARRAY_SIZE(fast_classifier_ipv4_ops_post_routing));
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06001158
1159 unregister_inetaddr_notifier(&sc->inet_notifier);
1160 unregister_netdevice_notifier(&sc->dev_notifier);
1161
1162 kobject_put(sc->sys_fast_classifier);
Matthew McClintock6f29aa12013-11-06 15:49:01 -06001163}
1164
/*
 * Module entry/exit points and metadata.
 */
module_init(fast_classifier_init)
module_exit(fast_classifier_exit)

MODULE_AUTHOR("Qualcomm Atheros Inc.");
MODULE_DESCRIPTION("Shortcut Forwarding Engine - Connection Manager");
MODULE_LICENSE("GPL");
1171