blob: b217674d428fe08a4766e2d343573a3028399011 [file] [log] [blame]
Matthew McClintock6f29aa12013-11-06 15:49:01 -06001/*
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06002 * fast-classifier.c
3 * Shortcut forwarding engine connection manager.
4 * fast-classifier style
5 *
6 * XXX - fill in the appropriate GPL notice.
Matthew McClintock6f29aa12013-11-06 15:49:01 -06007 */
Matthew McClintock6f29aa12013-11-06 15:49:01 -06008#include <linux/module.h>
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06009#include <linux/sysfs.h>
10#include <linux/skbuff.h>
11#include <net/route.h>
12#include <linux/inetdevice.h>
13#include <linux/netfilter_bridge.h>
14#include <net/netfilter/nf_conntrack_acct.h>
15#include <net/netfilter/nf_conntrack_helper.h>
16#include <net/netfilter/nf_conntrack_zones.h>
17#include <net/netfilter/nf_conntrack_core.h>
Matthew McClintock6ab3b3f2013-11-14 15:39:15 -060018#include <net/genetlink.h>
Matthew McClintockea00adf2013-11-25 19:24:30 -060019#include <linux/list.h>
20#include <linux/spinlock.h>
Ben Menchaca0971b7a2014-01-10 14:43:02 -060021#include <linux/ratelimit.h>
22#include <linux/if_pppox.h>
Matthew McClintock6ab3b3f2013-11-14 15:39:15 -060023
Matthew McClintocke1bcfe42013-11-22 15:33:09 -060024#include "../shortcut-fe/sfe.h"
25#include "../shortcut-fe/sfe_ipv4.h"
26#include "fast-classifier-priv.h"
27
/*
 * Per-module structure.
 */
struct fast_classifier {
	spinlock_t lock;		/* Lock for SMP correctness */

	/*
	 * Control state.
	 */
	struct kobject *sys_fast_classifier;	/* sysfs linkage */

	/*
	 * Callback notifiers.
	 */
	struct notifier_block dev_notifier;
					/* Device notifier */
	struct notifier_block inet_notifier;
					/* IP notifier */
};

/*
 * Single global instance of the module state.
 * NOTE(review): not 'static' and uses a reserved-style leading-underscore
 * name -- confirm no other translation unit links against it before
 * tightening the linkage.
 */
struct fast_classifier __sc;
Matthew McClintock6ab3b3f2013-11-14 15:39:15 -060049
/*
 * Netlink attribute policy: FAST_CLASSIFIER_A_TUPLE carries an opaque
 * binary blob (a struct fast_classifier_tuple) from user space.
 */
static struct nla_policy fast_classifier_genl_policy[FAST_CLASSIFIER_A_MAX + 1] = {
	[FAST_CLASSIFIER_A_TUPLE] = { .type = NLA_UNSPEC },
};

/*
 * Generic netlink family definition; the family id is allocated
 * dynamically by the genl core (GENL_ID_GENERATE).
 */
static struct genl_family fast_classifier_gnl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = FAST_CLASSIFIER_GENL_HDRSIZE,
	.name = FAST_CLASSIFIER_GENL_NAME,
	.version = FAST_CLASSIFIER_GENL_VERSION,
	.maxattr = FAST_CLASSIFIER_A_MAX,
};

static int fast_classifier_recv_genl_msg(struct sk_buff *skb, struct genl_info *info);

/*
 * Single-command operation table: FAST_CLASSIFIER_C_RECV is handled by
 * fast_classifier_recv_genl_msg(); dumping is not supported.
 */
static struct genl_ops fast_classifier_gnl_ops_recv = {
	.cmd = FAST_CLASSIFIER_C_RECV,
	.flags = 0,
	.policy = fast_classifier_genl_policy,
	.doit = fast_classifier_recv_genl_msg,
	.dumpit = NULL,
};
71
Matthew McClintocke1bcfe42013-11-22 15:33:09 -060072/*
73 * Expose the hook for the receive processing.
74 */
75extern int (*athrs_fast_nat_recv)(struct sk_buff *skb);
Matthew McClintock6ab3b3f2013-11-14 15:39:15 -060076
Matthew McClintocke1bcfe42013-11-22 15:33:09 -060077/*
78 * Expose what should be a static flag in the TCP connection tracker.
79 */
80extern int nf_ct_tcp_no_window_check;
81
82/*
83 * fast_classifier_recv()
84 * Handle packet receives.
85 *
86 * Returns 1 if the packet is forwarded or 0 if it isn't.
87 */
88int fast_classifier_recv(struct sk_buff *skb)
89{
90 struct net_device *dev;
91#if (SFE_HOOK_ABOVE_BRIDGE)
92 struct in_device *in_dev;
93#endif
94
95 /*
96 * We know that for the vast majority of packets we need the transport
97 * layer header so we may as well start to fetch it now!
98 */
99 prefetch(skb->data + 32);
100 barrier();
101
102 dev = skb->dev;
103
Ben Menchaca0971b7a2014-01-10 14:43:02 -0600104 /*
105 * And PPPoE packets
106 */
107 if (htons(ETH_P_PPP_SES) == skb->protocol) {
108 return sfe_pppoe_recv(dev, skb);
109 }
110
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600111#if (SFE_HOOK_ABOVE_BRIDGE)
112 /*
113 * Does our input device support IP processing?
114 */
115 in_dev = (struct in_device *)dev->ip_ptr;
116 if (unlikely(!in_dev)) {
117 DEBUG_TRACE("no IP processing for device: %s\n", dev->name);
118 return 0;
119 }
120
121 /*
122 * Does it have an IP address? If it doesn't then we can't do anything
123 * interesting here!
124 */
125 if (unlikely(!in_dev->ifa_list)) {
126 DEBUG_TRACE("no IP address for device: %s\n", dev->name);
127 return 0;
128 }
129#endif
130
131 /*
132 * We're only interested in IP packets.
133 */
134 if (likely(htons(ETH_P_IP) == skb->protocol)) {
135 return sfe_ipv4_recv(dev, skb);
136 }
137
Ben Menchaca0971b7a2014-01-10 14:43:02 -0600138 DEBUG_TRACE("not IP or PPPoE packet\n");
Matthew McClintock6ab3b3f2013-11-14 15:39:15 -0600139 return 0;
140}
Matthew McClintock6f29aa12013-11-06 15:49:01 -0600141
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600142/*
143 * fast_classifier_find_mac_addr()
144 * Find the MAC address for a given IPv4 address.
145 *
146 * Returns true if we find the MAC address, otherwise false.
147 *
148 * We look up the rtable entry for the address and, from its neighbour
149 * structure, obtain the hardware address. This means this function also
150 * works if the neighbours are routers too.
151 */
152static bool fast_classifier_find_mac_addr(uint32_t addr, uint8_t *mac_addr)
Matthew McClintock6f29aa12013-11-06 15:49:01 -0600153{
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600154 struct neighbour *neigh;
155 struct rtable *rt;
156 struct dst_entry *dst;
157 struct net_device *dev;
158
159 /*
160 * Look up the rtable entry for the IP address then get the hardware
161 * address from its neighbour structure. This means this work when the
162 * neighbours are routers too.
163 */
164 rt = ip_route_output(&init_net, addr, 0, 0, 0);
165 if (unlikely(IS_ERR(rt))) {
166 return false;
167 }
168
169 dst = (struct dst_entry *)rt;
170
171 rcu_read_lock();
172 neigh = dst_get_neighbour_noref(dst);
173 if (unlikely(!neigh)) {
174 rcu_read_unlock();
175 dst_release(dst);
176 return false;
177 }
178
179 if (unlikely(!(neigh->nud_state & NUD_VALID))) {
180 rcu_read_unlock();
181 dst_release(dst);
182 return false;
183 }
184
185 dev = neigh->dev;
186 if (!dev) {
187 rcu_read_unlock();
188 dst_release(dst);
189 return false;
190 }
191
192 memcpy(mac_addr, neigh->ha, (size_t)dev->addr_len);
193 rcu_read_unlock();
194
195 dst_release(dst);
196
197 /*
198 * We're only interested in unicast MAC addresses - if it's not a unicast
199 * address then our IP address mustn't be unicast either.
200 */
201 if (is_multicast_ether_addr(mac_addr)) {
202 DEBUG_TRACE("MAC is non-unicast - ignoring\n");
203 return false;
204 }
205
206 return true;
207}
208
Matthew McClintockea00adf2013-11-25 19:24:30 -0600209static DEFINE_SPINLOCK(sfe_connections_lock);
210
211struct sfe_connection {
212 struct list_head list;
213 struct sfe_ipv4_create *sic;
214 struct nf_conn *ct;
Matthew McClintockc5739382013-12-02 14:17:46 -0600215 int hits;
Matthew McClintock55c86982013-12-02 14:24:24 -0600216 int offloaded;
Matthew McClintockea00adf2013-11-25 19:24:30 -0600217};
218
219static LIST_HEAD(sfe_connections);
220
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600221/*
Matthew McClintockc5739382013-12-02 14:17:46 -0600222 * fast_classifier_update_protocol()
223 * Update sfe_ipv4_create struct with new protocol information before we offload
224 */
225static int fast_classifier_update_protocol(struct sfe_ipv4_create *p_sic, struct nf_conn *ct)
226{
227 switch (p_sic->protocol) {
228 case IPPROTO_TCP:
229 p_sic->src_td_window_scale = ct->proto.tcp.seen[0].td_scale;
230 p_sic->src_td_max_window = ct->proto.tcp.seen[0].td_maxwin;
231 p_sic->src_td_end = ct->proto.tcp.seen[0].td_end;
232 p_sic->src_td_max_end = ct->proto.tcp.seen[0].td_maxend;
233 p_sic->dest_td_window_scale = ct->proto.tcp.seen[1].td_scale;
234 p_sic->dest_td_max_window = ct->proto.tcp.seen[1].td_maxwin;
235 p_sic->dest_td_end = ct->proto.tcp.seen[1].td_end;
236 p_sic->dest_td_max_end = ct->proto.tcp.seen[1].td_maxend;
237 if (nf_ct_tcp_no_window_check
238 || (ct->proto.tcp.seen[0].flags & IP_CT_TCP_FLAG_BE_LIBERAL)
239 || (ct->proto.tcp.seen[1].flags & IP_CT_TCP_FLAG_BE_LIBERAL)) {
240 p_sic->flags |= SFE_IPV4_CREATE_FLAG_NO_SEQ_CHECK;
241 }
242
243 /*
244 * If the connection is shutting down do not manage it.
245 * state can not be SYN_SENT, SYN_RECV because connection is assured
246 * Not managed states: FIN_WAIT, CLOSE_WAIT, LAST_ACK, TIME_WAIT, CLOSE.
247 */
248 spin_lock(&ct->lock);
249 if (ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED) {
250 spin_unlock(&ct->lock);
251 DEBUG_TRACE("connection in termination state: %#x, s: %pI4:%u, d: %pI4:%u\n",
252 ct->proto.tcp.state, &p_sic->src_ip, ntohs(p_sic->src_port),
253 &p_sic->dest_ip, ntohs(p_sic->dest_port));
254 return 0;
255 }
256 spin_unlock(&ct->lock);
257 break;
258
259 case IPPROTO_UDP:
260 break;
261
262 default:
263 DEBUG_TRACE("unhandled protocol %d\n", p_sic->protocol);
264 return 0;
265 }
266
267 return 1;
268}
269
270/*
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600271 * fast_classifier_recv_genl_msg()
272 * Called from user space to offload a connection
273 */
274static int fast_classifier_recv_genl_msg(struct sk_buff *skb, struct genl_info *info)
275{
276 struct nlattr *na;
Matthew McClintock28d75572014-01-03 11:54:08 -0600277 struct fast_classifier_tuple *fc_msg;
Matthew McClintockea00adf2013-11-25 19:24:30 -0600278 struct sfe_ipv4_create *p_sic;
279 struct sfe_connection *conn;
280 unsigned long flags;
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600281
282 na = info->attrs[FAST_CLASSIFIER_C_RECV];
283 fc_msg = nla_data(na);
Matthew McClintockea00adf2013-11-25 19:24:30 -0600284
285 DEBUG_TRACE("INFO: want to offload: %d, %d, %d, %d, %d\n", fc_msg->proto,
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600286 fc_msg->src_saddr,
287 fc_msg->dst_saddr,
288 fc_msg->sport, fc_msg->dport);
Matthew McClintockea00adf2013-11-25 19:24:30 -0600289 spin_lock_irqsave(&sfe_connections_lock, flags);
290 list_for_each_entry(conn, &sfe_connections, list) {
Matthew McClintockea00adf2013-11-25 19:24:30 -0600291 p_sic = conn->sic;
292
293 DEBUG_TRACE(" -> COMPARING: proto: %d src_ip: %d dst_ip: %d, src_port: %d, dst_port: %d...",
294 p_sic->protocol, p_sic->src_ip, p_sic->dest_ip,
295 p_sic->src_port, p_sic->dest_port);
296
297 if (p_sic->protocol == fc_msg->proto &&
298 p_sic->src_port == fc_msg->sport &&
299 p_sic->dest_port == fc_msg->dport &&
300 p_sic->src_ip == fc_msg->src_saddr &&
301 p_sic->dest_ip == fc_msg->dst_saddr ) {
Matthew McClintock55c86982013-12-02 14:24:24 -0600302 if (conn->offloaded == 0) {
303 DEBUG_TRACE("USERSPACE OFFLOAD REQUEST, MATCH FOUND, WILL OFFLOAD\n");
304 if (fast_classifier_update_protocol(p_sic, conn->ct) == 0) {
305 spin_unlock_irqrestore(&sfe_connections_lock, flags);
306 DEBUG_TRACE("UNKNOWN PROTOCOL OR CONNECTION CLOSING, SKIPPING\n");
307 return 0;
308 }
309 DEBUG_TRACE("INFO: calling sfe rule creation!\n");
310 conn->offloaded = 1;
Matthew McClintockc5739382013-12-02 14:17:46 -0600311 spin_unlock_irqrestore(&sfe_connections_lock, flags);
Matthew McClintock55c86982013-12-02 14:24:24 -0600312 sfe_ipv4_create_rule(p_sic);
Matthew McClintockc5739382013-12-02 14:17:46 -0600313 return 0;
Matthew McClintockea00adf2013-11-25 19:24:30 -0600314 }
Matthew McClintock55c86982013-12-02 14:24:24 -0600315
316 DEBUG_TRACE("GOT REQUEST TO OFFLOAD ALREADY OFFLOADED CONN FROM USERSPACE\n");
Matthew McClintockea00adf2013-11-25 19:24:30 -0600317 }
318 DEBUG_TRACE("SEARCH CONTINUES\n");
319 }
320
321 spin_unlock_irqrestore(&sfe_connections_lock, flags);
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600322 return 0;
323}
324
Matthew McClintock595ee8b2013-12-02 16:21:49 -0600325/* auto offload connection once we have this many packets*/
326static int offload_at_pkts = 128;
327
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600328/*
329 * fast_classifier_ipv4_post_routing_hook()
330 * Called for packets about to leave the box - either locally generated or forwarded from another interface
331 */
332static unsigned int fast_classifier_ipv4_post_routing_hook(unsigned int hooknum,
333 struct sk_buff *skb,
334 const struct net_device *in_unused,
335 const struct net_device *out,
336 int (*okfn)(struct sk_buff *))
337{
338 struct sfe_ipv4_create sic;
Matthew McClintockea00adf2013-11-25 19:24:30 -0600339 struct sfe_ipv4_create *p_sic;
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600340 struct net_device *in;
341 struct nf_conn *ct;
342 enum ip_conntrack_info ctinfo;
343 struct net_device *src_dev;
344 struct net_device *dest_dev;
345 struct net_device *src_br_dev = NULL;
346 struct net_device *dest_br_dev = NULL;
347 struct nf_conntrack_tuple orig_tuple;
348 struct nf_conntrack_tuple reply_tuple;
Matthew McClintockea00adf2013-11-25 19:24:30 -0600349 struct sfe_connection *conn;
350 int sfe_connections_size = 0;
351 unsigned long flags;
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600352
353 /*
354 * Don't process broadcast or multicast packets.
355 */
356 if (unlikely(skb->pkt_type == PACKET_BROADCAST)) {
357 DEBUG_TRACE("broadcast, ignoring\n");
358 return NF_ACCEPT;
359 }
360 if (unlikely(skb->pkt_type == PACKET_MULTICAST)) {
361 DEBUG_TRACE("multicast, ignoring\n");
362 return NF_ACCEPT;
363 }
364
365 /*
366 * Don't process packets that are not being forwarded.
367 */
368 in = dev_get_by_index(&init_net, skb->skb_iif);
369 if (!in) {
370 DEBUG_TRACE("packet not forwarding\n");
371 return NF_ACCEPT;
372 }
373
374 /*
375 * Don't process packets with non-standard 802.3 MAC address sizes.
376 */
377 if (unlikely(in->addr_len != ETH_ALEN)) {
378 DEBUG_TRACE("in device: %s not 802.3 hw addr len: %u, ignoring\n",
379 in->name, (unsigned)in->addr_len);
380 goto done1;
381 }
382 if (unlikely(out->addr_len != ETH_ALEN)) {
383 DEBUG_TRACE("out device: %s not 802.3 hw addr len: %u, ignoring\n",
384 out->name, (unsigned)out->addr_len);
385 goto done1;
386 }
387
388 /*
389 * Don't process packets that aren't being tracked by conntrack.
390 */
391 ct = nf_ct_get(skb, &ctinfo);
392 if (unlikely(!ct)) {
393 DEBUG_TRACE("no conntrack connection, ignoring\n");
394 goto done1;
395 }
396
397 /*
398 * Don't process untracked connections.
399 */
400 if (unlikely(ct == &nf_conntrack_untracked)) {
401 DEBUG_TRACE("untracked connection\n");
402 goto done1;
403 }
404
405 /*
406 * Don't process connections that require support from a 'helper' (typically a NAT ALG).
407 */
408 if (unlikely(nfct_help(ct))) {
409 DEBUG_TRACE("connection has helper\n");
410 goto done1;
411 }
412
413 /*
414 * Look up the details of our connection in conntrack.
415 *
416 * Note that the data we get from conntrack is for the "ORIGINAL" direction
417 * but our packet may actually be in the "REPLY" direction.
418 */
419 orig_tuple = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
420 reply_tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
421 sic.protocol = (int32_t)orig_tuple.dst.protonum;
422
423 /*
424 * Get addressing information, non-NAT first
425 */
426 sic.src_ip = (__be32)orig_tuple.src.u3.ip;
427 sic.dest_ip = (__be32)orig_tuple.dst.u3.ip;
428
429 /*
430 * NAT'ed addresses - note these are as seen from the 'reply' direction
431 * When NAT does not apply to this connection these will be identical to the above.
432 */
433 sic.src_ip_xlate = (__be32)reply_tuple.dst.u3.ip;
434 sic.dest_ip_xlate = (__be32)reply_tuple.src.u3.ip;
435
436 sic.flags = 0;
437
438 switch (sic.protocol) {
439 case IPPROTO_TCP:
440 sic.src_port = orig_tuple.src.u.tcp.port;
441 sic.dest_port = orig_tuple.dst.u.tcp.port;
442 sic.src_port_xlate = reply_tuple.dst.u.tcp.port;
443 sic.dest_port_xlate = reply_tuple.src.u.tcp.port;
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600444
445 /*
446 * Don't try to manage a non-established connection.
447 */
448 if (!test_bit(IPS_ASSURED_BIT, &ct->status)) {
449 DEBUG_TRACE("non-established connection\n");
450 goto done1;
451 }
452
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600453 break;
454
455 case IPPROTO_UDP:
456 sic.src_port = orig_tuple.src.u.udp.port;
457 sic.dest_port = orig_tuple.dst.u.udp.port;
458 sic.src_port_xlate = reply_tuple.dst.u.udp.port;
459 sic.dest_port_xlate = reply_tuple.src.u.udp.port;
460 break;
461
462 default:
463 DEBUG_TRACE("unhandled protocol %d\n", sic.protocol);
464 goto done1;
465 }
466
467 /*
Matthew McClintockea00adf2013-11-25 19:24:30 -0600468 * If we already have this connection in our list, skip it
469 * XXX: this may need to be optimized
470 */
471 DEBUG_TRACE("POST_ROUTE: checking new connection: %d src_ip: %d dst_ip: %d, src_port: %d, dst_port: %d\n",
472 sic.protocol, sic.src_ip, sic.dest_ip,
473 sic.src_port, sic.dest_port);
474 spin_lock_irqsave(&sfe_connections_lock, flags);
475 list_for_each_entry(conn, &sfe_connections, list) {
476 p_sic = conn->sic;
477 DEBUG_TRACE("\t\t-> COMPARING: proto: %d src_ip: %d dst_ip: %d, src_port: %d, dst_port: %d...",
478 p_sic->protocol, p_sic->src_ip, p_sic->dest_ip,
479 p_sic->src_port, p_sic->dest_port);
480
481 if (p_sic->protocol == sic.protocol &&
482 p_sic->src_port == sic.src_port &&
483 p_sic->dest_port == sic.dest_port &&
484 p_sic->src_ip == sic.src_ip &&
485 p_sic->dest_ip == sic.dest_ip ) {
Matthew McClintocke1cf6f22013-11-27 13:27:09 -0600486 if (skb->mark) {
487 DEBUG_TRACE("UPDATING MARK %x\n", skb->mark);
488 }
489 p_sic->mark = skb->mark;
Matthew McClintockc5739382013-12-02 14:17:46 -0600490
Ben Menchaca0971b7a2014-01-10 14:43:02 -0600491
Matthew McClintockc5739382013-12-02 14:17:46 -0600492 conn->hits++;
Matthew McClintock55c86982013-12-02 14:24:24 -0600493 if (conn->offloaded == 0) {
Matthew McClintock595ee8b2013-12-02 16:21:49 -0600494 if (conn->hits == offload_at_pkts) {
Matthew McClintock55c86982013-12-02 14:24:24 -0600495 DEBUG_TRACE("OFFLOADING CONNECTION, TOO MANY HITS\n");
496 if (fast_classifier_update_protocol(p_sic, conn->ct) == 0) {
497 spin_unlock_irqrestore(&sfe_connections_lock, flags);
498 DEBUG_TRACE("UNKNOWN PROTOCOL OR CONNECTION CLOSING, SKIPPING\n");
Matthew McClintock595ee8b2013-12-02 16:21:49 -0600499 sfe_ipv4_create_rule(p_sic);
Matthew McClintock55c86982013-12-02 14:24:24 -0600500 return 0;
501 }
502 DEBUG_TRACE("INFO: calling sfe rule creation!\n");
503 conn->offloaded = 1;
Matthew McClintockc5739382013-12-02 14:17:46 -0600504 spin_unlock_irqrestore(&sfe_connections_lock, flags);
Matthew McClintock55c86982013-12-02 14:24:24 -0600505 sfe_ipv4_create_rule(p_sic);
Matthew McClintock16a47ec2013-12-05 17:03:15 -0600506 goto done1;
Matthew McClintock595ee8b2013-12-02 16:21:49 -0600507 } else if (conn->hits > offload_at_pkts) {
508 DEBUG_ERROR("ERROR: MORE THAN %d HITS AND NOT OFFLOADED\n", offload_at_pkts);
Matthew McClintock16a47ec2013-12-05 17:03:15 -0600509 spin_unlock_irqrestore(&sfe_connections_lock, flags);
510 goto done1;
Matthew McClintockc5739382013-12-02 14:17:46 -0600511 }
Matthew McClintock16a47ec2013-12-05 17:03:15 -0600512 }
513
514 if (conn->offloaded == 1) {
Matthew McClintockc5739382013-12-02 14:17:46 -0600515 struct sfe_ipv4_mark mark;
516
517 DEBUG_TRACE("CONNECTION ALREADY OFFLOADED, UPDATING MARK\n");
518 mark.protocol = p_sic->protocol;
519 mark.src_ip = p_sic->src_ip;
520 mark.src_port = p_sic->src_port;
521 mark.dest_ip = p_sic->dest_ip;
522 mark.dest_port = p_sic->dest_port;
523 mark.mark = skb->mark;
524 sfe_ipv4_mark_rule(&mark);
Matthew McClintock16a47ec2013-12-05 17:03:15 -0600525 spin_unlock_irqrestore(&sfe_connections_lock, flags);
Matthew McClintock55c86982013-12-02 14:24:24 -0600526 sfe_ipv4_create_rule(p_sic);
Matthew McClintock16a47ec2013-12-05 17:03:15 -0600527 goto done1;
Matthew McClintockc5739382013-12-02 14:17:46 -0600528 }
529
530 DEBUG_TRACE("FOUND, SKIPPING\n");
Matthew McClintockea00adf2013-11-25 19:24:30 -0600531 spin_unlock_irqrestore(&sfe_connections_lock, flags);
532 goto done1;
Matthew McClintockea00adf2013-11-25 19:24:30 -0600533 }
534
Matthew McClintock55c86982013-12-02 14:24:24 -0600535 DEBUG_TRACE("SEARCH CONTINUES");
Matthew McClintockea00adf2013-11-25 19:24:30 -0600536 sfe_connections_size++;
537 }
538 spin_unlock_irqrestore(&sfe_connections_lock, flags);
539
540 /*
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600541 * Get the MAC addresses that correspond to source and destination host addresses.
542 */
543 if (!fast_classifier_find_mac_addr(sic.src_ip, sic.src_mac)) {
544 DEBUG_TRACE("failed to find MAC address for src IP: %pI4\n", &sic.src_ip);
545 goto done1;
546 }
547
548 if (!fast_classifier_find_mac_addr(sic.src_ip_xlate, sic.src_mac_xlate)) {
549 DEBUG_TRACE("failed to find MAC address for xlate src IP: %pI4\n", &sic.src_ip_xlate);
550 goto done1;
551 }
552
553 /*
554 * Do dest now
555 */
556 if (!fast_classifier_find_mac_addr(sic.dest_ip, sic.dest_mac)) {
557 DEBUG_TRACE("failed to find MAC address for dest IP: %pI4\n", &sic.dest_ip);
558 goto done1;
559 }
560
561 if (!fast_classifier_find_mac_addr(sic.dest_ip_xlate, sic.dest_mac_xlate)) {
562 DEBUG_TRACE("failed to find MAC address for xlate dest IP: %pI4\n", &sic.dest_ip_xlate);
563 goto done1;
564 }
565
566 /*
567 * Get our device info. If we're dealing with the "reply" direction here then
568 * we'll need things swapped around.
569 */
570 if (ctinfo < IP_CT_IS_REPLY) {
571 src_dev = in;
572 dest_dev = (struct net_device *)out;
573 } else {
574 src_dev = (struct net_device *)out;
575 dest_dev = in;
576 }
577
578#if (!SFE_HOOK_ABOVE_BRIDGE)
579 /*
580 * Now our devices may actually be a bridge interface. If that's
581 * the case then we need to hunt down the underlying interface.
582 */
583 if (src_dev->priv_flags & IFF_EBRIDGE) {
584 src_br_dev = br_port_dev_get(src_dev, sic.src_mac);
585 if (!src_br_dev) {
586 DEBUG_TRACE("no port found on bridge\n");
587 goto done1;
588 }
589
590 src_dev = src_br_dev;
591 }
592
593 if (dest_dev->priv_flags & IFF_EBRIDGE) {
594 dest_br_dev = br_port_dev_get(dest_dev, sic.dest_mac_xlate);
595 if (!dest_br_dev) {
596 DEBUG_TRACE("no port found on bridge\n");
597 goto done2;
598 }
599
600 dest_dev = dest_br_dev;
601 }
602#else
603 /*
604 * Our devices may actually be part of a bridge interface. If that's
605 * the case then find the bridge interface instead.
606 */
607 if (src_dev->priv_flags & IFF_BRIDGE_PORT) {
608 src_br_dev = src_dev->master;
609 if (!src_br_dev) {
610 DEBUG_TRACE("no bridge found for: %s\n", src_dev->name);
611 goto done1;
612 }
613
614 dev_hold(src_br_dev);
615 src_dev = src_br_dev;
616 }
617
618 if (dest_dev->priv_flags & IFF_BRIDGE_PORT) {
619 dest_br_dev = dest_dev->master;
620 if (!dest_br_dev) {
621 DEBUG_TRACE("no bridge found for: %s\n", dest_dev->name);
622 goto done2;
623 }
624
625 dev_hold(dest_br_dev);
626 dest_dev = dest_br_dev;
627 }
628#endif
629
630 sic.src_dev = src_dev;
631 sic.dest_dev = dest_dev;
632
633// XXX - these MTUs need handling correctly!
634 sic.src_mtu = 1500;
635 sic.dest_mtu = 1500;
636
Matthew McClintocke1cf6f22013-11-27 13:27:09 -0600637 if (skb->mark) {
638 DEBUG_TRACE("SKB MARK NON ZERO %x\n", skb->mark);
639 }
640 sic.mark = skb->mark;
641
Matthew McClintock29cd6aa2014-01-14 18:27:10 -0600642 if (last_pppox_sock && last_pppox_sock->pppoe_dev == in->name) {
Ben Menchaca0971b7a2014-01-10 14:43:02 -0600643 struct sock *sk = &last_pppox_sock->sk;
644
645 if (sk->sk_family == PF_PPPOX && sk->sk_protocol == PX_PROTO_OE) {
646 sic.dest_pppoe_sk = sk;
647 }
648 } else {
649 sic.dest_pppoe_sk = NULL;
650 }
651 sic.src_pppoe_sk = NULL;
652
Matthew McClintockea00adf2013-11-25 19:24:30 -0600653 conn = kmalloc(sizeof(struct sfe_connection), GFP_KERNEL);
654 if (conn == NULL) {
655 printk(KERN_CRIT "ERROR: no memory for sfe\n");
656 goto done3;
657 }
Matthew McClintockc5739382013-12-02 14:17:46 -0600658 conn->hits = 0;
Matthew McClintock55c86982013-12-02 14:24:24 -0600659 conn->offloaded = 0;
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600660
Matthew McClintockea00adf2013-11-25 19:24:30 -0600661 p_sic = kmalloc(sizeof(struct sfe_ipv4_create), GFP_KERNEL);
662 if (p_sic == NULL) {
663 printk(KERN_CRIT "ERROR: no memory for sfe\n");
664 kfree(conn);
665 goto done3;
666 }
667
668 memcpy(p_sic, &sic, sizeof(sic));
669 conn->sic = p_sic;
670 conn->ct = ct;
671 DEBUG_TRACE(" -> adding item to sfe_connections, new size: %d\n", ++sfe_connections_size);
672 DEBUG_TRACE("POST_ROUTE: new offloadable connection: proto: %d src_ip: %d dst_ip: %d, src_port: %d, dst_port: %d\n",
673 p_sic->protocol, p_sic->src_ip, p_sic->dest_ip,
674 p_sic->src_port, p_sic->dest_port);
675 spin_lock_irqsave(&sfe_connections_lock, flags);
676 list_add_tail(&(conn->list), &sfe_connections);
677 spin_unlock_irqrestore(&sfe_connections_lock, flags);
678done3:
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600679 /*
680 * If we had bridge ports then release them too.
681 */
682 if (dest_br_dev) {
683 dev_put(dest_br_dev);
684 }
685
686done2:
687 if (src_br_dev) {
688 dev_put(src_br_dev);
689 }
690
691done1:
692 /*
693 * Release the interface on which this skb arrived
694 */
695 dev_put(in);
696
697 return NF_ACCEPT;
698}
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600699
700#ifdef CONFIG_NF_CONNTRACK_EVENTS
701/*
702 * fast_classifier_conntrack_event()
703 * Callback event invoked when a conntrack connection's state changes.
704 */
Matthew McClintock0680e9f2013-11-26 15:43:10 -0600705#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
706static int fast_classifier_conntrack_event(struct notifier_block *this,
707 unsigned int events, struct nf_ct_event *item)
708#else
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600709static int fast_classifier_conntrack_event(unsigned int events, struct nf_ct_event *item)
Matthew McClintock0680e9f2013-11-26 15:43:10 -0600710#endif
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600711{
712 struct sfe_ipv4_destroy sid;
713 struct nf_conn *ct = item->ct;
714 struct nf_conntrack_tuple orig_tuple;
Matthew McClintockea00adf2013-11-25 19:24:30 -0600715 struct sfe_connection *conn;
716 struct sfe_ipv4_create *p_sic;
717 int sfe_found_match = 0;
718 int sfe_connections_size = 0;
719 unsigned long flags;
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600720
Matthew McClintocke1cf6f22013-11-27 13:27:09 -0600721 if (events & IPCT_MARK) {
722 struct sfe_ipv4_mark mark;
723 orig_tuple = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
724
725 mark.protocol = (int32_t)orig_tuple.dst.protonum;
726 mark.src_ip = (__be32)orig_tuple.src.u3.ip;
727 mark.dest_ip = (__be32)orig_tuple.dst.u3.ip;
728 switch (mark.protocol) {
729 case IPPROTO_TCP:
730 mark.src_port = orig_tuple.src.u.tcp.port;
731 mark.dest_port = orig_tuple.dst.u.tcp.port;
732 break;
733 case IPPROTO_UDP:
734 mark.src_port = orig_tuple.src.u.udp.port;
735 mark.dest_port = orig_tuple.dst.u.udp.port;
736 break;
737 default:
738 break;
739 }
740
741 sfe_ipv4_mark_rule(&mark);
742 }
743
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600744 /*
745 * If we don't have a conntrack entry then we're done.
746 */
747 if (unlikely(!ct)) {
748 DEBUG_WARN("no ct in conntrack event callback\n");
749 return NOTIFY_DONE;
750 }
751
752 /*
753 * If this is an untracked connection then we can't have any state either.
754 */
755 if (unlikely(ct == &nf_conntrack_untracked)) {
756 DEBUG_TRACE("ignoring untracked conn\n");
757 return NOTIFY_DONE;
758 }
759
760 /*
761 * Ignore anything other than IPv4 connections.
762 */
763 if (unlikely(nf_ct_l3num(ct) != AF_INET)) {
764 DEBUG_TRACE("ignoring non-IPv4 conn\n");
765 return NOTIFY_DONE;
766 }
767
768 /*
Matthew McClintocke1cf6f22013-11-27 13:27:09 -0600769 * We're only interested in destroy events at this point
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600770 */
771 if (unlikely(!(events & (1 << IPCT_DESTROY)))) {
772 DEBUG_TRACE("ignoring non-destroy event\n");
773 return NOTIFY_DONE;
774 }
775
776 orig_tuple = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
777 sid.protocol = (int32_t)orig_tuple.dst.protonum;
778
779 /*
780 * Extract information from the conntrack connection. We're only interested
781 * in nominal connection information (i.e. we're ignoring any NAT information).
782 */
783 sid.src_ip = (__be32)orig_tuple.src.u3.ip;
784 sid.dest_ip = (__be32)orig_tuple.dst.u3.ip;
785
786 switch (sid.protocol) {
787 case IPPROTO_TCP:
788 sid.src_port = orig_tuple.src.u.tcp.port;
789 sid.dest_port = orig_tuple.dst.u.tcp.port;
790 break;
791
792 case IPPROTO_UDP:
793 sid.src_port = orig_tuple.src.u.udp.port;
794 sid.dest_port = orig_tuple.dst.u.udp.port;
795 break;
796
797 default:
798 DEBUG_TRACE("unhandled protocol: %d\n", sid.protocol);
799 return NOTIFY_DONE;
800 }
801
Matthew McClintockea00adf2013-11-25 19:24:30 -0600802 /*
803 * If we already have this connection in our list, skip it
804 * XXX: this may need to be optimized
805 */
806 DEBUG_TRACE("INFO: want to clean up: proto: %d src_ip: %d dst_ip: %d, src_port: %d, dst_port: %d\n",
807 sid.protocol, sid.src_ip, sid.dest_ip,
808 sid.src_port, sid.dest_port);
809 spin_lock_irqsave(&sfe_connections_lock, flags);
810 list_for_each_entry(conn, &sfe_connections, list) {
811 p_sic = conn->sic;
812 DEBUG_TRACE(" -> COMPARING: proto: %d src_ip: %d dst_ip: %d, src_port: %d, dst_port: %d...",
813 p_sic->protocol, p_sic->src_ip, p_sic->dest_ip,
814 p_sic->src_port, p_sic->dest_port);
815
816 if (p_sic->protocol == sid.protocol &&
817 p_sic->src_port == sid.src_port &&
818 p_sic->dest_port == sid.dest_port &&
819 p_sic->src_ip == sid.src_ip &&
820 p_sic->dest_ip == sid.dest_ip ) {
821 sfe_found_match = 1;
822 DEBUG_TRACE("FOUND, DELETING\n");
823 break;
824 } else {
825 DEBUG_TRACE("SEARCH CONTINUES\n");
826 }
827 sfe_connections_size++;
828 }
829
830 if (sfe_found_match) {
831 DEBUG_TRACE("INFO: connection over proto: %d src_ip: %d dst_ip: %d, src_port: %d, dst_port: %d\n",
832 p_sic->protocol, p_sic->src_ip, p_sic->dest_ip,
833 p_sic->src_port, p_sic->dest_port);
834 kfree(conn->sic);
835 list_del(&(conn->list));
836 kfree(conn);
837 } else {
838 DEBUG_TRACE("NO MATCH FOUND IN %d ENTRIES!!\n", sfe_connections_size);
839 }
840 spin_unlock_irqrestore(&sfe_connections_lock, flags);
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600841
842 sfe_ipv4_destroy_rule(&sid);
843 return NOTIFY_DONE;
844}
845
/*
 * Netfilter conntrack event system to monitor connection tracking changes
 */
#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
/*
 * Chain-events kernels deliver conntrack events through a standard
 * notifier block.
 */
static struct notifier_block fast_classifier_conntrack_notifier = {
	.notifier_call = fast_classifier_conntrack_event,
};
#else
/*
 * Stock kernels use the dedicated nf_ct_event_notifier callback instead.
 */
static struct nf_ct_event_notifier fast_classifier_conntrack_notifier = {
	.fcn = fast_classifier_conntrack_event,
};
#endif
#endif
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600859
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600860/*
861 * Structure to establish a hook into the post routing netfilter point - this
862 * will pick up local outbound and packets going from one interface to another.
863 *
864 * Note: see include/linux/netfilter_ipv4.h for info related to priority levels.
865 * We want to examine packets after NAT translation and any ALG processing.
866 */
867static struct nf_hook_ops fast_classifier_ipv4_ops_post_routing[] __read_mostly = {
868 {
869 .hook = fast_classifier_ipv4_post_routing_hook,
870 .owner = THIS_MODULE,
871 .pf = PF_INET,
872 .hooknum = NF_INET_POST_ROUTING,
873 .priority = NF_IP_PRI_NAT_SRC + 1,
874 },
875};
Matthew McClintocke1bcfe42013-11-22 15:33:09 -0600876
/*
 * fast_classifier_sync_rule()
 *	Synchronize a connection's state.
 *
 * Called by the SFE engine with per-connection statistics (sis) so that the
 * kernel's conntrack entry does not time out or lose accounting while the
 * flow is being forwarded in the fast path.  Looks up the conntrack entry
 * matching the sync message, refreshes its timeout, folds in byte/packet
 * counters, and (for TCP) advances the tracked window state.
 */
static void fast_classifier_sync_rule(struct sfe_ipv4_sync *sis)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conntrack_tuple tuple;
	struct nf_conn *ct;
	struct nf_conn_counter *acct;

	/*
	 * Create a tuple so as to be able to look up a connection
	 */
	memset(&tuple, 0, sizeof(tuple));
	tuple.src.u3.ip = sis->src_ip;
	tuple.src.u.all = (__be16)sis->src_port;
	tuple.src.l3num = AF_INET;

	tuple.dst.u3.ip = sis->dest_ip;
	tuple.dst.dir = IP_CT_DIR_ORIGINAL;
	tuple.dst.protonum = (uint8_t)sis->protocol;
	tuple.dst.u.all = (__be16)sis->dest_port;

	DEBUG_TRACE("update connection - p: %d, s: %pI4:%u, d: %pI4:%u\n",
		    (int)tuple.dst.protonum,
		    &tuple.src.u3.ip, (unsigned int)ntohs(tuple.src.u.all),
		    &tuple.dst.u3.ip, (unsigned int)ntohs(tuple.dst.u.all));

	/*
	 * Look up conntrack connection
	 */
	h = nf_conntrack_find_get(&init_net, NF_CT_DEFAULT_ZONE, &tuple);
	if (unlikely(!h)) {
		/* Conntrack entry may already have been destroyed; nothing to sync. */
		DEBUG_TRACE("no connection found\n");
		return;
	}

	ct = nf_ct_tuplehash_to_ctrack(h);
	NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);

	/*
	 * Only update if this is not a fixed timeout
	 */
	if (!test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status)) {
		/* Push the expiry out by the time the flow spent in the fast path. */
		ct->timeout.expires += sis->delta_jiffies;
	}

	/*
	 * Fold the fast-path counters into conntrack accounting, if enabled.
	 * NOTE(review): assumes a kernel where nf_conn_acct_find() yields a
	 * struct nf_conn_counter array indexed by direction -- confirm for
	 * the target kernel version.
	 */
	acct = nf_conn_acct_find(ct);
	if (acct) {
		spin_lock_bh(&ct->lock);
		atomic64_set(&acct[IP_CT_DIR_ORIGINAL].packets, sis->src_packet_count);
		atomic64_set(&acct[IP_CT_DIR_ORIGINAL].bytes, sis->src_byte_count);
		atomic64_set(&acct[IP_CT_DIR_REPLY].packets, sis->dest_packet_count);
		atomic64_set(&acct[IP_CT_DIR_REPLY].bytes, sis->dest_byte_count);
		spin_unlock_bh(&ct->lock);
	}

	switch (sis->protocol) {
	case IPPROTO_TCP:
		/*
		 * Advance conntrack's TCP window tracking monotonically:
		 * max-window grows only, and end/maxend move forward using
		 * signed 32-bit serial-number comparison so sequence-space
		 * wraparound is handled correctly.
		 */
		spin_lock_bh(&ct->lock);
		if (ct->proto.tcp.seen[0].td_maxwin < sis->src_td_max_window) {
			ct->proto.tcp.seen[0].td_maxwin = sis->src_td_max_window;
		}
		if ((int32_t)(ct->proto.tcp.seen[0].td_end - sis->src_td_end) < 0) {
			ct->proto.tcp.seen[0].td_end = sis->src_td_end;
		}
		if ((int32_t)(ct->proto.tcp.seen[0].td_maxend - sis->src_td_max_end) < 0) {
			ct->proto.tcp.seen[0].td_maxend = sis->src_td_max_end;
		}
		if (ct->proto.tcp.seen[1].td_maxwin < sis->dest_td_max_window) {
			ct->proto.tcp.seen[1].td_maxwin = sis->dest_td_max_window;
		}
		if ((int32_t)(ct->proto.tcp.seen[1].td_end - sis->dest_td_end) < 0) {
			ct->proto.tcp.seen[1].td_end = sis->dest_td_end;
		}
		if ((int32_t)(ct->proto.tcp.seen[1].td_maxend - sis->dest_td_max_end) < 0) {
			ct->proto.tcp.seen[1].td_maxend = sis->dest_td_max_end;
		}
		spin_unlock_bh(&ct->lock);
		break;
	}

	/*
	 * Release connection
	 */
	nf_ct_put(ct);
}
965
966/*
967 * fast_classifier_device_event()
968 */
969static int fast_classifier_device_event(struct notifier_block *this, unsigned long event, void *ptr)
970{
971 struct net_device *dev = (struct net_device *)ptr;
972
973 switch (event) {
974 case NETDEV_DOWN:
975 if (dev) {
976 sfe_ipv4_destroy_all_rules_for_dev(dev);
977 }
978 break;
979 }
980
981 return NOTIFY_DONE;
982}
983
984/*
985 * fast_classifier_inet_event()
986 */
987static int fast_classifier_inet_event(struct notifier_block *this, unsigned long event, void *ptr)
988{
989 struct net_device *dev = ((struct in_ifaddr *)ptr)->ifa_dev->dev;
990 return fast_classifier_device_event(this, event, dev);
991}
992
/*
 * fast_classifier_get_offload_at_pkts()
 *	sysfs show handler: print the current packet-count threshold at
 *	which a connection is offloaded to the fast path.
 *
 * Returns the number of bytes written into buf.
 */
static ssize_t fast_classifier_get_offload_at_pkts(struct device *dev,
						   struct device_attribute *attr,
						   char *buf)
{
	return sprintf(buf, "%d\n", offload_at_pkts);
}
1002
1003/*
1004 * fast_classifier_set_offload_at_pkts()
1005 */
1006static ssize_t fast_classifier_set_offload_at_pkts(struct device *dev,
1007 struct device_attribute *attr,
1008 char *buf, size_t size)
1009{
1010 int new;
1011
1012 if (strict_strtol(buf, 0, &new) < 1)
1013 return -EINVAL;
1014
1015 offload_at_pkts = new;
1016
1017 return size;
1018}
1019
1020/*
1021 * sysfs attributes.
1022 */
1023static const struct device_attribute fast_classifier_offload_at_pkts_attr =
1024 __ATTR(offload_at_pkts, S_IWUGO | S_IRUGO, fast_classifier_get_offload_at_pkts, fast_classifier_set_offload_at_pkts);
1025
1026/*
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06001027 * fast_classifier_init()
1028 */
1029static int __init fast_classifier_init(void)
1030{
1031 struct fast_classifier *sc = &__sc;
1032 int result = -1;
Matthew McClintock6ab3b3f2013-11-14 15:39:15 -06001033
1034 printk(KERN_ALERT "fast-classifier: starting up\n");
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06001035 DEBUG_INFO("SFE CM init\n");
Matthew McClintock6ab3b3f2013-11-14 15:39:15 -06001036
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06001037 /*
1038 * Create sys/fast_classifier
1039 */
1040 sc->sys_fast_classifier = kobject_create_and_add("fast_classifier", NULL);
1041 if (!sc->sys_fast_classifier) {
1042 DEBUG_ERROR("failed to register fast_classifier\n");
1043 goto exit1;
1044 }
Matthew McClintock6ab3b3f2013-11-14 15:39:15 -06001045
Matthew McClintock595ee8b2013-12-02 16:21:49 -06001046 result = sysfs_create_file(sc->sys_fast_classifier, &fast_classifier_offload_at_pkts_attr.attr);
1047 if (result) {
1048 DEBUG_ERROR("failed to register debug dev file: %d\n", result);
1049 goto exit2;
1050 }
1051
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06001052 sc->dev_notifier.notifier_call = fast_classifier_device_event;
1053 sc->dev_notifier.priority = 1;
1054 register_netdevice_notifier(&sc->dev_notifier);
1055
1056 sc->inet_notifier.notifier_call = fast_classifier_inet_event;
1057 sc->inet_notifier.priority = 1;
1058 register_inetaddr_notifier(&sc->inet_notifier);
1059
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06001060 /*
1061 * Register our netfilter hooks.
1062 */
1063 result = nf_register_hooks(fast_classifier_ipv4_ops_post_routing, ARRAY_SIZE(fast_classifier_ipv4_ops_post_routing));
1064 if (result < 0) {
1065 DEBUG_ERROR("can't register nf post routing hook: %d\n", result);
Matthew McClintock595ee8b2013-12-02 16:21:49 -06001066 goto exit3;
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06001067 }
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06001068
1069#ifdef CONFIG_NF_CONNTRACK_EVENTS
1070 /*
1071 * Register a notifier hook to get fast notifications of expired connections.
1072 */
1073 result = nf_conntrack_register_notifier(&init_net, &fast_classifier_conntrack_notifier);
1074 if (result < 0) {
1075 DEBUG_ERROR("can't register nf notifier hook: %d\n", result);
Matthew McClintock595ee8b2013-12-02 16:21:49 -06001076 goto exit4;
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06001077 }
1078#endif
1079
Dave Hudsonfd7fd072013-12-07 22:34:18 +00001080 result = genl_register_family(&fast_classifier_gnl_family);
1081 if (result!= 0) {
1082 goto exit5;
1083 }
1084
1085 result = genl_register_ops(&fast_classifier_gnl_family, &fast_classifier_gnl_ops_recv);
1086 if (result != 0) {
1087 goto exit6;
1088 }
1089
1090 printk(KERN_ALERT "fast-classifier: registered\n");
1091
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06001092 spin_lock_init(&sc->lock);
1093
1094 /*
1095 * Hook the receive path in the network stack.
1096 */
1097 BUG_ON(athrs_fast_nat_recv != NULL);
1098 RCU_INIT_POINTER(athrs_fast_nat_recv, fast_classifier_recv);
1099
1100 /*
1101 * Hook the shortcut sync callback.
1102 */
1103 sfe_ipv4_register_sync_rule_callback(fast_classifier_sync_rule);
1104
Matthew McClintock6f29aa12013-11-06 15:49:01 -06001105 return 0;
Matthew McClintock6ab3b3f2013-11-14 15:39:15 -06001106
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06001107exit6:
Matthew McClintock595ee8b2013-12-02 16:21:49 -06001108 genl_unregister_family(&fast_classifier_gnl_family);
1109
1110exit5:
1111#ifdef CONFIG_NF_CONNTRACK_EVENTS
Dave Hudsonfd7fd072013-12-07 22:34:18 +00001112 nf_conntrack_unregister_notifier(&init_net, &fast_classifier_conntrack_notifier);
Matthew McClintock595ee8b2013-12-02 16:21:49 -06001113#endif
1114
1115exit4:
Dave Hudsonfd7fd072013-12-07 22:34:18 +00001116 nf_unregister_hooks(fast_classifier_ipv4_ops_post_routing, ARRAY_SIZE(fast_classifier_ipv4_ops_post_routing));
Matthew McClintock595ee8b2013-12-02 16:21:49 -06001117
1118exit3:
Dave Hudsonfd7fd072013-12-07 22:34:18 +00001119 unregister_inetaddr_notifier(&sc->inet_notifier);
1120 unregister_netdevice_notifier(&sc->dev_notifier);
Matthew McClintock595ee8b2013-12-02 16:21:49 -06001121 sysfs_remove_file(sc->sys_fast_classifier, &fast_classifier_offload_at_pkts_attr.attr);
1122
1123exit2:
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06001124 kobject_put(sc->sys_fast_classifier);
1125
1126exit1:
1127 return result;
Matthew McClintock6f29aa12013-11-06 15:49:01 -06001128}
1129
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06001130/*
1131 * fast_classifier_exit()
1132 */
1133static void __exit fast_classifier_exit(void)
Matthew McClintock6f29aa12013-11-06 15:49:01 -06001134{
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06001135 struct fast_classifier *sc = &__sc;
1136 int result = -1;
Matthew McClintock6ab3b3f2013-11-14 15:39:15 -06001137
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06001138 DEBUG_INFO("SFE CM exit\n");
Matthew McClintock6ab3b3f2013-11-14 15:39:15 -06001139 printk(KERN_ALERT "fast-classifier: shutting down\n");
1140
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06001141 /*
1142 * Unregister our sync callback.
1143 */
1144 sfe_ipv4_register_sync_rule_callback(NULL);
1145
1146 /*
1147 * Unregister our receive callback.
1148 */
1149 RCU_INIT_POINTER(athrs_fast_nat_recv, NULL);
1150
1151 /*
1152 * Wait for all callbacks to complete.
1153 */
1154 rcu_barrier();
1155
1156 /*
1157 * Destroy all connections.
1158 */
1159 sfe_ipv4_destroy_all_rules_for_dev(NULL);
1160
Dave Hudsonfd7fd072013-12-07 22:34:18 +00001161 result = genl_unregister_ops(&fast_classifier_gnl_family, &fast_classifier_gnl_ops_recv);
1162 if (result != 0) {
1163 printk(KERN_CRIT "Unable to unreigster genl_ops\n");
1164 }
1165
1166 result = genl_unregister_family(&fast_classifier_gnl_family);
1167 if (result != 0) {
1168 printk(KERN_CRIT "Unable to unreigster genl_family\n");
1169 }
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06001170
1171#ifdef CONFIG_NF_CONNTRACK_EVENTS
1172 nf_conntrack_unregister_notifier(&init_net, &fast_classifier_conntrack_notifier);
1173
1174#endif
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06001175 nf_unregister_hooks(fast_classifier_ipv4_ops_post_routing, ARRAY_SIZE(fast_classifier_ipv4_ops_post_routing));
Matthew McClintocke1bcfe42013-11-22 15:33:09 -06001176
1177 unregister_inetaddr_notifier(&sc->inet_notifier);
1178 unregister_netdevice_notifier(&sc->dev_notifier);
1179
1180 kobject_put(sc->sys_fast_classifier);
Matthew McClintock6f29aa12013-11-06 15:49:01 -06001181}
1182
/*
 * Module entry/exit points.  module_init()/module_exit() expand to complete
 * definitions, so the missing trailing semicolons are harmless.
 */
module_init(fast_classifier_init)
module_exit(fast_classifier_exit)

MODULE_AUTHOR("Qualcomm Atheros Inc.");
MODULE_DESCRIPTION("Shortcut Forwarding Engine - Connection Manager");
MODULE_LICENSE("GPL");
1189